repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool)
---|---|---|---|---|---|---|---|---|---|---|
Ximpia/ximpia | ximpia/xpsite/constants.py | 1 | 3312 | # coding: utf-8
class Services:
USERS = 'Users'
class Slugs:
LOGIN = 'login'
LOGOUT = 'logout'
REMINDER_NEW_PASSWORD = 'reminder-new-password'
CHANGE_PASSWORD = 'change-password'
SIGNUP = 'signup'
ACTIVATION_USER = 'activation-user'
HOME_LOGIN = 'in'
SITE = 'site'
REQUEST_REMINDER = 'request-reminder'
FINALIZE_REMINDER = 'finalize-reminder'
ACTIVATE_USER = 'activate-user'
class Views:
LOGIN = 'login'
LOGOUT = 'logout'
REMINDER_NEW_PASSWORD = 'reminderNewPassword'
CHANGE_PASSWORD = 'changePassword'
SIGNUP = 'signup'
ACTIVATION_USER = 'activationUser'
HOME_LOGIN = 'homeLogin'
class Actions:
LOGIN = 'login'
REQUEST_REMINDER = 'requestReminder'
FINALIZE_REMINDER = 'finalizeReminder'
LOGOUT = 'logout'
SIGNUP = 'signup'
CHANGE_PASSWORD = 'changePassword'
ACTIVATE_USER = 'activateUser'
class Menus:
SYS = 'sys'
SIGN_OUT = 'signOut'
CHANGE_PASSWORD = 'changePassword'
HOME_LOGIN = 'homeLogin'
HOME = 'home'
LOGIN = 'login'
SIGNUP = 'signup'
class Tmpls:
LOGIN = 'login'
PASSWORD_REMINDER = 'password_reminder'
LOGOUT = 'logout'
CHANGE_PASSWORD = 'change_password'
SIGNUP = 'signup'
REMINDER_NEW_PASSWORD = 'reminder_new_password'
ACTIVATION_USER = 'activation_user'
HOME_LOGIN = 'home_login'
class Flows:
pass
#APP = 'site'
XIMPIA = 'ximpia'
TWITTER = 'twitter'
FACEBOOK = 'facebook'
XING = 'xing'
LINKEDIN = 'linkedin'
GOOGLE = 'google'
EMAIL = 'email'
PASSWORD = 'password'
SMS = 'sms'
FILE_QUOTA_DEFAULT = 2000
FILE_QUOTA_ORG = 5000
MSG_MODE_REC = 'received'
MSG_MODE_SENT = 'sent'
USER_SETTINGS = 'user_settings'
SETTINGS_ALLOW_PRIVATE_GRP_SUBS = 'ALLOW_PRIVATE_GRP_SUBS'
NUMBER_MATCHES = 10
OK = 'ok'
BLOCKED = 'blocked'
UNBLOCKED = 'unblocked'
ERROR = 'error'
ARCHIVE = 'archive'
UNARCHIVE = 'unarchive'
PROFESSIONAL = 'professional'
USER = 'user'
ORGANIZATION = 'organization'
SETTINGS_DEFAULT = ''
IMPORT = 'import'
GMAIL = 'gmail'
YAHOO = 'yahoo'
MSN = 'msn'
HOME = 'home'
WORK = 'work'
MOBILE = 'mobile'
WORK_MOBILE = 'work_mobile'
FAX = 'fax'
NETWORK = 'network'
SITE = 'site'
BLOG = 'blog'
FACEBOOK_PAGE = 'facebook_page'
IM = 'im'
RESERVED_GROUP_NAME_LIST = ['ximpia']
PENDING = 'pending'
USED = 'used'
NUMBER_INVITATIONS_USER = 15
NUMBER_INVITATIONS_STAFF = 500
SOCIAL_NETWORK = 'social_network'
# Signup constants
SIGNUP_USER_GROUP_ID = '1'
# Parameters
PARAM_LOGIN = 'LOGIN'
PARAM_REMINDER_DAYS = 'REMINDER_DAYS'
PARAM_USER_STATUS = 'USER_STATUS'
PARAM_USER_STATUS_PENDING = 'PENDING'
PARAM_USER_STATUS_ACTIVE = 'ACTIVE'
PARAM_ADDRESS_TYPE = 'ADDRESS_TYPE'
PARAM_ADDRESS_TYPE_PERSONAL = 'PERSONAL'
PARAM_CATEGORY_TYPE = 'CATEGORY_TYPE'
KEY_HAS_VALIDATED_EMAIL = 'HAS_VALIDATED_EMAIL'
# Cookies
COOKIE_LOGIN_REDIRECT = 'XP_LR'
# Meta Keys
META_REMINDER_ID = 'REMINDER_ID'
META_RESET_PASSWORD_DATE = 'RESET_PASSWORD_DATE'
# Settings
SET_SITE_SIGNUP_INVITATION = 'SITE_SIGNUP_INVITATION'
SET_SIGNUP_SOCIAL_NETWORK = 'SIGNUP_SOCIAL_NETWORK'
SET_SIGNUP_USER_PASSWORD = 'SIGNUP_USER_PASSWORD'
SET_REMINDER_DAYS = 'REMINDER_DAYS'
SET_NUMBER_RESULTS_LIST = 'NUMBER_RESULTS_LIST'
| apache-2.0 | 3,870,277,391,610,701,300 | 22.533333 | 58 | 0.658514 | false |
profxj/desispec | py/desispec/io/frame.py | 1 | 3003 | """
desispec.io.frame
=================
IO routines for frame.
"""
import os.path
import numpy as np
import scipy,scipy.sparse
from astropy.io import fits
from desispec.frame import Frame
from desispec.io import findfile
from desispec.io.util import fitsheader, native_endian, makepath
from desispec.log import get_logger
log = get_logger()
def write_frame(outfile, frame, header=None):
"""Write a frame fits file and returns path to file written.
Args:
        outfile: full path to output file, or tuple (night, expid, camera)
frame: desispec.frame.Frame object with wave, flux, ivar...
header: optional astropy.io.fits.Header or dict to override frame.header
Returns:
full filepath of output file that was written
Note:
to create a Frame object to pass into write_frame,
frame = Frame(wave, flux, ivar, resolution_data)
"""
outfile = makepath(outfile, 'frame')
if header is not None:
hdr = fitsheader(header)
else:
hdr = fitsheader(frame.header)
if 'SPECMIN' not in hdr:
hdr['SPECMIN'] = 0
if 'SPECMAX' not in hdr:
hdr['SPECMAX'] = hdr['SPECMIN'] + frame.nspec
hdus = fits.HDUList()
x = fits.PrimaryHDU(frame.flux, header=hdr)
x.header['EXTNAME'] = 'FLUX'
hdus.append(x)
hdus.append( fits.ImageHDU(frame.ivar, name='IVAR') )
hdus.append( fits.ImageHDU(frame.mask, name='MASK') )
hdus.append( fits.ImageHDU(frame.wave, name='WAVELENGTH') )
hdus.append( fits.ImageHDU(frame.resolution_data, name='RESOLUTION' ) )
hdus.writeto(outfile, clobber=True)
return outfile
def read_frame(filename, nspec=None):
"""Reads a frame fits file and returns its data.
Args:
filename: path to a file, or (night, expid, camera) tuple where
night = string YEARMMDD
expid = integer exposure ID
camera = b0, r1, .. z9
Returns:
desispec.Frame object with attributes wave, flux, ivar, etc.
"""
#- check if filename is (night, expid, camera) tuple instead
if not isinstance(filename, (str, unicode)):
night, expid, camera = filename
filename = findfile('frame', night, expid, camera)
if not os.path.isfile(filename) :
        raise IOError("cannot open " + filename)
fx = fits.open(filename, uint=True)
hdr = fx[0].header
flux = native_endian(fx['FLUX'].data)
ivar = native_endian(fx['IVAR'].data)
wave = native_endian(fx['WAVELENGTH'].data)
if 'MASK' in fx:
mask = native_endian(fx['MASK'].data)
else:
mask = None #- let the Frame object create the default mask
resolution_data = native_endian(fx['RESOLUTION'].data)
fx.close()
if nspec is not None:
flux = flux[0:nspec]
ivar = ivar[0:nspec]
resolution_data = resolution_data[0:nspec]
# return flux,ivar,wave,resolution_data, hdr
return Frame(wave, flux, ivar, mask, resolution_data, hdr)
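# --- Illustrative usage sketch (added; not part of the original module) ---
# A minimal write/read round trip following the docstrings above. The file
# name is hypothetical, and both the resolution_data shape (nspec, ndiag,
# nwave) and the Frame(wave, flux, ivar, resolution_data) call are
# assumptions taken from the write_frame() docstring; the real Frame API
# may differ.
def _example_frame_roundtrip():
    nspec, nwave = 2, 10
    wave = np.linspace(3000., 3100., nwave)
    flux = np.ones((nspec, nwave))
    ivar = np.ones((nspec, nwave))
    resolution_data = np.ones((nspec, 3, nwave))
    frame = Frame(wave, flux, ivar, resolution_data)
    path = write_frame('frame-test.fits', frame)
    return read_frame(path)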
| bsd-3-clause | 1,386,145,176,748,292,600 | 29.03 | 80 | 0.638362 | false |
jiangzhonglian/MachineLearning | src/py3.x/16.RecommenderSystems/RS-itemcf.py | 1 | 8671 | #!/usr/bin/python
# coding:utf8
'''
Created on 2015-06-22
Update on 2017-05-16
Author: Lockvictor/片刻
Collaborative filtering source code from "Recommender System Practice" (《推荐系统实践》)
Reference: https://github.com/Lockvictor/MovieLens-RecSys
Updates: https://github.com/apachecn/AiLearning
'''
import sys
import math
import random
from operator import itemgetter
# Purpose: make the random data reproducible
random.seed(0)
class ItemBasedCF():
''' TopN recommendation - ItemBasedCF '''
def __init__(self):
self.trainset = {}
self.testset = {}
        # n_sim_movie: use the top-20 similar movies, n_rec_movie: return the top-10 recommendations
self.n_sim_movie = 20
self.n_rec_movie = 10
        # movie_sim_mat: movie-movie similarity, movie_popular: occurrence count per movie, movie_count: total number of movies
self.movie_sim_mat = {}
self.movie_popular = {}
self.movie_count = 0
        print('Similar movie number = %d' % self.n_sim_movie, file=sys.stderr)
        print('Recommended movie number = %d' % self.n_rec_movie, file=sys.stderr)
@staticmethod
def loadfile(filename):
"""loadfile(加载文件,返回一个生成器)
Args:
filename 文件名
Returns:
line 行数据,去空格
"""
fp = open(filename, 'r')
for i, line in enumerate(fp):
yield line.strip('\r\n')
if i > 0 and i % 100000 == 0:
                print('loading %s(%s)' % (filename, i), file=sys.stderr)
fp.close()
        print('load %s success' % filename, file=sys.stderr)
def generate_dataset(self, filename, pivot=0.7):
"""loadfile(加载文件,将数据集按照7:3 进行随机拆分)
Args:
filename 文件名
pivot 拆分比例
"""
trainset_len = 0
testset_len = 0
for line in self.loadfile(filename):
            # user ID, movie ID, rating, timestamp
# user, movie, rating, _ = line.split('::')
user, movie, rating, _ = line.split('\t')
            # Compare a random number against pivot, then initialise the user's entry
if (random.random() < pivot):
# dict.setdefault(key, default=None)
                # key -- the key to look up
                # default -- default value to set when the key is absent
self.trainset.setdefault(user, {})
self.trainset[user][movie] = int(rating)
trainset_len += 1
else:
self.testset.setdefault(user, {})
self.testset[user][movie] = int(rating)
testset_len += 1
        print('Training/test split succeeded', file=sys.stderr)
        print('train set = %s' % trainset_len, file=sys.stderr)
        print('test set = %s' % testset_len, file=sys.stderr)
def calc_movie_sim(self):
"""calc_movie_sim(计算用户之间的相似度)"""
        print('counting movies number and popularity...', file=sys.stderr)
        # Count how many times each movie occurs across all users
for _, movies in self.trainset.items():
for movie in movies:
# count item popularity
if movie not in self.movie_popular:
self.movie_popular[movie] = 0
self.movie_popular[movie] += 1
        print('count movies number and popularity success', file=sys.stderr)
# save the total number of movies
self.movie_count = len(self.movie_popular)
        print('total movie number = %d' % self.movie_count, file=sys.stderr)
        # Count how often pairs of movies are rated by the same user
itemsim_mat = self.movie_sim_mat
        print('building co-rated users matrix...', file=sys.stderr)
# user, movies
for _, movies in self.trainset.items():
for m1 in movies:
for m2 in movies:
if m1 == m2:
continue
itemsim_mat.setdefault(m1, {})
itemsim_mat[m1].setdefault(m2, 0)
itemsim_mat[m1][m2] += 1
        print('build co-rated users matrix success', file=sys.stderr)
# calculate similarity matrix
        print('calculating movie similarity matrix...', file=sys.stderr)
simfactor_count = 0
PRINT_STEP = 2000000
for m1, related_movies in itemsim_mat.items():
            for m2, count in related_movies.items():
                # cosine similarity
itemsim_mat[m1][m2] = count / math.sqrt(
self.movie_popular[m1] * self.movie_popular[m2])
simfactor_count += 1
                # progress logging
                if simfactor_count % PRINT_STEP == 0:
                    print('calculating movie similarity factor(%d)' % simfactor_count, file=sys.stderr)
        print('calculate movie similarity matrix (similarity factor) success', file=sys.stderr)
        print('Total similarity factor number = %d' % simfactor_count, file=sys.stderr)
# @profile
def recommend(self, user):
"""recommend(找出top K的电影,对电影进行相似度sum的排序,取出top N的电影数)
Args:
user 用户
Returns:
rec_movie 电影推荐列表,按照相似度从大到小的排序
"""
''' Find K similar movies and recommend N movies. '''
K = self.n_sim_movie
N = self.n_rec_movie
rank = {}
watched_movies = self.trainset[user]
        # Accumulate weighted similarity over the top-K neighbours of each watched movie
        # rating = the user's rating of the movie, w = similarity weight of the related movie
        # Profiling note: 98.2% of the runtime is spent in the sorted() call below
        for movie, rating in watched_movies.items():
for related_movie, w in sorted(
self.movie_sim_mat[movie].items(),
key=itemgetter(1),
reverse=True)[0:K]:
if related_movie in watched_movies:
continue
rank.setdefault(related_movie, 0)
rank[related_movie] += w * rating
# return the N best movies
return sorted(rank.items(), key=itemgetter(1), reverse=True)[0:N]
def evaluate(self):
''' return precision, recall, coverage and popularity '''
        print('Evaluation start...', file=sys.stderr)
        # Return the top-N recommendations
N = self.n_rec_movie
# varables for precision and recall
        # hit: recommendations that also appear in the test set, rec_count: recommendations per user, test_count: test-set movies per user
hit = 0
rec_count = 0
test_count = 0
# varables for coverage
all_rec_movies = set()
# varables for popularity
popular_sum = 0
        # enumerate yields an (index, value) pair for each element
        # Reference: http://blog.csdn.net/churximi/article/details/51648388
for i, user in enumerate(self.trainset):
if i > 0 and i % 500 == 0:
                print('recommended for %d users' % i, file=sys.stderr)
test_movies = self.testset.get(user, {})
rec_movies = self.recommend(user)
            # Compare the recommendation list against the test set (movie, w pairs)
for movie, _ in rec_movies:
if movie in test_movies:
hit += 1
all_rec_movies.add(movie)
                # Accumulate log(1 + popularity) for each recommended movie
popular_sum += math.log(1 + self.movie_popular[movie])
rec_count += N
test_count += len(test_movies)
precision = hit / (1.0 * rec_count)
recall = hit / (1.0 * test_count)
coverage = len(all_rec_movies) / (1.0 * self.movie_count)
popularity = popular_sum / (1.0 * rec_count)
        print('precision=%.4f \t recall=%.4f \t coverage=%.4f \t popularity=%.4f' % (
            precision, recall, coverage, popularity), file=sys.stderr)
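# --- Illustrative sketch (added for clarity; not part of the original) ---
# The co-rated cosine similarity computed in calc_movie_sim() reduces, for
# two movies m1 and m2, to |U(m1) & U(m2)| / sqrt(|U(m1)| * |U(m2)|), where
# U(m) is the set of users who rated m. A self-contained check with
# hypothetical user sets:
def _example_cosine_similarity():
    users_m1 = {'u1', 'u2', 'u3'}
    users_m2 = {'u2', 'u3', 'u4', 'u5'}
    co_rated = len(users_m1 & users_m2)
    return co_rated / math.sqrt(len(users_m1) * len(users_m2))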
if __name__ == '__main__':
# ratingfile = 'data/16.RecommenderSystems/ml-1m/ratings.dat'
ratingfile = 'data/16.RecommenderSystems/ml-100k/u.data'
    # Create the ItemCF object
    itemcf = ItemBasedCF()
    # Split the data 7:3 into a training set and a test set, stored in itemcf.trainset and itemcf.testset
    itemcf.generate_dataset(ratingfile, pivot=0.7)
    # Compute the similarity between movies
    itemcf.calc_movie_sim()
    # Evaluate recommendation quality
    # itemcf.evaluate()
    # Inspect the recommendations for one user
    user = "2"
    print("Recommendation result:", itemcf.recommend(user))
print("---", itemcf.testset.get(user, {}))
| gpl-3.0 | -4,455,626,402,074,316,000 | 33.09292 | 100 | 0.547047 | false |
wpoa/wiki-imports | lib/python2.7/site-packages/pywikibot-2.0b1-py2.7.egg/pywikibot/userinterfaces/terminal_interface_win32.py | 1 | 2982 | # -*- coding: utf-8 -*-
#
# (C) Pywikipedia bot team, 2003-2012
#
# Distributed under the terms of the MIT license.
#
__version__ = '$Id: d6e1d28165e1e0b54b746762992665e7d30cab04 $'
import re
from . import terminal_interface_base
try:
import ctypes
ctypes_found = True
except ImportError:
ctypes_found = False
windowsColors = {
'default': 7,
'black': 0,
'blue': 1,
'green': 2,
'aqua': 3,
'red': 4,
'purple': 5,
'yellow': 6,
'lightgray': 7,
'gray': 8,
'lightblue': 9,
'lightgreen': 10,
'lightaqua': 11,
'lightred': 12,
'lightpurple': 13,
'lightyellow': 14,
'white': 15,
}
colorTagR = re.compile('\03{(?P<name>%s)}' % '|'.join(list(windowsColors.keys())))
# Compat for python <= 2.5
class Win32BaseUI(terminal_interface_base.UI):
def __init__(self):
terminal_interface_base.UI.__init__(self)
self.encoding = 'ascii'
class Win32CtypesUI(Win32BaseUI):
def __init__(self):
Win32BaseUI.__init__(self)
from .win32_unicode import stdin, stdout, stderr, argv
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.argv = argv
self.encoding = 'utf-8'
def printColorized(self, text, targetStream):
std_out_handle = ctypes.windll.kernel32.GetStdHandle(-11)
# Color tags might be cascaded, e.g. because of transliteration.
# Therefore we need this stack.
colorStack = []
tagM = True
while tagM:
tagM = colorTagR.search(text)
if tagM:
# print the text up to the tag.
targetStream.write(text[:tagM.start()].encode(self.encoding, 'replace'))
newColor = tagM.group('name')
if newColor == 'default':
if len(colorStack) > 0:
colorStack.pop()
if len(colorStack) > 0:
lastColor = colorStack[-1]
else:
lastColor = 'default'
ctypes.windll.kernel32.SetConsoleTextAttribute(std_out_handle, windowsColors[lastColor])
else:
colorStack.append(newColor)
# set the new color
ctypes.windll.kernel32.SetConsoleTextAttribute(std_out_handle, windowsColors[newColor])
text = text[tagM.end():]
# print the rest of the text
targetStream.write(text.encode(self.encoding, 'replace'))
# just to be sure, reset the color
ctypes.windll.kernel32.SetConsoleTextAttribute(std_out_handle, windowsColors['default'])
def _raw_input(self):
data = self.stdin.readline()
if '\x1a' in data:
raise EOFError()
return data.strip()
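# --- Illustrative sketch (added; not part of the original module) ---
# How the colour tags consumed by printColorized() are written; the tag
# names must match keys of the windowsColors dict above.
def _example_colorized_message():
    return u'\03{lightred}error:\03{default} something went wrong'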
if ctypes_found:
Win32UI = Win32CtypesUI
else:
Win32UI = Win32BaseUI
| gpl-3.0 | -2,789,536,079,870,463,000 | 29.742268 | 112 | 0.550302 | false |
nexdatas/writer | test/DataSourceFactory_test.py | 1 | 15926 | #!/usr/bin/env python
# This file is part of nexdatas - Tango Server for NeXus data writer
#
# Copyright (C) 2012-2017 DESY, Jan Kotanski <[email protected]>
#
# nexdatas is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# nexdatas is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with nexdatas. If not, see <http://www.gnu.org/licenses/>.
# \package test nexdatas
# \file DataSourceFactoryTest.py
# unittests for field Tags running Tango Server
#
import unittest
import sys
import struct
import json
import TestDataSource
from nxswriter.DataSourceFactory import DataSourceFactory
from nxswriter.DataSourcePool import DataSourcePool
from nxswriter.Element import Element
from nxswriter.EField import EField
from nxswriter import DataSources
from nxswriter import ClientSource
from nxswriter import PyEvalSource
from nxswriter.Errors import DataSourceSetupError
from nxswriter.DecoderPool import DecoderPool
# True if running on a 64-bit machine
IS64BIT = (struct.calcsize("P") == 8)
# test fixture
class DataSourceFactoryTest(unittest.TestCase):
# constructor
# \param methodName name of the test method
def __init__(self, methodName):
unittest.TestCase.__init__(self, methodName)
self._tfname = "doc"
self._fname = "test.h5"
self._nxDoc = None
self._eDoc = None
self._fattrs = {"short_name": "test", "units": "m"}
self._gname = "testDoc"
self._gtype = "NXentry"
self._bint = "int64" if IS64BIT else "int32"
self._buint = "uint64" if IS64BIT else "uint32"
self._bfloat = "float64" if IS64BIT else "float32"
self._tfname = "field"
self._tfname = "group"
self._fattrs = {"short_name": "test", "units": "m"}
# test starter
# \brief Common set up
def setUp(self):
# file handle
print("\nsetting up...")
# test closer
# \brief Common tear down
def tearDown(self):
print("tearing down ...")
# Exception tester
# \param exception expected exception
# \param method called method
# \param args list with method arguments
# \param kwargs dictionary with method arguments
def myAssertRaise(self, exception, method, *args, **kwargs):
try:
error = False
method(*args, **kwargs)
        except exception:
error = True
self.assertEqual(error, True)
# Data check
# \brief It check the source Data
# \param data tested data
# \param format data format
# \param value data value
# \param ttype data Tango type
# \param shape data shape
def checkData(self, data, format, value, ttype, shape):
self.assertEqual(data["rank"], format)
self.assertEqual(data["tangoDType"], ttype)
self.assertEqual(data["shape"], shape)
if format == 'SCALAR':
self.assertEqual(data["value"], value)
elif format == 'SPECTRUM':
self.assertEqual(len(data["value"]), len(value))
for i in range(len(value)):
self.assertEqual(data["value"][i], value[i])
else:
self.assertEqual(len(data["value"]), len(value))
for i in range(len(value)):
self.assertEqual(len(data["value"][i]), len(value[i]))
for j in range(len(value[i])):
self.assertEqual(data["value"][i][j], value[i][j])
# constructor test
# \brief It tests default settings
def test_constructor_default(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
ds = DataSourceFactory(self._fattrs, None)
self.assertTrue(isinstance(ds, Element))
self.assertEqual(ds.tagName, "datasource")
self.assertEqual(ds._tagAttrs, self._fattrs)
self.assertEqual(ds.content, [])
self.assertEqual(ds.doc, "")
self.assertEqual(ds.last, None)
el = Element(self._tfname, self._fattrs)
ds = DataSourceFactory(self._fattrs, el)
self.assertTrue(isinstance(ds, Element))
self.assertEqual(ds.tagName, "datasource")
self.assertEqual(ds._tagAttrs, self._fattrs)
self.assertEqual(ds.content, [])
self.assertEqual(ds.doc, "")
self.assertEqual(ds.last, el)
# constructor test
# \brief It tests default settings
def test_store_default(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
el = Element(self._tfname, self._fattrs)
ds = DataSourceFactory(self._fattrs, el)
self.assertTrue(isinstance(ds, Element))
self.assertEqual(ds.tagName, "datasource")
self.assertEqual(ds._tagAttrs, self._fattrs)
self.assertEqual(ds.content, [])
self.assertEqual(ds.doc, "")
self.assertEqual(ds.last, el)
self.assertEqual(ds.store(["<datasource>", "", "</datasource>"]), None)
self.assertEqual(type(ds.last.source), DataSources.DataSource)
self.assertTrue(not hasattr(ds.last, "tagAttributes"))
atts = {"type": "TANGO"}
el = Element(self._tfname, self._fattrs)
ds = DataSourceFactory(atts, el)
self.assertTrue(isinstance(ds, Element))
self.assertEqual(ds.tagName, "datasource")
self.assertEqual(ds._tagAttrs, atts)
self.assertEqual(ds.content, [])
self.assertEqual(ds.doc, "")
self.assertEqual(ds.last, el)
self.assertEqual(ds.store(["<datasource>", "", "</datasource>"]), None)
self.assertEqual(type(ds.last.source), DataSources.DataSource)
self.assertTrue(not hasattr(ds.last, "tagAttributes"))
atts = {"type": "CLIENT"}
el = Element(self._tfname, self._fattrs)
ds = DataSourceFactory(atts, el)
self.assertTrue(isinstance(ds, Element))
self.assertEqual(ds.tagName, "datasource")
self.assertEqual(ds._tagAttrs, atts)
self.assertEqual(ds.content, [])
self.assertEqual(ds.doc, "")
self.assertEqual(ds.last, el)
self.assertEqual(ds.setDataSources(DataSourcePool()), None)
self.myAssertRaise(
DataSourceSetupError, ds.store,
["<datasource>", "", "</datasource>"])
self.assertTrue(not hasattr(ds.last, "tagAttributes"))
atts = {"type": "CLIENT"}
el = Element(self._tfname, self._fattrs)
ds = DataSourceFactory(atts, el)
self.assertTrue(isinstance(ds, Element))
self.assertEqual(ds.tagName, "datasource")
self.assertEqual(ds._tagAttrs, atts)
self.assertEqual(ds.content, [])
self.assertEqual(ds.doc, "")
self.assertEqual(ds.last, el)
self.assertEqual(ds.setDataSources(DataSourcePool()), None)
self.myAssertRaise(
DataSourceSetupError, ds.store, [
"<datasource type='CLIENT'>", "<record/>", "</datasource>"])
self.assertTrue(not hasattr(ds.last, "tagAttributes"))
atts = {"type": "CLIENT"}
name = "myRecord"
el = Element(self._tfname, self._fattrs)
ds = DataSourceFactory(atts, el)
self.assertTrue(isinstance(ds, Element))
self.assertEqual(ds.tagName, "datasource")
self.assertEqual(ds._tagAttrs, atts)
self.assertEqual(ds.content, [])
self.assertEqual(ds.doc, "")
self.assertEqual(ds.last, el)
self.assertEqual(ds.setDataSources(DataSourcePool()), None)
self.assertEqual(ds.store(["<datasource type='CLIENT'>",
'<record name="%s"/>' % name,
"</datasource>"]), None)
self.assertEqual(type(ds.last.source), ClientSource.ClientSource)
self.assertEqual(ds.last.source.name, name)
self.assertEqual(ds.last.source.name, name)
self.assertEqual(ds.last.source.__str__(), " CLIENT record %s"
% (name))
self.assertTrue(not hasattr(ds.last, "tagAttributes"))
atts = {"type": "CLIENT"}
name = "myRecord"
el = EField(self._fattrs, None)
ds = DataSourceFactory(atts, el)
self.assertTrue(isinstance(ds, Element))
self.assertEqual(ds.tagName, "datasource")
self.assertEqual(ds._tagAttrs, atts)
self.assertEqual(ds.content, [])
self.assertEqual(ds.doc, "")
self.assertEqual(ds.last, el)
self.assertEqual(ds.setDataSources(DataSourcePool()), None)
self.assertEqual(ds.store(["<datasource type='CLIENT'>",
'<record name="%s"/>' % name,
"</datasource>"]), None)
self.assertEqual(type(ds.last.source), ClientSource.ClientSource)
self.assertEqual(ds.last.source.name, name)
self.assertEqual(ds.last.source.name, name)
self.assertEqual(ds.last.source.__str__(), " CLIENT record %s"
% (name))
self.assertEqual(len(ds.last.tagAttributes), 1)
self.assertEqual(ds.last.tagAttributes["nexdatas_source"], (
'NX_CHAR',
'<datasource type=\'CLIENT\'><record name="myRecord"/>'
'</datasource>'))
atts = {"type": "CLIENT"}
name = "myRecord"
# wjson = json.loads('{"datasources":
# {"CL":"DataSources.ClientSource"}}')
gjson = json.loads('{"data":{"myRecord":"1"}}')
el = EField(self._fattrs, None)
ds = DataSourceFactory(atts, el)
self.assertTrue(isinstance(ds, Element))
self.assertEqual(ds.tagName, "datasource")
self.assertEqual(ds._tagAttrs, atts)
self.assertEqual(ds.content, [])
self.assertEqual(ds.doc, "")
self.assertEqual(ds.last, el)
self.assertEqual(ds.setDataSources(DataSourcePool()), None)
self.assertEqual(ds.store(["<datasource type='CLIENT'>",
'<record name="%s"/>' % name,
"</datasource>"], gjson), None)
self.assertEqual(type(ds.last.source), ClientSource.ClientSource)
self.assertEqual(ds.last.source.name, name)
self.assertEqual(ds.last.source.name, name)
self.assertEqual(ds.last.source.__str__(), " CLIENT record %s"
% (name))
self.assertEqual(len(ds.last.tagAttributes), 1)
self.assertEqual(ds.last.tagAttributes["nexdatas_source"], (
'NX_CHAR',
'<datasource type=\'CLIENT\'><record name="myRecord"/>'
'</datasource>'))
dt = ds.last.source.getData()
self.checkData(dt, "SCALAR", '1', "DevString", [])
atts = {"type": "PYEVAL"}
name = "myRecord"
# wjson = json.loads(
# '{"datasources":{"CL":"ClientSource.ClientSource"}}')
gjson = json.loads('{"data":{"myRecord":1123}}')
el = EField(self._fattrs, None)
ds = DataSourceFactory(atts, el)
self.assertTrue(isinstance(ds, Element))
self.assertEqual(ds.tagName, "datasource")
self.assertEqual(ds._tagAttrs, atts)
self.assertEqual(ds.content, [])
self.assertEqual(ds.doc, "")
self.assertEqual(ds.last, el)
self.assertEqual(ds.setDataSources(DataSourcePool()), None)
self.assertEqual(ds.store(["<datasource type='PYEVAL'>",
"""
<datasource type="CLIENT" name="myclient">
<record name="%s"/>
</datasource>
<result>
ds.result = ds.myclient + 1
</result>
""" % name,
"</datasource>"], gjson), None)
self.assertEqual(type(ds.last.source), PyEvalSource.PyEvalSource)
self.assertEqual(ds.last.source.__str__(),
" PYEVAL \nds.result = ds.myclient + 1\n")
self.assertEqual(len(ds.last.tagAttributes), 1)
self.assertEqual(ds.last.tagAttributes["nexdatas_source"], (
'NX_CHAR',
'<datasource type=\'PYEVAL\'>\n'
'<datasource type="CLIENT" name="myclient">\n '
'<record name="myRecord"/>\n</datasource>\n'
'<result>\nds.result = ds.myclient + 1\n</result>\n'
'</datasource>'))
dt = ds.last.source.getData()
self.checkData(dt, "SCALAR", 1124, "DevLong64", [])
# constructor test
# \brief It tests default settings
def test_check_flow(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
atts = {"type": "CL"}
name = "myRecord"
if "test.TestDataSource" in sys.modules.keys():
wjson = json.loads(
'{"datasources":{'
'"CL":"test.TestDataSource.TestDataSource"}}')
else:
wjson = json.loads(
'{"datasources":{'
'"CL":"TestDataSource.TestDataSource"}}')
gjson = json.loads('{"data":{"myRecord":1123}}')
el = EField(self._fattrs, None)
ds = DataSourceFactory(atts, el)
self.assertTrue(isinstance(ds, Element))
self.assertEqual(ds.tagName, "datasource")
self.assertEqual(ds._tagAttrs, atts)
self.assertEqual(ds.content, [])
self.assertEqual(ds.doc, "")
self.assertEqual(ds.last, el)
dsp = DataSourcePool(wjson)
dcp = DecoderPool()
self.assertEqual(ds.setDataSources(dsp), None)
self.assertEqual(ds.store(["<datasource type='CL'>",
"""
<datasource type="CLIENT" name="myclient">
<record name="%s"/>
</datasource>
<result>
ds.result = ds.myclient + 1
</result>
""" % name,
"</datasource>"], gjson), None)
td = ds.last.source
self.assertEqual(len(td.stack), 7)
self.assertEqual(td.stack[0], "setup")
self.assertEqual(
td.stack[1],
'<datasource type=\'CL\'>\n'
'<datasource type="CLIENT" name="myclient">\n '
'<record name="myRecord"/>\n</datasource>\n'
'<result>\nds.result = ds.myclient + 1\n</result>\n'
'</datasource>')
self.assertEqual(td.stack[2], 'setJSON')
self.assertEqual(td.stack[3], {u'data': {u'myRecord': 1123}})
self.assertEqual(td.stack[4], None)
self.assertEqual(td.stack[5], "setDataSources")
self.assertEqual(td.stack[6], dsp)
ds.setDecoders(dcp)
self.assertEqual(len(td.stack), 10)
self.assertEqual(td.stack[7], "isValid")
self.assertEqual(td.stack[8], "setDecoders")
self.assertEqual(td.stack[9], dcp)
self.assertEqual(type(ds.last.source), TestDataSource.TestDataSource)
self.assertEqual(ds.last.source.__str__(), "Test DataSource")
self.assertEqual(len(td.stack), 11)
self.assertEqual(td.stack[10], '__str__')
self.assertEqual(len(ds.last.tagAttributes), 1)
self.assertEqual(ds.last.tagAttributes["nexdatas_source"], (
'NX_CHAR',
'<datasource type=\'CL\'>\n<datasource type="CLIENT" '
'name="myclient">\n <record name="myRecord"/>\n'
'</datasource>\n<result>\nds.result = ds.myclient + 1\n</result>'
'\n</datasource>'))
dt = ds.last.source.getData()
self.assertEqual(len(td.stack), 12)
self.assertEqual(td.stack[11], 'getData')
self.checkData(dt, "SCALAR", 1, "DevLong", [0, 0])
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -2,973,735,091,690,234,000 | 39.42132 | 79 | 0.591172 | false |
ouihelp/yesaide | tests/test_database.py | 1 | 1060 | import unittest
from yesaide import database, worker
class TestDatabase(unittest.TestCase):
class FakeDbSession(object):
def __init__(self):
self.has_been_committed = False
def commit(self):
self.has_been_committed = True
class TestedWorker(worker.RawWorker):
@database.db_method
def fake_method(self):
pass
def setUp(self):
self.dbsession = self.FakeDbSession()
self.worker = self.TestedWorker(self.dbsession)
def tearDown(self):
del self.dbsession
del self.worker
def test_db_method(self):
self.worker.fake_method()
# Default is to auto commit.
self.assertTrue(self.dbsession.has_been_committed)
def test_db_method_with_commit_true(self):
self.worker.fake_method(commit=True)
self.assertTrue(self.dbsession.has_been_committed)
def test_db_method_with_commit_false(self):
self.worker.fake_method(commit=False)
        self.assertFalse(self.dbsession.has_been_committed)
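# --- Illustrative sketch (added; not part of the original tests) ---
# A minimal re-implementation of what a db_method-style decorator could look
# like, based only on the behaviour exercised above (auto-commit unless
# commit=False is passed). It assumes the worker keeps its session as
# self.dbsession; the real yesaide implementation may differ.
def _example_db_method(method):
    import functools

    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        commit = kwargs.pop('commit', True)
        result = method(self, *args, **kwargs)
        if commit:
            self.dbsession.commit()
        return result
    return wrapper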
| mit | 5,233,421,344,965,670,000 | 26.894737 | 62 | 0.65 | false |
Azure/azure-sdk-for-python | sdk/databoxedge/azure-mgmt-databoxedge/azure/mgmt/databoxedge/v2020_09_01/operations/_alerts_operations.py | 1 | 8465 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class AlertsOperations(object):
"""AlertsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.databoxedge.v2020_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_data_box_edge_device(
self,
device_name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.AlertList"]
"""Gets all the alerts for a Data Box Edge/Data Box Gateway device.
:param device_name: The device name.
:type device_name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AlertList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.databoxedge.v2020_09_01.models.AlertList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AlertList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_data_box_edge_device.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('AlertList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_data_box_edge_device.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/alerts'} # type: ignore
def get(
self,
device_name, # type: str
name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.Alert"
"""Gets an alert by name.
Gets an alert by name.
:param device_name: The device name.
:type device_name: str
:param name: The alert name.
:type name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Alert, or the result of cls(response)
:rtype: ~azure.mgmt.databoxedge.v2020_09_01.models.Alert
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Alert"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Alert', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/alerts/{name}'} # type: ignore
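# --------------------------------------------------------------------------
# Illustrative usage sketch (added; not part of the generated client). The
# subscription ID, resource group and device name are placeholders, and the
# DataBoxEdgeManagementClient / DefaultAzureCredential imports are assumed
# to come from azure-mgmt-databoxedge and azure-identity respectively.
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.databoxedge import DataBoxEdgeManagementClient
#
#     client = DataBoxEdgeManagementClient(DefaultAzureCredential(),
#                                          subscription_id='<sub-id>')
#     for alert in client.alerts.list_by_data_box_edge_device(
#             device_name='<device>', resource_group_name='<rg>'):
#         print(alert.name)
# --------------------------------------------------------------------------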
| mit | -8,069,543,008,806,840,000 | 44.510753 | 208 | 0.631424 | false |
ZeikJT/DisgaeaHacking | unpacker.py | 1 | 4726 | #!/usr/bin/python
''' Version 0.1.0
ARC, DAT and MPP unpacker. '''
import os, struct, sys
class FileBundle:
def __init__(self):
self.files = []
def addFiles(self):
raise NotImplementedError()
def extractFiles(self, outputFolder):
if not os.path.exists(outputFolder):
os.mkdir(outputFolder)
class FileBundleWithSizes(FileBundle):
def addFile(self, fileName, fileStart, fileSize):
self.files.append({'fileName': fileName, 'fileStart': fileStart, 'fileSize': fileSize})
def extractFiles(self, outputFolder, inputFile):
super().extractFiles(outputFolder)
for fileData in self.files:
inputFile.seek(fileData['fileStart'])
outputFile = open(os.path.join(outputFolder, fileData['fileName']), 'wb')
outputFile.write(inputFile.read(fileData['fileSize']))
outputFile.close()
class FileBundleWithOffsets(FileBundle):
def addFile(self, fileName, fileStart):
self.files.append({'fileName': fileName, 'fileStart': fileStart})
def extractFiles(self, outputFolder, inputFile):
super().extractFiles(outputFolder)
fileSize = os.fstat(inputFile.fileno()).st_size
for i in range(0, len(self.files)):
fileData = self.files[i]
fileEnd = fileSize if (i == len(self.files) - 1) else self.files[i + 1]['fileStart']
inputFile.seek(fileData['fileStart'])
outputFile = open(os.path.join(outputFolder, fileData['fileName']), 'wb')
outputFile.write(inputFile.read(fileEnd - fileData['fileStart']))
outputFile.close()
def unpackMPP(filePath):
mpp = open(filePath, 'rb')
u1,isNew,unknown1,unknown2,fileSize,dataOffset = struct.unpack('<HHHHLL', mpp.read(16))
if fileSize != os.fstat(mpp.fileno()).st_size or dataOffset <= 0xF:
print('Invalid header', filePath)
else:
fileBundle = FileBundleWithOffsets()
fileBundle.addFile('0', dataOffset)
i = 1
while mpp.tell() < dataOffset:
fileOffset, = struct.unpack('<L', mpp.read(4))
if fileOffset == 0:
break
fileBundle.addFile(str(i), fileOffset)
i += 1
fileBundle.extractFiles(filePath + ' Files', mpp)
mpp.close()
def unpackPSPFS_V1(file, filePath):
fileCount,unknown1 = struct.unpack('<LL', file.read(8))
if fileCount == 0:
        print('Invalid fileCount {}:'.format(fileCount), filePath)
else:
fileBundle = FileBundleWithSizes()
for i in range(0, fileCount):
name = file.read(44).split(b'\x00')[0].decode()
size,offset = struct.unpack('<LL', file.read(8))
fileBundle.addFile(name, offset, size)
fileBundle.extractFiles(filePath + ' Files', file)
def unpack0x00020000(file, filePath):
fileCount,unknown1 = struct.unpack('<LL', file.read(8))
if fileCount == 0:
        print('Invalid file count {}:'.format(fileCount), filePath)
elif unknown1 != 0x00020000:
print('Invalid header:', filePath)
else:
fileBundle = FileBundleWithOffsets()
for i in range(0, fileCount):
fileBundle.addFile(str(i), struct.unpack('<L', file.read(4))[0])
fileBundle.extractFiles(filePath + ' Files', file)
def unpackDAT(filePath):
dat = open(filePath, 'rb')
if dat.read(8).decode() == 'PSPFS_V1':
unpackPSPFS_V1(dat, filePath)
else:
dat.seek(0)
unpack0x00020000(dat, filePath)
dat.close()
def unpackARC(filePath):
arc = open(filePath, 'rb')
dsarcidx,fileCount,unknown1 = struct.unpack('<8sLL', arc.read(16))
if dsarcidx.decode() != 'DSARCIDX' or unknown1 != 0:
print('Invalid header:', filePath)
elif fileCount == 0:
        print('Invalid file count {}:'.format(fileCount), filePath)
else:
arc.seek(int((0x1F + (fileCount * 2)) / 0x10) * 0x10)
fileBundle = FileBundleWithSizes()
for i in range(0, fileCount):
name = arc.read(40).split(b'\x00')[0].decode()
size,offset = struct.unpack('<LL', arc.read(8))
fileBundle.addFile(name, offset, size)
fileBundle.extractFiles(filePath + ' Files', arc)
arc.close()
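# --- Illustrative sketch (added; not part of the original tool) ---
# The 16-byte DSARCIDX header parsed in unpackARC() above: an 8-byte magic
# string followed by two little-endian uint32 values (file count, padding).
# The bytes below are hand-built for the demo.
def _example_parse_dsarcidx_header():
    import io
    header = b'DSARCIDX' + struct.pack('<LL', 2, 0)
    arc = io.BytesIO(header)
    magic, file_count, unknown = struct.unpack('<8sLL', arc.read(16))
    return magic, file_count, unknown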
for arg in sys.argv[1:]:
if os.path.isfile(arg):
if arg.endswith('.ARC'):
unpackARC(arg)
elif arg.endswith('.DAT'):
unpackDAT(arg)
elif arg.endswith('.MPP'):
unpackMPP(arg)
else:
print('Unknown file extension:', arg)
else:
print('File not accessible:', arg)
| mit | 4,212,476,860,702,476,000 | 37.383333 | 96 | 0.596064 | false |
tanghaibao/goatools | goatools/semantic.py | 1 | 9628 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Compute semantic similarities between GO terms. Borrowed from book chapter from
Alex Warwick Vesztrocy and Christophe Dessimoz (thanks). For details, please
check out:
notebooks/semantic_similarity.ipynb
"""
from __future__ import print_function
import sys
from collections import Counter
from collections import defaultdict
from goatools.godag.consts import NAMESPACE2GO
from goatools.godag.consts import NAMESPACE2NS
from goatools.godag.go_tasks import get_go2ancestors
from goatools.gosubdag.gosubdag import GoSubDag
from goatools.godag.relationship_combos import RelationshipCombos
from goatools.anno.update_association import clean_anno
from goatools.utils import get_b2aset
class TermCounts:
'''
    TermCounts counts the annotation term counts for each GO term, propagated up the ontology
'''
# pylint: disable=too-many-instance-attributes
def __init__(self, go2obj, annots, relationships=None, **kws):
'''
        Initialise the counts and the GO-to-gene mappings from the annotations
'''
_prt = kws.get('prt')
# Backup
self.go2obj = go2obj # Full GODag
self.annots, go_alts = clean_anno(annots, go2obj, _prt)[:2]
# Genes annotated to all associated GO, including inherited up ancestors'
_relationship_set = RelationshipCombos(go2obj).get_set(relationships)
self.go2genes = self._init_go2genes(_relationship_set, go2obj)
self.gene2gos = get_b2aset(self.go2genes)
# Annotation main GO IDs (prefer main id to alt_id)
self.goids = set(self.go2genes.keys())
self.gocnts = Counter({go:len(geneset) for go, geneset in self.go2genes.items()})
# Get total count for each branch: BP MF CC
self.aspect_counts = {
'biological_process': self.gocnts.get(NAMESPACE2GO['biological_process'], 0),
'molecular_function': self.gocnts.get(NAMESPACE2GO['molecular_function'], 0),
'cellular_component': self.gocnts.get(NAMESPACE2GO['cellular_component'], 0)}
self._init_add_goid_alt(go_alts)
self.gosubdag = GoSubDag(
set(self.gocnts.keys()),
go2obj,
tcntobj=self,
relationships=_relationship_set,
prt=None)
if _prt:
self.prt_objdesc(_prt)
def get_annotations_reversed(self):
"""Return go2geneset for all GO IDs explicitly annotated to a gene"""
return set.union(*get_b2aset(self.annots))
def _init_go2genes(self, relationship_set, godag):
'''
Fills in the genes annotated to each GO, including ancestors
Due to the ontology structure, gene products annotated to
        a GO term are also annotated to all of its ancestors.
'''
go2geneset = defaultdict(set)
go2up = get_go2ancestors(set(godag.values()), relationship_set)
# Fill go-geneset dict with GO IDs in annotations and their corresponding counts
for geneid, goids_anno in self.annots.items():
# Make a union of all the terms for a gene, if term parents are
# propagated but they won't get double-counted for the gene
allterms = set()
for goid_main in goids_anno:
allterms.add(goid_main)
if goid_main in go2up:
allterms.update(go2up[goid_main])
# Add 1 for each GO annotated to this gene product
for ancestor in allterms:
go2geneset[ancestor].add(geneid)
return dict(go2geneset)
def _init_add_goid_alt(self, not_main):
'''
Add alternate GO IDs to term counts. Report GO IDs not found in GO DAG.
'''
if not not_main:
return
for go_id in not_main:
if go_id in self.go2obj:
goid_main = self.go2obj[go_id].item_id
self.gocnts[go_id] = self.gocnts[goid_main]
self.go2genes[go_id] = self.go2genes[goid_main]
def get_count(self, go_id):
'''
Returns the count of that GO term observed in the annotations.
'''
return self.gocnts[go_id]
def get_total_count(self, aspect):
'''
Gets the total count that's been precomputed.
'''
return self.aspect_counts[aspect]
def get_term_freq(self, go_id):
'''
Returns the frequency at which a particular GO term has
been observed in the annotations.
'''
num_ns = float(self.get_total_count(self.go2obj[go_id].namespace))
return float(self.get_count(go_id))/num_ns if num_ns != 0 else 0
def get_gosubdag_all(self, prt=sys.stdout):
'''
Get GO DAG subset include descendants which are not included in the annotations
'''
goids = set()
for gos in self.gosubdag.rcntobj.go2descendants.values():
goids.update(gos)
return GoSubDag(goids, self.go2obj, self.gosubdag.relationships, tcntobj=self, prt=prt)
def prt_objdesc(self, prt=sys.stdout):
"""Print TermCount object description"""
ns_tot = sorted(self.aspect_counts.items())
cnts = ['{NS}({N:,})'.format(NS=NAMESPACE2NS[ns], N=n) for ns, n in ns_tot if n != 0]
go_msg = "TermCounts {CNT}".format(CNT=' '.join(cnts))
prt.write('{GO_MSG} {N:,} genes\n'.format(GO_MSG=go_msg, N=len(self.gene2gos)))
self.gosubdag.prt_objdesc(prt, go_msg)
def get_info_content(go_id, termcounts):
'''
Retrieve the information content of a GO term.
'''
ntd = termcounts.gosubdag.go2nt.get(go_id)
return ntd.tinfo if ntd else 0.0
def resnik_sim(go_id1, go_id2, godag, termcounts):
'''
Computes Resnik's similarity measure.
'''
goterm1 = godag[go_id1]
goterm2 = godag[go_id2]
if goterm1.namespace == goterm2.namespace:
msca_goid = deepest_common_ancestor([go_id1, go_id2], godag)
return get_info_content(msca_goid, termcounts)
return None
def lin_sim(goid1, goid2, godag, termcnts, dfltval=None):
'''
Computes Lin's similarity measure.
'''
sim_r = resnik_sim(goid1, goid2, godag, termcnts)
return lin_sim_calc(goid1, goid2, sim_r, termcnts, dfltval)
def lin_sim_calc(goid1, goid2, sim_r, termcnts, dfltval=None):
'''
Computes Lin's similarity measure using pre-calculated Resnik's similarities.
'''
# If goid1 and goid2 are in the same namespace
if sim_r is not None:
tinfo1 = get_info_content(goid1, termcnts)
tinfo2 = get_info_content(goid2, termcnts)
info = tinfo1 + tinfo2
# Both GO IDs must be annotated
if tinfo1 != 0.0 and tinfo2 != 0.0 and info != 0:
return (2*sim_r)/info
if termcnts.go2obj[goid1].item_id == termcnts.go2obj[goid2].item_id:
return 1.0
# The GOs are separated by the root term, so are not similar
if sim_r == 0.0:
return 0.0
return dfltval
def common_parent_go_ids(goids, godag):
'''
This function finds the common ancestors in the GO
tree of the list of goids in the input.
'''
# Find main GO ID candidates from first main or alt GO ID
rec = godag[goids[0]]
candidates = rec.get_all_parents()
candidates.update({rec.item_id})
# Find intersection with second to nth GO ID
for goid in goids[1:]:
rec = godag[goid]
parents = rec.get_all_parents()
parents.update({rec.item_id})
# Find the intersection with the candidates, and update.
candidates.intersection_update(parents)
return candidates
def deepest_common_ancestor(goterms, godag):
'''
This function gets the nearest common ancestor
using the above function.
Only returns single most specific - assumes unique exists.
'''
# Take the element at maximum depth.
return max(common_parent_go_ids(goterms, godag), key=lambda t: godag[t].depth)
def min_branch_length(go_id1, go_id2, godag, branch_dist):
'''
Finds the minimum branch length between two terms in the GO DAG.
'''
# First get the deepest common ancestor
goterm1 = godag[go_id1]
goterm2 = godag[go_id2]
if goterm1.namespace == goterm2.namespace:
dca = deepest_common_ancestor([go_id1, go_id2], godag)
# Then get the distance from the DCA to each term
dca_depth = godag[dca].depth
depth1 = goterm1.depth - dca_depth
depth2 = goterm2.depth - dca_depth
# Return the total distance - i.e., to the deepest common ancestor and back.
return depth1 + depth2
if branch_dist is not None:
return goterm1.depth + goterm2.depth + branch_dist
return None
def semantic_distance(go_id1, go_id2, godag, branch_dist=None):
'''
Finds the semantic distance (minimum number of connecting branches)
between two GO terms.
'''
return min_branch_length(go_id1, go_id2, godag, branch_dist)
def semantic_similarity(go_id1, go_id2, godag, branch_dist=None):
'''
Finds the semantic similarity (inverse of the semantic distance)
between two GO terms.
'''
dist = semantic_distance(go_id1, go_id2, godag, branch_dist)
if dist is not None:
return 1.0 / float(dist) if dist != 0 else 1.0
return None
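# --------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# module). The OBO file name, GO IDs and the annotation dict are
# placeholders, and loading the DAG via goatools.obo_parser.GODag is
# assumed:
#
#     from goatools.obo_parser import GODag
#     godag = GODag('go-basic.obo')
#     annots = {'geneA': {'GO:0016310'}, 'geneB': {'GO:0006468'}}
#     tcnts = TermCounts(godag, annots)
#     print(resnik_sim('GO:0016310', 'GO:0006468', godag, tcnts))
#     print(lin_sim('GO:0016310', 'GO:0006468', godag, tcnts))
# --------------------------------------------------------------------------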
# 1. Schlicker, Andreas et al.
# "A new measure for functional similarity of gene products based on Gene Ontology"
# BMC Bioinformatics (2006)
#
# 2. Yang, Haixuan et al.
# Improving GO semantic similarity measures by exploring the ontology
# beneath the terms and modelling uncertainty
# Bioinformatics (2012)
| bsd-2-clause | -4,900,397,585,464,170,000 | 35.608365 | 95 | 0.634919 | false |
gajim/gajim | test/gtk/htmltextview.py | 1 | 5593 | from unittest.mock import MagicMock
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
from gajim.common import app
from gajim.common import configpaths
configpaths.init()
from gajim import gui
gui.init('gtk')
from gajim.common.helpers import AdditionalDataDict
from gajim.conversation_textview import ConversationTextview
from gajim.gui_interface import Interface
app.settings = MagicMock()
app.plugin_manager = MagicMock()
app.logger = MagicMock()
app.cert_store = MagicMock()
app.storage = MagicMock()
app.interface = Interface()
XHTML = [
'''
<div>
<span style="color: red; text-decoration:underline">Hello</span>
<br/>\n
<img src="http://images.slashdot.org/topics/topicsoftware.gif"/>
<br/>\n
<span style="font-size: 500%; font-family: serif">World</span>\n
</div>
''',
'''
<hr />
''',
'''
<body xmlns='http://www.w3.org/1999/xhtml'>
<p xmlns='http://www.w3.org/1999/xhtml'>Look here
<a href='http://google.com/'>Google</a>
</p>
<br/>
</body>
''',
'''
<hr />
''',
'''
<body xmlns='http://www.w3.org/1999/xhtml'>
<p style='font-size:large'>
<span style='font-style: italic'>O
<span style='font-size:larger'>M</span>G
</span>, I'm <span style='color:green'>green</span> with
<span style='font-weight: bold'>envy</span>!
</p>
</body>
''',
'''
<hr />
''',
'''
<body xmlns='http://www.w3.org/1999/xhtml'>
<p>
As Emerson said in his essay
<span style='font-style: italic; background-color:cyan'>
Self-Reliance</span>:
</p>
<p style='margin-left: 5px; margin-right: 2%'>
"A foolish consistency is the hobgoblin of little minds."
</p>
</body>
''',
'''
<hr />
''',
'''
<body xmlns='http://www.w3.org/1999/xhtml'>
<p style='text-align:center'>
Hey, are you licensed to <a href='http://www.jabber.org/'>Jabber</a>?
</p>
<p style='text-align:right'>
<img src='http://www.xmpp.org/images/psa-license.jpg'
alt='A License to Jabber' width='50%' height='50%'/>
</p>
</body>
''',
'''
<hr />
''',
'''
<body xmlns='http://www.w3.org/1999/xhtml'>
<ul style='background-color:rgb(120,140,100)'>
<li> One </li>
<li> Two </li>
<li> Three </li>
</ul>
<hr />
<pre style="background-color:rgb(120,120,120)">def fac(n):
def faciter(n,acc):
if n==0: return acc
return faciter(n-1, acc*n)
if n<0: raise ValueError('Must be non-negative')
return faciter(n,1)</pre>
</body>
''',
'''
<hr />
''',
'''
<body xmlns='http://www.w3.org/1999/xhtml'>
<ol style='background-color:rgb(120,140,100)'>
<li> One </li>
<li>
Two is nested:
<ul style='background-color:rgb(200,200,100)'>
<li> One </li>
<li style='font-size:50%'> Two </li>
<li style='font-size:200%'> Three </li>
<li style='font-size:9999pt'> Four </li>
</ul>
</li>
<li> Three </li>
</ol>
</body>
''',
'''
<hr />
''',
'''
<body xmlns='http://www.w3.org/1999/xhtml'>
<p>
<strong>
<a href='xmpp:[email protected]'>xmpp link</a>
</strong>:
</p>
<div xmlns='http://www.w3.org/1999/xhtml'>
<cite style='margin: 7px;' title='xmpp:[email protected]'>
<p>
<strong>[email protected] wrote:</strong>
</p>
<p>this cite - bla bla bla, smile- :-) …</p>
</cite>
<div>
<p>some text</p>
</div>
</div>
<p/>
<p>#232/1</p>
</body>
''',
'''
<hr />
''',
'''
<body xmlns='http://www.w3.org/1999/xhtml'>
<img src='data:image/png;base64,R0lGODdhMAAwAPAAAAAAAP///ywAAAAAMAAw\
AAAC8IyPqcvt3wCcDkiLc7C0qwyGHhSWpjQu5yqmCYsapyuvUUlvONmOZtfzgFz\
ByTB10QgxOR0TqBQejhRNzOfkVJ+5YiUqrXF5Y5lKh/DeuNcP5yLWGsEbtLiOSp\
a/TPg7JpJHxyendzWTBfX0cxOnKPjgBzi4diinWGdkF8kjdfnycQZXZeYGejmJl\
ZeGl9i2icVqaNVailT6F5iJ90m6mvuTS4OK05M0vDk0Q4XUtwvKOzrcd3iq9uis\
F81M1OIcR7lEewwcLp7tuNNkM3uNna3F2JQFo97Vriy/Xl4/f1cf5VWzXyym7PH\
hhx4dbgYKAAA7' alt='Larry'/>
</body>
''',
]
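# --- Illustrative sketch (added; not part of the original test) ---
# The snippets above are fed to the textview via AdditionalDataDict; a
# minimal payload looks like this (mirrors _print_xhtml further below).
def _example_xhtml_payload():
    additional_data = AdditionalDataDict()
    additional_data.set_value('gajim', 'xhtml',
                              "<body xmlns='http://www.w3.org/1999/xhtml'>"
                              "<p>hello</p></body>")
    return additional_data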
class TextviewWindow(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title="Textview Test")
self.set_default_size(600, 600)
self._textview = ConversationTextview(None)
scrolled = Gtk.ScrolledWindow()
scrolled.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
scrolled.add(self._textview.tv)
self.add(scrolled)
self.show()
self._print_xhtml()
def _print_xhtml(self):
for xhtml in XHTML:
additional_data = AdditionalDataDict()
additional_data.set_value('gajim', 'xhtml', xhtml)
self._textview.print_real_text(None, additional_data=additional_data)
self._textview.print_real_text('\n')
win = TextviewWindow()
win.connect("destroy", Gtk.main_quit)
win.show_all()
Gtk.main()
| gpl-3.0 | 3,587,926,064,607,005,000 | 25.126168 | 81 | 0.528349 | false |
aolindahl/aolPyModules | cookie_box.py | 1 | 16382 | import numpy as np
import tof
from configuration import loadConfiguration as loadConfig
from aolUtil import struct
import sys
import random
import lmfit
from burning_detectors import projector
import psana  # used directly below (psana.Source, psana.Acqiris); the import was missing
import simplepsana
# A bunch of methods to take care of the cookie box data
_source_dict = {}
def get_source(source_string):
global _source_dict
if source_string not in _source_dict:
_source_dict[source_string] = psana.Source(source_string)
return _source_dict[source_string]
proj = projector()
def model_function(params, x, y=None, eps=None):
A = params['A'].value
beta = params['beta'].value
tilt = params['tilt'].value
linear = params['linear'].value
mod = A * ( 1 + beta * 0.25 * ( 1 + 3 * linear * np.cos( 2*(x-tilt) ) ) )
if y is None:
ret = mod
elif eps is None:
ret = mod-y
else:
ret = (mod-y)/eps
return ret
def initial_params(y_data=None):
params = lmfit.Parameters()
params.add('A', 10, min=0)
params.add('beta', 2, min=-1, max=2)
params.add('tilt', 0, min = -np.pi/2, max=np.pi/2)
params.add('linear', 0.5, min=0)
#params.add('tilt', np.pi, min=-2*np.pi, max=2*np.pi)
#params.add('linear', 0.5, min=-0.5, max=1.5)
    if y_data is not None:
params['A'].value = y_data[np.isfinite(y_data)].mean()
tilt = initial_angle(y_data)
#params['tilt'].value = tilt
#params['tilt'].min = tilt - 2*np.pi
#params['tilt'].max = tilt + 2*np.pi
return params
phi_deg = np.arange(0, 360, 22.5)
phi_rad = phi_deg * np.pi / 180
_angles_deg = np.arange(0, 360, 22.5)
_angles_rad = _angles_deg * np.pi / 180
_sin_vect = np.sin( 2*_angles_rad )
_cos_vect = np.cos( 2*_angles_rad )
def initial_angle(y_data):
vec = y_data.copy()
vec[np.isnan(vec)] = 0
    A = vec.dot(_sin_vect)
    B = vec.dot(_cos_vect)
phi = 0.5 * ( np.arctan(A/B)
+ np.pi * ( -1 * ((A<0) & (B<0)) + 1 * ((A>0) & (B<0)) ) )
return phi
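# --- Illustrative fitting sketch (added; not part of the original) ---
# Fits the anisotropy model above to a synthetic 16-detector distribution
# with lmfit.minimize(); the parameter values are made up for the demo and
# _angles_rad is the module's detector-angle grid.
def _example_fit_angular_distribution():
    true_params = initial_params()
    true_params['A'].value = 5.0
    true_params['tilt'].value = 0.3
    y = model_function(true_params, _angles_rad)
    result = lmfit.minimize(model_function, initial_params(y),
                            args=(_angles_rad, y))
    return result.params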
def slice_from_range(data, range):
data_list = isinstance(data, list)
range_list = isinstance(range[0], list)
if data_list and range_list:
if len(data) != len(range):
return None
return [ slice( d.searchsorted(np.min(r)), d.searchsorted(np.max(r)) )
for d, r in zip(data, range) ]
if data_list and (not range_list):
return [ slice( d.searchsorted(np.min(range)),
d.searchsorted(np.max(range)))
for d in data ]
if (not data_list) and range_list:
return [ slice( data.searchsorted(np.min(r)),
data.searchsorted(np.max(r)))
for r in range ]
return slice( data.searchsorted(np.min(range)),
data.searchsorted(np.max(range)))
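# --- Illustrative sketch (added; not part of the original module) ---
# slice_from_range() maps a value range onto index slices of sorted scales;
# with one sorted scale and one range it returns a single slice.
def _example_slice_from_range():
    scale = np.linspace(0., 1., 11)           # sorted axis, 0.0 ... 1.0
    s = slice_from_range(scale, [0.25, 0.65])
    return scale[s]                           # values inside the range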
def get_raw_signals(evt, source_string, time_slice=slice(None), verbose=False):
if verbose:
print 'Trying to grab raw signals from source{}.'.format(get_source(source_string))
try:
# try to get the acqiris data
acqiris_data = evt.get(psana.Acqiris.DataDescV1, get_source(source_string))
except:
if verbose:
print 'Fail. Exception was thrown.'
return None
if acqiris_data is None:
return None
return np.array([ acqiris_data.data(ch).waveforms()[0][time_slice] for ch in
range(16) ])
#def sumSignals(signals, slices):
# if type(slices) != slice:
# slices = [slices]*16
# [
def get_signal_scaling(env, source_string, verbose=False):
temp = np.array( [tof.get_acqiris_scales(env, source_string, ch,
verbose=verbose) for ch in range(16)] )
return temp[:,1], temp[:,2]
class CookieBox:
'Class that handels the cookiebox data'
def __init__(self, config, verbose=False):
'''\
Initialization method.\
The configuration should be a single object or a list of 16 objects.\
'''
if verbose:
print 'In cookie_box.CookieBox.__init__().'
self._verbose = verbose
# A list of the TofData objects
if verbose:
print 'Make the list of TofData objects.'
self._tof_list = []
verbose_first = verbose
for conf in config.tof_config_list:
self._tof_list.append(tof.TofData(conf, verbose=verbose_first))
verbose_first = False
self._phi_deg = _angles_deg
self._phi_rad = _angles_rad
self._time_amplitudes_up_to_date = False
self.proj = projector()
def setup_scales(self, energy_scale_eV,
env=None, time_scale=None, retardation=0, verbose=None):
#print 'energy scale: ', energy_scale_eV
if verbose is None:
verbose = self._verbose
if verbose:
print 'In cookie_box.CookieBox.setup_scales().'
for tof in self._tof_list:
tof.setup_scales(energy_scale_eV, env, time_scale,
retardation=retardation)
def set_baseline_subtraction_averaging(self, weightLast):
for tof in self._tof_list:
tof.set_baseline_subtraction_averaging(weightLast)
def set_raw_data(self, evt, verbose=False, newDataFactor=1):
for tof in self._tof_list:
tof.set_raw_data(evt, newDataFactor=newDataFactor)
self._time_amplitudes_up_to_date = False
def get_time_scales_us(self, roi=None, verbose=False):
return [tof.get_time_scale_us(roi=roi) for tof in self._tof_list]
def get_time_amplitudes(self, roiSlices=None, verbose=False):
if not self._time_amplitudes_up_to_date:
self._timeAmplitudes = [tof.get_time_amplitude() for tof in
self._tof_list]
if roiSlices is None:
return self._timeAmplitudes
return [amp[s] for amp, s in zip(self._timeAmplitudes, roiSlices)]
def get_time_amplitudes_filtered(self, roi=None, verbose=False):
return [tof.get_time_amplitude_filtered(roi=roi) for tof
in self._tof_list]
def get_energy_scales_eV(self, roi=None, verbose=False):
return [tof.get_energy_scale_eV(roi=roi) for tof in self._tof_list]
def get_energy_amplitudes(self, roi=None, verbose=False):
return [tof.get_energy_amplitude(roi=roi) for tof in self._tof_list]
def get_energy_spectra_width(self, threshold_V=0.02, min_width_eV=2,
energy_offset=0, use_rel=False,
threshold_rel=0.5, roi=None):
return [tof.get_trace_bounds(threshold_V=threshold_V,
min_width_eV=min_width_eV,
energy_offset=energy_offset,
useRel=use_rel,
threshold_rel=threshold_rel,
roi=roi) for
tof in self._tof_list]
def get_moments(self, domain='Time', roi=None):
return [ tof.get_moments(domain=domain, roi=roi) for tof in
self._tof_list]
def get_positions(self):
moments = np.array(self.get_moments(domain='Time', roi=0))
positions = np.array([moments[i,0] - moments[i+8,0] for i in range(8)])
return positions
def get_photon_energy(self, energyShift=0):
moments = np.array(self.get_moments(domain='Energy'))
amplitudes = self.get_intensity_distribution(domain='Energy')
return (np.average(moments, axis=0, weights=amplitudes)
+ np.array([energyShift, 0]))
def get_intensity_distribution(self, rois=[slice(None)]*16,
domain='Time', verbose=None, detFactors=[1]*16):
if verbose is None:
verbose = self._verbose
if verbose:
print 'Geting the intensity distribution',
if rois is None:
print '.'
else:
print 'in roi {}.'.format(rois)
intensity = []
if domain=='Energy':
if verbose:
print 'Using energy domain.'
ampFunk = self.get_energy_amplitudes
else:
ampFunk = self.get_time_amplitudes_filtered
for amp, factor, roi in zip(ampFunk(verbose=verbose,
roi=(rois if isinstance(rois, int)
else None)),
detFactors,
rois if isinstance(rois, list) else
[rois]*16):
if amp is None:
intensity.append(np.nan)
else:
intensity.append(amp[roi if isinstance(roi, slice) else
slice(None)].sum() * factor)
if verbose:
print 'Returning vector of length {}.'.format(len(intensity))
return np.array(intensity)
def getAngles(self, kind='rad'):
if kind=='rad':
return self._phi_rad
else:
return self._phi_deg
def randomize_amplitudes(self, verbose=False):
'''This is only for testing. Direct manipulation of the private
variables of the TofData class (as done here) is not recommended.'''
params = initial_params()
params['linear'].value = random.random()
params['tilt'].value = random.random()*np.pi - np.pi/2
params['A'].value = 1
if verbose:
for par in params.itervalues():
print par
factors = ( model_function(params, self.getAngles()) *
np.random.normal(1, 0.05, (16,)) )
#factors = model_function(params, self.getAngles())
for factor, tof in zip(factors, self._tof_list):
tof._time_amplitude *= factor
#tof._timeAmplitude_filtered *= factor
tof._energy_amplitude *= factor
return params
if __name__ == '__main__':
do_plot = False
verbose = True
import time
if do_plot:
import matplotlib.pyplot as plt
plt.ion()
if verbose:
print 'Connect to data source.'
ds = simplepsana.get_data_source('exp=amoi0114:run=33', verbose=verbose)
if verbose:
print 'Import the configutration.'
import cookie_box_default_config as config
reload(config)
config
if hasattr(config, 'domainToDisplay') and config.domainToDisplay=='Energy':
domain = 'Energy'
else:
domain = 'Time'
if verbose:
print 'Create the CookieBox object.'
cb = CookieBox(config, verbose=verbose)
t_temp = None
if verbose:
print 'Go through the events.'
for i, evt in enumerate(ds.events()):
if t_temp is not None:
print 'event processing time:', time.time()-t_temp, 's'
t_temp = time.time()
if i >= 1:
break
if i%10==0 and do_plot:
plot=True
else:
plot=False
if i==0:
if verbose:
print 'Set up the time and energy scales.'
cb.setup_scales(config.energy_scale_eV, ds.env())
if verbose:
print 'Get the energy scales.'
energy_scales = cb.get_energy_scales_eV()
#x_scales_roi_0 = cb.get_energy_scales_eV(roi=0)
#x_scales_roi_1 = cb.get_energy_scales_eV(roi=1)
if verbose:
print 'Get the time scales.'
time_scales = cb.get_time_scales_us()
if verbose:
print 'Get the ROI slices in time domain.'
t_roi_0s = slice_from_range(time_scales, config.time_roi_0_us)
t_roi_1s = slice_from_range(time_scales, config.time_roi_1_us)
if verbose:
print 'Get the time scales corresponding to the ROIs.'
time_scales_roi_0 = [t[s] for t,s in zip(time_scales, t_roi_0s)]
time_scales_roi_1 = [t[s] for t,s in zip(time_scales, t_roi_1s)]
angles = cb.getAngles('rad')
if do_plot:
fig1 = plt.figure(1); plt.clf()
for k, x , x_roi_0, x_roi_1 in zip(range(16), time_scales,
time_scales_roi_0, time_scales_roi_1):
plt.subplot(4,4,k+1)
plt.title('det at {} deg'.format(cb.getAngles('deg')[k]))
plt.plot(x, np.zeros_like(x))
plt.plot(x_roi_0, np.zeros_like(x_roi_0), 'r')
plt.plot(x_roi_1, np.zeros_like(x_roi_1), 'g')
yTMin, yTMax = 0, 0
fig2 = plt.figure(2);
fig2.clf()
fig2ax = fig2.add_subplot(111, polar=True)
angles_fit = np.linspace(0, 2*np.pi, 10000)
fig2ax.plot(
angles, np.ones_like(angles), 'ro',
angles, np.ones_like(angles), 'gs',
angles_fit, np.zeros_like(angles_fit), 'm-')
#angles_fit, np.zeros_like(angles_fit), 'm--')
fig3 = plt.figure(3)
fig3.clf()
fig3ax = fig3.add_subplot(111)
fig3ax.plot(energy_scales[0],
np.zeros_like(energy_scales[0]))
print 'event number', i
if verbose:
print 'Set the data of the event to the data structure.'
cb.set_raw_data(evt)
#rand_params = cb.randomize_amplitudes(verbose=True)
#rand_tilt.append(rand_params['tilt'].value)
if plot:
amplitudes0 = cb.get_intensity_distribution(roi=0, domain=domain,
verbose=True)
amplitudes1 = cb.get_intensity_distribution(roi=1, domain=domain,
verbose=True)
energy_data = cb.get_energy_amplitudes()
#print len(energy_data)
#print energy_data[0].shape
#spectrum = np.average(energy_data, axis=0)
spectrum = energy_data[15]
#print spectrum
#energy_data_roi_0 = cb.get_energy_amplitudes(roi=0)
#energy_data_roi_1 = cb.get_energy_amplitudes(roi=1)
time_data = cb.get_time_amplitudes()
time_data_roi_0 = [t[s] for t,s in zip(time_data, t_roi_0s)]
time_data_roi_1 = [t[s] for t,s in zip(time_data, t_roi_1s)]
tMin = np.min(time_data)
tMax = np.max(time_data)
rescale_flag = False
if tMin < yTMin:
yTMin = tMin
rescale_flag = True
if tMax > yTMax:
yTMax = tMax
rescale_flag = True
for ax, y, y_roi_0, y_roi_1 in zip(fig1.axes, time_data, time_data_roi_0,
time_data_roi_1):
ax.lines[0].set_y_data(y)
ax.lines[1].set_y_data(y_roi_0)
ax.lines[2].set_y_data(y_roi_1)
if rescale_flag:
ax.set_ybound(yTMin, yTMax)
if verbose:
print 'Get the signal amplitudes.'
amplitudes = cb.get_intensity_distribution(rois=0, domain=domain,
verbose=verbose)
params = initial_params(amplitudes)
#proj_tilt.append(params['tilt'].value)
params['beta'].vary=False
res = lmfit.minimize(model_function, params, args=(angles, amplitudes),
method='leastsq')
print res.nfev, 'function evaluations'
print 'Fit', ('succeded' if res.success else 'failed')
print res.message
print lmfit.fit_report(params)
moments = cb.get_moments(domain=domain, roi=0)
if plot:
fig2ax.lines[0].set_y_data(amplitudes0)
fig2ax.lines[1].set_y_data(amplitudes1)
fig2ax.lines[2].set_y_data(model_function(params, angles_fit))
#fig2ax.lines[3].set_y_data(amplitudes0.mean() * model_function(rand_params, angles_fit))
fig2ax.relim()
fig2ax.autoscale_view()
fig3ax.lines[0].set_y_data(spectrum)
fig3ax.relim()
fig3ax.autoscale_view()
for fig in [fig1, fig2, fig3]:
fig.canvas.draw()
#rand_tilt = np.array(rand_tilt)
#proj_tilt = np.array(proj_tilt)
raw_input('Press enter to exit...')
| gpl-2.0 | -6,610,112,985,150,867,000 | 34.230108 | 101 | 0.545843 | false |
AlexisTM/flyingros | flyingros_nav/nodes/control_thread.py | 1 | 6591 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
control_thread.py
This script sends positions to control the UAV in X, Y, Z
ILPS is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ILPS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with ILPS. If not, see <http://www.gnu.org/licenses/>.
Software created by Alexis Paques and Nabil Nehri for the UCL
in a Drone-Based Additive Manufacturing of Architectural Structures
project financed by the MIT Seed Fund
Originaly inspired of Vladimir Ermakov work (c) 2015 under GNU GPLv3
Copyright (c) Alexis Paques 2016
Copyright (c) Nabil Nehri 2016
"""
from __future__ import division
import rospy
import mavros
import time
import tf
import numpy as np
from threading import Thread
from mavros.utils import *
from geometry_msgs.msg import PoseStamped, Point, Pose
from sensor_msgs.msg import Imu, Range
from mavros_msgs.srv import SetMode, CommandBool
from mavros_msgs.msg import State, PositionTarget
from math import pi
# Returns a radian from a degree
def deg2radf(a):
return float(a)*pi/180
# Returns a degree from a radian
def rad2degf(a):
return float(a)*180/pi
class _GetchUnix:
"""Fetch and character using the termios module."""
def __init__(self):
import tty, sys
from select import select
def __call__(self):
import sys, tty, termios
from select import select
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
[i, o, e] = select([sys.stdin.fileno()], [], [], 1)
if i:
ch = sys.stdin.read(1)
else:
ch = None
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
getch = _GetchUnix()
def State_Callback(data):
global state
state = data
def Pose_Callback(data):
global pose
pose = data
def sendSetpoint():
# Input data
global setpoint, yawSetPoint, run, position_control
# Output data
local_setpoint_pub = rospy.Publisher('/mavros/setpoint_position/local', PoseStamped, queue_size=0)
rate = rospy.Rate(5)
while run:
q = tf.transformations.quaternion_from_euler(0, 0, deg2radf(yawSetPoint), axes="sxyz")
msg = PoseStamped()
msg.header.stamp = rospy.Time.now()
msg.pose.position.x = float(setpoint.x)
msg.pose.position.y = float(setpoint.y)
msg.pose.position.z = float(setpoint.z)
msg.pose.orientation.x = q[0]
msg.pose.orientation.y = q[1]
msg.pose.orientation.z = q[2]
msg.pose.orientation.w = q[3]
local_setpoint_pub.publish(msg)
rate.sleep()
def InterfaceKeyboard():
# Input data
global pose
# Output data
global setpoint, yawSetPoint, run, position_control
# Publishers
global arming_client, set_mode_client, lasers_yaw
what = getch()
if what == "t":
setpoint.x = setpoint.x - 0.1
if what == "g":
setpoint.x = setpoint.x + 0.1
if what == "f":
setpoint.y = setpoint.y - 0.1
if what == "h":
setpoint.y = setpoint.y + 0.1
if what == "i":
setpoint.z = setpoint.z + 0.1
if what == "k":
setpoint.z = setpoint.z - 0.1
if what == "b":
yawSetPoint = yawSetPoint + 45
if what == "n":
yawSetPoint = yawSetPoint - 45
if what == "c":
setpoint.x = pose.pose.position.x
setpoint.y = pose.pose.position.y
setpoint.z = pose.pose.position.z
if what == "q":
arming_client(False)
if what == "a":
arming_client(True)
if what == "e":
set_mode_client(custom_mode = "OFFBOARD")
if what == "m":
run = False
time.sleep(1)
exit()
Q = (
pose.pose.orientation.x,
pose.pose.orientation.y,
pose.pose.orientation.z,
pose.pose.orientation.w)
euler = tf.transformations.euler_from_quaternion(Q)
rospy.loginfo("Position x: %s y: %s z: %s", pose.pose.position.x, pose.pose.position.y, pose.pose.position.z)
rospy.loginfo("Setpoint is now x:%s, y:%s, z:%s", setpoint.x, setpoint.y, setpoint.z)
rospy.loginfo("IMU :")
rospy.loginfo("roll : %s", rad2degf(euler[0]))
rospy.loginfo("pitch : %s", rad2degf(euler[1]))
rospy.loginfo("yaw : %s and from lasers %s", rad2degf(euler[2]), rad2degf(lasers_yaw))
rospy.loginfo("wanted yaw : %s", yawSetPoint)
def init():
# Input data
# Output data
global state, setpoint, yawSetPoint, \
run, laserposition, pose, lasers_raw, position_control
# Publishers
global local_pos_pub, arming_client, set_mode_client, lasers_yaw
# Objects
lasers_yaw = 0
# Global variable initialisation
pose = PoseStamped()
laserposition = PoseStamped()
setpoint = Point()
setpoint.x = 1
setpoint.y = 1
setpoint.z = 1
# When true, setpoints are positions
# When false, setpoints is a velocity
position_control = True
yawSetPoint = 0
run = True
state = State()
# Node initiation
rospy.init_node('control_position_setpoint_py')
time.sleep(1)
# Publishers, subscribers and services
pose_sub = rospy.Subscriber('/mavros/local_position/pose', PoseStamped, Pose_Callback)
state_sub = rospy.Subscriber('/mavros/state', State, State_Callback)
rospy.wait_for_service('mavros/set_mode')
set_mode_client = rospy.ServiceProxy('mavros/set_mode', SetMode)
rospy.wait_for_service('mavros/cmd/arming')
arming_client = rospy.ServiceProxy('mavros/cmd/arming', CommandBool)
# Thread to send setpoints
tSetPoints = Thread(target=sendSetpoint).start()
while not rospy.is_shutdown():
InterfaceKeyboard()
if __name__ == '__main__':
rospy.loginfo("We are ready")
try:
init()
except rospy.ROSInterruptException:
rospy.loginfo("init failed")
pass
| gpl-3.0 | 7,663,449,591,026,991,000 | 28.799065 | 113 | 0.62206 | false |
robbje/eis | src/eqc/eqc.py | 1 | 3399 | #!/usr/bin/env python2
from parser import Node
from copy import deepcopy
import numpy as np
from eqclib import getClassDefinition, resetClassDefinition
class CircuitTree(Node):
def __init__(
self,
params=[],
eqc=lambda w,
p: 0,
name="",
pNames="",
jac=[],
constraints=[]):
"""Constructor for a CircuitTree node
params: a list of numerical values
eqc: frequency dependent equivalent circuit function
name: name of the element
pNames: names of the elements' parameters
jac: elements jacobian vector of parameters
"""
Node.__init__(self)
self.p = params
self.eqc = eqc
self.name = name
self.pNames = pNames
self.jac = jac
self.constraints = constraints
def collapseCircuit(self):
"""Builds the function describing the frequency dependence of circuit
Returns the root node of the parser tree, with all equivalent
circuit functions generated.
"""
if self.value.type == 'SYMBOL':
cdef = getClassDefinition(self.value.value)
new = CircuitTree(**cdef)
elif self.value.type == 'PARALLEL':
new = self.left.collapseCircuit()
new |= self.right.collapseCircuit()
elif self.value.type == 'SERIES':
new = self.left.collapseCircuit()
new += self.right.collapseCircuit()
else:
raise ValueError('BUG: Unknown type in parse tree')
return None
self.eqc = deepcopy(new.eqc)
self.p = deepcopy(new.p)
self.name = deepcopy(new.name)
self.pNames = deepcopy(new.pNames)
self.jac = deepcopy(new.jac)
self.constraints = deepcopy(new.constraints)
return self
def getCircuit(self):
resetClassDefinition()
self.collapseCircuit()
return self.eqc, self.jac
def getParameterSet(self):
np = len(self.pNames)
return [[self.pNames[i], self.constraints[i]] for i in xrange(np)]
def __add__(self, other):
pu = len(self.p)
self.p = np.append(self.p, other.p)
self.pNames += other.pNames
self.constraints += other.constraints
f = self.eqc
self.name = "(%s+%s)" % (self.name, other.name)
self.eqc = lambda w, p: f(w, p) + other.eqc(w, p[pu:])
self.jac += [lambda w, p: j(w, p[pu:]) for j in other.jac]
return self
def __or__(self, other):
pu = len(self.p)
self.p = np.append(self.p, other.p)
self.pNames += other.pNames
self.constraints += other.constraints
f = self.eqc
self.name = "(%s|%s)" % (self.name, other.name)
self.eqc = lambda w, p: \
1.0 / (1.0 / f(w, p) + 1.0 / other.eqc(w, p[pu:]))
for i, jac in enumerate(self.jac):
self.jac[i] = lambda w, p: np.power(other.eqc(
w, p[pu:]), 2.0) * jac(w, p) / np.power(other.eqc(w, p[pu:]) + f(w, p), 2.0)
for jac in other.jac:
self.jac.append(lambda w, p: np.power(f(w, p), 2.0) *
jac(w, p[pu:]) /
np.power(other.eqc(w, p[pu:]) +
f(w, p), 2.0))
return self
| mit | -6,485,698,705,719,781,000 | 32.653465 | 92 | 0.531921 | false |
zbqf109/goodo | openerp/addons/account/__init__.py | 1 | 1247 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import models
import wizard
import report
from openerp import SUPERUSER_ID
def _auto_install_l10n(cr, registry):
#check the country of the main company (only) and eventually load some module needed in that country
country_code = registry['res.users'].browse(cr, SUPERUSER_ID, SUPERUSER_ID, {}).company_id.country_id.code
if country_code:
#auto install localization module(s) if available
module_list = []
if country_code in ['BJ', 'BF', 'CM', 'CF', 'KM', 'CG', 'CI', 'GA', 'GN', 'GW', 'GQ', 'ML', 'NE', 'CD', 'SN', 'TD', 'TG']:
#countries using OHADA Chart of Accounts
module_list.append('l10n_syscohada')
else:
module_list.append('l10n_' + country_code.lower())
if country_code == 'US':
module_list.append('account_plaid')
if country_code in ['US', 'AU', 'NZ']:
module_list.append('account_yodlee')
module_ids = registry['ir.module.module'].search(cr, SUPERUSER_ID, [('name', 'in', module_list), ('state', '=', 'uninstalled')])
registry['ir.module.module'].button_install(cr, SUPERUSER_ID, module_ids, {})
| gpl-3.0 | 3,208,437,055,561,920,000 | 43.535714 | 136 | 0.61668 | false |
StephDC/MiniBioKit | bioChemTool/wigUtil.py | 1 | 4604 | from . import commonUtil
class ucscFile():
'''Universal file structure for UCSC Genome Sequence files including wig and bedgraph'''
def __init__(self,name,description='',visibility='hide',color='0,0,0',priority='100',additionConf='',browserConf=None):
self.config = commonUtil.equalDict()
self.config['type'] = 'unknown'
self.config['name'] = name
self.config['description'] = description
self.config['visibility'] = visibility
self.config['color'] = color
self.config['priority'] = priority
self.addn = additionConf
if browserConf is None:
self.brow = commonUtil.equalDict()
else:
self.brow = browserConf
self.data = []
def __str__(self):
result = str(self.brow) if self.brow else ''
result += '\ntrack '
result += str(self.config)
if self.addn.strip():
result += ' '+self.addn.strip()
result += '\n'
for item in self.data:
result += str(item)
return result
def addItem(self,item):
self.data.append(item)
def remItem(self,item):
self.data.remove(item)
def writeFile(self,fName):
stdout = open(fName,'w')
stdout.write(str(self))
stdout.close()
class wigFile(ucscFile):
'''A write-only wig file creator'''
def __init__(self,name,description='',visibility='hide',color='255,255,255',priority='100',additionConf='',browserConf=''):
self.config = commonUtil.equalDict()
self.config['type'] = 'wiggle_0'
self.config['name'] = name
self.config['description'] = description
self.config['visibility'] = visibility
self.config['color'] = color
self.config['priority'] = priority
self.addn = additionConf
self.brow = browserConf
self.data = []
class bedFile(ucscFile):
'''UCSC BED File'''
pass
class wigItem():
'''Items that could be joined into a wig file
Has two types:
variableStep - varStep = True (default)
fixedStep - varStep = False
Need to specify chromosome when initializing.'''
def __init__(self,chromosome,span,varStep=True,start=None):
self.chr = chromosome
self.type = varStep
self.start = start
if not varStep and not start:
raise SyntaxError('fixedStep requires start position.')
self.span = span
self.data = []
def __str__(self):
if self.type:
result = 'variableStep '
else:
result = 'fixedStep '
result += 'chrom='+self.chr
if self.type:
if self.span:
result += ' span='+str(self.span)
result += '\n'
else:
result += ' start='+str(self.start)
result += ' step='+str(self.span)
for item in self.data:
result += str(item)+'\n'
return result
def __getitem__(self,key):
return self.data[key]
def __setitem__(self,key,item):
self.data[key] = item
def __iter__(self):
return self.data.__iter__()
def append(self,item):
self.data.append(item)
add = append
def pop(self):
return self.data.pop()
def bedParse(line):
if not bool(line.strip()) or line.strip()[0] == '#':
return None
result = []
typeList = [str,int,int,str,float]
tmp = line.strip().split()
for item in range(5):
result.append(typeList[item](tmp[item]))
return result
def readBED(fName):
'''Read the BED file that was created before.
First attempt to ever read a file.
Alert: Modifying the file in a thread safe way is not supported.'''
from . import dsvUtil
result = bedFile(fName,browserConf = '')
stdin = open(fName,'r')
confLine = ''
while confLine is not None:
confLine = stdin.readline().strip()
if len(confLine) > 5 and confLine[:5] == 'track':
for item in commonUtil.splitQuote(confLine,' '):
if item.strip():
try:
result.config[item[:item.index('=')]] = item[item.index('=')+1:]
except ValueError:
print('Unquoted parameter: '+item)
confLine = None
elif len(confLine) > 7 and confLine[:7] == 'browser':
result.brow += confLine+'\n'
## Configuration stored.
fileContent = dsvUtil.iterParse_iterator(stdin,['chrom','start','end','value'],bedParse)
for item in fileContent:
result.data.append(item)
return result
| gpl-3.0 | 7,058,044,371,761,962,000 | 33.878788 | 127 | 0.570808 | false |
faisalp4p/slang-python | step6/AST.py | 1 | 14814 | from abc import ABCMeta, abstractmethod
from Lexer import RELATIONAL_OPERATOR
class OPERATOR:
PLUS = "+"
MINUS = "-"
DIV = "/"
MUL = "*"
class TYPE_INFO:
TYPE_ILLEGAL = -1
TYPE_NUMERIC = 0
TYPE_BOOL = 1
TYPE_STRING = 2
class SYMBOL_INFO:
def __init__(self, symbol_name=None, type=None, val=None):
self.symbol_name = symbol_name
self.type = type
self.val = val
class Exp:
__metaclass__ = ABCMeta
@abstractmethod
def Evaluate(self): pass
@abstractmethod
def TypeCheck(self): pass
@abstractmethod
def GetType(self): pass
class BooleanConstant(Exp):
def __init__(self, pvalue):
self.info = SYMBOL_INFO(symbol_name=None,
val=pvalue,
type=TYPE_INFO.TYPE_BOOL)
def Evaluate(self, run_cntxt):
return self.info
def TypeCheck(self, run_cntxt):
return self.info.type
def GetType(self):
return self.info.type
class NumericConstant(Exp):
def __init__(self, value):
self.info = SYMBOL_INFO(symbol_name=None,
val=value,
type=TYPE_INFO.TYPE_NUMERIC)
def Evaluate(self, run_cntxt):
return self.info
def TypeCheck(self, run_cntxt):
return self.info.type
def GetType(self):
return self.info.type
def __str__(self):
return u'NumericConstant(%d)' % self.info.val
class StringLiteral(Exp):
def __init__(self, pvalue):
self.info = SYMBOL_INFO(symbol_name=None,
val=pvalue,
type=TYPE_INFO.TYPE_STRING)
def Evaluate(self, run_cntxt):
return self.info
def TypeCheck(self, run_cntxt):
return self.info.type
def GetType(self):
return self.info.type
class Variable(Exp):
def __init__(self, info=None, com_cntxt=None, name=None, _val=None):
if info:
self.var_name = info.symbol_name
return
if type(_val) in [int, long]:
t = TYPE_INFO.TYPE_NUMERIC
elif type(_val) == bool:
t = TYPE_INFO.TYPE_BOOL
elif type(_val) == str:
t = TYPE_INFO.TYPE_STRING
else:
raise Exception("Fuck")
s = SYMBOL_INFO(symbol_name=name,
type=t,
val=_val)
com_cntxt.add(s)
self.var_name = name
def GetName(self):
return self.var_name
def Evaluate(self, run_cntxt):
if not run_cntxt.TABLE:
return None
else:
return run_cntxt.get(self.var_name)
def TypeCheck(self, com_cntxt):
if not com_cntxt.TABLE:
return TYPE_INFO.TYPE_ILLEGAL
else:
a = com_cntxt.get(self.var_name)
if a:
self._type = a.type
return a.type
return TYPE_INFO.TYPE_ILLEGAL
def GetType(self):
return self._type
class BinaryPlus(Exp):
def __init__(self, ex1, ex2):
self.ex1 = ex1
self.ex2 = ex2
def Evaluate(self, run_cntxt):
eval_left = self.ex1.Evaluate(run_cntxt)
eval_right = self.ex2.Evaluate(run_cntxt)
if (eval_left.type == TYPE_INFO.TYPE_STRING and eval_right.type == TYPE_INFO.TYPE_STRING) or (eval_left.type == TYPE_INFO.TYPE_NUMERIC and eval_right.type == TYPE_INFO.TYPE_NUMERIC):
retval = SYMBOL_INFO(type=eval_left.type,
val=eval_left.val + eval_right.val,
symbol_name="")
return retval
else:
raise Exception("Type mismatch")
def TypeCheck(self, com_cntxt):
eval_left = self.ex1.TypeCheck(com_cntxt)
eval_right = self.ex2.TypeCheck(com_cntxt)
if eval_left == eval_right and eval_left != TYPE_INFO.TYPE_BOOL:
self._type = eval_left
return self._type
else:
raise Exception("Type mismatch")
def GetType(self):
return self._type
class BinaryMinus(Exp):
def __init__(self, ex1, ex2):
self.ex1 = ex1
self.ex2 = ex2
def Evaluate(self, run_cntxt):
eval_left = self.ex1.Evaluate(run_cntxt)
eval_right = self.ex2.Evaluate(run_cntxt)
if eval_left.type == TYPE_INFO.TYPE_NUMERIC and eval_right.type == TYPE_INFO.TYPE_NUMERIC:
retval = SYMBOL_INFO(type=eval_left.type,
val=eval_left.val - eval_right.val,
symbol_name="")
return retval
else:
raise Exception("Type mismatch")
def TypeCheck(self, com_cntxt):
eval_left = self.ex1.TypeCheck(com_cntxt)
eval_right = self.ex2.TypeCheck(com_cntxt)
if eval_left == eval_right and eval_left == TYPE_INFO.TYPE_NUMERIC:
self._type = eval_left
return self._type
else:
raise Exception("Type mismatch")
def GetType(self):
return self._type
class Mul(Exp):
def __init__(self, ex1, ex2):
self.ex1 = ex1
self.ex2 = ex2
def Evaluate(self, run_cntxt):
eval_left = self.ex1.Evaluate(run_cntxt)
eval_right = self.ex2.Evaluate(run_cntxt)
if eval_left.type == TYPE_INFO.TYPE_NUMERIC and eval_right.type == TYPE_INFO.TYPE_NUMERIC:
retval = SYMBOL_INFO(type=eval_left.type,
val=eval_left.val * eval_right.val,
symbol_name="")
return retval
else:
raise Exception("Type mismatch")
def TypeCheck(self, com_cntxt):
eval_left = self.ex1.TypeCheck(com_cntxt)
eval_right = self.ex2.TypeCheck(com_cntxt)
if eval_left == eval_right and eval_left == TYPE_INFO.TYPE_NUMERIC:
self._type = eval_left
return self._type
else:
raise Exception("Type mismatch")
def GetType(self):
return self._type
class Div(Exp):
def __init__(self, ex1, ex2):
self.ex1 = ex1
self.ex2 = ex2
def Evaluate(self, run_cntxt):
eval_left = self.ex1.Evaluate(run_cntxt)
eval_right = self.ex2.Evaluate(run_cntxt)
if eval_left.type == TYPE_INFO.TYPE_NUMERIC and eval_right.type == TYPE_INFO.TYPE_NUMERIC:
retval = SYMBOL_INFO(type=eval_left.type,
val=eval_left.val / eval_right.val,
symbol_name="")
return retval
else:
raise Exception("Type mismatch")
def TypeCheck(self, com_cntxt):
eval_left = self.ex1.TypeCheck(com_cntxt)
eval_right = self.ex2.TypeCheck(com_cntxt)
if eval_left == eval_right and eval_left == TYPE_INFO.TYPE_NUMERIC:
self._type = eval_left
return self._type
else:
raise Exception("Type mismatch")
def GetType(self):
return self._type
class UnaryPlus(Exp):
def __init__(self, ex1):
self.ex1 = ex1
def Evaluate(self, run_cntxt):
eval_left = self.ex1.Evaluate(run_cntxt)
if eval_left.type == TYPE_INFO.TYPE_NUMERIC:
retval = SYMBOL_INFO(type=eval_left.type,
val=eval_left.val,
symbol_name="")
return retval
else:
raise Exception("Type mismatch")
def TypeCheck(self, com_cntxt):
eval_left = self.ex1.TypeCheck(com_cntxt)
if eval_left == TYPE_INFO.TYPE_NUMERIC:
self._type = eval_left
return self._type
else:
raise Exception("Type mismatch")
def GetType(self):
return self._type
class UnaryMinus(Exp):
def __init__(self, ex1):
self.ex1 = ex1
def Evaluate(self, run_cntxt):
eval_left = self.ex1.Evaluate(run_cntxt)
if eval_left.type == TYPE_INFO.TYPE_NUMERIC:
retval = SYMBOL_INFO(type=eval_left.type,
val=-eval_left.val,
symbol_name="")
return retval
else:
raise Exception("Type mismatch")
def TypeCheck(self, com_cntxt):
eval_left = self.ex1.TypeCheck(com_cntxt)
if eval_left == TYPE_INFO.TYPE_NUMERIC:
self._type = eval_left
return self._type
else:
raise Exception("Type mismatch")
def GetType(self):
return self._type
class RelationExp(Exp):
def __init__(self, op, ex1, ex2):
self.m_op = op
self._ex1 = ex1
self._ex2 = ex2
def Evaluate(self, run_cntxt):
eval_left = self._ex1.Evaluate(run_cntxt)
eval_right = self._ex2.Evaluate(run_cntxt)
retval = SYMBOL_INFO()
if eval_left.type == TYPE_INFO.TYPE_NUMERIC and eval_right.type == TYPE_INFO.TYPE_NUMERIC:
retval.type = TYPE_INFO.TYPE_BOOL
retval.symbol_name = ""
if self.m_op == RELATIONAL_OPERATOR.TOK_EQ:
retval.val = eval_left.val == eval_right.val
elif self.m_op == RELATIONAL_OPERATOR.TOK_NEQ:
retval.val = eval_left.val != eval_right.val
elif self.m_op == RELATIONAL_OPERATOR.TOK_GT:
retval.val = eval_left.val > eval_right.val
elif self.m_op == RELATIONAL_OPERATOR.TOK_GTE:
retval.val = eval_left.val >= eval_right.val
elif self.m_op == RELATIONAL_OPERATOR.TOK_LT:
retval.val = eval_left.val < eval_right.val
elif self.m_op == RELATIONAL_OPERATOR.TOK_LTE:
retval.val = eval_left.val <= eval_right.val
return retval
elif eval_left.type == TYPE_INFO.TYPE_STRING and eval_right.type == TYPE_INFO.TYPE_STRING:
retval.type = TYPE_INFO.TYPE_BOOL
retval.symbol_name = ""
if self.m_op == RELATIONAL_OPERATOR.TOK_EQ:
retval.val = eval_left.val == eval_right.val
elif self.m_op == RELATIONAL_OPERATOR.TOK_NEQ:
retval.val = eval_left.val != eval_right.val
else:
retval = False
return retval
elif eval_left.type == TYPE_INFO.TYPE_BOOL and eval_right.type == TYPE_INFO.TYPE_BOOL:
retval.type = TYPE_INFO.TYPE_BOOL
retval.symbol_name = ""
if self.m_op == RELATIONAL_OPERATOR.TOK_EQ:
retval.val = eval_left.val == eval_right.val
elif self.m_op == RELATIONAL_OPERATOR.TOK_NEQ:
retval.val = eval_left.val != eval_right.val
else:
retval = False
return retval
return None
def TypeCheck(self, com_cntxt):
eval_left = self._ex1.TypeCheck(com_cntxt)
eval_right = self._ex2.TypeCheck(com_cntxt)
if eval_right != eval_left:
raise Exception("Wrong Type in Expression")
if eval_left == TYPE_INFO.TYPE_STRING and not (self.m_op == RELATIONAL_OPERATOR.TOK_EQ or self.m_op == RELATIONAL_OPERATOR.TOK_NEQ):
raise Exception("Only == and != supported for string type")
if eval_left == TYPE_INFO.TYPE_BOOL and not (self.m_op == RELATIONAL_OPERATOR.TOK_EQ or self.m_op == RELATIONAL_OPERATOR.TOK_NEQ):
raise Exception("Only == and != supported for boolean type")
self._optype = eval_left
self._type = TYPE_INFO.TYPE_BOOL
return self._type
def GetType(self):
return self._type
class LogicalExp(Exp):
def __init__(self, op, ex1, ex2):
self.m_op = op
self._ex1 = ex1
self._ex2 = ex2
def TypeCheck(self, com_cntxt):
eval_left = self._ex1.TypeCheck(com_cntxt)
eval_right = self._ex2.TypeCheck(com_cntxt)
if eval_left == eval_right and eval_left == TYPE_INFO.TYPE_BOOL:
self._type = TYPE_BOOL.TYPE_BOOL
return self._type
else:
raise "Wrong Type in Expression"
def Evaluate(self, run_cntxt):
eval_left = self._ex1.Evaluate(run_cntxt)
eval_right = self._ex2.Evaluate(run_cntxt)
if eval_left.type == TYPE_INFO.TYPE_BOOL and eval_right == TYPE_INFO.TYPE_BOOL:
retval = SYMBOL_INFO()
retval.type = TYPE_INFO.TYPE_BOOL
retval.symbol_name = ""
if self.m_op == TOKEN.TOK_AND:
retval.val = eval_left.val and eval_right.val
elif self.m_op == TOKEN.TOK_OR:
retval.val = eval_left.val or eval_right.val
else:
return None
return retval
return None
def GetType(self):
return self._type
class LogicalNot(Exp):
def __init__(self, ex):
self._ex = ex
def Evaluate(self, run_cntxt):
eval_left = self._ex.Evaluate(run_cntxt)
if eval_left.type == TYPE_INFO.TYPE_BOOL:
retval = SYMBOL_INFO(type=TYPE_INFO.TYPE_BOOL, symbol_name="", val=not eval_left.val)
return retval
else:
return None
def TypeCheck(self, com_cntxt):
eval_left = self._ex.TypeCheck(com_cntxt)
if eval_left == TYPE_INFO.TYPE_BOOL:
self._type = TYPE_INFO.TYPE_BOOL
return self._type
else:
raise Exception("Wrong Type in Expression")
def GetType(self):
return self._type
if __name__ == "__main__":
# Abstract Syntax Tree(AST) for 5*10
exp1 = BinaryExp(NumericConstant(5), NumericConstant(10), OPERATOR.MUL)
print (exp1.Evaluate())
# AST for - (10 + (30 + 50))
exp2 = UnaryExp(
BinaryExp(NumericConstant(10),
BinaryExp(NumericConstant(30),
NumericConstant(50),
OPERATOR.PLUS
),
OPERATOR.PLUS
),
OPERATOR.PLUS
)
print (exp2.Evaluate())
# AST for (400 + exp2)
exp3 = BinaryExp(NumericConstant(400), exp2, OPERATOR.PLUS)
print (exp3.Evaluate())
| mit | 8,989,551,126,098,030,000 | 27.765049 | 192 | 0.524706 | false |
percyfal/ratatosk | ratatosk/lib/tools/fastqc.py | 1 | 2849 | # Copyright (c) 2013 Per Unneberg
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""
Provide wrappers for `fastqc <http://www.bioinformatics.babraham.ac.uk/projects/fastqc/>`_
Classes
-------
"""
import os
import luigi
import ratatosk.lib.files.input
from ratatosk.job import JobTask
from ratatosk.jobrunner import DefaultShellJobRunner
from ratatosk.log import get_logger
import ratatosk.shell as shell
logger = get_logger()
class InputBamFile(ratatosk.lib.files.input.InputBamFile):
pass
class InputFastqFile(ratatosk.lib.files.input.InputFastqFile):
pass
# This was a nightmare to get right. Temporary output is a directory,
# so would need custom _fix_paths for cases like this
class FastQCJobRunner(DefaultShellJobRunner):
"""This job runner must take into account that there is no default
output file but rather an output directory"""
def _make_arglist(self, job):
arglist = [job.exe()]
if job.opts():
arglist += job.opts()
(tmp_files, job_args) = DefaultShellJobRunner._fix_paths(job)
(tmpdir, outdir) = tmp_files[0]
arglist += ['-o', tmpdir.path]
arglist += [job_args[0]]
return (arglist, tmp_files)
def run_job(self, job):
(arglist, tmp_files) = self._make_arglist(job)
(tmpdir, outdir) = tmp_files[0]
os.makedirs(os.path.join(os.curdir, tmpdir.path))
# Need to send output to temporary *directory*, not file
cmd = ' '.join(arglist)
logger.info("Job runner '{0}'; running command '{1}'".format(self.__class__, cmd))
(stdout, stderr, returncode) = shell.exec_cmd(cmd, shell=True)
if returncode == 0:
logger.info("Shell job completed")
for a, b in tmp_files:
logger.info("renaming {0} to {1}".format(a.path, b.path))
a.move(os.path.join(os.curdir, b.path))
else:
raise Exception("Job '{}' failed: \n{}".format(cmd.replace("= ", "="), " ".join([stderr])))
class FastQC(JobTask):
executable = luigi.Parameter(default="fastqc")
parent_task = luigi.Parameter(default = "ratatosk.lib.tools.fastqc.InputFastqFile")
suffix = luigi.Parameter(default="_fastqc")
def job_runner(self):
return FastQCJobRunner()
def args(self):
return [self.input()[0], self.output()]
| apache-2.0 | -1,005,853,423,500,576,800 | 35.063291 | 103 | 0.665146 | false |
prlosana/BuzzBoards | fortitoServer_broadcast.py | 1 | 14915 | import time
import pygame
import buzzbox
import tricolourleds8
import buttons8
import wear_multiplexer
import wear_sensor_heat
import wear_sensor_light
import wear_sensor_motion
from twisted.internet.protocol import Protocol, Factory
from twisted.internet import reactor, task
i2cBus = 1 #This depends on the model of the Raspberry Pi
box = None
tricolor = None
buttons = None
multiplexer = None
temperatureSensor = None
lightSensor = None
motionSensor = None
connected_multiplexer = False
connected_sensor_light = False
connected_sensor_temp = False
connected_sensor_motion = False
current_channel = 0
#Creates box instance
try:
box = buzzbox.BuzzBox(i2cBus)
box.clean()
except Exception as e:
print ("ERROR: BUZZBOX Unexpected error:", e) #sys.exc_info()[0]
#exitFlag = 1
#Creates tricolour leds instance
try:
# Colour code: 0=yellow, 1=green, 2=red, 3=off
tricolor = tricolourleds8.TricolourLeds8(i2cBus)
tricolor.clean()
except Exception as e:
print ("ERROR: TRICOLOUR LEDS Unexpected error:", e)
#exitFlag = 1
#Creates buttons instance
try:
buttons = buttons8.Buttons8(i2cBus)
buttons.clean()
except Exception as e:
print ("ERROR: BUTTONS Unexpected error:", e)
#exitFlag = 1
#Creates multiplexer instance
try:
multiplexer = wear_multiplexer.WearMultiplexer(i2cBus)
connected_multiplexer = True
except Expection as e:
connected_multiplexer = False
print ("ERROR: Multiplexer Unexpected error:", e)
#exitFlag = 1
class Fortito(Protocol):
def __init__(self, factory):
self.factory = factory
#Starts service for buttons
self.buttons_checker = task.LoopingCall(self.get_BUTTONS_STATUS)
self.buttons_checker.start(1, True)
#Starts service for sensors
self.sensors_checker = task.LoopingCall(self.sensorDiscoveryService)
self.sensors_checker.start(1, True)
def connectionMade(self):
self.factory.clients.append(self)
print ("Client connected.")#, self.factory.clients)
def connectionLost(self, reason):
self.factory.clients.remove(self)
def dataReceived(self, data):
print ("Data received: ", data)
self.get_BUZZBOX_STATUS(data)
def handle_MESSAGE(self, message):
for client in self.factory.clients:
client.transport.write(message)
def get_BUTTONS_STATUS(self):
global buttons
#print ("get_BUTTONS_STATUS running......")
result = buttons.readValue()
if result <> "":
print (str(result), " pressed..............")
self.handle_MESSAGE(str(result) + "\n")
result = ""
def get_BUZZBOX_STATUS(self, data):
global i2cBus
global box
global tricolor
global buttons
global multiplexer
global temperatureSensor
global lightSensor
global motionSensor
global connected_multiplexer
global connected_sensor_light
global connected_sensor_temp
global connected_sensor_motion
global current_channel
#Evaluates data
data = data.upper()
msg = "OK"
subdata_pos = data.find("%")
subdata = data[0:subdata_pos]
subvalue = 0
#print "character % found at ", subdata_pos, " command ", subdata
subdata1_pos = data.find("&")
subdata1 = data[0:subdata1_pos]
subvalue1 = 0
if data == "HELLO\n":
msg = "Greetings!"
print ("Greetings!")
elif data == "PLAY1\n":
pygame.mixer.init()
pygame.mixer.music.load("/home/pi/BuzzBoards/music1.mp3")
pygame.mixer.music.play()
elif data == "PLAY2\n":
pygame.mixer.init()
pygame.mixer.music.load("/home/pi/BuzzBoards/music2.mp3")
pygame.mixer.music.play()
elif data == "STOP\n":
pygame.mixer.stop()
pygame.mixer.music.stop()
pygame.mixer.quit()
elif data == "LIGHT1_ON\n":
print ("Lighting set 1 ON")
box.setLighting1 (True, 0, False)
elif data == "LIGHT1_BLINK\n":
print ("Lighting set 1 BLINK")
box.setLighting1 (True, 0, True)
elif subdata == "LIGHT1_DIM": #format for dimmable values LIGHT1_DIM%5
try:
subvalue = float (data[subdata_pos+1:])
except ValueError:
msg = "ERROR: INVALID DIM VALUE"
subvalue = 0
#print "subvalue=", subvalue
if subvalue > 100 or subvalue < 0:
msg = "ERROR: VALUE OUT OF RANGE"
print ("Lighting set 1 DIMMABLE", msg)
else:
dim = float(subvalue / 100)
print ("Lighting set 1 DIMMABLE ", subvalue , " % - ", dim)
box.setLighting1 (True, dim, False)
elif data == "GET_LIGHT1\n":
msg = box.getLighting1()
print ("Lighting set 1 - Get status ",msg)
elif data == "LIGHT2_ON\n":
print ("Lighting set 2 ON")
box.setLighting2 (True, 0, False)
elif data == "LIGHT2_BLINK\n":
print ("Lighting set 2 BLINK")
box.setLighting2 (True, 0, True)
elif subdata == "LIGHT2_DIM": #format for dimmable values LIGHT1_DIM%5
try:
subvalue = float (data[subdata_pos+1:])
except ValueError:
msg = "ERROR: INVALID DIM VALUE"
subvalue = 0
#print "subvalue=", subvalue
if subvalue > 100 or subvalue < 0:
msg = "ERROR: VALUE OUT OF RANGE"
print ("Lighting set 2 DIMMABLE", msg)
else:
dim = float(subvalue / 100)
print ("Lighting set 2 DIMMABLE ", subvalue , " % - ", dim)
box.setLighting2 (True, dim, False)
elif data == "GET_LIGHT2\n":
msg = box.getLighting2()
print ("Lighting set 2 - Get status ",msg)
elif data == "FAN_ON\n":
print ("Fan ON")
box.setFan (True)
elif data == "HEATER_ON\n":
print ("Heater ON")
box.setHeater (True)
elif data == "LIGHT1_OFF\n":
print ("Lighting set 1 OFF")
box.setLighting1 (False, 0, False)
elif data == "LIGHT2_OFF\n":
print ("Lighting set 2 OFF")
box.setLighting2 (False, 0, False)
elif data == "FAN_OFF\n":
print ("Fan OFF")
box.setFan (False)
elif data == "HEATER_OFF\n":
print ("Heater OFF")
box.setHeater (False)
elif data == "GET_FAN\n":
msg = box.getFan()
print ("Fan - Get status ",msg)
elif data == "GET_HEATER\n":
msg = box.getHeater()
print ("Heater - Get status ",msg)
elif data == "PRESS_BTN1\n":
msg = buttons.readValueVirtualBtn("BTN1")
print ("Virtual BTN1 - Get status ", msg)
elif data == "PRESS_BTN2\n":
msg = buttons.readValueVirtualBtn("BTN2")
print ("Virtual BTN2 - Get status ", msg)
elif data == "PRESS_BTN3\n":
msg = buttons.readValueVirtualBtn("BTN3")
print ("Virtual BTN3 - Get status ", msg)
elif data == "PRESS_BTN4\n":
msg = buttons.readValueVirtualBtn("BTN4")
print ("Virtual BTN4 - Get status ", msg)
elif data == "PRESS_BTN5\n":
msg = buttons.readValueVirtualBtn("BTN5")
print ("Virtual BTN5 - Get status ", msg)
elif data == "PRESS_BTN6\n":
msg = buttons.readValueVirtualBtn("BTN6")
print ("Virtual BTN6 - Get status ", msg)
elif data == "PRESS_BTN7\n":
msg = buttons.readValueVirtualBtn("BTN7")
print ("Virtual BTN7 - Get status ", msg)
elif data == "PRESS_BTN8\n":
msg = buttons.readValueVirtualBtn("BTN8")
print ("Virtual BTN8 - Get status ", msg)
elif data == "GET_LED1\n":
msg = tricolor.getLed1()
print ("Led 1 - Get status ",msg)
elif data == "GET_LED2\n":
msg = tricolor.getLed2()
print ("Led 2 - Get status ",msg)
elif data == "GET_LED3\n":
msg = tricolor.getLed3()
print ("Led 3 - Get status ",msg)
elif data == "GET_LED4\n":
msg = tricolor.getLed4()
print ("Led 4 - Get status ",msg)
elif data == "GET_LED5\n":
msg = tricolor.getLed5()
print ("Led 5 - Get status ",msg)
elif data == "GET_LED6\n":
msg = tricolor.getLed6()
print ("Led 6 - Get status ",msg)
elif data == "GET_LED7\n":
msg = tricolor.getLed7()
print ("Led 7 - Get status ",msg)
elif data == "GET_LED8\n":
msg = tricolor.getLed8()
print ("Led 8 - Get status ",msg)
elif data == "LED1_R\n":
print ("Led 1 RED")
tricolor.turnOnLed (1,2)
elif data == "LED1_G\n":
print ("Led 1 GREEN")
tricolor.turnOnLed (1,1)
elif data == "LED1_Y\n":
print ("Led 1 YELLOW")
tricolor.turnOnLed (1,0)
elif data == "LED1_OFF\n":
print ("Led 1 OFF")
tricolor.turnOnLed (1,3)
elif data == "LED2_R\n":
print ("Led 2 RED")
tricolor.turnOnLed (2,2)
elif data == "LED2_G\n":
print ("Led 2 GREEN")
tricolor.turnOnLed (2,1)
elif data == "LED2_Y\n":
print ("Led 2 YELLOW")
tricolor.turnOnLed (2,0)
elif data == "LED2_OFF\n":
print ("Led 2 OFF")
tricolor.turnOnLed (2,3)
elif data == "LED3_R\n":
print ("Led 3 RED")
tricolor.turnOnLed (3,2)
elif data == "LED3_G\n":
print ("Led 3 GREEN")
tricolor.turnOnLed (3,1)
elif data == "LED3_Y\n":
print ("Led 3 YELLOW")
tricolor.turnOnLed (3,0)
elif data == "LED3_OFF\n":
print ("Led 3 OFF")
tricolor.turnOnLed (3,3)
elif data == "LED4_R\n":
print ("Led 4 RED")
tricolor.turnOnLed (4,2)
elif data == "LED4_G\n":
print ("Led 4 GREEN")
tricolor.turnOnLed (4,1)
elif data == "LED4_Y\n":
print ("Led 4 YELLOW")
tricolor.turnOnLed (4,0)
elif data == "LED4_OFF\n":
print ("Led 4 OFF")
tricolor.turnOnLed (4,3)
elif data == "LED5_R\n":
print ("Led 5 RED")
tricolor.turnOnLed (5,2)
elif data == "LED5_G\n":
print ("Led 5 GREEN")
tricolor.turnOnLed (5,1)
elif data == "LED5_Y\n":
print ("Led 5 YELLOW")
tricolor.turnOnLed (5,0)
elif data == "LED5_OFF\n":
print ("Led 5 OFF")
tricolor.turnOnLed (5,3)
elif data == "LED6_R\n":
print ("Led 6 RED")
tricolor.turnOnLed (6,2)
elif data == "LED6_G\n":
print ("Led 6 GREEN")
tricolor.turnOnLed (6,1)
elif data == "LED6_Y\n":
print ("Led 6 YELLOW")
tricolor.turnOnLed (6,0)
elif data == "LED6_OFF\n":
print ("Led 6 OFF")
tricolor.turnOnLed (6,3)
elif data == "LED7_R\n":
print ("Led 7 RED")
tricolor.turnOnLed (7,2)
elif data == "LED7_G\n":
print ("Led 7 GREEN")
tricolor.turnOnLed (7,1)
elif data == "LED7_Y\n":
print ("Led 7 YELLOW")
tricolor.turnOnLed (7,0)
elif data == "LED7_OFF\n":
print ("Led 7 OFF")
tricolor.turnOnLed (7,3)
elif data == "LED8_R\n":
print ("Led 8 RED")
tricolor.turnOnLed (8,2)
elif data == "LED8_G\n":
print ("Led 8 GREEN")
tricolor.turnOnLed (8,1)
elif data == "LED8_Y\n":
print ("Led 8 YELLOW")
tricolor.turnOnLed (8,0)
elif data == "LED8_OFF\n":
print ("Led 8 OFF")
tricolor.turnOnLed (8,3)
elif data == "GET_CHANNEL\n":
try:
msg = 0
msg = str(multiplexer.getChannel(i2cBus))
print "MULTIPLEXER - Current channel selected ", msg
except Exception as e:
msg = "ERROR: MULTIPLEXER BOARD NOT CONNECTED"
elif data == "GET_TEMPERATURE\n":
try:
msg = 0
msg = str(multiplexer.getChannel(i2cBus))
#print "MULTIPLEXER - Current channel selected ", msg
try:
temperatureSensor = wear_sensor_heat.WearSensorHeat(i2cBus)
read_val = temperatureSensor.setPrecision(4)
msg = str(temperatureSensor.getTemperature())
print "HEAT SENSOR - Temperature ", msg, " C"
except Exception as e:
msg = "ERROR: HEAT SENSOR BOARD NOT CONNECTED"
except Exception as e:
msg = "ERROR: MULTIPLEXER BOARD NOT CONNECTED"
elif data == "GET_LUX\n":
try:
msg = 0
msg = str(multiplexer.getChannel(i2cBus))
#print "MULTIPLEXER - Current channel selected ", msg
try:
lightSensor = wear_sensor_light.WearSensorLight(i2cBus)
msg = str(lightSensor.getLux())
print "LIGHT SENSOR - Light ", msg, " Lux"
except Exception as e:
msg = "ERROR: LIGHT SENSOR BOARD NOT CONNECTED"
except Exception as e:
msg = "ERROR: MULTIPLEXER BOARD NOT CONNECTED"
elif data == "GET_MOTION\n":
try:
msg = 0
msg = str(multiplexer.getChannel(i2cBus))
#print "MULTIPLEXER - Current channel selected ", msg
try:
motionSensor = wear_sensor_motion.WearSensorMotion(i2cBus)
x = motionSensor.getXAxis()
y = motionSensor.getYAxis()
z = motionSensor.getZAxis()
msg = str(x) + "X&" + str(y) + "Y&" + str(z) + "Z"
print "MOTION SENSOR - values ", msg
except Exception as e:
msg = "ERROR: MOTION SENSOR BOARD NOT CONNECTED"
except Exception as e:
msg = "ERROR: MULTIPLEXER BOARD NOT CONNECTED"
else:
msg = "ERROR: WRONG CODE"
print ("Result: ", msg + "\n")
self.handle_MESSAGE(msg + "\n")
def sensorDiscoveryService(self):
global i2cBus
global multiplexer
global temperatureSensor
global lightSensor
global motionSensor
global connected_multiplexer
global connected_sensor_light
global connected_sensor_temp
global connected_sensor_motion
global current_channel
#print ("sensorDiscoveryService running......")
if (connected_sensor_temp or connected_sensor_light or connected_sensor_motion):
pass
else:
print ("sensorDiscoveryService running......")
for channel in range(1,17):
try:
result = multiplexer.setChannel(channel)
print ("MULTIPLEXER - Enabling channel ",channel," in the board... ", result)
current_channel = channel
if (not connected_sensor_temp):
try:
#Start temperature sensor
temperatureSensor = wear_sensor_heat.WearSensorHeat(i2cBus)
#Set precision
decimals = 4
result = temperatureSensor.setPrecision(decimals)
connected_sensor_temp = True
except Exception as e:
#print "ERROR: HEAT SENSOR - ", e
connected_sensor_temp = False
if (not connected_sensor_light):
try:
#Start light sensor
lightSensor = wear_sensor_light.WearSensorLight(i2cBus)
connected_sensor_light = True
except Exception as e:
#print "ERROR: LIGHT SENSOR - ", e
connected_sensor_light = False
if (not connected_sensor_motion):
try:
#Start motion sensor
motionSensor = wear_sensor_motion.WearSensorMotion(bus)
connected_sensor_motion = True
except Exception as e:
#print "ERROR: MOTION SENSOR - ", e
connected_sensor_motion = False
if (connected_sensor_temp or connected_sensor_light or connected_sensor_motion):
break
except Exception as e:
pass
#Start reading sensors
if (connected_sensor_temp):
try:
result = temperatureSensor.getTemperature()
#print ("HEAT SENSOR - Temperature ", result, " C")
except Exception as e:
#print ("ERROR: HEAT SENSOR - ", e)
connected_sensor_temp = False
if (connected_sensor_light):
try:
result = lightSensor.getLux()
#print ("LIGHT SENSOR - Lux ", result)
except Exception as e:
#print ("ERROR: LIGHT SENSOR - ", e)
connected_sensor_light = False
if (connected_sensor_motion):
try:
x = motionSensor.getXAxis()
y = motionSensor.getYAxis()
z = motionSensor.getZAxis()
#print ("MOTION SENSOR - X=", x, ", Y=", y, ", Z=", z)
except Exception as e:
#print ("ERROR: MOTION SENSOR - ", e)
connected_sensor_motion = False
class FortitoFactory(Factory):
def __init__(self):
self.clients = []
def buildProtocol(self, addr):
return Fortito(self)
reactor.listenTCP(50000, FortitoFactory())
print ("Fortito server started.")
reactor.run() | agpl-3.0 | 8,068,275,674,875,720,000 | 28.132813 | 85 | 0.648676 | false |
cmpe-295/project-backend | safe_ride/ride/migrations/0001_initial.py | 1 | 1399 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0004_client_activation_link_offset'),
]
operations = [
migrations.CreateModel(
name='Ride',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_on', models.DateTimeField(auto_now_add=True)),
('active', models.BooleanField(default=False)),
('pickup_latitude', models.FloatField()),
('pickup_longitude', models.FloatField()),
('drop_latitude', models.FloatField()),
('drop_longitude', models.FloatField()),
('request_received_at', models.DateTimeField(null=True, blank=True)),
('request_processed_at', models.DateTimeField(null=True, blank=True)),
('initial_eta', models.FloatField()),
('pickup_at', models.DateTimeField(null=True, blank=True)),
('drop_at', models.DateTimeField(null=True, blank=True)),
('client', models.ForeignKey(related_name='rides', to='core.Client')),
('serviced_by', models.ForeignKey(related_name='rides', to='core.Driver')),
],
),
]
| mit | 6,725,087,668,841,999,000 | 41.393939 | 114 | 0.565404 | false |
dbdd4us/compose | compose/config/types.py | 1 | 6996 | """
Types for objects parsed from the configuration.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import os
from collections import namedtuple
import six
from compose.config.config import V1
from compose.config.errors import ConfigurationError
from compose.const import IS_WINDOWS_PLATFORM
from compose.utils import splitdrive
class VolumeFromSpec(namedtuple('_VolumeFromSpec', 'source mode type')):
# TODO: drop service_names arg when v1 is removed
@classmethod
def parse(cls, volume_from_config, service_names, version):
func = cls.parse_v1 if version == V1 else cls.parse_v2
return func(service_names, volume_from_config)
@classmethod
def parse_v1(cls, service_names, volume_from_config):
parts = volume_from_config.split(':')
if len(parts) > 2:
raise ConfigurationError(
"volume_from {} has incorrect format, should be "
"service[:mode]".format(volume_from_config))
if len(parts) == 1:
source = parts[0]
mode = 'rw'
else:
source, mode = parts
type = 'service' if source in service_names else 'container'
return cls(source, mode, type)
@classmethod
def parse_v2(cls, service_names, volume_from_config):
parts = volume_from_config.split(':')
if len(parts) > 3:
raise ConfigurationError(
"volume_from {} has incorrect format, should be one of "
"'<service name>[:<mode>]' or "
"'container:<container name>[:<mode>]'".format(volume_from_config))
if len(parts) == 1:
source = parts[0]
return cls(source, 'rw', 'service')
if len(parts) == 2:
if parts[0] == 'container':
type, source = parts
return cls(source, 'rw', type)
source, mode = parts
return cls(source, mode, 'service')
if len(parts) == 3:
type, source, mode = parts
if type not in ('service', 'container'):
raise ConfigurationError(
"Unknown volumes_from type '{}' in '{}'".format(
type,
volume_from_config))
return cls(source, mode, type)
def repr(self):
return '{v.type}:{v.source}:{v.mode}'.format(v=self)
def parse_restart_spec(restart_config):
if not restart_config:
return None
parts = restart_config.split(':')
if len(parts) > 2:
raise ConfigurationError(
"Restart %s has incorrect format, should be "
"mode[:max_retry]" % restart_config)
if len(parts) == 2:
name, max_retry_count = parts
else:
name, = parts
max_retry_count = 0
return {'Name': name, 'MaximumRetryCount': int(max_retry_count)}
def serialize_restart_spec(restart_spec):
parts = [restart_spec['Name']]
if restart_spec['MaximumRetryCount']:
parts.append(six.text_type(restart_spec['MaximumRetryCount']))
return ':'.join(parts)
def parse_extra_hosts(extra_hosts_config):
if not extra_hosts_config:
return {}
if isinstance(extra_hosts_config, dict):
return dict(extra_hosts_config)
if isinstance(extra_hosts_config, list):
extra_hosts_dict = {}
for extra_hosts_line in extra_hosts_config:
# TODO: validate string contains ':' ?
host, ip = extra_hosts_line.split(':', 1)
extra_hosts_dict[host.strip()] = ip.strip()
return extra_hosts_dict
def normalize_path_for_engine(path):
"""Windows paths, c:\my\path\shiny, need to be changed to be compatible with
the Engine. Volume paths are expected to be linux style /c/my/path/shiny/
"""
drive, tail = splitdrive(path)
if drive:
path = '/' + drive.lower().rstrip(':') + tail
return path.replace('\\', '/')
class VolumeSpec(namedtuple('_VolumeSpec', 'external internal mode')):
@classmethod
def _parse_unix(cls, volume_config):
parts = volume_config.split(':')
if len(parts) > 3:
raise ConfigurationError(
"Volume %s has incorrect format, should be "
"external:internal[:mode]" % volume_config)
if len(parts) == 1:
external = None
internal = os.path.normpath(parts[0])
else:
external = os.path.normpath(parts[0])
internal = os.path.normpath(parts[1])
mode = 'rw'
if len(parts) == 3:
mode = parts[2]
return cls(external, internal, mode)
@classmethod
def _parse_win32(cls, volume_config):
# relative paths in windows expand to include the drive, eg C:\
# so we join the first 2 parts back together to count as one
mode = 'rw'
def separate_next_section(volume_config):
drive, tail = splitdrive(volume_config)
parts = tail.split(':', 1)
if drive:
parts[0] = drive + parts[0]
return parts
parts = separate_next_section(volume_config)
if len(parts) == 1:
internal = normalize_path_for_engine(os.path.normpath(parts[0]))
external = None
else:
external = parts[0]
parts = separate_next_section(parts[1])
external = normalize_path_for_engine(os.path.normpath(external))
internal = normalize_path_for_engine(os.path.normpath(parts[0]))
if len(parts) > 1:
if ':' in parts[1]:
raise ConfigurationError(
"Volume %s has incorrect format, should be "
"external:internal[:mode]" % volume_config
)
mode = parts[1]
return cls(external, internal, mode)
@classmethod
def parse(cls, volume_config):
"""Parse a volume_config path and split it into external:internal[:mode]
parts to be returned as a valid VolumeSpec.
"""
if IS_WINDOWS_PLATFORM:
return cls._parse_win32(volume_config)
else:
return cls._parse_unix(volume_config)
def repr(self):
external = self.external + ':' if self.external else ''
return '{ext}{v.internal}:{v.mode}'.format(ext=external, v=self)
@property
def is_named_volume(self):
return self.external and not self.external.startswith(('.', '/', '~'))
class ServiceLink(namedtuple('_ServiceLink', 'target alias')):
@classmethod
def parse(cls, link_spec):
target, _, alias = link_spec.partition(':')
if not alias:
alias = target
return cls(target, alias)
def repr(self):
if self.target == self.alias:
return self.target
return '{s.target}:{s.alias}'.format(s=self)
@property
def merge_field(self):
return self.alias
| apache-2.0 | 5,409,932,919,929,041,000 | 30.656109 | 83 | 0.575472 | false |
shinose/qplaybox | packages/addons/driver/hdhomerun/source/resources/actions.py | 1 | 1477 | ################################################################################
# This file is part of OpenELEC - http://www.openelec.tv
# Copyright (C) 2009-2014 Stephan Raue ([email protected])
#
# OpenELEC is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# OpenELEC is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenELEC. If not, see <http://www.gnu.org/licenses/>.
################################################################################
import os
import sys
import xbmcaddon
__settings__ = xbmcaddon.Addon(id = 'driver.dvb.hdhomerun')
__cwd__ = __settings__.getAddonInfo('path')
__resources_lib__ = xbmc.translatePath(os.path.join(__cwd__, 'resources', 'lib'))
__settings_xml__ = xbmc.translatePath(os.path.join(__cwd__, 'resources', 'settings.xml'))
if len(sys.argv) == 2 and sys.argv[1] == 'refresh_tuners':
sys.path.append(__resources_lib__)
from functions import refresh_hdhomerun_tuners
refresh_hdhomerun_tuners(__settings_xml__)
__settings__.openSettings()
| gpl-2.0 | -2,070,411,174,875,305,700 | 45.15625 | 90 | 0.633717 | false |
axelberndt/Raspberry-Pi-Tools | src/ShutdownRebootVolumeControl.py | 1 | 6353 | #!/usr/bin/env python3.5
# This is a combination of ShutdownRebootButton.py and VolumeRotaryControl.py. It is handy for those who use a rotary switch.
# Author: Axel Berndt
from RPi import GPIO
from time import sleep, time
from subprocess import call
import alsaaudio
GPIOpinButton = 27 # Button is on GPIO channel 27 / pin 13 of 40way connector with GND on pin 14
GPIOpinA = 23 # left pin of the rotary encoder is on GPIO 23 (Pi pin 16)
GPIOpinB = 24 # right pin of the rotary encoder is on GPIO 24 (Pi pin 18)
aDown = False # this is set True to wait for GPIO A to go down
bUp = False # this is set True to wait for GPIO B to go up
bDown = False # this is set True to wait for GPIO B to go down
pressTime = float('Inf')    # tracks the time between button press and release; while waiting for the next press (falling edge) it holds positive infinity to prevent unintended reboots/shutdowns
# initialize GPIO input and define interrupts
def init():
GPIO.setmode(GPIO.BCM) # set the GPIO naming/numbering convention to BCM
GPIO.setup(GPIOpinA, GPIO.IN, pull_up_down=GPIO.PUD_UP) # input channel A
GPIO.setup(GPIOpinB, GPIO.IN, pull_up_down=GPIO.PUD_UP) # input channel B
GPIO.setup(GPIOpinButton, GPIO.IN, pull_up_down=GPIO.PUD_UP) # setup the channel as input with a 50K Ohm pull up. A push button will ground the pin, creating a falling edge.
GPIO.add_event_detect(GPIOpinA, GPIO.BOTH, callback=rotaryInterruptA) # define interrupt for action on channel A (no bouncetime needed)
GPIO.add_event_detect(GPIOpinB, GPIO.BOTH, callback=rotaryInterruptB) # define interrupt for action on channel B (no bouncetime needed)
GPIO.add_event_detect(GPIOpinButton, GPIO.BOTH, callback=buttonInterrupt)#, bouncetime=100) # define interrupt, add the bouncetime if it works better with your button
# the callback functions when turning the encoder
# this one reacts on action on channel A
def rotaryInterruptA(GPIOpin):
A = GPIO.input(GPIOpinA) # read current value of channel A
B = GPIO.input(GPIOpinB) # read current value of channel B
global aDown, bUp, bDown # get access to some more global variables
if aDown: # if we are waiting for channel A to go down (to finish -> rotation cycle)
if not A: # check if it is down now
aDown = False # -> rotation cycle finished
elif bUp or bDown: # if a <- rotation cycle is unfinished so far
pass # don't do anything new
elif A: # if a new rotation cycle starts, i.e. nothing to go up or down
mixer = alsaaudio.Mixer() # get ALSA mixer channel 'Master'
volume = int(mixer.getvolume()[0]) # get the left channel's volume gain (right channel is the same)
if B: # if B is already up, the rotation direction is ->
aDown = True # to finish the cycle, wait for A to go down again
if volume < 100: # do not get greater than 100 (ALSA max)
volume += 1 # increase volume gain
else: # if B still has to come up, the rotation direction is <-
bUp = True # in this rotation cycle B has to come up and down again, we start with waiting for B to come up
            if volume > 0:                                     # do not get below 0 (ALSA min)
                volume -= 1                                    # decrease volume gain
mixer.setvolume(volume) # apply the new volume gain to the mixer channel
return # done
# this callback function reacts on action on channel B
def rotaryInterruptB(GPIOpin):
B = GPIO.input(GPIOpin) # read current value of channel B
global bUp, bDown # get access to some more global variables
if B: # if B is up
if bUp: # and we have been waiting for B to come up (this is part of the <- rotation cycle)
bDown = True # wait for B to come down again
bUp = False # done with this
elif bDown: # B is down (if B: was False) and if we were waiting for B to come down
bDown = False # <- rotation cycle finished
return # done
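# Illustration of the decoding state machine above (signal levels as read
# inside the interrupt handlers; assumes the encoder rests with channel A
# low between detents):
#   clockwise (->):         A rises while B is high -> volume += 1, then
#                           wait for A to fall again (aDown)
#   counter-clockwise (<-): A rises while B is low  -> volume -= 1, then
#                           wait for B to rise and fall again (bUp, bDown)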
# the callback function when button is pressed/released
def buttonInterrupt(GPIOpin):
global pressTime # get access to the global time variable
    if not GPIO.input(GPIOpin):                                    # if button falling event
        if pressTime != float('Inf'):                              # if button is already pressed due to missing rise event or bouncing
            return                                                 # keep the current pressTime value, done
        pressTime = time()                                         # remember when the button went down
else: # if button rising event
timePassed = time() - pressTime # compute how long the button was pressed
if timePassed < 2: # if it is less than 2 seconds
pressTime = float('Inf') # waiting for next button falling, prevent unintended reboot/shutdowns by setting this variable to positive infinity
elif timePassed < 5: # if pressed for 2 up to 5 seconds
call(['sudo reboot &'], shell=True) # do reboot
else: # if pressed for 5 seconds and more
call(['shutdown -h now "System shutdown by GPIO action" &'], shell=True) # do shutdown
# the main function
def main():
try: # run the program
init() # initialize everything
while True: # idle loop
sleep(300) # wakes up once every 5 minutes = 300 seconds
    except KeyboardInterrupt:
        pass                                                       # leave the idle loop on CTRL+C
    finally:
        GPIO.cleanup()                                             # clean up GPIO on any exit
# the entry point
if __name__ == '__main__':
main() | gpl-3.0 | 9,147,913,887,483,484,000 | 60.096154 | 210 | 0.582087 | false |
andikleen/pmu-tools | interval-plot.py | 1 | 3554 | #!/usr/bin/env python
# plot interval CSV output from perf/toplev
# perf stat -I1000 -x, -o file ...
# toplev -I1000 -x, -o file ...
# interval-plot.py file (or stdin)
# delimiter must be ,
# this is for data that is not normalized
# TODO: move legend somewhere else where it doesn't overlap?
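# Example pipeline (hypothetical workload and file names, matching the
# usage shown above):
#   toplev.py -l1 -I 1000 -x, -o x.csv -- ./workload
#   interval-plot.py x.csv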
from __future__ import print_function
import os
import csv
import sys
import collections
import argparse
import re
import matplotlib
if os.getenv("DISPLAY") is None:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import csv_formats
import gen_level
p = argparse.ArgumentParser(
usage='plot interval CSV output from perf stat/toplev',
description='''
perf stat -I1000 -x, -o file ...
toplev -I1000 -x, -o file ...
interval-plot.py file (or stdin)
delimiter must be ,
this is for data that is not normalized.''')
p.add_argument('--xkcd', action='store_true', help='enable xkcd mode')
p.add_argument('--style', help='set mpltools style (e.g. ggplot)')
p.add_argument('file', help='CSV file to plot (or stdin)', nargs='?')
p.add_argument('--output', '-o', help='Output to file. Otherwise show.',
nargs='?')
args = p.parse_args()
if args.style:
try:
from mpltools import style
style.use(args.style)
except ImportError:
print("Need mpltools for setting styles (pip install mpltools)")
try:
import brewer2mpl
all_colors = brewer2mpl.get_map('Paired', 'Qualitative', 12).hex_colors
except ImportError:
print("Install brewer2mpl for better colors (pip install brewer2mpl)")
all_colors = ('green','orange','red','blue',
'black','olive','purple','#6960EC', '#F0FFFF',
'#728C00', '#827B60', '#F87217', '#E55451', # 16
'#F88017', '#C11B17', '#17BFC2', '#C48793') # 20
cur_colors = collections.defaultdict(lambda: all_colors)
assigned = dict()
if args.file:
inf = open(args.file, "r")
else:
inf = sys.stdin
rc = csv.reader(inf)
timestamps = dict()
value = dict()
def isnum(x):
return re.match(r'[0-9.]+', x)
val = ""
for row in rc:
r = csv_formats.parse_csv_row(row)
if r is None:
continue
ts, cpu, event, val = r.ts, r.cpu, r.ev, r.val
if event not in assigned:
level = gen_level.get_level(event)
assigned[event] = cur_colors[level][0]
cur_colors[level] = cur_colors[level][1:]
if len(cur_colors[level]) == 0:
cur_colors[level] = all_colors
value[event] = []
timestamps[event] = []
timestamps[event].append(float(ts))
try:
value[event].append(float(val.replace("%","")))
except ValueError:
value[event].append(0.0)
levels = set(map(gen_level.get_level, assigned.keys()))
if args.xkcd:
try:
plt.xkcd()
except NameError:
print("Please update matplotlib. Cannot enable xkcd mode.")
n = 1
for l in levels:
ax = plt.subplot(len(levels), 1, n)
if val.find('%') >= 0:
ax.set_ylim(0, 100)
t = []
for j in assigned.keys():
print(j, gen_level.get_level(j), l)
if gen_level.get_level(j) == l:
t.append(j)
if 'style' not in globals():
ax.plot(timestamps[j], value[j], assigned[j])
else:
ax.plot(timestamps[j], value[j])
leg = ax.legend(t, loc='upper left')
leg.get_frame().set_alpha(0.5)
n += 1
plt.xlabel('Time')
if val.find('%') >= 0:
plt.ylabel('Bottleneck %')
else:
plt.ylabel("Counter value")
if args.output:
plt.savefig(args.output)
else:
plt.show()
| gpl-2.0 | 6,773,985,436,433,152,000 | 27.66129 | 75 | 0.612549 | false |
wglass/kiel | kiel/protocol/join_group.py | 1 | 1780 | from .part import Part
from .request import Request
from .response import Response
from .primitives import Array, String, Bytes, Int16, Int32
api_name = "join_group"
__all__ = [
"JoinGroupRequest",
"JoinGroupResponse",
"GroupProtocol",
"Member",
]
class GroupProtocol(Part):
"""
::
GroupProtocol =>
name => String
version => Int16
subscription => Array.of(String)
user_data => Bytes
"""
parts = (
("name", String),
("version", Int16),
("subscription", Array.of(String)),
("user_data", Bytes),
)
class JoinGroupRequest(Request):
"""
::
JoinGroupRequest =>
group_id => String
session_timeout => Int32
member_id => String
protocol_type => String
group_protocols => [GroupProtocol]
"""
api = "join_group"
parts = (
("group_id", String),
("session_timeout", Int32),
("member_id", String),
("protocol_type", String),
("group_protocols", Array.of(GroupProtocol)),
)
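# A minimal construction sketch (hypothetical group/topic names; assumes,
# as with other Part subclasses in kiel, that fields are passed as keyword
# arguments matching the `parts` tuples above):
#
#   request = JoinGroupRequest(
#       group_id="example-group",
#       session_timeout=30000,
#       member_id="",                 # empty string on first join
#       protocol_type="consumer",
#       group_protocols=[
#           GroupProtocol(name="roundrobin", version=0,
#                         subscription=["example-topic"], user_data=b""),
#       ],
#   )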
class Member(Part):
"""
::
Member =>
member_id => String
metadata => Bytes
"""
parts = (
("member_id", String),
("metadata", Bytes),
)
class JoinGroupResponse(Response):
"""
::
JoinGroupResponse =>
error_code => Int16
generation_id => Int32
protocol => String
leader_id => String
member_id => String
members => [Member]
"""
api = "join_group"
parts = (
("error_code", Int16),
("generation_id", Int32),
("protocol", String),
("leader_id", String),
("member_id", String),
("members", Array.of(Member)),
)
| apache-2.0 | -4,484,983,534,276,010,500 | 18.139785 | 58 | 0.51573 | false |
Golker/wttd | eventex/core/tests/test_model_contact.py | 1 | 2098 | from django.core.exceptions import ValidationError
from django.test import TestCase
from eventex.core.models import Speaker, Contact
class ContactModelTest(TestCase):
def setUp(self):
self.speaker = Speaker.objects.create(
name='Luca Bezerra',
slug='luca-bezerra',
photo='http://hbn.link/hb-pic'
)
def test_email(self):
contact = Contact.objects.create(speaker=self.speaker,
kind=Contact.EMAIL,
value='[email protected]')
self.assertTrue(Contact.objects.exists())
def test_phone(self):
contact = Contact.objects.create(speaker=self.speaker,
kind=Contact.PHONE,
value='987654321')
self.assertTrue(Contact.objects.exists())
def test_choices(self):
""" Contact kind should be limited to E or P """
contact = Contact(speaker=self.speaker, kind='A', value='B')
self.assertRaises(ValidationError, contact.full_clean)
def test_str(self):
contact = Contact.objects.create(speaker=self.speaker,
kind=Contact.EMAIL,
value='[email protected]')
self.assertEqual('[email protected]', str(contact))
class ContactManagerTest(TestCase):
def setUp(self):
s = Speaker.objects.create(
name='Luca Bezerra',
slug='luca-bezerra',
photo='http://hbn.link/hb-pic'
)
s.contact_set.create(kind=Contact.EMAIL, value='[email protected]')
s.contact_set.create(kind=Contact.PHONE, value='987654321')
def test_emails(self):
qs = Contact.objects.emails()
expected = ['[email protected]']
self.assertQuerysetEqual(qs, expected, lambda o: o.value)
def test_phones(self):
qs = Contact.objects.phones()
expected = ['987654321']
self.assertQuerysetEqual(qs, expected, lambda o: o.value)
| mit | 4,053,719,748,523,270,000 | 35.807018 | 79 | 0.57817 | false |
yelizariev/addons-yelizariev | odoo_backup_sh_google_disk/models/odoo_backup_sh.py | 1 | 9797 | # Copyright 2019 Dinar Gabbasov <https://it-projects.info/team/GabbasovDinar>
# Copyright 2019 Ivan Yelizariev <https://it-projects.info/team/yelizariev>
# License LGPL-3.0 or later (https://www.gnu.org/licenses/lgpl.html).
import io
import logging
import tempfile
from datetime import datetime
from odoo import api, fields, models
from odoo.tools import DEFAULT_SERVER_DATE_FORMAT
from odoo.addons.odoo_backup_sh.models.odoo_backup_sh import (
ModuleNotConfigured,
compute_backup_filename,
compute_backup_info_filename,
get_backup_by_id,
)
try:
from googleapiclient.http import MediaIoBaseDownload, MediaIoBaseUpload
from apiclient import errors
except ImportError as err:
logging.getLogger(__name__).debug(err)
_logger = logging.getLogger(__name__)
GOOGLE_DRIVE_STORAGE = "google_drive"
class BackupConfig(models.Model):
_inherit = "odoo_backup_sh.config"
storage_service = fields.Selection(
selection_add=[(GOOGLE_DRIVE_STORAGE, "Google Drive")]
)
@api.model
def get_backup_list(self, cloud_params, service):
backup_list = (
super(BackupConfig, self).get_backup_list(cloud_params, service) or dict()
)
if service != GOOGLE_DRIVE_STORAGE:
return backup_list
# get all backups from Google Drive
try:
GoogleDriveService = self.env[
"ir.config_parameter"
].get_google_drive_service()
except ModuleNotConfigured:
return backup_list
folder_id = self.env["ir.config_parameter"].get_param(
"odoo_backup_sh_google_disk.google_disk_folder_id"
)
response = (
GoogleDriveService.files()
.list(
q="'" + folder_id + "' in parents",
fields="nextPageToken, files(id, name)",
spaces="drive",
)
.execute()
)
google_drive_backup_list = [
(r.get("name"), GOOGLE_DRIVE_STORAGE) for r in response.get("files", [])
]
if "all_files" in backup_list:
backup_list.update(
{"all_files": backup_list["all_files"] + google_drive_backup_list}
)
else:
backup_list["all_files"] = google_drive_backup_list
return backup_list
@api.model
def get_info_file_object(self, cloud_params, info_file_name, storage_service):
if storage_service == GOOGLE_DRIVE_STORAGE:
GoogleDriveService = self.env[
"ir.config_parameter"
].get_google_drive_service()
file_id = self.get_google_drive_file_id(info_file_name)
if file_id:
fh = io.BytesIO()
request = GoogleDriveService.files().get_media(fileId=file_id)
downloader = MediaIoBaseDownload(fh, request)
done = False
while done is False:
status, done = downloader.next_chunk()
fh.seek(0)
return fh
else:
return super(BackupConfig, self).get_info_file_object(
cloud_params, info_file_name, storage_service
)
@api.model
def get_google_drive_file_id(self, file_name):
GoogleDriveService = self.env["ir.config_parameter"].get_google_drive_service()
folder_id = self.env["ir.config_parameter"].get_param(
"odoo_backup_sh_google_disk.google_disk_folder_id"
)
response = (
GoogleDriveService.files()
.list(
q="'" + folder_id + "' in parents and name = '" + file_name + "'",
fields="nextPageToken, files(id)",
spaces="drive",
)
.execute()
)
file = response.get("files", [])
return file[0].get("id")
@api.model
def create_info_file(self, info_file_object, storage_service):
if storage_service == GOOGLE_DRIVE_STORAGE:
info_file_object.seek(0)
info_file = tempfile.NamedTemporaryFile()
info_file.write(info_file_object.read())
info_file.seek(0)
return info_file
else:
return super(BackupConfig, self).create_info_file(
info_file_object, storage_service
)
@api.model
def delete_remote_objects(self, cloud_params, remote_objects):
GoogleDriveService = None
google_drive_remove_objects = []
for file in remote_objects:
if file[1] == GOOGLE_DRIVE_STORAGE:
if not GoogleDriveService:
GoogleDriveService = self.env[
"ir.config_parameter"
].get_google_drive_service()
google_drive_remove_objects.append(file)
file_id = self.get_google_drive_file_id(file[0])
try:
GoogleDriveService.files().delete(fileId=file_id).execute()
except errors.HttpError as e:
_logger.exception(e)
return super(BackupConfig, self).delete_remote_objects(
cloud_params, list(set(remote_objects) - set(google_drive_remove_objects))
)
@api.model
def make_backup_google_drive(
self, ts, name, dump_stream, info_file, info_file_content, cloud_params
):
# Upload two backup objects to Google Drive
GoogleDriveService = self.env["ir.config_parameter"].get_google_drive_service()
folder_id = self.env["ir.config_parameter"].get_param(
"odoo_backup_sh_google_disk.google_disk_folder_id"
)
db_metadata = {
"name": compute_backup_filename(
name, ts, info_file_content.get("encrypted")
),
"parents": [folder_id],
}
info_metadata = {
"name": compute_backup_info_filename(name, ts),
"parents": [folder_id],
}
db_mimetype = "application/zip"
info_mimetype = "text/plain"
dump_stream.seek(0)
info_file.seek(0)
for obj, mimetype, metadata in [
[dump_stream, db_mimetype, db_metadata],
[info_file, info_mimetype, info_metadata],
]:
media = MediaIoBaseUpload(obj, mimetype, resumable=True)
GoogleDriveService.files().create(
body=metadata, media_body=media, fields="id"
).execute()
class BackupInfo(models.Model):
_inherit = "odoo_backup_sh.backup_info"
storage_service = fields.Selection(
selection_add=[(GOOGLE_DRIVE_STORAGE, "Google Drive")]
)
@api.multi
def download_backup_action(self, backup=None):
self.assert_user_can_download_backup()
if backup is None:
backup = get_backup_by_id(self.env, self._context["active_id"])
if backup.storage_service != GOOGLE_DRIVE_STORAGE:
return super(BackupInfo, self).download_backup_action(backup)
# TODO: add file_id in backup_info for this
file_id = self.env["odoo_backup_sh.config"].get_google_drive_file_id(
backup.backup_filename
)
return {
"type": "ir.actions.act_url",
"url": "https://drive.google.com/uc?id={}&export=download".format(file_id),
"target": "self",
}
class BackupRemoteStorage(models.Model):
_inherit = "odoo_backup_sh.remote_storage"
google_drive_used_remote_storage = fields.Integer(
string="Google Drive Usage, MB", readonly=True
)
@api.multi
def compute_total_used_remote_storage(self):
self.compute_google_drive_used_remote_storage()
super(BackupRemoteStorage, self).compute_total_used_remote_storage()
@api.multi
def compute_google_drive_used_remote_storage(self):
amount = sum(
self.env["odoo_backup_sh.backup_info"]
.search([("storage_service", "=", GOOGLE_DRIVE_STORAGE)])
.mapped("backup_size")
)
today_record = self.search(
[
(
"date",
"=",
datetime.strftime(datetime.now(), DEFAULT_SERVER_DATE_FORMAT),
)
]
)
if today_record:
today_record.google_drive_used_remote_storage = amount
else:
self.create(
{"date": datetime.now(), "google_drive_used_remote_storage": amount}
)
class DeleteRemoteBackupWizard(models.TransientModel):
_inherit = "odoo_backup_sh.delete_remote_backup_wizard"
@api.multi
def delete_remove_backup_button(self):
record_ids = []
if self._context.get("active_model") == "odoo_backup_sh.backup_info":
record_ids = self._context.get("active_ids")
backup_info_records = self.env["odoo_backup_sh.backup_info"].search(
[("id", "in", record_ids)]
)
GoogleDriveService = self.env["ir.config_parameter"].get_google_drive_service()
backup_google_drive_info_records = backup_info_records.filtered(
lambda r: r.storage_service == GOOGLE_DRIVE_STORAGE
)
for record in backup_google_drive_info_records:
for obj_name in [record.backup_filename, record.backup_info_filename]:
file_id = self.env["odoo_backup_sh.config"].get_google_drive_file_id(
obj_name
)
try:
GoogleDriveService.files().delete(fileId=file_id).execute()
except errors.HttpError as e:
_logger.exception(e)
backup_google_drive_info_records.unlink()
super(DeleteRemoteBackupWizard, self).delete_remove_backup_button()
| lgpl-3.0 | -8,098,747,287,417,663,000 | 35.151292 | 87 | 0.575891 | false |
annavonzansen/exams | exams/migrations/0027_auto__del_field_orderitem_special_arrangement__del_field_historicalord.py | 1 | 38780 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'OrderItem.special_arrangement'
db.delete_column(u'exams_orderitem', 'special_arrangement_id')
# Removing M2M table for field attached_candidates on 'OrderItem'
db.delete_table(db.shorten_name(u'exams_orderitem_attached_candidates'))
# Deleting field 'HistoricalOrderItem.special_arrangement_id'
db.delete_column(u'exams_historicalorderitem', u'special_arrangement_id')
def backwards(self, orm):
# Adding field 'OrderItem.special_arrangement'
db.add_column(u'exams_orderitem', 'special_arrangement',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['exams.SpecialArrangement'], null=True, blank=True),
keep_default=False)
# Adding M2M table for field attached_candidates on 'OrderItem'
m2m_table_name = db.shorten_name(u'exams_orderitem_attached_candidates')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('orderitem', models.ForeignKey(orm[u'exams.orderitem'], null=False)),
('candidate', models.ForeignKey(orm[u'exams.candidate'], null=False))
))
db.create_unique(m2m_table_name, ['orderitem_id', 'candidate_id'])
# Adding field 'HistoricalOrderItem.special_arrangement_id'
db.add_column(u'exams_historicalorderitem', u'special_arrangement_id',
self.gf('django.db.models.fields.IntegerField')(blank=True, null=True, db_index=True),
keep_default=False)
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'education.school': {
'Meta': {'ordering': "('name', 'school_id')", 'object_name': 'School'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'fi'", 'max_length': '2'}),
'managers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'school_id': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "['name']", 'overwrite': 'False'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'education.schoolsite': {
'Meta': {'unique_together': "(('school', 'name'),)", 'object_name': 'SchoolSite'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'address_extra': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'default': "'Finland'", 'max_length': '32'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'postal_code': ('django.db.models.fields.PositiveIntegerField', [], {}),
'postal_office': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'school': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['education.School']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'E'", 'max_length': '1'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.answer': {
'Meta': {'object_name': 'Answer'},
'assignment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['exams.Assignment']"}),
'content': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'test': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['exams.Test']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.answeroption': {
'Meta': {'object_name': 'AnswerOption'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'max_length': '255'})
},
u'exams.assignment': {
'Meta': {'ordering': "('order',)", 'object_name': 'Assignment'},
'answer_options': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['exams.AnswerOption']", 'null': 'True', 'blank': 'True'}),
'assignment_type': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'attached_files': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['exams.File']", 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {}),
'content_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instructions': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'test': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['exams.Test']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.candidate': {
'Meta': {'unique_together': "(('examination', 'school', 'candidate_number'),)", 'object_name': 'Candidate', '_ormbases': [u'people.Person']},
'candidate_number': ('django.db.models.fields.PositiveIntegerField', [], {}),
'candidate_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['exams.CandidateType']"}),
'examination': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['exams.Examination']"}),
u'person_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['people.Person']", 'unique': 'True', 'primary_key': 'True'}),
'retrying': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'school': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['education.School']"}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['education.SchoolSite']", 'null': 'True', 'blank': 'True'})
},
u'exams.candidatetype': {
'Meta': {'object_name': 'CandidateType'},
'code': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.candidateupload': {
'Meta': {'object_name': 'CandidateUpload'},
'by_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'examination': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['exams.Examination']"}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'school': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['education.School']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '1'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.examination': {
'Meta': {'unique_together': "(('year', 'season'),)", 'object_name': 'Examination'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'registration_begin': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'registration_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'registration_status': ('django.db.models.fields.CharField', [], {'default': "'D'", 'max_length': '1'}),
'season': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "['year', 'season']", 'overwrite': 'False'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {'default': '2013', 'max_length': '4'})
},
u'exams.examregistration': {
'Meta': {'unique_together': "(('subject', 'candidate'),)", 'object_name': 'ExamRegistration'},
'additional_details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'candidate': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['exams.Candidate']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'special_arrangements': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['exams.SpecialArrangement']", 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'R'", 'max_length': '1'}),
'subject': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['exams.Subject']"}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.file': {
'Meta': {'object_name': 'File'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.historicalcandidate': {
'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalCandidate'},
'birthday': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'candidate_number': ('django.db.models.fields.PositiveIntegerField', [], {}),
u'candidate_type_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'examination_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'first_names': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
u'history_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
u'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
'identity_number': ('django.db.models.fields.CharField', [], {'max_length': '11', 'null': 'True', 'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'merge_with_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'person_ptr_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'retrying': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'school_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
u'site_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.historicalcandidatetype': {
'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalCandidateType'},
'code': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
u'history_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
u'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.historicalcandidateupload': {
'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalCandidateUpload'},
u'by_user_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'examination_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.TextField', [], {'max_length': '100'}),
u'history_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
u'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'school_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '1'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.historicalexamination': {
'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalExamination'},
u'history_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
u'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
'registration_begin': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'registration_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'registration_status': ('django.db.models.fields.CharField', [], {'default': "'D'", 'max_length': '1'}),
'season': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "['year', 'season']", 'overwrite': 'False'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {'default': '2013', 'max_length': '4'})
},
u'exams.historicalorder': {
'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalOrder'},
'additional_details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'created_by_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'examination_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
u'history_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
u'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'parent_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
u'site_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'c'", 'max_length': '2'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.historicalorderitem': {
'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalOrderItem'},
'amount': ('django.db.models.fields.PositiveIntegerField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'history_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
u'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
u'material_type_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'order_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
u'subject_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.historicalspecialarrangement': {
'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalSpecialArrangement'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'history_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
u'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'short': ('django.db.models.fields.CharField', [], {'max_length': '5', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.historicalsubject': {
'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalSubject'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'group_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
u'history_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
u'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'short': ('django.db.models.fields.CharField', [], {'max_length': '3', 'db_index': 'True'}),
'subject_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.historicaltest': {
'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalTest'},
'begin': ('django.db.models.fields.DateTimeField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {}),
u'examination_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
u'history_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
u'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'subject_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.materialtype': {
'Meta': {'object_name': 'MaterialType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'short': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.order': {
'Meta': {'ordering': "('-date', 'site', 'examination')", 'object_name': 'Order'},
'additional_details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'examination': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['exams.Examination']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['exams.Order']", 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['education.SchoolSite']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'c'", 'max_length': '2'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.orderitem': {
'Meta': {'object_name': 'OrderItem'},
'amount': ('django.db.models.fields.PositiveIntegerField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'material_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['exams.MaterialType']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['exams.Order']"}),
'subject': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['exams.Subject']"}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.specialarrangement': {
'Meta': {'object_name': 'SpecialArrangement'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'short': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '5'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.subject': {
'Meta': {'object_name': 'Subject'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['exams.SubjectGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'material_types': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['exams.MaterialType']", 'symmetrical': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'short': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '3'}),
'subject_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.subjectgroup': {
'Meta': {'ordering': "('order', 'name')", 'object_name': 'SubjectGroup'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '100'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'exams.test': {
'Meta': {'object_name': 'Test'},
'begin': ('django.db.models.fields.DateTimeField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {}),
'examination': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['exams.Examination']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'subject': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['exams.Subject']"}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'people.person': {
'Meta': {'object_name': 'Person'},
'birthday': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'first_names': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identity_number': ('django.db.models.fields.CharField', [], {'max_length': '11', 'null': 'True', 'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'merge_with': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['people.Person']", 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
}
}
complete_apps = ['exams'] | gpl-2.0 | 5,900,534,965,387,721,000 | 87.339408 | 220 | 0.555982 | false |
phelios/moneyleft | moneyleft/migrations/0001_initial.py | 1 | 2230 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Entry'
db.create_table('moneyleft_entry', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('desc', self.gf('django.db.models.fields.CharField')(max_length=100)),
('amount', self.gf('django.db.models.fields.DecimalField')(decimal_places=2, max_digits=10)),
('type', self.gf('django.db.models.fields.IntegerField')()),
('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['moneyleft.Categories'])),
))
db.send_create_signal('moneyleft', ['Entry'])
# Adding model 'Categories'
db.create_table('moneyleft_categories', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal('moneyleft', ['Categories'])
def backwards(self, orm):
# Deleting model 'Entry'
db.delete_table('moneyleft_entry')
# Deleting model 'Categories'
db.delete_table('moneyleft_categories')
models = {
'moneyleft.categories': {
'Meta': {'object_name': 'Categories'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'moneyleft.entry': {
'Meta': {'object_name': 'Entry'},
'amount': ('django.db.models.fields.DecimalField', [], {'decimal_places': '2', 'max_digits': '10'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['moneyleft.Categories']"}),
'desc': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {})
}
}
complete_apps = ['moneyleft'] | apache-2.0 | 6,019,200,249,292,548,000 | 41.09434 | 114 | 0.58296 | false |
zrecore/alexventure.com | alexventure/portfolio/models.py | 1 | 1277 | from django.db import models
# Create your models here.
class Category(models.Model):
name = models.CharField( max_length = 110 )
slug = models.CharField( max_length = 110 )
published = models.IntegerField( default = 0 )
parent = models.ForeignKey( 'self', on_delete = models.CASCADE, blank = True, null = True, default = None )
# ...to string helper
def __str__(self):
return self.name
class Article(models.Model):
title = models.CharField( max_length = 110 )
slug = models.CharField( max_length = 110 )
published = models.IntegerField( default = 0 )
creation_date = models.DateTimeField( 'date created' )
published_date = models.DateTimeField( 'date published' )
edit_date = models.DateTimeField( 'date edited' )
category = models.ForeignKey( Category, on_delete = models.CASCADE )
content_file = models.CharField( max_length = 255 )
# ...to string helper
def __str__(self):
return self.title
class Tag(models.Model):
name = models.CharField( max_length = 32 )
slug = models.CharField( max_length = 32 )
# ...to string helper
def __str__(self):
return self.name
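
# Usage sketch (not part of the original file; assumes a configured Django
# project with this app installed):
#
#   from django.utils import timezone
#   cat = Category.objects.create(name='Projects', slug='projects', published=1)
#   art = Article.objects.create(
#       title='Hello', slug='hello', published=1,
#       creation_date=timezone.now(), published_date=timezone.now(),
#       edit_date=timezone.now(), category=cat, content_file='articles/hello.md')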
| gpl-3.0 | 204,253,478,973,647,900 | 38.90625 | 120 | 0.602976 | false |
fletin/AutoComp | Comp.avs.py | 1 | 4350 | # ver 1.01
# supports running multiple tasks simultaneously
import os,sys
import uuid
#Generate uuid to make sure filename unique
task_uuid=str(uuid.uuid1())
#tools path
x264_path=sys.path[0]+"\\x264\\x264.exe"
ffms2_path=sys.path[0]+"\\ffms2\\ffms2.dll"
bepipe_path=sys.path[0]+"\\BePipe\\BePipe.exe"
nero_path=sys.path[0]+"\\neroaac\\neroAacEnc.exe"
mp4box_path=sys.path[0]+"\\mp4box\\mp4box.exe"
work_path=sys.path[0]
#avs filters
newfps=0
newx=848
newy=480
#x264 para
x264_preset="veryslow" # faster normal slow veryslow, lower the speed, higher the compress ratio
x264_bitrate="2000" # kb/s *time(seconds)/8/1024/1024=MB
#x264_1passOutput="NUL" # one for no result while the other gets v2 for crf22
x264_1passOutput="\""+work_path+"\\temp\\"+task_uuid+".v.mp4\""
crf_value=24
# ffmpegsource2 function
ffms2_script="""function FFmpegSource2(string source, int "vtrack", int "atrack", bool "cache", \\
string "cachefile", int "fpsnum", int "fpsden", int "threads", \\
string "timecodes", int "seekmode", bool "overwrite", int "width", int "height", \\
string "resizer", string "colorspace", int "rffmode", int "adjustdelay", \\
bool "utf8", string "varprefix") {
vtrack = default(vtrack,-1)
atrack = default(atrack,-2)
cache = default(cache,true)
cachefile = default(cachefile,source+".ffindex")
fpsnum = default(fpsnum,-1)
fpsden = default(fpsden,1)
threads = default(threads,-1)
timecodes = default(timecodes,"")
seekmode = default(seekmode,1)
overwrite = default(overwrite,false)
width = default(width,-1)
height = default(height,-1)
resizer = default(resizer,"BICUBIC")
colorspace = default(colorspace,"")
rffmode = default(rffmode,0)
adjustdelay = default(adjustdelay,-1)
utf8 = default(utf8,false)
varprefix = default(varprefix, "")
((cache == true) && (atrack <= -2)) ? ffindex(source=source, cachefile=cachefile, \\
indexmask=0, overwrite=overwrite, utf8=utf8) : (cache == true) ? ffindex(source=source, \\
cachefile=cachefile, indexmask=-1, overwrite=overwrite, utf8=utf8) : nop
v = ffvideosource(source=source, track=vtrack, cache=cache, cachefile=cachefile, \\
fpsnum=fpsnum, fpsden=fpsden, threads=threads, timecodes=timecodes, \\
seekmode=seekmode, rffmode=rffmode, width=width, height=height, resizer=resizer, \\
colorspace=colorspace, utf8=utf8, varprefix=varprefix)
a = (atrack <= -2) ? blankclip(audio_rate=0) : ffaudiosource(source=source, \\
track=atrack, cache=cache, cachefile=cachefile, adjustdelay=adjustdelay, \\
utf8=utf8, varprefix=varprefix)
return audiodubex(v,a)
}"""
print("Input File: "+sys.argv[1]+"\n\r")
#AviSource frameserving
avspath=""
ext_name=sys.argv[1].split(".")[-1]
if ext_name.upper()=="AVS":
avspath=sys.argv[1]
else:
avspath=work_path+"\\temp\\"+task_uuid+".avs"
avsfile=open(avspath,"w+")
if ext_name.upper()=="AVI":
avsfile.write("AviSource(\""+sys.argv[1]+"\")\r\n")
else:
#avsfile.write("LoadPlugin(\""+ffms2_path+"\")\r\nAudioDub(FFVideoSource(\""+sys.argv[1]+"\"), FFAudioSource(\""+sys.argv[1]+"\"))\r\n")
avsfile.write(ffms2_script+"\r\n\r\n\r\n")
avsfile.write("LoadPlugin(\""+ffms2_path+"\")\r\nFFmpegSource2(\""+sys.argv[1]+"\")\r\n")
if newfps>0:
if newfps>20:
avsfile.write("convertfps("+str(newfps)+")\r\n")
else:
avsfile.write("changefps("+str(newfps)+")\r\n")
if (newx>0) & (newy>0):
avsfile.write("lanczosresize("+str(newx)+","+str(newy)+")\r\n")
avsfile.write("ConvertToYUY2()")
avsfile.close()
#Video Section
#x264
os.system(x264_path+" --pass 1 --stats \""+sys.path[0]+"\\temp\\"+task_uuid+".stats\" --level 5.1 --preset "+x264_preset+" --tune psnr --crf "+str(crf_value)+" --output "+x264_1passOutput+" \""+avspath+"\"")
#os.system(x264_path+" --pass 2 --stats \""+sys.path[0]+"\\temp\\temp.stats\" --level 5.1 --preset "+x264_preset+" --tune psnr --bitrate "+x264_bitrate+" --output \""+work_path+"\\temp\\v.mp4\" \""+avspath+"\"")
#Audio Section - neroaac
os.system(bepipe_path+" --script \"Import(^"+avspath+"^)\" | \""+nero_path+"\" -lc -cbr 96000 -if - -of \""+work_path+"\\temp\\"+task_uuid+".a.m4a\"")
#Muxing
os.system(mp4box_path+" -add \""+work_path+"\\temp\\"+task_uuid+".v.mp4\" -add \""+work_path+"\\temp\\"+task_uuid+".a.m4a\" \""+sys.argv[1]+".mp4\"")
#Finishing
print("Finished.")
os.system("pause")
os.system("del "+work_path+"\\temp\\*.* /q")
| gpl-2.0 | -3,835,330,320,334,072,300 | 30.521739 | 211 | 0.665747 | false |
timofeymukha/turbulucid | turbulucid/core/case.py | 1 | 14104 | # This file is part of turbulucid
# (c) 2018 Timofey Mukha
# The code is released under the GNU GPL Version 3 licence.
# See LICENCE.txt and the Legal section in the README for more information
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from vtk.util.numpy_support import numpy_to_vtk
from vtk.util.numpy_support import vtk_to_numpy
import os
import vtk
from vtk.numpy_interface import dataset_adapter as dsa
from .readers import NativeReader, LegacyReader, XMLReader
__all__ = ["Case"]
class Case:
"""A class representing a simulation case.
"""
def __init__(self, fileName, clean=False, pointData=False):
"""
Create Case from file.
Parameters
----------
fileName : str
The file to be read in. Should be data in VTK format.
clean : bool
            Whether to attempt to clean the data of redundant cells.
        pointData : bool
            Whether the file contains point data instead of cell data.
            Cell data will be computed by interpolation.
        """
self.fileName = fileName
# Read in the data
self._blockData = self.read(clean, pointData)
# Compute the cell-centres
self._cellCentres = vtk.vtkCellCenters()
self._cellCentres.SetInputData(self._blockData.GetBlock(0))
self._cellCentres.Update()
self._cellCentres =\
dsa.WrapDataObject(self._cellCentres.GetOutput()).GetPoints()
self._cellCentres = np.array(self._cellCentres[:, :2])
self._vtkData = dsa.WrapDataObject(self._blockData.GetBlock(0))
self._boundaries = self._fill_boundary_list()
self._bounds = self._vtkData.VTKObject.GetBounds()[:4]
self._fields = self._vtkData.CellData.keys()
plot_limits = self._compute_plot_limits()
self._xlim = plot_limits[0]
self._ylim = plot_limits[1]
self._boundaryCellCoords, self._boundaryCellData = \
self._compute_boundary_cell_data()
@property
def blockData(self):
"""vtkMultiBlockDataSet : the multiblock data assembled by the
reader.
"""
return self._blockData
@property
def vtkData(self):
"""wrapped PolyData : The actual data read by the reader."""
return self._vtkData
@property
def cellCentres(self):
"""wrapped VTKArray : the cell centres of the read data """
return self._cellCentres
@property
def boundaries(self):
"""list : A list of names of the boundaries present the case."""
return self._boundaries
@property
def bounds(self):
"""tuple : (min(x), max(x), min(y), max(y))."""
return self._bounds
@property
def fields(self):
"""list of str: The names of the fields present in the case."""
return self._fields
@property
def xlim(self):
"""list of two floats: The x limits that cover the
        geometry of the case, plus a small margin.
"""
return self._xlim
@property
def ylim(self):
"""list of two floats: The y limits that cover the
geometry of the case, plus a small margin.
"""
return self._ylim
def _fill_boundary_list(self):
fieldData = self.vtkData.FieldData['boundaries']
boundaryList = []
for i in range(fieldData.GetNumberOfValues()):
boundaryList.append(fieldData.GetValue(i))
return boundaryList
def __getitem__(self, item):
"""Return a cell array by name.
Parameters
----------
item : string
The name of the cell array.
Returns
-------
ndarray
Array of values of the requested field.
"""
if item not in self._fields:
raise ValueError("Field " + item + " not present in the case.")
return np.copy(np.array((self.vtkData.CellData[item])))
def __setitem__(self, item, values):
"""Add another internal field to the case.
Parameters
----------
item : string
The name of the cell array.
values : ndarray
The values of the field.
"""
if values.shape[0] != self[self.fields[0]].shape[0]:
raise ValueError("The dimensionality of the provided field "
"does not match that of the case.")
self.fields.append(item)
cellData = self._vtkData.VTKObject.GetCellData()
valuesVtk = vtk.vtkDoubleArray()
if np.ndim(values) > 1:
valuesVtk.SetNumberOfComponents(values.shape[1])
valuesVtk.SetNumberOfTuples(values.shape[0])
for i in range(values.shape[0]):
valuesVtk.SetTuple(i, values[i, :])
else:
valuesVtk.SetNumberOfComponents(1)
valuesVtk.SetNumberOfValues(values.shape[0])
for i in range(values.shape[0]):
valuesVtk.SetValue(i, values[i])
valuesVtk.SetName(item)
cellData.AddArray(valuesVtk)
# Add boundary cell data
# Add boundary data by copying from boundary cells data
for boundary in self.boundaries:
boundaryCellIds = self._vtkData.FieldData[boundary]
self._boundaryCellData[boundary][item] = self[item][boundaryCellIds, ...]
block = self.extract_block_by_name(boundary)
cellData = block.GetCellData()
valuesVtk = vtk.vtkDoubleArray()
nVals = self.boundary_cell_data(boundary)[0][:, 0].size
bCellData = self.boundary_cell_data(boundary)[1][item]
if np.ndim(values) > 1:
valuesVtk.SetNumberOfComponents(values.shape[1])
valuesVtk.SetNumberOfTuples(nVals)
for i in range(nVals):
valuesVtk.SetTuple(i, bCellData[i, :])
else:
valuesVtk.SetNumberOfComponents(1)
valuesVtk.SetNumberOfValues(nVals)
for i in range(nVals):
valuesVtk.SetValue(i, bCellData[i])
valuesVtk.SetName(item)
cellData.AddArray(valuesVtk)
def __delitem__(self, item):
"""Delete an internal field form the case.
Parameters
----------
item : str
Name of the field to delete.
"""
self.vtkData.VTKObject.GetCellData().RemoveArray(item)
self.fields.remove(item)
for boundary in self.boundaries:
del self._boundaryCellData[boundary][item]
block = self.extract_block_by_name(boundary)
block.GetCellData().RemoveArray(item)
def _compute_plot_limits(self):
""" Compute xlim and ylim."""
minX = self.bounds[0]
maxX = self.bounds[1]
minY = self.bounds[2]
maxY = self.bounds[3]
marginX = (maxX - minX)/60
marginY = (maxY - minY)/60
return (np.array([minX - marginX, maxX + marginX]),
np.array([minY - marginY, maxY + marginY]))
def _transform(self, transform):
"""Transform the geometry according to a vtkTransform filter."""
# Transform the internal field
filter = vtk.vtkTransformPolyDataFilter()
filter.SetInputData(self.blockData.GetBlock(0))
filter.SetTransform(transform)
filter.Update()
self._blockData.SetBlock(0, filter.GetOutput())
# Transform boundary data
i = 1
for boundary in self.boundaries:
filter = vtk.vtkTransformPolyDataFilter()
filter.SetTransform(transform)
filter.SetInputData(self.blockData.GetBlock(i))
filter.Update()
self.blockData.SetBlock(i, filter.GetOutput())
i += 1
        # Update attributes
self._cellCentres = vtk.vtkCellCenters()
self._cellCentres.SetInputData(self.blockData.GetBlock(0))
self._cellCentres.Update()
self._cellCentres = \
dsa.WrapDataObject(self._cellCentres.GetOutput()).GetPoints()
self._cellCentres = np.array(self._cellCentres[:, :2])
self._vtkData = dsa.WrapDataObject(self._blockData.GetBlock(0))
self._bounds = self._vtkData.VTKObject.GetBounds()[:4]
plot_limits = self._compute_plot_limits()
self._xlim = plot_limits[0]
self._ylim = plot_limits[1]
self._boundaryCellCoords, self._boundaryCellData = \
self._compute_boundary_cell_data()
def _compute_boundary_cell_data(self):
from collections import OrderedDict
boundaryCellData = OrderedDict()
boundaryCellCoords = OrderedDict()
for b in self.boundaries:
boundaryCellData[b] = OrderedDict()
cellIds = self._vtkData.FieldData[b]
boundaryCellCoords[b] = self.cellCentres[cellIds, :]
for f in self.fields:
boundaryCellData[b][f] = self.__getitem__(f)[cellIds, ...]
return boundaryCellCoords, boundaryCellData
def translate(self, dx, dy):
"""Translate the geometry of the case.
Parameters
----------
dx : float
The translation along the x axis.
dy : float
The translation along the y axis.
"""
transform = vtk.vtkTransform()
transform.Translate(dx, dy, 0)
transform.Update()
self._transform(transform)
def scale(self, scaleX, scaleY):
"""Scale the geometry of the case.
The coordinates get divided by the scaling factors.
Parameters
----------
scaleX : float
The scaling factor along x.
scaleY : float
The scaling factor along y.
"""
transform = vtk.vtkTransform()
transform.Scale(1/scaleX, 1/scaleY, 0)
transform.Update()
self._transform(transform)
def rotate(self, angle):
"""Rotate the geometry of the case around the z axis.
Parameters
----------
        angle : float
Rotation angle in degrees.
"""
axis = [0, 0, 1]
transform = vtk.vtkTransform()
transform.RotateWXYZ(angle, axis[0], axis[1], axis[2])
transform.Update()
self._transform(transform)
def boundary_cell_data(self, boundary, sort=None):
"""Return cell-centre coordinates and data from cells adjacent
to a specific boundary.
Parameters
----------
boundary : str
The name of the boundary.
sort : {None, 'x', 'y'}, optional
Whether to sort the data along a coordinate. Use 'x' and
'y' to sort along x and y, respectively. Default is no
sorting.
Returns
-------
Two ndarrays
"""
points = np.copy(self._boundaryCellCoords[boundary])
data = self._boundaryCellData[boundary].copy()
if sort is None:
return points, data
elif sort == "x":
ind = np.argsort(points[:, 0])
elif sort == "y":
ind = np.argsort(points[:, 1])
points = points[ind]
for key in data:
data[key] = data[key][ind, ...]
return points, data
def extract_block_by_name(self, name):
"""Extract a block from the case by a given name."""
return self._blockData.GetBlock(self.boundaries.index(name) + 1)
def boundary_data(self, boundary, sort=None):
"""Return cell-center coordinates and data from a boundary.
Parameters
----------
boundary : str
The name of the boundary.
sort : str
Whether to sort the data along a coordinate. Use "x" and
"y" to sort along x and y, respectively. Default is no
sorting.
Returns
-------
Two ndarrays
The coordinates of the boundary face centres.
The corresponding data.
"""
blockData = self.extract_block_by_name(boundary)
cCenters = vtk.vtkCellCenters()
cCenters.SetInputData(blockData)
cCenters.Update()
points = np.array(dsa.WrapDataObject(cCenters.GetOutput()).Points)
dataVTK = dsa.WrapDataObject(blockData).CellData
data = {}
for key in dataVTK.keys():
data[key] = np.array(dataVTK[key])
if sort is None:
return points[:, [0, 1]], data
elif sort == "x":
ind = np.argsort(points[:, 0])
elif sort == "y":
ind = np.argsort(points[:, 1])
points = points[ind]
for key in data:
data[key] = data[key][ind]
return points[:, [0, 1]], data
def read(self, clean, pointData):
"""Read in the data from a file.
Parameters
----------
clean : bool
Whether to attempt cleaning the case of degenerate cells upon
read.
pointData : bool
Whether the file contains point data instead of cell data.
Cell data will be computed by interpolation.
Raises
------
ValueError
If the provided file does not exist.
"""
fileName = self.fileName
fileExt = os.path.splitext(fileName)[1]
if fileExt == ".vtm":
reader = NativeReader(fileName)
return reader.data
elif fileExt == ".vtk":
return LegacyReader(fileName, clean=clean,
pointData=pointData).data
elif (fileExt == ".vtu") or (fileExt == ".vtp"):
return XMLReader(fileName, clean=clean, pointData=pointData).data
else:
raise ValueError("Unsupported file format.", fileName, fileExt)
def write(self, writePath):
"""Save the case to a .vtm format.
Parameters
----------
writePath : str
The name of the file.
"""
writer = vtk.vtkXMLMultiBlockDataWriter()
writer.SetFileName(writePath)
writer.SetInputData(self._blockData)
writer.Write()
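
# Usage sketch (not part of the original module): "postProcessing/case.vtm" is
# an assumed placeholder for a real multiblock VTK file.
#   case = Case("postProcessing/case.vtm")
#   print(case.fields, case.boundaries)
#   points, data = case.boundary_cell_data(case.boundaries[0], sort="x")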
| gpl-3.0 | -8,321,563,673,050,837,000 | 28.383333 | 85 | 0.573454 | false |
Linaro/lava-dispatcher | lava_dispatcher/test/test_lavashell.py | 1 | 7705 | # Copyright (C) 2014 Linaro Limited
#
# Author: Neil Williams <[email protected]>
#
# This file is part of LAVA Dispatcher.
#
# LAVA Dispatcher is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# LAVA Dispatcher is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses>.
import os
import yaml
import datetime
from lava_dispatcher.action import (
Action,
InfrastructureError,
Pipeline,
JobError,
LAVAError,
Timeout
)
from lava_dispatcher.parser import JobParser
from lava_dispatcher.device import NewDevice
from lava_dispatcher.actions.deploy.testdef import get_test_action_namespaces
from lava_dispatcher.test.utils import DummyLogger
from lava_dispatcher.job import Job
from lava_dispatcher.protocols.multinode import MultinodeProtocol
from lava_dispatcher.protocols.vland import VlandProtocol
from lava_dispatcher.test.test_basic import Factory, StdoutTestCase
from lava_dispatcher.actions.test.shell import TestShellRetry, TestShellAction
# pylint: disable=duplicate-code,too-few-public-methods
class TestDefinitionHandlers(StdoutTestCase): # pylint: disable=too-many-public-methods
def setUp(self):
super(TestDefinitionHandlers, self).setUp()
factory = Factory()
self.job = factory.create_kvm_job('sample_jobs/kvm.yaml')
def test_testshell(self):
testshell = None
for action in self.job.pipeline.actions:
self.assertIsNotNone(action.name)
if isinstance(action, TestShellRetry):
testshell = action.pipeline.actions[0]
break
self.assertIsInstance(testshell, TestShellAction)
self.assertTrue(testshell.valid)
if 'timeout' in testshell.parameters:
time_int = Timeout.parse(testshell.parameters['timeout'])
else:
time_int = Timeout.default_duration()
self.assertEqual(
datetime.timedelta(seconds=time_int).total_seconds(),
testshell.timeout.duration
)
def test_missing_handler(self):
device = NewDevice(os.path.join(os.path.dirname(__file__), '../devices/kvm01.yaml'))
kvm_yaml = os.path.join(os.path.dirname(__file__), 'sample_jobs/kvm.yaml')
parser = JobParser()
with open(kvm_yaml) as sample_job_data:
data = yaml.load(sample_job_data)
data['actions'][2]['test']['definitions'][0]['from'] = 'unusable-handler'
try:
job = parser.parse(yaml.dump(data), device, 4212, None, "")
job.logger = DummyLogger()
except JobError:
pass
except Exception as exc: # pylint: disable=broad-except
self.fail(exc)
else:
self.fail('JobError not raised')
def test_eventpatterns(self):
testshell = None
for action in self.job.pipeline.actions:
self.assertIsNotNone(action.name)
if isinstance(action, TestShellRetry):
testshell = action.pipeline.actions[0]
break
self.assertTrue(testshell.valid)
self.assertFalse(testshell.check_patterns('exit', None, ''))
self.assertRaises(InfrastructureError, testshell.check_patterns, 'eof', None, '')
self.assertTrue(testshell.check_patterns('timeout', None, ''))
class X86Factory(Factory):
def create_x86_job(self, filename, device): # pylint: disable=no-self-use
kvm_yaml = os.path.join(os.path.dirname(__file__), filename)
parser = JobParser()
try:
with open(kvm_yaml) as sample_job_data:
job = parser.parse(sample_job_data, device, 4212, None, "")
job.logger = DummyLogger()
except LAVAError as exc:
print(exc)
# some deployments listed in basics.yaml are not implemented yet
return None
return job
class TestMultiNodeOverlay(StdoutTestCase): # pylint: disable=too-many-public-methods
def setUp(self):
super(TestMultiNodeOverlay, self).setUp()
factory = X86Factory()
lng1 = NewDevice(os.path.join(os.path.dirname(__file__), '../devices/lng-generator-01.yaml'))
lng2 = NewDevice(os.path.join(os.path.dirname(__file__), '../devices/lng-generator-02.yaml'))
self.server_job = factory.create_x86_job('sample_jobs/test_action-1.yaml', lng1)
self.client_job = factory.create_x86_job('sample_jobs/test_action-2.yaml', lng2)
def test_action_namespaces(self):
self.assertIsNotNone(self.server_job)
self.assertIsNotNone(self.client_job)
deploy_server = [action for action in self.server_job.pipeline.actions if action.name == 'tftp-deploy'][0]
self.assertIn(MultinodeProtocol.name, deploy_server.parameters.keys())
self.assertIn(VlandProtocol.name, deploy_server.parameters.keys())
self.assertEqual(['common'], get_test_action_namespaces(self.server_job.parameters))
namespace = self.server_job.parameters.get('namespace', None)
self.assertIsNone(namespace)
namespace = self.client_job.parameters.get('namespace', None)
self.assertIsNone(namespace)
deploy_client = [action for action in self.client_job.pipeline.actions if action.name == 'tftp-deploy'][0]
self.assertIn(MultinodeProtocol.name, deploy_client.parameters.keys())
self.assertIn(VlandProtocol.name, deploy_client.parameters.keys())
key_list = []
for block in self.client_job.parameters['actions']:
key_list.extend(block.keys())
self.assertEqual(key_list, ['deploy', 'boot', 'test']) # order is important
self.assertEqual(['common'], get_test_action_namespaces(self.client_job.parameters))
key_list = []
for block in self.server_job.parameters['actions']:
key_list.extend(block.keys())
self.assertEqual(key_list, ['deploy', 'boot', 'test']) # order is important
class TestShellResults(StdoutTestCase): # pylint: disable=too-many-public-methods
class FakeJob(Job):
def __init__(self, parameters):
super(TestShellResults.FakeJob, self).__init__(parameters)
class FakeDeploy(object):
"""
Derived from object, *not* Deployment as this confuses python -m unittest discover
- leads to the FakeDeploy being called instead.
"""
def __init__(self, parent):
self.__parameters__ = {}
self.pipeline = parent
self.job = parent.job
self.action = TestShellResults.FakeAction()
class FakePipeline(Pipeline):
def __init__(self, parent=None, job=None):
super(TestShellResults.FakePipeline, self).__init__(parent, job)
class FakeAction(Action):
"""
Isolated Action which can be used to generate artificial exceptions.
"""
name = "fake-action"
description = "fake, do not use outside unit tests"
summary = "fake action for unit tests"
def __init__(self):
super(TestShellResults.FakeAction, self).__init__()
self.count = 1
def run(self, connection, max_end_time, args=None):
self.count += 1
raise JobError("fake error")
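
# Run sketch (not part of the original file): these tests are normally invoked
# through the unittest runner from the repository root, e.g.
#   python -m unittest lava_dispatcher.test.test_lavashell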
| gpl-2.0 | -5,162,780,083,321,173,000 | 39.340314 | 114 | 0.657495 | false |
bbfamily/abu | abupy/TradeBu/ABuTradeProxy.py | 1 | 14496 | # -*- encoding:utf-8 -*-
"""
Trade execution proxy module.
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from contextlib import contextmanager
from functools import total_ordering
from enum import Enum
import numpy as np
import pandas as pd
from . import ABuTradeDrawer
from . import ABuTradeExecute
__author__ = '阿布'
__weixin__ = 'abu_quant'
class EOrderSameRule(Enum):
"""对order_pd中对order判断为是否相同使用的规则"""
"""order有相同的symbol和买入日期就认为是相同"""
ORDER_SAME_BD = 0
"""order有相同的symbol, 买入日期,和卖出日期,即不考虑价格,只要日期相同就相同"""
ORDER_SAME_BSD = 1
"""order有相同的symbol, 买入日期,相同的买入价格,即单子买入时刻都相同"""
ORDER_SAME_BDP = 2
"""order有相同的symbol, 买入日期, 买入价格, 并且相同的卖出日期和价格才认为是相同,即买入卖出时刻都相同"""
ORDER_SAME_BSPD = 3
@total_ordering
class AbuOrderPdProxy(object):
"""
    Wraps a pd.DataFrame of trade orders so that the trading results of a factor
    can be debugged externally and problems in a trading strategy tracked down.
    Supports set-like union, intersection and difference of two orders_pd, plus
    the comparison operators ==, !=, > and <, e.g.:
    orders_pd1 = AbuOrderPdProxy(orders_pd1)
    with orders_pd1.proxy_work(orders_pd2) as (order1, order2):
        a = order1 | order2  # union of the two trade results
        b = order1 & order2  # intersection of the two trade results
        c = order1 - order2  # difference (in order1 but not in order2)
        d = order2 - order1  # difference (in order2 but not in order1)
        eq = order1 == order2  # whether the two trade results are identical
        lg = order1 > order2  # whether order1 has more unique trades than order2
        lt = order1 < order2  # whether order1 has fewer unique trades than order2
"""
def __init__(self, orders_pd, same_rule=EOrderSameRule.ORDER_SAME_BSPD):
"""
        The initializer expects a pd.DataFrame object; no type checking is done for now.
        :param orders_pd: pd.DataFrame of trade orders produced by a backtest
        :param same_rule: rule used to decide whether two orders are the same,
                          default EOrderSameRule.ORDER_SAME_BSPD, i.e. orders are only
                          the same when symbol, buy date, sell date and prices all match
        """
        # copy() is needed because column attributes etc. will be added to orders_pd
self.orders_pd = orders_pd.copy()
self.same_rule = same_rule
        # storage for the results of union, intersection and difference operations
self.op_result = None
self.last_op_metrics = {}
@contextmanager
def proxy_work(self, orders_pd):
"""
        Wrap the orders_pd to be compared in a new AbuOrderPdProxy object, hand
        both proxies to the caller, then analyse op_result in a unified way.
        :param orders_pd: pd.DataFrame of trade orders produced by a backtest
        :return:
        """
        # reset the set-operation result
        self.op_result = None
        # build the AbuOrderPdProxy object to compare against
other = AbuOrderPdProxy(orders_pd)
try:
yield self, other
finally:
if isinstance(self.op_result, pd.DataFrame):
                # a union, intersection or difference result is stored; compute metrics over it
from ..MetricsBu.ABuMetricsBase import AbuMetricsBase
metrics = AbuMetricsBase(self.op_result, None, None, None)
metrics.fit_metrics_order()
self.last_op_metrics['win_rate'] = metrics.win_rate
self.last_op_metrics['gains_mean'] = metrics.gains_mean
self.last_op_metrics['losses_mean'] = metrics.losses_mean
self.last_op_metrics['sum_profit'] = self.op_result['profit'].sum()
self.last_op_metrics['sum_profit_cg'] = self.op_result['profit_cg'].sum()
def __and__(self, other):
""" & 操作符的重载,计算两个交易集的交集"""
# self.op = 'intersection(order1 & order2)'
self.op_result = intersection_in_2orders(self.orders_pd, other.orders_pd, same_rule=self.same_rule)
return self.op_result
def __or__(self, other):
""" | 操作符的重载,计算两个交易集的并集"""
# self.op = 'union(order1 | order2)'
self.op_result = union_in_2orders(self.orders_pd, other.orders_pd)
return self.op_result
def __sub__(self, other):
""" - 操作符的重载,计算两个交易集的差集"""
self.op_result = difference_in_2orders(self.orders_pd, other.orders_pd, same_rule=self.same_rule)
return self.op_result
def __eq__(self, other):
""" == 操作符的重载,计算两个交易集的是否相同"""
return (self - other).empty and (other - self).empty
def __gt__(self, other):
""" > 操作符的重载,计算两个交易集的大小, 类被total_ordering装饰,可以支持lt等操作符"""
unique_cnt = find_unique_group_symbol(self.orders_pd).shape[0]
other_unique_cnt = find_unique_group_symbol(other.orders_pd).shape[0]
return unique_cnt > other_unique_cnt
def union_in_2orders(orders_pd, other_orders_pd):
"""
    Union: used when analysing factor or parameter issues; while debugging a
    strategy it collects all the distinct trades from the two orders_pd. Note
    that buying the same stock on the same trading day does NOT make two trades
    the same here; the two orders_pd are simply merged and drop_duplicates
    removes only completely identical orders, i.e. the result is the union:
    orders_pd | cmp_orders_pd or orders_pd.union(cmp_orders_pd)
    :param orders_pd: pd.DataFrame of trade orders produced by a backtest
    :param other_orders_pd: pd.DataFrame of trade orders produced by a backtest
    :return: orders_pd | cmp_orders_pd
"""
orders_pd = orders_pd.append(other_orders_pd)
orders_pd = orders_pd.drop_duplicates()
return orders_pd
def _same_pd(order, other_orders_pd, same_rule):
"""
    Return the rows that are the same according to same_rule from orders_pd and other_orders_pd.
    :param order: a single order row from an orders_pd
    :param other_orders_pd: pd.DataFrame of trade orders produced by a backtest
    :param same_rule: rule used to decide whether two orders are the same
    :return: the matching rows from orders_pd and other_orders_pd
"""
symbol = order.symbol
buy_day = order['buy_date']
buy_price = order['buy_price']
sell_day = order['sell_date']
sell_price = order['sell_price']
if same_rule == EOrderSameRule.ORDER_SAME_BD:
        # same symbol and buy date only, i.e. buying the same stock on the same trading day makes two trades the same; every other order field is ignored
same_pd = other_orders_pd[(other_orders_pd['symbol'] == symbol) & (other_orders_pd['buy_date'] == buy_day)]
elif same_rule == EOrderSameRule.ORDER_SAME_BSD:
        # same symbol, buy date and sell date
same_pd = other_orders_pd[(other_orders_pd['symbol'] == symbol) & (other_orders_pd['buy_date'] == buy_day)
& (other_orders_pd['sell_date'] == sell_day)]
elif same_rule == EOrderSameRule.ORDER_SAME_BDP:
        # same symbol, buy date and buy price
same_pd = other_orders_pd[(other_orders_pd['symbol'] == symbol) & (other_orders_pd['buy_date'] == buy_day)
& (other_orders_pd['buy_price'] == buy_price)]
elif same_rule == EOrderSameRule.ORDER_SAME_BSPD:
        # same symbol, buy date, sell date, buy price and sell price
same_pd = other_orders_pd[(other_orders_pd['symbol'] == symbol) & (other_orders_pd['buy_date'] == buy_day)
& (other_orders_pd['sell_date'] == sell_day)
& (other_orders_pd['buy_price'] == buy_price)
& (other_orders_pd['sell_price'] == sell_price)]
else:
raise TypeError('same_rule type is {}!!'.format(same_rule))
return same_pd
def intersection_in_2orders(orders_pd, other_orders_pd, same_rule=EOrderSameRule.ORDER_SAME_BSPD):
"""
    Intersection: used when analysing factor or parameter issues; while debugging
    a strategy it selects the trades that appear in both orders_pd,
    i.e. the result is the intersection: orders_pd & cmp_orders_pd or orders_pd.intersection(cmp_orders_pd)
    :param orders_pd: pd.DataFrame of trade orders produced by a backtest
    :param other_orders_pd: pd.DataFrame of trade orders produced by a backtest
    :param same_rule: rule used to decide whether two orders are the same, default EOrderSameRule.ORDER_SAME_BSPD,
                      i.e. orders are only the same when symbol, buy date, sell date and prices all match
    :return: orders_pd & cmp_orders_pd
"""
def _intersection(order):
same_pd = _same_pd(order, other_orders_pd, same_rule)
if same_pd.empty:
            # empty means no overlap
return False
        # overlapping, intersection=1, part of the intersection
return True
orders_pd['intersection'] = orders_pd.apply(_intersection, axis=1)
return orders_pd[orders_pd['intersection'] == 1]
def difference_in_2orders(orders_pd, other_orders_pd, same_rule=EOrderSameRule.ORDER_SAME_BSPD):
"""
    Difference: used when analysing factor or parameter issues; while debugging a
    strategy it selects the trades that differ between the two orders_pd. Note the
    result contains the trades present in orders_pd but NOT in cmp_orders_pd,
    i.e. the result is the difference: orders_pd - cmp_orders_pd or orders_pd.difference(cmp_orders_pd)
    :param orders_pd: pd.DataFrame of trade orders produced by a backtest
    :param other_orders_pd: pd.DataFrame of trade orders produced by a backtest
    :param same_rule: rule used to decide whether two orders are the same, default EOrderSameRule.ORDER_SAME_BSPD,
                      i.e. orders are only the same when symbol, buy date, sell date and prices all match
    :return: orders_pd - cmp_orders_pd
"""
def _difference(order):
same_pd = _same_pd(order, other_orders_pd, same_rule)
if same_pd.empty:
            # nothing matches, so the order belongs to the difference
return True
        # a match exists, so the order is not part of the difference
return False
orders_pd['difference'] = orders_pd.apply(_difference, axis=1)
return orders_pd[orders_pd['difference'] == 1]
def find_unique_group_symbol(order_pd):
"""
    Group by 'buy_date' and 'symbol' and keep only the first row of each group, same_group.iloc[0].
:param order_pd:
:return:
"""
def _find_unique_group_symbol(same_group):
        # keep only the first row of each group, i.e. only one order per stock per trading day is kept
return same_group.iloc[0]
    # group by 'buy_date' and 'symbol', then apply the handler
order_pds = order_pd.groupby(['buy_date', 'symbol']).apply(_find_unique_group_symbol)
return order_pds
def find_unique_symbol(order_pd, same_rule=EOrderSameRule.ORDER_SAME_BSPD):
"""
    If one buy_date in order_pd has several trade records for the same symbol,
    filter them all out. Note: to keep one record out of the duplicates instead,
    use find_unique_group_symbol.
    :param order_pd: pd.DataFrame of trade orders produced by a backtest
    :param same_rule: rule used to decide whether two orders are the same, default EOrderSameRule.ORDER_SAME_BSPD,
                      i.e. orders are only the same when symbol, buy date, sell date and prices all match
"""
def _find_unique_symbol(order):
"""根据order的symbol和buy_date在原始order_pd中进行复合条件筛选,结果same_pd如果只有1个就唯一,否则就是重复的"""
same_pd = _same_pd(order, order_pd, same_rule)
if same_pd.empty or same_pd.shape[0] == 1:
return False
        # when a symbol has several records on the same day, none of them is kept; all are filtered out
return True
same_mark = order_pd.apply(_find_unique_symbol, axis=1)
return order_pd[same_mark == 0]
def trade_summary(orders, kl_pd, draw=False, show_info=True):
"""
    Mainly converts a sequence of AbuOrder objects into a pd.DataFrame orders_pd,
    converts the order time series into a sequence of trade actions, draws the
    detailed chart of every trade, and outputs simple textual metrics.
    :param orders: sequence of AbuOrder objects
    :param kl_pd: financial time series, a pd.DataFrame object
    :param draw: whether to visualise the detailed trade charts
    :param show_info: whether to output textual trade information
"""
    # convert the AbuOrder sequence into a pd.DataFrame orders_pd
orders_pd = ABuTradeExecute.make_orders_pd(orders, kl_pd)
    # convert the order time series into a sequence of trade actions
action_pd = ABuTradeExecute.transform_action(orders_pd)
summary = ''
if draw:
        # draw the detailed chart of every trade
ABuTradeDrawer.plot_his_trade(orders, kl_pd)
if show_info:
        # 'simple' means trading fees are not taken into account
simple_profit = 'simple profit: {} \n'.format(ABuTradeExecute.calc_simple_profit(orders, kl_pd))
summary += simple_profit
        # expected profit per winning trade
mean_win_profit = 'mean win profit {} \n'.format(np.mean(orders_pd[orders_pd.result == 1]['profit']))
summary += mean_win_profit
        # expected loss per losing trade
mean_loss_profit = 'mean loss profit {} \n'.format(np.mean(orders_pd[orders_pd.result == -1]['profit']))
summary += mean_loss_profit
        # number of winning trades
win_cnt = 0 if len(orders_pd[orders_pd.result == 1].result.value_counts().values) <= 0 else \
orders_pd[orders_pd.result == 1].result.value_counts().values[0]
        # number of losing trades
loss_cnt = 0 if len(orders_pd[orders_pd.result == -1].result.value_counts().values) <= 0 else \
orders_pd[orders_pd.result == -1].result.value_counts().values[0]
        # win rate
win_rate = 'win rate ' + str('*@#')
if win_cnt + loss_cnt > 0:
win_rate = 'win rate: {}%'.format(float(win_cnt) / float(float(loss_cnt) + float(win_cnt)))
summary += win_rate
return orders_pd, action_pd, summary
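
# Usage sketch (not part of the original module): `orders` and `kl_pd` are
# hypothetical placeholders for an AbuOrder sequence and its kline pd.DataFrame
# produced elsewhere by an abupy backtest.
#   orders_pd, action_pd, summary = trade_summary(orders, kl_pd, draw=False)
#   print(summary)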
| gpl-3.0 | -5,568,852,503,388,017,000 | 35.951456 | 115 | 0.632335 | false |
ak681443/mana-deep | evaluation/allmods/new_train/FindBestMatch.py | 1 | 5181 |
# coding: utf-8
# In[1]:
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from keras.layers import Input, Dense, Convolution2D, MaxPooling2D, UpSampling2D
from keras.models import Model
from keras.callbacks import TensorBoard
from keras.models import model_from_json
from keras.models import load_model
from keras import regularizers
from os import listdir
from os.path import isfile, join
import numpy as np
from matplotlib import pyplot as plt
import cv2
import scipy.misc
from scipy import spatial
from PIL import Image
import heapq
import sys
# In[2]:
th = int(sys.argv[1])
v = int(sys.argv[2])
mypath1 = '/home/arvind/MyStuff/Desktop/Manatee_dataset/cleaned_data/train/'
files1 = [f for f in listdir(mypath1) if isfile(join(mypath1, f))]
X_test = []
masks = np.zeros((224,224))
for filen1 in files1:
img1 = cv2.imread(mypath1+filen1)
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
img1[img1<th] = v
img1[img1>=th] = 0
masks = masks + img1
masks = masks / v
#masks = np.zeros(masks.shape)
#img1[masks>20] = 0
#print np.average(masks)
#plt.imshow(img1)
#masks = cv2.cvtColor(cv2.imread('/home/arvind/Desktop/mask.jpg'), cv2.COLOR_BGR2GRAY)
#masks = cv2.resize(masks, (224,224))
#masks[masks<100] = 71
#masks[masks!=71] = 0
# In[3]:
input_img = Input(shape=(224, 224,1))
x = Convolution2D(16, 3, 3, activation='relu', border_mode='same', input_shape=(224,224,1))(input_img)
x = MaxPooling2D((2, 2), border_mode='same')(x)
x = Convolution2D(8, 3, 3, activation='relu', border_mode='same')(x)
x = MaxPooling2D((2, 2), border_mode='same')(x)
x = Convolution2D(8, 3, 3, activation='relu', border_mode='same', activity_regularizer=regularizers.activity_l1(10e-5))(x)
encoded = MaxPooling2D((2, 2), border_mode='same')(x)
model = Model(input_img, encoded)
model.compile(loss='binary_crossentropy', optimizer='adagrad', verbose=0)
# In[4]:
model.load_weights(sys.argv[3], by_name=True)
# In[5]:
def push_pqueue(queue, priority, value):
    # Keep a bounded min-heap of scored matches: once the heap holds more than
    # 20 entries, heappushpop evicts the lowest-priority one on each new push.
    if len(queue)>20:
        heapq.heappushpop(queue, (priority, value))
    else:
        heapq.heappush(queue, (priority, value))
mypath1 = '/home/arvind/MyStuff/Desktop/Manatee_dataset/cleaned_data/test_eval/'
files1 = [f for f in listdir(mypath1) if isfile(join(mypath1, f))]
X_test = []
for filen1 in files1:
img1 = cv2.imread(mypath1+filen1)
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
img1[img1<=th] = v
img1[masks>60] = 0
img1[img1>th] = 0
X_test.append(np.array([img1]))
X_test = np.array(X_test).astype('float32')#/ float(np.max(X))
X_test = np.reshape(X_test, (len(X_test), 224, 224, 1))
X_test_pred = model.predict(X_test, verbose=0)
# In[8]:
mypath1 = '/home/arvind/MyStuff/Desktop/Manatee_dataset/cleaned_data/train_eval/'
files1 = [f for f in listdir(mypath1) if isfile(join(mypath1, f))]
X_train = []
files1 = files1[:100 * int(sys.argv[4])]
for filen1 in files1:
img1 = cv2.imread(mypath1+filen1)
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
img1[img1<=th] = v
img1[masks>60] = 0
img1[img1>th] = 0
X_train.append(np.array([img1]))
X_train = np.array(X_train).astype('float32')#/ float(np.max(X))
X_train = np.reshape(X_train, (len(X_train), 224, 224, 1))
X_train_pred = model.predict(X_train, verbose=0)
# In[9]:
import time
mypath = '/home/arvind/MyStuff/Desktop/Manatee_dataset/cleaned_data/train_eval/'
files = [f for f in listdir(mypath) if isfile(join(mypath, f))]
files = files[:100 * int(sys.argv[4])]
start_time = time.time()
mypath1 = '/home/arvind/MyStuff/Desktop/Manatee_dataset/cleaned_data/test_eval/'
files1 = [f for f in listdir(mypath1) if isfile(join(mypath1, f))]
top10_correct = 0
top20_correct = 0
top5_correct = 0
top1_correct = 0
for i in np.arange(0, len(files1)):
filen1 = files1[i]
pred = X_test_pred[i]
max_confidence = 0.0
max_file = None
pqueue = []
msk = cv2.resize(X_train[i].reshape((224,224)), (28, 28))
msk = np.repeat(msk[:, :, np.newaxis], 8, axis=2)
msk = msk.flatten()
pred = pred.flatten()
#pred[msk!=0] = 5
for j in np.arange(0, len(files)):
filen = files[j]
tpred = X_train_pred[j]
tpred = np.copy(tpred).flatten()
#tpred[msk!=0] = tpred[msk!=0] * 5
score = 1 - spatial.distance.cosine(tpred, pred)
if score<=0:
print 'OMG'
print score
push_pqueue(pqueue, score, filen)
if max_confidence < score:
max_confidence = score
max_file = filen
h = 0
for top20 in heapq.nlargest(len(pqueue), pqueue):
h += 1
if top20[1].split('_')[1].split('.')[0] == filen1.split('_')[1].split('.')[0]:
if h==1:
top20_correct+=1
top10_correct+=1
top5_correct+=1
top1_correct+=1
elif h>10:
top20_correct+=1
elif h>5:
top10_correct+=1
top20_correct+=1
elif h>=1:
top10_correct+=1
top20_correct+=1
top5_correct+=1
break
print "\n!@#$", top20_correct/float(len(files1)) , top10_correct/float(len(files1)), top5_correct/float(len(files1)), top1_correct, "--- %s seconds ---" % (time.time() - start_time) ,"\n"
| apache-2.0 | -2,467,667,458,289,634,000 | 29.476471 | 187 | 0.642154 | false |
eort/OpenSesame | libqtopensesame/items/qtautoplugin.py | 2 | 5912 | #-*- coding:utf-8 -*-
"""
This file is part of OpenSesame.
OpenSesame is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenSesame is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OpenSesame. If not, see <http://www.gnu.org/licenses/>.
"""
from libopensesame.py3compat import *
import os
from libopensesame import plugins
from libqtopensesame.items.qtplugin import qtplugin
from libqtopensesame import validators
from libqtopensesame.misc.translate import translation_context
from libopensesame.exceptions import osexception
_ = translation_context(u'qtautoplugin', category=u'core')
class qtautoplugin(qtplugin):
"""A class that processes auto-plugins defined in a YAML file"""
def __init__(self, plugin_file):
qtplugin.__init__(self, plugin_file)
def init_edit_widget(self):
"""Construct the GUI controls based on info.yaml"""
qtplugin.init_edit_widget(self, False)
item_type_translate = translation_context(self.item_type,
category=u'plugin')
self.info = plugins.plugin_properties(self.item_type, _type=u'plugins')
# Process the help url, if specified
if u'help' in self.info:
self.help_url = self.info[u'help']
# Some options are required. Which options are requires depends on the
# specific widget.
required = [
([u'checkbox', u'color_edit', u'combobox', u'editor', u'filepool', \
u'line_edit', u'spinbox', u'text'], [u'label']),
([u'checkbox', u'color_edit', u'combobox', u'editor', u'filepool', \
u'line_edit', u'spinbox'], [u'var']),
([u'spinbox', u'slider'], [u'min_val', u'max_val']),
([u'combobox'], [u'options']),
]
# Keywords are optional parameters that are set to some default if they
# are not specified.
keywords = {
u'info' : None,
u'min_width' : None,
u'prefix' : u'',
u'suffix' : u'',
u'left_label' : u'min.',
u'right_label' : u'max.',
u'syntax' : False
}
# This indicates whether we should pad the controls with a stretch at
# the end.
need_stretch = True
for c in self.info[u'controls']:
# Check whether all required options have been specified
if u'type' not in c:
raise osexception(
_(u'You must specify "type" for %s controls in info.yaml') \
                    % c)
for types, options in required:
if c[u'type'] in types:
for option in options:
if option not in c:
raise osexception(
_(u'You must specify "%s" for %s controls in info.yaml') \
% (option, c[u'type']))
if u'var' in c and not self.syntax.valid_var_name(c[u'var']):
raise osexception(
_(u'Invalid variable name (%s) specified in %s plugin info') %
(c[u'var'], self.item_type))
# Set missing keywords to None
for keyword, default in keywords.items():
if keyword not in c:
c[keyword] = default
# Translate translatable fields
c[u'label'] = item_type_translate(c[u'label'])
if c[u'info'] is not None:
c[u'info'] = item_type_translate(c[u'info'])
# Parse checkbox
if c[u'type'] == u'checkbox':
widget = self.add_checkbox_control(c[u'var'], c[u'label'],
info=c[u'info'])
# Parse color_edit
elif c[u'type'] == u'color_edit':
widget = self.add_color_edit_control(c[u'var'], c[u'label'],
info=c[u'info'], min_width=c[u'min_width'])
# Parse combobox
elif c[u'type'] == u'combobox':
widget = self.add_combobox_control(c[u'var'], c[u'label'],
c[u'options'], info=c[u'info'])
# Parse editor
elif c[u'type'] == u'editor':
widget = self.add_editor_control(c[u'var'], c[u'label'],
syntax=c[u'syntax'])
need_stretch = False
# Parse filepool
elif c[u'type'] == u'filepool':
widget = self.add_filepool_control(c[u'var'], c[u'label'],
info=c[u'info'])
# Parse line_edit
elif c[u'type'] == u'line_edit':
widget = self.add_line_edit_control(c[u'var'], c[u'label'],
info=c[u'info'], min_width=c[u'min_width'])
# Parse spinbox
elif c[u'type'] == u'spinbox':
widget = self.add_spinbox_control(c[u'var'], c[u'label'],
c[u'min_val'], c[u'max_val'], prefix=c[u'prefix'],
suffix=c[u'suffix'], info=c[u'info'])
# Parse slider
elif c[u'type'] == u'slider':
widget = self.add_slider_control(c[u'var'], c[u'label'],
c[u'min_val'], c[u'max_val'], left_label=c[u'left_label'],
right_label=c[u'right_label'], info=c[u'info'])
# Parse text
elif c[u'type'] == u'text':
widget = self.add_text(c[u'label'])
else:
raise Exception(_(u'"%s" is not a valid qtautoplugin control') \
                    % c[u'type'])
# Add an optional validator
if u'validator' in c:
try:
validator = getattr(validators,
u'%s_validator' % c[u'validator'])
except:
raise osexception(
u'Invalid validator: %s' % c[u'validator'])
widget.setValidator(validator(self.main_window))
# Add the widget as an item property when the 'name' option is
# specified.
if u'name' in c:
if hasattr(self, c[u'name']):
raise Exception(_(u'Name "%s" is already taken in qtautoplugin control') \
% c[u'name'])
setattr(self, c[u'name'], widget)
if need_stretch:
self.add_stretch()
self.lock = True
def apply_edit_changes(self):
"""Applies the controls. I.e. sets the variables from the controls."""
if not qtplugin.apply_edit_changes(self) or self.lock:
return False
return True
def edit_widget(self):
"""Sets the controls based on the variables."""
self.lock = True
qtplugin.edit_widget(self)
self.lock = False
return self._edit_widget
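
# Hypothetical info.yaml sketch (not part of this file): one `controls` entry
# that init_edit_widget() above can parse; all names and values are
# illustrative assumptions, not OpenSesame documentation.
#
#   controls:
#     - type: spinbox
#       var: my_duration
#       label: Duration
#       min_val: 0
#       max_val: 10000
#       suffix: ' ms'
#       info: Presentation duration in milliseconds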
| gpl-3.0 | 3,340,131,450,445,400,600 | 32.782857 | 79 | 0.653755 | false |
mfuery/google-python-exercises | basic/string2.py | 1 | 2688 | #!/usr/bin/python2.4 -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Additional basic string exercises
# D. verbing
# Given a string, if its length is at least 3,
# add 'ing' to its end.
# Unless it already ends in 'ing', in which case
# add 'ly' instead.
# If the string length is less than 3, leave it unchanged.
# Return the resulting string.
import math
def verbing(s):
if len(s) >= 3:
if s[-3:] == 'ing':
s += 'ly'
else:
s += 'ing'
return s
# E. not_bad
# Given a string, find the first appearance of the
# substring 'not' and 'bad'. If the 'bad' follows
# the 'not', replace the whole 'not'...'bad' substring
# with 'good'.
# Return the resulting string.
# So 'This dinner is not that bad!' yields:
# This dinner is good!
def not_bad(s):
iNot = s.find('not')
iBad = s.find('bad')
  if iNot != -1 and iBad != -1 and iNot < iBad:
s = s[:iNot] + 'good' + s[iBad+3:]
return s
# F. front_back
# Consider dividing a string into two halves.
# If the length is even, the front and back halves are the same length.
# If the length is odd, we'll say that the extra char goes in the front half.
# e.g. 'abcde', the front half is 'abc', the back half 'de'.
# Given 2 strings, a and b, return a string of the form
# a-front + b-front + a-back + b-back
def front_back(a, b):
aMid = int(math.ceil(len(a) / 2.))
bMid = int(math.ceil(len(b) / 2.))
# aMid = len(a) // 2
# bMid = len(b) // 2
return a[:aMid] + b[:bMid] + a[aMid:] + b[bMid:]
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# main() calls the above functions with interesting inputs,
# using the above test() to check if the result is correct or not.
def main():
print 'verbing'
test(verbing('hail'), 'hailing')
test(verbing('swiming'), 'swimingly')
test(verbing('do'), 'do')
print
print 'not_bad'
test(not_bad('This movie is not so bad'), 'This movie is good')
test(not_bad('This dinner is not that bad!'), 'This dinner is good!')
test(not_bad('This tea is not hot'), 'This tea is not hot')
test(not_bad("It's bad yet not"), "It's bad yet not")
print
print 'front_back'
test(front_back('abcd', 'xy'), 'abxcdy')
test(front_back('abcde', 'xyz'), 'abcxydez')
test(front_back('Kitten', 'Donut'), 'KitDontenut')
if __name__ == '__main__':
main()
| apache-2.0 | -8,953,862,899,602,548,000 | 27.903226 | 77 | 0.641369 | false |
oVirt/jenkins | stdci_libs/stdci_dsl/api/formatters/runners.py | 1 | 1620 | #!/bin/env python
"""runners.py - Set of data formatters for stdci runners"""
import logging
from yaml import safe_dump
_formatters = {}
logger = logging.getLogger(__name__)
class FormatterNotFoundError(Exception):
pass
def get_formatter(formatter_name):
"""Given formatter name, return formatter function
:param str formatter_name: Name of the required formatter
:rtype: function
:returns: Formatter function
"""
formatter_ = _formatters.get(formatter_name, None)
if formatter_ is None:
raise FormatterNotFoundError(
            'Could not find formatter: {0}'.format(formatter_name)
)
return formatter_
def formatter(name):
"""Decorator function for formatter registration"""
def wrapper(function):
_formatters[name] = function
logger.debug('Registered runner data formatter: %s', name)
return function
return wrapper
@formatter('yaml_dumper')
def _dump_to_yaml_formatter(obj, template=None):
# TODO: use dict comprehension as soon as python 2.6 support is dropped
repos_fmt = {}
for repo_name, repo_url in obj.repos:
repos_fmt[repo_name] = repo_url
mounts_fmt = {}
for src, dst in obj.mounts:
mounts_fmt[src] = dst
yumrepos_fmt = '' if obj.yumrepos is None else obj.yumrepos
data = {
'script': str(obj.script),
'yumrepos': yumrepos_fmt,
'environment': obj.environment,
'mounts': mounts_fmt,
'repos': repos_fmt,
'hash': obj.hash,
'packages': obj.packages
}
return safe_dump(data, default_flow_style=False)
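
if __name__ == '__main__':
    # Demonstration sketch (not part of the original module): register a
    # trivial formatter through the decorator and look it up via the public
    # API; the 'identity' name is an illustrative assumption.
    @formatter('identity')
    def _identity_formatter(obj, template=None):
        return obj

    assert get_formatter('identity') is _identity_formatter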
| gpl-3.0 | -3,148,337,800,633,962,500 | 25.129032 | 75 | 0.648148 | false |
silly-wacky-3-town-toon/SOURCE-COD | toontown/coghq/SellbotMegaCorpLegSpec.py | 1 | 138843 | from toontown.toonbase import TTLocalizer
from toontown.coghq.SpecImports import *
GlobalEntities = {1000: {'type': 'levelMgr',
'name': 'LevelMgr',
'comment': '',
'parentEntId': 0,
'cogLevel': 0,
'farPlaneDistance': 1500.0,
'modelFilename': 'phase_9/models/cogHQ/SelbotLegFactory',
'wantDoors': 1},
0: {'type': 'zone',
'name': 'UberZone',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
3: {'type': 'zone',
'name': 'Main Entrance',
'comment': '',
'parentEntId': 0,
'scale': Vec3(1, 1, 1),
'description': TTLocalizer.SellbotLegFactorySpecMainEntrance,
'visibility': [114]},
4: {'type': 'zone',
'name': 'Lobby',
'comment': '',
'parentEntId': 0,
'scale': Vec3(1, 1, 1),
'description': TTLocalizer.SellbotLegFactorySpecLobby,
'visibility': [113, 114]},
5: {'type': 'zone',
'name': 'hallwayFromLobby',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': [113, 116]},
6: {'type': 'zone',
'name': 'hallwayToBoiler/Control/Lookout',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecLobbyHallway,
'visibility': [109,
116,
117,
118]},
7: {'type': 'zone',
'name': 'GearRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecGearRoom,
'visibility': [109, 110]},
8: {'type': 'zone',
'name': 'BoilerRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecBoilerRoom,
'visibility': [108, 117]},
9: {'type': 'zone',
'name': 'EastCatwalk',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecEastCatwalk,
'visibility': [23,
25,
26,
33,
34,
35,
38,
41,
53,
110,
112,
115,
124,
200,
222]},
10: {'type': 'zone',
'name': 'PaintMixer',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecPaintMixer,
'visibility': [11, 111, 112]},
11: {'type': 'zone',
'name': 'PaintMixerRewardRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecPaintMixerStorageRoom,
'visibility': [10, 111, 112]},
12: {'type': 'zone',
'name': 'WestSiloCatwalk',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecWestSiloCatwalk,
'visibility': [21,
26,
33,
34,
35,
36,
37,
40,
41,
53,
60,
61,
108,
110,
119,
120,
125,
127,
128,
129,
130,
200]},
13: {'type': 'zone',
'name': 'PipeRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecPipeRoom,
'visibility': [119, 121]},
14: {'type': 'zone',
'name': 'StairsToPipeRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': [17,
18,
121,
126,
131]},
15: {'type': 'zone',
'name': 'DuctRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecDuctRoom,
'visibility': [106, 126]},
16: {'type': 'zone',
'name': 'Side Entrance',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecSideEntrance,
'visibility': [106]},
17: {'type': 'zone',
'name': 'StomperAlley',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecStomperAlley,
'visibility': [14,
121,
126,
131]},
18: {'type': 'zone',
'name': 'LavaRoomFoyer',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecLavaRoomFoyer,
'visibility': [19,
20,
102,
103,
105,
131]},
19: {'type': 'zone',
'name': 'LavaRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecLavaRoom,
'visibility': [17,
18,
20,
105,
131]},
20: {'type': 'zone',
'name': 'LavaRewardRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecLavaStorageRoom,
'visibility': [18, 19, 105]},
21: {'type': 'zone',
'name': 'WestCatwalk',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecWestCatwalk,
'visibility': [12,
23,
26,
33,
34,
35,
40,
41,
53,
60,
108,
119,
120,
125,
127,
200]},
22: {'type': 'zone',
'name': 'OilRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecOilRoom,
'visibility': [107]},
23: {'type': 'zone',
'name': 'Lookout',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecLookout,
'visibility': [24,
39,
115,
118,
120,
123,
124,
125]},
24: {'type': 'zone',
'name': 'Warehouse',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecWarehouse,
'visibility': [23,
39,
115,
120,
123,
124,
125]},
25: {'type': 'zone',
'name': 'PaintMixerExterior',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
26: {'type': 'zone',
'name': 'WarehouseExterior',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
27: {'type': 'zone',
'name': 'OilRoomHallway',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecOilRoomHallway,
'visibility': [105, 107, 127]},
30: {'type': 'zone',
'name': 'EastSiloControlRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecEastSiloControlRoom,
'visibility': [130]},
31: {'type': 'zone',
'name': 'WestSiloControlRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecWestSiloControlRoom,
'visibility': [128]},
32: {'type': 'zone',
'name': 'CenterSiloControlRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecCenterSiloControlRoom,
'visibility': [129]},
33: {'type': 'zone',
'name': 'EastSilo',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecEastSilo,
'visibility': [9,
12,
21,
25,
26,
34,
35,
36,
37,
38,
40,
41,
53,
60,
61,
108,
110,
112,
119,
124,
128,
129,
130,
200,
222]},
34: {'type': 'zone',
'name': 'WestSilo',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecWestSilo,
'visibility': [9,
12,
21,
25,
26,
33,
35,
36,
37,
40,
41,
53,
60,
61,
108,
110,
112,
119,
120,
125,
127,
128,
129,
130,
200]},
35: {'type': 'zone',
'name': 'CenterSilo',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecCenterSilo,
'visibility': [9,
21,
25,
26,
33,
34,
36,
37,
40,
41,
53,
60,
61,
108,
110,
112,
119,
128,
129,
130,
200]},
36: {'type': 'zone',
'name': 'WestSiloBridge',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': [9,
12,
21,
25,
26,
33,
34,
35,
36,
37,
40,
41,
53,
60,
61,
108,
110,
112,
119,
127,
128,
129,
130,
200]},
37: {'type': 'zone',
'name': 'EastSiloBridge',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': [9,
12,
21,
25,
26,
33,
34,
35,
36,
37,
38,
40,
41,
53,
60,
61,
108,
110,
112,
119,
128,
129,
130,
200,
222]},
38: {'type': 'zone',
'name': 'EastSiloCatwalk',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecEastSiloCatwalk,
'visibility': [9,
25,
26,
33,
34,
35,
36,
37,
41,
53,
60,
110,
112,
115,
124,
200,
222]},
39: {'type': 'zone',
'name': 'WarehouseCeiling',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
40: {'type': 'zone',
'name': 'WestExterior',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
41: {'type': 'zone',
'name': 'EastExterior',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
53: {'type': 'zone',
'name': 'ExteriorFloor',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
60: {'type': 'zone',
'name': 'WestElevatorShaft',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecWestElevatorShaft,
'visibility': [12, 34]},
61: {'type': 'zone',
'name': 'EastElevatorShaft',
'comment': 'no geom or DCS',
'parentEntId': 0,
'scale': 1,
'description': TTLocalizer.SellbotLegFactorySpecEastElevatorShaft,
'visibility': [33, 38]},
101: {'type': 'zone',
'name': 'dwToLavaRewardRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
102: {'type': 'zone',
'name': 'dwToLavaRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
103: {'type': 'zone',
'name': 'dwToLavaRoomHallway',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
105: {'type': 'zone',
'name': 'dwToOilRoomCatwalks',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
106: {'type': 'zone',
'name': 'dwToDuctRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
107: {'type': 'zone',
'name': 'dwToOilRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
108: {'type': 'zone',
'name': 'dwFromBoilerRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
109: {'type': 'zone',
'name': 'dwToGearRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
110: {'type': 'zone',
'name': 'dwFromGearRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
111: {'type': 'zone',
'name': 'dwToPaintMixerRewardRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
112: {'type': 'zone',
'name': 'dwToPaintMixer',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
113: {'type': 'zone',
'name': 'dwFromLobby',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
114: {'type': 'zone',
'name': 'dwToLobby',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
115: {'type': 'zone',
'name': 'dwToWarehouseFromRight',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
116: {'type': 'zone',
'name': 'dwFromLobbyFar',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
117: {'type': 'zone',
'name': 'dwToBoilerRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
118: {'type': 'zone',
'name': 'dwToLookout',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
119: {'type': 'zone',
'name': 'dwFromPipeRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
120: {'type': 'zone',
'name': 'dwToWarehouseFromLeft',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
121: {'type': 'zone',
'name': 'dwToPipeRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
122: {'type': 'zone',
'name': 'dwToWarehouseControlRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
123: {'type': 'zone',
'name': 'dwFromWarehouseFloor',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
124: {'type': 'zone',
'name': 'dwFromWarehouseRight',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
125: {'type': 'zone',
'name': 'dwFromWarehouseLeft',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
126: {'type': 'zone',
'name': 'dwFromDuctRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
127: {'type': 'zone',
'name': 'dwFromOilRoomHallway',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
128: {'type': 'zone',
'name': 'dwToWestSiloRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
129: {'type': 'zone',
'name': 'dwToCenterSiloRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
130: {'type': 'zone',
'name': 'dwToEastSiloRoom',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
131: {'type': 'zone',
'name': 'dwFromStomperAlley',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
200: {'type': 'zone',
'name': 'sky',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
201: {'type': 'zone',
'name': 'extraZone201',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
202: {'type': 'zone',
'name': 'extraZone202',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
203: {'type': 'zone',
'name': 'extraZone203',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
204: {'type': 'zone',
'name': 'extraZone204',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
205: {'type': 'zone',
'name': 'extraZone205',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
206: {'type': 'zone',
'name': 'extraZone206',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
207: {'type': 'zone',
'name': 'extraZone207',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
208: {'type': 'zone',
'name': 'extraZone208',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
209: {'type': 'zone',
'name': 'extraZone209',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
210: {'type': 'zone',
'name': 'extraZone210',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
211: {'type': 'zone',
'name': 'extraZone211',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
212: {'type': 'zone',
'name': 'extraZone212',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
213: {'type': 'zone',
'name': 'extraZone213',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
214: {'type': 'zone',
'name': 'extraZone214',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
215: {'type': 'zone',
'name': 'extraZone215',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
216: {'type': 'zone',
'name': 'extraZone216',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
217: {'type': 'zone',
'name': 'extraZone217',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
218: {'type': 'zone',
'name': 'extraZone218',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
219: {'type': 'zone',
'name': 'extraZone219',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
220: {'type': 'zone',
'name': 'extraZone220',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
221: {'type': 'zone',
'name': 'extraZone221',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
222: {'type': 'zone',
'name': 'dwToEastSiloInterior',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
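    # ambientSound entities: looping sound effects placed in the level.
    # 'soundPath' names the audio file and 'volume' its gain; 'parentEntId'
    # attaches the sound to a zone or nodepath (both wind loops hang off zone 35).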
10010: {'type': 'ambientSound',
'name': 'westWind',
'comment': '',
'parentEntId': 35,
'pos': Point3(-52.7549, -38.8374, 53.3758),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'enabled': 1,
'soundPath': 'phase_9/audio/sfx/CHQ_FACT_whistling_wind.ogg',
'volume': 1},
10016: {'type': 'ambientSound',
'name': 'sndConveyorBelt',
'comment': '',
'parentEntId': 10056,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'enabled': 1,
'soundPath': 'phase_9/audio/sfx/CHQ_FACT_conveyor_belt.ogg',
'volume': 0.5},
10053: {'type': 'ambientSound',
'name': 'eastWind',
'comment': '',
'parentEntId': 35,
'pos': Point3(52.75, -38.84, 53.38),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'enabled': 1,
'soundPath': 'phase_9/audio/sfx/CHQ_FACT_whistling_wind.ogg',
'volume': 1},
10055: {'type': 'ambientSound',
'name': 'sndGears',
'comment': '',
'parentEntId': 10056,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'enabled': 1,
'soundPath': 'phase_9/audio/sfx/CHQ_FACT_gears_turning.ogg',
'volume': 1},
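    # battleBlocker entities: invisible barriers tied to a battle cell via
    # 'cellId'; presumably each blocks passage until the Cog battle in that
    # cell has been resolved.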
10031: {'type': 'battleBlocker',
'name': '<unnamed>',
'comment': '',
'parentEntId': 8,
'pos': Point3(-1, 79, 10),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1.75, 1, 1),
'cellId': 1,
'radius': 10.0},
10035: {'type': 'battleBlocker',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10039,
'pos': Point3(0, 0, 0),
'hpr': Point3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'cellId': 4,
'radius': 10.0},
10038: {'type': 'battleBlocker',
'name': '<unnamed>',
'comment': '',
'parentEntId': 7,
'pos': Point3(0, -28.04, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'cellId': 5,
'radius': 10.0},
20048: {'type': 'battleBlocker',
'name': '<unnamed>',
'comment': '',
'parentEntId': 4,
'pos': Point3(0.973602, 71.7, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1, 0.2, 1),
'cellId': 0,
'radius': 15.0},
20063: {'type': 'battleBlocker',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20033,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'cellId': 8,
'radius': 1},
20064: {'type': 'battleBlocker',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20034,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'cellId': 8,
'radius': 1},
20065: {'type': 'battleBlocker',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20035,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'cellId': 8,
'radius': 1},
20066: {'type': 'battleBlocker',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20036,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'cellId': 8,
'radius': 1},
20086: {'type': 'battleBlocker',
'name': '<unnamed>',
'comment': '',
'parentEntId': 15,
'pos': Point3(0, 33, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(2, 1, 1),
'cellId': 6,
'radius': 12.0},
20112: {'type': 'battleBlocker',
'name': '<unnamed>',
'comment': '',
'parentEntId': 33,
'pos': Point3(-10.0936, -9.55975, 4),
'hpr': Point3(45, 0, 0),
'scale': Point3(10, 1, 5),
'cellId': 10,
'radius': 5.0},
20113: {'type': 'battleBlocker',
'name': '<unnamed>',
'comment': '',
'parentEntId': 34,
'pos': Point3(9.08399, 4.42157, 0),
'hpr': Point3(-50, 0, 0),
'scale': Point3(10, 2, 6),
'cellId': 9,
'radius': 5.0},
20114: {'type': 'battleBlocker',
'name': '<unnamed>',
'comment': '',
'parentEntId': 60103,
'pos': Point3(0, 0, 1),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1, 1, 0.5),
'cellId': 8,
'radius': 3.0},
10003: {'type': 'beanBarrel',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20,
'pos': Point3(1.25458, 19.2471, 0.0249529),
'hpr': Vec3(-8.28434, 0, 0),
'scale': 1,
'rewardPerGrab': 100,
'rewardPerGrabMax': 250},
10011: {'type': 'beanBarrel',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20,
'pos': Point3(16.344, -9.73, 0.025),
'hpr': Vec3(-79.8888, 0, 0),
'scale': 1,
'rewardPerGrab': 100,
'rewardPerGrabMax': 250},
20017: {'type': 'beanBarrel',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10022,
'pos': Point3(20.0035, 2.94232, 0),
'hpr': Vec3(-31.6033, 0, 0),
'scale': 1,
'rewardPerGrab': 70,
'rewardPerGrabMax': 120},
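    # button entities: floor switches that emit their own entity ID as an event
    # when pressed; 'secondsOn' of -1 appears to mean the button latches on
    # permanently. Doors and logicGates below consume these button events.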
10039: {'type': 'button',
'name': '<unnamed>',
'comment': '',
'parentEntId': 22,
'pos': Point3(-7, 29, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(4, 4, 4),
'color': Vec4(1, 0, 0, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': -1.0},
20033: {'type': 'button',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20022,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(3, 3, 3),
'color': Vec4(0.862745, 0.517647, 0.0941177, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': 1},
20034: {'type': 'button',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20022,
'pos': Point3(7.5, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(3, 3, 3),
'color': Vec4(0.862745, 0.517647, 0.0941177, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': 1},
20035: {'type': 'button',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20022,
'pos': Point3(15, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(3, 3, 3),
'color': Vec4(0.862745, 0.517647, 0.0941177, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': 1},
20036: {'type': 'button',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20022,
'pos': Point3(22.5, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(3, 3, 3),
'color': Vec4(0.862745, 0.517647, 0.0941177, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': 1},
30040: {'type': 'button',
'name': 'door button',
'comment': 'Entrance door unlock',
'parentEntId': 3,
'pos': Point3(0, 6.75, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(3, 3, 3),
'color': Vec4(1, 0, 0, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': -1.0},
30076: {'type': 'button',
'name': 'open door 113',
'comment': 'Lobby door unlock',
'parentEntId': 4,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(3, 3, 3),
'color': Vec4(1, 0, 0, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': -1},
60102: {'type': 'button',
'name': 'door button',
'comment': 'Entrance Door Unlock',
'parentEntId': 16,
'pos': Point3(4, 8, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(3, 3, 3),
'color': Vec4(1, 0, 0, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': -1.0},
60103: {'type': 'button',
'name': 'door button',
'comment': '',
'parentEntId': 20022,
'pos': Point3(25, -7, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(4, 4, 4),
'color': Vec4(1, 0, 0, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': -1.0},
60104: {'type': 'button',
'name': '<unnamed>',
'comment': '',
'parentEntId': 31,
'pos': Point3(0, 10, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(5, 5, 4),
'color': Vec4(1, 0, 0, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': -1.0},
60105: {'type': 'button',
'name': 'door button',
'comment': '',
'parentEntId': 30,
'pos': Point3(-4, 7, 0),
'hpr': Point3(0, 0, 0),
'scale': Point3(5, 5, 4),
'color': Vec4(1, 0, 0, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': -1.0},
60118: {'type': 'button',
'name': '<unnamed>',
'comment': '',
'parentEntId': 15,
'pos': Point3(0, 20, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(3, 3, 3),
'color': Vec4(1, 0, 0, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': -1.0},
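    # conveyorBelt: a moving tread built from repeated 'treadModelPath' segments
    # of 'treadLength', advancing at 'speed' along the full 'length'.
    # crate entities just below: pushable boxes constrained to a grid ('gridId').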
10005: {'type': 'conveyorBelt',
'name': 'belt',
'comment': '',
'parentEntId': 10000,
'pos': Point3(0, 45.2024, 7.24937),
'hpr': Point3(180, 0, 0),
'scale': 1,
'floorName': 'platformcollision',
'length': 78.81881352704218,
'speed': 2.0,
'treadLength': 10.0,
'treadModelPath': 'phase_9/models/cogHQ/platform1',
'widthScale': 0.85},
20081: {'type': 'crate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20080,
'pos': Point3(0, 0, 0),
'scale': 0.920000016689,
'crushCellId': None,
'gridId': 20080,
'modelType': 0,
'pushable': 1},
20091: {'type': 'crate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20090,
'pos': Point3(0, 23, 0),
'scale': 0.920000016689,
'crushCellId': None,
'gridId': 20090,
'modelType': 0,
'pushable': 1},
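    # crusherCell entities: (row, col) cells on grid 20025 (defined further
    # down). These presumably mark spots where a stomper can crush a goon
    # occupying the cell; goons reference them via 'crushCellId'.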
20024: {'type': 'crusherCell',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20023,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'col': 1,
'gridId': 20025,
'row': 14},
20026: {'type': 'crusherCell',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20023,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'col': 10,
'gridId': 20025,
'row': 14},
20027: {'type': 'crusherCell',
'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 20023,
'pos': Point3(1, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'col': 21,
'gridId': 20025,
'row': 14},
20028: {'type': 'crusherCell',
'name': 'copy of copy of <unnamed>',
'comment': '',
'parentEntId': 20023,
'pos': Point3(2, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'col': 28,
'gridId': 20025,
'row': 14},
30078: {'type': 'cutScene',
'name': 'button door',
'comment': '',
'parentEntId': 114,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'duration': 4.0,
'effect': 'irisInOut',
'motion': 'doorUnlock',
'startStopEvent': 30077},
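    # door entities: each door has four lock slots. A slot with
    # 'isLockNUnlocked': 0 stays locked until the event named in 'unlockNEvent'
    # fires -- e.g. door 10052 waits on button 10039, and several doors wait on
    # the OR-gates 60119/60132 defined in the logicGate section.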
10002: {'type': 'door',
'name': '<unnamed>',
'comment': '',
'parentEntId': 128,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
10052: {'type': 'door',
'name': 'door 127',
'comment': '',
'parentEntId': 127,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 0,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 10039,
'unlock2Event': 0,
'unlock3Event': 0},
30000: {'type': 'door',
'name': '<unnamed>',
'comment': '',
'parentEntId': 114,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 0,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 60132,
'unlock2Event': 0,
'unlock3Event': 0},
30001: {'type': 'door',
'name': '<unnamed>',
'comment': '',
'parentEntId': 105,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1.0,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30002: {'type': 'door',
'name': 'door 106',
'comment': '',
'parentEntId': 106,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 0,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 60132,
'unlock2Event': 0,
'unlock3Event': 0},
30003: {'type': 'door',
'name': '<unnamed>',
'comment': '',
'parentEntId': 107,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30004: {'type': 'door',
'name': 'doorFromBoilerRoom',
'comment': '',
'parentEntId': 108,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30005: {'type': 'door',
'name': '<unnamed>',
'comment': '',
'parentEntId': 109,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30006: {'type': 'door',
'name': '<unnamed>',
'comment': '',
'parentEntId': 110,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30008: {'type': 'door',
'name': '<unnamed>',
'comment': '',
'parentEntId': 112,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30009: {'type': 'door',
'name': 'door 113',
'comment': '',
'parentEntId': 113,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 0,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 60119,
'unlock2Event': 0,
'unlock3Event': 0},
30010: {'type': 'door',
'name': '<unnamed>',
'comment': '',
'parentEntId': 115,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30011: {'type': 'door',
'name': '<unnamed>',
'comment': '',
'parentEntId': 116,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30012: {'type': 'door',
'name': '<unnamed>',
'comment': '',
'parentEntId': 117,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30013: {'type': 'door',
'name': '<unnamed>',
'comment': '',
'parentEntId': 118,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30014: {'type': 'door',
'name': 'doorFromPipeRoom 119',
'comment': '',
'parentEntId': 119,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30015: {'type': 'door',
'name': 'door 120',
'comment': '',
'parentEntId': 120,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30016: {'type': 'door',
'name': 'door 121',
'comment': '',
'parentEntId': 121,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30017: {'type': 'door',
'name': '<unnamed>',
'comment': '',
'parentEntId': 122,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30018: {'type': 'door',
'name': '<unnamed>',
'comment': '',
'parentEntId': 123,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 0,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 0,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 60103,
'unlock2Event': 0,
'unlock3Event': 0},
30019: {'type': 'door',
'name': '<unnamed>',
'comment': '',
'parentEntId': 124,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30020: {'type': 'door',
'name': '<unnamed>',
'comment': '',
'parentEntId': 125,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
30021: {'type': 'door',
'name': '<unnamed>',
'comment': '',
'parentEntId': 126,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 0,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 60119,
'unlock2Event': 0,
'unlock3Event': 0},
60088: {'type': 'door',
'name': '<unnamed>',
'comment': '',
'parentEntId': 131,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1.0,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
60094: {'type': 'door',
'name': '<unnamed>',
'comment': '',
'parentEntId': 129,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 0,
'isLock2Unlocked': 0,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1.0,
'unlock0Event': 0,
'unlock1Event': 60104,
'unlock2Event': 60105,
'unlock3Event': 0},
60095: {'type': 'door',
'name': '<unnamed>',
'comment': '',
'parentEntId': 130,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
60101: {'type': 'door',
'name': '<unnamed>',
'comment': '',
'parentEntId': 222,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 1,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 0,
'unlock2Event': 0,
'unlock3Event': 0},
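    # entityGroup entities named 'viz': one per zone, presumably editor-side
    # groupings for that zone's visibility-related children.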
10049: {'type': 'entityGroup',
'name': 'viz',
'comment': '',
'parentEntId': 3},
10051: {'type': 'entityGroup',
'name': 'viz',
'comment': '',
'parentEntId': 4},
60000: {'type': 'entityGroup',
'name': 'viz',
'comment': '',
'parentEntId': 5},
60001: {'type': 'entityGroup',
'name': 'viz',
'comment': '',
'parentEntId': 6},
60002: {'type': 'entityGroup',
'name': 'viz',
'comment': '',
'parentEntId': 7},
60003: {'type': 'entityGroup',
'name': 'viz',
'comment': '',
'parentEntId': 9},
60004: {'type': 'entityGroup',
'name': 'viz',
'comment': '',
'parentEntId': 10},
60005: {'type': 'entityGroup',
'name': 'viz',
'comment': '',
'parentEntId': 8},
60006: {'type': 'entityGroup',
'name': 'viz',
'comment': '',
'parentEntId': 21},
60007: {'type': 'entityGroup',
'name': 'viz',
'comment': '',
'parentEntId': 24},
60009: {'type': 'entityGroup',
'name': 'viz',
'comment': '',
'parentEntId': 38},
60011: {'type': 'entityGroup',
'name': 'viz',
'comment': '',
'parentEntId': 12},
60013: {'type': 'entityGroup',
'name': 'viz',
'comment': '',
'parentEntId': 13},
60014: {'type': 'entityGroup',
'name': 'viz',
'comment': '',
'parentEntId': 14},
60015: {'type': 'entityGroup',
'name': 'viz',
'comment': '',
'parentEntId': 17},
60016: {'type': 'entityGroup',
'name': 'viz',
'comment': '',
'parentEntId': 15},
60017: {'type': 'entityGroup',
'name': 'viz',
'comment': '',
'parentEntId': 16},
60018: {'type': 'entityGroup',
'name': 'viz',
'comment': '',
'parentEntId': 19},
60019: {'type': 'entityGroup',
'name': 'viz',
'comment': '',
'parentEntId': 18},
60024: {'type': 'entityGroup',
'name': 'viz',
'comment': '',
'parentEntId': 22},
60031: {'type': 'entityGroup',
'name': 'viz',
'comment': '',
'parentEntId': 23},
60044: {'type': 'entityGroup',
'name': 'viz',
'comment': '',
'parentEntId': 33},
60066: {'type': 'entityGroup',
'name': 'viz',
'comment': '',
'parentEntId': 11},
60067: {'type': 'entityGroup',
'name': 'viz',
'comment': '',
'parentEntId': 27},
60096: {'type': 'entityGroup',
'name': 'viz',
'comment': '',
'parentEntId': 30},
60108: {'type': 'entityGroup',
'name': 'viz',
'comment': '',
'parentEntId': 34},
60111: {'type': 'entityGroup',
'name': 'viz',
'comment': '',
'parentEntId': 36},
60114: {'type': 'entityGroup',
'name': 'viz',
'comment': '',
'parentEntId': 37},
60121: {'type': 'entityGroup',
'name': 'viz',
'comment': '',
'parentEntId': 31},
60126: {'type': 'entityGroup',
'name': 'viz',
'comment': '',
'parentEntId': 35},
60130: {'type': 'entityGroup',
'name': 'viz',
'comment': '',
'parentEntId': 32},
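    # entrancePoint entities: toon spawn locations; 'entranceId' selects which
    # entrance is used, while 'radius' and 'theta' presumably shape how arriving
    # toons are fanned out around the point.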
10028: {'type': 'entrancePoint',
'name': 'entrance1',
'comment': '',
'parentEntId': 3,
'pos': Point3(0, 10, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'entranceId': 0,
'radius': 15,
'theta': 20},
10029: {'type': 'entrancePoint',
'name': 'entrance2',
'comment': '',
'parentEntId': 16,
'pos': Point3(0, 10, 0),
'hpr': Point3(0, 0, 0),
'scale': 1,
'entranceId': 1,
'radius': 15,
'theta': 20},
10330: {'type': 'entrancePoint',
'name': 'entrance3',
'comment': '',
'parentEntId': 3,
'pos': Point3(0, 10, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'entranceId': 2,
'radius': 15,
'theta': 20},
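    # gagBarrel entities: grabbable barrels that restock gags. 'gagTrack'
    # indexes the standard track order (0 Toon-Up, 1 Trap, 2 Lure, 3 Sound,
    # 4 Throw, 5 Squirt, 6 Drop), 'gagLevel' the gag within the track, and
    # 'rewardPerGrab' how many gags one grab yields.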
10021: {'type': 'gagBarrel',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10022,
'pos': Point3(-2.02081, 0, 0),
'hpr': Vec3(337.477, 0, 0),
'scale': 1,
'gagLevel': 4,
'gagLevelMax': 0,
'gagTrack': 0,
'rewardPerGrab': 3,
'rewardPerGrabMax': 0},
10024: {'type': 'gagBarrel',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10022,
'pos': Point3(20.3012, -26.3219, 0),
'hpr': Vec3(233.187, 0, 0),
'scale': 1,
'gagLevel': 4,
'gagLevelMax': 0,
'gagTrack': 4,
'rewardPerGrab': 4,
'rewardPerGrabMax': 0},
10025: {'type': 'gagBarrel',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10022,
'pos': Point3(-47.312, 7.22571, 0),
'hpr': Vec3(19.1524, 0, 0),
'scale': 1,
'gagLevel': 5,
'gagLevelMax': 0,
'gagTrack': 2,
'rewardPerGrab': 5,
'rewardPerGrabMax': 0},
10026: {'type': 'gagBarrel',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10022,
'pos': Point3(-11.2037, 5.43514, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'gagLevel': 5,
'gagLevelMax': 0,
'gagTrack': 5,
'rewardPerGrab': 4,
'rewardPerGrabMax': 0},
10227: {'type': 'gagBarrel',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10022,
'pos': Point3(23.59, 4.83, 0),
'hpr': Vec3(180, 0, 0),
'scale': 1,
'gagLevel': 5,
'gagLevelMax': 0,
'gagTrack': 1,
'rewardPerGrab': 5,
'rewardPerGrabMax': 0},
20020: {'type': 'gagBarrel',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20,
'pos': Point3(-23.0209, 0, 0),
'hpr': Vec3(126.676, 0, 0),
'scale': 1,
'gagLevel': 5,
'gagLevelMax': 0,
'gagTrack': 3,
'rewardPerGrab': 5,
'rewardPerGrabMax': 0},
20021: {'type': 'gagBarrel',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20,
'pos': Point3(-31.3225, 14.1021, 0),
'hpr': Vec3(-136.57, 0, 0),
'scale': 1,
'gagLevel': 5,
'gagLevelMax': 0,
'gagTrack': 1,
'rewardPerGrab': 3,
'rewardPerGrabMax': 0},
20085: {'type': 'gagBarrel',
'name': '<unnamed>',
'comment': '',
'parentEntId': 5,
'pos': Point3(3.14, 12.6703, 10.12),
'hpr': Vec3(-24.8105, 0, 0),
'scale': 1,
'gagLevel': 5,
'gagLevelMax': 0,
'gagTrack': 4,
'rewardPerGrab': 5,
'rewardPerGrabMax': 0},
20093: {'type': 'gagBarrel',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20087,
'pos': Point3(2.4, -1, 7),
'hpr': Vec3(-151.532, 0, 0),
'scale': 1,
'gagLevel': 5,
'gagLevelMax': 0,
'gagTrack': 6,
'rewardPerGrab': 5,
'rewardPerGrabMax': 0},
10006: {'type': 'gear',
'name': 'first',
'comment': '',
'parentEntId': 10004,
'pos': Point3(0, 0, 26.0634),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'degreesPerSec': 37.0,
'gearScale': 25.0,
'modelType': 'factory',
'orientation': 'vertical',
'phaseShift': 0},
10007: {'type': 'gear',
'name': 'second',
'comment': '',
'parentEntId': 10004,
'pos': Point3(0, 15, 26.06),
'hpr': Point3(180, 0, 0),
'scale': 1,
'degreesPerSec': 40.0,
'gearScale': 25.0,
'modelType': 'factory',
'orientation': 'vertical',
'phaseShift': 0},
10008: {'type': 'gear',
'name': 'third',
'comment': '',
'parentEntId': 10004,
'pos': Point3(0, 30, 26.06),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'degreesPerSec': 43.0,
'gearScale': 25.0,
'modelType': 'factory',
'orientation': 'vertical',
'phaseShift': 0},
10009: {'type': 'gear',
'name': 'fourth',
'comment': '',
'parentEntId': 10004,
'pos': Point3(0, 45, 26.06),
'hpr': Point3(180, 0, 0),
'scale': 1,
'degreesPerSec': 47.0,
'gearScale': 25.0,
'modelType': 'factory',
'orientation': 'vertical',
'phaseShift': 0},
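    # goon entities: patrolling security robots. 'hFov' and 'attackRadius'
    # shape the detection cone, 'strength' presumably sets the damage dealt,
    # and 'crushCellId'/'gridId' tie a goon to a crusherCell so a stomper can
    # squash it there.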
20013: {'type': 'goon',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20012,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 2.25,
'attackRadius': 15,
'crushCellId': None,
'goonType': 'pg',
'gridId': None,
'hFov': 70,
'strength': 15,
'velocity': 4},
20014: {'type': 'goon',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20010,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 2.25,
'attackRadius': 15,
'crushCellId': None,
'goonType': 'pg',
'gridId': None,
'hFov': 70,
'strength': 15,
'velocity': 4},
20016: {'type': 'goon',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20015,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 2.25,
'attackRadius': 15,
'crushCellId': None,
'goonType': 'pg',
'gridId': None,
'hFov': 70,
'strength': 15,
'velocity': 4},
20041: {'type': 'goon',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20040,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 2.5,
'attackRadius': 15,
'crushCellId': 20026,
'goonType': 'pg',
'gridId': 20025,
'hFov': 80.0,
'strength': 20,
'velocity': 6.0},
20043: {'type': 'goon',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20042,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 2.25,
'attackRadius': 15,
'crushCellId': 20024,
'goonType': 'pg',
'gridId': 20025,
'hFov': 80.0,
'strength': 15,
'velocity': 5.0},
20046: {'type': 'goon',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20044,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 2.4,
'attackRadius': 15,
'crushCellId': 20024,
'goonType': 'pg',
'gridId': 20025,
'hFov': 70,
'strength': 17,
'velocity': 6.0},
20047: {'type': 'goon',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20045,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 3.0,
'attackRadius': 15,
'crushCellId': 20026,
'goonType': 'pg',
'gridId': 20025,
'hFov': 80.0,
'strength': 6,
'velocity': 8.0},
20052: {'type': 'goon',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20051,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1.5,
'attackRadius': 12.0,
'crushCellId': None,
'goonType': 'pg',
'gridId': None,
'hFov': 70,
'strength': 15,
'velocity': 4.0},
20054: {'type': 'goon',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20053,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1.5,
'attackRadius': 15,
'crushCellId': 20027,
'goonType': 'pg',
'gridId': 20025,
'hFov': 80.0,
'strength': 10,
'velocity': 5.5},
20056: {'type': 'goon',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20055,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1.5,
'attackRadius': 15,
'crushCellId': 20028,
'goonType': 'pg',
'gridId': 20025,
'hFov': 70,
'strength': 10,
'velocity': 6.0},
20060: {'type': 'goon',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20059,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1.5,
'attackRadius': 15,
'crushCellId': 20028,
'goonType': 'pg',
'gridId': 20025,
'hFov': 90.0,
'strength': 10,
'velocity': 6.5},
20062: {'type': 'goon',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20061,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1.5,
'attackRadius': 15,
'crushCellId': 20027,
'goonType': 'pg',
'gridId': 20025,
'hFov': 70,
'strength': 20,
'velocity': 7.5},
20071: {'type': 'goon',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20070,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1.5,
'attackRadius': 15,
'crushCellId': None,
'goonType': 'pg',
'gridId': None,
'hFov': 70,
'strength': 30,
'velocity': 3.0},
20072: {'type': 'goon',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20069,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 2.0,
'attackRadius': 15,
'crushCellId': None,
'goonType': 'pg',
'gridId': None,
'hFov': 80.0,
'strength': 12,
'velocity': 8.0},
20074: {'type': 'goon',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20073,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1.25,
'attackRadius': 15,
'crushCellId': None,
'goonType': 'pg',
'gridId': None,
'hFov': 70,
'strength': 21,
'velocity': 4},
20089: {'type': 'goon',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20084,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 3.5,
'attackRadius': 15,
'crushCellId': None,
'goonType': 'pg',
'gridId': None,
'hFov': 70,
'strength': 3,
'velocity': 8},
20115: {'type': 'goonClipPlane',
'name': '<unnamed>',
'comment': '',
'parentEntId': 4,
'pos': Point3(0, -7.4, 0),
'hpr': Point3(-90, 0, 0),
'scale': Point3(5, 5, 5),
'goonId': 20052},
20116: {'type': 'goonClipPlane',
'name': '<unnamed>',
'comment': '',
'parentEntId': 4,
'pos': Point3(0, -58, 0),
'hpr': Point3(90, 0, 0),
'scale': 1,
'goonId': None},
20117: {'type': 'goonClipPlane',
'name': '<unnamed>',
'comment': '',
'parentEntId': 24,
'pos': Point3(0, -29, 0),
'hpr': Point3(90, 0, 0),
'scale': 1,
'goonId': None},
20118: {'type': 'goonClipPlane',
'name': '<unnamed>',
'comment': '',
'parentEntId': 24,
'pos': Point3(-52, 0, 5),
'hpr': Point3(0, 0, 0),
'scale': 1,
'goonId': None},
20025: {'type': 'grid',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20023,
'pos': Point3(-48.4442, -24.9385, 0),
'scale': 1,
'cellSize': 3,
'numCol': 30,
'numRow': 16},
20080: {'type': 'grid',
'name': '<unnamed>',
'comment': '',
'parentEntId': 5,
'pos': Point3(1.5, -10.7, 0),
'scale': 1,
'cellSize': 3,
'numCol': 2,
'numRow': 5},
20090: {'type': 'grid',
'name': '<unnamed>',
'comment': '',
'parentEntId': 17,
'pos': Point3(-6.5, -111, 0),
'scale': 1,
'cellSize': 3,
'numCol': 2,
'numRow': 9},
20011: {'type': 'healBarrel',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20,
'pos': Point3(-2.06235, 20.2198, 0.025),
'hpr': Vec3(-19.2153, 0, 0),
'scale': 1,
'rewardPerGrab': 10,
'rewardPerGrabMax': 0},
20092: {'type': 'healBarrel',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20087,
'pos': Point3(-1, -1.5, 7),
'hpr': Vec3(-191.79, 0, 0),
'scale': 1,
'rewardPerGrab': 5,
'rewardPerGrabMax': 0},
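    # lift entities: elevators that travel from 'startPos' to 'endPos' over
    # 'duration' seconds; the start/endBoardSides lists control which edges
    # toons can board or exit from at each end of the ride.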
10041: {'type': 'lift',
'name': 'westLift',
'comment': '',
'parentEntId': 60,
'pos': Point3(0, 0, 0.0641994),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'autoMoveDelay': 5,
'duration': 7.0,
'endBoardSides': ['back'],
'endGuardName': 'topGuard',
'endPos': Point3(0, 0, 165),
'floorName': 'elevator_floor',
'modelPath': 'phase_9/models/cogHQ/Elevator.bam',
'modelScale': Vec3(1, 1, 1),
'moveDelay': 1,
'startBoardSides': ['front'],
'startGuardName': 'bottomGuard',
'startPos': Point3(0, 0, 0)},
10048: {'type': 'lift',
'name': 'eastLift',
'comment': '',
'parentEntId': 61,
'pos': Point3(0, -0.684064, 0.589322),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'autoMoveDelay': 5.0,
'duration': 7.0,
'endBoardSides': ['front',
'back',
'left',
'right'],
'endGuardName': 'topGuard',
'endPos': Point3(0, 0, 165),
'floorName': 'elevator_floor',
'modelPath': 'phase_9/models/cogHQ/Elevator.bam',
'modelScale': Vec3(1, 1, 1),
'moveDelay': 1,
'startBoardSides': ['front'],
'startGuardName': 'bottomGuard',
'startPos': Point3(0, 0, 0)},
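    # logicGate entities: combine two input events into one output event (the
    # gate's own ID). 'and' gates fire once both inputs have fired; 'or' gates
    # fire when either does. Gates chain -- e.g. 10059 takes gate 10057 as its
    # input1Event -- and 60119/60132 OR together pairs of door buttons.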
10057: {'type': 'logicGate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10043,
'input1Event': 30009,
'input2Event': 30000,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
10059: {'type': 'logicGate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10058,
'input1Event': 10057,
'input2Event': 30011,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
10061: {'type': 'logicGate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10060,
'input1Event': 10059,
'input2Event': 30013,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
10063: {'type': 'logicGate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10062,
'input1Event': 60033,
'input2Event': 30009,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
30068: {'type': 'logicGate',
'name': 'door 116 and door 118',
'comment': '',
'parentEntId': 30069,
'input1Event': 30013,
'input2Event': 30011,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60023: {'type': 'logicGate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 60021,
'input1Event': 30011,
'input2Event': 30009,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60025: {'type': 'logicGate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 60022,
'input1Event': 60023,
'input2Event': 30013,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60028: {'type': 'logicGate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 60026,
'input1Event': 30011,
'input2Event': 30005,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60029: {'type': 'logicGate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 60027,
'input1Event': 30011,
'input2Event': 30012,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60030: {'type': 'logicGate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 30071,
'input1Event': 30011,
'input2Event': 30009,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60033: {'type': 'logicGate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 30073,
'input1Event': 30013,
'input2Event': 30011,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60034: {'type': 'logicGate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 30075,
'input1Event': 30013,
'input2Event': 30005,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60035: {'type': 'logicGate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 60032,
'input1Event': 30013,
'input2Event': 30012,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60037: {'type': 'logicGate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 60036,
'input1Event': 30005,
'input2Event': 30012,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60039: {'type': 'logicGate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 60038,
'input1Event': 30012,
'input2Event': 30005,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60041: {'type': 'logicGate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 60040,
'input1Event': 30020,
'input2Event': 30019,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60043: {'type': 'logicGate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 60042,
'input1Event': 30019,
'input2Event': 30020,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60047: {'type': 'logicGate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 60045,
'input1Event': 10002,
'input2Event': 30019,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60049: {'type': 'logicGate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 60048,
'input1Event': 30003,
'input2Event': 10052,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60051: {'type': 'logicGate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 60050,
'input1Event': 30001,
'input2Event': 10052,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60053: {'type': 'logicGate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 60052,
'input1Event': 30021,
'input2Event': 30016,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60055: {'type': 'logicGate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 60054,
'input1Event': 30002,
'input2Event': 30021,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60057: {'type': 'logicGate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 60056,
'input1Event': 30016,
'input2Event': 30021,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60059: {'type': 'logicGate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 60058,
'input1Event': 30012,
'input2Event': 30011,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60061: {'type': 'logicGate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 60060,
'input1Event': 30012,
'input2Event': 30013,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60064: {'type': 'logicGate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 60062,
'input1Event': 30005,
'input2Event': 30011,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60065: {'type': 'logicGate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 60063,
'input1Event': 30005,
'input2Event': 30013,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60074: {'type': 'logicGate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 60072,
'input1Event': 10052,
'input2Event': 30003,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60075: {'type': 'logicGate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 60073,
'input1Event': 10052,
'input2Event': 30001,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60076: {'type': 'logicGate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 60020,
'input1Event': 30021,
'input2Event': 30002,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60078: {'type': 'logicGate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 60077,
'input1Event': 30021,
'input2Event': 30002,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60080: {'type': 'logicGate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 60079,
'input1Event': 60057,
'input2Event': 30002,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60082: {'type': 'logicGate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 60081,
'input1Event': 60055,
'input2Event': 30016,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60084: {'type': 'logicGate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 60083,
'input1Event': 30004,
'input2Event': 30014,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60086: {'type': 'logicGate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 60085,
'input1Event': 30006,
'input2Event': 30008,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60091: {'type': 'logicGate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 60087,
'input1Event': 60088,
'input2Event': 30001,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60093: {'type': 'logicGate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 60092,
'input1Event': 30001,
'input2Event': 60088,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60100: {'type': 'logicGate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 60099,
'input1Event': 60095,
'input2Event': 10002,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60119: {'type': 'logicGate',
'name': 'open sesame Duct & Lobby',
'comment': 'links together the Duct Room and Lobby buttons',
'parentEntId': 0,
'input1Event': 30076,
'input2Event': 60118,
'isInput1': 0,
'isInput2': 0,
'logicType': 'or'},
60132: {'type': 'logicGate',
'name': 'open sesame Entrances',
'comment': 'links together the buttons in the two entrances',
'parentEntId': 0,
'input1Event': 30040,
'input2Event': 60102,
'isInput1': 0,
'isInput2': 0,
'logicType': 'or'},
60138: {'type': 'logicGate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 60137,
'input1Event': 60095,
'input2Event': 60094,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60141: {'type': 'logicGate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 60139,
'input1Event': 10002,
'input2Event': 60094,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
60142: {'type': 'logicGate',
'name': '<unnamed>',
'comment': '',
'parentEntId': 60140,
'input1Event': 10002,
'input2Event': 60095,
'isInput1': 0,
'isInput2': 0,
'logicType': 'and'},
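    # model entities: static props loaded from 'modelPath';
    # 'loadType'/'flattenType' control how the geometry is instanced and
    # flattened for rendering.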
10001: {'type': 'model',
'name': 'dropshadow',
'comment': '',
'parentEntId': 10006,
'pos': Point3(0, 0, -25),
'hpr': Vec3(0, 0, 0),
'scale': Point3(2, 1.5, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_3/models/props/drop_shadow.bam'},
10012: {'type': 'model',
'name': 'backCrate',
'comment': '',
'parentEntId': 10067,
'pos': Point3(0, -5.81496, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1, 1, 2),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_9/models/cogHQ/metal_crateB.bam'},
10033: {'type': 'model',
'name': 'dropshadow',
'comment': '',
'parentEntId': 10007,
'pos': Point3(0, 0, -25),
'hpr': Vec3(0, 0, 0),
'scale': Point3(2, 1.5, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_3/models/props/drop_shadow.bam'},
10045: {'type': 'model',
'name': 'dropshadow',
'comment': '',
'parentEntId': 10008,
'pos': Point3(0, 0, -25),
'hpr': Vec3(0, 0, 0),
'scale': Point3(2, 1.5, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_3/models/props/drop_shadow.bam'},
10046: {'type': 'model',
'name': 'dropshadow',
'comment': '',
'parentEntId': 10009,
'pos': Point3(0, 0, -25),
'hpr': Vec3(0, 0, 0),
'scale': Point3(2, 1.5, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_3/models/props/drop_shadow.bam'},
10050: {'type': 'model',
'name': 'sky',
'comment': '',
'parentEntId': 200,
'pos': Point3(-142.02, 437.227, -300),
'hpr': Point3(0, 0, 0),
'scale': Point3(2.5, 2.5, 2),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_9/models/cogHQ/cog_sky.bam'},
10066: {'type': 'model',
'name': 'frontCrate',
'comment': '',
'parentEntId': 10067,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_9/models/cogHQ/metal_crateB.bam'},
10069: {'type': 'model',
'name': 'backCrate',
'comment': '',
'parentEntId': 10065,
'pos': Point3(0, -5.81496, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1, 1, 2),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_9/models/cogHQ/metal_crateB.bam'},
10070: {'type': 'model',
'name': 'frontCrate',
'comment': '',
'parentEntId': 10065,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_9/models/cogHQ/metal_crateB.bam'},
20082: {'type': 'model',
'name': '<unnamed>',
'comment': '',
'parentEntId': 5,
'pos': Point3(4.50815, 11.6508, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(0.92, 0.92, 0.92),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_9/models/cogHQ/metal_crateB.bam'},
20083: {'type': 'model',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20082,
'pos': Point3(0, 0, 5.5),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_9/models/cogHQ/metal_crateB.bam'},
20088: {'type': 'model',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20087,
'pos': Point3(1, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1.3, 1, 1.3),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_9/models/cogHQ/metal_crateB.bam'},
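    # nodepath entities: empty transform nodes used as attachment points --
    # battle-cell anchors, prop groupings, and parents for the gear gauntlet,
    # stompers, and the cog spawn sets below.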
10000: {'type': 'nodepath',
'name': 'gearGauntletObstacle',
'comment': '',
'parentEntId': 10027,
'pos': Point3(0, 0, 0),
'hpr': Point3(0, 0, 0),
'scale': 1},
10004: {'type': 'nodepath',
'name': 'gearGauntlet',
'comment': 'gears are staggered 15 ft in Y',
'parentEntId': 10000,
'pos': Point3(0, -23.25, 6.85),
'hpr': Point3(0, 0, 0),
'scale': 1},
10014: {'type': 'nodepath',
'name': 'cogs',
'comment': '',
'parentEntId': 4,
'pos': Point3(0, 34.07, 0),
'hpr': Point3(0, 0, 0),
'scale': 1},
10015: {'type': 'nodepath',
'name': 'paint mixer platforms',
'comment': '',
'parentEntId': 10,
'pos': Point3(0, 5.15136, -2),
'hpr': Point3(0, 0, 0),
'scale': 1},
10022: {'type': 'nodepath',
'name': 'gagBarrels',
'comment': '',
'parentEntId': 11,
'pos': Point3(11.2328, 14.7959, 0),
'hpr': Point3(0, 0, 0),
'scale': 1},
10023: {'type': 'nodepath',
'name': 'leftCogs',
'comment': '',
'parentEntId': 13,
'pos': Point3(-42.0363, 0, 0),
'hpr': Point3(0, 0, 0),
'scale': 1},
10027: {'type': 'nodepath',
'name': 'zoneNodeCompensate',
'comment': 'I think the ZoneNode was moved.',
'parentEntId': 19,
'pos': Point3(-0.426482, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
10030: {'type': 'nodepath',
'name': 'cogs',
'comment': '',
'parentEntId': 8,
'pos': Point3(2.5, 62.5, 10),
'hpr': Point3(0, 0, 0),
'scale': 1},
10032: {'type': 'nodepath',
'name': 'rightCogs',
'comment': '',
'parentEntId': 13,
'pos': Point3(46.88, 0, 0),
'hpr': Point3(0, 0, 0),
'scale': 1},
10034: {'type': 'nodepath',
'name': 'cogs',
'comment': '',
'parentEntId': 22,
'pos': Point3(0, 0, 0),
'hpr': Point3(180, 0, 0),
'scale': 1},
10036: {'type': 'nodepath',
'name': 'cogs',
'comment': '',
'parentEntId': 15,
'pos': Point3(5.5, 0, 0),
'hpr': Point3(161, 0, 0),
'scale': 1},
10037: {'type': 'nodepath',
'name': 'cogs',
'comment': '',
'parentEntId': 7,
'pos': Point3(3.1, -48.27, 0.05),
'hpr': Point3(0, 0, 0),
'scale': 1},
10040: {'type': 'nodepath',
'name': 'FactoryBoss',
'comment': '',
'parentEntId': 24,
'pos': Point3(0, 68.4457, 9.5669),
'hpr': Point3(180, 0, 0),
'scale': 1},
10047: {'type': 'nodepath',
'name': 'battleCell',
'comment': '',
'parentEntId': 34,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
10056: {'type': 'nodepath',
'name': 'sounds',
'comment': '',
'parentEntId': 10000,
'pos': Point3(0, 0, 15),
'hpr': Vec3(0, 0, 0),
'scale': 1},
10660: {'type': 'nodepath',
'name': 'battleCell',
'comment': 'Paint Mixer Storage Room',
'parentEntId': 11,
'pos': Point3(0, 7.2, 0),
'hpr': Vec3(-180, 0, 0),
'scale': 1},
10661: {'type': 'nodepath',
'name': 'battleCell',
'comment': 'East Catwalk',
'parentEntId': 9,
'pos': Point3(-64.82, 69.84, 0),
'hpr': Vec3(-90, 0, 0),
'scale': 1},
10662: {'type': 'nodepath',
'name': 'battleCell',
'comment': 'West Catwalk',
'parentEntId': 21,
'pos': Point3(109, 250.38, 0),
'hpr': Vec3(-180, 0, 0),
'scale': 1},
10663: {'type': 'nodepath',
'name': 'battleCell',
'comment': 'Outside Center Silo',
'parentEntId': 35,
'pos': Point3(-4, -10, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
10064: {'type': 'nodepath',
'name': 'battleCell',
'comment': '',
'parentEntId': 32,
'pos': Point3(0, -5.20447, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
10065: {'type': 'nodepath',
'name': 'backSteps',
'comment': '',
'parentEntId': 10000,
'pos': Point3(0, 56.2652, 0),
'hpr': Point3(0, 0, 0),
'scale': Point3(1.5, 1.3, 0.73)},
10067: {'type': 'nodepath',
'name': 'frontSteps',
'comment': '',
'parentEntId': 10000,
'pos': Point3(0, -44.7196, 0),
'hpr': Point3(180, 0, 0),
'scale': Point3(1.5, 1.3, 0.729057)},
10068: {'type': 'nodepath',
'name': 'battleCell',
'comment': '',
'parentEntId': 33,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
20000: {'type': 'nodepath',
'name': 'stompers',
'comment': '',
'parentEntId': 17,
'pos': Point3(0.75, 0, 0.5),
'hpr': Vec3(0, 0, 0),
'scale': 1},
20018: {'type': 'nodepath',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10014,
'pos': Point3(0, -24, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
20019: {'type': 'nodepath',
'name': 'cogsJoin',
'comment': '',
'parentEntId': 10030,
'pos': Point3(16, 2, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
20022: {'type': 'nodepath',
'name': 'StomperButtonsNodepath',
'comment': '',
'parentEntId': 24,
'pos': Point3(-11.75, -35.8, 14.9),
'hpr': Vec3(0, 0, 0),
'scale': 1},
20023: {'type': 'nodepath',
'name': '<unnamed>',
'comment': '',
'parentEntId': 24,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
20037: {'type': 'nodepath',
'name': 'SignatureGoonNP',
'comment': '',
'parentEntId': 24,
'pos': Point3(-48.4442, -24.9385, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
20058: {'type': 'nodepath',
'name': 'SigRoomCogs',
'comment': '',
'parentEntId': 24,
'pos': Point3(-1.0928, -45, 14.99),
'hpr': Point3(90, 0, 0),
'scale': 1},
20087: {'type': 'nodepath',
'name': '<unnamed>',
'comment': '',
'parentEntId': 17,
'pos': Point3(-4, -117, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
20094: {'type': 'nodepath',
'name': 'cogs',
'comment': '',
'parentEntId': 34,
'pos': Point3(-0.720506, 27.5461, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
20095: {'type': 'nodepath',
'name': 'cogs',
'comment': '',
'parentEntId': 32,
'pos': Point3(0, 0, 0),
'hpr': Point3(0, 0, 0),
'scale': 1},
20096: {'type': 'nodepath',
'name': 'cogs',
'comment': '',
'parentEntId': 33,
'pos': Point3(4.84921, 8.74482, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
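    # paintMixer entities: moving platforms that oscillate between 'pos' and
    # 'pos' + 'offset' with cycle time 'period'; 'phaseShift' staggers mixers
    # relative to one another, and 'waitPercent' is presumably the fraction of
    # each cycle spent paused at the endpoints.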
10017: {'type': 'paintMixer',
'name': 'fifth',
'comment': '',
'parentEntId': 10015,
'pos': Point3(5.24, 23.52, 8),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'floorName': 'PaintMixerFloorCollision',
'modelPath': 'phase_9/models/cogHQ/PaintMixer',
'modelScale': Point3(0.8, 0.8, 0.8),
'motion': 'easeInOut',
'offset': Point3(-12, -6, 0),
'period': 8.0,
'phaseShift': 0.5,
'shaftScale': 1,
'waitPercent': 0.1},
10018: {'type': 'paintMixer',
'name': 'fourth',
'comment': '',
'parentEntId': 10015,
'pos': Point3(-12.1, 3, 8),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'floorName': 'PaintMixerFloorCollision',
'modelPath': 'phase_9/models/cogHQ/PaintMixer',
'modelScale': Point3(0.8, 0.8, 0.8),
'motion': 'easeInOut',
'offset': Point3(0, -6, 15),
'period': 8.0,
'phaseShift': 0.0,
'shaftScale': 2.5,
'waitPercent': 0.1},
10019: {'type': 'paintMixer',
'name': 'third',
'comment': '',
'parentEntId': 10015,
'pos': Point3(-3.85419, -7.75751, 22.5836),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'floorName': 'PaintMixerFloorCollision',
'modelPath': 'phase_9/models/cogHQ/PaintMixer',
'modelScale': Point3(0.8, 0.8, 0.8),
'motion': 'easeInOut',
'offset': Point3(7, 0, 0),
'period': 8.0,
'phaseShift': 0.0,
'shaftScale': 2.5,
'waitPercent': 0.1},
10020: {'type': 'paintMixer',
'name': 'second',
'comment': '',
'parentEntId': 10015,
'pos': Point3(16.01, -6.47, 23),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'floorName': 'PaintMixerFloorCollision',
'modelPath': 'phase_9/models/cogHQ/PaintMixer',
'modelScale': Point3(0.8, 0.8, 0.8),
'motion': 'easeInOut',
'offset': Point3(-4, -8, -15),
'period': 8.0,
'phaseShift': 0.0,
'shaftScale': 2.5,
'waitPercent': 0.1},
10054: {'type': 'paintMixer',
'name': 'first',
'comment': '',
'parentEntId': 10015,
'pos': Point3(-10, -26.1, 8),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'floorName': 'PaintMixerFloorCollision',
'modelPath': 'phase_9/models/cogHQ/PaintMixer',
'modelScale': Point3(0.8, 0.8, 0.8),
'motion': 'easeInOut',
'offset': Point3(15, 0, 0),
'period': 8.0,
'phaseShift': 0.0,
'shaftScale': 1,
'waitPercent': 0.1},
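    # path entities: predefined patrol routes selected by 'pathIndex'; goons
    # pick up their route from a path parent (e.g. goon 20041 is parented to
    # path 20040).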
20008: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 13,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 0,
'pathScale': 1.0},
20009: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 4,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 17,
'pathScale': 1.0},
20010: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 21,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 36,
'pathScale': 1.0},
20012: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 21,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 34,
'pathScale': 1.0},
20015: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 21,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 37,
'pathScale': 1.0},
20038: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 15,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 38,
'pathScale': 1.0},
20039: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 7,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 12,
'pathScale': 1.0},
20040: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20037,
'pos': Point3(41.5, 33.5, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 0,
'pathScale': 1.0},
20042: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20037,
'pos': Point3(15, 34, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 0,
'pathScale': 1.0},
20044: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20037,
'pos': Point3(1.5, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'pathIndex': 6,
'pathScale': 1.0},
20045: {'type': 'path',
'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 20037,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1, 1, 1),
'pathIndex': 7,
'pathScale': 1.0},
20049: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 7,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 13,
'pathScale': 1.0},
20051: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 4,
'pos': Point3(1, -24, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 42,
'pathScale': 1.0},
20053: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20037,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 8,
'pathScale': 1.0},
20055: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20037,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 9,
'pathScale': 1.0},
20059: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20037,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 10,
'pathScale': 1.0},
20061: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20037,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 11,
'pathScale': 1.0},
20067: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 15,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 39,
'pathScale': 1.0},
20068: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 15,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 40,
'pathScale': 1.0},
20069: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 9,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 5,
'pathScale': 1.0},
20070: {'type': 'path',
'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 9,
'pos': Point3(1, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 5,
'pathScale': 1.0},
20073: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 21,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 35,
'pathScale': 1.0},
20075: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 7,
'pos': Point3(4, 4, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 14,
'pathScale': 1.0},
20076: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 8,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 15,
'pathScale': 1.0},
20077: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 8,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 16,
'pathScale': 1.0},
20078: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 4,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 18,
'pathScale': 1.0},
20079: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 4,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 0,
'pathScale': 1.0},
20084: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 9,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 41,
'pathScale': 1.0},
20097: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 34,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 19,
'pathScale': 1.0},
20098: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 34,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 20,
'pathScale': 1.0},
20099: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 34,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 21,
'pathScale': 1.0},
20100: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 33,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 22,
'pathScale': 1.0},
20101: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 33,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 23,
'pathScale': 1.0},
20102: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 33,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 24,
'pathScale': 1.0},
20103: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 32,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 25,
'pathScale': 1.0},
20104: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 32,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 26,
'pathScale': 1.0},
20105: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 32,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 27,
'pathScale': 1.0},
20106: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 13,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 28,
'pathScale': 1.0},
20107: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 13,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 29,
'pathScale': 1.0},
20108: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 13,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 30,
'pathScale': 1.0},
20109: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 13,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 31,
'pathScale': 1.0},
20110: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 13,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 32,
'pathScale': 1.0},
20111: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 13,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 33,
'pathScale': 1.0},
60133: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 22,
'pos': Point3(-10, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 0,
'pathScale': 1.0},
60134: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 22,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 0,
'pathScale': 1.0},
60135: {'type': 'path',
'name': '<unnamed>',
'comment': '',
'parentEntId': 22,
'pos': Point3(10, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'pathIndex': 0,
'pathScale': 1.0},
10042: {'type': 'propSpinner',
'name': '<unnamed>',
'comment': '',
'parentEntId': 7},
20001: {'type': 'stomper',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20000,
'pos': Point3(0, 0, 0),
'hpr': Point3(0, 0, 0),
'scale': 1,
'crushCellId': None,
'damage': 12,
'headScale': Point3(7, 5, 7),
'modelPath': 0,
'motion': 3,
'period': 4.0,
'phaseShift': 0.0,
'range': 30.0,
'shaftScale': Point3(0.5, 12, 0.5),
'soundLen': 0,
'soundOn': 1,
'soundPath': 2,
'style': 'vertical',
'switchId': 0,
'wantShadow': 1,
'wantSmoke': 1,
'zOffset': 0},
20002: {'type': 'stomper',
'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 20000,
'pos': Point3(0, -14.3294, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'crushCellId': None,
'damage': 12,
'headScale': Point3(7, 5, 7),
'modelPath': 0,
'motion': 3,
'period': 2.0,
'phaseShift': 0.0,
'range': 10.0,
'shaftScale': Point3(0.5, 12, 0.5),
'soundLen': 0,
'soundOn': 1,
'soundPath': 2,
'style': 'vertical',
'switchId': 0,
'wantShadow': 1,
'wantSmoke': 1,
'zOffset': 0},
20003: {'type': 'stomper',
'name': 'copy of copy of <unnamed>',
'comment': '',
'parentEntId': 20000,
'pos': Point3(0, -28.3252, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'crushCellId': None,
'damage': 12,
'headScale': Point3(7, 5, 7),
'modelPath': 0,
'motion': 4,
'period': 2.0,
'phaseShift': 0.5,
'range': 10.0,
'shaftScale': Point3(0.5, 12, 0.5),
'soundLen': 0,
'soundOn': 1,
'soundPath': 2,
'style': 'vertical',
'switchId': 0,
'wantShadow': 1,
'wantSmoke': 1,
'zOffset': 0},
20004: {'type': 'stomper',
'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 20000,
'pos': Point3(-3.5, 16.2588, 0),
'hpr': Point3(0, 0, 0),
'scale': 1,
'crushCellId': None,
'damage': 12,
'headScale': Point3(3.5, 5, 3.5),
'modelPath': 0,
'motion': 1,
'period': 3.0001373423482587,
'phaseShift': 0.0,
'range': 15.0,
'shaftScale': Point3(0.71, 12, 0.71),
'soundLen': 0,
'soundOn': 1,
'soundPath': 0,
'style': 'vertical',
'switchId': 0,
'wantShadow': 1,
'wantSmoke': 1,
'zOffset': 0},
20005: {'type': 'stomper',
'name': 'copy of copy of <unnamed>',
'comment': '',
'parentEntId': 20000,
'pos': Point3(3.5, 16.2588, 0),
'hpr': Point3(0, 0, 0),
'scale': 1,
'crushCellId': None,
'damage': 12,
'headScale': Point3(3.5, 5, 3.5),
'modelPath': 0,
'motion': 0,
'period': 1.5,
'phaseShift': 0.0,
'range': 15.0,
'shaftScale': Point3(0.71, 12, 0.71),
'soundLen': 0,
'soundOn': 1,
'soundPath': 1,
'style': 'vertical',
'switchId': 0,
'wantShadow': 1,
'wantSmoke': 1,
'zOffset': 0},
20006: {'type': 'stomper',
'name': 'copy of copy of copy of <unnamed>',
'comment': '',
'parentEntId': 20000,
'pos': Point3(-3.5, 23.4392, 0),
'hpr': Point3(0, 0, 0),
'scale': 1,
'crushCellId': None,
'damage': 12,
'headScale': Point3(3.5, 5, 3.5),
'modelPath': 0,
'motion': 2,
'period': 1.5,
'phaseShift': 0.5,
'range': 15.0,
'shaftScale': Point3(0.71, 12, 0.71),
'soundLen': 0,
'soundOn': 1,
'soundPath': 0,
'style': 'vertical',
'switchId': 0,
'wantShadow': 1,
'wantSmoke': 1,
'zOffset': 0},
20007: {'type': 'stomper',
'name': 'copy of copy of copy of copy of <unnamed>',
'comment': '',
'parentEntId': 20000,
'pos': Point3(3.5, 23.4392, 0),
'hpr': Point3(0, 0, 0),
'scale': 1,
'crushCellId': None,
'damage': 12,
'headScale': Point3(3.5, 5, 3.5),
'modelPath': 0,
'motion': 3,
'period': 5.0,
'phaseShift': 0.5,
'range': 15.0,
'shaftScale': Point3(0.71, 12, 0.71),
'soundLen': 0,
'soundOn': 1,
'soundPath': 0,
'style': 'vertical',
'switchId': 0,
'wantShadow': 1,
'wantSmoke': 1,
'zOffset': 0},
20029: {'type': 'stomper',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20025,
'pos': Point3(4.5, 43.5, 0.25),
'hpr': Point3(0, 0, 0),
'scale': 1,
'animateShadow': 0,
'crushCellId': 20024,
'damage': 12,
'headScale': Point3(3, 2, 3),
'modelPath': 0,
'motion': 5,
'period': 2.0,
'phaseShift': 0.0,
'range': 12.0,
'shaftScale': Point3(0.66, 37.5, 0.66),
'soundLen': 0,
'soundOn': 1,
'soundPath': 2,
'style': 'vertical',
'switchId': 20033,
'wantShadow': 1,
'wantSmoke': 1,
'zOffset': 0},
20030: {'type': 'stomper',
'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 20025,
'pos': Point3(31.5, 43.5, 0.25),
'hpr': Point3(0, 0, 0),
'scale': 1,
'animateShadow': 0,
'crushCellId': 20026,
'damage': 3,
'headScale': Point3(3, 2, 3),
'modelPath': 0,
'motion': 5,
'period': 2.0,
'phaseShift': 0.0,
'range': 12.0,
'shaftScale': Point3(0.66, 37.5, 0.66),
'soundLen': 0,
'soundOn': 1,
'soundPath': 2,
'style': 'vertical',
'switchId': 20034,
'wantShadow': 1,
'wantSmoke': 1,
'zOffset': 0},
20031: {'type': 'stomper',
'name': 'copy of copy of <unnamed>',
'comment': '',
'parentEntId': 20025,
'pos': Point3(64.5, 43.5, 0.25),
'hpr': Point3(0, 0, 0),
'scale': 1,
'animateShadow': 0,
'crushCellId': 20027,
'damage': 3,
'headScale': Point3(3, 2, 3),
'modelPath': 0,
'motion': 5,
'period': 2.0,
'phaseShift': 0.0,
'range': 12.0,
'shaftScale': Point3(0.66, 37.5, 0.66),
'soundLen': 0,
'soundOn': 1,
'soundPath': 2,
'style': 'vertical',
'switchId': 20035,
'wantShadow': 1,
'wantSmoke': 1,
'zOffset': 0},
20032: {'type': 'stomper',
'name': 'copy of copy of copy of <unnamed>',
'comment': '',
'parentEntId': 20025,
'pos': Point3(85.5, 43.5, 0.25),
'hpr': Point3(0, 0, 0),
'scale': 1,
'animateShadow': 0,
'crushCellId': 20028,
'damage': 3,
'headScale': Point3(3, 2, 3),
'modelPath': 0,
'motion': 5,
'period': 2.0,
'phaseShift': 0.0,
'range': 12.0,
'shaftScale': Point3(0.66, 37.5, 0.66),
'soundLen': 0,
'soundOn': 1,
'soundPath': 2,
'style': 'vertical',
'switchId': 20036,
'wantShadow': 1,
'wantSmoke': 1,
'zOffset': 0},
20050: {'type': 'trigger',
'name': '<unnamed>',
'comment': '',
'parentEntId': 20022,
'pos': Point3(10, 0, 10),
'hpr': Vec3(0, 0, 0),
'scale': Point3(20, 20, 20),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': 1,
'triggerName': 'signatureRoomView'},
20057: {'type': 'trigger',
'name': '<unnamed>',
'comment': '',
'parentEntId': 23,
'pos': Point3(3, -8.8, 15.5091),
'hpr': Vec3(0, 0, 0),
'scale': Point3(25, 25, 25),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': 1,
'triggerName': 'lookoutTrigger'},
30077: {'type': 'trigger',
'name': 'button cutscene',
'comment': '',
'parentEntId': 3,
'pos': Point3(-4, 8, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1, 1, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': 1,
'triggerName': ''},
10013: {'type': 'visibilityExtender',
'name': 'intoEastSilo',
'comment': '',
'parentEntId': 60009,
'event': 60101,
'newZones': [61]},
10043: {'type': 'visibilityExtender',
'name': 'beyondLobby',
'comment': '',
'parentEntId': 10049,
'event': 10057,
'newZones': [5, 116]},
10044: {'type': 'visibilityExtender',
'name': 'intoEntrance1',
'comment': '',
'parentEntId': 10051,
'event': 30000,
'newZones': [3]},
10058: {'type': 'visibilityExtender',
'name': 'intoFarHallway',
'comment': '',
'parentEntId': 10049,
'event': 10059,
'newZones': [6, 118]},
10060: {'type': 'visibilityExtender',
'name': 'intoLookout',
'comment': '',
'parentEntId': 10049,
'event': 10061,
'newZones': [23]},
10062: {'type': 'visibilityExtender',
'name': 'intoLobby',
'comment': '',
'parentEntId': 60031,
'event': 10063,
'newZones': [4, 114]},
30022: {'type': 'visibilityExtender',
'name': 'intoLobby',
'comment': '',
'parentEntId': 10049,
'event': 30000,
'newZones': [4, 113]},
30023: {'type': 'visibilityExtender',
'name': 'beyond door 106',
'comment': '',
'parentEntId': 60017,
'event': 30002,
'newZones': [15, 126]},
30024: {'type': 'visibilityExtender',
'name': 'beyond door 106',
'comment': '',
'parentEntId': 60016,
'event': 30002,
'newZones': [16]},
30025: {'type': 'visibilityExtender',
'name': 'beyond door 126',
'comment': '',
'parentEntId': 60016,
'event': 30021,
'newZones': [14, 17, 121]},
30026: {'type': 'visibilityExtender',
'name': 'beyond door 121',
'comment': '',
'parentEntId': 60015,
'event': 30016,
'newZones': [13, 119]},
30027: {'type': 'visibilityExtender',
'name': 'beyond door 126',
'comment': '',
'parentEntId': 60015,
'event': 30021,
'newZones': [15, 106]},
30029: {'type': 'visibilityExtender',
'name': 'beyondLobby',
'comment': '',
'parentEntId': 10051,
'event': 30009,
'newZones': [5, 116]},
30030: {'type': 'visibilityExtender',
'name': 'beyond door 113',
'comment': '',
'parentEntId': 60000,
'event': 30009,
'newZones': [4, 114]},
30031: {'type': 'visibilityExtender',
'name': 'beyond door 116',
'comment': '',
'parentEntId': 60000,
'event': 30011,
'newZones': [6,
109,
117,
118]},
30032: {'type': 'visibilityExtender',
'name': 'intoHallwayFromLobby',
'comment': '',
'parentEntId': 60001,
'event': 30011,
'newZones': [5, 113]},
30033: {'type': 'visibilityExtender',
'name': 'intoBoilerRoom',
'comment': '',
'parentEntId': 60001,
'event': 30012,
'newZones': [8]},
30034: {'type': 'visibilityExtender',
'name': 'intoLookout',
'comment': '',
'parentEntId': 60001,
'event': 30013,
'newZones': [23, 39]},
30035: {'type': 'visibilityExtender',
'name': 'intoGearRoom',
'comment': '',
'parentEntId': 60001,
'event': 30005,
'newZones': [7]},
30036: {'type': 'visibilityExtender',
'name': 'beyond door 109',
'comment': '',
'parentEntId': 60002,
'event': 30005,
'newZones': [6,
116,
117,
118]},
30037: {'type': 'visibilityExtender',
'name': 'beyond door 110',
'comment': '',
'parentEntId': 60002,
'event': 30006,
'newZones': [9,
25,
26,
33,
34,
35,
38,
41,
53,
112,
115,
200]},
30038: {'type': 'visibilityExtender',
'name': 'beyond door 117',
'comment': '',
'parentEntId': 60005,
'event': 30012,
'newZones': [6,
109,
116,
118]},
30039: {'type': 'visibilityExtender',
'name': 'beyond door 108',
'comment': '',
'parentEntId': 60005,
'event': 30004,
'newZones': [12,
21,
26,
34,
35,
40,
41,
53,
60,
119,
120,
200]},
30041: {'type': 'visibilityExtender',
'name': 'beyond door 110',
'comment': '',
'parentEntId': 60003,
'event': 30006,
'newZones': [7]},
30042: {'type': 'visibilityExtender',
'name': 'beyond door 112',
'comment': '',
'parentEntId': 60003,
'event': 30008,
'newZones': [10, 11]},
30043: {'type': 'visibilityExtender',
'name': 'intoWarehouse',
'comment': '',
'parentEntId': 60003,
'event': 30010,
'newZones': [24, 39]},
30044: {'type': 'visibilityExtender',
'name': 'beyond door 112',
'comment': '',
'parentEntId': 60004,
'event': 30008,
'newZones': [9,
25,
26,
33,
34,
35,
38,
41,
53,
110,
115,
200]},
30046: {'type': 'visibilityExtender',
'name': 'beyond door 112',
'comment': '',
'parentEntId': 60066,
'event': 30008,
'newZones': [9,
25,
26,
41,
200]},
30049: {'type': 'visibilityExtender',
'name': 'beyond door 119',
'comment': '',
'parentEntId': 60013,
'event': 30014,
'newZones': [12,
21,
23,
26,
33,
34,
35,
41,
53,
60,
108,
112,
120,
200]},
30050: {'type': 'visibilityExtender',
'name': 'beyond door 121',
'comment': '',
'parentEntId': 60013,
'event': 30016,
'newZones': [14, 17, 126]},
30051: {'type': 'visibilityExtender',
'name': 'beyond door 121',
'comment': '',
'parentEntId': 60014,
'event': 30016,
'newZones': [13, 119]},
30052: {'type': 'visibilityExtender',
'name': 'beyond door 126',
'comment': '',
'parentEntId': 60014,
'event': 30021,
'newZones': [15, 106]},
30055: {'type': 'visibilityExtender',
'name': 'beyond door 105',
'comment': '',
'parentEntId': 60019,
'event': 30001,
'newZones': [27, 127]},
30056: {'type': 'visibilityExtender',
'name': 'beyond door 105',
'comment': '',
'parentEntId': 60018,
'event': 30001,
'newZones': [27, 127]},
30057: {'type': 'visibilityExtender',
'name': 'beyond door 103',
'comment': '',
'parentEntId': 60018,
'event': 60088,
'newZones': [17]},
30059: {'type': 'visibilityExtender',
'name': 'beyond door 108',
'comment': '',
'parentEntId': 60006,
'event': 30004,
'newZones': [8, 117]},
30060: {'type': 'visibilityExtender',
'name': 'beyond door 119',
'comment': '',
'parentEntId': 60006,
'event': 30014,
'newZones': [13, 121]},
30061: {'type': 'visibilityExtender',
'name': 'intoWarehouse',
'comment': '',
'parentEntId': 60006,
'event': 30015,
'newZones': [24, 39]},
30062: {'type': 'visibilityExtender',
'name': 'beyond door 107',
'comment': '',
'parentEntId': 60024,
'event': 30003,
'newZones': [27, 127]},
30063: {'type': 'visibilityExtender',
'name': 'intoHallway',
'comment': '',
'parentEntId': 60031,
'event': 30013,
'newZones': [6,
109,
116,
117]},
30064: {'type': 'visibilityExtender',
'name': 'beyondLowerWestDoor',
'comment': '',
'parentEntId': 60007,
'event': 30015,
'newZones': [12,
21,
26,
34,
40,
41,
53,
200]},
30066: {'type': 'visibilityExtender',
'name': 'beyondLowerEastDoor',
'comment': '',
'parentEntId': 60007,
'event': 30010,
'newZones': [9,
25,
26,
33,
38,
41,
200]},
30067: {'type': 'visibilityExtender',
'name': 'beyondUpperEastDoor',
'comment': '',
'parentEntId': 60007,
'event': 30019,
'newZones': [9,
33,
38,
41,
200,
222]},
30069: {'type': 'visibilityExtender',
'name': 'beyond door 118',
'comment': '',
'parentEntId': 60000,
'event': 30068,
'newZones': [23]},
30071: {'type': 'visibilityExtender',
'name': 'intoLobby',
'comment': '',
'parentEntId': 60001,
'event': 60030,
'newZones': [4, 114]},
30073: {'type': 'visibilityExtender',
'name': 'intoLobbyHallway',
'comment': '',
'parentEntId': 60031,
'event': 60033,
'newZones': [5, 113]},
30075: {'type': 'visibilityExtender',
'name': 'intoGearRoom',
'comment': '',
'parentEntId': 60031,
'event': 60034,
'newZones': [7]},
60008: {'type': 'visibilityExtender',
'name': 'beyondUpperWestDoor',
'comment': '',
'parentEntId': 60007,
'event': 30020,
'newZones': [12,
21,
34,
40,
41,
60,
127,
200]},
60010: {'type': 'visibilityExtender',
'name': 'intoWarehouse',
'comment': '',
'parentEntId': 60009,
'event': 30019,
'newZones': [24, 39, 125]},
60012: {'type': 'visibilityExtender',
'name': 'beyond door 125',
'comment': '',
'parentEntId': 60011,
'event': 30020,
'newZones': [24, 39, 124]},
60020: {'type': 'visibilityExtender',
'name': 'beyond door 106',
'comment': '',
'parentEntId': 60015,
'event': 60076,
'newZones': [16]},
60021: {'type': 'visibilityExtender',
'name': 'beyond door 116',
'comment': '',
'parentEntId': 10051,
'event': 60023,
'newZones': [6, 118]},
60022: {'type': 'visibilityExtender',
'name': 'beyond door 118',
'comment': '',
'parentEntId': 10051,
'event': 60025,
'newZones': [23]},
60026: {'type': 'visibilityExtender',
'name': 'beyond door 109',
'comment': '',
'parentEntId': 60000,
'event': 60028,
'newZones': [7]},
60027: {'type': 'visibilityExtender',
'name': 'beyond door 117',
'comment': '',
'parentEntId': 60000,
'event': 60029,
'newZones': [8]},
60032: {'type': 'visibilityExtender',
'name': 'intoBoilerRoom',
'comment': '',
'parentEntId': 60031,
'event': 60035,
'newZones': [8]},
60036: {'type': 'visibilityExtender',
'name': 'beyond door 117',
'comment': '',
'parentEntId': 60002,
'event': 60037,
'newZones': [8]},
60038: {'type': 'visibilityExtender',
'name': 'beyond door 109',
'comment': '',
'parentEntId': 60005,
'event': 60039,
'newZones': [7]},
60040: {'type': 'visibilityExtender',
'name': 'beyond door 124',
'comment': '',
'parentEntId': 60011,
'event': 60041,
'newZones': [38]},
60042: {'type': 'visibilityExtender',
'name': 'beyondWarehouse',
'comment': '',
'parentEntId': 60009,
'event': 60043,
'newZones': [12, 200]},
60045: {'type': 'visibilityExtender',
'name': 'beyond door 124',
'comment': '',
'parentEntId': 60044,
'event': 60047,
'newZones': [24]},
60046: {'type': 'visibilityExtender',
'name': 'beyond door 128',
'comment': '',
'parentEntId': 60044,
'event': 10002,
'newZones': [31]},
60048: {'type': 'visibilityExtender',
'name': 'beyond door 127',
'comment': '',
'parentEntId': 60024,
'event': 60049,
'newZones': [21, 200]},
60050: {'type': 'visibilityExtender',
'name': 'beyond door 127',
'comment': '',
'parentEntId': 60019,
'event': 60051,
'newZones': [21, 34, 200]},
60052: {'type': 'visibilityExtender',
'name': 'beyond door 121',
'comment': '',
'parentEntId': 60016,
'event': 60053,
'newZones': [13, 119]},
60054: {'type': 'visibilityExtender',
'name': 'beyond door 126',
'comment': '',
'parentEntId': 60017,
'event': 60055,
'newZones': [14, 17, 121]},
60056: {'type': 'visibilityExtender',
'name': 'beyond door 126',
'comment': '',
'parentEntId': 60013,
'event': 60057,
'newZones': [15, 106]},
60058: {'type': 'visibilityExtender',
'name': 'beyond door 116',
'comment': '',
'parentEntId': 60005,
'event': 60059,
'newZones': [5]},
60060: {'type': 'visibilityExtender',
'name': 'beyond door 118',
'comment': '',
'parentEntId': 60005,
'event': 60061,
'newZones': [23]},
60062: {'type': 'visibilityExtender',
'name': 'beyond door 116',
'comment': '',
'parentEntId': 60002,
'event': 60064,
'newZones': [5]},
60063: {'type': 'visibilityExtender',
'name': 'beyond door 118',
'comment': '',
'parentEntId': 60002,
'event': 60065,
'newZones': [23]},
60068: {'type': 'visibilityExtender',
'name': 'beyond door 105',
'comment': '',
'parentEntId': 60067,
'event': 30001,
'newZones': [18,
19,
20,
131]},
60069: {'type': 'visibilityExtender',
'name': 'beyond door 107',
'comment': '',
'parentEntId': 60067,
'event': 30003,
'newZones': [22]},
60070: {'type': 'visibilityExtender',
'name': 'beyond door 127',
'comment': '',
'parentEntId': 60067,
'event': 10052,
'newZones': [12,
21,
26,
34,
35,
40,
41,
53,
60,
200]},
60071: {'type': 'visibilityExtender',
'name': 'beyond door 127',
'comment': '',
'parentEntId': 60006,
'event': 10052,
'newZones': [27, 105, 107]},
60072: {'type': 'visibilityExtender',
'name': 'beyond door 107',
'comment': '',
'parentEntId': 60006,
'event': 60074,
'newZones': [22]},
60073: {'type': 'visibilityExtender',
'name': 'beyond door 105',
'comment': '',
'parentEntId': 60006,
'event': 60075,
'newZones': [18]},
60077: {'type': 'visibilityExtender',
'name': 'beyond door 106',
'comment': '',
'parentEntId': 60014,
'event': 60078,
'newZones': [16]},
60079: {'type': 'visibilityExtender',
'name': 'beyond door 106',
'comment': '',
'parentEntId': 60013,
'event': 60080,
'newZones': [16]},
60081: {'type': 'visibilityExtender',
'name': 'beyond door 121',
'comment': '',
'parentEntId': 60017,
'event': 60082,
'newZones': [13]},
60083: {'type': 'visibilityExtender',
'name': 'beyond door 119',
'comment': '',
'parentEntId': 60005,
'event': 60084,
'newZones': [13]},
60085: {'type': 'visibilityExtender',
'name': 'beyond door 112',
'comment': '',
'parentEntId': 60002,
'event': 60086,
'newZones': [10]},
60087: {'type': 'visibilityExtender',
'name': 'beyond door 105',
'comment': '',
'parentEntId': 60015,
'event': 60091,
'newZones': [27]},
60089: {'type': 'visibilityExtender',
'name': 'beyond door 103',
'comment': '',
'parentEntId': 60019,
'event': 60088,
'newZones': [17]},
60090: {'type': 'visibilityExtender',
'name': 'beyond door 103',
'comment': '',
'parentEntId': 60015,
'event': 60088,
'newZones': [18, 19, 105]},
60092: {'type': 'visibilityExtender',
'name': 'beyond door 103',
'comment': '',
'parentEntId': 60067,
'event': 60093,
'newZones': [17]},
60097: {'type': 'visibilityExtender',
'name': 'beyond door 130',
'comment': '',
'parentEntId': 60096,
'event': 60095,
'newZones': [33,
34,
35,
36,
37,
60,
61,
128,
129,
200]},
60098: {'type': 'visibilityExtender',
'name': 'beyond door 130',
'comment': '',
'parentEntId': 60044,
'event': 60095,
'newZones': [30]},
60099: {'type': 'visibilityExtender',
'name': 'beyond door 128',
'comment': '',
'parentEntId': 60096,
'event': 60100,
'newZones': [31]},
60106: {'type': 'visibilityExtender',
'name': 'beyond door 129',
'comment': '',
'parentEntId': 60011,
'event': 60094,
'newZones': [32]},
60107: {'type': 'visibilityExtender',
'name': 'beyond door 130',
'comment': '',
'parentEntId': 60011,
'event': 60095,
'newZones': [30]},
60109: {'type': 'visibilityExtender',
'name': 'beyond door 129',
'comment': '',
'parentEntId': 60108,
'event': 60094,
'newZones': [32]},
60110: {'type': 'visibilityExtender',
'name': 'beyond door 130',
'comment': '',
'parentEntId': 60108,
'event': 60095,
'newZones': [30]},
60112: {'type': 'visibilityExtender',
'name': 'beyond door 129',
'comment': '',
'parentEntId': 60111,
'event': 60094,
'newZones': [32]},
60113: {'type': 'visibilityExtender',
'name': 'beyond door 130',
'comment': '',
'parentEntId': 60111,
'event': 60095,
'newZones': [30]},
60115: {'type': 'visibilityExtender',
'name': 'beyond door 129',
'comment': '',
'parentEntId': 60114,
'event': 60094,
'newZones': [32]},
60116: {'type': 'visibilityExtender',
'name': 'beyond door 130',
'comment': '',
'parentEntId': 60114,
'event': 60095,
'newZones': [30]},
60117: {'type': 'visibilityExtender',
'name': 'beyond door 103',
'comment': '',
'parentEntId': 60014,
'event': 60088,
'newZones': [18]},
60120: {'type': 'visibilityExtender',
'name': 'beyond door 128',
'comment': '',
'parentEntId': 60108,
'event': 10002,
'newZones': [31]},
60122: {'type': 'visibilityExtender',
'name': 'beyond door 128',
'comment': '',
'parentEntId': 60121,
'event': 10002,
'newZones': [33,
34,
35,
36,
37,
60,
61,
128,
129,
130,
200]},
60123: {'type': 'visibilityExtender',
'name': 'beyond door 128',
'comment': '',
'parentEntId': 60111,
'event': 10002,
'newZones': []},
60124: {'type': 'visibilityExtender',
'name': 'beyond door 128',
'comment': '',
'parentEntId': 60114,
'event': 10002,
'newZones': [31]},
60125: {'type': 'visibilityExtender',
'name': 'beyond door 128',
'comment': '',
'parentEntId': 60011,
'event': 10002,
'newZones': [31]},
60127: {'type': 'visibilityExtender',
'name': 'beyond door 128',
'comment': '',
'parentEntId': 60126,
'event': 10002,
'newZones': [31]},
60128: {'type': 'visibilityExtender',
'name': 'beyond door 129',
'comment': '',
'parentEntId': 60126,
'event': 60094,
'newZones': [32]},
60129: {'type': 'visibilityExtender',
'name': 'beyond door 130',
'comment': '',
'parentEntId': 60126,
'event': 60095,
'newZones': [30]},
60131: {'type': 'visibilityExtender',
'name': 'beyond door 129',
'comment': '',
'parentEntId': 60130,
'event': 60094,
'newZones': [33,
34,
35,
36,
37,
60,
61,
128,
130,
200]},
60136: {'type': 'visibilityExtender',
'name': 'beyond door 129',
'comment': '',
'parentEntId': 60044,
'event': 60094,
'newZones': [32]},
60137: {'type': 'visibilityExtender',
'name': 'beyond door 129',
'comment': '',
'parentEntId': 60096,
'event': 60138,
'newZones': [32]},
60139: {'type': 'visibilityExtender',
'name': 'beyond door 129',
'comment': '',
'parentEntId': 60121,
'event': 60141,
'newZones': [32]},
60140: {'type': 'visibilityExtender',
'name': 'beyond door 130',
'comment': '',
'parentEntId': 60121,
'event': 60142,
'newZones': [30]}}
Scenario0 = {}
levelSpec = {'globalEntities': GlobalEntities,
'scenarios': [Scenario0]}
| apache-2.0 | 7,601,260,112,235,053,000 | 27.835514 | 76 | 0.419884 | false |
Phosphenius/battle-snakes | src/mapedit.py | 1 | 23176 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Map editor using a pygame window embedded into a tkinter frame
"""
import tkinter as tk
import tkinter.filedialog as filedia
import tkinter.messagebox
import os
from collections import deque
from copy import copy
import pygame
from constants import BLACK, ORANGE, GREEN, GUN_METAL
from core.map import TileMapBase
from Game import GraphicsManager
import editor.tools
FPS = 30
CELL_SIZE = 10
ROWS = 64
COLS = 128
DISPLAY_WIDTH = COLS * CELL_SIZE
DISPLAY_HEIGHT = ROWS * CELL_SIZE
LEFT_MOUSE_BUTTON = 1
RIGHT_MOUSE_BUTTON = 3
MAX_UNDO_REDO = 1024
# Map object types
TILE_OBJ = 0
SPAWNPOINT_OBJ = 1
PORTAL_OBJ = 2
BLOCKED_OBJ = 3
DEFAULT_EXT = '.battle-snakes.map'
DEFAULT_INIT_DIR = os.path.expanduser('~')
def gen_tile_line(point1, point2):
"""
Generates a line of tiles from point1 to point2
"""
tile_lst = []
if point1[0] == point2[0]:
startpoint = min(point1[1], point2[1])
endpoint = max(point1[1], point2[1])
for ypos in range(startpoint, endpoint+1, CELL_SIZE):
tile_lst.append((point1[0], ypos))
else:
startpoint = min(point1[0], point2[0])
endpoint = max(point1[0], point2[0])
for xpos in range(startpoint, endpoint+1, CELL_SIZE):
tile_lst.append((xpos, point1[1]))
return tile_lst
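# Illustrative example: with CELL_SIZE = 10, gen_tile_line((0, 0), (30, 0))
# returns [(0, 0), (10, 0), (20, 0), (30, 0)] -- endpoints inclusive, one
# tile per cell along the shared axis (the points must share an x or y).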
def gen_tile_rect(point1, point2):
"""
    Generates a list of tiles forming the outline of the rectangle
    whose diagonal runs from point1 to point2
"""
width = abs(point1[0] - point2[0])
height = abs(point1[1] - point2[1])
topleft = min(point1[0], point2[0]), min(point1[1], point2[1])
botright = max(point1[0], point2[0]), max(point1[1], point2[1])
hort1 = gen_tile_line(topleft, (topleft[0] + width, topleft[1]))
hort2 = gen_tile_line(botright, (botright[0] - width, botright[1]))
vert1 = gen_tile_line(topleft, (topleft[0], topleft[1] + height))
vert2 = gen_tile_line(botright, (botright[0], botright[1] - height))
return list(set(hort1 + hort2 + vert1 + vert2))
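# Illustrative example: gen_tile_rect((0, 0), (20, 10)) returns the six
# outline tiles of that 3x2-cell rectangle; interior cells are excluded,
# which keeps the rectangle tool drawing hollow boxes.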
class TileTool(object):
def __init__(self, editor):
self.editor = editor
self.startpoint = None
self.preview = None
def update(self):
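        # Modifier scheme, as implemented below: Ctrl+click fills/clears an
        # entire horizontal run (Ctrl+Shift: vertical); Shift+click stretches
        # a line or rectangle from the previous anchor point; unmodified
        # clicks place or remove single tiles.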
if self.editor.input.key_pressed('CONTROL_L'):
if (self.editor.input.button_tapped(LEFT_MOUSE_BUTTON) and
self.editor.input.key_pressed('SHIFT_L')):
self.fill_vertical()
elif (self.editor.input.button_tapped(RIGHT_MOUSE_BUTTON)
and self.editor.input.key_pressed('SHIFT_L')):
self.remove_vertical()
elif self.editor.input.button_tapped(LEFT_MOUSE_BUTTON):
self.fill_horizontal()
elif self.editor.input.button_tapped(RIGHT_MOUSE_BUTTON):
self.remove_horizontal()
elif (self.editor.input.button_tapped(LEFT_MOUSE_BUTTON) and
self.editor.input.key_pressed('SHIFT_L')):
if self.startpoint is not None:
tile_lst = []
if (self.startpoint[0] == self.editor.selected[0] or
self.startpoint[1] == self.editor.selected[1]):
tile_lst = gen_tile_line(self.startpoint,
self.editor.selected)
else:
tile_lst = gen_tile_rect(self.startpoint,
self.editor.selected)
cmd = EditMapCommand(
tile_lst,
self.editor.tilemap,
TILE_OBJ)
self.editor.cmd_manager.exec_cmd(cmd)
self.startpoint = self.editor.selected
if self.editor.input.button_pressed(LEFT_MOUSE_BUTTON) and \
self.editor.selected not in self.editor.tilemap.tiles \
and not self.editor.input.key_pressed('SHIFT_L') \
and not self.editor.input.key_pressed('CONTROL_L'):
cmd = EditMapCommand(
[self.editor.selected],
self.editor.tilemap,
TILE_OBJ)
self.editor.cmd_manager.exec_cmd(cmd)
if self.editor.input.key_tapped('SHIFT_L'):
self.startpoint = None
if (self.editor.input.button_pressed(RIGHT_MOUSE_BUTTON) and
self.editor.selected in self.editor.tilemap.tiles):
cmd = EditMapCommand(
[self.editor.selected],
self.editor.tilemap,
TILE_OBJ,
remove=True)
self.editor.cmd_manager.exec_cmd(cmd)
# preview for line and rectangle tool
if self.startpoint is not None:
if (self.startpoint[0] == self.editor.selected[0] or
self.startpoint[1] == self.editor.selected[1]):
self.preview = gen_tile_line(self.startpoint,
self.editor.selected)
else:
self.preview = gen_tile_rect(self.startpoint,
self.editor.selected)
else:
self.preview = None
def draw(self, screen):
if self.preview is not None:
for tile in self.preview:
screen.blit(self.editor.wall_tex, tile)
if self.startpoint is not None:
screen.blit(self.editor.wall_tex, self.startpoint)
def fill_horizontal(self):
tile_lst = gen_tile_line((0, self.editor.selected[1]),
(DISPLAY_WIDTH,
self.editor.selected[1]))
cmd = EditMapCommand(tile_lst, self.editor.tilemap, TILE_OBJ)
self.editor.cmd_manager.exec_cmd(cmd)
def fill_vertical(self):
tile_lst = gen_tile_line((self.editor.selected[0], 0),
(self.editor.selected[0],
DISPLAY_HEIGHT))
cmd = EditMapCommand(tile_lst, self.editor.tilemap, TILE_OBJ)
self.editor.cmd_manager.exec_cmd(cmd)
def remove_horizontal(self):
tile_lst = gen_tile_line((0, self.editor.selected[1]),
(DISPLAY_WIDTH,
self.editor.selected[1]))
cmd = EditMapCommand(tile_lst,
self.editor.tilemap,
TILE_OBJ, remove=True)
self.editor.cmd_manager.exec_cmd(cmd)
def remove_vertical(self):
tile_lst = gen_tile_line((self.editor.selected[0], 0),
(self.editor.selected[0],
DISPLAY_HEIGHT))
cmd = EditMapCommand(tile_lst, self.editor.tilemap, TILE_OBJ,
remove=True)
self.editor.cmd_manager.exec_cmd(cmd)
class SpawnpointTool(object):
def __init__(self, editor):
self.editor = editor
def update(self):
if (self.editor.input.button_pressed(LEFT_MOUSE_BUTTON) and
self.editor.tilemap.get_free(self.editor.selected)):
cmd = EditMapCommand(
[self.editor.selected],
self.editor.tilemap,
SPAWNPOINT_OBJ)
self.editor.cmd_manager.exec_cmd(cmd)
elif (self.editor.input.button_pressed(RIGHT_MOUSE_BUTTON) and
not self.editor.tilemap.get_free(self.editor.selected)):
cmd = EditMapCommand(
[self.editor.selected],
self.editor.tilemap,
SPAWNPOINT_OBJ,
remove=True)
self.editor.cmd_manager.exec_cmd(cmd)
def draw(self, screen):
pass
class CommandManager(object):
"""
Manager for undo/redo functionality, implements the command pattern.
"""
def __init__(self):
self.undo_stack = deque(maxlen=MAX_UNDO_REDO)
self.redo_stack = deque(maxlen=MAX_UNDO_REDO)
self.state_change_listener = []
def exec_cmd(self, cmd):
"""
Execute a command and push it onto the undo stack.
"""
cmd.do()
self.undo_stack.append(cmd)
for callback in self.state_change_listener:
callback()
def undo(self):
"""Undo a command."""
if len(self.undo_stack) > 0:
cmd = self.undo_stack.pop()
self.redo_stack.append(cmd)
cmd.undo()
for callback in self.state_change_listener:
callback()
def redo(self):
"""Redo a command."""
if len(self.redo_stack) > 0:
cmd = self.redo_stack.pop()
self.undo_stack.append(cmd)
cmd.do()
for callback in self.state_change_listener:
callback()
def reset(self):
"""
Reset the command manager
:return:
"""
self.undo_stack = deque(maxlen=MAX_UNDO_REDO)
self.redo_stack = deque(maxlen=MAX_UNDO_REDO)
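# Typical flow (sketch): the editor funnels every map mutation through a
# single CommandManager, e.g.
#   mgr = CommandManager()
#   mgr.exec_cmd(EditMapCommand([(0, 0)], tilemap, TILE_OBJ))  # do + record
#   mgr.undo()   # reverses the last command
#   mgr.redo()   # replays it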
class EditMapCommand(object):
def __init__(self, obj_lst, tilemap, obj_type, remove=False):
self.obj_lst = obj_lst
self.tilemap = tilemap
self.obj_type = obj_type
self.objs_changed = []
self.remove = remove
    def do(self):
        self.objs_changed = []  # reset so a redo() doesn't record duplicates
        for obj in self.obj_lst:
if self.remove:
if obj in self.tilemap[self.obj_type]:
self.tilemap[self.obj_type].remove(obj)
self.objs_changed.append(obj)
else:
if self.tilemap.is_unblocked(obj):
self.tilemap[self.obj_type].append(obj)
self.objs_changed.append(obj)
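    # Only the objects that do() actually touched are recorded in
    # objs_changed, so undo() never removes tiles that were already present
    # (or re-adds ones that were never deleted).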
def undo(self):
for obj in self.objs_changed:
if self.remove:
if self.tilemap.get_free(obj):
self.tilemap[self.obj_type].append(obj)
else:
if obj in self.tilemap[self.obj_type]:
self.tilemap[self.obj_type].remove(obj)
class InputManager(object):
def __init__(self, init_mouse_x=0, init_mouse_y=0):
self.mouse_x = init_mouse_x
self.mouse_y = init_mouse_y
self.curr_key_state = []
self.prev_key_state = []
self.curr_button_state = []
self.prev_button_state = []
self.motion_event_listener = None
def capture_key_press(self, event):
self.prev_key_state = copy(self.curr_key_state)
if event.keysym not in self.curr_key_state:
if event.keysym == 'Meta_L':
self.curr_key_state.append('ALT_L')
elif event.keysym == 'Meta_R':
self.curr_key_state.append('ALT_R')
else:
self.curr_key_state.append(event.keysym.upper())
def capture_key_release(self, event):
self.prev_key_state = copy(self.curr_key_state)
if event.keysym == 'Meta_L':
self.curr_key_state.remove('ALT_L')
elif event.keysym == 'Meta_R':
self.curr_key_state.remove('ALT_R')
elif (event.keysym == 'Escape' and
'CONTROL_L' in self.curr_key_state):
self.curr_key_state.remove('CONTROL_L')
else:
try:
self.curr_key_state.remove(event.keysym.upper())
            except ValueError:
                pass  # key wasn't in the pressed list; nothing to remove
def capture_button_press(self, event):
if event.num not in self.curr_button_state:
self.curr_button_state.append(event.num)
def capture_button_release(self, event):
self.prev_button_state = copy(self.curr_button_state)
self.curr_button_state.remove(event.num)
def mouse_motion(self, event):
self.mouse_x = event.x
self.mouse_y = event.y
if self.motion_event_listener:
self.motion_event_listener((self.mouse_x, self.mouse_y))
def update(self):
self.prev_key_state = []
self.prev_button_state = []
def key_pressed(self, key):
return key in self.curr_key_state
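    # "Tapped" means released this frame: absent from the current state but
    # present in the previous snapshot. Applies to keys and mouse buttons.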
def key_tapped(self, key):
return (key not in self.curr_key_state and
key in self.prev_key_state)
def button_pressed(self, button):
return button in self.curr_button_state
def button_tapped(self, button):
return (button not in self.curr_button_state and
button in self.prev_button_state)
def reset_keys(self):
"""
Reset key states. This is needed to fix some weird bug
where the control key won't unregister as being pressed
after a dialog
:return:
"""
self.prev_key_state = []
self.curr_key_state = []
class MapEditor(object):
def __init__(self):
self.input = InputManager(DISPLAY_WIDTH / 2,
DISPLAY_HEIGHT / 2)
self.root = tk.Tk()
self.root.protocol('WM_DELETE_WINDOW', self.window_close)
self.root.bind('<Motion>', self.input.mouse_motion)
self.root.bind('<KeyPress>', self.input.capture_key_press)
self.root.bind('<KeyRelease>', self.input.capture_key_release)
self.root.bind('<ButtonPress>', self.input.capture_button_press)
self.root.bind('<ButtonRelease>',
self.input.capture_button_release)
self.input.motion_event_listener = self.on_mouse_motion
self.root.rowconfigure(0, weight=1)
self.root.rowconfigure(1)
self.root.columnconfigure(0, weight=1)
self.root.columnconfigure(1)
top = self.root.winfo_toplevel()
self.menu_bar = tk.Menu(top)
top['menu'] = self.menu_bar
self.file_menu = tk.Menu(self.menu_bar, tearoff=False)
self.menu_bar.add_cascade(label='File', menu=self.file_menu,
underline=0)
self.grid_var = tk.IntVar()
self.grid_var.set(1)
self.helplines_var = tk.IntVar()
self.helplines_var.set(1)
self.file_menu.add_command(
label='New',
command=self.file_new,
underline=0,
accelerator='Ctrl+N')
self.file_menu.add_command(
label='Open',
command=self.file_open,
underline=0,
accelerator='Ctrl+O')
self.file_menu.add_separator()
self.file_menu.add_command(
label='Save',
command=self.file_save,
underline=0,
accelerator='Ctrl+S')
self.file_menu.add_command(label='Save As',
command=self.file_save_as,
underline=5)
self.file_menu.add_separator()
self.file_menu.add_command(
label='Quit',
command=self.exit_cmd,
underline=0,
accelerator='Ctrl+Q')
self.file_menu.entryconfig(3, state=tk.DISABLED)
self.file_menu.entryconfig(4, state=tk.DISABLED)
self.view_menu = tk.Menu(self.menu_bar, tearoff=False)
self.menu_bar.add_cascade(label='View', menu=self.view_menu,
underline=0)
self.view_menu.add_checkbutton(label="Show Grid",
onvalue=1,
offvalue=0,
variable=self.grid_var,
underline=5,
accelerator="Ctrl+G")
self.view_menu.add_checkbutton(label="Show Guide Lines (H)",
onvalue=1,
offvalue=0,
variable=self.helplines_var,
underline=18,
accelerator="Ctrl+H")
self.embed = tk.Frame(self.root, width=1400, height=700)
self.embed.grid(row=0, column=0, sticky=tk.NW)
self.label_tile_pos = tk.Label(self.root, text='pos: ')
self.label_tile_pos.grid(row=1, column=0, sticky=tk.NW)
self.toolbox = tk.Frame(self.root)
self.toolbox.grid(row=0, column=1, sticky=tk.NW)
os.environ['SDL_WINDOWID'] = str(self.embed.winfo_id())
self.root.update()
self.fps_clock = pygame.time.Clock()
pygame.init()
self.screen = pygame.display.set_mode((DISPLAY_WIDTH,
DISPLAY_HEIGHT))
self.cmd_manager = CommandManager()
self.cmd_manager.state_change_listener.append(self.on_cmd_state_change)
self.selected = None
self.unsaved_changes = False
self.graphics = GraphicsManager(self.screen)
self.tilemap = TileMapBase(self.graphics)
self.tile_textures = self.graphics.get_startswith('tile')
# self.tool = TileTool(self)
self.tool = editor.tools.TileTool(self)
self.save_path = ''
self.quit = False
self.guide_line_color = GREEN
self.clipboard = None
def yes_no(self):
title = 'Quit mapedit'
msg = 'Are you sure you want to discard unsaved changes?'
result = tkinter.messagebox.askyesno(title, msg)
self.input.reset_keys()
return result
def save_file_dialog(self):
result = filedia.asksaveasfilename(
defaultextension=DEFAULT_EXT,
initialdir=DEFAULT_INIT_DIR,
initialfile='untiteld',
title='Save map')
self.input.reset_keys()
return result
def reset(self):
self.cmd_manager.reset()
self.tilemap = TileMapBase(self.graphics)
self.unsaved_changes = False
self.save_path = ''
def window_close(self):
if self.unsaved_changes and not self.yes_no():
return
self.root.destroy()
def on_cmd_state_change(self):
self.unsaved_changes = True
self.state_change()
def state_change(self):
if self.unsaved_changes:
self.file_menu.entryconfig(3, state=tk.NORMAL)
self.file_menu.entryconfig(4, state=tk.NORMAL)
else:
self.file_menu.entryconfig(3, state=tk.DISABLED)
self.file_menu.entryconfig(4, state=tk.DISABLED)
def on_mouse_motion(self, pos):
left = COLS * CELL_SIZE
bot = ROWS * CELL_SIZE
        xpos = pos[0] // CELL_SIZE if pos[0] < left else COLS - 1
        ypos = pos[1] // CELL_SIZE if pos[1] < bot else ROWS - 1
msg_str = 'pos: {0}:{1}'.format(xpos, ypos)
self.label_tile_pos.config(text=msg_str)
def file_new(self):
if self.unsaved_changes and not self.yes_no():
return
self.reset()
# TODO: Add new map API first, don't forget yes no dialog
def file_open(self):
result = filedia.askopenfilename(
defaultextension=DEFAULT_EXT,
initialdir=DEFAULT_INIT_DIR,
title='Open map')
        if result != '':
self.tilemap = TileMapBase(self.graphics, result)
def save_map(self):
self.tilemap.write_to_file(self.save_path)
self.unsaved_changes = False
def file_save(self):
"""
Write map data as xml to the file specified in 'save_path'.
"""
        if self.save_path != '':
self.save_map()
else:
self.file_save_as()
def file_save_as(self):
"""
Open file dialog and write map data as xml to the file
selected by the user.
"""
result = self.save_file_dialog()
        if result != '':
self.save_path = result
self.save_map()
def update(self):
"""
Update editor, get user input
"""
# TODO: Refactor this, yes?
if self.input.key_pressed('CONTROL_L'):
if self.input.key_pressed('Q'):
self.exit_cmd()
elif self.input.key_tapped('N'):
self.file_new()
elif self.input.key_tapped('O'):
self.file_open()
# Undo & redo
if self.input.key_pressed('CONTROL_L'):
if self.input.key_tapped('Z'):
self.cmd_manager.undo()
elif self.input.key_tapped('Y'):
self.cmd_manager.redo()
# Save file
if self.input.key_tapped('S'):
self.file_save()
# Toggle grid
if self.input.key_tapped('G'):
self.grid_var.set(not self.grid_var.get())
# Toggle 'help lines'
if self.input.key_tapped('H'):
self.helplines_var.set(
not self.helplines_var.get())
# else:
# if self.input.key_tapped('T'):
# self.tool = TileTool(self)
# elif self.input.key_tapped('S'):
# self.tool = SpawnpointTool(self)
# Update selected cell
        row = self.input.mouse_y // CELL_SIZE
        col = self.input.mouse_x // CELL_SIZE
self.selected = (
(col if col < COLS else COLS - 1) * CELL_SIZE,
(row if row < ROWS else ROWS - 1) * CELL_SIZE)
self.tool.update()
# Must be called at the very end of update!
self.input.update()
def draw(self):
if self.grid_var.get():
# Draw a grid
for pos_x in range(0, DISPLAY_WIDTH, CELL_SIZE):
pygame.draw.line(self.screen, GUN_METAL, (pos_x, 0),
(pos_x, DISPLAY_HEIGHT))
for pos_y in range(0, DISPLAY_HEIGHT, CELL_SIZE):
pygame.draw.line(self.screen, GUN_METAL, (0, pos_y),
(DISPLAY_WIDTH, pos_y))
self.tilemap.draw()
self.tool.draw(self.screen)
# pygame.draw.rect(self.screen, ORANGE,
# pygame.Rect(self.selected,
# (CELL_SIZE, CELL_SIZE)))
if self.helplines_var.get():
half_size = CELL_SIZE / 2
point1 = (self.selected[0] + half_size, 0)
point2 = (self.selected[0] + half_size, DISPLAY_HEIGHT)
pygame.draw.line(self.screen, self.guide_line_color,
point1, point2)
point1 = (0, self.selected[1] + half_size)
point2 = (DISPLAY_WIDTH, self.selected[1] + half_size)
pygame.draw.line(self.screen, self.guide_line_color,
point1, point2)
def draw_rect_cursor(self, color):
pygame.draw.rect(self.screen, color,
pygame.Rect(self.selected,
(CELL_SIZE, CELL_SIZE)))
def run(self):
while not self.quit:
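            # Drain pygame's event queue so the embedded SDL surface stays
            # responsive; actual input arrives through the tkinter bindings.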
for _ in pygame.event.get():
pass
self.screen.fill(BLACK)
self.fps_clock.tick(FPS)
self.update()
self.draw()
pygame.display.flip()
self.root.update()
def exit_cmd(self):
self.window_close()
def main():
mapedit = MapEditor()
mapedit.run()
if __name__ == '__main__':
main()
| mit | 102,947,849,897,043,100 | 30.361299 | 79 | 0.53879 | false |
moazzemi/HAMEX | cpu/gem5/configs/example/fsHT.py | 1 | 15677 | # Copyright (c) 2010-2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2012-2014 Mark D. Hill and David A. Wood
# Copyright (c) 2009-2011 Advanced Micro Devices, Inc.
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ali Saidi
# Brad Beckmann
import optparse
import sys
import m5
from m5.defines import buildEnv
from m5.objects import *
from m5.util import addToPath, fatal
addToPath('../common')
addToPath('../ruby')
import Ruby
from FSConfig import *
from SysPaths import *
from Benchmarks import *
import SimulationHT
import CacheConfigHT
import CpuConfigHT
import CpuConfig
import MemConfig
from Caches import *
import OptionsHT
#m5.disableAllListeners()
# Check if KVM support has been enabled, we might need to do VM
# configuration if that's the case.
have_kvm_support = 'BaseKvmCPU' in globals()
def is_kvm_cpu(cpu_class):
return have_kvm_support and cpu_class != None and \
issubclass(cpu_class, BaseKvmCPU)
def cmd_line_template():
if options.command_line and options.command_line_file:
print "Error: --command-line and --command-line-file are " \
"mutually exclusive"
sys.exit(1)
if options.command_line:
return options.command_line
if options.command_line_file:
return open(options.command_line_file).read().strip()
return None
def instantiate_cpu(cpu_type):
inst = CpuConfig._cpu_classes[cpu_type]()
if cpu_type == 'arm_detailed':
# make it cortex a15
from O3_ARM_v7a import *
inst.fuPool.FUList.append(O3_ARM_v7a_Simple_Int())
inst.fuPool.FUList.append(O3_ARM_v7a_FP())
return inst
def build_test_system(np):
cmdline = cmd_line_template()
if buildEnv['TARGET_ISA'] == "alpha":
test_sys = makeLinuxAlphaSystem(test_mem_mode, bm[0], options.ruby,
cmdline=cmdline)
elif buildEnv['TARGET_ISA'] == "mips":
test_sys = makeLinuxMipsSystem(test_mem_mode, bm[0], cmdline=cmdline)
elif buildEnv['TARGET_ISA'] == "sparc":
test_sys = makeSparcSystem(test_mem_mode, bm[0], cmdline=cmdline)
elif buildEnv['TARGET_ISA'] == "x86":
test_sys = makeLinuxX86System(test_mem_mode, options.num_cpus, bm[0],
options.ruby, cmdline=cmdline)
elif buildEnv['TARGET_ISA'] == "arm":
test_sys = makeArmSystem(test_mem_mode, options.machine_type,
options.num_cpus, bm[0], options.dtb_filename,
bare_metal=options.bare_metal,
cmdline=cmdline,
external_memory=options.external_memory_system)
if options.enable_context_switch_stats_dump:
test_sys.enable_context_switch_stats_dump = True
else:
fatal("Incapable of building %s full system!", buildEnv['TARGET_ISA'])
# Set the cache line size for the entire system
test_sys.cache_line_size = options.cacheline_size
# Create a top-level voltage domain
test_sys.voltage_domain = VoltageDomain(voltage = options.sys_voltage)
# Create a source clock for the system and set the clock period
test_sys.clk_domain = SrcClockDomain(clock = options.sys_clock,
voltage_domain = test_sys.voltage_domain)
if options.kernel is not None:
test_sys.kernel = binary(options.kernel)
if options.script is not None:
test_sys.readfile = options.script
if options.lpae:
test_sys.have_lpae = True
if options.virtualisation:
test_sys.have_virtualization = True
test_sys.init_param = options.init_param
# For now, assign all the CPUs to the same clock domain
# test_sys.cpu = [TestCPUClass(clk_domain=test_sys.clk_domain, cpu_id=i)
# for i in xrange(np)]
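    # Heterogeneous CPU list: for each (cpu type, count) pair given on the
    # command line, instantiate `count` CPUs of that type into one flat list.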
    test_sys.cpu = [instantiate_cpu(cpu_type)
                    for cpu_type, cpu_nr in zip(options.cpus_type_names,
                                                options.num_cpus_eachtype)
                    for i in range(int(cpu_nr))]
if is_kvm_cpu(TestCPUClass) or is_kvm_cpu(FutureClass):
test_sys.vm = KvmVM()
if options.ruby:
# Check for timing mode because ruby does not support atomic accesses
if not (options.cpu_type == "detailed" or options.cpu_type == "timing"):
print >> sys.stderr, "Ruby requires TimingSimpleCPU or O3CPU!!"
sys.exit(1)
Ruby.create_system(options, True, test_sys, test_sys.iobus,
test_sys._dma_ports)
# Create a seperate clock domain for Ruby
test_sys.ruby.clk_domain = SrcClockDomain(clock = options.ruby_clock,
voltage_domain = test_sys.voltage_domain)
# Connect the ruby io port to the PIO bus,
# assuming that there is just one such port.
test_sys.iobus.master = test_sys.ruby._io_port.slave
for (i, cpu) in enumerate(test_sys.cpu):
#
# Tie the cpu ports to the correct ruby system ports
#
cpu.clk_domain = test_sys.cpu_clk_domain
cpu.createThreads()
cpu.createInterruptController()
cpu.icache_port = test_sys.ruby._cpu_ports[i].slave
cpu.dcache_port = test_sys.ruby._cpu_ports[i].slave
if buildEnv['TARGET_ISA'] == "x86":
cpu.itb.walker.port = test_sys.ruby._cpu_ports[i].slave
cpu.dtb.walker.port = test_sys.ruby._cpu_ports[i].slave
cpu.interrupts[0].pio = test_sys.ruby._cpu_ports[i].master
cpu.interrupts[0].int_master = test_sys.ruby._cpu_ports[i].slave
cpu.interrupts[0].int_slave = test_sys.ruby._cpu_ports[i].master
else:
if options.caches or options.l2cache:
# By default the IOCache runs at the system clock
test_sys.iocache = IOCache(addr_ranges = test_sys.mem_ranges)
test_sys.iocache.cpu_side = test_sys.iobus.master
test_sys.iocache.mem_side = test_sys.membus.slave
elif not options.external_memory_system:
test_sys.iobridge = Bridge(delay='50ns', ranges = test_sys.mem_ranges)
test_sys.iobridge.slave = test_sys.iobus.master
test_sys.iobridge.master = test_sys.membus.slave
# Sanity check
if options.fastmem:
if TestCPUClass != AtomicSimpleCPU:
fatal("Fastmem can only be used with atomic CPU!")
if (options.caches or options.l2cache):
fatal("You cannot use fastmem in combination with caches!")
if options.simpoint_profile:
if not options.fastmem:
# Atomic CPU checked with fastmem option already
fatal("SimPoint generation should be done with atomic cpu and fastmem")
if np > 1:
fatal("SimPoint generation not supported with more than one CPUs")
for i in xrange(np):
if options.fastmem:
test_sys.cpu[i].fastmem = True
if options.simpoint_profile:
test_sys.cpu[i].addSimPointProbe(options.simpoint_interval)
if options.checker:
test_sys.cpu[i].addCheckerCpu()
test_sys.cpu[i].createThreads()
# If elastic tracing is enabled when not restoring from checkpoint and
# when not fast forwarding using the atomic cpu, then check that the
# TestCPUClass is DerivO3CPU or inherits from DerivO3CPU. If the check
# passes then attach the elastic trace probe.
# If restoring from checkpoint or fast forwarding, the code that does this for
# FutureCPUClass is in the Simulation module. If the check passes then the
# elastic trace probe is attached to the switch CPUs.
if options.elastic_trace_en and options.checkpoint_restore == None and \
not options.fast_forward:
CpuConfig.config_etrace(TestCPUClass, test_sys.cpu, options)
# Create a source clock for the CPUs and set the clock period
CpuConfigHT.set_cpu_clock_domains(test_sys,options)
CpuConfigHT.config_heterogeneous_cpus(test_sys,options)
CacheConfigHT.config_cache(options, test_sys)
MemConfig.config_mem(options, test_sys)
return test_sys
def build_drive_system(np):
# driver system CPU is always simple, so is the memory
# Note this is an assignment of a class, not an instance.
DriveCPUClass = AtomicSimpleCPU
drive_mem_mode = 'atomic'
DriveMemClass = SimpleMemory
cmdline = cmd_line_template()
if buildEnv['TARGET_ISA'] == 'alpha':
drive_sys = makeLinuxAlphaSystem(drive_mem_mode, bm[1], cmdline=cmdline)
elif buildEnv['TARGET_ISA'] == 'mips':
drive_sys = makeLinuxMipsSystem(drive_mem_mode, bm[1], cmdline=cmdline)
elif buildEnv['TARGET_ISA'] == 'sparc':
drive_sys = makeSparcSystem(drive_mem_mode, bm[1], cmdline=cmdline)
elif buildEnv['TARGET_ISA'] == 'x86':
drive_sys = makeLinuxX86System(drive_mem_mode, np, bm[1],
cmdline=cmdline)
elif buildEnv['TARGET_ISA'] == 'arm':
drive_sys = makeArmSystem(drive_mem_mode, options.machine_type, np,
bm[1], options.dtb_filename, cmdline=cmdline)
# Create a top-level voltage domain
drive_sys.voltage_domain = VoltageDomain(voltage = options.sys_voltage)
# Create a source clock for the system and set the clock period
drive_sys.clk_domain = SrcClockDomain(clock = options.sys_clock,
voltage_domain = drive_sys.voltage_domain)
# Create a CPU voltage domain
drive_sys.cpu_voltage_domain = VoltageDomain()
# Create a source clock for the CPUs and set the clock period
drive_sys.cpu_clk_domain = SrcClockDomain(clock = options.cpu_clock,
voltage_domain =
drive_sys.cpu_voltage_domain)
drive_sys.cpu = DriveCPUClass(clk_domain=drive_sys.cpu_clk_domain,
cpu_id=0)
drive_sys.cpu.createThreads()
drive_sys.cpu.createInterruptController()
drive_sys.cpu.connectAllPorts(drive_sys.membus)
if options.fastmem:
drive_sys.cpu.fastmem = True
if options.kernel is not None:
drive_sys.kernel = binary(options.kernel)
if is_kvm_cpu(DriveCPUClass):
drive_sys.vm = KvmVM()
drive_sys.iobridge = Bridge(delay='50ns',
ranges = drive_sys.mem_ranges)
drive_sys.iobridge.slave = drive_sys.iobus.master
drive_sys.iobridge.master = drive_sys.membus.slave
# Create the appropriate memory controllers and connect them to the
# memory bus
drive_sys.mem_ctrls = [DriveMemClass(range = r)
for r in drive_sys.mem_ranges]
for i in xrange(len(drive_sys.mem_ctrls)):
drive_sys.mem_ctrls[i].port = drive_sys.membus.master
drive_sys.init_param = options.init_param
return drive_sys
# Add options
parser = optparse.OptionParser()
OptionsHT.addCommonOptions(parser)
OptionsHT.addFSOptions(parser)
# Add the ruby specific and protocol specific options
if '--ruby' in sys.argv:
Ruby.define_options(parser)
(options, args) = parser.parse_args()
if args:
print "Error: script doesn't take any positional arguments"
sys.exit(1)
OptionsHT.options_preprocess(options)
# system under test can be any CPU
(TestCPUClass, test_mem_mode, FutureClass) = SimulationHT.setCPUClass(options)
# Match the memories with the CPUs, based on the options for the test system
TestMemClass = SimulationHT.setMemClass(options)
if options.benchmark:
try:
bm = Benchmarks[options.benchmark]
except KeyError:
print "Error benchmark %s has not been defined." % options.benchmark
print "Valid benchmarks are: %s" % DefinedBenchmarks
sys.exit(1)
else:
if options.dual:
bm = [SysConfig(disk=options.disk_image, rootdev=options.root_device,
mem=options.mem_size, os_type=options.os_type),
SysConfig(disk=options.disk_image, rootdev=options.root_device,
mem=options.mem_size, os_type=options.os_type)]
else:
bm = [SysConfig(disk=options.disk_image, rootdev=options.root_device,
mem=options.mem_size, os_type=options.os_type)]
options.num_cpus = CpuConfigHT.cpu_count(options)
options.total_l2_cache = CpuConfigHT.l2_cache_count(options)
test_sys = build_test_system(options.num_cpus)
if len(bm) == 2:
drive_sys = build_drive_system(options.num_cpus)
root = makeDualRoot(True, test_sys, drive_sys, options.etherdump)
elif len(bm) == 1 and options.dist:
# This system is part of a dist-gem5 simulation
root = makeDistRoot(test_sys,
options.dist_rank,
options.dist_size,
options.dist_server_name,
options.dist_server_port,
options.dist_sync_repeat,
options.dist_sync_start,
options.ethernet_linkspeed,
options.ethernet_linkdelay,
                        options.etherdump)
elif len(bm) == 1:
root = Root(full_system=True, system=test_sys)
else:
print "Error I don't know how to create more than 2 systems."
sys.exit(1)
if options.timesync:
root.time_sync_enable = True
if options.frame_capture:
VncServer.frame_capture = True
SimulationHT.setWorkCountOptions(test_sys, options)
SimulationHT.run(options, root, test_sys, FutureClass)
| mit | 1,070,710,434,811,740,200 | 40.364116 | 159 | 0.657269 | false |
Juniper/tempest | tempest/api/identity/admin/v3/test_groups.py | 1 | 6627 | # Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.identity import base
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
CONF = config.CONF
class GroupsV3TestJSON(base.BaseIdentityV3AdminTest):
@classmethod
def resource_setup(cls):
super(GroupsV3TestJSON, cls).resource_setup()
cls.domain = cls.create_domain()
@classmethod
def resource_cleanup(cls):
# Cleanup the domains created in the setup
cls.domains_client.update_domain(cls.domain['id'], enabled=False)
cls.domains_client.delete_domain(cls.domain['id'])
super(GroupsV3TestJSON, cls).resource_cleanup()
@decorators.idempotent_id('2e80343b-6c81-4ac3-88c7-452f3e9d5129')
def test_group_create_update_get(self):
name = data_utils.rand_name('Group')
description = data_utils.rand_name('Description')
group = self.groups_client.create_group(
name=name, domain_id=self.domain['id'],
description=description)['group']
self.addCleanup(self.groups_client.delete_group, group['id'])
self.assertEqual(group['name'], name)
self.assertEqual(group['description'], description)
new_name = data_utils.rand_name('UpdateGroup')
new_desc = data_utils.rand_name('UpdateDescription')
updated_group = self.groups_client.update_group(
group['id'], name=new_name, description=new_desc)['group']
self.assertEqual(updated_group['name'], new_name)
self.assertEqual(updated_group['description'], new_desc)
new_group = self.groups_client.show_group(group['id'])['group']
self.assertEqual(group['id'], new_group['id'])
self.assertEqual(new_name, new_group['name'])
self.assertEqual(new_desc, new_group['description'])
@decorators.idempotent_id('b66eb441-b08a-4a6d-81ab-fef71baeb26c')
def test_group_update_with_few_fields(self):
name = data_utils.rand_name('Group')
old_description = data_utils.rand_name('Description')
group = self.groups_client.create_group(
name=name, domain_id=self.domain['id'],
description=old_description)['group']
self.addCleanup(self.groups_client.delete_group, group['id'])
new_name = data_utils.rand_name('UpdateGroup')
updated_group = self.groups_client.update_group(
group['id'], name=new_name)['group']
self.assertEqual(new_name, updated_group['name'])
# Verify that 'description' is not being updated or deleted.
self.assertEqual(old_description, updated_group['description'])
@decorators.attr(type='smoke')
@decorators.idempotent_id('1598521a-2f36-4606-8df9-30772bd51339')
def test_group_users_add_list_delete(self):
name = data_utils.rand_name('Group')
group = self.groups_client.create_group(
name=name, domain_id=self.domain['id'])['group']
self.addCleanup(self.groups_client.delete_group, group['id'])
# add user into group
users = []
for _ in range(3):
user = self.create_test_user()
users.append(user)
self.groups_client.add_group_user(group['id'], user['id'])
# list users in group
group_users = self.groups_client.list_group_users(group['id'])['users']
self.assertEqual(sorted(users, key=lambda k: k['name']),
sorted(group_users, key=lambda k: k['name']))
# check and delete user in group
for user in users:
self.groups_client.check_group_user_existence(
group['id'], user['id'])
self.groups_client.delete_group_user(group['id'], user['id'])
group_users = self.groups_client.list_group_users(group['id'])['users']
self.assertEqual(len(group_users), 0)
@decorators.idempotent_id('64573281-d26a-4a52-b899-503cb0f4e4ec')
def test_list_user_groups(self):
# create a user
user = self.create_test_user()
# create two groups, and add user into them
groups = []
for _ in range(2):
name = data_utils.rand_name('Group')
group = self.groups_client.create_group(
name=name, domain_id=self.domain['id'])['group']
groups.append(group)
self.addCleanup(self.groups_client.delete_group, group['id'])
self.groups_client.add_group_user(group['id'], user['id'])
# list groups which user belongs to
user_groups = self.users_client.list_user_groups(user['id'])['groups']
self.assertEqual(sorted(groups, key=lambda k: k['name']),
sorted(user_groups, key=lambda k: k['name']))
self.assertEqual(2, len(user_groups))
@decorators.idempotent_id('cc9a57a5-a9ed-4f2d-a29f-4f979a06ec71')
def test_list_groups(self):
# Test to list groups
group_ids = list()
fetched_ids = list()
for _ in range(3):
name = data_utils.rand_name('Group')
description = data_utils.rand_name('Description')
group = self.groups_client.create_group(
name=name, domain_id=self.domain['id'],
description=description)['group']
self.addCleanup(self.groups_client.delete_group, group['id'])
group_ids.append(group['id'])
# List and Verify Groups
# When domain specific drivers are enabled the operations
# of listing all users and listing all groups are not supported,
# they need a domain filter to be specified
if CONF.identity_feature_enabled.domain_specific_drivers:
body = self.groups_client.list_groups(
domain_id=self.domain['id'])['groups']
else:
body = self.groups_client.list_groups()['groups']
for g in body:
fetched_ids.append(g['id'])
missing_groups = [g for g in group_ids if g not in fetched_ids]
self.assertEmpty(missing_groups)
| apache-2.0 | -119,176,967,285,293,920 | 44.081633 | 79 | 0.634676 | false |
Juniper/contrail-horizon | openstack_dashboard/dashboards/project/networking/views.py | 1 | 6510 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing Neutron Networks.
"""
import logging
from django.core.urlresolvers import reverse, reverse_lazy  # noqa
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon import workflows
from horizon import tabs
from openstack_dashboard import api
from contrail_openstack_dashboard.openstack_dashboard.dashboards.project.networking \
import forms as project_forms
from contrail_openstack_dashboard.openstack_dashboard.dashboards.project.networking.ports \
import tables as port_tables
from contrail_openstack_dashboard.openstack_dashboard.dashboards.project.networking.subnets \
import tables as subnet_tables
from contrail_openstack_dashboard.openstack_dashboard.dashboards.project.networking \
import tables as project_tables
from contrail_openstack_dashboard.openstack_dashboard.dashboards.project.networking \
import workflows as project_workflows
from contrail_openstack_dashboard.openstack_dashboard.dashboards.project.networking \
import tabs as project_tabs
LOG = logging.getLogger(__name__)
class IndexView(tabs.TabbedTableView):
tab_group_class = project_tabs.NetworkingTabs
template_name = 'project/networking/index.html'
class CreateView(workflows.WorkflowView):
workflow_class = project_workflows.CreateNetwork
def get_initial(self):
pass
class UpdateView(forms.ModalFormView):
form_class = project_forms.UpdateNetwork
template_name = 'project/networking/update.html'
#context_object_name = 'network_id'
success_url = reverse_lazy("horizon:project:networking:index")
def get_context_data(self, **kwargs):
context = super(UpdateView, self).get_context_data(**kwargs)
context["network_id"] = self.kwargs['network_id']
return context
def _get_object(self, *args, **kwargs):
if not hasattr(self, "_object"):
network_id = self.kwargs['network_id']
try:
self._object = api.neutron.network_get(self.request,
network_id)
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve network details.')
exceptions.handle(self.request, msg, redirect=redirect)
return self._object
def get_initial(self):
network = self._get_object()
return {'network_id': network['id'],
'tenant_id': network['tenant_id'],
'name': network['name'],
'admin_state': network['admin_state_up']}
class DetailView(tables.MultiTableView):
table_classes = (subnet_tables.SubnetsTable, port_tables.PortsTable)
template_name = 'project/networking/detail.html'
failure_url = reverse_lazy('horizon:project:networking:index')
def get_subnets_data(self):
try:
network = self._get_data()
subnets = api.neutron.subnet_list(self.request,
network_id=network.id)
except Exception:
subnets = []
msg = _('Subnet list can not be retrieved.')
exceptions.handle(self.request, msg)
for s in subnets:
s.set_id_as_name_if_empty()
return subnets
def get_ports_data(self):
try:
network_id = self.kwargs['network_id']
ports = api.neutron.port_list(self.request, network_id=network_id)
except Exception:
ports = []
msg = _('Port list can not be retrieved.')
exceptions.handle(self.request, msg)
for p in ports:
p.set_id_as_name_if_empty()
return ports
def _get_data(self):
if not hasattr(self, "_network"):
try:
network_id = self.kwargs['network_id']
network = api.neutron.network_get(self.request, network_id)
network.set_id_as_name_if_empty(length=0)
except Exception:
msg = _('Unable to retrieve details for network "%s".') \
% (network_id)
exceptions.handle(self.request, msg, redirect=self.failure_url)
self._network = network
return self._network
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
context["network"] = self._get_data()
return context
class ModifyPolicyView(workflows.WorkflowView):
workflow_class = project_workflows.UpdateNetworkAttachedPolicies
#context_object_name = 'network'
def get_context_data(self, **kwargs):
context = super(ModifyPolicyView, self).get_context_data(**kwargs)
context["network_id"] = self.kwargs['network_id']
network = self.get_object()
context["name"] = network.name
return context
def get_object(self, *args, **kwargs):
if not hasattr(self, "_object"):
network_id = self.kwargs['network_id']
try:
self._object = api.neutron.network_get(self.request,
network_id)
except Exception:
redirect = reverse("horizon:project:networking:index")
msg = _('Unable to retrieve network details.')
exceptions.handle(self.request, msg, redirect=redirect)
return self._object
def get_initial(self):
initial = super(ModifyPolicyView, self).get_initial()
network = self.get_object()
initial.update({'network_id': self.kwargs['network_id'],
'tenant_id': network['tenant_id'],
'name': network['name']})
msg = _('get_initial net %s') % str(initial)
LOG.error(msg)
return initial
| apache-2.0 | 5,272,953,729,970,121,000 | 37.070175 | 93 | 0.634101 | false |
datawire/telepresence | telepresence/cli.py | 1 | 17399 | # Copyright 2018 Datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
import webbrowser
from contextlib import contextmanager
from pathlib import Path
from subprocess import check_output
from traceback import format_exc
from typing import Any, Dict, Iterator, List, Optional, Set, Tuple, Union
from urllib.parse import quote_plus
import telepresence
from telepresence.runner import BackgroundProcessCrash, Runner
from telepresence.utilities import dumb_print, random_name
class PortMapping(object):
"""Maps local ports to listen to remote exposed ports."""
def __init__(self) -> None:
self._mapping = {} # type: Dict[int,int]
@classmethod
def parse(cls, port_strings: List[str]) -> "PortMapping":
"""Parse list of 'port' or 'local_port:remote_port' to PortMapping."""
result = PortMapping()
for port_string in port_strings:
if ":" in port_string:
local_port, remote_port = map(int, port_string.split(":"))
else:
local_port, remote_port = int(port_string), int(port_string)
result._mapping[local_port] = remote_port
return result
def merge_automatic_ports(self, ports: List[int]) -> None:
"""
Merge a list of ports to the existing ones.
The existing ones will win if the port is already there.
"""
remote = self.remote()
for port in ports:
if port in remote:
continue
self._mapping[port] = port
def remote(self) -> Set[int]:
"""Return set of remote ports."""
return set(self._mapping.values())
def local_to_remote(self) -> Set[Tuple[int, int]]:
"""Return set of pairs of local, remote ports."""
return set(self._mapping.items())
def has_privileged_ports(self) -> bool:
"""
Return true if any remote port is privileged (< 1024)
"""
return any([p < 1024 for p in self.remote()])
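# A quick sketch of how PortMapping behaves (port numbers are
# hypothetical):
#   mapping = PortMapping.parse(["8080:80", "9090"])
#   mapping.merge_automatic_ports([9090, 3000])
#   assert mapping.local_to_remote() == {(8080, 80), (9090, 9090), (3000, 3000)}
#   assert mapping.has_privileged_ports()  # remote port 80 is < 1024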
def safe_output(args: List[str]) -> str:
"""
Capture output from a command but try to avoid crashing
:param args: Command to run
:return: Output from the command
"""
try:
return str(check_output(args), "utf-8").strip().replace("\n", " // ")
except Exception as e:
return "(error: {})".format(e)
def report_crash(error: Any, log_path: str, logs: str) -> None:
print(
"\nLooks like there's a bug in our code. Sorry about that!\n\n" +
error + "\n"
)
if log_path != "-":
log_ref = " (see {} for the complete logs):".format(log_path)
else:
log_ref = ""
if "\n" in logs:
print(
"Here are the last few lines of the logfile" + log_ref + "\n\n" +
"\n".join(logs.splitlines()[-12:]) + "\n"
)
report = "no"
if sys.stdout.isatty():
message = (
"Would you like to file an issue in our issue tracker?"
" You'll be able to review and edit before anything is"
" posted to the public."
" We'd really appreciate the help improving our product. [Y/n]: "
)
try:
report = input(message).lower()[:1]
except EOFError:
print("(EOF)")
if report in ("y", ""):
url = "https://github.com/datawire/telepresence/issues/new?body="
body = quote_plus(
BUG_REPORT_TEMPLATE.format(
sys.argv,
telepresence.__version__,
sys.version,
safe_output(["kubectl", "version", "--short"]),
safe_output(["oc", "version"]),
safe_output(["uname", "-a"]),
error,
logs[-1000:],
)[:4000]
) # Overly long URLs won't work
webbrowser.open_new(url + body)
@contextmanager
def crash_reporting(runner: Optional[Runner] = None) -> Iterator[None]:
"""
Decorator that catches unexpected errors
"""
try:
yield
except KeyboardInterrupt:
if runner is not None:
show = runner.show
else:
show = dumb_print
show("Keyboard interrupt (Ctrl-C/Ctrl-Break) pressed")
raise SystemExit(0)
except Exception as exc:
if isinstance(exc, BackgroundProcessCrash):
error = exc.details
else:
error = format_exc()
logs = "Not available"
log_path = "-"
if runner is not None:
logs = runner.read_logs()
log_path = runner.logfile_path
runner.write("CRASH: {}".format(exc))
runner.write(error)
runner.write("(calling crash reporter...)")
report_crash(error, log_path, logs)
raise SystemExit(1)
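# Intended usage is as a context manager around risky phases, e.g.
# (hypothetical call site):
#   with crash_reporting(runner):
#       main_loop(runner)
# KeyboardInterrupt exits cleanly; any other exception is logged and
# offered to the user as a pre-filled GitHub issue before exiting.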
def path_or_bool(value: str) -> Union[Path, bool]:
"""Parse value as a Path or a boolean"""
path = Path(value)
if path.is_absolute():
return path
value = value.lower()
if value in ("true", "on", "yes", "1"):
return True
if value in ("false", "off", "no", "0"):
return False
raise argparse.ArgumentTypeError(
"Value must be true, false, or an absolute filesystem path"
)
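# For example: path_or_bool("/tmp/tel") -> Path("/tmp/tel"),
# path_or_bool("yes") -> True, path_or_bool("off") -> False, and a
# relative string such as "tmp/tel" raises ArgumentTypeError.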
def absolute_path(value: str) -> Path:
"""Parse value as a Path or a boolean"""
path = Path(value)
if path.is_absolute():
return path
raise argparse.ArgumentTypeError(
"Value must be an absolute filesystem path"
)
def parse_args(in_args: Optional[List[str]] = None) -> argparse.Namespace:
"""Create a new ArgumentParser and parse sys.argv."""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
allow_abbrev=False, # can make adding changes not backwards compatible
description=(
"Telepresence: local development proxied to a remote Kubernetes "
"cluster.\n\n"
"Documentation: https://telepresence.io\n"
"Real-time help: https://d6e.co/slack\n"
"Issue tracker: https://github.com/datawire/telepresence/issues\n"
"\n" + HELP_EXAMPLES + "\n\n"
)
)
parser.add_argument(
'--version', action='version', version=telepresence.__version__
)
parser.add_argument(
"--verbose",
action='store_true',
help="Enables verbose logging for troubleshooting."
)
parser.add_argument(
"--logfile",
default="./telepresence.log",
help=(
"The path to write logs to. '-' means stdout, "
"default is './telepresence.log'."
)
)
parser.add_argument(
"--method",
"-m",
choices=["inject-tcp", "vpn-tcp", "container"],
help=(
"'inject-tcp': inject process-specific shared "
"library that proxies TCP to the remote cluster.\n"
"'vpn-tcp': all local processes can route TCP "
"traffic to the remote cluster. Requires root.\n"
"'container': used with --docker-run.\n"
"\n"
"Default is 'vpn-tcp', or 'container' when --docker-run is used.\n"
"\nFor more details see "
"https://telepresence.io/reference/methods.html"
)
)
group_deployment = parser.add_mutually_exclusive_group()
group_deployment.add_argument(
'--new-deployment',
"-n",
metavar="DEPLOYMENT_NAME",
dest="new_deployment",
help=(
"Create a new Deployment in Kubernetes where the "
"datawire/telepresence-k8s image will run. It will be deleted "
"on exit. If no deployment option is specified this will be "
" used by default, with a randomly generated name."
)
)
group_deployment.add_argument(
"--swap-deployment",
"-s",
dest="swap_deployment",
metavar="DEPLOYMENT_NAME[:CONTAINER]",
help=(
"Swap out an existing deployment with the Telepresence proxy, "
"swap back on exit. If there are multiple containers in the pod "
"then add the optional container name to indicate which container"
" to use."
)
)
group_deployment.add_argument(
"--deployment",
"-d",
metavar="EXISTING_DEPLOYMENT_NAME",
help=(
"The name of an existing Kubernetes Deployment where the " +
"datawire/telepresence-k8s image is already running."
)
)
parser.add_argument(
"--context",
default=None,
help=(
"The Kubernetes context to use. Defaults to current kubectl"
" context."
)
)
parser.add_argument(
"--namespace",
default=None,
help=(
"The Kubernetes namespace to use. Defaults to kubectl's default"
" for the current context, which is usually 'default'."
)
)
parser.add_argument(
"--serviceaccount",
dest="service_account",
default=None,
help=(
"The Kubernetes service account to use. Sets the value for a new"
" deployment or overrides the value for a swapped deployment."
)
)
parser.add_argument(
"--expose",
action='append',
metavar="PORT[:REMOTE_PORT]",
default=[],
help=(
"Port number that will be exposed to Kubernetes in the Deployment."
" Should match port exposed in the existing Deployment if using "
"--deployment or --swap-deployment. By default local port and "
"remote port are the same; if you want to listen on port 8080 "
"locally but be exposed as port 80 in Kubernetes you can do "
"'--expose 8080:80'."
)
)
parser.add_argument(
"--to-pod",
action="append",
metavar="PORT",
type=int,
default=[],
help=(
"Access localhost:PORT on other containers in the swapped "
"deployment's pod from your host or local container. For example, "
"use this to reach proxy/helper containers in the pod with "
"--swap-deployment."
)
)
parser.add_argument(
"--from-pod",
action="append",
metavar="PORT",
type=int,
default=[],
help=(
"Allow access to localhost:PORT on your host or local container "
"from other containers in the swapped deployment's pod. For "
"example, use this to let an adapter container forward requests "
"to your swapped deployment."
)
)
parser.add_argument(
"--container-to-host",
action="append",
metavar="CONTAINER_PORT[:HOST_PORT]",
default=[],
help=(
"For the container method, listen on localhost:CONTAINER_PORT in"
" the container and forward connections to localhost:HOST_PORT on"
" the host running Telepresence. Useful for allowing code running"
" in the container to connect to an IDE or debugger running on the"
" host."
)
)
parser.add_argument(
"--also-proxy",
metavar="CLOUD_HOSTNAME",
dest="also_proxy",
action='append',
default=[],
help=(
"If you are using --method=vpn-tcp, use this to add additional "
"remote IPs, IP ranges, or hostnames to proxy. Kubernetes service "
"and pods are proxied automatically, so you only need to list "
"cloud resources, e.g. the hostname of a AWS RDS. "
"When using --method=inject-tcp "
"this option is unnecessary as all outgoing communication in "
"the run subprocess will be proxied."
)
)
parser.add_argument(
"--local-cluster",
action='store_true',
help=(
"If you are using --method=vpn-tcp with a local cluster (one that"
" is running on the same computer as Telepresence) and you"
" experience DNS loops or loss of Internet connectivity while"
" Telepresence is running, use this flag to enable an internal"
" workaround that may help."
)
)
mount_group = parser.add_mutually_exclusive_group()
mount_group.add_argument(
"--docker-mount",
type=absolute_path,
metavar="PATH",
dest="docker_mount",
default=None,
help=(
"The absolute path for the root directory where volumes will be "
"mounted, $TELEPRESENCE_ROOT. "
"Requires --method container."
)
)
mount_group.add_argument(
"--mount",
type=path_or_bool,
metavar="PATH_OR_BOOLEAN",
dest="mount",
default=True,
help=(
"The absolute path for the root directory where volumes will be "
"mounted, $TELEPRESENCE_ROOT. "
"Use \"true\" to have Telepresence pick a random mount point "
"under /tmp (default). "
"Use \"false\" to disable filesystem mounting entirely."
)
)
parser.add_argument(
"--env-json",
metavar="FILENAME",
default=None,
help="Also emit the remote environment to a file as a JSON blob."
)
parser.add_argument(
"--env-file",
metavar="FILENAME",
default=None,
help=(
"Also emit the remote environment to an env file in Docker "
"Compose format. "
"See https://docs.docker.com/compose/env-file/ for more "
"information on the limitations of this format."
)
)
group = parser.add_mutually_exclusive_group()
group.add_argument(
"--run-shell",
dest="runshell",
action="store_true",
help="Run a local shell that will be proxied to/from Kubernetes.",
)
group.add_argument(
"--run",
metavar=("COMMAND", "ARG"),
dest="run",
nargs=argparse.REMAINDER,
help=(
"Run the specified command arguments, e.g. "
"'--run python myapp.py'."
)
)
group.add_argument(
"--docker-run",
metavar="DOCKER_RUN_ARG",
dest="docker_run",
nargs=argparse.REMAINDER,
help=(
"Run a Docker container, by passing the arguments to 'docker run',"
" e.g. '--docker-run -i -t ubuntu:16.04 /bin/bash'. "
"Requires --method container."
)
)
args = parser.parse_args(in_args)
# Fill in defaults:
if args.method is None:
if args.docker_run is not None:
args.method = "container"
else:
args.method = "vpn-tcp"
if args.deployment is None and args.new_deployment is None and (
args.swap_deployment is None
):
args.new_deployment = random_name()
if args.docker_mount:
args.mount = False
if args.method == "container" and args.docker_run is None:
raise SystemExit(
"'--docker-run' is required when using '--method container'."
)
if args.docker_run is not None and args.method != "container":
raise SystemExit(
"'--method container' is required when using '--docker-run'."
)
if args.docker_mount is not None and args.method != "container":
raise SystemExit(
"'--method container' is required when using '--docker-mount'."
)
args.expose = PortMapping.parse(args.expose)
args.container_to_host = PortMapping.parse(args.container_to_host)
return args
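# A minimal sketch of driving the parser directly (flags are
# hypothetical):
#   args = parse_args(["--swap-deployment", "myserver",
#                      "--expose", "9090:80", "--run-shell"])
#   assert args.method == "vpn-tcp"
#   assert (9090, 80) in args.expose.local_to_remote()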
HELP_EXAMPLES = """\
== Examples ==
Send a HTTP query to Kubernetes Service called 'myservice' listening on port \
8080:
$ telepresence --run curl http://myservice:8080/
Replace an existing Deployment 'myserver' listening on port 9090 with a local \
process listening on port 9090:
$ telepresence --swap-deployment myserver --expose 9090 \
--run python3 -m http.server 9090
Use a different local port than the remote port:
$ telepresence --swap-deployment myserver --expose 9090:80 \
--run python3 -m http.server 9090
Run a Docker container instead of a local process:
$ telepresence --swap-deployment myserver --expose 80 \
--docker-run -i -t nginx:latest
== Detailed usage ==
"""
BUG_REPORT_TEMPLATE = u"""\
### What were you trying to do?
(please tell us)
### What did you expect to happen?
(please tell us)
### What happened instead?
(please tell us - the traceback is automatically included, see below.
use https://gist.github.com to pass along full telepresence.log)
### Automatically included information
Command line: `{}`
Version: `{}`
Python version: `{}`
kubectl version: `{}`
oc version: `{}`
OS: `{}`
```
{}
```
Logs:
```
{}
```
"""
| apache-2.0 | 8,950,086,370,324,174,000 | 31.042357 | 79 | 0.579918 | false |
mindbody/API-Examples | SDKs/Python/swagger_client/models/get_class_payroll_response.py | 1 | 4756 | # coding: utf-8
"""
MINDBODY Public API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.class_payroll_event import ClassPayrollEvent # noqa: F401,E501
from swagger_client.models.pagination_response import PaginationResponse # noqa: F401,E501
class GetClassPayrollResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'pagination_response': 'PaginationResponse',
'class_payroll': 'list[ClassPayrollEvent]'
}
attribute_map = {
'pagination_response': 'PaginationResponse',
'class_payroll': 'ClassPayroll'
}
def __init__(self, pagination_response=None, class_payroll=None): # noqa: E501
"""GetClassPayrollResponse - a model defined in Swagger""" # noqa: E501
self._pagination_response = None
self._class_payroll = None
self.discriminator = None
if pagination_response is not None:
self.pagination_response = pagination_response
if class_payroll is not None:
self.class_payroll = class_payroll
@property
def pagination_response(self):
"""Gets the pagination_response of this GetClassPayrollResponse. # noqa: E501
Contains information about the pagination used. # noqa: E501
:return: The pagination_response of this GetClassPayrollResponse. # noqa: E501
:rtype: PaginationResponse
"""
return self._pagination_response
@pagination_response.setter
def pagination_response(self, pagination_response):
"""Sets the pagination_response of this GetClassPayrollResponse.
Contains information about the pagination used. # noqa: E501
:param pagination_response: The pagination_response of this GetClassPayrollResponse. # noqa: E501
:type: PaginationResponse
"""
self._pagination_response = pagination_response
@property
def class_payroll(self):
"""Gets the class_payroll of this GetClassPayrollResponse. # noqa: E501
Contains the class payroll events. # noqa: E501
:return: The class_payroll of this GetClassPayrollResponse. # noqa: E501
:rtype: list[ClassPayrollEvent]
"""
return self._class_payroll
@class_payroll.setter
def class_payroll(self, class_payroll):
"""Sets the class_payroll of this GetClassPayrollResponse.
Contains the class payroll events. # noqa: E501
:param class_payroll: The class_payroll of this GetClassPayrollResponse. # noqa: E501
:type: list[ClassPayrollEvent]
"""
self._class_payroll = class_payroll
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(GetClassPayrollResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GetClassPayrollResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| bsd-2-clause | -2,032,422,677,886,335,000 | 31.135135 | 119 | 0.609336 | false |
Pushjet/Pushjet-Server-Api | controllers/subscription.py | 1 | 1428 | from flask import Blueprint, jsonify
from utils import Error, has_service, has_uuid, queue_zmq_message
from shared import db
from models import Subscription
from json import dumps as json_encode
from config import zeromq_relay_uri
subscription = Blueprint('subscription', __name__)
@subscription.route('/subscription', methods=['POST'])
@has_uuid
@has_service
def subscription_post(client, service):
exists = Subscription.query.filter_by(device=client).filter_by(service=service).first() is not None
if exists:
return Error.DUPLICATE_LISTEN
subscription_new = Subscription(client, service)
db.session.add(subscription_new)
db.session.commit()
if zeromq_relay_uri:
queue_zmq_message(json_encode({'subscription': subscription_new.as_dict()}))
return jsonify({'service': service.as_dict()})
@subscription.route('/subscription', methods=['GET'])
@has_uuid
def subscription_get(client):
subscriptions = Subscription.query.filter_by(device=client).all()
return jsonify({'subscriptions': [_.as_dict() for _ in subscriptions]})
@subscription.route('/subscription', methods=['DELETE'])
@has_uuid
@has_service
def subscription_delete(client, service):
l = Subscription.query.filter_by(device=client).filter_by(service=service).first()
if l is not None:
db.session.delete(l)
db.session.commit()
return Error.NONE
return Error.NOT_SUBSCRIBED
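# Example request against this endpoint (the uuid and service secret are
# hypothetical; the has_uuid/has_service decorators resolve them into the
# `client` and `service` arguments):
#   curl -X DELETE http://localhost:5000/subscription \
#        -d "uuid=<device-uuid>" -d "service=<service-secret>"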
| bsd-2-clause | 6,787,025,586,137,599,000 | 30.733333 | 103 | 0.721989 | false |
HyShai/youtube-dl | youtube_dl/downloader/f4m.py | 1 | 12552 | from __future__ import unicode_literals
import base64
import io
import itertools
import os
import time
import xml.etree.ElementTree as etree
from .common import FileDownloader
from .http import HttpFD
from ..compat import (
compat_urlparse,
)
from ..utils import (
struct_pack,
struct_unpack,
format_bytes,
encodeFilename,
sanitize_open,
xpath_text,
)
class FlvReader(io.BytesIO):
"""
Reader for Flv files
The file format is documented in https://www.adobe.com/devnet/f4v.html
"""
# Utility functions for reading numbers and strings
def read_unsigned_long_long(self):
return struct_unpack('!Q', self.read(8))[0]
def read_unsigned_int(self):
return struct_unpack('!I', self.read(4))[0]
def read_unsigned_char(self):
return struct_unpack('!B', self.read(1))[0]
def read_string(self):
res = b''
while True:
char = self.read(1)
if char == b'\x00':
break
res += char
return res
def read_box_info(self):
"""
Read a box and return the info as a tuple: (box_size, box_type, box_data)
"""
real_size = size = self.read_unsigned_int()
box_type = self.read(4)
header_end = 8
if size == 1:
real_size = self.read_unsigned_long_long()
header_end = 16
return real_size, box_type, self.read(real_size - header_end)
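    # Box layout is [size:4][type:4][payload]; a size field of 1 means a
    # 64-bit extended size follows the type. For example (with `blob`
    # holding hypothetical bootstrap bytes):
    #   size, box_type, data = FlvReader(blob).read_box_info()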
def read_asrt(self):
# version
self.read_unsigned_char()
# flags
self.read(3)
quality_entry_count = self.read_unsigned_char()
# QualityEntryCount
for i in range(quality_entry_count):
self.read_string()
segment_run_count = self.read_unsigned_int()
segments = []
for i in range(segment_run_count):
first_segment = self.read_unsigned_int()
fragments_per_segment = self.read_unsigned_int()
segments.append((first_segment, fragments_per_segment))
return {
'segment_run': segments,
}
def read_afrt(self):
# version
self.read_unsigned_char()
# flags
self.read(3)
# time scale
self.read_unsigned_int()
quality_entry_count = self.read_unsigned_char()
# QualitySegmentUrlModifiers
for i in range(quality_entry_count):
self.read_string()
fragments_count = self.read_unsigned_int()
fragments = []
for i in range(fragments_count):
first = self.read_unsigned_int()
first_ts = self.read_unsigned_long_long()
duration = self.read_unsigned_int()
if duration == 0:
discontinuity_indicator = self.read_unsigned_char()
else:
discontinuity_indicator = None
fragments.append({
'first': first,
'ts': first_ts,
'duration': duration,
'discontinuity_indicator': discontinuity_indicator,
})
return {
'fragments': fragments,
}
def read_abst(self):
# version
self.read_unsigned_char()
# flags
self.read(3)
self.read_unsigned_int() # BootstrapinfoVersion
# Profile,Live,Update,Reserved
self.read(1)
# time scale
self.read_unsigned_int()
# CurrentMediaTime
self.read_unsigned_long_long()
# SmpteTimeCodeOffset
self.read_unsigned_long_long()
self.read_string() # MovieIdentifier
server_count = self.read_unsigned_char()
# ServerEntryTable
for i in range(server_count):
self.read_string()
quality_count = self.read_unsigned_char()
# QualityEntryTable
for i in range(quality_count):
self.read_string()
# DrmData
self.read_string()
# MetaData
self.read_string()
segments_count = self.read_unsigned_char()
segments = []
for i in range(segments_count):
box_size, box_type, box_data = self.read_box_info()
assert box_type == b'asrt'
segment = FlvReader(box_data).read_asrt()
segments.append(segment)
fragments_run_count = self.read_unsigned_char()
fragments = []
for i in range(fragments_run_count):
box_size, box_type, box_data = self.read_box_info()
assert box_type == b'afrt'
fragments.append(FlvReader(box_data).read_afrt())
return {
'segments': segments,
'fragments': fragments,
}
def read_bootstrap_info(self):
total_size, box_type, box_data = self.read_box_info()
assert box_type == b'abst'
return FlvReader(box_data).read_abst()
def read_bootstrap_info(bootstrap_bytes):
return FlvReader(bootstrap_bytes).read_bootstrap_info()
def build_fragments_list(boot_info):
""" Return a list of (segment, fragment) for each fragment in the video """
res = []
segment_run_table = boot_info['segments'][0]
fragment_run_entry_table = boot_info['fragments'][0]['fragments']
first_frag_number = fragment_run_entry_table[0]['first']
fragments_counter = itertools.count(first_frag_number)
for segment, fragments_count in segment_run_table['segment_run']:
for _ in range(fragments_count):
res.append((segment, next(fragments_counter)))
return res
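# For instance (hypothetical bootstrap contents): a segment run table of
# [(1, 3)] with the first fragment numbered 1 yields
# [(1, 1), (1, 2), (1, 3)], i.e. three fragments in segment 1.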
def write_unsigned_int(stream, val):
stream.write(struct_pack('!I', val))
def write_unsigned_int_24(stream, val):
stream.write(struct_pack('!I', val)[1:])
def write_flv_header(stream):
"""Writes the FLV header to stream"""
# FLV header
stream.write(b'FLV\x01')
stream.write(b'\x05')
stream.write(b'\x00\x00\x00\x09')
stream.write(b'\x00\x00\x00\x00')
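# The header written above is: the "FLV" signature plus version 1, the
# flags byte 0x05 (audio and video present), the 9-byte header size, and
# the initial PreviousTagSize0 field of zero.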
def write_metadata_tag(stream, metadata):
"""Writes optional metadata tag to stream"""
SCRIPT_TAG = b'\x12'
FLV_TAG_HEADER_LEN = 11
if metadata:
stream.write(SCRIPT_TAG)
write_unsigned_int_24(stream, len(metadata))
stream.write(b'\x00\x00\x00\x00\x00\x00\x00')
stream.write(metadata)
write_unsigned_int(stream, FLV_TAG_HEADER_LEN + len(metadata))
def _add_ns(prop):
return '{http://ns.adobe.com/f4m/1.0}%s' % prop
class HttpQuietDownloader(HttpFD):
def to_screen(self, *args, **kargs):
pass
class F4mFD(FileDownloader):
"""
A downloader for f4m manifests or AdobeHDS.
"""
def _get_unencrypted_media(self, doc):
media = doc.findall(_add_ns('media'))
if not media:
self.report_error('No media found')
for e in (doc.findall(_add_ns('drmAdditionalHeader')) +
doc.findall(_add_ns('drmAdditionalHeaderSet'))):
# If id attribute is missing it's valid for all media nodes
# without drmAdditionalHeaderId or drmAdditionalHeaderSetId attribute
if 'id' not in e.attrib:
self.report_error('Missing ID in f4m DRM')
media = list(filter(lambda e: 'drmAdditionalHeaderId' not in e.attrib and
'drmAdditionalHeaderSetId' not in e.attrib,
media))
if not media:
self.report_error('Unsupported DRM')
return media
def real_download(self, filename, info_dict):
man_url = info_dict['url']
requested_bitrate = info_dict.get('tbr')
self.to_screen('[download] Downloading f4m manifest')
manifest = self.ydl.urlopen(man_url).read()
self.report_destination(filename)
http_dl = HttpQuietDownloader(
self.ydl,
{
'continuedl': True,
'quiet': True,
'noprogress': True,
'ratelimit': self.params.get('ratelimit', None),
'test': self.params.get('test', False),
}
)
doc = etree.fromstring(manifest)
formats = [(int(f.attrib.get('bitrate', -1)), f)
for f in self._get_unencrypted_media(doc)]
if requested_bitrate is None:
# get the best format
formats = sorted(formats, key=lambda f: f[0])
rate, media = formats[-1]
else:
rate, media = list(filter(
lambda f: int(f[0]) == requested_bitrate, formats))[0]
base_url = compat_urlparse.urljoin(man_url, media.attrib['url'])
bootstrap_node = doc.find(_add_ns('bootstrapInfo'))
if bootstrap_node.text is None:
bootstrap_url = compat_urlparse.urljoin(
base_url, bootstrap_node.attrib['url'])
bootstrap = self.ydl.urlopen(bootstrap_url).read()
else:
bootstrap = base64.b64decode(bootstrap_node.text)
metadata_node = media.find(_add_ns('metadata'))
if metadata_node is not None:
metadata = base64.b64decode(metadata_node.text)
else:
metadata = None
boot_info = read_bootstrap_info(bootstrap)
fragments_list = build_fragments_list(boot_info)
if self.params.get('test', False):
# We only download the first fragment
fragments_list = fragments_list[:1]
total_frags = len(fragments_list)
# For some akamai manifests we'll need to add a query to the fragment url
akamai_pv = xpath_text(doc, _add_ns('pv-2.0'))
tmpfilename = self.temp_name(filename)
(dest_stream, tmpfilename) = sanitize_open(tmpfilename, 'wb')
write_flv_header(dest_stream)
write_metadata_tag(dest_stream, metadata)
# This dict stores the download progress, it's updated by the progress
# hook
state = {
'downloaded_bytes': 0,
'frag_counter': 0,
}
start = time.time()
def frag_progress_hook(status):
frag_total_bytes = status.get('total_bytes', 0)
estimated_size = (state['downloaded_bytes'] +
(total_frags - state['frag_counter']) * frag_total_bytes)
if status['status'] == 'finished':
state['downloaded_bytes'] += frag_total_bytes
state['frag_counter'] += 1
progress = self.calc_percent(state['frag_counter'], total_frags)
byte_counter = state['downloaded_bytes']
else:
frag_downloaded_bytes = status['downloaded_bytes']
byte_counter = state['downloaded_bytes'] + frag_downloaded_bytes
frag_progress = self.calc_percent(frag_downloaded_bytes,
frag_total_bytes)
progress = self.calc_percent(state['frag_counter'], total_frags)
progress += frag_progress / float(total_frags)
eta = self.calc_eta(start, time.time(), estimated_size, byte_counter)
self.report_progress(progress, format_bytes(estimated_size),
status.get('speed'), eta)
http_dl.add_progress_hook(frag_progress_hook)
frags_filenames = []
for (seg_i, frag_i) in fragments_list:
name = 'Seg%d-Frag%d' % (seg_i, frag_i)
url = base_url + name
if akamai_pv:
url += '?' + akamai_pv.strip(';')
frag_filename = '%s-%s' % (tmpfilename, name)
success = http_dl.download(frag_filename, {'url': url})
if not success:
return False
with open(frag_filename, 'rb') as down:
down_data = down.read()
reader = FlvReader(down_data)
while True:
_, box_type, box_data = reader.read_box_info()
if box_type == b'mdat':
dest_stream.write(box_data)
break
frags_filenames.append(frag_filename)
dest_stream.close()
self.report_finish(format_bytes(state['downloaded_bytes']), time.time() - start)
self.try_rename(tmpfilename, filename)
for frag_file in frags_filenames:
os.remove(frag_file)
fsize = os.path.getsize(encodeFilename(filename))
self._hook_progress({
'downloaded_bytes': fsize,
'total_bytes': fsize,
'filename': filename,
'status': 'finished',
})
return True
| unlicense | -2,441,628,705,126,840,000 | 32.832884 | 88 | 0.565249 | false |
makson96/free-engineer | games/doom3/game.py | 2 | 2138 | #!/usr/bin/env python3
#-*- coding: utf-8 -*-
##This software is available to you under the terms of the GPL-3, see "/usr/share/common-licenses/GPL-3".
##Copyright:
##- Tomasz Makarewicz ([email protected])
import os, shutil
from subprocess import check_output
recultis_dir = os.getenv("HOME") + "/.recultis/"
self_dir = os.path.dirname(os.path.abspath(__file__)) + "/"
install_dir = recultis_dir + "doom3/"
desk_dir = str(check_output(['xdg-user-dir', 'DESKTOP']))[2:-3]
full_name = "Doom 3 BFG on RBDOOM-3-BFG engine"
description = """Doom 3: BFG is the remaster of classic Doom 3 with all expansions. It
features enhanced graphic and audio to original game. Doom 3 is one of
the best FPS games of all time. Unfortunately, it was never released
on Linux, but game engine was release open source. With many
enhancements and new features, game is now available on Linux and it
is better than ever before. Recultis uses RBDOOM-3-BFG flavor of the
engine and requires game to be present in your Steam Library.
"""
shops = ["steam"]
s_appid = "208200"
steam_link = "http://store.steampowered.com/app/"+s_appid+"/"
screenshot_path = self_dir + "../../assets/html/rbdoom3-screen.png"
icon1_name = "rbdoom-3-bfg.png"
icon_list = [icon1_name]
engine = "rbdoom-3-bfg"
runtime_version = 2
env_var = "LD_LIBRARY_PATH=$HOME/.recultis/runtime/recultis" + str(runtime_version) + ":$HOME/.recultis/runtime/recultis" + str(runtime_version) + "/custom"
launcher1_cmd = "bash -c 'cd $HOME/.recultis/doom3/; " + env_var + " ./RBDoom3BFG'"
launcher_cmd_list = [["Doom3 BFG", launcher1_cmd]]
launcher1_text = """[Desktop Entry]
Type=Application
Name=Doom 3 BFG
Comment=Play Doom 3 BFG
Exec=""" + launcher1_cmd + """
Icon=""" + icon1_name + """
Categories=Game;
Terminal=false
"""
launcher_list = [["doom3.desktop", launcher1_text]]
uninstall_files_list = []
uninstall_dir_list = []
def prepare_engine():
print("Prepare game engine")
try:
os.remove(install_dir + "RBDoom3BFG")
shutil.rmtree(install_dir + "lib")
except:
pass
shutil.copy(recultis_dir + "tmp/rbdoom-3-bfg/RBDoom3BFG", install_dir + "RBDoom3BFG")
print("Game engine ready")
| gpl-3.0 | 4,665,695,567,504,706,000 | 34.633333 | 156 | 0.707203 | false |
alirizakeles/tendenci | tendenci/apps/base/management/commands/upload_addon.py | 1 | 1366 | from optparse import make_option
import os
import zipfile
from django.conf import settings
from django.core.files.storage import default_storage
from django.core.management.base import BaseCommand
class Command(BaseCommand):
"""
Addon upload process.
Usage:
example:
python manage.py upload_addon --zip_path /uploads/addons/addon.zip
"""
option_list = BaseCommand.option_list + (
make_option(
'--zip_path',
action='store',
dest='zip_path',
default='',
help='Path to the zip file'),
)
def handle(self, *args, **options):
path = options['zip_path']
addon_zip = zipfile.ZipFile(default_storage.open(path))
addon_name = addon_zip.namelist()[0]
addon_name = addon_name.strip('/')
addon_zip.extractall(settings.SITE_ADDONS_PATH)
print "Updating tendenci site"
os.system('python manage.py syncdb --noinput')
os.system('python manage.py migrate %s --noinput' % addon_name)
os.system('python manage.py update_settings %s' % addon_name)
os.system('python manage.py collectstatic --noinput')
print "Restarting Server"
os.system('sudo reload %s' % os.path.basename(settings.PROJECT_ROOT))
print 'Deleting zip file'
default_storage.delete(path)
| gpl-3.0 | 7,041,375,665,213,064,000 | 28.695652 | 77 | 0.627379 | false |
xS1ender/CytaPWN | cytapwn.py | 1 | 7318 | #!/usr/bin/python
#!/usr/bin/python2
#!/usr/bin/python3
# +-------------------------------------------------------------------------------------------------------------+
# | ZTE ZXHN H267N Router with <= V1.0.01_CYTA_A01 - RCE Root Exploit |
# | Copyright (c) 2017 Kropalis Thomas <[email protected]> |
# +-------------------------------------------------------------------------------------------------------------+
# | This python script connects to ZTE ZXHN H267N running CYTA's software through telnet |
# | using the current credentials, and changes/adds/removes data and features. This script |
# | is tested mostly on a machine running Kali Linux 2017.1 and Windows 10 Prof Edition. |
# | UPDATE (12/6/17): CytaPWN will no longer support Windows; This might change in the future. |
# +-------------------------------------------------------------------------------------------------------------+
# | Tested on ZTE: |
# | [*] Model name : ZTE ZXHN H267N |
# | [*] Software Version : V1.0.0T6P1_CYTA |
# | [*] Hardware Version : V1.3 |
# | [*] Bootloader Version : V1.0.0 |
# +-------------------------------------------------------------------------------------------------------------+
# | ztexploit.py tested on Kali Linux 2017.1 (amd64) |
# +-------------------------------------------------------------------------------------------------------------+
# | TODO: Add more features - including changing WPA Key and SSID Name, full control |
# | over network's devices, compatibility for Windows. |
# +-------------------------------------------------------------------------------------------------------------+
import urllib, re, time, os, sys, requests
import urllib2, commands, telnetlib, imp
from bs4 import BeautifulSoup as bs
# -------------------------------------------------
# See if BeautifulSoup is installed, continue if
# it is and install it through pip if not
# -------------------------------------------------
# try:
#     imp.find_module('bs4')
#     from bs4 import BeautifulSoup as bs
# except ImportError:
#     os.system('pip install beautifulsoup4')
# -------------------------------------------------
# Generic (hidden) 'root' account credentials.
# Hint: Use these credentials to login on Telnet
# -------------------------------------------------
username = "CytaAdmRes"
password = "d5l_cyt@_Adm1n"
# --------------------------------------------------
# Payload with root credentials for the router's
# interface. Mostly to grab needed router info.
# --------------------------------------------------
payload = {
'Frm_Username':username,
'Frm_Password':password
}
os.system('clear')
##
RED = '\033[31m'
GREEN = '\033[32m'
RESET = '\033[0;0m'
##
print "+------------------------------------------------------------------+"
print "| ZTE ZXHN H267N with <= V1.0.01_CYTA_A01 - RCE Root Exploit |"
print "| Thomas Kropalis (c) 2017 - <[email protected]> |"
print "+------------------------------------------------------------------+"
try:
targetip = raw_input("\nEnter the address of the ZTE router:\n> ")
    if targetip[:7] != "http://":
        target = "http://" + targetip
    else:
        target = targetip
try:
sys.stdout.write(" [*] Pinging router address...\r")
sys.stdout.flush()
time.sleep(2)
ping_res = urllib.urlopen(target).getcode()
if ping_res == 200:
sys.stdout.write(" ["+GREEN+" OK "+RESET+"]\n")
else:
print("[-] "+RED+"Error"+RESET)
sys.exit()
response = urllib.urlopen(target)
html_data = response.read()
sys.stdout.write(" [*] Retrieving random login token...\r")
sys.stdout.flush()
time.sleep(3)
# Checking for random Login token
Frm_Logintoken = re.findall(r'Frm_Logintoken"\).value = "(.*)";', html_data)
if Frm_Logintoken :
sys.stdout.write(" ["+GREEN+" OK "+RESET+"]\n")
time.sleep(1)
Frm_Logintoken = str(Frm_Logintoken[0])
# Check router information
info = target
r = requests.get(target)
data = r.text
s = bs(data, "lxml")
response = urllib.urlopen(info)
html_data = response.read()
Frm_ModelName = str(s.find_all("span",class_="w250"))#"ZXHN H267N"
if Frm_ModelName :
print " [*] Model Name: "+GREEN+Frm_ModelName+RESET
Frm_SerialNumber = "0"
if Frm_SerialNumber :
print " [*] Serial Number: "+GREEN+Frm_SerialNumber+RESET
Frm_SoftwareVerExtent = "V1.0.0"
if Frm_SoftwareVerExtent :
print " [*] Hardware Version: "+GREEN+Frm_SoftwareVerExtent+RESET
Frm_HardwareVer = "V1.0.0T6P1_CYTA"
if Frm_HardwareVer :
print " [*] Software Version: "+GREEN+Frm_HardwareVer+RESET
Frm_BootVer = "V1.0.0 (Strong guess)"
if Frm_BootVer :
print " [*] Boot Loader Version: "+GREEN+Frm_BootVer+RESET
# Main menu
print"\nWelcome to CytaPWN main menu:"
print" 1. Start FTP Daemon"
print" 2. Initiate a MITM to a connected device"
print" 3. Control and administrate connected devices"
print" 4. Initiate a Telnet connection"
print" 5. About."
print" 6. Quit."
while True:
choice = raw_input("\nEnter your choice: ")
if choice == "5":
print"\n+---------------------------------------------------------------------------+"
print"| 0Day exploit for most Cyta's routers. Developed by Thomas Kropalis. |"
print"| This exploit allows full administrative control over the router and its |"
print"| connected devices. It mostly works on new routers, obtained around 2016. |"
print"+---------------------------------------------------------------------------+"
elif choice == "6":
print"Exiting.."
time.sleep(1)
sys.exit(1)
else:
print("\n["+RED+"-"+RESET+"] Invalid Option. ")
time.sleep(1)
else:
sys.stdout.write(" ["+RED+" FALSE "+RESET+"]\n")
except IOError, e:
print "Failed to connect on "+target
except (KeyboardInterrupt, SystemExit):
print "Exiting.." | apache-2.0 | -4,255,299,614,088,935,400 | 48.120805 | 113 | 0.410768 | false |
wuliming/pcp | src/python/pcp/pmcc.py | 1 | 23035 | """ Convenience Classes building on the base PMAPI extension module """
#
# Copyright (C) 2013-2015 Red Hat
# Copyright (C) 2009-2012 Michael T. Werner
#
# This file is part of the "pcp" module, the python interfaces for the
# Performance Co-Pilot toolkit.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
from sys import stderr
from ctypes import c_int, c_uint, c_char_p, cast, POINTER
from pcp.pmapi import (pmContext, pmResult, pmValueSet, pmValue, pmDesc,
pmErr, pmOptions, timeval)
from cpmapi import (PM_CONTEXT_HOST, PM_CONTEXT_ARCHIVE, PM_INDOM_NULL,
PM_IN_NULL, PM_ID_NULL, PM_SEM_COUNTER, PM_ERR_EOL, PM_TYPE_DOUBLE)
class MetricCore(object):
"""
    Core metric information that can be queried from the PMAPI.
    PMAPI metrics are unique by name, and MetricCores should be also;
    rarely, some PMAPI metrics with different names might have identical PMIDs.
PMAPI metrics are unique by (name) and by (name,pmid) - _usually_ by (pmid)
too. Note that names here (and only here) are stored as byte strings for
direct PMAPI access. All dictionaries/caching strategies built using the
core structure use native strings (i.e., not byte strings in python3).
"""
def __init__(self, ctx, name, pmid):
self.ctx = ctx
if type(name) != type(b''):
name = name.encode('utf-8')
self.name = name
self.pmid = pmid
self.desc = None
self.text = None
self.help = None
class Metric(object):
"""
    Additional metric information, such as conversion factors and values.
    Several instances of Metric may share a MetricCore instance.
"""
##
# constructor
def __init__(self, core):
self._core = core # MetricCore
self._vset = None # pmValueSet member
self._values = None
self._prevvset = None
self._prevValues = None
self._convType = core.desc.contents.type
self._convUnits = None
self._errorStatus = None
self._netValues = None # (instance, name, value)
self._netPrevValues = None # (instance, name, value)
        self._netConvValues = None     # (instance, name, value)
##
# core property read methods
def _R_ctx(self):
return self._core.ctx
def _R_name(self):
return self._core.name.decode()
def _R_pmid(self):
return self._core.pmid
def _R_desc(self):
return self._core.desc
def _R_text(self):
return self._core.text
def _R_help(self):
return self._core.help
    def get_vlist(self, vset, vlist_idx):
        """ Return vlist[vlist_idx] of the given value set """
        listptr = cast(vset.contents.vlist, POINTER(pmValue))
        return listptr[vlist_idx]
    def get_inst(self, vset, vlist_idx):
        """ Return the inst for vlist[vlist_idx] of the given value set """
        return self.get_vlist(vset, vlist_idx).inst
def computeValues(self, inValues):
""" Extract the value for a singleton or list of instances
as a triple (inst, name, val)
"""
vset = inValues
ctx = self.ctx
instD = ctx.mcGetInstD(self.desc.contents.indom)
valL = []
for i in range(vset.numval):
instval = self.get_vlist(vset, i)
try:
name = instD[instval.inst]
except KeyError:
name = ''
outAtom = self.ctx.pmExtractValue(
vset.valfmt, instval, self.desc.type, self._convType)
if self._convUnits:
desc = (POINTER(pmDesc) * 1)()
desc[0] = self.desc
outAtom = self.ctx.pmConvScale(
self._convType, outAtom, desc, 0, self._convUnits)
value = outAtom.dref(self._convType)
valL.append((instval, name, value))
return valL
def _find_previous_instval(self, index, inst, pvset):
""" Find a metric instance in the previous resultset """
        if index < pvset.numval:
pinstval = self.get_vlist(pvset, index)
if inst == pinstval.inst:
return pinstval
for pi in range(pvset.numval):
pinstval = self.get_vlist(pvset, pi)
if inst == pinstval.inst:
return pinstval
return None
def convertValues(self, values, prevValues, delta):
""" Extract the value for a singleton or list of instances as a
triple (inst, name, val) for COUNTER metrics with the value
delta calculation applied (for rate conversion).
"""
if self.desc.sem != PM_SEM_COUNTER:
return self.computeValues(values)
if prevValues == None:
return None
pvset = prevValues
vset = values
ctx = self.ctx
instD = ctx.mcGetInstD(self.desc.contents.indom)
valL = []
for i in range(vset.numval):
instval = self.get_vlist(vset, i)
pinstval = self._find_previous_instval(i, instval.inst, pvset)
if pinstval == None:
continue
try:
name = instD[instval.inst]
except KeyError:
name = ''
outAtom = self.ctx.pmExtractValue(vset.valfmt,
instval, self.desc.type, PM_TYPE_DOUBLE)
poutAtom = self.ctx.pmExtractValue(pvset.valfmt,
pinstval, self.desc.type, PM_TYPE_DOUBLE)
if self._convUnits:
desc = (POINTER(pmDesc) * 1)()
desc[0] = self.desc
outAtom = self.ctx.pmConvScale(
PM_TYPE_DOUBLE, outAtom, desc, 0, self._convUnits)
poutAtom = self.ctx.pmConvScale(
PM_TYPE_DOUBLE, poutAtom, desc, 0, self._convUnits)
value = outAtom.dref(PM_TYPE_DOUBLE)
pvalue = poutAtom.dref(PM_TYPE_DOUBLE)
if (value >= pvalue):
valL.append((instval, name, (value - pvalue) / delta))
return valL
def _R_values(self):
return self._values
def _R_prevValues(self):
return self._prevValues
def _R_convType(self):
return self._convType
def _R_convUnits(self):
return self._convUnits
def _R_errorStatus(self):
return self._errorStatus
def _R_netConvValues(self):
return self._netConvValues
def _R_netPrevValues(self):
if not self._prevvset:
return None
self._netPrevValues = self.computeValues(self._prevvset)
return self._netPrevValues
def _R_netValues(self):
if not self._vset:
return None
self._netValues = self.computeValues(self._vset)
return self._netValues
    def _W_values(self, values):
        self._prevValues = self._values
        self._values = values
        self._netPrevValues = self._netValues
        self._netValues = None
def _W_convType(self, value):
self._convType = value
def _W_convUnits(self, value):
self._convUnits = value
# interface to properties in MetricCore
ctx = property(_R_ctx, None, None, None)
name = property(_R_name, None, None, None)
pmid = property(_R_pmid, None, None, None)
desc = property(_R_desc, None, None, None)
text = property(_R_text, None, None, None)
help = property(_R_help, None, None, None)
# properties specific to this instance
values = property(_R_values, _W_values, None, None)
prevValues = property(_R_prevValues, None, None, None)
convType = property(_R_convType, _W_convType, None, None)
convUnits = property(_R_convUnits, _W_convUnits, None, None)
errorStatus = property(_R_errorStatus, None, None, None)
netValues = property(_R_netValues, None, None, None)
netPrevValues = property(_R_netPrevValues, None, None, None)
netConvValues = property(_R_netConvValues, None, None, None)
def metricPrint(self):
indomstr = self.ctx.pmInDomStr(self.desc.indom)
print(" ", "indom:", indomstr)
instD = self.ctx.mcGetInstD(self.desc.indom)
for inst, name, val in self.netValues:
print(" ", name, val)
def metricConvert(self, delta):
convertedList = self.convertValues(self._vset, self._prevvset, delta)
self._netConvValues = convertedList
return self._netConvValues
class MetricCache(pmContext):
"""
A cache of MetricCores is kept to reduce calls into the PMAPI library
this also slightly reduces the memory footprint of Metric instances
that share a common MetricCore
a cache of instance domain information is also kept, which further
reduces calls into the PMAPI and reduces the memory footprint of
Metric objects that share a common instance domain
"""
##
# overloads
def __init__(self, typed = PM_CONTEXT_HOST, target = "local:"):
pmContext.__init__(self, typed, target)
self._mcIndomD = {}
self._mcByNameD = {}
self._mcByPmidD = {}
##
# methods
def mcGetInstD(self, indom):
""" Query the instance : instance_list dictionary """
return self._mcIndomD[indom]
def _mcAdd(self, core):
""" Update the dictionary """
indom = core.desc.contents.indom
if indom not in self._mcIndomD:
if c_int(indom).value == c_int(PM_INDOM_NULL).value:
instmap = { PM_IN_NULL : b'PM_IN_NULL' }
else:
if self._type == PM_CONTEXT_ARCHIVE:
instL, nameL = self.pmGetInDomArchive(core.desc)
else:
instL, nameL = self.pmGetInDom(core.desc)
if instL != None and nameL != None:
instmap = dict(zip(instL, nameL))
else:
instmap = {}
self._mcIndomD.update({indom: instmap})
self._mcByNameD.update({core.name.decode(): core})
self._mcByPmidD.update({core.pmid: core})
def mcGetCoresByName(self, nameL):
""" Update the core (metric id, description,...) list """
coreL = []
missD = None
errL = None
# lookup names in cache
for index, name in enumerate(nameL):
if type(name) == type(b''):
name = name.decode()
# lookup metric core in cache
core = self._mcByNameD.get(name)
if not core:
# cache miss
if not missD:
missD = {}
missD.update({name: index})
coreL.append(core)
# some cache lookups missed, fetch pmids and build missing MetricCores
if missD:
idL, errL = self.mcFetchPmids(missD.keys())
for name, pmid in idL:
if pmid == PM_ID_NULL:
# fetch failed for the given metric name
if not errL:
errL = []
errL.append(name)
else:
# create core pmDesc
newcore = self._mcCreateCore(name, pmid)
# update core ref in return list
coreL[missD[name]] = newcore
return coreL, errL
def _mcCreateCore(self, name, pmid):
""" Update the core description """
newcore = MetricCore(self, name, pmid)
try:
newcore.desc = self.pmLookupDesc(pmid)
except pmErr as error:
fail = "%s: pmLookupDesc: %s" % (error.progname(), error.message())
            print(fail, file=stderr)
raise SystemExit(1)
# insert core into cache
self._mcAdd(newcore)
return newcore
def mcFetchPmids(self, nameL):
""" Update the core metric ids. note: some names have identical pmids """
errL = None
nameA = (c_char_p * len(nameL))()
for index, name in enumerate(nameL):
if type(name) != type(b''):
name = name.encode('utf-8')
nameA[index] = c_char_p(name)
try:
pmidArray = self.pmLookupName(nameA)
if len(pmidArray) < len(nameA):
missing = "%d of %d metric names" % (len(pmidArray), len(nameA))
print >> stderr, "Cannot resolve", missing
raise SystemExit(1)
except pmErr as error:
fail = "%s: pmLookupName: %s" % (error.progname(), error.message())
            print(fail, file=stderr)
raise SystemExit(1)
return zip(nameL, pmidArray), errL
class MetricGroup(dict):
"""
Manages a group of metrics for fetching the values of
a MetricGroup is a dictionary of Metric objects, for which data can
be fetched from a target system using a single call to pmFetch
the Metric objects are indexed by the metric name
pmFetch fetches data for a list of pmIDs, so there is also a shadow
dictionary keyed by pmID, along with a shadow list of pmIDs
"""
##
# property read methods
def _R_contextCache(self):
return self._ctx
def _R_pmidArray(self):
return self._pmidArray
def _R_timestamp(self):
return self._result.contents.timestamp
def _R_result(self):
return self._result
def _R_prevTimestamp(self):
return self._prev.contents.timestamp
def _R_prev(self):
return self._prev
##
# property write methods
def _W_result(self, pmresult):
self._prev = self._result
self._result = pmresult
##
# property definitions
contextCache = property(_R_contextCache, None, None, None)
pmidArray = property(_R_pmidArray, None, None, None)
result = property(_R_result, _W_result, None, None)
timestamp = property(_R_timestamp, None, None, None)
prev = property(_R_prev, None, None, None)
prevTimestamp = property(_R_prevTimestamp, None, None, None)
##
# overloads
def __init__(self, contextCache, inL = []):
dict.__init__(self)
self._ctx = contextCache
self._pmidArray = None
self._result = None
self._prev = None
self._altD = {}
self.mgAdd(inL)
def __setitem__(self, attr, value = []):
if attr in self:
raise KeyError("metric group with that key already exists")
else:
dict.__setitem__(self, attr, MetricGroup(self, inL = value))
##
# methods
def mgAdd(self, nameL):
""" Create the list of Metric(s) """
coreL, errL = self._ctx.mcGetCoresByName(nameL)
for core in coreL:
metric = Metric(core)
self.update({metric.name: metric})
self._altD.update({metric.pmid: metric})
n = len(self)
self._pmidArray = (c_uint * n)()
for x, key in enumerate(self.keys()):
self._pmidArray[x] = c_uint(self[key].pmid)
def mgFetch(self):
""" Fetch the list of Metric values. Save the old value. """
try:
self.result = self._ctx.pmFetch(self._pmidArray)
# update the result entries in each metric
result = self.result.contents
for i in range(self.result.contents.numpmid):
pmid = self.result.contents.get_pmid(i)
vset = self.result.contents.get_vset(i)
self._altD[pmid]._prevvset = self._altD[pmid]._vset
self._altD[pmid]._vset = vset
except pmErr as error:
if error.args[0] == PM_ERR_EOL:
raise SystemExit(0)
fail = "%s: pmFetch: %s" % (error.progname(), error.message())
            print(fail, file=stderr)
raise SystemExit(1)
def mgDelta(self):
"""
Sample delta - used for rate conversion calculations, which
requires timestamps from successive samples.
"""
if self._prev != None:
prevTimestamp = float(self.prevTimestamp)
else:
prevTimestamp = 0.0
return float(self.timestamp) - prevTimestamp
class MetricGroupPrinter(object):
"""
Handles reporting of MetricGroups within a GroupManager.
This object is called upon at the end of each fetch when
new values are available. It is also responsible for
producing any initial (or on-going) header information
that the tool may wish to report.
"""
def report(self, manager):
""" Base implementation, all tools should override """
for group_name in manager.keys():
group = manager[group_name]
for metric_name in group.keys():
group[metric_name].metricPrint()
def convert(self, manager):
""" Do conversion for all metrics across all groups """
for group_name in manager.keys():
group = manager[group_name]
delta = group.mgDelta()
for metric_name in group.keys():
group[metric_name].metricConvert(delta)
class MetricGroupManager(dict, MetricCache):
"""
Manages a dictionary of MetricGroups which can be pmFetch'ed
inherits from MetricCache, which inherits from pmContext
"""
##
# property access methods
def _R_options(self): # command line option object
return self._options
def _W_options(self, options):
self._options = options
def _R_default_delta(self): # default interval unless command line set
return self._default_delta
def _W_default_delta(self, delta):
self._default_delta = delta
def _R_default_pause(self): # default reporting delay (archives only)
return self._default_pause
def _W_default_pause(self, pause):
self._default_pause = pause
def _W_printer(self, printer): # helper class for reporting
self._printer = printer
def _R_counter(self): # fetch iteration count, useful for printer
return self._counter
##
# property definitions
options = property(_R_options, _W_options, None, None)
default_delta = property(_R_default_delta, _W_default_delta, None, None)
default_pause = property(_R_default_pause, _W_default_pause, None, None)
printer = property(None, _W_printer, None, None)
counter = property(_R_counter, None, None, None)
##
# overloads
def __init__(self, typed = PM_CONTEXT_HOST, target = "local:"):
dict.__init__(self)
MetricCache.__init__(self, typed, target)
self._options = None
self._default_delta = timeval(1, 0)
self._default_pause = None
self._printer = None
self._counter = 0
def __setitem__(self, attr, value = []):
if attr in self:
raise KeyError("metric group with that key already exists")
else:
dict.__setitem__(self, attr, MetricGroup(self, inL = value))
@classmethod
def builder(build, options, argv):
""" Helper interface, simple PCP monitor argument parsing. """
manager = build.fromOptions(options, argv)
manager._default_delta = timeval(options.delta, 0)
manager._options = options
return manager
##
# methods
def _computeSamples(self):
""" Calculate the number of samples we are to take.
This is based on command line options --samples but also
must consider --start, --finish and --interval. If none
of these were presented, a zero return means "infinite".
"""
if self._options == None:
return 0 # loop until interrupted or PM_ERR_EOL
samples = self._options.pmGetOptionSamples()
if samples != None:
return samples
if self._options.pmGetOptionFinishOptarg() == None:
return 0 # loop until interrupted or PM_ERR_EOL
origin = self._options.pmGetOptionOrigin()
finish = self._options.pmGetOptionFinish()
delta = self._options.pmGetOptionInterval()
if delta == None:
delta = self._default_delta
period = (delta.tv_sec * 1.0e6 + delta.tv_usec) / 1e6
window = float(finish.tv_sec - origin.tv_sec)
window += float((finish.tv_usec - origin.tv_usec) / 1e6)
window /= period
        return int(window + 0.5) # round up to a positive number
def _computePauseTime(self):
""" Figure out how long to sleep between samples.
This needs to take into account whether we were explicitly
asked for a delay (independent of context type, --pause),
whether this is an archive or live context, and the sampling
--interval (including the default value, if none requested).
"""
if self._default_pause != None:
return self._default_pause
if self.type == PM_CONTEXT_ARCHIVE:
self._default_pause = timeval(0, 0)
elif self._options != None:
pause = self._options.pmGetOptionInterval()
if pause != None:
self._default_pause = pause
else:
self._default_pause = self._default_delta
else:
self._default_pause = self._default_delta
return self._default_pause
def fetch(self):
""" Perform fetch operation on all of the groups. """
for group in self.keys():
self[group].mgFetch()
def run(self):
""" Using options specification, loop fetching and reporting,
pausing for the requested time interval between updates.
Transparently handles archive/live mode differences.
Note that this can be different to the sampling interval
in archive mode, but is usually the same as the sampling
interval in live mode.
"""
samples = self._computeSamples()
timer = self._computePauseTime()
try:
self.fetch()
while True:
if samples == 0 or self._counter <= samples:
self._printer.report(self)
if self._counter == samples:
break
                # We need two fetches before we can report rate-converted
                # counter metrics, so with the '-s' and '-T' options the
                # actual number of output samples would fall one short of
                # the number specified; the '+1' compensates for that.
self._counter += 1
timer.sleep()
self.fetch()
except SystemExit as code:
return code
except KeyboardInterrupt:
pass
return 0
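
# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a minimal monitor
# built from the classes above.  It assumes a pmcd daemon is reachable at
# "local:"; the group name and the metric chosen are illustrative only.
if __name__ == '__main__':
    manager = MetricGroupManager(PM_CONTEXT_HOST, "local:")
    manager['example'] = ['kernel.all.load']  # any valid PCP metric name
    manager.printer = MetricGroupPrinter()    # base printer, prints raw values
    manager.run()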
| lgpl-2.1 | -2,193,649,800,487,159,300 | 35.161695 | 82 | 0.584545 | false |
Osmose/pontoon | pontoon/administration/management/commands/sync_projects.py | 1 | 17746 | from collections import Counter
from datetime import datetime
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand, CommandError
from django.template.loader import render_to_string
from django.utils import timezone
from bulk_update.helper import bulk_update
from pontoon.administration.files import update_from_repository
from pontoon.administration.vcs import commit_to_vcs, CommitToRepositoryException
from pontoon.base.models import (
ChangedEntityLocale,
Entity,
Locale,
Project,
Resource,
Translation,
update_stats
)
from pontoon.base.utils import match_attr
from pontoon.base.vcs_models import VCSProject
class Command(BaseCommand):
args = '<project_slug project_slug ...>'
help = 'Synchronize database and remote repositories.'
def add_arguments(self, parser):
parser.add_argument(
'--no-commit',
action='store_true',
dest='no_commit',
default=False,
help='Do not commit changes to VCS'
)
def log(self, msg, *args, **kwargs):
"""Log a message to the console."""
self.stdout.write(msg.format(*args, **kwargs))
def info(self, msg, *args, **kwargs):
"""Log a message to the console if --verbosity=1 or more."""
if self.verbosity >= 1:
self.log(msg, *args, **kwargs)
def debug(self, msg, *args, **kwargs):
"""Log a message to the console if --verbosity=2."""
if self.verbosity == 2:
self.log(msg, *args, **kwargs)
def handle(self, *args, **options):
self.verbosity = options['verbosity']
self.no_commit = options['no_commit']
self.log('SYNC PROJECTS: start')
projects = Project.objects.filter(disabled=False)
if args:
projects = projects.filter(slug__in=args)
if len(projects) < 1:
raise CommandError('No matching projects found.')
for project in projects:
if not project.can_commit:
self.log(u'Skipping project {0}, cannot commit to repository.'
.format(project.name))
else:
self.handle_project(project)
self.log('SYNC PROJECTS: done')
# Once we've synced, we can delete all translations scheduled
# for deletion.
Translation.deleted_objects.all().delete()
def handle_project(self, db_project):
# Pull changes from VCS and update what we know about the files.
update_from_repository(db_project)
vcs_project = VCSProject(db_project)
self.update_resources(db_project, vcs_project)
# Collect all entities across VCS and the database and get their
# keys so we can match up matching entities.
vcs_entities = self.get_vcs_entities(vcs_project)
db_entities = self.get_db_entities(db_project)
entity_keys = set().union(db_entities.keys(), vcs_entities.keys())
changeset = ChangeSet(db_project, vcs_project)
for key in entity_keys:
db_entity = db_entities.get(key, None)
vcs_entity = vcs_entities.get(key, None)
self.handle_entity(changeset, db_project, key, db_entity, vcs_entity)
# Apply the changeset to the files, commit them, and update stats
# entries in the DB.
changeset.execute()
if not self.no_commit:
self.commit_changes(db_project, changeset)
self.update_stats(db_project, vcs_project, changeset)
# Clear out the list of changed locales for entity in this
# project now that we've finished syncing.
(ChangedEntityLocale.objects
.filter(entity__resource__project=db_project)
.delete())
self.log(u'Synced project {0}', db_project.slug)
def handle_entity(self, changeset, db_project, key, db_entity, vcs_entity):
"""
Determine what needs to be synced between the database and VCS versions
of a single entity and log what needs to be changed in the changeset.
"""
if vcs_entity is None:
if db_entity is None:
                # This should never happen; neither the DB nor VCS has the entity. Hard abort.
raise CommandError('No entities found for key {0}'.format(key))
else:
# VCS no longer has the entity, remove it from Pontoon.
changeset.obsolete_db_entity(db_entity)
elif db_entity is None:
# New VCS entities are added to Pontoon.
changeset.create_db_entity(vcs_entity)
else:
for locale in db_project.locales.all():
if not vcs_entity.has_translation_for(locale.code):
# VCS lacks an entity for this locale, so we can't
# pull updates nor edit it. Skip it!
continue
if db_entity.has_changed(locale):
# Pontoon changes overwrite whatever VCS has.
changeset.update_vcs_entity(locale.code, db_entity, vcs_entity)
else:
# If Pontoon has nothing or has not changed, and the VCS
# still has the entity, update Pontoon with whatever may
# have changed.
changeset.update_db_entity(locale.code, db_entity, vcs_entity)
def update_resources(self, db_project, vcs_project):
"""Update the database on what resource files exist in VCS."""
relative_paths = vcs_project.resources.keys()
db_project.resource_set.exclude(path__in=relative_paths).delete()
for relative_path, vcs_resource in vcs_project.resources.items():
resource, created = db_project.resource_set.get_or_create(path=relative_path)
resource.format = Resource.get_path_format(relative_path)
resource.entity_count = len(vcs_resource.entities)
resource.save()
def update_stats(self, db_project, vcs_project, changeset):
"""
Update the Stats entries in the database for locales that had
translation updates.
"""
for resource in db_project.resource_set.all():
for locale in changeset.updated_locales:
# We only want to create/update the stats object if the resource
# exists in the current locale, UNLESS the file is asymmetric.
vcs_resource = vcs_project.resources[resource.path]
resource_exists = vcs_resource.files.get(locale) is not None
if resource_exists or resource.is_asymmetric:
update_stats(resource, locale)
def get_vcs_entities(self, vcs_project):
return {self.entity_key(entity): entity for entity in vcs_project.entities}
def get_db_entities(self, db_project):
entities = (Entity.objects
.select_related('resource')
.prefetch_related('changed_locales')
.filter(resource__project=db_project, obsolete=False))
return {self.entity_key(entity): entity for entity in entities}
def entity_key(self, entity):
"""
Generate a key for the given entity that is unique within the
project.
"""
key = entity.key or entity.string
return ':'.join([entity.resource.path, key])
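
    # For example (illustrative values, not taken from a real project):
    #   entity.resource.path == 'locale/templates/messages.pot'
    #   entity.key == 'browser-title'
    #   => 'locale/templates/messages.pot:browser-title'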
def commit_changes(self, db_project, changeset):
"""Commit the changes we've made back to the VCS."""
for locale in db_project.locales.all():
authors = changeset.commit_authors_per_locale.get(locale.code, [])
# Use the top translator for this batch as commit author, or
# the fake Pontoon user if there are no authors.
if len(authors) > 0:
commit_author = Counter(authors).most_common(1)[0][0]
else:
commit_author = User(first_name="Pontoon", email="[email protected]")
commit_message = render_to_string('commit_message.jinja', {
'locale': locale,
'project': db_project,
'authors': authors
})
try:
result = commit_to_vcs(
db_project.repository_type,
db_project.locale_directory_path(locale.code),
commit_message,
commit_author,
db_project.repository_url
)
except CommitToRepositoryException as err:
result = {'message': unicode(err)}
if result is not None:
self.log(
u'Committing project {project.name} for {locale.name} '
u'({locale.code}) failed: {reason}',
project=db_project,
locale=locale,
reason=result['message']
)
class ChangeSet(object):
"""
Stores a set of changes to be made to the database and the
translations stored in VCS. Once all the necessary changes have been
stored, execute all the changes at once efficiently.
"""
def __init__(self, db_project, vcs_project):
self.db_project = db_project
self.vcs_project = vcs_project
self.executed = False
self.changes = {
'update_vcs': [],
'update_db': [],
'obsolete_db': [],
'create_db': []
}
self.entities_to_update = []
self.translations_to_update = []
self.translations_to_create = []
self.commit_authors_per_locale = {}
self.updated_locales = set()
def update_vcs_entity(self, locale_code, db_entity, vcs_entity):
"""
Replace the translations in VCS with the translations from the
database.
"""
self.changes['update_vcs'].append((locale_code, db_entity, vcs_entity))
def create_db_entity(self, vcs_entity):
"""Create a new entity in the database."""
self.changes['create_db'].append(vcs_entity)
def update_db_entity(self, locale_code, db_entity, vcs_entity):
"""Update the database with translations from VCS."""
self.changes['update_db'].append((locale_code, db_entity, vcs_entity))
def obsolete_db_entity(self, db_entity):
"""Mark the given entity as obsolete."""
self.changes['obsolete_db'].append(db_entity.pk)
def execute(self):
"""
Execute the changes stored in this changeset. Execute can only
be called once per changeset; subsequent calls raise a
RuntimeError, even if the changes failed.
"""
if self.executed:
raise RuntimeError('execute() can only be called once per changeset.')
else:
self.executed = True
# Store locales and resources for FK relationships.
self.locales = {l.code: l for l in Locale.objects.all()}
self.resources = {r.path: r for r in self.db_project.resource_set.all()}
# Perform the changes and fill the lists for bulk creation and
# updating.
self.execute_update_vcs()
self.execute_create_db()
self.execute_update_db()
self.execute_obsolete_db()
# Apply the built-up changes to the DB
if len(self.entities_to_update) > 0:
bulk_update(self.entities_to_update, update_fields=[
'resource',
'string',
'string_plural',
'key',
'comment',
'order',
'source'
])
Translation.objects.bulk_create(self.translations_to_create)
if len(self.translations_to_update) > 0:
bulk_update(self.translations_to_update, update_fields=[
'entity',
'locale',
'string',
'plural_form',
'approved',
'approved_user_id',
'approved_date',
'fuzzy',
'extra'
])
# Track which locales were updated.
for translation in self.translations_to_update:
self.updated_locales.add(translation.locale)
def execute_update_vcs(self):
resources = self.vcs_project.resources
changed_resources = set()
for locale_code, db_entity, vcs_entity in self.changes['update_vcs']:
changed_resources.add(resources[db_entity.resource.path])
vcs_translation = vcs_entity.translations[locale_code]
db_translations = (db_entity.translation_set
.filter(approved=True, locale__code=locale_code))
# If no DB translations are fuzzy, set fuzzy to False.
# Otherwise, it's true.
vcs_translation.fuzzy = any(t for t in db_translations if t.fuzzy)
if len(db_translations) > 0:
last_translation = max(db_translations, key=lambda t: t.date or datetime.min)
vcs_translation.last_updated = last_translation.date
vcs_translation.last_translator = last_translation.user
# Replace existing translations with ones from the database.
vcs_translation.strings = {
db.plural_form: db.string for db in db_translations
}
# Track which translators were involved.
self.commit_authors_per_locale[locale_code] = [t.user for t in db_translations if t.user]
for resource in changed_resources:
resource.save()
def get_entity_updates(self, vcs_entity):
"""
Return a dict of the properties and values necessary to create
or update a database entity from a VCS entity.
"""
return {
'resource': self.resources[vcs_entity.resource.path],
'string': vcs_entity.string,
'string_plural': vcs_entity.string_plural,
'key': vcs_entity.key,
'comment': '\n'.join(vcs_entity.comments),
'order': vcs_entity.order,
'source': vcs_entity.source
}
def execute_create_db(self):
for vcs_entity in self.changes['create_db']:
entity = Entity(**self.get_entity_updates(vcs_entity))
entity.save() # We can't use bulk_create since we need a PK
for locale_code, vcs_translation in vcs_entity.translations.items():
for plural_form, string in vcs_translation.strings.items():
self.translations_to_create.append(Translation(
entity=entity,
locale=self.locales[locale_code],
string=string,
plural_form=plural_form,
approved=not vcs_translation.fuzzy,
approved_date=timezone.now() if not vcs_translation.fuzzy else None,
fuzzy=vcs_translation.fuzzy
))
def execute_update_db(self):
for locale_code, db_entity, vcs_entity in self.changes['update_db']:
for field, value in self.get_entity_updates(vcs_entity).items():
setattr(db_entity, field, value)
if db_entity.is_dirty(check_relationship=True):
self.entities_to_update.append(db_entity)
# Update translations for the entity.
vcs_translation = vcs_entity.translations[locale_code]
db_translations = db_entity.translation_set.filter(locale__code=locale_code)
approved_translations = []
for plural_form, string in vcs_translation.strings.items():
# Check if we need to modify an existing translation or
# create a new one.
db_translation = match_attr(db_translations,
plural_form=plural_form,
string=string)
if db_translation:
if not db_translation.approved:
db_translation.approved = True
db_translation.approved_date = timezone.now()
db_translation.fuzzy = vcs_translation.fuzzy
db_translation.extra = vcs_translation.extra
if db_translation.is_dirty():
self.translations_to_update.append(db_translation)
if not db_translation.fuzzy:
approved_translations.append(db_translation)
else:
self.translations_to_create.append(Translation(
entity=db_entity,
locale=self.locales[locale_code],
string=string,
plural_form=plural_form,
approved=not vcs_translation.fuzzy,
approved_date=timezone.now() if not vcs_translation.fuzzy else None,
fuzzy=vcs_translation.fuzzy,
extra=vcs_translation.extra
))
# Any existing translations that were not approved get unapproved.
for translation in db_translations:
if translation not in approved_translations:
translation.approved = False
translation.approved_user = None
translation.approved_date = None
if translation.is_dirty():
self.translations_to_update.append(translation)
def execute_obsolete_db(self):
(Entity.objects
.filter(pk__in=self.changes['obsolete_db'])
.update(obsolete=True))
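
# Hedged usage sketch (illustrative, not part of the original module). A
# ChangeSet is normally driven by Command.handle_project() above, roughly:
#
#     changeset = ChangeSet(db_project, vcs_project)
#     changeset.update_db_entity('de', db_entity, vcs_entity)
#     changeset.obsolete_db_entity(stale_db_entity)
#     changeset.execute()  # may only be called once per changeset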
| bsd-3-clause | 6,697,951,671,892,925,000 | 39.515982 | 101 | 0.575679 | false |
sunweaver/ganetimgr | ganeti/utils.py | 1 | 18377 | import requests
from requests.exceptions import ConnectionError
from bs4 import BeautifulSoup
import json
from gevent.pool import Pool
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core.cache import cache
from django.core.mail import send_mail
from django.contrib.sites.models import Site
from django.contrib.auth.models import User, Group
from django.db import close_connection
from django.shortcuts import get_object_or_404
from django.template.defaultfilters import filesizeformat
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from ganeti.models import Cluster, Instance, InstanceAction
from util.client import GanetiApiError
def memsize(value):
return filesizeformat(value * 1024 ** 2)
def disksizes(value):
return [filesizeformat(v * 1024 ** 2) for v in value]
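
# For example (Django's filesizeformat output, assuming default settings):
#   memsize(512)        -> '512.0 MB'
#   disksizes([10240])  -> ['10.0 GB']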
def get_instance_data(instance, cluster, node=None):
instance.cpu_url = reverse(
'graph',
args=(cluster.slug, instance.name, 'cpu-ts')
)
instance.net_url = []
for (nic_i, link) in enumerate(instance.nic_links):
instance.net_url.append(
reverse(
'graph',
args=(
cluster.slug,
instance.name,
'net-ts',
'/eth%s' % nic_i
)
)
)
return {
'node': instance.pnode,
'name': instance.name,
'cluster': instance.cluster.slug,
'cpu': instance.cpu_url,
'network': instance.net_url,
}
def get_nodes_with_graphs(cluster_slug, nodes=None):
cluster = Cluster.objects.get(slug=cluster_slug)
instances = Instance.objects.filter(cluster=cluster)
response = []
for i in instances:
# if we have set a nodes, then we should check if the
# instance belongs to them
if not nodes:
response.append(get_instance_data(i, cluster))
else:
for node in nodes:
if i.pnode == node:
response.append(get_instance_data(i, cluster, node))
return response
def prepare_clusternodes(cluster=None):
if not cluster:
# get only enabled clusters
clusters = Cluster.objects.filter(disabled=False)
else:
clusters = Cluster.objects.filter(slug=cluster)
p = Pool(15)
nodes = []
bad_clusters = []
bad_nodes = []
def _get_nodes(cluster):
try:
for node in cluster.get_cluster_nodes():
nodes.append(node)
if node['offline'] is True:
bad_nodes.append(node['name'])
        except Exception:  # covers GanetiApiError too
cluster._client = None
bad_clusters.append(cluster)
finally:
close_connection()
p.map(_get_nodes, clusters)
return nodes, bad_clusters, bad_nodes
def generate_json(instance, user, locked_nodes):
jresp_list = []
i = instance
inst_dict = {}
if not i.admin_view_only:
inst_dict['name_href'] = "%s" % (
reverse(
'instance-detail',
kwargs={
'cluster_slug': i.cluster.slug, 'instance': i.name
}
)
)
inst_dict['name'] = i.name
if user.is_superuser or user.has_perm('ganeti.view_instances'):
inst_dict['cluster'] = i.cluster.slug
inst_dict['pnode'] = i.pnode
else:
inst_dict['cluster'] = i.cluster.description
inst_dict['clusterslug'] = i.cluster.slug
inst_dict['node_group_locked'] = i.pnode in locked_nodes
inst_dict['memory'] = memsize(i.beparams['maxmem'])
inst_dict['disk'] = ", ".join(disksizes(i.disk_sizes))
inst_dict['vcpus'] = i.beparams['vcpus']
inst_dict['ipaddress'] = [ip for ip in i.nic_ips if ip]
if not user.is_superuser and not user.has_perm('ganeti.view_instances'):
inst_dict['ipv6address'] = [ip for ip in i.ipv6s if ip]
# inst_dict['status'] = i.nic_ips[0] if i.nic_ips[0] else "-"
if i.admin_state == i.oper_state:
if i.admin_state:
inst_dict['status'] = "Running"
inst_dict['status_style'] = "success"
else:
inst_dict['status'] = "Stopped"
inst_dict['status_style'] = "important"
else:
if i.oper_state:
inst_dict['status'] = "Running"
else:
inst_dict['status'] = "Stopped"
if i.admin_state:
inst_dict['status'] = "%s, should be running" % inst_dict['status']
else:
inst_dict['status'] = "%s, should be stopped" % inst_dict['status']
inst_dict['status_style'] = "warning"
if i.status == 'ERROR_nodedown':
inst_dict['status'] = "Generic cluster error"
inst_dict['status_style'] = "important"
if i.adminlock:
inst_dict['adminlock'] = True
if i.isolate:
inst_dict['isolate'] = True
if i.needsreboot:
inst_dict['needsreboot'] = True
# When renaming disable clicking on instance for everyone
if hasattr(i, 'admin_lock'):
if i.admin_lock:
try:
del inst_dict['name_href']
except KeyError:
pass
if i.joblock:
inst_dict['locked'] = True
inst_dict['locked_reason'] = "%s" % ((i.joblock).capitalize())
if inst_dict['locked_reason'] in ['Deleting', 'Renaming']:
try:
del inst_dict['name_href']
except KeyError:
pass
if 'cdrom_image_path' in i.hvparams.keys():
if i.hvparams['cdrom_image_path'] and i.hvparams['boot_order'] == 'cdrom':
inst_dict['cdrom'] = True
inst_dict['nic_macs'] = ', '.join(i.nic_macs)
if user.is_superuser or user.has_perm('ganeti.view_instances'):
inst_dict['nic_links'] = ', '.join(i.nic_links)
inst_dict['network'] = []
for (nic_i, link) in enumerate(i.nic_links):
if i.nic_ips[nic_i] is None:
inst_dict['network'].append("%s" % (i.nic_links[nic_i]))
else:
inst_dict['network'].append(
"%s@%s" % (i.nic_ips[nic_i], i.nic_links[nic_i])
)
inst_dict['users'] = [
{
'user': user_item.username,
'email': user_item.email,
'user_href': "%s" % (
reverse(
"user-info",
kwargs={
'type': 'user',
'usergroup': user_item.username
}
)
)
} for user_item in i.users]
inst_dict['groups'] = [
{
'group': group.name,
'groupusers': [
"%s,%s" % (u.username, u.email) for u in group.userset
],
'group_href':"%s" % (
reverse(
"user-info",
kwargs={
'type': 'group',
'usergroup': group.name
}
)
)
} for group in i.groups
]
jresp_list.append(inst_dict)
return jresp_list
def generate_json_light(instance, user):
jresp_list = []
i = instance
inst_dict = {}
if not i.admin_view_only:
inst_dict['name_href'] = "%s" % (
reverse(
"instance-detail",
kwargs={
'cluster_slug': i.cluster.slug,
'instance': i.name
}
)
)
inst_dict['name'] = i.name
inst_dict['clusterslug'] = i.cluster.slug
inst_dict['memory'] = i.beparams['maxmem']
inst_dict['vcpus'] = i.beparams['vcpus']
inst_dict['disk'] = sum(i.disk_sizes)
if user.is_superuser or user.has_perm('ganeti.view_instances'):
inst_dict['users'] = [
{
'user': user_item.username
} for user_item in i.users
]
jresp_list.append(inst_dict)
return jresp_list
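
# Illustrative shape of a generate_json_light() entry (values invented):
#   [{'name': 'vm1.example.com', 'name_href': '/cluster/c1/vm1.example.com/',
#     'clusterslug': 'c1', 'memory': 1024, 'vcpus': 2, 'disk': 10240,
#     'users': [{'user': 'alice'}]}]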
def clear_cluster_user_cache(username, cluster_slug):
cache.delete("user:%s:index:instances" % username)
cache.delete("cluster:%s:instances" % cluster_slug)
def notifyuseradvancedactions(
user,
cluster_slug,
instance,
action_id,
action_value,
new_operating_system
):
action_id = int(action_id)
if action_id not in [1, 2, 3]:
action = {'action': _("Not allowed action")}
return action
cluster = get_object_or_404(Cluster, slug=cluster_slug)
instance = cluster.get_instance_or_404(instance)
reinstalldestroy_req = InstanceAction.objects.create_action(
user,
instance,
cluster,
action_id,
action_value,
new_operating_system
)
fqdn = Site.objects.get_current().domain
url = "https://%s%s" % \
(
fqdn,
reverse(
"reinstall-destroy-review",
kwargs={
'application_hash': reinstalldestroy_req.activation_key,
'action_id': action_id
}
)
)
email = render_to_string(
"instances/emails/reinstall_mail.txt",
{
"instance": instance,
"user": user,
"action": reinstalldestroy_req.get_action_display(),
"action_value": reinstalldestroy_req.action_value,
"url": url,
"operating_system": reinstalldestroy_req.operating_system
}
)
if action_id == 1:
action_mail_text = _("re-installation")
if action_id == 2:
action_mail_text = _("destruction")
if action_id == 3:
action_mail_text = _("rename")
try:
send_mail(
_("%(pref)sInstance %(action)s requested: %(instance)s") % {
"pref": settings.EMAIL_SUBJECT_PREFIX,
"action": action_mail_text,
"instance": instance.name
},
email,
settings.SERVER_EMAIL,
[user.email]
)
# if anything goes wrong do nothing.
except:
# remove entry
reinstalldestroy_req.delete()
action = {'action': _("Could not send email")}
else:
action = {'action': _("Mail sent")}
return action
try:
from ganetimgr.settings import OPERATING_SYSTEMS_URLS
except ImportError:
OPERATING_SYSTEMS_URLS = False
else:
from ganetimgr.settings import OPERATING_SYSTEMS_PROVIDER, OPERATING_SYSTEMS_SSH_KEY_PARAM
try:
from ganetimgr.settings import OPERATING_SYSTEMS
except ImportError:
OPERATING_SYSTEMS = False
def discover_available_operating_systems():
operating_systems = {}
if OPERATING_SYSTEMS_URLS:
for url in OPERATING_SYSTEMS_URLS:
try:
raw_response = requests.get(url)
except ConnectionError:
# fail silently if url is unreachable
break
else:
if raw_response.ok:
soup = BeautifulSoup(raw_response.text)
extensions = {
'.tar.gz': 'tarball',
'.img': 'qemu',
'-root.dump': 'dump',
}
                    architectures = ['-x86_', '-amd', '-i386']
for link in soup.findAll('a'):
try:
if '.' + '.'.join(link.attrs.get('href').split('.')[-2:]) == '.tar.gz':
extension = '.tar.gz'
elif '.' + '.'.join(link.attrs.get('href').split('.')[-1:]) == '.img':
extension = '.img'
else:
extension = '.' + '.'.join(link.attrs.get('href').split('.')[-1:])
# in case of false link
except IndexError:
pass
else:
# if the file is tarball, qemu or dump then it is valid
if extension in extensions.keys() or '-root.dump' in link.attrs.get('href'):
re = requests.get(url + link.attrs.get('href') + '.dsc')
if re.ok:
name = re.text
else:
name = link.attrs.get('href')
                                img_id = link.attrs.get('href').replace(extension, '')
                                architecture = None
                                for arch in architectures:
                                    if arch in link.attrs.get('href'):
                                        img_id = img_id.split(arch)[0]
                                        architecture = arch
                                        break
description = name
img_format = extensions[extension]
if link.attrs.get('href').split('-')[0] == 'nomount':
operating_systems.update({
img_id: {
'description': description,
'provider': OPERATING_SYSTEMS_PROVIDER,
'ssh_key_param': OPERATING_SYSTEMS_SSH_KEY_PARAM,
'arch': architecture,
'osparams': {
'img_id': img_id,
'img_format': img_format,
'img_nomount': 'yes',
}
}
})
else:
operating_systems.update({
img_id: {
'description': description,
'provider': OPERATING_SYSTEMS_PROVIDER,
'ssh_key_param': OPERATING_SYSTEMS_SSH_KEY_PARAM,
'arch': architecture,
'osparams': {
'img_id': img_id,
'img_format': img_format,
}
}
})
return operating_systems
else:
return {}
def get_operating_systems_dict():
if OPERATING_SYSTEMS:
return OPERATING_SYSTEMS
else:
return {}
def operating_systems():
# check if results exist in cache
response = cache.get('operating_systems')
# if no items in cache
if not response:
discovery = discover_available_operating_systems()
dictionary = get_operating_systems_dict()
operating_systems = sorted(dict(discovery.items() + dictionary.items()).items())
# move 'none' on the top of the list for ui purposes.
        for os in list(operating_systems):
            if os[0] == 'none':
                operating_systems.remove(os)
                operating_systems.insert(0, os)
                break
response = json.dumps({'status': 'success', 'operating_systems': operating_systems})
# add results to cache for one day
cache.set('operating_systems', response, timeout=86400)
return response
# find os info given its img_id
def get_os_details(img_id):
oss = json.loads(operating_systems()).get('operating_systems')
for os in oss:
if os[0] == img_id:
return os[1]
return False
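
# operating_systems() returns a JSON string shaped like (values illustrative):
#   {"status": "success",
#    "operating_systems": [["debian-wheezy", {"description": "...",
#                           "osparams": {"img_id": "debian-wheezy", ...}}]]}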
def refresh_cluster_cache(cluster, instance):
cluster.force_cluster_cache_refresh(instance)
for u in User.objects.all():
cache.delete("user:%s:index:instances" % u.username)
nodes, bc, bn = prepare_clusternodes()
cache.set('allclusternodes', nodes, 90)
cache.set('badclusters', bc, 90)
cache.set('badnodes', bn, 90)
def clusterdetails_generator(slug):
cluster_profile = {}
cluster_profile['slug'] = slug
cluster = Cluster.objects.get(slug=slug)
cluster_profile['description'] = cluster.description
cluster_profile['hostname'] = cluster.hostname
# We want to fetch info about the cluster per se, networks,
# nodes and nodegroups plus a really brief instances outline.
# Nodegroups
nodegroups = cluster.get_node_group_stack()
nodes = cluster.get_cluster_nodes()
# Networks
networks = cluster.get_networks()
# Instances later on...
cluster_profile['clusterinfo'] = cluster.get_cluster_info()
cluster_profile['clusterinfo']['mtime'] = str(cluster_profile['clusterinfo']['mtime'])
cluster_profile['clusterinfo']['ctime'] = str(cluster_profile['clusterinfo']['ctime'])
cluster_profile['nodegroups'] = nodegroups
cluster_profile['nodes'] = nodes
cluster_profile['networks'] = networks
return cluster_profile
def prepare_cluster_node_group_stack(cluster):
cluster_info = cluster.get_cluster_info()
len_instances = len(cluster.get_cluster_instances())
res = {}
res['slug'] = cluster.slug
res['cluster_id'] = cluster.pk
res['num_inst'] = len_instances
res['description'] = cluster.description
res['disk_templates'] = cluster_info['ipolicy']['disk-templates']
res['node_groups'] = cluster.get_node_group_stack()
return res
def prepare_tags(taglist):
tags = []
for i in taglist:
        # User
if i.startswith('u'):
tags.append(
"%s:user:%s" % (
settings.GANETI_TAG_PREFIX, User.objects.get(
pk=i.replace('u_', '')
).username
)
)
#Group
if i.startswith('g'):
tags.append("%s:group:%s" % (
settings.GANETI_TAG_PREFIX,
                Group.objects.get(pk=i.replace('g_', '')).name
))
return list(set(tags))
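
# For example, assuming settings.GANETI_TAG_PREFIX == 'ganetimgr' and the
# referenced primary keys exist (usernames illustrative):
#   prepare_tags(['u_1', 'g_2']) -> ['ganetimgr:user:admin',
#                                    'ganetimgr:group:admins']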
| gpl-3.0 | -5,637,533,463,556,239,000 | 34.545455 | 109 | 0.502313 | false |
bright-sparks/wpull | wpull/scraper/util_test.py | 1 | 4868 | import unittest
from wpull.item import LinkType
from wpull.scraper.util import clean_link_soup, parse_refresh, is_likely_link, \
is_unlikely_link, identify_link_type
class TestUtil(unittest.TestCase):
def test_clean_link_soup(self):
self.assertEqual(
'http://example.com',
clean_link_soup('http://example.com ')
)
self.assertEqual(
'http://example.com/',
clean_link_soup('\n\r\thttp://example.com\n\r\r\r\n\t/')
)
self.assertEqual(
'http://example.com/ something',
clean_link_soup('http://example.com\n\t / something \n\r\t')
)
self.assertEqual(
'http://example.com/dog cat/',
clean_link_soup('http://example.com/\n dog \tcat\r/\n')
)
self.assertEqual(
'ßðf ¤Jáßðff ßðfœ³²œ¤ œë ßfœ',
clean_link_soup('ß\tðf ¤Jáßðf\n f ßðfœ³²œ¤ œë ßfœ ')
)
def test_parse_refresh(self):
self.assertEqual(
'http://example.com', parse_refresh('10;url="http://example.com"')
)
self.assertEqual(
'http://example.com', parse_refresh('10;url= http://example.com ')
)
self.assertEqual(
'example.com', parse_refresh("url =' example.com '")
)
self.assertFalse(
parse_refresh('url=')
)
self.assertFalse(
parse_refresh('url = ')
)
def test_is_likely_link(self):
self.assertTrue(is_likely_link('image.png'))
self.assertTrue(is_likely_link('video.mp4'))
self.assertTrue(is_likely_link('/directory'))
self.assertTrue(is_likely_link('directory/'))
self.assertTrue(is_likely_link('/directory/'))
self.assertTrue(is_likely_link('../directory/'))
self.assertTrue(is_likely_link('http://example.com/'))
self.assertTrue(is_likely_link('https://example.com/'))
self.assertTrue(is_likely_link('ftp://example.com'))
self.assertTrue(is_likely_link('directory/index.html'))
self.assertFalse(is_likely_link('directory/another_directory'))
self.assertTrue(is_likely_link('application/windows.exe'))
self.assertTrue(is_likely_link('//example.com/admin'))
self.assertFalse(is_likely_link('12.0'))
self.assertFalse(is_likely_link('7'))
self.assertFalse(is_likely_link('horse'))
self.assertFalse(is_likely_link(''))
self.assertFalse(is_likely_link('setTimeout(myTimer, 1000)'))
self.assertFalse(is_likely_link('comment.delete'))
self.assertFalse(is_likely_link('example.com'))
self.assertFalse(is_likely_link('example.net'))
self.assertFalse(is_likely_link('example.org'))
self.assertFalse(is_likely_link('example.edu'))
def test_is_unlikely_link(self):
self.assertTrue(is_unlikely_link('example.com+'))
self.assertTrue(is_unlikely_link('www.'))
self.assertTrue(is_unlikely_link(':example.com'))
self.assertTrue(is_unlikely_link(',example.com'))
self.assertTrue(is_unlikely_link('http:'))
self.assertTrue(is_unlikely_link('.example.com'))
self.assertTrue(is_unlikely_link('doc[0]'))
self.assertTrue(is_unlikely_link('/'))
self.assertTrue(is_unlikely_link('//'))
self.assertTrue(is_unlikely_link('application/json'))
self.assertTrue(is_unlikely_link('application/javascript'))
self.assertTrue(is_unlikely_link('text/javascript'))
self.assertTrue(is_unlikely_link('text/plain'))
self.assertTrue(is_unlikely_link('/\\/'))
self.assertTrue(is_unlikely_link('a.help'))
self.assertTrue(is_unlikely_link('div.menu'))
self.assertTrue(is_unlikely_link('apikey={YOUR_API_KEY_HERE}'))
self.assertFalse(is_unlikely_link('http://'))
self.assertFalse(is_unlikely_link('example'))
self.assertFalse(is_unlikely_link('example.com'))
self.assertFalse(is_unlikely_link('//example.com/assets/image.css'))
self.assertFalse(is_unlikely_link('./image.css'))
self.assertFalse(is_unlikely_link('../image.css'))
self.assertFalse(is_unlikely_link('index.html'))
self.assertFalse(is_unlikely_link('body.html'))
def test_identifiy_link_type(self):
self.assertEqual(LinkType.javascript, identify_link_type('hello.js'))
self.assertEqual(LinkType.css, identify_link_type('hello.css'))
self.assertEqual(LinkType.html, identify_link_type('hello.html'))
self.assertEqual(LinkType.media, identify_link_type('hello.mp3'))
self.assertEqual(LinkType.media, identify_link_type('hello.png'))
self.assertEqual(LinkType.media, identify_link_type('hello.flv'))
self.assertFalse(identify_link_type('hello.exe'))
| gpl-3.0 | -1,224,444,410,961,170,400 | 44.17757 | 80 | 0.621845 | false |
MadeInHaus/django-template | fabfile/vagrant.py | 1 | 5990 | from os import path
from fabric.api import local, cd, lcd, roles, execute, task, run, \
settings, abort, hide
from fabric.colors import yellow
from haus_vars import with_vars
import logging
logging.basicConfig()
log = logging.getLogger(__name__)
# config....
USE_CELERY = False # this should match the USE_CELERY setting in project.settings
USE_GULP = True
@task
@roles('vagrant')
def test_vagrant():
local_md5 = local('md5 README.md', capture=True).split()[3]
with cd('/var/www'):
remote_md5 = run('md5sum README.md').split()[0]
if not local_md5 == remote_md5:
abort('VM does not match, another VM running? got local: {} and remote: {}'.format(local_md5, remote_md5))
@task
@roles('vagrant')
@with_vars
def env_test(*args, **kwargs):
print 'args: {} kwargs: {}'.format(args, kwargs)
run('env | grep HAUS')
@task
def runall():
execute(test_vagrant)
local('touch nohup.out')
if USE_CELERY:
local('nohup fab vagrant.celery &')
local('nohup fab vagrant.celerybeat &')
if USE_GULP:
local('nohup fab vagrant.rungulp &')
local('nohup fab vagrant.runserver &')
local('tail -f nohup.out')
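
# Typical invocations from the host shell, assuming the usual fabfile package
# layout (this module being fabfile/vagrant.py):
#   fab vagrant.runall   # start celery/gulp/runserver and tail the logs
#   fab vagrant.killall  # stop everything again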
@task
@roles('vagrant')
def killall():
execute(test_vagrant)
log.warning(yellow('killing all processes'))
with settings(warn_only=True):
with hide('warnings', 'running', 'stderr', 'stdout'):
run("ps ax | grep [r]unserver | awk '{ print $1 }' | xargs sudo kill -9")
run("ps ax | grep [r]un_gunicorn | awk '{ print $1 }' | xargs sudo kill -9")
if USE_CELERY:
run("ps ax | grep [w]orker | awk '{ print $1 }' | xargs sudo kill -9")
run("ps ax | grep [c]elerybeat | awk '{ print $1 }' | xargs sudo kill -9")
run("ps ax | grep [c]ompass | awk '{ print $1 }' | xargs sudo kill -9")
run("ps ax | grep [s]ass | awk '{ print $1 }' | xargs sudo kill -9")
@task
@roles('vagrant')
def runserver():
with settings(warn_only=True):
with hide('warnings', 'running', 'stderr', 'stdout'):
# FOR SOME REASON IF THE PROCESS WASN'T ENDED CORRECTLY, THIS WILL KILL IT
run("ps ax | grep [r]unserver | awk '{ print $1 }' | xargs kill -9")
with cd("/var/www/"):
run('python ./manage.py runserver [::]:8000')
@task
@roles('vagrant')
def rungulp():
with settings(warn_only=True):
with hide('warnings', 'running', 'stderr', 'stdout'):
local("pkill gulp")
with lcd('frontend'):
local('gulp')
@task
@roles('vagrant')
def gunicorn():
with settings(warn_only=True):
with hide('warnings', 'running', 'stderr', 'stdout'):
# FOR SOME REASON IF THE PROCESS WASN'T ENDED CORRECTLY, THIS WILL KILL IT
run("ps ax | grep [r]un_gunicorn | awk '{ print $1 }' | xargs kill -9")
with cd("/var/www"):
        run('gunicorn project.wsgi:application -b [::]:8000')
@task
@roles('vagrant')
def celery():
with settings(warn_only=True):
with hide('warnings', 'running', 'stderr', 'stdout'):
# FOR SOME REASON IF THE PROCESS WASN'T ENDED CORRECTLY, THIS WILL KILL IT
run("ps ax | grep [w]orker | awk '{ print $1 }' | xargs kill -9")
with cd("/var/www"):
run('python manage.py celery worker --loglevel=DEBUG')
@task
@roles('vagrant')
def celerybeat():
with settings(warn_only=True):
with hide('warnings', 'running', 'stderr', 'stdout'):
# FOR SOME REASON IF THE PROCESS WASN'T ENDED CORRECTLY, THIS WILL KILL IT
run("ps ax | grep [c]elerybeat | awk '{ print $1 }' | xargs kill -9")
with cd("/var/www"):
run('python manage.py celerybeat --loglevel=INFO')
@task
@roles('vagrant')
def initdb(load_images=False):
with cd("/var/www"):
run('yes no | python manage.py syncdb')
run('python manage.py migrate')
run('python manage.py createadmin')
if load_images:
load_fixture_images()
load_fixtures()
@task
@roles('vagrant')
def syncdb():
with cd("/var/www"):
run('python manage.py migrate')
@task
@roles('vagrant')
def resetall():
"""Stop all services, destroy the database, restore it from fixtures, remove all files in uploads directory and download assets."""
killall()
local('vagrant provision')
resetdb(delete_images=True, load_images=True)
@task
@roles('vagrant')
def resetdb(load_images=False, delete_images=False):
# mysql
#run("mysql -u vagrant -pvagrant -e 'drop database if exists django'")
#run('mysql -u vagrant -pvagrant -e "create database django"')
killall()
# postgres
run('dropdb django')
run('createdb django')
if delete_images:
run("mkdir -p /var/www/uploads")
with cd("/var/www/uploads"):
run('rm -rf ./*')
initdb(load_images)
@task
def load_fixtures():
with cd("/var/www"):
run("python manage.py loaddata project/fixtures/local_data.json")
@task
def load_fixture_images():
# basic media fixture stub
uploads_dir = path.abspath(path.join(path.dirname(__file__), '../uploads'))
with lcd(uploads_dir):
with settings(warn_only=True):
local('rm -rf ./*')
#local('curl -sLO https://domain/assets.tar.bz2')
#local('tar xjvf assets.tar.bz2')
#local('rm assets.tar.bz2')
@task
@roles('vagrant')
def collectstatic(no_input=False, skip_admin=False):
with cd("/var/www"):
run('python manage.py collectstatic {} {}'.format('--noinput' if no_input else '', '-i "admin*" -i "grappelli*"' if skip_admin else ''))
@task
@roles('vagrant')
def pipinstall():
run('/home/vagrant/.venv/bin/pip install --use-mirrors -r /var/www/requirements.txt')
@task
@roles('vagrant')
def freeze():
run('/home/vagrant/.venv/bin/pip freeze > /var/www/current-requirements.txt')
@task
@roles('vagrant')
def test():
with cd("/var/www"):
run('python manage.py test')
| mit | -3,264,137,712,658,079,000 | 28.800995 | 144 | 0.606344 | false |
saydulk/newfies-dialer | newfies/mod_utils/templatetags/utils_tags.py | 1 | 2429 | #
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2015 Star2Billing S.L.
#
# The primary maintainer of this project is
# Arezqui Belaid <[email protected]>
#
from django.utils.safestring import mark_safe
from django.template.defaultfilters import register
from django_lets_go.common_functions import word_capital
import re
from string import Template
def striphtml(data):
p = re.compile(r'<.*?>')
return mark_safe(p.sub('', data))
@register.simple_tag(name='field_html_code')
def field_html_code(field, main_class='col-md-6 col-xs-8', flag_error_text=True, flag_help_text=True):
"""
Usage: {% field_html_code field 'col-md-6 col-xs-8' %}
"""
tmp_div = Template("""
<div class="$main_class">
<div class="form-group $has_error">
<label class="control-label" for="$field_auto_id">$field_label</label>
$field
$field_errors
$field_help_text
</div>
</div>
""")
has_error = 'has-error' if field.errors else ''
field_errors = ''
if field.errors and flag_error_text:
field_errors = '<span class="help-block">%s</span>\n' % striphtml(str(field.errors)).capitalize()
field_help_text = ''
if flag_help_text:
field_help_text = '<span class="help-block">%s</span>\n' % (field.help_text.capitalize())
htmlcell = tmp_div.substitute(
main_class=main_class, has_error=has_error,
field_auto_id=field.auto_id, field_label=word_capital(field.label),
field=str(field).decode("utf-8"), field_errors=field_errors,
field_help_text=field_help_text)
return mark_safe(htmlcell)
@register.filter(name='check_url_for_template_width')
def check_url_for_template_width(current_url):
""""""
full_width_on_requested_path = [
'/dashboard/', '/sms_dashboard/', '/campaign/', '/sms_campaign/',
'user_detail_change', '/audio/', '/user_notification/',
]
if current_url == '/':
return True
else:
current_url = str(current_url)
for path in full_width_on_requested_path:
if path in current_url:
return True
return False
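
# Hedged template usage sketch (filter name as registered above):
#   {% if request.path|check_url_for_template_width %} ... {% endif %}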
| mpl-2.0 | 6,704,997,681,144,459,000 | 31.824324 | 105 | 0.625772 | false |
soedinglab/hh-suite | scripts/a3m.py | 1 | 8020 | #!/usr/bin/env python
class A3MFormatError(Exception):
def __init__(self, value):
self.value = "ERROR: "+value
def __str__(self):
return repr(self.value)
class A3M_Container:
RESIDUES = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
VALID_MATCH_STATES = set(RESIDUES)
VALID_INSERTION_STATES = set(RESIDUES.lower())
VALID_GAP_STATES = set("-.")
VALID_SS_CONF_STATES = set("0123456789")
VALID_SS_STATES = set("ECH")
VALID_DSSP_STATES = set("CHBEGITS-")
def __init__(self):
self.header = None
self.annotations = dict()
self.consensus = None
self.sequences = []
self.nr_match_states = None
@property
def number_sequences(self):
"""get the current number of protein sequences"""
return len(self.sequences)
    def check_and_add_sequence(self, header, sequence):
        if (not self.check_and_add_annotation(header, sequence) and
                not self.check_and_add_consensus(header, sequence)):
            self.check_sequence(sequence)
            self.sequences.append((header, sequence))
def check_and_add_consensus(self, header, sequence):
header_name = header[1:].split()[0]
if header_name.endswith("_consensus"):
if self.consensus:
raise A3MFormatError("Multiple definitions of consensus!")
else:
self.check_sequence(sequence)
self.consensus = (header, sequence)
return True
else:
return False
def check_and_add_annotation(self, header, sequence):
annotation_classes = [
("ss_conf", self.check_ss_conf),
("ss_pred", self.check_ss_pred),
("ss_dssp", self.check_dssp)
]
for (annotation_name, check) in annotation_classes:
if(header[1:].startswith(annotation_name)):
if(annotation_name in self.annotations):
raise A3MFormatError(
"Multiple definitions of {}!".format(annotation_name)
)
elif check(sequence):
self.annotations[annotation_name] = sequence
return True
return False
def check_match_states(self, match_states):
if not self.nr_match_states:
self.nr_match_states = match_states
if match_states == 0:
raise A3MFormatError("Sequence with zero match states!")
elif match_states != self.nr_match_states:
raise A3MFormatError(
("Sequence with diverging number "
"of match states ({} vs. {})!").format(
match_states,
self.nr_match_states
)
)
def check_ss_conf(self, sequence):
count_match_states = sum((c in self.VALID_SS_CONF_STATES
or c in self.VALID_GAP_STATES)
for c in sequence)
self.check_match_states(count_match_states)
invalid_states = set(sequence) - self.VALID_SS_CONF_STATES
invalid_states -= self.VALID_GAP_STATES
if len(invalid_states):
raise A3MFormatError(
("Undefined character(s) '{}' in predicted "
"secondary structure confidence!").format(invalid_states))
else:
return True
def check_ss_pred(self, sequence):
count_match_states = sum((c in self.VALID_SS_STATES
or c in self.VALID_GAP_STATES)
for c in sequence)
self.check_match_states(count_match_states)
invalid_states = set(sequence) - self.VALID_SS_STATES
invalid_states -= self.VALID_GAP_STATES
if len(invalid_states):
raise A3MFormatError(
("Undefined character(s) '{}' in predicted "
"secondary structure!").format(invalid_states))
else:
return True
def check_dssp(self, sequence):
count_match_states = sum(
(c in self.VALID_DSSP_STATES) for c in sequence)
self.check_match_states(count_match_states)
invalid_states = set(sequence) - self.VALID_DSSP_STATES
if len(invalid_states):
raise A3MFormatError(
("Undefined character(s) '{}' in "
"dssp annotation!").format(invalid_states))
else:
return True
def check_sequence(self, sequence):
count_match_states = sum((c in self.VALID_MATCH_STATES
or c in self.VALID_GAP_STATES)
for c in sequence)
self.check_match_states(count_match_states)
invalid_states = set(sequence) - self.VALID_MATCH_STATES
invalid_states -= self.VALID_GAP_STATES
invalid_states -= self.VALID_INSERTION_STATES
if len(invalid_states):
raise A3MFormatError(
("Undefined character(s) '{}' in "
"protein sequence!").format(invalid_states))
else:
return True
def get_sub_sequence(self, sequence, limits):
sub_sequence = []
for (start, end) in limits:
start_pos = 0
pos = -1
for i in range(len(sequence)):
if (sequence[i] in self.VALID_MATCH_STATES or
sequence[i] in self.VALID_GAP_STATES):
pos += 1
if pos + 1 == start:
start_pos = i
break
end_pos = 0
pos = -1
for i in range(len(sequence)):
if (sequence[i] in self.VALID_MATCH_STATES or
sequence[i] in self.VALID_GAP_STATES):
pos += 1
if pos + 1 == end:
end_pos = i
break
sub_sequence.append(sequence[start_pos:end_pos+1])
return "".join(sub_sequence)
def __str__(self):
content = []
if self.header:
content.append(self.header)
if self.consensus:
content.append(self.consensus[0])
content.append(self.consensus[1])
for (header, sequence) in self.sequences:
content.append(header)
content.append(sequence)
return "\n".join(content)
def split_a3m(self, limits):
new_a3m = A3M_Container()
if self.consensus:
new_consensus_sequence = self.get_sub_sequence(self.consensus[1],
limits)
new_a3m.consensus = (self.consensus[0], new_consensus_sequence)
for (header, sequence) in self.sequences:
new_sequence = self.get_sub_sequence(sequence, limits)
new_a3m.sequences.append((header, new_sequence))
return new_a3m
def read_a3m(self, fh):
lines = fh.readlines()
self.read_a3m_from_lines(lines)
fh.close()
def read_a3m_from_lines(self, lines):
sequence_header = None
sequence = []
is_first_line = True
for line in lines:
line = line.strip()
if len(line) == 0:
continue
elif line[0] == "#":
if is_first_line:
self.header = line
elif line[0] == ">":
if sequence_header:
self.check_and_add_sequence(sequence_header,
"".join(sequence))
sequence = []
sequence_header = line.rstrip()
else:
sequence.append(line.strip().strip("\x00"))
is_first_line = False
if sequence_header:
self.check_and_add_sequence(sequence_header, "".join(sequence))
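
# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, self-contained demo of the read/split workflow; the sequences
# below are made up purely for illustration.
if __name__ == "__main__":
    container = A3M_Container()
    container.read_a3m_from_lines([
        ">seq1 example sequence",
        "ACDEFGHIKL",
        ">seq2 another sequence",
        "ACDE-GHIKL",
    ])
    # Keep only match-state columns 2-5 of every sequence.
    fragment = container.split_a3m([(2, 5)])
    print(fragment)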
| gpl-3.0 | 6,906,977,804,485,290,000 | 32.416667 | 77 | 0.522818 | false |
meteotest/hurray | hurray/server/platform/posix.py | 1 | 1906 | #!/usr/bin/env python
#
# Copyright 2011 Facebook
# Modifications copyright 2016 Meteotest
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Posix implementations of platform-specific functionality."""
from __future__ import absolute_import, division, print_function, with_statement
import fcntl
import os
from hurray.server.platform import interface
def set_close_exec(fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
def _set_nonblocking(fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
class Waker(interface.Waker):
def __init__(self):
r, w = os.pipe()
_set_nonblocking(r)
_set_nonblocking(w)
set_close_exec(r)
set_close_exec(w)
self.reader = os.fdopen(r, "rb", 0)
self.writer = os.fdopen(w, "wb", 0)
def fileno(self):
return self.reader.fileno()
def write_fileno(self):
return self.writer.fileno()
def wake(self):
try:
self.writer.write(b"x")
except IOError:
pass
def consume(self):
try:
while True:
result = self.reader.read()
if not result:
break
except IOError:
pass
def close(self):
self.reader.close()
self.writer.close()
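
# Minimal usage sketch (not part of the original module): wake the Waker and
# drain the pipe within the same process. Purely illustrative.
if __name__ == "__main__":
    waker = Waker()
    waker.wake()     # writes a single byte to the pipe
    waker.consume()  # drains everything that was written
    waker.close()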
| bsd-3-clause | 186,925,933,638,133,950 | 25.84507 | 80 | 0.640084 | false |
kartikshah1/Test | discussion_forum/views.py | 1 | 29002 | """
Views for Discussion Forum
Keeping activity for add operations only. Can be extended easily if required
TODO
- introduce user specific variable "follow" for thread
Whether user is following thread or not ?
- introduce 'liked', variable for Thread/Comment/Reply
- handle anonymity while serializing thread/comment/reply: instructor \
can see the User
- send notification to thread subscriber about new content
"""
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models import Q
from django.shortcuts import get_object_or_404, render
from rest_framework import mixins, status, viewsets
from rest_framework.decorators import action, link
from rest_framework.response import Response
from discussion_forum import models
from discussion_forum import permissions
from discussion_forum import serializers
from courseware.models import Concept
ORDER_CHOICES = ['recent', 'earlier', 'popularity']
PAGINATED_BY = 5
@login_required
def forum(request):
"""
Serves forum.html template
"""
context = {"request": request}
return render(request, "discussion_forum/forum.html", context)
@login_required
def forum_admin(request):
"""
    Serves the admin.html template
"""
context = {"request": request}
return render(request, "discussion_forum/admin.html", context)
def apply_content_filters(order='recent', queryset=None):
""" Apply sorting_order, disable and pinned filter """
queryset = queryset.filter(disabled=False)
if order == 'earlier':
queryset = queryset.order_by('-pinned', 'created')
elif order == 'popularity':
queryset = queryset.order_by('-pinned', '-popularity', '-created')
else:
#default order recent
queryset = queryset.order_by('-pinned', '-created')
return queryset
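# Illustrative calls (a sketch; assumes ``qs`` is a Thread/Comment queryset):
#     apply_content_filters(order='recent', queryset=qs)      # pinned first, newest first
#     apply_content_filters(order='earlier', queryset=qs)     # pinned first, oldest first
#     apply_content_filters(order='popularity', queryset=qs)  # pinned first, most popular first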
def paginated_serializer(request=None, queryset=None, serializer=None):
"""
Returns the serializer containing objects corresponding to paginated page
"""
paginator = Paginator(queryset, PAGINATED_BY)
page = request.QUERY_PARAMS.get('page')
try:
items = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
items = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999),
# deliver last page of results.
items = paginator.page(paginator.num_pages)
serializer_context = {'request': request}
return serializer(items, context=serializer_context)
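# Illustrative call (a sketch; assumes a DRF ``request`` and a queryset):
#     serializer = paginated_serializer(
#         request=request,
#         queryset=models.Thread.objects.all(),
#         serializer=serializers.PaginatedThreadSerializer,
#     )
#     return Response(serializer.data)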
def get_threads(forum=None, tag=None, request=None, search_term=None):
"""
Return threads according to the specifications.
Returns HTTP_400_BAD_REQUEST if any error occurs.
"""
if tag:
queryset = models.Thread.objects.filter(tags__pk=tag.pk)
else:
queryset = models.Thread.objects.filter(forum=forum)
if search_term:
queryset = queryset.filter(content__contains=search_term)
order = request.QUERY_PARAMS.get('order')
queryset = apply_content_filters(order=order, queryset=queryset)
serializer = paginated_serializer(
request=request,
queryset=queryset,
serializer=serializers.PaginatedThreadSerializer
)
response = serializer.data
for result in response["results"]:
thread = models.Thread.objects.get(pk=result["id"])
result["subscribed"] = thread.subscription.is_subscribed(request.user)
return Response(response)
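# Illustrative call from a viewset action (a sketch; ``forum`` is a
# DiscussionForum instance and tag=None searches the whole forum):
#     return get_threads(forum=forum, tag=None, request=request,
#                        search_term=request.GET.get('search'))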
class DiscussionForumViewSet(mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
viewsets.GenericViewSet):
"""
Methods for this ViewSet. Only retrieve and update are allowed
"""
model = models.DiscussionForum
serializer_class = serializers.DiscussionForumSettingSerializer
permission_classes = [permissions.IsForumAdminOrReadOnly]
paginate_by = 2
def retrieve(self, request, pk=None):
""" Returns discussion_forum object along with tags """
forum = get_object_or_404(models.DiscussionForum, pk=pk)
self.check_object_permissions(request, forum)
print "RETRIEVE CALLED"
serializer = serializers.DiscussionForumSerializer(forum)
return Response(serializer.data)
@action(methods=['POST'], permission_classes=(permissions.IsForumAdmin, ))
def add_tag(self, request, pk=None):
"""
Add tag to this DiscussionForum
"""
forum = get_object_or_404(models.DiscussionForum, pk=pk)
self.check_object_permissions(request, forum)
serializer = serializers.ForumTagSerializer(data=request.DATA)
if serializer.is_valid():
tag = models.Tag(
forum=forum,
title=serializer.data['title'],
tag_name=serializer.data['tag_name'],
auto_generated=False
)
tag.save()
return Response(serializers.TagSerializer(tag).data)
else:
content = {"detail": "tag-name should be unique across forum-tags"}
return Response(content, status.HTTP_400_BAD_REQUEST)
@link(permission_classes=([permissions.IsForumUser]))
def activity(self, request, pk):
"""
Returns activities of particular discussion_forum
"""
forum = get_object_or_404(models.DiscussionForum, pk=pk)
self.check_object_permissions(request, forum)
activities = models.Activity.objects.filter(forum=forum)
activities = activities.order_by('-happened_at')
serializer = serializers.ActivitySerializer(activities, many=True)
return Response(serializer.data)
@link(permission_classes=([permissions.IsForumUser]))
def user_setting(self, request, pk):
"""
Returns the user_setting for currently loggedIn user
"""
forum = get_object_or_404(models.DiscussionForum, pk=pk)
self.check_object_permissions(request, forum)
setting = get_object_or_404(
models.UserSetting,
forum=forum,
user=request.user
)
serializer = serializers.UserSettingSerializer(setting)
return Response(serializer.data)
@link(permission_classes=((permissions.IsForumUser, )))
def threads(self, request, pk):
"""
Return list of threads in a particular order
"""
print "THREAD CLAAED"
forum = get_object_or_404(models.DiscussionForum, pk=pk)
self.check_object_permissions(request, forum)
return get_threads(forum=forum, tag=None, request=request)
@link(permission_classes=((permissions.IsForumUser, )))
def search_threads(self, request, pk):
"""
        Return the list of threads matching the search term, in a particular
        order
        """
        search_term = request.GET.get('search', None)
        forum = get_object_or_404(models.DiscussionForum, pk=pk)
        self.check_object_permissions(request, forum)
        return get_threads(forum=forum,
                           tag=None,
                           request=request,
                           search_term=search_term)
@action(methods=['POST'], permission_classes=((permissions.IsForumUser,)))
def add_thread(self, request, pk=None):
"""
Add a new post to the forum
"""
forum = get_object_or_404(models.DiscussionForum, pk=pk)
self.check_object_permissions(request, forum)
serializer = serializers.ForumThreadSerializer(data=request.DATA)
try:
user_setting = models.UserSetting.objects.get(
forum=forum,
user=request.user
)
except:
content = {'detail': 'Not enough permissions'}
return Response(content, status=status.HTTP_401_UNAUTHORIZED)
if serializer.is_valid():
thread = models.Thread(
forum=forum,
author=request.user,
author_badge=user_setting.badge,
title=serializer.data['title'],
content=serializer.data['content'],
anonymous=serializer.data['anonymous'],
)
thread.save()
forum.thread_count += 1
forum.save()
subscribe = serializer.data['subscribe']
if subscribe:
thread.subscription.subscribe(request.user)
models.Activity.activity(
forum=forum,
user=request.user,
operation=models.ActivityOperation.add,
object_type=models.ActivityObject.thread,
object_id=thread.pk
)
serializer = serializers.ThreadSerializer(thread)
return Response(serializer.data)
else:
content = {"detail": "malformed data"}
return Response(content, status.HTTP_400_BAD_REQUEST)
@link(permission_classes=((permissions.IsForumModerator, )))
def review_content(self, request, pk=None):
"""
Returns list of disabled content to user
"""
forum = get_object_or_404(models.DiscussionForum, pk=pk)
self.check_object_permissions(request, forum)
content_set = models.Content.objects.filter(forum=forum)
content_set = content_set.filter(
Q(spam_count__gt=forum.review_threshold) | Q(disabled=True))
serializer = paginated_serializer(
request=request,
queryset=content_set,
serializer=serializers.PaginatedContentSerializer
)
return Response(serializer.data)
@link(permission_classes=((permissions.IsForumAdmin, )))
def users(self, request, pk=None):
"""
        Returns the list of UserSetting objects, optionally filtered by user type
"""
forum = get_object_or_404(models.DiscussionForum, pk=pk)
self.check_object_permissions(request, forum)
queryset = models.UserSetting.objects.filter(forum=forum)
utype = request.QUERY_PARAMS.get('type')
if utype == "moderators":
queryset = queryset.filter(
Q(super_user=True) | Q(moderator=True)
)
elif utype == "search":
search_str = request.QUERY_PARAMS.get('query')
queryset = queryset.filter(user__username__icontains=search_str)
serializer = paginated_serializer(
request=request,
queryset=queryset,
serializer=serializers.PaginatedUserSettingSerializer
)
return Response(serializer.data)
class TagViewSet(
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin,
viewsets.GenericViewSet):
"""
Methods For this ViewSet
"""
model = models.Tag
serializer_class = serializers.TagSerializer
permission_classes = [permissions.IsForumAdminOrReadOnly]
# Modified IsForumAdminOrReadOnly permission to restrict admin from \
# deleting auto_generated tags
@link(permission_classes=((permissions.IsForumUser, )))
def threads(self, request, pk=None):
""" Return list of threads in a particular order """
tag = get_object_or_404(models.Tag, pk=pk)
self.check_object_permissions(request, tag)
return get_threads(forum=tag.forum, tag=tag, request=request)
class UserSettingViewSet(
mixins.UpdateModelMixin,
        # mixins.DestroyModelMixin omitted: settings are deleted automatically on dropping the course
viewsets.GenericViewSet):
"""
Methods For this ViewSet
"""
model = models.UserSetting
serializer_class = serializers.UserSettingSerializer
permission_classes = [permissions.IsOwnerOrModeratorReadOnly]
@action(methods=['POST'],
permission_classes=((permissions.IsForumModerator, )))
def update_badge(self, request, pk=None):
"""
        Updates the badge for the given user. Only a course moderator can
        update badges.
"""
user_setting = get_object_or_404(models.UserSetting, pk=pk)
self.check_object_permissions(request, user_setting)
# Checking for current user's permission
try:
current_user_setting = models.UserSetting.objects.get(
forum=user_setting.forum,
user=request.user)
except:
content = {"detail": "not enough permission"}
return Response(content, status.HTTP_403_FORBIDDEN)
if not current_user_setting.moderator:
content = {"detail": "not enough permission"}
return Response(content, status.HTTP_403_FORBIDDEN)
serializer = serializers.BadgeSerializer(data=request.DATA)
if serializer.is_valid():
user_setting.badge = serializer.data['badge']
user_setting.save()
serializer = serializers.UserSettingSerializer(user_setting)
return Response(serializer.data)
else:
content = {"detail": "malformed data"}
return Response(content, status.HTTP_400_BAD_REQUEST)
@action(methods=['POST'],
permission_classes=((permissions.IsForumAdmin, )))
def update_moderation_permission(self, request, pk=None):
"""
        Updates the moderator flag of this UserSetting object. Only allowed
        for super users.
"""
user_setting = get_object_or_404(models.UserSetting, pk=pk)
self.check_object_permissions(request, user_setting)
# Checking for current user's permission
try:
current_user_setting = models.UserSetting.objects.get(
forum=user_setting.forum,
user=request.user)
except:
content = {"detail": "not enough permission"}
return Response(content, status.HTTP_403_FORBIDDEN)
if not current_user_setting.super_user:
content = {"detail": "not enough permission"}
return Response(content, status.HTTP_403_FORBIDDEN)
serializer = serializers.BooleanSerializer(data=request.DATA)
if serializer.is_valid():
user_setting.moderator = serializer.data['mark']
user_setting.save()
serializer = serializers.UserSettingSerializer(user_setting)
return Response(serializer.data)
else:
content = {"detail": "malformed data"}
return Response(content, status.HTTP_400_BAD_REQUEST)
class ContentViewSet(mixins.DestroyModelMixin,
mixins.UpdateModelMixin,
viewsets.GenericViewSet):
"""
Methods For this ViewSet
"""
model = models.Content
serializer_class = serializers.ContentSerializer
permission_classes = [permissions.IsOwnerOrModerator]
def destroy(self, request, pk=None):
"""
Downcast to appropriate class member and delete that content
"""
try:
content = models.Content.objects.get_subclass(id=pk)
self.check_object_permissions(request, content)
content.delete()
response = {"detail": "Content deleted."}
return Response(response, status=status.HTTP_204_NO_CONTENT)
except:
response = {"detail": "invalid delete request"}
return Response(response, status=status.HTTP_400_BAD_REQUEST)
@link(permission_classes=((permissions.IsForumUser, )))
def upvote(self, request, pk=None):
"""
Do upvote for content object
"""
content = get_object_or_404(models.Content, pk=pk)
self.check_object_permissions(request, content)
content.vote_up(request.user)
serializer = serializers.ContentSerializer(content)
return Response(serializer.data)
@link(permission_classes=((permissions.IsForumUser, )))
def downvote(self, request, pk=None):
"""
Do downvote for content object
"""
content = get_object_or_404(models.Content, pk=pk)
self.check_object_permissions(request, content)
content.vote_down(request.user)
serializer = serializers.ContentSerializer(content)
return Response(serializer.data)
@link(permission_classes=((permissions.IsForumUser, )))
def mark_spam(self, request, pk=None):
"""
Mark content as spam. If spam count exceeds threshold then content \
gets disabled
"""
content = get_object_or_404(models.Content, pk=pk)
self.check_object_permissions(request, content)
content.mark_spam(request.user)
if content.disabled:
return Response({"detail": "Content is disabled."})
serializer = serializers.ContentSerializer(content)
return Response(serializer.data)
@link(permission_classes=((permissions.IsForumModerator, )))
def pin_content(self, request, pk=None):
"""
Pin the content
"""
content = get_object_or_404(models.Content, pk=pk)
self.check_object_permissions(request, content)
content.pinned = not content.pinned
content.save()
serializer = serializers.ContentSerializer(content)
return Response(serializer.data)
@link(permission_classes=((permissions.IsForumModerator, )))
def disable(self, request, pk=None):
"""
Disable the content object
"""
content = get_object_or_404(models.Content, pk=pk)
self.check_object_permissions(request, content)
content.disabled = True
content.save()
serializer = serializers.ContentSerializer(content)
return Response(serializer.data)
@link(permission_classes=((permissions.IsForumModerator, )))
def enable(self, request, pk=None):
"""
        Enable the content object
"""
content = get_object_or_404(models.Content, pk=pk)
self.check_object_permissions(request, content)
content.disabled = False
content.save()
serializer = serializers.ContentSerializer(content)
return Response(serializer.data)
@link(permission_classes=((permissions.IsForumModerator, )))
def reset_spam_flags(self, request, pk=None):
"""
Reset spam_count and spammers and enable the content
"""
content = get_object_or_404(models.Content, pk=pk)
self.check_object_permissions(request, content)
content.reset_spam_flags()
content.disabled = False
content.save()
serializer = serializers.ContentSerializer(content)
return Response(serializer.data)
class ThreadViewSet(
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin,
viewsets.GenericViewSet):
"""
Methods For this ViewSet
"""
model = models.Thread
serializer_class = serializers.ThreadSerializer
permission_classes = [permissions.IsOwnerOrModeratorOrReadOnly]
def retrieve(self, request, pk=None):
"""
Send a single thread instance. Perform make_hit operation.
If thread is disabled then it sends HTTP_404_NOT_FOUND
"""
thread = get_object_or_404(models.Thread, pk=pk)
self.check_object_permissions(request, thread)
thread.make_hit()
if thread.disabled:
content = {'detail': 'Content is disabled'}
return Response(content, status=status.HTTP_404_NOT_FOUND)
else:
return Response(serializers.ThreadSerializer(thread).data)
@link(permission_classes=((permissions.IsForumUser, )))
def comments(self, request, pk=None):
"""
Returns list of comments
"""
web_request = request._request
if 'order' in web_request.GET.keys():
order = web_request.GET['order']
else:
order = 'earlier'
thread = get_object_or_404(models.Thread, pk=pk)
self.check_object_permissions(request, thread)
comments = models.Comment.objects.filter(thread=thread)
comments = apply_content_filters(queryset=comments, order=order)
serializer = paginated_serializer(
request=request,
queryset=comments,
serializer=serializers.PaginatedCommentSerializer
)
if serializer.data["previous"] is None:
thread.make_hit()
return Response(serializer.data)
@link(permission_classes=((permissions.IsForumUser, )))
def get_tag_list(self, request, pk=None):
        """
        Returns the id and title of all published concepts for this course
        """
        queryset = Concept.objects.filter(is_published=True).filter(group__course_id=pk)
        data = {}
data['results'] = queryset.values("id", "title")
return Response(data)
@action(methods=['POST'],
permission_classes=((permissions.IsForumUser, )))
def add_comment(self, request, pk=None):
"""
Add a new comment for to the Thread
"""
thread = get_object_or_404(models.Thread, pk=pk)
self.check_object_permissions(request, thread)
serializer = serializers.ForumContentSerializer(data=request.DATA)
try:
user_setting = models.UserSetting.objects.get(
forum=thread.forum,
user=request.user
)
except:
content = {'detail': 'Not enough permissions'}
return Response(content, status=status.HTTP_401_UNAUTHORIZED)
if serializer.is_valid():
comment = models.Comment(
thread=thread,
forum=thread.forum,
author=request.user,
author_badge=user_setting.badge,
content=serializer.data['content'],
anonymous=serializer.data['anonymous']
)
comment.save()
thread.children_count += 1
thread.save()
models.Activity.activity(
forum=thread.forum,
user=request.user,
operation=models.ActivityOperation.add,
object_type=models.ActivityObject.comment,
object_id=comment.pk
)
return Response(serializers.CommentSerializer(comment).data)
else:
content = {"detail": "inconsistent data"}
return Response(content, status.HTTP_400_BAD_REQUEST)
@action(methods=['POST'],
permission_classes=((permissions.IsForumUser, )))
def add_tag(self, request, pk=None):
"""
Adds a new tag to this thread
"""
thread = get_object_or_404(models.Thread, pk=pk)
self.check_object_permissions(request, thread)
serializer = serializers.IntegerSerializer(data=request.DATA)
if serializer.is_valid():
tag_id = serializer.data['value']
tag = get_object_or_404(models.Tag, pk=tag_id)
if tag.forum == thread.forum:
thread.tags.add(tag)
serializer = serializers.TagSerializer(
thread.tags.all(),
many=True
)
return Response(serializer.data)
content = {"detail": "un-identified tag"}
return Response(content, status.HTTP_400_BAD_REQUEST)
else:
content = {"detail": "malformed data"}
return Response(content, status.HTTP_400_BAD_REQUEST)
@action(methods=['POST'],
permission_classes=((permissions.IsForumUser, )))
def remove_tag(self, request, pk=None):
"""
Removes tag from this thread
"""
thread = get_object_or_404(models.Thread, pk=pk)
self.check_object_permissions(request, thread)
serializer = serializers.IntegerSerializer(data=request.DATA)
if serializer.is_valid():
tag_id = serializer.data['value']
tag = get_object_or_404(models.Tag, pk=tag_id)
if tag.forum == thread.forum:
thread.tags.remove(tag)
serializer = serializers.TagSerializer(
thread.tags.all(),
many=True
)
return Response(serializer.data)
content = {"detail": "un-identified tag"}
return Response(content, status.HTTP_400_BAD_REQUEST)
else:
content = {"detail": "malformed data"}
return Response(content, status.HTTP_400_BAD_REQUEST)
@link(permission_classes=((permissions.IsForumUser, )))
def subscribe(self, request, pk=None):
"""
Subscribe to this thread notifications
"""
thread = get_object_or_404(models.Thread, pk=pk)
self.check_object_permissions(request, thread)
thread.subscription.subscribe(request.user)
response = {"success": "your subscribed to thread notifications"}
response["subscribed"] = True
return Response(response)
@link(permission_classes=((permissions.IsForumUser, )))
def unsubscribe(self, request, pk=None):
"""
Subscribe to this thread notifications
"""
thread = get_object_or_404(models.Thread, pk=pk)
self.check_object_permissions(request, thread)
thread.subscription.unsubscribe(request.user)
response = {"success": "you will no longer recieve notifications"}
response["subscribed"] = False
return Response(response)
class CommentViewSet(
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin,
viewsets.GenericViewSet):
"""
Methods For this ViewSet
"""
model = models.Comment
serializer_class = serializers.CommentSerializer
permission_classes = [permissions.IsOwnerOrModeratorOrReadOnly]
@link(permission_classes=((permissions.IsForumUser, )))
def replies(self, request, pk=None):
"""
Returns list of replies in discussion_forum
"""
web_request = request._request
if 'order' in web_request.GET.keys():
order = web_request.GET['order']
else:
order = 'earlier'
comment = get_object_or_404(models.Comment, pk=pk)
self.check_object_permissions(request, comment)
replies = models.Reply.objects.filter(comment=comment)
replies = apply_content_filters(queryset=replies, order=order)
serializer = paginated_serializer(
request=request,
queryset=replies,
serializer=serializers.PaginatedReplySerializer
)
return Response(serializer.data)
@action(methods=['POST'],
permission_classes=((permissions.IsForumUser, )))
def add_reply(self, request, pk=None):
"""
Add a new reply for to the comment
"""
comment = get_object_or_404(models.Comment, pk=pk)
self.check_object_permissions(request, comment)
serializer = serializers.ForumContentSerializer(data=request.DATA)
try:
user_setting = models.UserSetting.objects.get(
forum=comment.forum,
user=request.user
)
except:
content = {'detail': 'Not enough permissions'}
return Response(content, status=status.HTTP_401_UNAUTHORIZED)
if serializer.is_valid():
reply = models.Reply(
thread=comment.thread,
comment=comment,
forum=comment.forum,
author=request.user,
author_badge=user_setting.badge,
content=serializer.data['content'],
anonymous=serializer.data['anonymous']
)
reply.save()
comment.children_count += 1
comment.save()
models.Activity.activity(
forum=comment.forum,
user=request.user,
operation=models.ActivityOperation.add,
object_type=models.ActivityObject.reply,
object_id=reply.pk
)
return Response(serializers.ReplySerializer(reply).data)
else:
content = {"detail": "inconsistent data"}
return Response(content, status.HTTP_400_BAD_REQUEST)
class ReplyViewSet(
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin,
viewsets.GenericViewSet):
"""
Reply ViewSet.
Allowed methods are retrieve, content update and delete
"""
model = models.Reply
serializer_class = serializers.ReplySerializer
permission_classes = [permissions.IsOwnerOrModeratorOrReadOnly]
| mit | -2,295,580,879,099,539,200 | 36.373711 | 111 | 0.623716 | false |
gobins/python-madclient | madclient/openstack/common/apiclient/base.py | 1 | 17430 | # Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 OpenStack Foundation
# Copyright 2012 Grid Dynamics
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base utilities to build API operation managers and objects on top of.
"""
########################################################################
#
# THIS MODULE IS DEPRECATED
#
# Please refer to
# https://etherpad.openstack.org/p/kilo-oslo-library-proposals for
# the discussion leading to this deprecation.
#
# We recommend checking out the python-openstacksdk project
# (https://launchpad.net/python-openstacksdk) instead.
#
########################################################################
# E1102: %s is not callable
# pylint: disable=E1102
import abc
import copy
from oslo_utils import strutils
import six
from six.moves.urllib import parse
from openstack.common._i18n import _
from openstack.common.apiclient import exceptions
def getid(obj):
"""Return id if argument is a Resource.
Abstracts the common pattern of allowing both an object or an object's ID
(UUID) as a parameter when dealing with relationships.
"""
try:
if obj.uuid:
return obj.uuid
except AttributeError:
pass
try:
return obj.id
except AttributeError:
return obj
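# Illustrative use (``server`` is hypothetical): both calls return the same ID.
#     getid(server)      # a Resource instance with an ``id`` attribute
#     getid(server.id)   # a bare ID is returned unchanged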
# TODO(aababilov): call run_hooks() in HookableMixin's child classes
class HookableMixin(object):
"""Mixin so classes can register and run hooks."""
_hooks_map = {}
@classmethod
def add_hook(cls, hook_type, hook_func):
"""Add a new hook of specified type.
:param cls: class that registers hooks
:param hook_type: hook type, e.g., '__pre_parse_args__'
:param hook_func: hook function
"""
if hook_type not in cls._hooks_map:
cls._hooks_map[hook_type] = []
cls._hooks_map[hook_type].append(hook_func)
@classmethod
def run_hooks(cls, hook_type, *args, **kwargs):
"""Run all hooks of specified type.
:param cls: class that registers hooks
:param hook_type: hook type, e.g., '__pre_parse_args__'
:param args: args to be passed to every hook function
:param kwargs: kwargs to be passed to every hook function
"""
hook_funcs = cls._hooks_map.get(hook_type) or []
for hook_func in hook_funcs:
hook_func(*args, **kwargs)
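# Illustrative hook registration (a sketch; ``MyClient`` is a hypothetical
# HookableMixin subclass, and the hook name follows the convention used by
# Extension.SUPPORTED_HOOKS below):
#     def announce(*args, **kwargs):
#         print("about to parse args")
#     MyClient.add_hook('__pre_parse_args__', announce)
#     MyClient.run_hooks('__pre_parse_args__')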
class BaseManager(HookableMixin):
"""Basic manager type providing common operations.
Managers interact with a particular type of API (servers, flavors, images,
etc.) and provide CRUD operations for them.
"""
resource_class = None
def __init__(self, client):
"""Initializes BaseManager with `client`.
:param client: instance of BaseClient descendant for HTTP requests
"""
super(BaseManager, self).__init__()
self.client = client
def _list(self, url, response_key=None, obj_class=None, json=None):
"""List the collection.
:param url: a partial URL, e.g., '/servers'
:param response_key: the key to be looked up in response dictionary,
e.g., 'servers'. If response_key is None - all response body
will be used.
:param obj_class: class for constructing the returned objects
(self.resource_class will be used by default)
:param json: data that will be encoded as JSON and passed in POST
request (GET will be sent by default)
"""
if json:
body = self.client.post(url, json=json).json()
else:
body = self.client.get(url).json()
if obj_class is None:
obj_class = self.resource_class
data = body[response_key] if response_key is not None else body
# NOTE(ja): keystone returns values as list as {'values': [ ... ]}
# unlike other services which just return the list...
try:
data = data['values']
except (KeyError, TypeError):
pass
return [obj_class(self, res, loaded=True) for res in data if res]
def _get(self, url, response_key=None):
"""Get an object from collection.
:param url: a partial URL, e.g., '/servers'
:param response_key: the key to be looked up in response dictionary,
e.g., 'server'. If response_key is None - all response body
will be used.
"""
body = self.client.get(url).json()
data = body[response_key] if response_key is not None else body
return self.resource_class(self, data, loaded=True)
def _head(self, url):
"""Retrieve request headers for an object.
:param url: a partial URL, e.g., '/servers'
"""
resp = self.client.head(url)
return resp.status_code == 204
def _post(self, url, json, response_key=None, return_raw=False):
"""Create an object.
:param url: a partial URL, e.g., '/servers'
:param json: data that will be encoded as JSON and passed in POST
request (GET will be sent by default)
:param response_key: the key to be looked up in response dictionary,
e.g., 'server'. If response_key is None - all response body
will be used.
:param return_raw: flag to force returning raw JSON instead of
Python object of self.resource_class
"""
body = self.client.post(url, json=json).json()
data = body[response_key] if response_key is not None else body
if return_raw:
return data
return self.resource_class(self, data)
def _put(self, url, json=None, response_key=None):
"""Update an object with PUT method.
:param url: a partial URL, e.g., '/servers'
:param json: data that will be encoded as JSON and passed in POST
request (GET will be sent by default)
:param response_key: the key to be looked up in response dictionary,
e.g., 'servers'. If response_key is None - all response body
will be used.
"""
resp = self.client.put(url, json=json)
# PUT requests may not return a body
if resp.content:
body = resp.json()
if response_key is not None:
return self.resource_class(self, body[response_key])
else:
return self.resource_class(self, body)
def _patch(self, url, json=None, response_key=None):
"""Update an object with PATCH method.
:param url: a partial URL, e.g., '/servers'
:param json: data that will be encoded as JSON and passed in POST
request (GET will be sent by default)
:param response_key: the key to be looked up in response dictionary,
e.g., 'servers'. If response_key is None - all response body
will be used.
"""
body = self.client.patch(url, json=json).json()
if response_key is not None:
return self.resource_class(self, body[response_key])
else:
return self.resource_class(self, body)
def _delete(self, url):
"""Delete an object.
:param url: a partial URL, e.g., '/servers/my-server'
"""
return self.client.delete(url)
@six.add_metaclass(abc.ABCMeta)
class ManagerWithFind(BaseManager):
"""Manager with additional `find()`/`findall()` methods."""
@abc.abstractmethod
def list(self):
pass
def find(self, **kwargs):
"""Find a single item with attributes matching ``**kwargs``.
This isn't very efficient: it loads the entire list then filters on
the Python side.
"""
matches = self.findall(**kwargs)
num_matches = len(matches)
if num_matches == 0:
msg = _("No %(name)s matching %(args)s.") % {
'name': self.resource_class.__name__,
'args': kwargs
}
raise exceptions.NotFound(msg)
elif num_matches > 1:
raise exceptions.NoUniqueMatch()
else:
return matches[0]
def findall(self, **kwargs):
"""Find all items with attributes matching ``**kwargs``.
This isn't very efficient: it loads the entire list then filters on
the Python side.
"""
found = []
searches = kwargs.items()
for obj in self.list():
try:
if all(getattr(obj, attr) == value
for (attr, value) in searches):
found.append(obj)
except AttributeError:
continue
return found
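# Illustrative usage (a sketch; ``servers`` is a hypothetical concrete manager
# that implements list()):
#     server = servers.find(name='web-1')         # exactly one match, or raises
#     building = servers.findall(status='BUILD')  # zero or more matches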
class CrudManager(BaseManager):
"""Base manager class for manipulating entities.
Children of this class are expected to define a `collection_key` and `key`.
- `collection_key`: Usually a plural noun by convention (e.g. `entities`);
used to refer collections in both URL's (e.g. `/v3/entities`) and JSON
objects containing a list of member resources (e.g. `{'entities': [{},
{}, {}]}`).
- `key`: Usually a singular noun by convention (e.g. `entity`); used to
refer to an individual member of the collection.
"""
collection_key = None
key = None
def build_url(self, base_url=None, **kwargs):
"""Builds a resource URL for the given kwargs.
Given an example collection where `collection_key = 'entities'` and
`key = 'entity'`, the following URL's could be generated.
By default, the URL will represent a collection of entities, e.g.::
/entities
If kwargs contains an `entity_id`, then the URL will represent a
specific member, e.g.::
/entities/{entity_id}
:param base_url: if provided, the generated URL will be appended to it
"""
url = base_url if base_url is not None else ''
url += '/%s' % self.collection_key
# do we have a specific entity?
entity_id = kwargs.get('%s_id' % self.key)
if entity_id is not None:
url += '/%s' % entity_id
return url
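    # Illustrative results for collection_key='entities', key='entity'
    # (mirrors the docstring above; 'abc123' is a made-up ID):
    #     build_url()                                    -> '/entities'
    #     build_url(entity_id='abc123')                  -> '/entities/abc123'
    #     build_url(base_url='/v3', entity_id='abc123')  -> '/v3/entities/abc123'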
def _filter_kwargs(self, kwargs):
"""Drop null values and handle ids."""
for key, ref in six.iteritems(kwargs.copy()):
if ref is None:
kwargs.pop(key)
else:
if isinstance(ref, Resource):
kwargs.pop(key)
kwargs['%s_id' % key] = getid(ref)
return kwargs
def create(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
return self._post(
self.build_url(**kwargs),
{self.key: kwargs},
self.key)
def get(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
return self._get(
self.build_url(**kwargs),
self.key)
def head(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
return self._head(self.build_url(**kwargs))
def list(self, base_url=None, **kwargs):
"""List the collection.
:param base_url: if provided, the generated URL will be appended to it
"""
kwargs = self._filter_kwargs(kwargs)
return self._list(
'%(base_url)s%(query)s' % {
'base_url': self.build_url(base_url=base_url, **kwargs),
'query': '?%s' % parse.urlencode(kwargs) if kwargs else '',
},
self.collection_key)
def put(self, base_url=None, **kwargs):
"""Update an element.
:param base_url: if provided, the generated URL will be appended to it
"""
kwargs = self._filter_kwargs(kwargs)
return self._put(self.build_url(base_url=base_url, **kwargs))
def update(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
params = kwargs.copy()
params.pop('%s_id' % self.key)
return self._patch(
self.build_url(**kwargs),
{self.key: params},
self.key)
def delete(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
return self._delete(
self.build_url(**kwargs))
def find(self, base_url=None, **kwargs):
"""Find a single item with attributes matching ``**kwargs``.
:param base_url: if provided, the generated URL will be appended to it
"""
kwargs = self._filter_kwargs(kwargs)
rl = self._list(
'%(base_url)s%(query)s' % {
'base_url': self.build_url(base_url=base_url, **kwargs),
'query': '?%s' % parse.urlencode(kwargs) if kwargs else '',
},
self.collection_key)
num = len(rl)
if num == 0:
msg = _("No %(name)s matching %(args)s.") % {
'name': self.resource_class.__name__,
'args': kwargs
}
raise exceptions.NotFound(404, msg)
elif num > 1:
raise exceptions.NoUniqueMatch
else:
return rl[0]
class Extension(HookableMixin):
"""Extension descriptor."""
SUPPORTED_HOOKS = ('__pre_parse_args__', '__post_parse_args__')
manager_class = None
def __init__(self, name, module):
super(Extension, self).__init__()
self.name = name
self.module = module
self._parse_extension_module()
def _parse_extension_module(self):
self.manager_class = None
for attr_name, attr_value in self.module.__dict__.items():
if attr_name in self.SUPPORTED_HOOKS:
self.add_hook(attr_name, attr_value)
else:
try:
if issubclass(attr_value, BaseManager):
self.manager_class = attr_value
except TypeError:
pass
def __repr__(self):
return "<Extension '%s'>" % self.name
class Resource(object):
"""Base class for OpenStack resources (tenant, user, etc.).
This is pretty much just a bag for attributes.
"""
HUMAN_ID = False
NAME_ATTR = 'name'
def __init__(self, manager, info, loaded=False):
"""Populate and bind to a manager.
:param manager: BaseManager object
:param info: dictionary representing resource attributes
:param loaded: prevent lazy-loading if set to True
"""
self.manager = manager
self._info = info
self._add_details(info)
self._loaded = loaded
def __repr__(self):
reprkeys = sorted(k
for k in self.__dict__.keys()
if k[0] != '_' and k != 'manager')
info = ", ".join("%s=%s" % (k, getattr(self, k)) for k in reprkeys)
return "<%s %s>" % (self.__class__.__name__, info)
@property
def human_id(self):
"""Human-readable ID which can be used for bash completion.
"""
if self.HUMAN_ID:
name = getattr(self, self.NAME_ATTR, None)
if name is not None:
return strutils.to_slug(name)
return None
def _add_details(self, info):
for (k, v) in six.iteritems(info):
try:
setattr(self, k, v)
self._info[k] = v
except AttributeError:
# In this case we already defined the attribute on the class
pass
def __getattr__(self, k):
if k not in self.__dict__:
# NOTE(bcwaldon): disallow lazy-loading if already loaded once
if not self.is_loaded():
self.get()
return self.__getattr__(k)
raise AttributeError(k)
else:
return self.__dict__[k]
def get(self):
"""Support for lazy loading details.
        Some clients, such as novaclient, have the option to lazy load the
        details, which can be loaded with this function.
"""
# set_loaded() first ... so if we have to bail, we know we tried.
self.set_loaded(True)
if not hasattr(self.manager, 'get'):
return
new = self.manager.get(self.id)
if new:
self._add_details(new._info)
self._add_details(
{'x_request_id': self.manager.client.last_request_id})
def __eq__(self, other):
if not isinstance(other, Resource):
return NotImplemented
# two resources of different types are not equal
if not isinstance(other, self.__class__):
return False
if hasattr(self, 'id') and hasattr(other, 'id'):
return self.id == other.id
return self._info == other._info
def is_loaded(self):
return self._loaded
def set_loaded(self, val):
self._loaded = val
def to_dict(self):
return copy.deepcopy(self._info)
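    # Illustrative subclass (hypothetical) showing the intended use:
    #     class Server(Resource):
    #         HUMAN_ID = True
    #     server = Server(manager, {'id': '1', 'name': 'My Server'}, loaded=True)
    #     server.name      -> 'My Server'
    #     server.human_id  -> 'my-server'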
| apache-2.0 | -13,708,069,029,369,212 | 31.763158 | 79 | 0.572117 | false |
durandj/botman | tests/test_cli.py | 1 | 3671 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests for the command line interface
"""
import os
import unittest
import unittest.mock
import click.testing
import botman.cli
import botman.bot
# pylint: disable=too-few-public-methods
class CliRunnerMixin(object):
"""
Test case mixin for adding CLI runner support
"""
# pylint: disable=invalid-name
def setUp(self):
"""
Sets up the CLI runner
"""
super().setUp()
self.cli_runner = click.testing.CliRunner()
# pylint: enable=invalid-name
# pylint: enable=too-few-public-methods
class TestCommand(CliRunnerMixin, unittest.TestCase):
"""
Tests for the main command
"""
def setUp(self):
super().setUp()
# Ensure that env does not contain our auth_token
self.initial_auth_token = os.environ.get('BOTMAN_AUTH_TOKEN')
if self.initial_auth_token is not None:
del os.environ['BOTMAN_AUTH_TOKEN']
self.auth_token = 'deadbeef'
self.bot_patch = unittest.mock.patch('botman.bot.BotmanBot')
self.mock_bot = self.bot_patch.start()
def tearDown(self):
super().tearDown()
self.bot_patch.stop()
# Restore the environment variables we might have changed
if self.initial_auth_token is not None:
os.environ['BOTMAN_AUTH_TOKEN'] = self.initial_auth_token
def test_help_message(self):
"""
Tests that can get the help message
"""
result = self.cli_runner.invoke(botman.cli.main, ['--help'])
self.assertEqual(
0,
result.exit_code,
            'Command exited successfully',
)
self.assertIn(
'--help Show this message and exit.',
result.output,
'Help message contained the correct information',
)
def test_no_args(self):
"""
Tests that the command fails when not given an auth token
"""
result = self.cli_runner.invoke(botman.cli.main)
self.assertEqual(
2,
result.exit_code,
'Command failed to start correctly',
)
self.assertIn(
'Error: Missing argument "auth_token"',
result.output,
'A helpful error message was given',
)
def test_auth_token_arg(self):
"""
Tests that we can provide the auth token as an argument
"""
result = self.cli_runner.invoke(botman.cli.main, [self.auth_token])
self.assertEqual(
0,
result.exit_code,
'Command exitted successfully',
)
expected_command_list = ', '.join(
botman.bot.BotmanBot.command_handlers.keys(),
)
self.assertIn(
f'Commands: {expected_command_list}',
result.output,
'Command output matched the expected',
)
self.mock_bot.assert_called_with(self.auth_token)
def test_auth_token_env(self):
"""
Tests that we can provide the auth token as an environment variable
"""
result = self.cli_runner.invoke(
botman.cli.main,
env={'BOTMAN_AUTH_TOKEN': self.auth_token},
)
self.assertEqual(
0,
result.exit_code,
            'Command exited successfully',
)
expected_command_list = ', '.join(
botman.bot.BotmanBot.command_handlers.keys(),
)
self.assertIn(
f'Commands: {expected_command_list}',
result.output,
'Command output matched the expected',
)
| mit | -7,692,964,981,166,732,000 | 23.804054 | 75 | 0.565241 | false |
googleads/google-ads-python | google/ads/googleads/v8/services/services/language_constant_service/transports/grpc.py | 1 | 10459 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v8.resources.types import language_constant
from google.ads.googleads.v8.services.types import language_constant_service
from .base import LanguageConstantServiceTransport, DEFAULT_CLIENT_INFO
class LanguageConstantServiceGrpcTransport(LanguageConstantServiceTransport):
"""gRPC backend transport for LanguageConstantService.
Service to fetch language constants.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
DeprecationWarning,
)
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
if credentials is None:
credentials, _ = google.auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host, credentials=credentials, client_info=client_info,
)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
            host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def get_language_constant(
self,
) -> Callable[
[language_constant_service.GetLanguageConstantRequest],
language_constant.LanguageConstant,
]:
r"""Return a callable for the get language constant method over gRPC.
Returns the requested language constant.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Returns:
Callable[[~.GetLanguageConstantRequest],
~.LanguageConstant]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_language_constant" not in self._stubs:
self._stubs[
"get_language_constant"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v8.services.LanguageConstantService/GetLanguageConstant",
request_serializer=language_constant_service.GetLanguageConstantRequest.serialize,
response_deserializer=language_constant.LanguageConstant.deserialize,
)
return self._stubs["get_language_constant"]
__all__ = ("LanguageConstantServiceGrpcTransport",)
| apache-2.0 | -6,344,020,487,974,135,000 | 41.173387 | 98 | 0.607611 | false |
scylladb/scylla | test/alternator/test_tag.py | 1 | 11280 | # -*- coding: utf-8 -*-
# Copyright 2019-present ScyllaDB
#
# This file is part of Scylla.
#
# Scylla is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Scylla is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Scylla. If not, see <http://www.gnu.org/licenses/>.
# Tests for Tagging:
# 1. TagResource - tagging a table with a (key, value) pair
# 2. UntagResource
# 3. ListTagsOfResource
import pytest
from botocore.exceptions import ClientError
import re
import time
from util import multiset, create_test_table, test_table_name
def delete_tags(table, arn):
got = table.meta.client.list_tags_of_resource(ResourceArn=arn)
if len(got['Tags']):
table.meta.client.untag_resource(ResourceArn=arn, TagKeys=[tag['Key'] for tag in got['Tags']])
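# Illustrative round-trip of the three tagging operations (a sketch; assumes
# ``table`` is a boto3 Table resource and ``arn`` is its TableArn):
#     table.meta.client.tag_resource(ResourceArn=arn, Tags=[{'Key': 'k', 'Value': 'v'}])
#     table.meta.client.list_tags_of_resource(ResourceArn=arn)     # -> {'Tags': [...]}
#     table.meta.client.untag_resource(ResourceArn=arn, TagKeys=['k'])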
# Test checking that tagging and untagging is correctly handled
def test_tag_resource_basic(test_table):
got = test_table.meta.client.describe_table(TableName=test_table.name)['Table']
arn = got['TableArn']
tags = [
{
'Key': 'string',
'Value': 'string'
},
{
'Key': 'string2',
'Value': 'string4'
},
{
'Key': '7',
'Value': ' '
},
{
'Key': ' ',
'Value': '9'
},
]
delete_tags(test_table, arn)
got = test_table.meta.client.list_tags_of_resource(ResourceArn=arn)
assert len(got['Tags']) == 0
test_table.meta.client.tag_resource(ResourceArn=arn, Tags=tags)
got = test_table.meta.client.list_tags_of_resource(ResourceArn=arn)
assert 'Tags' in got
assert multiset(got['Tags']) == multiset(tags)
# Removing non-existent tags is legal
test_table.meta.client.untag_resource(ResourceArn=arn, TagKeys=['string2', 'non-nexistent', 'zzz2'])
tags.remove({'Key': 'string2', 'Value': 'string4'})
got = test_table.meta.client.list_tags_of_resource(ResourceArn=arn)
assert 'Tags' in got
assert multiset(got['Tags']) == multiset(tags)
delete_tags(test_table, arn)
got = test_table.meta.client.list_tags_of_resource(ResourceArn=arn)
assert len(got['Tags']) == 0
def test_tag_resource_overwrite(test_table):
got = test_table.meta.client.describe_table(TableName=test_table.name)['Table']
arn = got['TableArn']
tags = [
{
'Key': 'string',
'Value': 'string'
},
]
delete_tags(test_table, arn)
test_table.meta.client.tag_resource(ResourceArn=arn, Tags=tags)
got = test_table.meta.client.list_tags_of_resource(ResourceArn=arn)
assert 'Tags' in got
assert multiset(got['Tags']) == multiset(tags)
tags = [
{
'Key': 'string',
'Value': 'different_string_value'
},
]
test_table.meta.client.tag_resource(ResourceArn=arn, Tags=tags)
got = test_table.meta.client.list_tags_of_resource(ResourceArn=arn)
assert 'Tags' in got
assert multiset(got['Tags']) == multiset(tags)
PREDEFINED_TAGS = [{'Key': 'str1', 'Value': 'str2'}, {'Key': 'kkk', 'Value': 'vv'}, {'Key': 'keykey', 'Value': 'valvalvalval'}]
@pytest.fixture(scope="module")
def test_table_tags(dynamodb):
# The feature of creating a table already with tags was only added to
# DynamoDB in April 2019, and to the botocore library in version 1.12.136
# https://aws.amazon.com/about-aws/whats-new/2019/04/now-you-can-tag-amazon-dynamodb-tables-when-you-create-them/
# so older versions of the library cannot run this test.
import botocore
from distutils.version import LooseVersion
if (LooseVersion(botocore.__version__) < LooseVersion('1.12.136')):
pytest.skip("Botocore version 1.12.136 or above required to run this test")
table = create_test_table(dynamodb,
KeySchema=[ { 'AttributeName': 'p', 'KeyType': 'HASH' }, { 'AttributeName': 'c', 'KeyType': 'RANGE' } ],
AttributeDefinitions=[ { 'AttributeName': 'p', 'AttributeType': 'S' }, { 'AttributeName': 'c', 'AttributeType': 'N' } ],
Tags=PREDEFINED_TAGS)
yield table
table.delete()
# Test checking that tagging works during table creation
def test_list_tags_from_creation(test_table_tags):
got = test_table_tags.meta.client.describe_table(TableName=test_table_tags.name)['Table']
arn = got['TableArn']
got = test_table_tags.meta.client.list_tags_of_resource(ResourceArn=arn)
assert multiset(got['Tags']) == multiset(PREDEFINED_TAGS)
# Test checking that incorrect parameters return proper error codes
def test_tag_resource_incorrect(test_table):
got = test_table.meta.client.describe_table(TableName=test_table.name)['Table']
arn = got['TableArn']
# Note: Tags must have two entries in the map: Key and Value, and their values
# must be at least 1 character long, but these are validated on boto3 level
with pytest.raises(ClientError, match='AccessDeniedException'):
test_table.meta.client.tag_resource(ResourceArn='I_do_not_exist', Tags=[{'Key': '7', 'Value': '8'}])
with pytest.raises(ClientError, match='ValidationException'):
test_table.meta.client.tag_resource(ResourceArn=arn, Tags=[])
test_table.meta.client.tag_resource(ResourceArn=arn, Tags=[{'Key': str(i), 'Value': str(i)} for i in range(30)])
test_table.meta.client.tag_resource(ResourceArn=arn, Tags=[{'Key': str(i), 'Value': str(i)} for i in range(20, 40)])
with pytest.raises(ClientError, match='ValidationException'):
test_table.meta.client.tag_resource(ResourceArn=arn, Tags=[{'Key': str(i), 'Value': str(i)} for i in range(40, 60)])
for incorrect_arn in ['arn:not/a/good/format', 'x'*125, 'arn:'+'scylla/'*15, ':/'*30, ' ', 'незаконные буквы']:
with pytest.raises(ClientError, match='.*Exception'):
test_table.meta.client.tag_resource(ResourceArn=incorrect_arn, Tags=[{'Key':'x', 'Value':'y'}])
for incorrect_tag in [('ok', '#!%%^$$&'), ('->>;-)])', 'ok'), ('!!!\\|','<><')]:
with pytest.raises(ClientError, match='ValidationException'):
test_table.meta.client.tag_resource(ResourceArn=arn, Tags=[{'Key':incorrect_tag[0],'Value':incorrect_tag[1]}])
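# Hedged sketch illustrating the note above: the tag *shape* (both 'Key' and
# 'Value' present) is validated client-side by botocore before any request is
# sent, so a malformed tag surfaces as ParamValidationError rather than as a
# server error. Not collected by pytest (no test_ prefix); for illustration.
def example_client_side_tag_validation(test_table):
    from botocore.exceptions import ParamValidationError
    got = test_table.meta.client.describe_table(TableName=test_table.name)['Table']
    with pytest.raises(ParamValidationError):
        test_table.meta.client.tag_resource(ResourceArn=got['TableArn'],
                                            Tags=[{'Key': 'only-a-key'}])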
# Test that only specific values are allowed for write isolation (system:write_isolation tag)
def test_tag_resource_write_isolation_values(scylla_only, test_table):
got = test_table.meta.client.describe_table(TableName=test_table.name)['Table']
arn = got['TableArn']
for i in ['f', 'forbid', 'forbid_rmw', 'a', 'always', 'always_use_lwt', 'o', 'only_rmw_uses_lwt', 'u', 'unsafe', 'unsafe_rmw']:
test_table.meta.client.tag_resource(ResourceArn=arn, Tags=[{'Key':'system:write_isolation', 'Value':i}])
with pytest.raises(ClientError, match='ValidationException'):
test_table.meta.client.tag_resource(ResourceArn=arn, Tags=[{'Key':'system:write_isolation', 'Value':'bah'}])
# Test that if trying to create a table with forbidden tags (in this test,
# a list of tags longer than the maximum allowed of 50 tags), the table
# is not created at all.
def test_too_long_tags_from_creation(dynamodb):
# The feature of creating a table already with tags was only added to
# DynamoDB in April 2019, and to the botocore library in version 1.12.136
# so older versions of the library cannot run this test.
import botocore
from distutils.version import LooseVersion
if (LooseVersion(botocore.__version__) < LooseVersion('1.12.136')):
pytest.skip("Botocore version 1.12.136 or above required to run this test")
name = test_table_name()
# Setting 100 tags is not allowed, the following table creation should fail:
with pytest.raises(ClientError, match='ValidationException'):
dynamodb.create_table(TableName=name,
BillingMode='PAY_PER_REQUEST',
KeySchema=[{ 'AttributeName': 'p', 'KeyType': 'HASH' }],
AttributeDefinitions=[{ 'AttributeName': 'p', 'AttributeType': 'S' }],
Tags=[{'Key': str(i), 'Value': str(i)} for i in range(100)])
# After the table creation failed, the table should not exist.
with pytest.raises(ClientError, match='ResourceNotFoundException'):
dynamodb.meta.client.describe_table(TableName=name)
# This test is similar to the above, but uses another case of forbidden tags -
# here an illegal value for the system:write_isolation tag. This is a
# scylla_only test because only Alternator checks the validity of the
# system:write_isolation tag.
# Reproduces issue #6809, where the table creation appeared to fail, but it
# was actually created (without the tag).
def test_forbidden_tags_from_creation(scylla_only, dynamodb):
# The feature of creating a table already with tags was only added to
# DynamoDB in April 2019, and to the botocore library in version 1.12.136
# so older versions of the library cannot run this test.
import botocore
from distutils.version import LooseVersion
if (LooseVersion(botocore.__version__) < LooseVersion('1.12.136')):
pytest.skip("Botocore version 1.12.136 or above required to run this test")
name = test_table_name()
# It is not allowed to set the system:write_isolation to "dog", so the
# following table creation should fail:
with pytest.raises(ClientError, match='ValidationException'):
dynamodb.create_table(TableName=name,
BillingMode='PAY_PER_REQUEST',
KeySchema=[{ 'AttributeName': 'p', 'KeyType': 'HASH' }],
AttributeDefinitions=[{ 'AttributeName': 'p', 'AttributeType': 'S' }],
Tags=[{'Key': 'system:write_isolation', 'Value': 'dog'}])
# After the table creation failed, the table should not exist.
with pytest.raises(ClientError, match='ResourceNotFoundException'):
dynamodb.meta.client.describe_table(TableName=name)
# Test checking that unicode tags are allowed
@pytest.mark.xfail(reason="unicode tags not yet supported")
def test_tag_resource_unicode(test_table):
got = test_table.meta.client.describe_table(TableName=test_table.name)['Table']
arn = got['TableArn']
tags = [
{
'Key': 'законные буквы',
'Value': 'string'
},
{
'Key': 'ѮѮ Ѯ',
'Value': 'string4'
},
{
'Key': 'ѮѮ',
'Value': 'ѮѮѮѮѮѮѮѮѮѮѮѮѮѮ'
},
{
'Key': 'keyѮѮѮ',
'Value': 'ѮѮѮvalue'
},
]
delete_tags(test_table, arn)
got = test_table.meta.client.list_tags_of_resource(ResourceArn=arn)
assert len(got['Tags']) == 0
test_table.meta.client.tag_resource(ResourceArn=arn, Tags=tags)
got = test_table.meta.client.list_tags_of_resource(ResourceArn=arn)
assert 'Tags' in got
assert multiset(got['Tags']) == multiset(tags)
| agpl-3.0 | 4,879,117,840,986,191,000 | 45.974895 | 131 | 0.66064 | false |
Southpaw-TACTIC/TACTIC | src/pyasm/web/palette.py | 1 | 13401 | ###########################################################
#
# Copyright (c) 2010, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['Palette']
from pyasm.common import Container, Config, Common
from pyasm.search import Search
import colorsys, types
class Palette(object):
# default color palette
DEFAULT = {
'color': '#AAA', # main font color
'color2': '#BBB', # secondary font color
'color3': '#222222', # tertiary font color
'background': '#444444', # main background color
'background2': '#2F2F2F', # secondary background color
'background3': '#777777', # tertiary background color
'border': '#737b79', # main border color
'shadow': '#000000', # main shadow color
'theme': 'dark',
'table_border': '#494949',
'side_bar_title': '#3C76C2',
}
DARK = DEFAULT
BLACK = {
'color': '#AAA', # main font color
'color2': '#AAA', # secondary font color
'color3': '#AAA', # tertiary font color
'background': '#101010', # main background color
'background2': '#100000', # secondary background color
'background3': '#000000', # tertiary background color
'border': '#202020', # main border color
'shadow': '#202020', # main shadow color
'theme': 'dark',
'table_border': '#202020',
'side_bar_title': '#3C76C2',
}
AQUA = {
'color': '#000', # main font color
'color2': '#333', # secondary font color
'color3': '#333', # tertiary font color
'background': '#FFFFFF', # main background color
'background2': '#BBBBBB', # secondary background color
'background3': '#D1D7E2', # tertiary background color
'border': '#BBB', # main border color
'side_bar_title': '#3C76C2',
'side_bar_title_color': '#FFF',
'tab_background': '#3C76C2',
'table_border': '#E0E0E0',
'theme': 'default',
'shadow': 'rgba(0,0,0,0.1)',
}
# silver theme
SILVER = {
'color': '#000', # main font color
'color2': '#333', # secondary font color
'color3': '#333', # tertiary font color
'background': '#DDDDDD', # main background color
'background2': '#777777', # secondary background color
'background3': '#999999', # tertiary background color
'border': '#888888', # main border color
'table_border': '#DDD',
'theme': 'default',
'shadow': 'rgba(0,0,0,0.6)',
'side_bar_title': '#3C76C2',
}
    # bright theme
BRIGHT = {
'color': '#000', # main font color
'color2': '#333', # secondary font color
'color3': '#333', # tertiary font color
'background': '#FFFFFF', # main background color
'background2': '#AAAAAA', # secondary background color
'background3': '#EEEEEE', # tertiary background color
'border': '#BBBBBB', # main border color
'table_border': '#E0E0E0',
'theme': 'default',
'shadow': 'rgba(0,0,0,0.6)',
'side_bar_title': '#3C76C2',
}
# bon noche theme
BON_NOCHE = {
'color': '#FFF', # main font color
'color2': '#FFF', # secondary font color
'color3': '#FFF', # tertiary font color
'background': '#060719', # main background color
'background2': '#4C1B2F', # secondary background color
'background3': '#9E332E', # tertiary background color
'border': '#444', # main border color
'table_border': '#060719',
'theme': 'dark'
}
# origami theme
ORIGAMI = {
'color': '#000', # main font color
'color2': '#FFF', # secondary font color
'color3': '#000', # tertiary font color
'background': '#E8FAC8', # main background color
'background2': '#8C8015', # secondary background color
'background3': '#BAB966', # tertiary background color
'border': '#888888', # main border color
'table_border': '#E8FAC8',
'shadow': 'rgba(0,0,0,0.6)',
'theme': 'default'
}
MMS = {
'color': '#FFF', # main font color
'color2': '#000', # secondary font color
'color3': '#000', # tertiary font color
'background': '#00539F', # main background color
'background2': '#CCCCCC', # secondary background color
'background3': '#AAAAAA', # tertiary background color
'border': '#999999', # main border color
'table_border': '#00539F',
'theme': 'default'
}
AVIATOR = {
'color': '#000000', # main font color
'color2': '#FFFFFF', # secondary font color
'color3': '#FFFFFF', # tertiary font color
'background': '#E6D595', # main background color
'background2': '#1A9481', # secondary background color
'background3': '#003D5c', # tertiary background color
'border': '#666666', # main border color
'table_border': '#E6D595',
'theme': 'dark'
}
#COLORS = DEFAULT
#COLORS = SILVER
#COLORS = ORIGAMI
COLORS = AQUA
#COLORS = BRIGHT
#COLORS = BON_NOCHE
#COLORS = MMS
#COLORS = AVIATOR
TABLE = {
'table_hilite': '#F00',
'table_select': '#FF0',
'table_changed': '#FFF',
'header_background': '#FFF'
}
def __init__(self, **kwargs):
self.kwargs = kwargs
self.colors = self.kwargs.get("colors")
palette = self.kwargs.get("palette")
if palette:
self.set_palette(palette)
else:
# look at the project
from pyasm.biz import Project
project = Project.get(no_exception=True)
if project:
value = project.get_value("palette")
self.set_palette(value)
# otherwise look at the user
if not self.colors:
from pyasm.biz import PrefSetting
value = PrefSetting.get_value_by_key("palette")
self.set_palette(value)
# look in the config
if not self.colors:
value = Config.get_value("look", "palette")
self.set_palette(value)
if not self.colors:
self.colors = self.COLORS
# make sure all of the colors are defined
for name, value in self.DEFAULT.items():
# make a special provision for theme!
if name == 'theme':
continue
if not self.colors.get(name):
self.colors[name] = value
def set_palette(self, palette):
value = palette
if not value:
return
try:
self.colors = eval(value)
# make sure all of the colors are defined
for name, value in self.DEFAULT.items():
# make a special provision for theme!
if name == 'theme':
continue
if not self.colors.get(name):
self.colors[name] = value
except:
try:
value = value.upper()
value = value.replace(" ", "_")
self.colors = eval("self.%s" % value)
except:
print("WARNING: palette [%s] does not exist. Using default" % value)
def get_theme(self):
theme = self.colors.get("theme")
if not theme:
theme = "default"
return theme
def get_keys(self):
return self.colors.keys()
def get_colors(self):
return self.colors
def color(self, category, modifier=0, default=None):
if not category:
category = 'background'
# make default adjustments
if category.startswith("#"):
color = category
category = "color"
else:
color = self.colors.get(category)
if not color:
color = self.colors.get(default)
if not color:
color = category
if category == 'background2' and not color:
category = 'background'
modifier += 10
color = self.colors.get(category)
if category == 'color2' and not color:
category = 'color'
modifier += 10
color = self.colors.get(category)
return Common.modify_color(color, modifier)
def modify_color(color, modifier):
return Common.modify_color(color, modifier)
modify_color = staticmethod(modify_color)
"""
if not modifier:
return color
if not color:
return None
color = color.replace("#", '')
if len(color) == 3:
first = "%s%s" % (color[0], color[0])
second = "%s%s" % (color[1], color[1])
third = "%s%s" % (color[2], color[2])
elif len(color) == 6:
first = "%s" % color[0:2]
second = "%s" % color[2:4]
third = "%s" % color[4:6]
first = float(int(first, 16) ) / 256
second = float(int(second, 16) ) / 256
third = float(int(third, 16) ) / 256
if type(modifier) == types.ListType:
rgb = []
rgb.append( 0.01*modifier[0] + first )
rgb.append( 0.01*modifier[1] + second )
rgb.append( 0.01*modifier[2] + third )
else:
hsv = colorsys.rgb_to_hsv(first, second, third)
value = 0.01*modifier + hsv[2]
if value < 0:
value = 0
if value > 1:
value = 1
hsv = (hsv[0], hsv[1], value )
rgb = colorsys.hsv_to_rgb(*hsv)
first = hex(int(rgb[0]*256))[2:]
if len(first) == 1:
first = "0%s" % first
second = hex(int(rgb[1]*256))[2:]
if len(second) == 1:
second = "0%s" % second
third = hex(int(rgb[2]*256))[2:]
if len(third) == 1:
third = "0%s" % third
if len(first) == 3:
first = "FF"
if len(second) == 3:
second = "FF"
if len(third) == 3:
third = "FF"
color = "#%s%s%s" % (first, second, third)
return color
modify_color = staticmethod(modify_color)
"""
def gradient(self, palette_key, modifier=0, range=-20, reverse=False, default=None):
if modifier == None:
modifier = 0
if range == None:
range = -20
from .web_container import WebContainer
web = WebContainer.get_web()
palette = Palette.get()
if web.is_IE():
color = self.color(palette_key, (modifier+range)/2, default=default)
return color
else:
if not reverse:
color1 = self.color(palette_key, modifier, default=default)
color2 = self.color(palette_key, modifier+range, default=default)
else:
color2 = self.color(palette_key, modifier, default=default)
color1 = self.color(palette_key, modifier+range, default=default)
if web.get_browser() == 'Mozilla':
return "-moz-linear-gradient(top, %s, %s)" % (color1, color2)
else:
return "-webkit-gradient(linear, 0%% 0%%, 0%% 100%%, from(%s), to(%s))" % (color1, color2)
def push_palette(cls, palette):
palettes = Container.get("Palette:palettes")
if palettes == None:
palettes = []
Container.put("Palette:palettes", palettes)
palette = Palette(palette=palette)
palettes.append(palette)
push_palette = classmethod(push_palette)
def pop_palette(cls):
palettes = Container.get("Palette:palettes")
if palettes == None:
palettes = []
Container.put("Palette:palettes", palettes)
        if len(palettes) == 0:
            # nothing to pop: return None instead of raising an IndexError
            return None
return palettes.pop()
pop_palette = classmethod(pop_palette)
def num_palettes(cls):
palettes = Container.get("Palette:palettes")
if palettes == None:
palettes = []
Container.put("Palette:palettes", palettes)
return len(palettes)
num_palettes = classmethod(num_palettes)
def get(cls):
palettes = Container.get("Palette:palettes")
if palettes == None:
palettes = []
Container.put("Palette:palettes", palettes)
if not palettes:
palette = Palette()
palettes.append(palette)
else:
palette = palettes[-1]
return palette
get = classmethod(get)
def set(cls, palette):
Container.put("Palette:palette", palette)
set = classmethod(set)
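# Hedged usage sketch (assumes an active TACTIC request context so Container
# and Project lookups work; not runnable standalone):
#
#     palette = Palette.get()
#     bg = palette.color("background", modifier=-10)
#     grad = palette.gradient("background", modifier=0, range=-20)
#     Palette.push_palette("AQUA")    # temporarily switch the color scheme
#     Palette.pop_palette()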
| epl-1.0 | 5,284,234,413,447,526,000 | 28.64823 | 106 | 0.50929 | false |
faber03/AndroidMalwareEvaluatingTools | framework sources/Alan/dalvikobfuscator/baksmali-modifier.py | 1 | 2307 | #!/usr/bin/env python
# Copyright (C) 2012 [email protected]
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
from pyparsing import *
InjectedCode = ["nop\n" for i in range(10)]
MethodToken = Literal(".method")
AccessFlag = Literal("public") | \
Literal("private") | \
Literal("protected")| \
Literal("abstract")| \
Literal("static")| \
Literal("constructor")| \
Literal("final")| \
Literal("native") | \
Literal("bridge") | \
Literal("synthetic") | \
Literal("native") | \
Literal("varargs") | \
Literal("declared-synchronized")
JavaType = Word(alphas+"[", alphanums +"_$[;/", min=1)
MethodName = Word(alphas+"$_<", alphanums+"_>$", min=1)
ArgList = JavaType
MethodProtoType = MethodName + Suppress("(") + Optional(ArgList) + Suppress(")") + JavaType
MethodDecl = Suppress(MethodToken) + ZeroOrMore(AccessFlag) + Suppress(MethodProtoType)
def injectnops(filename):
with open(filename, "r") as smalifile:
lines = smalifile.readlines()
modified = []
for index, line in enumerate(lines):
modified.append(line)
if line.startswith(".method"):
try:
flags = list(MethodDecl.parseString(line.strip("\n"),parseAll=True))
except Exception as e:
print line
raise e
if "abstract" not in flags and "native" not in flags:
modified += InjectedCode
with open(filename, "w") as smalifile:
smalifile.writelines(modified)
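# Hedged illustration of the transformation above: a method such as
#
#     .method public foo()V
#         .registers 1
#         return-void
#     .end method
#
# gains ten "nop" lines immediately after its ".method" line, unless the
# method is declared abstract or native (those have no code body to pad).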
def run(directory):
    for dirpath, dirnames, filenames in os.walk(directory):
for filename in filter(lambda x: x.endswith(".smali"), filenames):
injectnops(os.path.join(dirpath, filename))
def usage():
print "%s %s"%(sys.argv[0], sys.argv[1])
print ""
print "inject nops into baksmali files"
if __name__ == "__main__":
if len(sys.argv) != 2:
usage()
else:
run(sys.argv[1])
| apache-2.0 | -1,676,058,413,938,096,400 | 28.576923 | 91 | 0.682271 | false |
mediafactory/tryton_core_daemon | trytond/backend/sqlite/fields.py | 1 | 2752 | #This file is part of Tryton. The COPYRIGHT file at the top level of
#this repository contains the full copyright notices and license terms.
from trytond.backend import fields
class Boolean(fields.Boolean):
@staticmethod
def sql_format(value):
return value
@staticmethod
def sql_type(field):
return ('BOOLEAN', 'BOOLEAN')
class Integer(fields.Integer):
@staticmethod
def sql_format(value):
if value is None:
return value
return int(value)
@staticmethod
def sql_type(field):
return ('INTEGER', 'INTEGER')
class BigInteger(fields.BigInteger, Integer):
pass
class Char(fields.Char):
@staticmethod
def sql_type(field):
return ('VARCHAR', 'VARCHAR')
class Sha(fields.Sha):
@staticmethod
def sql_type(field):
return ('VARCHAR', 'VARCHAR(40)')
class Text(fields.Text):
@staticmethod
def sql_type(field):
return ('TEXT', 'TEXT')
class Float(fields.Float):
@staticmethod
def sql_format(value):
if value is None:
return value
return float(value)
@staticmethod
def sql_type(field):
return ('FLOAT', 'FLOAT')
class Numeric(fields.Numeric):
@staticmethod
def sql_type(field):
return ('NUMERIC', 'NUMERIC')
class Date(fields.Date):
@staticmethod
def sql_type(field):
return ('DATE', 'DATE')
class DateTime(fields.DateTime):
@staticmethod
def sql_type(field):
return ('TIMESTAMP', 'TIMESTAMP')
class Timestamp(fields.Timestamp):
@staticmethod
def sql_type(field):
return ('TIMESTAMP', 'TIMESTAMP')
class Time(fields.Time):
@staticmethod
def sql_type(field):
return ('TIME', 'TIME')
class Binary(fields.Binary):
@staticmethod
def sql_type(field):
return ('BLOB', 'BLOB')
class Selection(fields.Selection):
@staticmethod
def sql_type(field):
return ('VARCHAR', 'VARCHAR')
class Reference(fields.Reference):
@staticmethod
def sql_type(field):
return ('VARCHAR', 'VARCHAR')
class Many2One(fields.Many2One):
@staticmethod
def sql_type(field):
return ('INTEGER', 'INTEGER')
FIELDS = {
'boolean': Boolean,
'integer': Integer,
'biginteger': BigInteger,
'char': Char,
'sha': Sha,
'text': Text,
'float': Float,
'numeric': Numeric,
'date': Date,
'datetime': DateTime,
'timestamp': Timestamp,
'time': Time,
'binary': Binary,
'selection': Selection,
'reference': Reference,
'many2one': Many2One,
'one2many': fields.One2Many,
'many2many': fields.Many2Many,
'function': fields.Function,
'property': fields.Property,
}
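# Hedged usage sketch: the backend resolves a Tryton field type name to its
# SQL column type through this mapping, e.g.
#
#     sql_keyword, sql_type = FIELDS['char'].sql_type(field)
#     # -> ('VARCHAR', 'VARCHAR')
#
# where `field` is the Tryton field instance. SQLite's dynamic typing is why
# most pairs are identical and explicit sizes (e.g. VARCHAR(40) for Sha) are
# rarely needed.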
| gpl-3.0 | -7,945,399,863,617,317,000 | 17.225166 | 71 | 0.619549 | false |
lemmingapex/ProjectEuler | Problem018/src/MaximumPathSum.py | 1 | 1162 | #!/usr/bin/python3
#
# 09/21/2016
# MaximumPathSum.py
# Maximum path sum I
# Maximum path sum II
#
# Scott Wiedemann
#
import sys
class MaximumPathSum:
    def __init__(self, InputFile):
        # instance attribute: a class-level list would be shared between instances
        self._triangleData = []
        for line in InputFile:
            self._triangleData.append([int(v) for v in line.split()])
        return
def sumMaxPath(self):
maxPathData = [row[:] for row in self._triangleData]
i = len(maxPathData) - 2
while i >= 0:
#print(maxPathData[i])
j = len(maxPathData[i]) - 1
while j >= 0:
leftChild = maxPathData[i+1][j]
rightChild = maxPathData[i+1][j+1]
maxPathData[i][j] += max(leftChild, rightChild)
j-=1
i-=1
return maxPathData[0][0]
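    # Example: for the triangle
    #       3
    #      7 4
    #     2 4 6
    #    8 5 9 3
    # sumMaxPath() folds the rows bottom-up and returns 3 + 7 + 4 + 9 = 23.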
# main (DRIVER)
def main():
if len(sys.argv) != 2:
print("Incorrect number of arguments.", file=sys.stderr)
print("Usage: " + sys.argv[0] + " Prog.asm\n", file=sys.stderr)
return 1
else:
InputFileName = sys.argv[1]
try:
# read file
InputFile = open(InputFileName, "r")
except IOError:
print("The file \"" + InputFileName + "\" does not exist.\n")
return 2
print(MaximumPathSum(InputFile).sumMaxPath())
return 0
# call to main
if __name__ == "__main__":
main()
| mit | 4,385,577,479,959,194,600 | 20.127273 | 65 | 0.638554 | false |
open-craft/xblock-mentoring | mentoring/mentoring.py | 1 | 22696 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Harvard
#
# Authors:
# Xavier Antoviaque <[email protected]>
#
# This software's license gives you freedom; you can copy, convey,
# propagate, redistribute and/or modify this program under the terms of
# the GNU Affero General Public License (AGPL) as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version of the AGPL published by the FSF.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program in a file in the toplevel directory called
# "AGPLv3". If not, see <http://www.gnu.org/licenses/>.
#
# Imports ###########################################################
import json
import logging
import uuid
import re
from collections import namedtuple
from lxml import etree
from StringIO import StringIO
from xblock.core import XBlock
from xblock.fields import Boolean, Scope, String, Integer, Float, List
from xblock.fragment import Fragment
from .light_children import XBlockWithLightChildren
from .title import TitleBlock
from .header import SharedHeaderBlock
from .message import MentoringMessageBlock
from .step import StepParentMixin
from .utils import loader
# Globals ###########################################################
log = logging.getLogger(__name__)
def _default_xml_content():
return loader.render_template(
'templates/xml/mentoring_default.xml',
{'url_name': 'mentoring-{}'.format(uuid.uuid4())})
def _is_default_xml_content(value):
UUID_PATTERN = '[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}'
DUMMY_UUID = '12345678-1234-1234-1234-123456789abc'
if value is _default_xml_content:
return True
expected = _default_xml_content()
expected = re.sub(UUID_PATTERN, DUMMY_UUID, expected)
value = re.sub(UUID_PATTERN, DUMMY_UUID, value)
return value == expected
# Classes ###########################################################
Score = namedtuple("Score", ["raw", "percentage", "correct", "incorrect", "partially_correct"])
CORRECT = 'correct'
INCORRECT = 'incorrect'
PARTIAL = 'partial'
class MentoringBlock(XBlockWithLightChildren, StepParentMixin):
"""
An XBlock providing mentoring capabilities
Composed of text, answers input fields, and a set of MRQ/MCQ with advices.
A set of conditions on the provided answers and MCQ/MRQ choices will determine if the
student is a) provided mentoring advices and asked to alter his answer, or b) is given the
ok to continue.
"""
@staticmethod
def is_default_xml_content(value):
return _is_default_xml_content(value)
attempted = Boolean(help="Has the student attempted this mentoring step?",
default=False, scope=Scope.user_state)
completed = Boolean(help="Has the student completed this mentoring step?",
default=False, scope=Scope.user_state)
next_step = String(help="url_name of the next step the student must complete (global to all blocks)",
default='mentoring_first', scope=Scope.preferences)
followed_by = String(help="url_name of the step after the current mentoring block in workflow",
default=None, scope=Scope.content)
url_name = String(help="Name of the current step, used for URL building",
default='mentoring-default', scope=Scope.content)
enforce_dependency = Boolean(help="Should the next step be the current block to complete?",
default=False, scope=Scope.content, enforce_type=True)
display_submit = Boolean(help="Allow submission of the current block?", default=True,
scope=Scope.content, enforce_type=True)
xml_content = String(help="XML content", default=_default_xml_content, scope=Scope.content)
weight = Float(help="Defines the maximum total grade of the block.",
default=1, scope=Scope.content, enforce_type=True)
    num_attempts = Integer(help="Number of attempts a user has made on this question",
                           default=0, scope=Scope.user_state, enforce_type=True)
    max_attempts = Integer(help="Maximum number of attempts for this question", default=0,
scope=Scope.content, enforce_type=True)
mode = String(help="Mode of the mentoring. 'standard' or 'assessment'",
default='standard', scope=Scope.content)
step = Integer(help="Keep track of the student assessment progress.",
default=0, scope=Scope.user_state, enforce_type=True)
student_results = List(help="Store results of student choices.", default=[],
scope=Scope.user_state)
extended_feedback = Boolean(help="Show extended feedback details when all attempts are used up.",
                                default=False, scope=Scope.content)
display_name = String(help="Display name of the component", default="Mentoring XBlock",
scope=Scope.settings)
icon_class = 'problem'
has_score = True
MENTORING_MODES = ('standard', 'assessment')
FLOATING_BLOCKS = (TitleBlock, MentoringMessageBlock, SharedHeaderBlock)
FIELDS_TO_INIT = ('xml_content',)
@property
def is_assessment(self):
return self.mode == 'assessment'
def get_question_number(self, question_id):
"""
Get the step number of the question id
"""
for question in self.get_children_objects():
if hasattr(question, 'step_number') and (question.name == question_id):
return question.step_number
raise ValueError("Question ID in answer set not a step of this Mentoring Block!")
def answer_mapper(self, answer_status):
"""
Create a JSON-dumpable object with readable key names from a list of student answers.
"""
return [
{
'number': self.get_question_number(answer[0]),
'id': answer[0],
'details': answer[1],
} for answer in self.student_results if answer[1]['status'] == answer_status
]
@property
def score(self):
"""Compute the student score taking into account the light child weight."""
total_child_weight = sum(float(step.weight) for step in self.steps)
if total_child_weight == 0:
return Score(0, 0, [], [], [])
score = sum(r[1]['score'] * r[1]['weight'] for r in self.student_results) / total_child_weight
correct = self.answer_mapper(CORRECT)
incorrect = self.answer_mapper(INCORRECT)
partially_correct = self.answer_mapper(PARTIAL)
return Score(score, int(round(score * 100)), correct, incorrect, partially_correct)
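    # Hedged illustration: with two steps of weight 1 where the student got
    # one fully correct (score 1.0) and one wrong (score 0.0), score.raw is
    # 0.5 and score.percentage is 50; the correct/incorrect/partially_correct
    # lists hold the per-question details built by answer_mapper().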
@property
def assessment_message(self):
if not self.max_attempts_reached:
return self.get_message_html('on-assessment-review')
else:
return None
def show_extended_feedback(self):
return self.extended_feedback and self.max_attempts_reached
def feedback_dispatch(self, target_data, stringify):
if self.show_extended_feedback():
if stringify:
return json.dumps(target_data)
else:
return target_data
def correct_json(self, stringify=True):
return self.feedback_dispatch(self.score.correct, stringify)
def incorrect_json(self, stringify=True):
return self.feedback_dispatch(self.score.incorrect, stringify)
def partial_json(self, stringify=True):
return self.feedback_dispatch(self.score.partially_correct, stringify)
def student_view(self, context):
# Migrate stored data if necessary
self.migrate_fields()
fragment, named_children = self.get_children_fragment(
context, view_name='mentoring_view',
not_instance_of=self.FLOATING_BLOCKS,
)
fragment.add_content(loader.render_template('templates/html/mentoring.html', {
'self': self,
'named_children': named_children,
'missing_dependency_url': self.has_missing_dependency and self.next_step_url,
}))
fragment.add_css_url(self.runtime.local_resource_url(self, 'public/css/mentoring.css'))
fragment.add_javascript_url(
self.runtime.local_resource_url(self, 'public/js/vendor/underscore-min.js'))
js_view = 'mentoring_assessment_view.js' if self.is_assessment else 'mentoring_standard_view.js'
fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/'+js_view))
fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/mentoring.js'))
fragment.add_resource(loader.load_unicode('templates/html/mentoring_attempts.html'), "text/html")
fragment.add_resource(loader.load_unicode('templates/html/mentoring_grade.html'), "text/html")
fragment.add_resource(loader.load_unicode('templates/html/mentoring_review_questions.html'), "text/html")
fragment.initialize_js('MentoringBlock')
if not self.display_submit:
self.runtime.publish(self, 'progress', {})
return fragment
def migrate_fields(self):
"""
Migrate data stored in the fields, when a format change breaks backward-compatibility with
previous data formats
"""
        # Partial answers replaced the `completed` field with `status` in `self.student_results`
if self.student_results and 'completed' in self.student_results[0][1]:
# Rename the field and use the new value format (text instead of boolean)
for result in self.student_results:
result[1]['status'] = CORRECT if result[1]['completed'] else INCORRECT
del result[1]['completed']
@property
def additional_publish_event_data(self):
return {
'user_id': self.scope_ids.user_id,
'component_id': self.url_name,
}
@property
def title(self):
"""
Returns the title child.
"""
for child in self.get_children_objects():
if isinstance(child, TitleBlock):
return child
return None
@property
def header(self):
"""
Return the header child.
"""
for child in self.get_children_objects():
if isinstance(child, SharedHeaderBlock):
return child
return None
@property
def has_missing_dependency(self):
"""
Returns True if the student needs to complete another step before being able to complete
the current one, and False otherwise
"""
return self.enforce_dependency and (not self.completed) and (self.next_step != self.url_name)
@property
def next_step_url(self):
"""
Returns the URL of the next step's page
"""
return '/jump_to_id/{}'.format(self.next_step)
@XBlock.json_handler
def get_results(self, queries, suffix=''):
"""
Gets detailed results in the case of extended feedback.
        It may be a good idea to eventually have this function fetch results
        in the general case, instead of loading them in the template, rather
        than using it only for extended feedback situations.
        Right now there are two ways to get results: through the template upon
        loading up the mentoring block, or after submission of an AJAX request,
        as in submit or get_results here.
"""
results = []
if not self.show_extended_feedback():
return {
'results': [],
'error': 'Extended feedback results cannot be obtained.'
}
completed = True
choices = dict(self.student_results)
step = self.step
# Only one child should ever be of concern with this method.
for child in self.get_children_objects():
if child.name and child.name in queries:
results = [child.name, child.get_results(choices[child.name])]
# Children may have their own definition of 'completed' which can vary from the general case
# of the whole mentoring block being completed. This is because in standard mode, all children
# must be correct to complete the block. In assessment mode with extended feedback, completion
# happens when you're out of attempts, no matter how you did.
completed = choices[child.name]['status']
break
# The 'completed' message should always be shown in this case, since no more attempts are available.
message = self.get_message(True)
return {
'results': results,
'completed': completed,
'attempted': self.attempted,
'message': message,
'step': step,
'max_attempts': self.max_attempts,
'num_attempts': self.num_attempts,
}
def get_message(self, completed):
if self.max_attempts_reached:
return self.get_message_html('max_attempts_reached')
elif completed:
return self.get_message_html('completed')
else:
return self.get_message_html('incomplete')
@XBlock.json_handler
def submit(self, submissions, suffix=''):
log.info(u'Received submissions: {}'.format(submissions))
self.attempted = True
if self.is_assessment:
return self.handleAssessmentSubmit(submissions, suffix)
submit_results = []
completed = True
for child in self.get_children_objects():
if child.name and child.name in submissions:
submission = submissions[child.name]
child_result = child.submit(submission)
submit_results.append([child.name, child_result])
child.save()
completed = completed and (child_result['status'] == CORRECT)
message = self.get_message(completed)
# Once it has been completed once, keep completion even if user changes values
if self.completed:
completed = True
# server-side check to not set completion if the max_attempts is reached
if self.max_attempts_reached:
completed = False
if self.has_missing_dependency:
completed = False
message = 'You need to complete all previous steps before being able to complete the current one.'
elif completed and self.next_step == self.url_name:
self.next_step = self.followed_by
# Once it was completed, lock score
if not self.completed:
# save user score and results
while self.student_results:
self.student_results.pop()
for result in submit_results:
self.student_results.append(result)
self.runtime.publish(self, 'grade', {
'value': self.score.raw,
'max_value': 1,
})
if not self.completed and self.max_attempts > 0:
self.num_attempts += 1
self.completed = completed is True
raw_score = self.score.raw
self.publish_event_from_dict('xblock.mentoring.submitted', {
'num_attempts': self.num_attempts,
'submitted_answer': submissions,
'grade': raw_score,
})
return {
'results': submit_results,
'completed': self.completed,
'attempted': self.attempted,
'message': message,
'max_attempts': self.max_attempts,
'num_attempts': self.num_attempts
}
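    # Hedged example of the AJAX payload the submit handler above expects,
    # keyed by light-child names (the names and value shapes here are
    # illustrative; they depend on the concrete question blocks in the XML):
    #
    #     {"answer_1": {"value": "free text"},
    #      "mcq_1": {"value": "choice_2"}}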
def handleAssessmentSubmit(self, submissions, suffix):
completed = False
current_child = None
children = [child for child in self.get_children_objects()
if not isinstance(child, self.FLOATING_BLOCKS)]
assessment_message = None
for child in children:
if child.name and child.name in submissions:
submission = submissions[child.name]
                # Assessment mode doesn't allow modifying answers.
                # This will get the student back to the step he should be at.
current_child = child
step = children.index(child)
if self.step > step or self.max_attempts_reached:
step = self.step
completed = False
break
self.step = step + 1
child_result = child.submit(submission)
if 'tips' in child_result:
del child_result['tips']
self.student_results.append([child.name, child_result])
child.save()
completed = child_result['status']
event_data = {}
score = self.score
if current_child == self.steps[-1]:
log.info(u'Last assessment step submitted: {}'.format(submissions))
if not self.max_attempts_reached:
self.runtime.publish(self, 'grade', {
'value': score.raw,
'max_value': 1,
'score_type': 'proficiency',
})
event_data['final_grade'] = score.raw
assessment_message = self.assessment_message
self.num_attempts += 1
self.completed = True
event_data['exercise_id'] = current_child.name
event_data['num_attempts'] = self.num_attempts
event_data['submitted_answer'] = submissions
self.publish_event_from_dict('xblock.mentoring.assessment.submitted', event_data)
return {
'completed': completed,
'attempted': self.attempted,
'max_attempts': self.max_attempts,
'num_attempts': self.num_attempts,
'step': self.step,
'score': score.percentage,
'correct_answer': len(score.correct),
'incorrect_answer': len(score.incorrect),
'partially_correct_answer': len(score.partially_correct),
'extended_feedback': self.show_extended_feedback() or '',
'correct': self.correct_json(stringify=False),
'incorrect': self.incorrect_json(stringify=False),
'partial': self.partial_json(stringify=False),
'assessment_message': assessment_message,
}
@XBlock.json_handler
def try_again(self, data, suffix=''):
if self.max_attempts_reached:
return {
'result': 'error',
'message': 'max attempts reached'
}
# reset
self.step = 0
self.completed = False
while self.student_results:
self.student_results.pop()
return {
'result': 'success'
}
@property
def max_attempts_reached(self):
return self.max_attempts > 0 and self.num_attempts >= self.max_attempts
def get_message_fragment(self, message_type):
for child in self.get_children_objects():
if isinstance(child, MentoringMessageBlock) and child.type == message_type:
frag = self.render_child(child, 'mentoring_view', {})
return self.fragment_text_rewriting(frag)
def get_message_html(self, message_type):
fragment = self.get_message_fragment(message_type)
if fragment:
return fragment.body_html()
else:
return ''
def studio_view(self, context):
"""
Editing view in Studio
"""
fragment = Fragment()
fragment.add_content(loader.render_template('templates/html/mentoring_edit.html', {
'self': self,
'xml_content': self.xml_content,
}))
fragment.add_javascript_url(
self.runtime.local_resource_url(self, 'public/js/mentoring_edit.js'))
fragment.add_css_url(
self.runtime.local_resource_url(self, 'public/css/mentoring_edit.css'))
fragment.initialize_js('MentoringEditBlock')
return fragment
@XBlock.json_handler
def studio_submit(self, submissions, suffix=''):
log.info(u'Received studio submissions: {}'.format(submissions))
xml_content = submissions['xml_content']
try:
content = etree.parse(StringIO(xml_content))
except etree.XMLSyntaxError as e:
response = {
'result': 'error',
'message': e.message
}
else:
success = True
root = content.getroot()
if 'mode' in root.attrib:
if root.attrib['mode'] not in self.MENTORING_MODES:
response = {
'result': 'error',
'message': "Invalid mentoring mode: should be 'standard' or 'assessment'"
}
success = False
elif root.attrib['mode'] == 'assessment' and 'max_attempts' not in root.attrib:
# assessment has a default of 2 max_attempts
root.attrib['max_attempts'] = '2'
if success:
response = {
'result': 'success',
}
self.xml_content = etree.tostring(content, pretty_print=True)
log.debug(u'Response from Studio: {}'.format(response))
return response
@property
def url_name_with_default(self):
"""
Ensure the `url_name` is set to a unique, non-empty value.
This should ideally be handled by Studio, but we need to declare the attribute
to be able to use it from the workbench, and when this happen Studio doesn't set
a unique default value - this property gives either the set value, or if none is set
a randomized default value
"""
if self.url_name == 'mentoring-default':
return 'mentoring-{}'.format(uuid.uuid4())
else:
return self.url_name
@staticmethod
def workbench_scenarios():
"""
Scenarios displayed by the workbench. Load them from external (private) repository
"""
return loader.load_scenarios_from_path('templates/xml')
| agpl-3.0 | 2,307,463,548,119,067,600 | 37.337838 | 113 | 0.60341 | false |
mpunkenhofer/irc-telegram-bot | telepot/telepot/__init__.py | 1 | 39572 | import sys
import io
import time
import json
import threading
import traceback
import collections
import bisect
try:
import Queue as queue
except ImportError:
import queue
# Patch urllib3 for sending unicode filename
from . import hack
from . import exception
__version_info__ = (10, 5)
__version__ = '.'.join(map(str, __version_info__))
def flavor(msg):
"""
Return flavor of message or event.
A message's flavor may be one of these:
- ``chat``
- ``callback_query``
- ``inline_query``
- ``chosen_inline_result``
An event's flavor is determined by the single top-level key.
"""
if 'message_id' in msg:
return 'chat'
elif 'id' in msg and 'chat_instance' in msg:
return 'callback_query'
elif 'id' in msg and 'query' in msg:
return 'inline_query'
elif 'result_id' in msg:
return 'chosen_inline_result'
else:
top_keys = list(msg.keys())
if len(top_keys) == 1:
return top_keys[0]
raise exception.BadFlavor(msg)
chat_flavors = ['chat']
inline_flavors = ['inline_query', 'chosen_inline_result']
def _find_first_key(d, keys):
for k in keys:
if k in d:
return k
raise KeyError('No suggested keys %s in %s' % (str(keys), str(d)))
all_content_types = [
'text', 'audio', 'document', 'game', 'photo', 'sticker', 'video', 'voice',
'contact', 'location', 'venue', 'new_chat_member', 'left_chat_member', 'new_chat_title',
'new_chat_photo', 'delete_chat_photo', 'group_chat_created', 'supergroup_chat_created',
'channel_chat_created', 'migrate_to_chat_id', 'migrate_from_chat_id', 'pinned_message',
]
def glance(msg, flavor='chat', long=False):
"""
Extract "headline" info about a message.
Use parameter ``long`` to control whether a short or long tuple is returned.
When ``flavor`` is ``chat``
(``msg`` being a `Message <https://core.telegram.org/bots/api#message>`_ object):
- short: (content_type, ``msg['chat']['type']``, ``msg['chat']['id']``)
- long: (content_type, ``msg['chat']['type']``, ``msg['chat']['id']``, ``msg['date']``, ``msg['message_id']``)
*content_type* can be: ``text``, ``audio``, ``document``, ``game``, ``photo``, ``sticker``, ``video``, ``voice``,
``contact``, ``location``, ``venue``, ``new_chat_member``, ``left_chat_member``, ``new_chat_title``,
``new_chat_photo``, ``delete_chat_photo``, ``group_chat_created``, ``supergroup_chat_created``,
``channel_chat_created``, ``migrate_to_chat_id``, ``migrate_from_chat_id``, ``pinned_message``.
When ``flavor`` is ``callback_query``
(``msg`` being a `CallbackQuery <https://core.telegram.org/bots/api#callbackquery>`_ object):
- regardless: (``msg['id']``, ``msg['from']['id']``, ``msg['data']``)
When ``flavor`` is ``inline_query``
(``msg`` being a `InlineQuery <https://core.telegram.org/bots/api#inlinequery>`_ object):
- short: (``msg['id']``, ``msg['from']['id']``, ``msg['query']``)
- long: (``msg['id']``, ``msg['from']['id']``, ``msg['query']``, ``msg['offset']``)
When ``flavor`` is ``chosen_inline_result``
(``msg`` being a `ChosenInlineResult <https://core.telegram.org/bots/api#choseninlineresult>`_ object):
- regardless: (``msg['result_id']``, ``msg['from']['id']``, ``msg['query']``)
"""
def gl_chat():
content_type = _find_first_key(msg, all_content_types)
if long:
return content_type, msg['chat']['type'], msg['chat']['id'], msg['date'], msg['message_id']
else:
return content_type, msg['chat']['type'], msg['chat']['id']
def gl_callback_query():
return msg['id'], msg['from']['id'], msg['data']
def gl_inline_query():
if long:
return msg['id'], msg['from']['id'], msg['query'], msg['offset']
else:
return msg['id'], msg['from']['id'], msg['query']
def gl_chosen_inline_result():
return msg['result_id'], msg['from']['id'], msg['query']
try:
fn = {'chat': gl_chat,
'callback_query': gl_callback_query,
'inline_query': gl_inline_query,
'chosen_inline_result': gl_chosen_inline_result}[flavor]
except KeyError:
raise exception.BadFlavor(flavor)
return fn()
def flance(msg, long=False):
"""
A combination of :meth:`telepot.flavor` and :meth:`telepot.glance`,
return a 2-tuple (flavor, headline_info), where *headline_info* is whatever extracted by
:meth:`telepot.glance` depending on the message flavor and the ``long`` parameter.
"""
f = flavor(msg)
g = glance(msg, flavor=f, long=long)
return f,g
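# Hedged usage sketch:
#
#     f, info = telepot.flance(msg)
#     # for a private text message: f == 'chat' and
#     # info == ('text', 'private', 123456789)    # chat id is illustrative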
def peel(event):
"""
Remove an event's top-level skin (where its flavor is determined), and return
the core content.
"""
return list(event.values())[0]
def fleece(event):
"""
A combination of :meth:`telepot.flavor` and :meth:`telepot.peel`,
return a 2-tuple (flavor, content) of an event.
"""
return flavor(event), peel(event)
def is_event(msg):
"""
Return whether the message looks like an event. That is, whether it has a flavor
that starts with an underscore.
"""
return flavor(msg).startswith('_')
def origin_identifier(msg):
"""
Extract the message identifier of a callback query's origin. Returned value
is guaranteed to be a tuple.
``msg`` is expected to be ``callback_query``.
"""
if 'message' in msg:
return msg['message']['chat']['id'], msg['message']['message_id']
elif 'inline_message_id' in msg:
return msg['inline_message_id'],
else:
raise ValueError()
def message_identifier(msg):
"""
Extract an identifier for message editing. Useful with :meth:`telepot.Bot.editMessageText`
and similar methods. Returned value is guaranteed to be a tuple.
    ``msg`` is expected to be ``chat`` or ``chosen_inline_result``.
"""
if 'chat' in msg and 'message_id' in msg:
return msg['chat']['id'], msg['message_id']
elif 'inline_message_id' in msg:
return msg['inline_message_id'],
else:
raise ValueError()
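# Hedged usage sketch: editing a message the bot sent earlier:
#
#     sent = bot.sendMessage(chat_id, 'hello')
#     bot.editMessageText(telepot.message_identifier(sent), 'hello, edited')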
def _dismantle_message_identifier(f):
if isinstance(f, tuple):
if len(f) == 2:
return {'chat_id': f[0], 'message_id': f[1]}
elif len(f) == 1:
return {'inline_message_id': f[0]}
else:
raise ValueError()
else:
return {'inline_message_id': f}
PY_3 = sys.version_info.major >= 3
_string_type = str if PY_3 else basestring
_file_type = io.IOBase if PY_3 else file
def _isstring(s):
return isinstance(s, _string_type)
def _isfile(f):
return isinstance(f, _file_type)
from . import helper
def flavor_router(routing_table):
router = helper.Router(flavor, routing_table)
return router.route
class _BotBase(object):
def __init__(self, token):
self._token = token
self._file_chunk_size = 65536
def _strip(params, more=[]):
return {key: value for key,value in params.items() if key not in ['self']+more}
def _rectify(params):
def namedtuple_to_dict(value):
if isinstance(value, list):
return [namedtuple_to_dict(v) for v in value]
elif isinstance(value, dict):
return {k:namedtuple_to_dict(v) for k,v in value.items() if v is not None}
elif isinstance(value, tuple) and hasattr(value, '_asdict'):
return {k:namedtuple_to_dict(v) for k,v in value._asdict().items() if v is not None}
else:
return value
def flatten(value):
v = namedtuple_to_dict(value)
if isinstance(v, (dict, list)):
return json.dumps(v, separators=(',',':'))
else:
return v
# remove None, then json-serialize if needed
return {k: flatten(v) for k,v in params.items() if v is not None}
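# Hedged illustration of _rectify(): None values are dropped, and dict, list
# or namedtuple values are JSON-serialized, e.g.
#
#     _rectify({'chat_id': 1, 'reply_markup': {'keyboard': [['a']]}, 'parse_mode': None})
#     # -> {'chat_id': 1, 'reply_markup': '{"keyboard":[["a"]]}'}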
from . import api
class Bot(_BotBase):
class Scheduler(threading.Thread):
        # An Event is ordered by timestamp. Use the `bisect` module to keep the event queue sorted.
Event = collections.namedtuple('Event', ['timestamp', 'data'])
Event.__eq__ = lambda self, other: self.timestamp == other.timestamp
Event.__ne__ = lambda self, other: self.timestamp != other.timestamp
Event.__gt__ = lambda self, other: self.timestamp > other.timestamp
Event.__ge__ = lambda self, other: self.timestamp >= other.timestamp
Event.__lt__ = lambda self, other: self.timestamp < other.timestamp
Event.__le__ = lambda self, other: self.timestamp <= other.timestamp
def __init__(self):
super(Bot.Scheduler, self).__init__()
self._eventq = []
self._lock = threading.RLock() # reentrant lock to allow locked method calling locked method
self._output_queue = None
def _locked(fn):
def k(self, *args, **kwargs):
with self._lock:
return fn(self, *args, **kwargs)
return k
@_locked
def _insert_event(self, data, when):
ev = self.Event(when, data)
bisect.insort(self._eventq, ev)
return ev
@_locked
def _remove_event(self, event):
# Find event according to its timestamp.
# Index returned should be one behind.
i = bisect.bisect(self._eventq, event)
# Having two events with identical timestamp is unlikely but possible.
# I am going to move forward and compare timestamp AND object address
# to make sure the correct object is found.
while i > 0:
i -= 1
e = self._eventq[i]
if e.timestamp != event.timestamp:
raise exception.EventNotFound(event)
elif id(e) == id(event):
self._eventq.pop(i)
return
raise exception.EventNotFound(event)
@_locked
def _pop_expired_event(self):
if not self._eventq:
return None
if self._eventq[0].timestamp <= time.time():
return self._eventq.pop(0)
else:
return None
def event_at(self, when, data):
"""
Schedule some data to emit at an absolute timestamp.
:type when: int or float
:type data: dictionary
:return: an internal Event object
"""
return self._insert_event(data, when)
def event_later(self, delay, data):
"""
Schedule some data to emit after a number of seconds.
:type delay: int or float
:type data: dictionary
:return: an internal Event object
"""
return self._insert_event(data, time.time()+delay)
def event_now(self, data):
"""
Emit some data as soon as possible.
:type data: dictionary
:return: an internal Event object
"""
return self._insert_event(data, time.time())
def cancel(self, event):
"""
Cancel an event.
:type event: an internal Event object
"""
self._remove_event(event)
def run(self):
while 1:
e = self._pop_expired_event()
while e:
if callable(e.data):
d = e.data()
if d is not None:
self._output_queue.put(d)
else:
self._output_queue.put(e.data)
e = self._pop_expired_event()
time.sleep(0.1)
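        # Hedged usage sketch (the output queue is wired up by the bot's
        # message loop; event payloads follow telepot's convention of a single
        # top-level key starting with an underscore):
        #
        #     event = bot.scheduler.event_later(10, {'_reminder': {'id': 1}})
        #     bot.scheduler.cancel(event)    # raises EventNotFound if it already fired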
def __init__(self, token):
super(Bot, self).__init__(token)
self._scheduler = self.Scheduler()
self._router = helper.Router(flavor, {'chat': lambda msg: self.on_chat_message(msg),
'callback_query': lambda msg: self.on_callback_query(msg),
'inline_query': lambda msg: self.on_inline_query(msg),
'chosen_inline_result': lambda msg: self.on_chosen_inline_result(msg)})
# use lambda to delay evaluation of self.on_ZZZ to runtime because
# I don't want to require defining all methods right here.
@property
def scheduler(self):
return self._scheduler
@property
def router(self):
return self._router
def handle(self, msg):
self._router.route(msg)
def _api_request(self, method, params=None, files=None, **kwargs):
return api.request((self._token, method, params, files), **kwargs)
def getMe(self):
""" See: https://core.telegram.org/bots/api#getme """
return self._api_request('getMe')
def sendMessage(self, chat_id, text,
parse_mode=None, disable_web_page_preview=None,
disable_notification=None, reply_to_message_id=None, reply_markup=None):
""" See: https://core.telegram.org/bots/api#sendmessage """
p = _strip(locals())
return self._api_request('sendMessage', _rectify(p))
def forwardMessage(self, chat_id, from_chat_id, message_id, disable_notification=None):
""" See: https://core.telegram.org/bots/api#forwardmessage """
p = _strip(locals())
return self._api_request('forwardMessage', _rectify(p))
def _sendfile(self, inputfile, filetype, params):
method = {'photo': 'sendPhoto',
'audio': 'sendAudio',
'document': 'sendDocument',
'sticker': 'sendSticker',
'video': 'sendVideo',
'voice': 'sendVoice',}[filetype]
if _isstring(inputfile):
params[filetype] = inputfile
return self._api_request(method, _rectify(params))
else:
files = {filetype: inputfile}
return self._api_request(method, _rectify(params), files)
def sendPhoto(self, chat_id, photo,
caption=None,
disable_notification=None, reply_to_message_id=None, reply_markup=None):
"""
See: https://core.telegram.org/bots/api#sendphoto
:param photo:
a string indicating a ``file_id`` on server,
a file-like object as obtained by ``open()`` or ``urlopen()``,
or a (filename, file-like object) tuple.
If the file-like object is obtained by ``urlopen()``, you most likely
have to supply a filename because Telegram servers require to know
the file extension.
If the filename contains non-ASCII characters and you are using Python 2.7,
make sure the filename is a unicode string.
"""
p = _strip(locals(), more=['photo'])
return self._sendfile(photo, 'photo', p)
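    # Hedged usage sketch for the send* file methods (the file_id string is
    # illustrative):
    #
    #     bot.sendPhoto(chat_id, 'AgADBAADr6cxG...')            # re-send by file_id
    #     with open('pic.png', 'rb') as f:
    #         bot.sendPhoto(chat_id, f, caption='a picture')    # upload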
def sendAudio(self, chat_id, audio,
caption=None, duration=None, performer=None, title=None,
disable_notification=None, reply_to_message_id=None, reply_markup=None):
"""
See: https://core.telegram.org/bots/api#sendaudio
:param audio: Same as ``photo`` in :meth:`telepot.Bot.sendPhoto`
"""
p = _strip(locals(), more=['audio'])
return self._sendfile(audio, 'audio', p)
def sendDocument(self, chat_id, document,
caption=None,
disable_notification=None, reply_to_message_id=None, reply_markup=None):
"""
See: https://core.telegram.org/bots/api#senddocument
:param document: Same as ``photo`` in :meth:`telepot.Bot.sendPhoto`
"""
p = _strip(locals(), more=['document'])
return self._sendfile(document, 'document', p)
def sendSticker(self, chat_id, sticker,
disable_notification=None, reply_to_message_id=None, reply_markup=None):
"""
See: https://core.telegram.org/bots/api#sendsticker
:param sticker: Same as ``photo`` in :meth:`telepot.Bot.sendPhoto`
"""
p = _strip(locals(), more=['sticker'])
return self._sendfile(sticker, 'sticker', p)
def sendVideo(self, chat_id, video,
duration=None, width=None, height=None, caption=None,
disable_notification=None, reply_to_message_id=None, reply_markup=None):
"""
See: https://core.telegram.org/bots/api#sendvideo
:param video: Same as ``photo`` in :meth:`telepot.Bot.sendPhoto`
"""
p = _strip(locals(), more=['video'])
return self._sendfile(video, 'video', p)
def sendVoice(self, chat_id, voice,
caption=None, duration=None,
disable_notification=None, reply_to_message_id=None, reply_markup=None):
"""
See: https://core.telegram.org/bots/api#sendvoice
:param voice: Same as ``photo`` in :meth:`telepot.Bot.sendPhoto`
"""
p = _strip(locals(), more=['voice'])
return self._sendfile(voice, 'voice', p)
def sendLocation(self, chat_id, latitude, longitude,
disable_notification=None, reply_to_message_id=None, reply_markup=None):
""" See: https://core.telegram.org/bots/api#sendlocation """
p = _strip(locals())
return self._api_request('sendLocation', _rectify(p))
def sendVenue(self, chat_id, latitude, longitude, title, address,
foursquare_id=None,
disable_notification=None, reply_to_message_id=None, reply_markup=None):
""" See: https://core.telegram.org/bots/api#sendvenue """
p = _strip(locals())
return self._api_request('sendVenue', _rectify(p))
def sendContact(self, chat_id, phone_number, first_name,
last_name=None,
disable_notification=None, reply_to_message_id=None, reply_markup=None):
""" See: https://core.telegram.org/bots/api#sendcontact """
p = _strip(locals())
return self._api_request('sendContact', _rectify(p))
def sendGame(self, chat_id, game_short_name,
disable_notification=None, reply_to_message_id=None, reply_markup=None):
""" See: https://core.telegram.org/bots/api#sendgame """
p = _strip(locals())
return self._api_request('sendGame', _rectify(p))
def sendChatAction(self, chat_id, action):
""" See: https://core.telegram.org/bots/api#sendchataction """
p = _strip(locals())
return self._api_request('sendChatAction', _rectify(p))
def getUserProfilePhotos(self, user_id, offset=None, limit=None):
""" See: https://core.telegram.org/bots/api#getuserprofilephotos """
p = _strip(locals())
return self._api_request('getUserProfilePhotos', _rectify(p))
def getFile(self, file_id):
""" See: https://core.telegram.org/bots/api#getfile """
p = _strip(locals())
return self._api_request('getFile', _rectify(p))
def kickChatMember(self, chat_id, user_id):
""" See: https://core.telegram.org/bots/api#kickchatmember """
p = _strip(locals())
return self._api_request('kickChatMember', _rectify(p))
def leaveChat(self, chat_id):
""" See: https://core.telegram.org/bots/api#leavechat """
p = _strip(locals())
return self._api_request('leaveChat', _rectify(p))
def unbanChatMember(self, chat_id, user_id):
""" See: https://core.telegram.org/bots/api#unbanchatmember """
p = _strip(locals())
return self._api_request('unbanChatMember', _rectify(p))
def getChat(self, chat_id):
""" See: https://core.telegram.org/bots/api#getchat """
p = _strip(locals())
return self._api_request('getChat', _rectify(p))
def getChatAdministrators(self, chat_id):
""" See: https://core.telegram.org/bots/api#getchatadministrators """
p = _strip(locals())
return self._api_request('getChatAdministrators', _rectify(p))
def getChatMembersCount(self, chat_id):
""" See: https://core.telegram.org/bots/api#getchatmemberscount """
p = _strip(locals())
return self._api_request('getChatMembersCount', _rectify(p))
def getChatMember(self, chat_id, user_id):
""" See: https://core.telegram.org/bots/api#getchatmember """
p = _strip(locals())
return self._api_request('getChatMember', _rectify(p))
def answerCallbackQuery(self, callback_query_id,
text=None, show_alert=None, url=None, cache_time=None):
""" See: https://core.telegram.org/bots/api#answercallbackquery """
p = _strip(locals())
return self._api_request('answerCallbackQuery', _rectify(p))
def editMessageText(self, msg_identifier, text,
parse_mode=None, disable_web_page_preview=None, reply_markup=None):
"""
See: https://core.telegram.org/bots/api#editmessagetext
:param msg_identifier:
a 2-tuple (``chat_id``, ``message_id``),
a 1-tuple (``inline_message_id``),
or simply ``inline_message_id``.
You may extract this value easily with :meth:`telepot.message_identifier`
"""
p = _strip(locals(), more=['msg_identifier'])
p.update(_dismantle_message_identifier(msg_identifier))
return self._api_request('editMessageText', _rectify(p))
def editMessageCaption(self, msg_identifier, caption=None, reply_markup=None):
"""
See: https://core.telegram.org/bots/api#editmessagecaption
:param msg_identifier: Same as ``msg_identifier`` in :meth:`telepot.Bot.editMessageText`
"""
p = _strip(locals(), more=['msg_identifier'])
p.update(_dismantle_message_identifier(msg_identifier))
return self._api_request('editMessageCaption', _rectify(p))
def editMessageReplyMarkup(self, msg_identifier, reply_markup=None):
"""
See: https://core.telegram.org/bots/api#editmessagereplymarkup
:param msg_identifier: Same as ``msg_identifier`` in :meth:`telepot.Bot.editMessageText`
"""
p = _strip(locals(), more=['msg_identifier'])
p.update(_dismantle_message_identifier(msg_identifier))
return self._api_request('editMessageReplyMarkup', _rectify(p))
def answerInlineQuery(self, inline_query_id, results,
cache_time=None, is_personal=None, next_offset=None,
switch_pm_text=None, switch_pm_parameter=None):
""" See: https://core.telegram.org/bots/api#answerinlinequery """
p = _strip(locals())
return self._api_request('answerInlineQuery', _rectify(p))
def getUpdates(self, offset=None, limit=None, timeout=None, allowed_updates=None):
""" See: https://core.telegram.org/bots/api#getupdates """
p = _strip(locals())
return self._api_request('getUpdates', _rectify(p))
def setWebhook(self, url=None, certificate=None, max_connections=None, allowed_updates=None):
""" See: https://core.telegram.org/bots/api#setwebhook """
p = _strip(locals(), more=['certificate'])
if certificate:
files = {'certificate': certificate}
return self._api_request('setWebhook', _rectify(p), files)
else:
return self._api_request('setWebhook', _rectify(p))
def deleteWebhook(self):
""" See: https://core.telegram.org/bots/api#deletewebhook """
return self._api_request('deleteWebhook')
def getWebhookInfo(self):
""" See: https://core.telegram.org/bots/api#getwebhookinfo """
return self._api_request('getWebhookInfo')
def setGameScore(self, user_id, score, game_message_identifier,
force=None, disable_edit_message=None):
"""
See: https://core.telegram.org/bots/api#setgamescore
:param game_message_identifier: Same as ``msg_identifier`` in :meth:`telepot.Bot.editMessageText`
"""
p = _strip(locals(), more=['game_message_identifier'])
p.update(_dismantle_message_identifier(game_message_identifier))
return self._api_request('setGameScore', _rectify(p))
def getGameHighScores(self, user_id, game_message_identifier):
"""
See: https://core.telegram.org/bots/api#getgamehighscores
:param game_message_identifier: Same as ``msg_identifier`` in :meth:`telepot.Bot.editMessageText`
"""
p = _strip(locals(), more=['game_message_identifier'])
p.update(_dismantle_message_identifier(game_message_identifier))
return self._api_request('getGameHighScores', _rectify(p))
def download_file(self, file_id, dest):
"""
Download a file to local disk.
:param dest: a path or a ``file`` object
"""
f = self.getFile(file_id)
try:
d = dest if _isfile(dest) else open(dest, 'wb')
r = api.download((self._token, f['file_path']), preload_content=False)
while 1:
data = r.read(self._file_chunk_size)
if not data:
break
d.write(data)
finally:
if not _isfile(dest) and 'd' in locals():
d.close()
if 'r' in locals():
r.release_conn()
def message_loop(self, callback=None, relax=0.1,
timeout=20, allowed_updates=None,
source=None, ordered=True, maxhold=3,
run_forever=False):
"""
Spawn a thread to constantly ``getUpdates`` or pull updates from a queue.
Apply ``callback`` to every message received. Also starts the scheduler thread
for internal events.
:param callback:
a function that takes one argument (the message), or a routing table.
If ``None``, the bot's ``handle`` method is used.
A *routing table* is a dictionary of ``{flavor: function}``, mapping messages to appropriate
handler functions according to their flavors. It allows you to define functions specifically
to handle one flavor of messages. It usually looks like this: ``{'chat': fn1,
'callback_query': fn2, 'inline_query': fn3, ...}``. Each handler function should take
one argument (the message).
:param source:
Source of updates.
If ``None``, ``getUpdates`` is used to obtain new messages from Telegram servers.
If it is a synchronized queue (``Queue.Queue`` in Python 2.7 or
``queue.Queue`` in Python 3), new messages are pulled from the queue.
A web application implementing a webhook can dump updates into the queue,
while the bot pulls from it. This is how telepot can be integrated with webhooks.
Acceptable contents in queue:
- ``str``, ``unicode`` (Python 2.7), or ``bytes`` (Python 3, decoded using UTF-8)
representing a JSON-serialized `Update <https://core.telegram.org/bots/api#update>`_ object.
- a ``dict`` representing an Update object.
When ``source`` is ``None``, these parameters are meaningful:
:type relax: float
:param relax: seconds between each ``getUpdates``
:type timeout: int
:param timeout:
``timeout`` parameter supplied to :meth:`telepot.Bot.getUpdates`,
controlling how long to poll.
:type allowed_updates: array of string
:param allowed_updates:
``allowed_updates`` parameter supplied to :meth:`telepot.Bot.getUpdates`,
controlling which types of updates to receive.
When ``source`` is a queue, these parameters are meaningful:
:type ordered: bool
:param ordered:
If ``True``, ensure in-order delivery of messages to ``callback``
(i.e. updates with a smaller ``update_id`` always come before those with
a larger ``update_id``).
If ``False``, no re-ordering is done. ``callback`` is applied to messages
as soon as they are pulled from queue.
:type maxhold: float
:param maxhold:
Applied only when ``ordered`` is ``True``. The maximum number of seconds
an update is held waiting for a not-yet-arrived smaller ``update_id``.
When this number of seconds is up, the update is delivered to ``callback``
even if some smaller ``update_id``\s have not yet arrived. If those smaller
``update_id``\s arrive at some later time, they are discarded.
Finally, there is this parameter, meaningful always:
:type run_forever: bool or str
:param run_forever:
If ``True`` or any non-empty string, append an infinite loop at the end of
this method, so it never returns. Useful as the very last line in a program.
A non-empty string will also be printed, useful as an indication that the
program is listening.
"""
if callback is None:
callback = self.handle
elif isinstance(callback, dict):
callback = flavor_router(callback)
collect_queue = queue.Queue()
def collector():
while 1:
try:
item = collect_queue.get(block=True)
callback(item)
except:
# Localize error so thread can keep going.
traceback.print_exc()
def relay_to_collector(update):
key = _find_first_key(update, ['message',
'edited_message',
'channel_post',
'edited_channel_post',
'callback_query',
'inline_query',
'chosen_inline_result'])
collect_queue.put(update[key])
return update['update_id']
def get_from_telegram_server():
offset = None # running offset
allowed_upd = allowed_updates
while 1:
try:
result = self.getUpdates(offset=offset,
timeout=timeout,
allowed_updates=allowed_upd)
# Once passed, this parameter is no longer needed.
allowed_upd = None
if len(result) > 0:
# No sort. Trust server to give messages in correct order.
# Update offset to max(update_id) + 1
offset = max([relay_to_collector(update) for update in result]) + 1
except exception.BadHTTPResponse as e:
traceback.print_exc()
# Servers probably down. Wait longer.
if e.status == 502:
time.sleep(30)
except:
traceback.print_exc()
finally:
time.sleep(relax)
def dictify3(data):
if type(data) is bytes:
return json.loads(data.decode('utf-8'))
elif type(data) is str:
return json.loads(data)
elif type(data) is dict:
return data
else:
raise ValueError()
def dictify27(data):
if type(data) in [str, unicode]:
return json.loads(data)
elif type(data) is dict:
return data
else:
raise ValueError()
def get_from_queue_unordered(qu):
dictify = dictify3 if sys.version_info >= (3,) else dictify27
while 1:
try:
data = qu.get(block=True)
update = dictify(data)
relay_to_collector(update)
except:
traceback.print_exc()
def get_from_queue(qu):
dictify = dictify3 if sys.version_info >= (3,) else dictify27
# Here is the re-ordering mechanism, ensuring in-order delivery of updates.
max_id = None # max update_id passed to callback
buffer = collections.deque() # keep those updates which skip some update_id
qwait = None # how long to wait for updates,
# because buffer's content has to be returned in time.
while 1:
try:
data = qu.get(block=True, timeout=qwait)
update = dictify(data)
if max_id is None:
# First message received, handle regardless.
max_id = relay_to_collector(update)
elif update['update_id'] == max_id + 1:
# No update_id skipped, handle naturally.
max_id = relay_to_collector(update)
# clear contagious updates in buffer
if len(buffer) > 0:
buffer.popleft() # first element belongs to update just received, useless now.
while 1:
try:
if type(buffer[0]) is dict:
max_id = relay_to_collector(buffer.popleft()) # updates that arrived earlier, handle them.
else:
break # gap, no more contagious updates
except IndexError:
break # buffer empty
elif update['update_id'] > max_id + 1:
# Update arrives pre-maturely, insert to buffer.
nbuf = len(buffer)
if update['update_id'] <= max_id + nbuf:
# buffer long enough, put update at position
buffer[update['update_id'] - max_id - 1] = update
else:
# buffer too short, lengthen it
expire = time.time() + maxhold
for a in range(nbuf, update['update_id']-max_id-1):
buffer.append(expire) # put expiry time in gaps
buffer.append(update)
else:
pass # discard
except queue.Empty:
# debug message
# print('Timeout')
# some buffer contents have to be handled
# flush buffer until a non-expired time is encountered
while 1:
try:
if type(buffer[0]) is dict:
max_id = relay_to_collector(buffer.popleft())
else:
expire = buffer[0]
if expire <= time.time():
max_id += 1
buffer.popleft()
else:
break # non-expired
except IndexError:
break # buffer empty
except:
traceback.print_exc()
finally:
try:
# don't wait longer than next expiry time
qwait = buffer[0] - time.time()
if qwait < 0:
qwait = 0
except IndexError:
# buffer empty, can wait forever
qwait = None
# debug message
# print ('Buffer:', str(buffer), ', To Wait:', qwait, ', Max ID:', max_id)
collector_thread = threading.Thread(target=collector)
collector_thread.daemon = True
collector_thread.start()
if source is None:
message_thread = threading.Thread(target=get_from_telegram_server)
elif isinstance(source, queue.Queue):
if ordered:
message_thread = threading.Thread(target=get_from_queue, args=(source,))
else:
message_thread = threading.Thread(target=get_from_queue_unordered, args=(source,))
else:
raise ValueError('Invalid source')
message_thread.daemon = True # need this for main thread to be killable by Ctrl-C
message_thread.start()
self._scheduler._output_queue = collect_queue
self._scheduler.daemon = True
self._scheduler.start()
if run_forever:
if _isstring(run_forever):
print(run_forever)
while 1:
time.sleep(10)
import inspect
class SpeakerBot(Bot):
def __init__(self, token):
super(SpeakerBot, self).__init__(token)
self._mic = helper.Microphone()
@property
def mic(self):
return self._mic
def create_listener(self):
q = queue.Queue()
self._mic.add(q)
ln = helper.Listener(self._mic, q)
return ln
class DelegatorBot(SpeakerBot):
def __init__(self, token, delegation_patterns):
"""
:param delegation_patterns: a list of (seeder, delegator) tuples.
"""
super(DelegatorBot, self).__init__(token)
self._delegate_records = [p+({},) for p in delegation_patterns]
def _startable(self, delegate):
return ((hasattr(delegate, 'start') and inspect.ismethod(delegate.start)) and
(hasattr(delegate, 'is_alive') and inspect.ismethod(delegate.is_alive)))
def _tuple_is_valid(self, t):
return len(t) == 3 and callable(t[0]) and type(t[1]) in [list, tuple] and type(t[2]) is dict
def _ensure_startable(self, delegate):
if self._startable(delegate):
return delegate
elif callable(delegate):
return threading.Thread(target=delegate)
elif type(delegate) is tuple and self._tuple_is_valid(delegate):
func, args, kwargs = delegate
return threading.Thread(target=func, args=args, kwargs=kwargs)
else:
raise RuntimeError('Delegate does not have the required methods, is not callable, and is not a valid tuple.')
def handle(self, msg):
self._mic.send(msg)
for calculate_seed, make_delegate, dict in self._delegate_records:
id = calculate_seed(msg)
if id is None:
continue
elif isinstance(id, collections.Hashable):
if id not in dict or not dict[id].is_alive():
d = make_delegate((self, msg, id))
d = self._ensure_startable(d)
dict[id] = d
dict[id].start()
else:
d = make_delegate((self, msg, id))
d = self._ensure_startable(d)
d.start()
| mit | -1,966,166,402,499,118,800 | 37.531646 | 131 | 0.550996 | false |
ImmobilienScout24/cfn-sphere | src/main/python/cfn_sphere/template/__init__.py | 1 | 1485 | import json
class CloudFormationTemplate(object):
def __init__(self, body_dict, name):
self.name = name
self.template_format_version = body_dict.get('AWSTemplateFormatVersion', '2010-09-09')
self.description = body_dict.get('Description', '')
self.metadata = body_dict.get('Metadata', {})
self.parameters = body_dict.get('Parameters', {})
self.mappings = body_dict.get('Mappings', {})
self.conditions = body_dict.get('Conditions', {})
self.resources = body_dict.get('Resources', {})
self.outputs = body_dict.get('Outputs', {})
self.post_custom_resources = body_dict.get('PostCustomResources', {})
def get_no_echo_parameter_keys(self):
if self.parameters:
return [key for key, value in self.parameters.items() if str(value.get('NoEcho')).lower() == 'true']
else:
return []
def get_template_body_dict(self):
return {
'AWSTemplateFormatVersion': self.template_format_version,
'Description': self.description,
'Parameters': self.parameters,
'Mappings': self.mappings,
'Conditions': self.conditions,
'Resources': self.resources,
'Outputs': self.outputs
}
def get_pretty_template_json(self):
return json.dumps(self.get_template_body_dict(), indent=2)
def get_template_json(self):
return json.dumps(self.get_template_body_dict())
| apache-2.0 | -3,528,714,139,985,188,400 | 38.078947 | 112 | 0.607407 | false |
Mangara/ArboralExplorer | lib/Cmpl/cmplServer/cmplServer/CmplGridScheduler.py | 1 | 24624 | #***********************************************************************
# This code is part of CmplServer
#
# Copyright (C) 2013, 2014
# Mike Steglich - Technical University of Applied Sciences
# Wildau, Germany
#
# CmplServer is a project of the Technical University of
# Applied Sciences Wildau and the Institute for Operations Research
# and Business Management at the Martin Luther University
# Halle-Wittenberg.
# Please visit the project homepage <www.coliop.org>
#
# CmplServer is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# CmplServer is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
#**********************************************************************
#!/usr/bin/python
from __future__ import division
from pyCmpl.CmplDefs import *
from pyCmpl.CmplTools import *
from CmplServerException import *
from CmplServerTools import *
import xmlrpclib
import thread
import threading
import random
import os
import sys
import subprocess
import socket
import time
#################################################################################
#
# CmplServerHandler
#
#################################################################################
class CmplServerHandler(object):
#*********** constructor **********
def __init__(self, cmplServerUrl , maxProb, performanceIndex ):
self.__cmplServerUrl = cmplServerUrl
self.__cmplServer = None
self.__emptyProblems = maxProb
self.__maxProblems = maxProb
self.__performanceIndex = performanceIndex
self.__lastActivity = 0
self.__isActive = True
self.__solvers = []
#*********** end constructor *****
# getter and setter ***************
@property
def cmplServerUrl(self):
return self.__cmplServerUrl
def setCmplServerUrl(self, url):
self.__cmplServerUrl = url
@property
def cmplServer(self):
return self.__cmplServer
def setCmplServer(self, server):
self.__cmplServer = server
@property
def emptyProblems(self):
return self.__emptyProblems
def setEmptyProblems(self, nr):
self.__emptyProblems=nr
def addEmptyProblem(self):
self.__emptyProblems += 1
def removeEmptyProblem(self):
self.__emptyProblems -= 1
@property
def maxProblems(self):
return self.__maxProblems
def setMaxProblems(self, nr):
self.__maxProblems=nr
@property
def performanceIndex(self):
return self.__performanceIndex
def setPerformanceIndex(self, nr):
self.__performanceIndex=nr
@property
def lastActivityTime(self):
return self.__lastActivityTime
def setLastActivityTime(self, timeStamp):
self.__lastActivityTime=timeStamp
@property
def isActive(self):
return self.__isActive
def setActive(self, val):
self.__isActive=val
@property
def solvers(self):
return self.__solvers
def setSolvers(self, solvers):
self.__solvers=solvers
# end getter and setter *************
#################################################################################
# End CmplServerHandler
#################################################################################
#################################################################################
#
# ProblemQueueHandler
#
#################################################################################
class ProblemQueueHandler(object):
#*********** constructor **********
def __init__(self, cmplName , solver):
self.__cmplServerId = None
self.__cmplName = cmplName
self.__status = CMPLGRID_SCHEDULER_UNKNOWN
self.__solver = solver
self.setLastActivityTime(time.time())
#*********** end constructor *****
# getter and setter ***************
@property
def cmplServerId(self):
return self.__cmplServerId
def setCmplServerId(self, id):
self.__cmplServerId = id
@property
def cmplName(self):
return self.__cmplName
def setCmplName(self, name):
self.__cmplName = url
@property
def status(self):
return self.__status
def setStatus(self, status):
self.__status = status
@property
def solver(self):
return self.__solver
@property
def lastActivityTime(self):
return self.__lastActivityTime
def setLastActivityTime(self, timeStamp):
self.__lastActivityTime=timeStamp
# end getter and setter *************
#################################################################################
# End ProblemQueueHandler
#################################################################################
#################################################################################
#
# CmplGridScheduler
#
#################################################################################
class CmplGridScheduler(object):
#****************************************************************************
# Constructor and destructor
#****************************************************************************
#*********** constructor **********
def __init__(self, port = None ):
self.__compatibility = COMPATIBILITY
self.__server = None
self.__serverList = {}
self.__problemQueueInfos = {}
self.__problemQueue = []
self.__maxCmplServerTries = 10
self.__maxInactivityTime=60*60*12 # half a day
self.__schedulerStatus = CMPLGRID_SCHEDULER_OK
self.__schedulerStatusTxt = "CMPLGrid scheduler is running"
self.__solvers = ["cbc","glpk","scip","gurobi","cplex"]
if port == None:
self.__cmplPort = 8008
else:
self.__cmplPort = port
self.__serviceIntervall = 0.1
self.__serviceIntervall2 = 30
self.__serviceThreadHandler = None
self.__cmplServerPath = os.path.expanduser("~") + os.sep+ "CmplServer" +os.sep
self.__logFileName = self.__cmplServerPath + "cmplGridScheduler.log"
if os.path.exists(self.__cmplServerPath) == False:
try:
os.mkdir(self.__cmplServerPath)
except OSError, e:
raise CmplServerException( "Cannot create CmplServer path <"+self.__cmplServerPath+">")
try:
self.__logFile = open(self.__logFileName, "a")
except IOError, e:
raise CmplServerException( "Cannot read CmplServer option file <"+self.__optFileName+"> " + str(e) )
try:
try:
self.__optFileName=os.environ['CMPLSERVERPATH']+ os.sep + "cmplServer.opt"
except:
self.__optFileName=os.path.dirname(os.path.abspath(sys.argv[0])) + os.sep + ".." + os.sep + "cmplServer.opt"
f = open(self.__optFileName, "r")
for line in f:
ret=line.split("=")
if ret[0].strip().lower() == "schedulerserviceintervall":
if CmplTools.strIsNumber(ret[1].strip()):
self.__serviceIntervall = float(ret[1].strip())
else:
CmplServerTools.cmplLogging( self.__logFile, "Wrong schedulerServiceIntervall in CmplServer option file <"+str(self.__serviceIntervall)+"> default value is used" )
if ret[0].strip().lower() == "sserviceintervall":
if CmplTools.strIsNumber(ret[1].strip()):
self.__serviceIntervall2 = float(ret[1].strip())
else:
CmplServerTools.cmplLogging( self.__logFile, "Wrong serviceIntervall in CmplServer option file <"+str(self.__serviceIntervall2)+"> default value is used" )
if ret[0].strip().lower() == "maxinactivitytime":
if CmplTools.strIsNumber(ret[1].strip()):
self.__maxInactivityTime = int(ret[1].strip())
continue
else:
CmplServerTools.cmplLogging( self.__logFile, "Wrong option maxInactivityTime in CmplServer option file <"+self.__optFileName+"> default value is used" )
if ret[0].strip().lower() == "maxservertries":
if CmplTools.strIsNumber(ret[1].strip()):
self.__maxServerTries = int(ret[1].strip())
else:
CmplServerTools.cmplLogging( self.__logFile, "Wrong maxServerTries in CmplServer option file <"+str(self.__maxServerTries)+"> default value is used" )
"""if ret[0].strip().lower() == "solvers":
self.__solvers=ret[1].split()"""
f.close()
#undocumented - only used for the pyCmpl test script
if port != None:
self.__cmplPort = port
except IOError, e:
raise CmplServerException( "Cannot read CmplServer option file <"+self.__optFileName+"> " + str(e), self.__logFile )
#*********** end constructor *******
#*********** destructor ************
def __del__(self ):
if self.__serviceThreadHandler!=None:
self.__serviceThreadHandler.kill()
self.__logFile.close()
#*********** end destructor ********
#****************************************************************************
# public methods
#****************************************************************************
#*********** startCmplGridScheduler *************
def startCmplGridScheduler(self):
try:
self.__server = CmplXmlRpcServer(("", self.__cmplPort), logRequests=False)
self.__server.register_function(self.getJobId)
self.__server.register_function(self.knock)
self.__server.register_function(self.cmplServerFailed)
self.__server.register_function(self.getServerId)
self.__server.register_function(self.addEmptyProblem)
self.__server.register_function(self.disconnectServer)
self.__server.register_function(self.disconnectProblem)
self.__server.register_function(self.stopServer)
self.__server.register_function(self.status)
CmplServerTools.cmplLogging( self.__logFile, "CmplGridScheduler has been started | port: " + str(self.__cmplPort) + " | serviceIntervall: " + str(self.__serviceIntervall) )
self.shutdown = False
thread.start_new_thread(self.__serviceThread, () )
while not self.shutdown:
self.__server.handle_request()
except:
print "CmplGridScheduler error: " , str(sys.exc_info()[1])
#*********** end startCmplGridScheduler **********
#*********** getJobId **************
def getJobId(self, cmplName, solver, compatibility=0):
id = "G"+self.__server.client_address[0] + "-" + strftime("%Y-%m-%d-%H-%M-%S", gmtime()) + "-" + str(random.randint(100000, 999999))
status = CMPLGRID_SCHEDULER_OK
statusMessage = ""
if int(compatibility)!=self.__compatibility:
status = CMPLSERVER_ERROR
statusMessage = "Incompatible CmplServer client with compatibilty stage "+str(compatibility) + " instead of " + str(self.__compatibility)
else:
statusMessage = str(self.__compatibility)
tmpSolver=solver.lower()
if not tmpSolver in self.__solvers:
status=CMPLSERVER_ERROR
statusMessage="Unsupported solver <"+ solver + ">"
if status==CMPLGRID_SCHEDULER_OK:
CmplServerTools.cmplLogging( self.__logFile, "Problem has been registered" , id, cmplName )
if len(self.__problemQueue)>0:
status = CMPLGRID_SCHEDULER_BUSY
self.__problemQueue.append(id)
self.__problemQueueInfos.update( {id : ProblemQueueHandler(cmplName, tmpSolver) } )
CmplServerTools.cmplLogging( self.__logFile, "CmplGrid is busy: Problem is moved to the problem queue. " , id, cmplName)
statusMessage = "CmplGrid is busy: Problem is moved to the problem queue. "
else:
bestServer, status = self.__getBestServer(tmpSolver)
if status==CMPLGRID_SCHEDULER_SOLVER_NOT_AVAILABLE:
status = CMPLSERVER_ERROR
statusMessage = "Solver <"+solver + "> not available in the CmplGrid "
elif status==CMPLGRID_SCHEDULER_BUSY:
self.__problemQueue.append(id)
self.__problemQueueInfos.update( {id : ProblemQueueHandler(cmplName, tmpSolver) } )
self.__problemQueueInfos[id].setStatus(status)
status = CMPLGRID_SCHEDULER_BUSY
CmplServerTools.cmplLogging( self.__logFile, "CmplGrid is busy: Problem is moved to the problem queue. ", id, cmplName)
if self.__nrOfActiveServers()>0:
statusMessage = "CmplGrid is busy: Problem is moved to the problem queue. "
else:
statusMessage = "No server available at the moment in the CMPLGrid: Problem is moved to the problem queue. "
self.__schedulerStatus = CMPLGRID_SCHEDULER_BUSY
self.__schedulerStatusTxt = "CMPLGrid scheduler is busy"
elif status==CMPLGRID_SCHEDULER_OK:
if self.__sendProblemToServer( bestServer, id, cmplName):
statusMessage = self.__serverList[bestServer].cmplServerUrl
return [ status, statusMessage, id]
#*********** end getJobId ************
#*********** knock **************
def knock(self, id):
status=CMPLGRID_SCHEDULER_UNKNOWN
statusMessage=""
serverUrl=""
if id in self.__problemQueueInfos:
status = self.__problemQueueInfos[id].status
serverId = self.__problemQueueInfos[id].cmplServerId
if status == CMPLGRID_SCHEDULER_OK:
if serverId == None:
status = CMPLGRID_SCHEDULER_BUSY
statusMessage = "CmplGrid scheduler is waiting for a free CmplServer"
else:
if self.__checkId(serverId):
statusMessage=self.__serverList[serverId].cmplServerUrl
del self.__problemQueueInfos[id]
else:
status = CMPLGRID_SCHEDULER_ERROR
statusMessage = "...Server isn't connected"
elif status==CMPLGRID_SCHEDULER_PROBLEM_DELETED:
status = CMPLGRID_SCHEDULER_ERROR
statusMessage = "The problem was to long inactive and was therefore deleted. "
del self.__problemQueueInfos[id]
else:
status = CMPLGRID_SCHEDULER_ERROR
statusMessage = "Problem is not connected to CMPLGrid <"+id+">"
return [status, statusMessage, id ]
#*********** end knock **************
#*********** cmplServerFailed **************
def cmplServerFailed(self, cmplUrl):
status=CMPLGRID_SCHEDULER_WARNING
statusMessage="Unknown CmplServer can't registred as inactive <"+cmplUrl+">"
for s in self.__serverList:
if self.__serverList[s].cmplServerUrl==cmplUrl:
self.__serverList[s].setActive(False)
status=CMPLGRID_SCHEDULER_OK
statusMessage="CmplServer is now registred as inactive <"+cmplUrl+">"
CmplServerTools.cmplLogging( self.__logFile, statusMessage )
return [status, statusMessage, "" ]
#*********** end cmplServerFailed **************
#*********** getServerId **************
def getServerId(self, port, maxProblems, performanceIndex, solvers, compatibility=0):
tmpUrl = self.__server.client_address[0]+":"+str(port)
id = tmpUrl + "-"+ str(random.randint(100000, 999999))
status = CMPLGRID_SCHEDULER_OK
statusMessage=""
if type(port)!=int:
status= CMPLGRID_SCHEDULER_ERROR
statusMessage = "Wrong CmplServer port ", port
else:
tmpUrl= "http://"+tmpUrl
self.__serverList.update( { id: CmplServerHandler(tmpUrl, int(maxProblems) , int(performanceIndex) ) } )
if int(compatibility)!=self.__compatibility:
status= CMPLGRID_SCHEDULER_ERROR
statusMessage = "Incompatible CmplServer with compatibilty stage "+str(compatibility) + " instead of " + str(self.__compatibility)
else:
statusMessage = str(self.__compatibility)
self.__serverList[id].setLastActivityTime(time.time())
self.__serverList[id].setSolvers(solvers)
try:
self.__serverList[id].setCmplServer( xmlrpclib.ServerProxy( self.__serverList[id].cmplServerUrl , allow_none=False) )
except:
CmplServerTools.cmplLogging( self.__logFile, "CmplServer can't connect - no bi-directional connection :"+ str(sys.exc_info()[1]) , id )
status = CMPLGRID_SCHEDULER_ERROR
statusMessage = "CmplServer can't connect - no bi-directional connect :"+ str(sys.exc_info()[1])
if status == CMPLGRID_SCHEDULER_OK:
CmplServerTools.cmplLogging( self.__logFile, "CmplServer has been connected: solver "+str(self.__serverList[id].solvers)+" : maxProblems :" + str(self.__serverList[id].emptyProblems) + ">" , id )
return [ status, statusMessage, id]
#*********** end getServerId ************
#*********** addEmptyProblem **************
def addEmptyProblem(self, serverId):
if self.__checkId(serverId):
self.__serverList[serverId].addEmptyProblem()
status = CMPLGRID_SCHEDULER_OK
statusMessage = "Empty problem has added"
CmplServerTools.cmplLogging( self.__logFile, "CmplServer has added empty problem " , serverId )
else:
status = CMPLGRID_SCHEDULER_ERROR
statusMessage = "Server isn't connected"
return [status, statusMessage, "" ]
#*********** end addEmptyProblem **************
#*********** disconnectServer **************
def disconnectServer(self, id):
status=None
statusMessage=None
if id in self.__serverList:
del self.__serverList[id]
status = CMPLGRID_SCHEDULER_OK
statusMessage = "CmplServer <" + id +"> disconnected"
CmplServerTools.cmplLogging( self.__logFile, "CmplServer has been disconnected " , id )
else:
status = CMPLGRID_SCHEDULER_WARNING
statusMessage = "CmplServer <" + id +"> wasn't connected"
return [ status, statusMessage, ""]
#*********** end disconnectServer ************
#*********** disconnectProblem **************
def disconnectProblem(self, id):
status=None
statusMessage=None
if id in self.__problemQueue:
del self.__problemQueue[self.__problemQueue.index(id)]
status = CMPLGRID_SCHEDULER_OK
statusMessage = "Problem <" + id +"> disconnected"
CmplServerTools.cmplLogging( self.__logFile, "Problem has been disconnected from problem queue." , id )
else:
status = CMPLGRID_SCHEDULER_WARNING
statusMessage = "Problem <" + id +"> wasn't connected"
return [ status, statusMessage, ""]
#*********** end disconnectProblem ************
#*********** stopServer **************
def stopServer(self):
if self.__server.client_address[0] == "127.0.0.1":
while len( self.__serverList) >0:
id = self.__serverList.keys()[0]
ret=self.__cmplServerExecute(self.__serverList[id].cmplServer, "disconnectFromScheduler", id)
if ret[0]==CMPLSERVER_OK:
self.disconnectServer(id)
else:
CmplServerTools.cmplLogging( self.__logFile, "Can't disconnect CmplServer <" + id +"> : " + ret[1])
self.__serverList.clear()
self.shutdown = True
CmplServerTools.cmplLogging( self.__logFile, "CmplGridScheduler has been stopped" )
return True
else:
return False
#*********** end stopServer **********
#*********** status ***************
def status(self):
#CmplServerTools.cmplLogging( self.__logFile, "Status check: " + str(self.__schedulerStatus) )
return [self.__schedulerStatus, self.__schedulerStatusTxt, ""]
#*********** end status ***********
#****************************************************************************
# private methods
#****************************************************************************
#*********** checkId ***************
def __checkId(self, id) :
return id in self.__serverList
#*********** end checkId ***********
#*********** nrOfActiveServers ***************
def __nrOfActiveServers(self) :
i=0
for s in self.__serverList:
if self.__serverList[s].isActive:
i+=1
return i
#*********** end __nrOfActiveServers ***********
#*********** __serviceThread ******
def __serviceThread(self):
lastActivityTime=time.time()
status = CMPLGRID_SCHEDULER_OK
while True:
ret=[]
if self.shutdown==True:
break
time.sleep(self.__serviceIntervall)
if time.time()-lastActivityTime>self.__serviceIntervall2:
self.__cleanOldProblems()
lastActivityTime=time.time()
if len(self.__problemQueue)>0:
tmpId=self.__problemQueue.pop(0)
bestServer, status = self.__getBestServer(self.__problemQueueInfos[tmpId].solver)
if status==CMPLGRID_SCHEDULER_SOLVER_NOT_AVAILABLE:
self.__problemQueueInfos[tmpId].setStatus=CMPLGRID_SCHEDULER_PROBLEM_DELETED
status = CMPLSERVER_ERROR
statusMessage = "Solver <"+solver + "> not available in the CmplGrid "
elif status==CMPLGRID_SCHEDULER_BUSY:
self.__problemQueue.insert(0,tmpId)
self.__problemQueueInfos[tmpId].setStatus(status)
elif status==CMPLGRID_SCHEDULER_OK:
ans = self.__sendProblemToServer(bestServer, tmpId, self.__problemQueueInfos[tmpId].cmplName)
if ans==True:
self.__problemQueueInfos[tmpId].setStatus(CMPLGRID_SCHEDULER_OK)
self.__problemQueueInfos[tmpId].setCmplServerId(bestServer)
else:
self.__problemQueue.insert(0,tmpId)
self.__problemQueueInfos[tmpId].setStatus(CMPLGRID_SCHEDULER_BUSY)
if len(self.__problemQueue)>0:
self.__schedulerStatus = CMPLGRID_SCHEDULER_BUSY
self.__schedulerStatusTxt = "CMPLGrid scheduler is busy"
else:
self.__schedulerStatus = CMPLGRID_SCHEDULER_OK
self.__schedulerStatusTxt = "CMPLGrid scheduler is running"
for s in self.__serverList:
if self.__serverList[s].isActive==False:
oldMaxTries=self.__maxCmplServerTries
self.__maxCmplServerTries=1
ret=self.__cmplServerExecute(self.__serverList[s].cmplServer, "status")
self.__maxCmplServerTries=oldMaxTries
if ret[0]==CMPLSERVER_OK:
self.__serverList[s].setActive(True)
self.__serverList[s].setEmptyProblems(ret[2])
CmplServerTools.cmplLogging( self.__logFile, "CmplServer is now registred as active <"+self.__serverList[s].cmplServerUrl+"> " + str(self.__serverList[s].emptyProblems) )
#******* end __serviceThread *******
#*********** cleanOldProblems ******
def __cleanOldProblems(self):
delList = []
for id in self.__problemQueue:
if (time.time()-self.__problemQueueInfos[id].lastActivityTime)>self.__maxInactivityTime:
delList.append(id)
for id in delList:
self.__problemQueueInfos[id].setStatus=CMPLGRID_SCHEDULER_PROBLEM_DELETED
del self.__problemQueue[id]
CmplServerTools.cmplLogging( self.__logFile, "Inactive problem has been canceled and deregistered" , id, self.__problemQueue[id].cmplName)
#******* end __cleanOldProblems ******
#*********** cmplServerExecute *******
def __cmplServerExecute(self, server, method="", *args):
ret=[]
tries=0
while True:
try:
if method=="status":
ret = server.status()
if method=="sendJobId":
ret = server.sendJobId(args[0], args[1], args[2])
if method=="disconnectFromScheduler":
ret = server.disconnectFromScheduler(args[0])
except :
tries+=1
if tries==self.__maxCmplServerTries:
ret=[CMPLSERVER_ERROR, str(sys.exc_info()[1]) , ""]
break
else:
continue
break
return ret
#******** end cmplServerExecute *******
#*********** __sendProblemToServer **************
def __sendProblemToServer(self, bestServer, id, cmplName):
ans=True
ret = self.__cmplServerExecute(self.__serverList[bestServer].cmplServer, "sendJobId", cmplName, id, bestServer)
if ret[0] == CMPLSERVER_ERROR:
self.__serverList[bestServer].setActive(False)
ans = False
else:
CmplServerTools.cmplLogging( self.__logFile, "Problem has been sent to CmplServer <"+ bestServer +"> " , id, cmplName )
ans=True
return ans
#*********** end __sendProblemToServer ************
#*********** getBestServer **************
def __getBestServer(self, solver):
bestServer=None
status=CMPLGRID_SCHEDULER_OK
bestFactor=0
activeServerFound=False
for s in self.__serverList:
if self.__serverList[s].isActive:
if solver in self.__serverList[s].solvers:
activeServerFound=True
tmpFactor=(self.__serverList[s].emptyProblems/self.__serverList[s].maxProblems * self.__serverList[s].performanceIndex)
if tmpFactor > bestFactor:
bestFactor = tmpFactor
bestServer = s
if bestServer!=None:
self.__serverList[bestServer].removeEmptyProblem()
else:
if not activeServerFound:
status=CMPLGRID_SCHEDULER_SOLVER_NOT_AVAILABLE
else:
status=CMPLGRID_SCHEDULER_BUSY
return (bestServer, status)
#*********** end getBestServer ************
#################################################################################
# End CmplGridScheduler
#################################################################################
| apache-2.0 | 5,686,667,388,055,705,000 | 30.897668 | 201 | 0.610867 | false |
wegamekinglc/alpha-mind | alphamind/tests/test_suite.py | 1 | 4079 | # -*- coding: utf-8 -*-
"""
Created on 2017-4-25
@author: cheng.li
"""
import os
SKIP_ENGINE_TESTS = True
if not SKIP_ENGINE_TESTS:
try:
DATA_ENGINE_URI = os.environ['DB_URI']
except KeyError:
DATA_ENGINE_URI = "mysql+mysqldb://reader:Reader#[email protected]:13317/vision?charset=utf8"
else:
DATA_ENGINE_URI = None
if __name__ == '__main__':
from simpleutils import add_parent_path
add_parent_path(__file__, 3)
from simpleutils import TestRunner
from alphamind.utilities import alpha_logger
from alphamind.tests.data.test_neutralize import TestNeutralize
from alphamind.tests.data.test_standardize import TestStandardize
from alphamind.tests.data.test_winsorize import TestWinsorize
from alphamind.tests.data.test_quantile import TestQuantile
from alphamind.tests.data.engines.test_sql_engine import TestSqlEngine
from alphamind.tests.data.engines.test_universe import TestUniverse
from alphamind.tests.portfolio.test_constraints import TestConstraints
from alphamind.tests.portfolio.test_evolver import TestEvolver
from alphamind.tests.portfolio.test_longshortbuild import TestLongShortBuild
from alphamind.tests.portfolio.test_rankbuild import TestRankBuild
from alphamind.tests.portfolio.test_percentbuild import TestPercentBuild
from alphamind.tests.portfolio.test_linearbuild import TestLinearBuild
from alphamind.tests.portfolio.test_meanvariancebuild import TestMeanVarianceBuild
from alphamind.tests.portfolio.test_riskmodel import TestRiskModel
from alphamind.tests.settlement.test_simplesettle import TestSimpleSettle
from alphamind.tests.analysis.test_riskanalysis import TestRiskAnalysis
from alphamind.tests.analysis.test_perfanalysis import TestPerformanceAnalysis
from alphamind.tests.analysis.test_factoranalysis import TestFactorAnalysis
from alphamind.tests.analysis.test_quantilieanalysis import TestQuantileAnalysis
from alphamind.tests.model.test_modelbase import TestModelBase
from alphamind.tests.model.test_linearmodel import TestLinearModel
from alphamind.tests.model.test_treemodel import TestTreeModel
from alphamind.tests.model.test_loader import TestLoader
from alphamind.tests.model.test_composer import TestComposer
from alphamind.tests.execution.test_naiveexecutor import TestNaiveExecutor
from alphamind.tests.execution.test_thresholdexecutor import TestThresholdExecutor
from alphamind.tests.execution.test_targetvolexecutor import TestTargetVolExecutor
from alphamind.tests.execution.test_pipeline import TestExecutionPipeline
from alphamind.tests.portfolio.test_optimizers import TestOptimizers
runner = TestRunner([TestNeutralize,
TestStandardize,
TestWinsorize,
TestQuantile,
TestSqlEngine,
TestUniverse,
TestConstraints,
TestEvolver,
TestLongShortBuild,
TestRankBuild,
TestPercentBuild,
TestLinearBuild,
TestMeanVarianceBuild,
TestRiskModel,
TestSimpleSettle,
TestRiskAnalysis,
TestPerformanceAnalysis,
TestFactorAnalysis,
TestQuantileAnalysis,
TestModelBase,
TestLinearModel,
TestTreeModel,
TestLoader,
TestComposer,
TestNaiveExecutor,
TestThresholdExecutor,
TestTargetVolExecutor,
TestExecutionPipeline,
TestOptimizers],
alpha_logger)
runner.run()
| mit | 6,165,599,601,304,033,000 | 44.352273 | 101 | 0.648934 | false |
nycz/gimptools | NyczAddTextOutline.py | 1 | 1238 | #!/usr/bin/env python2
from gimpfu import *
## WORKFLOW
#
# Right-click on layer -> alpha to selection
# Grow selection by 1 pixel
# Make a new empty layer
# Fill selection with black
# Move new layer below old layer
# Merge down old layer
def add_text_outline(image, layer):
gimp.pdb.gimp_image_undo_group_start(image)
layer_name = layer.name
gimp.pdb.gimp_image_select_item(image, CHANNEL_OP_ADD, layer)
if gimp.pdb.gimp_selection_is_empty(image):
return
gimp.pdb.gimp_selection_grow(image, 1)
new_layer = gimp.Layer(image, 'outline', image.width, image.height, RGBA_IMAGE, 100, NORMAL_MODE)
top_pos = image.layers.index(layer)
image.add_layer(new_layer, top_pos+1)
gimp.pdb.gimp_edit_fill(new_layer, BACKGROUND_FILL)
gimp.pdb.gimp_selection_none(image)
final_layer = gimp.pdb.gimp_image_merge_down(image, layer, NORMAL_MODE)
final_layer.name = layer_name
gimp.pdb.gimp_image_undo_group_end(image)
return
register(
"nycz_add_text_outline",
"Add black outline to a text layer",
"Add black outline to a text layer",
"Nycz",
"Nycz",
"August 2015",
"<Image>/Nycz/Outline text",
"RGBA*",
[],
[],
add_text_outline,
)
main()
| mit | -366,173,237,773,243,100 | 25.913043 | 101 | 0.668821 | false |
iLoop2/ResInsight | ThirdParty/Ert/devel/python/python/ert/test/source_enumerator.py | 1 | 2491 | import os
import re
class SourceEnumerator(object):
@classmethod
def findDevRoot(cls, root_directory_name = "devel", verbose=False):
dev_root = os.path.dirname(os.path.realpath(__file__))
while True:
if verbose:
print("Looking at: %s" % dev_root)
dev_path = os.path.join(dev_root , root_directory_name)
if os.path.exists( dev_path ):
dev_root = os.path.join(dev_root , root_directory_name)
if verbose:
print("break: %s" % dev_path)
break
head, tail = os.path.split(dev_root)
dev_root = head
if tail == "":
raise ValueError("Source root: '%s' not found!" % root_directory_name)
if verbose:
print("Returning: %s " % dev_root)
return dev_root
@classmethod
def findSourceFile(cls, path):
dev_root = SourceEnumerator.findDevRoot()
source_file = os.path.join(dev_root, path)
if not os.path.exists(source_file):
raise ValueError("File not found: %s:%s" % (path , source_file))
return source_file
@classmethod
def removeComments(cls, code_string):
code_string = re.sub(re.compile("/\*.*?\*/",re.DOTALL ) ,"" ,code_string) # remove all occurance streamed comments (/*COMMENT */) from string
code_string = re.sub(re.compile("//.*?\n" ) ,"" ,code_string) # remove all occurance singleline comments (//COMMENT\n ) from string
return code_string
@classmethod
def findEnum(cls, enum_name, full_source_file_path):
with open(full_source_file_path, "r") as f:
text = f.read()
text = SourceEnumerator.removeComments(text)
enum_pattern = re.compile("typedef\s+enum\s+\{(.*?)\}\s*(\w+?);", re.DOTALL)
for enum in enum_pattern.findall(text):
if enum[1] == enum_name:
return enum[0]
raise ValueError("Enum with name: '%s' not found!" % enum_name)
@classmethod
def findEnumerators(cls, enum_name, source_file_path):
enum_text = SourceEnumerator.findEnum(enum_name, SourceEnumerator.findSourceFile(source_file_path))
enumerator_pattern = re.compile("(\w+?)\s*?=\s*?(\d+)")
enumerators = []
for enumerator in enumerator_pattern.findall(enum_text):
enumerators.append((enumerator[0], int(enumerator[1])))
return enumerators
| gpl-3.0 | -6,384,688,368,757,242,000 | 33.123288 | 149 | 0.577278 | false |
mithrandi/txaws | txaws/route53/tests/test_util.py | 1 | 1903 | # Licenced under the txaws licence available at /LICENSE in the txaws source.
"""
Tests for L{txaws.route53._util}.
"""
from txaws.testing.base import TXAWSTestCase
from txaws.route53._util import maybe_bytes_to_unicode, to_xml, tags
class MaybeBytesToUnicodeTestCase(TXAWSTestCase):
"""
Tests for L{maybe_bytes_to_unicode}.
"""
def test_bytes(self):
"""
When called with an instance of L{bytes}, L{maybe_bytes_to_unicode}
decodes its input using I{ascii} and returns the resulting unicode
string as an instance of L{unicode}.
"""
self.assertRaises(
UnicodeDecodeError,
lambda: maybe_bytes_to_unicode(u"\N{SNOWMAN}".encode("utf-8")),
)
decoded = maybe_bytes_to_unicode(b"hello world")
self.assertIsInstance(decoded, unicode)
self.assertEqual(decoded, u"hello world")
def test_unicode(self):
"""
When called with an instance of L{unicode},
L{maybe_bytes_to_unicode} returns its input unmodified.
"""
self.assertEqual(
u"\N{SNOWMAN}",
maybe_bytes_to_unicode(u"\N{SNOWMAN}"),
)
class ToXMLTestCase(TXAWSTestCase):
"""
Tests for L{to_xml}.
"""
def test_none(self):
"""
When called with L{None}, L{to_xml} returns a L{Deferred} that
fires with C{b""}.
"""
self.assertEqual(b"", self.successResultOf(to_xml(None)))
def test_something(self):
"""
When called with an instance of L{txaws.route53._util.Tag},
L{to_xml} returns a L{Defered} giving the result of flattening
it as an instance of L{bytes} with an xml doctype prepended.
"""
self.assertEqual(
"""<?xml version="1.0" encoding="UTF-8"?>\n<Foo>bar</Foo>""",
self.successResultOf(to_xml(tags.Foo(u"bar"))),
)
| mit | 3,261,922,258,071,802,400 | 29.693548 | 77 | 0.600105 | false |
allisnone/pytrade | pdSql0.py | 1 | 49993 | # -*- coding:utf-8 -*-
from sqlalchemy import create_engine
import pymysql
import pandas as pd
import numpy as np
from pandas.io import sql
from pandas import to_datetime
from pandas import Timestamp
import datetime,time,os
import tushare as ts
import easytrader,easyhistory
import time,os
from tradeStrategy import Stockhistory
#ROOT_DIR='E:/work/stockAnalyze'
#ROOT_DIR="C:/中国银河证券海王星/T0002"
#ROOT_DIR="C:\work\stockAnalyze"
RAW_HIST_DIR="C:/中国银河证券海王星/T0002/export/"
#HIST_DIR=ROOT_DIR+'/update/'
#"""
import tradeTime as tt
import sendEmail as sm
import qq_quotation as qq
"""
from . import tradeTime as tt
from . import sendEmail as sm
from . import qq_quotation as qq
"""
def form_sql(table_name,oper_type='query',select_field=None,where_condition=None,insert_field=None,update_field=None,update_value=None):
"""
:param table_name: string type, db_name.table_name
:param select_field: string type, like 'id,type,value'
:param where_condition: string type, like 'field_value>50'
:param insert_field: string type, like '(date_time,measurement_id,value)'
:param update_field: string type, like 'value' or '(measurement_id,value)'
:param update_value: value or string type, like '1000' or "'normal_type'"
    :return: SQL string
    :usage examples:
    :query: sql_q=form_sql(table_name='stock.account',oper_type='query',select_field='acc_name,initial',where_condition="acc_name='36005'")
    :insert: sql_insert=form_sql(table_name='stock.account',oper_type='insert',insert_field='(acc_name,initial,comm)')
    :update: sql_update=form_sql(table_name='stock.account',oper_type='update',update_field='initial',where_condition='initial=2900019000',update_value='29000')
:delete: sql_delete=form_sql(table_name='stock.account',oper_type='delete',where_condition="initial=14200.0")
"""
sql=''
if table_name=='' or not table_name:
return sql
if oper_type=='query':
field='*'
if select_field:
field=select_field
condition=''
if where_condition:
condition=' where %s' % where_condition
sql='select %s from %s'%(field,table_name) + condition +';'
elif oper_type=='insert' and insert_field:
num=len(insert_field.split(','))
value_tail='%s,'*num
value_tail='('+value_tail[:-1]+')'
sql='insert into %s '% table_name +insert_field +' values'+ value_tail + ';'
elif oper_type=='update' and where_condition and update_field:
"""
update_value_str=str(update_value)
if isinstance(update_value, str):
update_value_str="'%s'"%update_value
"""
sql='update %s set %s='%(table_name,update_field)+ update_value + ' where '+ where_condition + ';'
"""
sql=''
num=len(update_field.split(','))
if num==1:
sql='update %s set %s='%(table_name,update_field)+ update_value + ' where '+ where_condition + ';'
elif num>1:
value_tail='%s,'*num
value_tail='('+value_tail[:-1]+')'
update_sql="update test set " + update_field +value_tail + ':'
else:
pass
"""
elif oper_type=='delete':
condition=''
if where_condition:
condition=' where %s' % where_condition
sql='delete from %s'%table_name + condition + ';'
else:
pass
# print('%s_sql=%s'%(oper_type,sql))
return sql
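# Minimal usage sketch for form_sql (illustrative only; 'stock.account' and its
# fields are hypothetical names taken from the docstring examples above):
def _form_sql_demo():
    sql_q = form_sql(table_name='stock.account', oper_type='query',
                     select_field='acc_name,initial', where_condition="acc_name='36005'")
    # -> "select acc_name,initial from stock.account where acc_name='36005';"
    sql_i = form_sql(table_name='stock.account', oper_type='insert',
                     insert_field='(acc_name,initial,comm)')
    # -> "insert into stock.account (acc_name,initial,comm) values(%s,%s,%s);"
    sql_u = form_sql(table_name='stock.account', oper_type='update',
                     update_field='initial', update_value='29000',
                     where_condition='initial=2900019000')
    # -> "update stock.account set initial=29000 where initial=2900019000;"
    sql_d = form_sql(table_name='stock.account', oper_type='delete',
                     where_condition='initial=14200.0')
    # -> "delete from stock.account where initial=14200.0;"
    return sql_q, sql_i, sql_u, sql_d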
def get_raw_hist_df(code_str,latest_count=None):
file_type='csv'
file_name='C:/hist/day/data/'+code_str+'.'+file_type
#print('file_name=',file_name)
raw_column_list=['date','open','high','low','close','volume','rmb','factor']
#print('file_name=',file_name)
df_0=pd.DataFrame({},columns=raw_column_list)
try:
#print('code_str=%s'%code_str)
#df=pd.read_csv(file_name,names=raw_column_list, header=0,encoding='gb2312' #='gb18030')#'utf-8') #for python3
hist_df = pd.read_csv(file_name)
hist_df['rmb'] = hist_df['amount']
#del hist_df['amount']
#del hist_df['MA1']
#print(hist_df)
#print('pd.read_csv=',df)
if hist_df.empty:
#print('code_str=',code_str)
return df_0
return hist_df
except OSError as e:
#print('OSError:',e)
return df_0
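# Usage sketch for get_raw_hist_df (assumes a csv for the code already exists
# under C:/hist/day/data/; '600000' is just an illustrative code):
def _raw_hist_demo(code_str='600000'):
    hist_df = get_raw_hist_df(code_str)
    if hist_df.empty:
        print('no local csv for %s yet' % code_str)
    else:
        # columns typically include date/open/high/low/close/volume/amount/rmb/factor
        print(hist_df.tail(3))
    return hist_df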
def get_yh_raw_hist_df(code_str,latest_count=None):
file_type='csv'
RAW_HIST_DIR="C:/中国银河证券海王星/T0002/export/"
file_name=RAW_HIST_DIR+code_str+'.'+file_type
raw_column_list=['date','open','high','low','close','volume','amount']
#print('file_name=',file_name)
df_0=pd.DataFrame({},columns=raw_column_list)
try:
#print('code_str=%s'%code_str)
df=pd.read_csv(file_name,names=raw_column_list, header=0,encoding='gb2312')#'utf-8') #for python3
#print('pd.read_csv=',df)
if df.empty:
#print('code_str=',code_str)
df_0.to_csv(file_name,encoding='utf-8')
return df_0
#else:
# return
last_date=df.tail(1).iloc[0].date
if last_date=='数据来源:通达信':
df=df[:-1]
#print('数据来源:通达信')
#print(df.tail(1).iloc[0].date)
if df.empty:
df_0.to_csv(file_name,encoding='utf-8')
return df_0
#else:
# return
last_volume=df.tail(1).iloc[0].volume
if int(last_volume)==0:
df=df[:-1]
            df['date'] = to_datetime(df['date'])
df_to_write = df.set_index('date')
df_to_write.to_csv(file_name,encoding='utf-8')
else:
pass
return df
except OSError as e:
#print('OSError:',e)
df_0.to_csv(file_name,encoding='utf-8')
return df_0
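# Usage sketch for get_yh_raw_hist_df: the reader above strips the trailing
# '数据来源:通达信' footer row and a zero-volume last bar from the YH export
# before returning it ('600000' is just an illustrative code):
def _yh_raw_hist_demo(code_str='600000'):
    yh_df = get_yh_raw_hist_df(code_str)
    if not yh_df.empty:
        print('last clean YH bar for %s is %s' % (code_str, yh_df.tail(1).iloc[0]['date']))
    return yh_df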
def get_easyhistory_df(code_str,source='easyhistory'): #ta_lib
data_path = 'C:/hist/day/data/'
if source=='YH' or source=='yh':
data_path = 'C:/中国银河证券海王星/T0002/export/'
his = easyhistory.History(dtype='D', path=data_path,type='csv',codes=[code_str])
res = his.get_hist_indicator(code_str)
return res
def update_one_hist(code_str,stock_sql_obj,histdata_last_df,update_db=True):
"""
:param code_str: string type, code string_name
:param stock_sql_obj: StockSQL type,
:param histdata_last_df: dataframe type, df from table histdata
:return:
"""
df=get_raw_hist_df(code_str)
if df.empty:
return 0
code_list=[code_str]*len(df)
df['code']=pd.Series(code_list,index=df.index)
p=df.pop('code')
df.insert(0,'code',p)
#print("update_one_hist1")
last_db_date=stock_sql_obj.get_last_db_date(code_str,histdata_last_df)
#print("update_one_hist2")
last_db_date_str=''
#print('last_db_date',last_db_date,type(last_db_date))
#print('last_db_date_str',last_db_date_str)
#criteria0=df.volume>0
#df=df[df.volume>0]
if last_db_date:
last_db_date_str='%s' % last_db_date
last_db_date_str=last_db_date_str[:10]
#criteria1=df.date>last_db_date_str
df=df[df.date>last_db_date_str]
#print('sub df', df)
if df.empty:
#print('History data up-to-date for %s, no need update' % code_str)
return 0
if update_db:
stock_sql_obj.insert_table(df, 'histdata')
#print(df.tail(1))
#print(df.tail(1).iloc[0])
update_date=df.tail(1).iloc[0].date
#last_date=histdata_last_df.loc[date[-1],'date']
#update_date= 2015-11-20 <class 'str'>
#print('update_date=',update_date,type(update_date))
stock_sql_obj.update_last_db_date(code_str,last_db_date_str,update_date)
return len(df)
def get_file_timestamp(file_name):
#get last modify time of given file
file_mt_str=''
try:
file_mt= time.localtime(os.stat(file_name).st_mtime)
file_mt_str=time.strftime("%Y-%m-%d %X",file_mt)
    except OSError:
        # file does not exist or is inaccessible
        pass
return file_mt_str
# get all code files and the latest modify time in a given DIR
def get_dir_latest_modify_time(hist_dir,codes={}):
"""
:param hist_dir: string type, DIR of export data
    :param codes: dict/list type, optional codes to check; empty means scan the whole DIR
    :return: tuple type, (code string list, latest modify time string)
"""
all_code=[]
latest_time = '1970-01-01 00:00:00'
if codes:
for code in codes:
full_file_name = hist_dir + '%s.csv' % code
file_mt_str = get_file_timestamp(full_file_name)
if file_mt_str > latest_time:
latest_time = file_mt_str
all_code = codes
else:
for filename in os.listdir(hist_dir):#(r'ROOT_DIR+/export'):
code=filename[:-4]
if len(code)==6:
all_code.append(code)
full_file_name = hist_dir + filename
file_mt_str = get_file_timestamp(full_file_name)
if file_mt_str > latest_time:
latest_time = file_mt_str
return all_code,latest_time
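# Usage sketch: check how fresh the YH export directory is before updating
# (RAW_HIST_DIR is the module-level export path defined near the top):
def _export_freshness_demo():
    codes, latest_mtime = get_dir_latest_modify_time(RAW_HIST_DIR)
    print('%d files, newest exported at %s' % (len(codes), latest_mtime))
    return codes, latest_mtime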
# list all codes that have a source data file in a given DIR
def get_all_code(hist_dir):
"""
:param hist_dir: string type, DIR of export data
:return: list type, code string list
"""
all_code=[]
for filename in os.listdir(hist_dir):#(r'ROOT_DIR+/export'):
code=filename[:-4]
if len(code)==6:
all_code.append(code)
return all_code
def get_different_symbols(hist_dir='C:/hist/day/data/'):
indexs= ['cyb', 'zxb', 'sz', 'sh', 'sz300', 'zx300', 'hs300', 'sh50']
all_codes = get_all_code(hist_dir)
funds =[]
b_stock = []
for code in all_codes:
if code.startswith('1') or code.startswith('5'):
funds.append(code)
elif code.startswith('9'):
b_stock.append(code)
except_codes = ['000029']
all_stocks = list(set(all_codes).difference(set(funds+indexs+except_codes)))
return indexs,funds,b_stock,all_stocks
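# Usage sketch: split everything found under the local hist dir into the four
# buckets returned above (indexes, funds, B-shares, ordinary A-shares):
def _symbol_buckets_demo():
    indexs, funds, b_stock, all_stocks = get_different_symbols()
    print('index:%d fund:%d B:%d A:%d' % (len(indexs), len(funds), len(b_stock), len(all_stocks)))
    return all_stocks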
def update_all_hist_data(codes,update_db=True):
"""
:param codes: list type, code string list
:return:
"""
starttime=datetime.datetime.now()
stock_sql_obj=StockSQL()
print('histdata_last_df1',datetime.datetime.now())
histdata_last_df=stock_sql_obj.query_data(table='histdata_last')
print('histdata_last_df2',datetime.datetime.now())
for code_str in codes:
update_one_hist(code_str, stock_sql_obj,histdata_last_df,update_db)
deltatime=datetime.datetime.now()-starttime
print('update duration=',deltatime.days*24*3600+deltatime.seconds)
print('update completed')
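# End-to-end refresh sketch (assumes the day-level csv files exist under
# C:/hist/day/data/ and the DB behind StockSQL, defined later in this module,
# is reachable; set update_db=False for a dry run):
def _refresh_all_demo(update_db=True):
    codes = get_all_code('C:/hist/day/data/')  # every code with a local csv
    update_all_hist_data(codes, update_db)     # incremental insert per code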
def get_position(broker='yh',user_file='yh.json'):
user = easytrader.use(broker)
user.prepare(user_file)
holding_stocks_df = user.position#['证券代码'] #['code']
user_balance = user.balance#['证券代码'] #['code']
account = '36005'
if user_file== 'yh1.json':
account = '38736'
holding_stocks_df['account'] = account
this_day=datetime.datetime.now()
date_format='%Y/%m/%d'
time_format = date_format + ' %X'
time_str=this_day.strftime(time_format)
holding_stocks_df['update'] = time_str
#holding_stocks_df['valid'] = 1
"""
当前持仓 股份可用 参考市值 参考市价 股份余额 参考盈亏 交易市场 参考成本价 盈亏比例(%) 股东代码 \
0 6300 6300 24885.0 3.95 6300 343.00 深A 3.896 1.39% 0130010635
1 400 400 9900.0 24.75 400 163.00 深A 24.343 1.67% 0130010635
2 600 600 15060.0 25.10 600 115.00 深A 24.908 0.77% 0130010635
3 1260 0 13041.0 10.35 1260 906.06 沪A 9.631 7.47% A732980330
证券代码 证券名称 买入冻结 卖出冻结
0 000932 华菱钢铁 0 0
1 000977 浪潮信息 0 0
2 300326 凯利泰 0 0
3 601009 南京银行 0 0
"""
#print(holding_stocks_df)
return holding_stocks_df,user_balance
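# Usage sketch for get_position (needs a valid easytrader broker json with real
# credentials; 'yh.json' is just the default file name used above):
def _position_demo():
    position_df, balance = get_position(broker='yh', user_file='yh.json')
    print(position_df[['account', 'update']].head())
    return position_df, balance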
def update_one_stock(symbol,realtime_update=False,dest_dir='C:/hist/day/data/', force_update_from_YH=False):
"""
运行之前先下载及导出YH历史数据
"""
"""
:param symbol: string type, stock code
:param realtime_update: bool type, True for K data force update during trade time
:param dest_dir: string type, like csv dir
:param force_update_from_YH: bool type, force update K data from YH
:return: Dataframe, history K data for stock
"""
index_symbol_maps = {'sh':'999999','sz':'399001','zxb':'399005','cyb':'399006',
'sh50':'000016','sz300':'399007','zx300':'399008','hs300':'000300'}
qq_index_symbol_maps = {'sh':'000001','sz':'399001','zxb':'399005','cyb':'399006',
'sh50':'000016','sz300':'399007','zx300':'399008','hs300':'000300'}
FIX_FACTOR = 1.0
d_format='%Y/%m/%d'
last_date_str = tt.get_last_trade_date(date_format=d_format)
latest_date_str = tt.get_latest_trade_date(date_format=d_format)
#print('last_date_str=',last_date_str)
#print('latest_date_str=',latest_date_str)
next_date_str = tt.get_next_trade_date(date_format=d_format)
#print(next_date_str)
dest_file_name = dest_dir+ '%s.csv' % symbol
dest_df = get_raw_hist_df(code_str=symbol)
file_type='csv'
RAW_HIST_DIR = "C:/中国银河证券海王星/T0002/export/"
yh_file_name = RAW_HIST_DIR+symbol+'.'+file_type
if dest_df.empty:
if symbol in index_symbol_maps.keys():
symbol = index_symbol_maps[symbol]
yh_file_name = RAW_HIST_DIR+symbol+'.'+file_type
#yh_index_df = get_yh_raw_hist_df(code_str=symbol)
yh_index_df = pd.read_csv(yh_file_name)
yh_index_df['factor'] = 1.0
yh_df = yh_index_df.set_index('date')
yh_df.to_csv(dest_file_name ,encoding='utf-8')
dest_df = yh_index_df
#del dest_df['rmb']
return yh_df
#print(dest_df)
dest_df_last_date = dest_df.tail(1).iloc[0]['date']
#print('dest_df_last_date=',dest_df_last_date)
if dest_df_last_date<latest_date_str:
quotation_date = ''
try:
quotation_index_df = qq.get_qq_quotations([symbol], ['code','date','open','high','low','close','volume','amount'])
quotation_date = quotation_index_df.iloc[0]['date']
if dest_df_last_date==quotation_date:
return dest_df
#quotation_index_df = ts.get_index()
except:
time.sleep(3)
quotation_index_df = qq.get_qq_quotations([symbol], ['code','date','open','high','low','close','volume','amount'])
quotation_date = quotation_index_df.iloc[0]['date']
if dest_df_last_date==quotation_date:
return dest_df
#print('quotation_date=',quotation_date)
#print(quotation_index_df)
quotation_index_df['factor'] = 1.0
quotation_index_df = quotation_index_df[['date','open','high','low','close','volume','amount','factor']]
#quotation_index_df.iloc[0]['volume'] = 0
#quotation_index_df.iloc[0]['amount'] = 0
#print(quotation_index_df)
#print(quotation_index_df)
need_to_send_mail = []
sub = ''
index_name = symbol
#table_update_times = self.get_table_update_time()
if quotation_date:
yh_symbol = symbol
if symbol in index_symbol_maps.keys():
yh_symbol = index_symbol_maps[index_name]
yh_file_name = RAW_HIST_DIR+yh_symbol+'.'+file_type
#yh_index_df = get_yh_raw_hist_df(code_str=symbol)
yh_index_df = pd.read_csv(yh_file_name,encoding='GBK')
yh_index_df['factor'] = FIX_FACTOR
yh_last_date = yh_index_df.tail(1).iloc[0]['date']
#print('yh_last_date=',yh_last_date)
#print( yh_index_df)#.head(len(yh_index_df)-1))
if yh_last_date>dest_df_last_date: #dest_df_last_date<latest_date_str
#date_data = self.query_data(table=index_name,fields='date',condition="date>='%s'" % last_date_str)
#data_len = len(date_data)
#this_table_update_time = table_update_times[index_name]
#print('this_table_update_time=', this_table_update_time)
if yh_last_date<last_date_str: #no update more than two day
"""需要手动下载银河客户端数据"""
print('Need to manual update %s index from YH APP! Please make suere you have sync up YH data' % index_name)
need_to_send_mail.append(index_name)
sub = '多于两天没有更新指数数据库'
content = '%s 数据表更新可能异常' % need_to_send_mail
sm.send_mail(sub,content,mail_to_list=None)
elif yh_last_date==last_date_str: # update by last date
"""只需要更新当天数据"""
if realtime_update and yh_last_date<latest_date_str:
print(' force update %s index' % symbol)
yh_index_df = yh_index_df.append(quotation_index_df, ignore_index=True)
#print(yh_index_df)
pass
else:# yh_last_date>latest_date_str: #update to latest date
"""YH已经更新到今天,要更新盘中获取的当天数据"""
print(' %s index updated to %s; not need to update' % (index_name,latest_date_str))
"""
if force_update:
print(' force update %s index' % index_name)
yh_index_df0 = yh_index_df.head(len(yh_index_df)-1)
print(yh_index_df0)
yh_index_df = yh_index_df0.append(quotation_index_df, ignore_index=True)
print(yh_index_df)
else:
pass
"""
yh_index_df = yh_index_df.set_index('date')
"""
try:
os.remove(file_name)
print('Delete and update the csv file')
except:
pass
"""
yh_index_df.to_csv(dest_file_name ,encoding='utf-8')
else:
if force_update_from_YH and yh_last_date==dest_df_last_date:
yh_index_df = yh_index_df.set_index('date')
yh_index_df.to_csv(dest_file_name ,encoding='utf-8')
pass
else:
print('No need to update data')
if realtime_update:
quotation_index_df = qq.get_qq_quotations([symbol], ['code','date','open','high','low','close','volume','amount'])
quotation_index_df['factor'] = 1.0
quotation_index_df = quotation_index_df[['date','open','high','low','close','volume','amount','factor']]
#print(quotation_index_df)
print(' force update %s index' % symbol)
dest_df0 = dest_df
if dest_df_last_date==latest_date_str:
dest_df0 = dest_df.head(len(dest_df)-1)
#print(dest_df0)
dest_df = dest_df0.append(quotation_index_df, ignore_index=True)
#print(dest_df)
if quotation_index_df.empty:
pass
else:
dest_df.to_csv(dest_file_name ,encoding='utf-8')
else:
pass
return dest_df
def update_codes_from_YH(codes, realtime_update=False, dest_dir='C:/hist/day/data/', force_update_from_YH=False):
#index_symbol_maps = {'sh':'999999','sz':'399001','zxb':'399005','cyb':'399006',
# 'sh50':'000016','sz300':'399007','zx300':'399008','hs300':'000300'}
#print(list(index_symbol_maps.keys()))
    # Usually used to update indexes and funds from the YH (Galaxy) export.
for symbol in codes: # #list(index_symbol_maps.keys()):
update_one_stock(symbol, realtime_update, dest_dir, force_update_from_YH)
return
def get_exit_data(symbol,dest_df,last_date_str):
    # Unfinished helper: loads the temp csv and the raw history, but does not
    # yet return the computed exit prices.
    df=pd.read_csv('C:/hist/day/temp/%s.csv' % symbol)
dest_df = get_raw_hist_df(code_str=symbol)
if dest_df.empty:
pass
else:
dest_df_last_date = dest_df.tail(1).iloc[0]['date']
if dest_df_last_date==last_date_str:
exit_price = dest_df.tail(3)
return
def get_exit_price(hold_codes=['300162'],data_path='C:/中国银河证券海王星/T0002/export/' ):#, has_update_history=False):
#exit_dict={'300162': {'exit_half':22.5, 'exit_all': 19.0},'002696': {'exit_half':17.10, 'exit_all': 15.60}}
has_update_history = True
"""
if not has_update_history:
easyhistory.init('D', export='csv', path="C:/hist",stock_codes=hold_codes)
easyhistory.update(path="C:/hist",stock_codes=hold_codes)
#has_update_history = True
"""
#his = easyhistory.History(dtype='D', path='C:/hist',codes=hold_codes)
#data_path = 'C:/hist/day/data/'
#data_path = 'C:/中国银河证券海王星/T0002/export/'
exit_dict = dict()
his = easyhistory.History(dtype='D', path=data_path, type='csv',codes=hold_codes)
d_format='%Y/%m/%d'
last_date_str = tt.get_last_trade_date(date_format=d_format)
latest_date_str = tt.get_latest_trade_date(date_format=d_format)
for code in hold_codes:
#code_hist_df = hist[code].MA(1).tail(3).describe()
if code=='sh000001' or code=='sh':
code = '999999'
if code=='cyb':
code = '399006'
exit_data = dict()
hist_df =his[code].ROC(1)
hist_last_date = hist_df.tail(1).iloc[0].date
#print('hist_last_date=',hist_last_date)
tolerance_exit_rate = 0.0
t_rate = 0.0
min_close = 0.0
min_low =0.0
if hist_last_date<last_date_str:
hist_df['l_change'] = ((hist_df['low']-hist_df['close'].shift(1))/hist_df['close'].shift(1)).round(3)
hist_df['h_change'] = ((hist_df['high']-hist_df['close'].shift(1))/hist_df['close'].shift(1)).round(3)
hist_low_describe = hist_df.tail(60).describe()
#print(hist_low_describe)
tolerance_exit_rate = round(hist_low_describe.loc['25%'].l_change,4)
t_rate = round(hist_low_describe.loc['75%'].h_change,4)
#print('hist_low_change=',hist_low_change)
#if hist_low_change< tolerance_exit_rate:
#tolerance_exit_rate = hist_low_change
#print('tolerance_exit_rate=',tolerance_exit_rate)
else:
hist_df['l_change'] = ((hist_df['low']-hist_df['close'].shift(1))/hist_df['close'].shift(1)).round(3)
hist_df['h_change'] = ((hist_df['high']-hist_df['close'].shift(1))/hist_df['close'].shift(1)).round(3)
hist_low_describe = hist_df.tail(60).describe()
tolerance_exit_rate = round(hist_low_describe.loc['25%'].l_change,4)
t_rate = round(hist_low_describe.loc['75%'].h_change,4)
#tolerance_exit_rate = hist_low_change
#print('tolerance_exit_rate=',tolerance_exit_rate)
hist_df = hist_df[hist_df.date<=last_date_str]
describe_df = his[code].MA(1).tail(3).describe()
min_low =round(describe_df.loc['min'].low, 2)
min_close = round(round(describe_df.loc['min'].close,2),2)
max_close = round(describe_df.loc['max'].close,2)
max_high = round(describe_df.loc['max'].high,2)
exit_data['exit_half'] = min_close
exit_data['exit_all'] = min_low
exit_data['exit_rate'] = tolerance_exit_rate
exit_data['t_rate'] = t_rate
exit_dict[code] = exit_data
#print('exit_dict=%s' % exit_dict)
return exit_dict
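# A minimal sketch of the quantile rule used above (hypothetical 'hist' frame
# with low/high/close columns): the exit tolerance is the 25th percentile of the
# daily low-vs-previous-close change over the last 60 bars, and t_rate the 75th
# percentile of the high-side change.
#
#   l_change = ((hist['low'] - hist['close'].shift(1)) / hist['close'].shift(1)).round(3)
#   h_change = ((hist['high'] - hist['close'].shift(1)) / hist['close'].shift(1)).round(3)
#   tolerance_exit_rate = round(l_change.tail(60).describe().loc['25%'], 4)
#   t_rate = round(h_change.tail(60).describe().loc['75%'], 4)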
def get_index_exit_data(indexs=['sh','cyb'],yh_index_symbol_maps = {'sh':'999999','sz':'399001','zxb':'399005','cyb':'399006',
'sh50':'000016','sz300':'399007','zx300':'399008'}):#['sh','sz','zxb','cyb','sz300','sh50']):
yh_index_symbol_maps = {'sh':'999999','sz':'399001','zxb':'399005','cyb':'399006',
'sh50':'000016','sz300':'399007','zx300':'399008'}#'hs300':'000300'}
hold_codes = []
for index in indexs:
if index in list(yh_index_symbol_maps.keys()):
yh_code = yh_index_symbol_maps[index]
hold_codes.append(yh_code)
index_exit_data = get_exit_price(hold_codes)
return index_exit_data
def is_system_risk(indexs=['sh','cyb'],index_exit_data=None,
                   yh_index_symbol_maps = {'sh':'999999','sz':'399001','zxb':'399005','cyb':'399006',
                   'sh50':'000016','sz300':'399007','zx300':'399008'}):
    # index_exit_data defaults to None instead of calling get_index_exit_data()
    # at import time; the body below fetches it on demand.
exit_data =index_exit_data
if not exit_data:
exit_data = get_index_exit_data(indexs)
index_quot = qq.get_qq_quotations(codes=indexs)
#overlap_index = list(set(list(exit_data.keys())) & set(list(index_quot.keys())))
if not exit_data or not index_quot:
return {}
risk_data = {}
for index in indexs:
this_risk = {}
index_now = index_quot[index]['now']
index_exit_half = exit_data[yh_index_symbol_maps[index]]['exit_half']
index_exit_all = exit_data[yh_index_symbol_maps[index]]['exit_all']
index_exit_rate = exit_data[yh_index_symbol_maps[index]]['exit_rate']
risk_state = 0
if index_exit_all==0:
last_close = index_quot[index]['close']
index_exit_all = (1+2*index_exit_rate) * last_close
index_exit_half = (1+index_exit_rate) * last_close
else:
pass
if index_now<index_exit_all:
risk_state = 1.0
elif index_now<index_exit_half:
risk_state = 0.5
else:
pass
if risk_state>0:
this_risk['index'] = index
this_risk['index_value'] = index_now
this_risk['index_state'] = risk_state
this_risk['date_time'] = datetime.datetime.now()
risk_data[index] = this_risk
print(risk_data)
return risk_data
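# Illustrative call: risk = is_system_risk(['sh', 'cyb']). An empty dict means no
# monitored index has broken its exit levels; per index, 0.5 flags a drop below
# exit_half and 1.0 a drop below exit_all.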
def get_hold_stock_statistics(hold_stocks= ['000007', '000932', '601009', '150288', '300431', '002362', '002405', '600570', '603398'],
stock_dir='C:/hist/day/temp/'):
if len(hold_stocks)<1:
return False
first_stock = hold_stocks[0]
statistics_df = pd.read_csv(stock_dir + '%s.csv' % first_stock).tail(1)
statistics_df['code'] = first_stock
if len(hold_stocks)>=2:
hold_stocks.pop(0)
for stock in hold_stocks:
temp_hold_df = pd.read_csv(stock_dir + '%s.csv' % stock).tail(1)
temp_hold_df['code'] = stock
statistics_df = statistics_df.append(temp_hold_df)
statistics_df = statistics_df.set_index('code')
return statistics_df
class StockSQL(object):
def __init__(self):
self.engine = create_engine('mysql+pymysql://emsadmin:[email protected]/stock?charset=utf8')#,encoding='utf-8',echo=True,convert_unicode=True)
self.hold = {}
#self.engine = create_engine('mysql+pymysql://emsadmin:[email protected]/stock?charset=gbk')
def get_table_df(self,table,columns=None):
"""
:param table: string type, db_name.table_name
:param columns: lit type with string value, like: ['acc_name', 'initial']
:return: DataFrame type
"""
        if columns:
            return pd.read_sql_table(table, self.engine, columns=columns)
        else:
            return pd.read_sql_table(table, self.engine)
def insert_table(self,data_frame,table_name,is_index=False):
"""
:param data_frame: DataFrame type
:param table_name: string type, name of table
:return:
"""
data_frame.to_sql(table_name, self.engine, index=is_index,if_exists='append')#encoding='utf-8')
return
def query_data(self,table,fields=None,condition=None):
"""
:param table: string type, db_name.table_name
:param fields: string type, like 'id,type,value'
:param condition: string type, like 'field_value>50'
:return: DataFrame type
"""
query_sql=form_sql(table_name=table, oper_type='query', select_field=fields, where_condition=condition)
print(query_sql)
return pd.read_sql_query(query_sql, self.engine)
def insert_data(self,table,fields,data):
"""
:param table: string type, db_name.table_name
:param fields: string type, like 'id,type,value'
:param data: list type of list value, like: data=[['李5'],['黄9']]
:return:
"""
fields='(' + fields +')'
insert_sql=form_sql(table_name=table,oper_type='insert',insert_field=fields)
sql.execute(insert_sql, self.engine, params=data)
def update_data(self,table,fields,values,condition=None):
"""
:param table: string type, db_name.table_name
:param fields: string type, like 'value'
:param values: string type, like: '1000' (for value type) or "'normal'" (for string type)
:param condition: string type, like 'field_value>50'
:return:
"""
if isinstance(values, str):
values="'%s'"%values
update_sql=form_sql(table_name=table,oper_type='update',update_field=fields,update_value=values,where_condition=condition)
sql.execute(update_sql,self.engine)
def delete_data(self,table_name,condition=None):
"""
:param table_name: string type, db_name.table_name
:param condition: string type, like 'field_value>50'
:return:
"""
delete_sql=form_sql(table_name=table_name,oper_type='delete',where_condition=condition)
print('delete_sql=',delete_sql)
sql.execute(delete_sql, self.engine)
def drop_table(self,table_name):
"""
:param table_name: string type, db_name.table_name
:return:
"""
drop_sql='drop table %s' % table_name
sql.execute(drop_sql, self.engine)
def get_last_db_date(self,code_str,histdata_last_df):
"""
:param code_str: string type, code_name
:param histdata_last_df: dataframe type, df from table histdata
:return: last_date: pandas datetime, last db date
"""
if histdata_last_df.empty:
#print('histdata_last_df is empty')
return None
else:
try:
histdata_last_df=histdata_last_df.set_index('code')
last_date=histdata_last_df.loc[code_str,'last_db_time']
return last_date
except KeyError as e:
#print('KeyError:',e)
return None
def get_table_update_time(self):
update_time_sql = "select TABLE_NAME,UPDATE_TIME from information_schema.TABLES where TABLE_SCHEMA='stock';"
update_datas = pd.read_sql_query(update_time_sql, self.engine)
update_datas = update_datas.set_index('TABLE_NAME')
table_time = {}
if update_datas.empty:
pass
else:
for index in update_datas.index.values.tolist():
update_time = update_datas.loc[index].UPDATE_TIME
table_time.update({index:update_time})
return table_time
def update_one_stock(self, symbol,force_update=False):
index_symbol_maps = {'sh':'999999','sz':'399001','zxb':'399005','cyb':'399006',
'sh50':'000016','sz300':'399007','zx300':'399008','hs300':'000300'}
        FIX_FACTOR = 1.0
        RAW_HIST_DIR = "C:/中国银河证券海王星/T0002/export/"  # YH (Galaxy) client export directory
        file_type = 'csv'
d_format='%Y/%m/%d'
last_date_str = tt.get_last_trade_date(date_format=d_format)
latest_date_str = tt.get_latest_trade_date(date_format=d_format)
print('last_date_str=',last_date_str)
print('latest_date_str=',latest_date_str)
next_date_str = tt.get_next_trade_date(date_format=d_format)
#print(next_date_str)
quotation_date = ''
try:
quotation_index_df = qq.get_qq_quotations([symbol], ['code','date','open','high','low','close','volume','amount'])
quotation_date = quotation_index_df.iloc[0]['date']
#quotation_index_df = ts.get_index()
except:
            time.sleep(3)
quotation_index_df = qq.get_qq_quotations([symbol], ['code','date','open','high','low','close','volume','amount'])
quotation_date = quotation_index_df.iloc[0]['date']
print('quotation_date=',quotation_date)
print(quotation_index_df)
quotation_index_df['factor'] = 1.0
quotation_index_df = quotation_index_df[['date','open','high','low','close','volume','amount','factor']]
#quotation_index_df.iloc[0]['volume'] = 0
#quotation_index_df.iloc[0]['amount'] = 0
print(quotation_index_df)
#print(quotation_index_df)
need_to_send_mail = []
sub = ''
index_name = symbol
#table_update_times = self.get_table_update_time()
if quotation_date:
yh_symbol = symbol
if symbol in index_symbol_maps.keys():
yh_symbol = index_symbol_maps[index_name]
            yh_file_name = RAW_HIST_DIR + yh_symbol + '.' + file_type
#yh_index_df = get_yh_raw_hist_df(code_str=symbol)
yh_index_df = pd.read_csv(yh_file_name)
yh_index_df['factor'] = FIX_FACTOR
yh_last_date = yh_index_df.tail(1).iloc[0]['date']
print('yh_last_date=',yh_last_date)
print( yh_index_df)#.head(len(yh_index_df)-1))
if True:
#date_data = self.query_data(table=index_name,fields='date',condition="date>='%s'" % last_date_str)
#data_len = len(date_data)
#this_table_update_time = table_update_times[index_name]
#print('this_table_update_time=', this_table_update_time)
if yh_last_date<last_date_str: #no update more than two day
"""需要手动下载银河客户端数据"""
print('Need to manual update %s index from YH APP! Please make suere you have sync up YH data' % index_name)
need_to_send_mail.append(index_name)
sub = '多于两天没有更新指数数据库'
content = '%s 数据表更新可能异常' % need_to_send_mail
sm.send_mail(sub,content,mail_to_list=None)
elif yh_last_date==last_date_str: # update by last date
"""只需要更新当天数据"""
yh_index_df = yh_index_df.append(quotation_index_df, ignore_index=True)
print(yh_index_df)
pass
else:# yh_last_date>latest_date_str: #update to latest date
"""YH已经更新到今天,要更新盘中获取的当天数据"""
print(' %s index updated to %s; not need to update' % (index_name,latest_date_str))
if force_update:
print(' force update %s index' % index_name)
yh_index_df0 = yh_index_df.head(len(yh_index_df)-1)
print(yh_index_df0)
yh_index_df = yh_index_df0.append(quotation_index_df, ignore_index=True)
print(yh_index_df)
else:
pass
yh_index_df = yh_index_df.set_index('date')
            data_dir = 'C:/hist/day/data/'
            file_name = data_dir + '%s.csv' % index_name
try:
os.remove(file_name)
print('Delete and update the csv file')
except:
pass
yh_index_df.to_csv(file_name ,encoding='utf-8')
return yh_index_df
def update_sql_index(self, index_list=['sh','sz','zxb','cyb','hs300','sh50'],force_update=False):
index_symbol_maps = {'sh':'999999','sz':'399001','zxb':'399005','cyb':'399006',
'sh50':'000016','sz300':'399007','zx300':'399008','hs300':'000300'}
        FIX_FACTOR = 1.0
        RAW_HIST_DIR = "C:/中国银河证券海王星/T0002/export/"  # YH (Galaxy) client export directory
        file_type = 'csv'
scz_code_str='399001'
zxb_code_str='399005'
chy_code_str='399006'
shz ='999999'
shz_50 = '000016'
hs_300 = '000300'
zx_300 ='399008'
sz_300 ='399007'
d_format='%Y/%m/%d'
last_date_str = tt.get_last_trade_date(date_format=d_format)
latest_date_str = tt.get_latest_trade_date(date_format=d_format)
print('last_date_str=',last_date_str)
print('latest_date_str=',latest_date_str)
#next_date_str = tt.get_next_trade_date(date_format=d_format)
#print(next_date_str)
try:
quotation_index_df = qq.get_qq_quotations(['sh','sz','zxb','cyb','hs300','sh50'], ['code','date','open','high','low','close','volume','amount'])
#quotation_index_df = ts.get_index()
except:
            time.sleep(3)
quotation_index_df = qq.get_qq_quotations(['sh','sz','zxb','cyb','hs300','sh50'], ['code','date','open','high','low','close','volume','amount'])
#quotation_index_df[['open','high','low','close']]=quotation_index_df[['open','high','low','close']].round(2)
#quotation_index_df['amount'] = quotation_index_df['amount']*(10**8)
#quotation_index_df['date'] = latest_date_str
quotation_index_df['factor'] = 1.0
#print(quotation_index_df)
need_to_send_mail = []
sub = ''
#table_update_times = self.get_table_update_time()
for index_name in index_list:
yh_symbol = index_symbol_maps[index_name]
            yh_file_name = RAW_HIST_DIR + yh_symbol + '.' + file_type
#yh_index_df = get_yh_raw_hist_df(code_str=symbol)
yh_index_df = pd.read_csv(yh_file_name)
yh_index_df['factor'] = FIX_FACTOR
try:
date_data = self.query_data(table=index_name,fields='date',condition="date>='%s'" % last_date_str)
data_len = len(date_data)
#this_table_update_time = table_update_times[index_name]
#print('this_table_update_time=', this_table_update_time)
if len(date_data)==0: #no update more than two day
"""需要手动下载银河客户端数据"""
print('Need to manual update %s index from YH APP! Please make suere you have sync up YH data' % index_name)
need_to_send_mail.append(index_name)
sub = '多于两天没有更新指数数据库'
self.drop_table(table_name=index_name)
self.insert_table(data_frame=yh_index_df,table_name=index_name)
elif len(date_data) == 1: # update by last date
"""只需要更新当天数据"""
self.update_sql_index_today(index_name,latest_date_str,quotation_index_df,index_symbol_maps)
pass
elif len(date_data) == 2: #update to latest date
"""要更新盘中获取的当天数据"""
print(' %s index updated to %s.' % (index_name,latest_date_str))
if force_update:
print(' force update %s index' % index_name)
self.delete_data(table_name=index_name,condition="date='%s'" % latest_date_str)
self.update_sql_index_today(index_name,latest_date_str,quotation_index_df,index_symbol_maps)
pass
else:
pass
else:
pass
#print(date_data)
except:
                sub = 'Data table does not exist'
                need_to_send_mail.append(index_name)
                print('Table %s does not exist.' % index_name)
                try:
                    self.drop_table(table_name=index_name)
except:
pass
self.insert_table(data_frame=yh_index_df,table_name=index_name,is_index=False)
print('Created the table %s.' % index_name)
if need_to_send_mail:
            content = '%s data tables may have failed to update' % need_to_send_mail
sm.send_mail(sub,content,mail_to_list=None)
    def update_sql_index_today(self,index_name,latest_date_str,quotation_index_df,index_symbol_maps=None):
        """Append today's update for the given index (index_symbol_maps is accepted for the callers above)."""
#index_sybol = index_symbol_maps[index_name]
#if index_name=='sh':
# index_sybol = '000001'
columns = ['date','open','high','low','close','volume','amount','factor']
single_index_df = quotation_index_df[quotation_index_df['code']==index_name]
single_index_df = single_index_df[columns]
if single_index_df.empty:
return
self.insert_table(data_frame=single_index_df,table_name=index_name,is_index=False)
def update_sql_position0(self, users={'36005':{'broker':'yh','json':'yh.json'},'38736':{'broker':'yh','json':'yh1.json'}}):
        sub = 'Position update failed'
fail_check = []
for account in list(users.keys()):
#stock_sql.drop_table(table_name='myholding')
try:
broker = users[account]['broker']
user_file = users[account]['json']
position_df,balance = get_position(broker, user_file)
self.hold[account] = position_df
self.insert_table(data_frame=position_df,table_name='hold')
except:
fail_check.append(account)
#self.insert_table(data_frame=position_df,table_name='balance')
time.sleep(2)
if fail_check:
            content = '%s position tables may have failed to update' % fail_check
            sm.send_mail(sub,content,mail_to_list=None)
            """Retry fetching the positions that failed above."""
for account in fail_check:
#stock_sql.drop_table(table_name='myholding')
try:
broker = users[account]['broker']
user_file = users[account]['json']
position_df,balance = get_position(broker, user_file)
self.hold[account] = position_df
self.insert_table(data_frame=position_df,table_name='hold')
except:
pass
#self.insert_table(data_frame=position_df,table_name='balance')
def get_except_codes(self):
        # Fetch the excluded stock codes from the database, e.g. high-risk names,
        # long-term holds, or newly allotted IPO shares.
except_df = self.query_data(table='stock.except',fields='code',condition='valid=1')
return except_df['code'].values.tolist()
def get_exit_monitor_stocks(self,accounts=['36005']):
hold_df,hold_stocks,available_sells =his_sql.get_hold_stocks(accounts)
except_codes = his_sql.get_except_codes()
#monitor_indexs = ['sh000001','399006']
available_sells = list(set(available_sells).difference(set(except_codes)))
return available_sells
def update_index_chooce_time(self,yh_index='999999'):
pre_stock_trade_state = 0
s_stock=Stockhistory(yh_index,'D',test_num=0,source='YH')
        result_df = s_stock.form_temp_df(yh_index)
test_result = s_stock.regression_test()
#last_trade_date =test_result.last_trade_date
last_trade_price=test_result.last_trade_price
print('last_trade_price=',last_trade_price)
if pre_stock_trade_state>0:
#is_system_risk()
pass
else:
pass
return
def update_sql_position(self, users={'account':'36005','broker':'yh','json':'yh.json'}):
try:
account_id = users['account']
broker = users['broker']
user_file = users['json']
position_df,balance = get_position(broker, user_file)
except_codes = self.get_except_codes()
except_holds = list(set(except_codes) & set(position_df['证券代码'].values.tolist()))
"""
if except_holds:
position_df['valid'] = np.where((position_df['证券代码']==except_holds[0]),0,1)
except_holds.pop(0)
for code in except_holds:
#position_df.loc['证券代码','valid'] = 0
position_df['valid'] = np.where((position_df['证券代码']==code),0,position_df['valid'])
else:
position_df['valid'] = 1
"""
position_df['valid'] = 1
for code in except_holds:
                position_df.loc[position_df['证券代码'] == code, 'valid'] = 0
#df=df.tail(20)
#df[['close']].apply(lambda x: (x - x.min()) / (x.max()-x.nin()))
self.hold[account_id] = position_df
#print(position_df)
table_name='acc%s'%account_id
try:
self.drop_table(table_name)
except:
pass
self.insert_table(data_frame=position_df,table_name='acc%s'%account_id)
return
except:
time.sleep(10)
self.update_sql_position(users)
def get_hold_stocks(self,accounts=['36005', '38736']):
if len(accounts)<1:
return False
table_name='acc%s'%accounts[0]
hold_df = self.get_table_df(table_name)
if len(accounts)>=2:
accounts.pop(0)
for account in accounts:
table_name='acc%s'%account
next_hold_df = self.get_table_df(table_name)
hold_df = hold_df.append(next_hold_df)
hold_stock_all = hold_df['证券代码'].values.tolist()
#hold_stocks = ['000932', '002362', '300431', '601009', '000007', '000932', '002405', '600570', '603398']
        hold_stocks = list(set(hold_stock_all))
#print('hold_stocks=',hold_stocks)
#print(hold_df)
available_sells = []
if not hold_df.empty:
available_sell_df = hold_df[(hold_df['valid']==1) & (hold_df['股份可用']>=100)]
if not available_sell_df.empty:
available_sells = available_sell_df['证券代码'].values.tolist()
return hold_df,hold_stocks,available_sells
def get_forvary_stocks(self):
return
def download_hist_as_csv(self,indexs = ['sh','sz','zxb','cyb','hs300','sh50'],dir='C:/hist/day/data/'):
for index in indexs:
index_df = self.get_table_df(table=index)
index_df = index_df.set_index('date')
#print(index_df)
file_name = dir+ '%s.csv' % index
try:
os.remove(file_name)
print('Delete and update the csv file')
except:
pass
index_df.to_csv(file_name ,encoding='utf-8')
return
def update_last_db_date(self,code_str,last_date,update_date):
"""
:param code_str: string type, code_name
:param last_date: string type, last db date
:param update_date: string type, this date
:return:
"""
if last_date:
if update_date>last_date:
self.update_data(table='histdata_last', fields='last_db_time', values="%s"%update_date, condition="code='%s'"%code_str)
else:
pass
else:
if update_date:
self.insert_data(table='histdata_last', fields='code,last_db_time', data=[[code_str,update_date]])
#print('firstly update: last_db_time',update_date)
else:
pass
#for chunk_df in pd.read_sql_query("SELECT * FROM today_stock", engine, chunksize=5):
# print(chunk_df)
if __name__ == '__main__':
    stock_sql_obj = StockSQL()
    stock_sql_obj.update_index_chooce_time()
def stock_sql_test():
stock_sql_obj=StockSQL()
table='test'
df=stock_sql_obj.get_table_df(table)#, columns=['name'])
print('get_table_df=')
print(df)
df2=stock_sql_obj.get_table_df(table, columns=['name'])
print(df2)
print('query_data:')
df3=stock_sql_obj.query_data(table)
print(df3)
df3=stock_sql_obj.query_data(table, 'name', "name='王五'")
print(df3)
print('insert_data:')
data=[['李二'],['黄三']]
stock_sql_obj.insert_data(table, 'name', data)
print('update_data:')
stock_sql_obj.update_data(table, 'name', "'陈五'", condition="name='王五'")
#stock_sql_obj.update_data(table, 'name', "'陈五'", condition="name='王五'")
print('delete_data')
stock_sql_obj.delete_data(table, "name='陈五'")
| gpl-2.0 | 8,063,363,953,376,115,000 | 42.96055 | 161 | 0.547881 | false |
cskyan/chmannot | bin/chm_gendata.py | 1 | 14312 | #!/usr/bin/env python
# -*- coding=utf-8 -*-
###########################################################################
# Copyright (C) 2013-2016 by Caspar. All rights reserved.
# File Name: chm_gendata.py
# Author: Shankai Yan
# E-mail: [email protected]
# Created Time: 2016-03-01 22:15:59
###########################################################################
#
import os
import sys
import logging
import ast
from optparse import OptionParser
import numpy as np
import scipy as sp
import pandas as pd
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer, MinMaxScaler
from sklearn.decomposition import LatentDirichletAllocation, NMF, TruncatedSVD
import bionlp.spider.pubmed as pm
import bionlp.spider.metamap as mm
from bionlp import ftslct, ftdecomp
from bionlp.util import fs, io, sampling
import hoc
FILE_DIR = os.path.dirname(os.path.realpath(__file__))
PAR_DIR = os.path.abspath(os.path.join(FILE_DIR, os.path.pardir))
CONFIG_FILE = os.path.join(PAR_DIR, 'etc', 'config.yaml')
SPDR_MAP = {'hoc':hoc, 'pbmd':pm}
SC=';;'
opts, args = {}, []
cfgr = None
spdr = pm
def gen_data():
if (opts.local):
X, Y = spdr.get_data(None, from_file=True)
else:
pmid_list = spdr.get_pmids()
articles = spdr.fetch_artcls(pmid_list)
X, Y = spdr.get_data(articles, ft_type=opts.type, max_df=ast.literal_eval(opts.maxdf), min_df=ast.literal_eval(opts.mindf), fmt=opts.fmt, spfmt=opts.spfmt)
hallmarks = Y.columns
# Feature Selection
# mt = sp.sparse.coo_matrix(X)
# mask_mt = np.zeros(mt.shape)
# mask_mt[mt.row, mt.col] = 1
# stat = mask_mt.sum(axis=0)
# cln_X = X.iloc[:,np.arange(stat.shape[0])[stat>ast.literal_eval(opts.thrshd) * (stat.max() - stat.min()) + stat.min()]]
# Document Frequence
# stat, _ = ftslct.freqs(X.values, Y.values)
# Mutual information
# stat, _ = ftslct.mutual_info(X.values, Y.values)
# Information gain
# stat, _ = ftslct.info_gain(X.values, Y.values)
# GSS coefficient
# stat, _ = ftslct.gss_coef(X.values, Y.values)
# NGL coefficient
# stat, _ = ftslct.ngl_coef(X.values, Y.values)
# Odds ratio
# stat, _ = ftslct.odds_ratio(X.values, Y.values)
# Fisher criterion
# stat, _ = ftslct.fisher_crtrn(X.values, Y.values)
# GU metric
# stat, _ = ftslct.gu_metric(X.values, Y.values)
# Decision tree
# stat, _ = ftslct.decision_tree(X.values, Y.values)
# Combined feature
stat, _ = ftslct.utopk(X.values, Y.values, ftslct.decision_tree, fn=100)
io.write_npz(stat, os.path.join(spdr.DATA_PATH, 'ftw.npz'))
# cln_X = X.iloc[:,np.arange(stat.shape[0])[stat>stat.min()]]
cln_X = X.iloc[:,stat.argsort()[-500:][::-1]]
print 'The size of data has been changed from %s to %s.' % (X.shape, cln_X.shape)
if (opts.fmt == 'npz'):
io.write_df(cln_X, os.path.join(spdr.DATA_PATH, 'cln_X.npz'), with_idx=True, sparse_fmt=opts.spfmt, compress=True)
else:
cln_X.to_csv(os.path.join(spdr.DATA_PATH, 'cln_X.csv'), encoding='utf8')
del X, cln_X
for i in range(Y.shape[1]):
y = Y.iloc[:,i]
if (opts.fmt == 'npz'):
io.write_df(y, os.path.join(spdr.DATA_PATH, 'y_%s.npz' % i), with_col=False, with_idx=True)
else:
y.to_csv(os.path.join(spdr.DATA_PATH, 'y_%s.csv' % i), encoding='utf8')
def samp_data(sp_size = 0.3):
pid = opts.pid
if (pid != None):
iter_size = 30
X_iter, labels= spdr.get_feats_iter('y_%s.csv' % pid, iter_size)
new_X, new_y = sampling.samp_df_iter(X_iter, iter_size, labels, sp_size)
new_X.to_csv(os.path.join(spdr.DATA_PATH, 'samp_X_%i.csv' % pid), encoding='utf8')
        new_y.to_csv(os.path.join(spdr.DATA_PATH, 'samp_y_%s.csv' % pid), encoding='utf8')
else:
for i in range(10):
iter_size = 30
X_iter, labels= spdr.get_feats_iter('y_%s.csv' % i, iter_size)
new_X, new_y = sampling.samp_df_iter(X_iter, iter_size, labels, sp_size)
new_X.to_csv(os.path.join(spdr.DATA_PATH, 'samp_X_%i.csv' % i), encoding='utf8')
            new_y.to_csv(os.path.join(spdr.DATA_PATH, 'samp_y_%s.csv' % i), encoding='utf8')
def extend_mesh(ft_type='binary'):
X, Y = spdr.get_data(None, ft_type=opts.type, max_df=ast.literal_eval(opts.maxdf), min_df=ast.literal_eval(opts.mindf), from_file=True, fmt=opts.fmt, spfmt=opts.spfmt)
mesh_df = mm.mesh_countvec(X.index)
mesh_df.columns = ['extmesh_' + x for x in mesh_df.columns]
new_X = pd.concat([X, mesh_df], axis=1, join_axes=[X.index])
print 'The size of data has been changed from %s to %s.' % (X.shape, new_X.shape)
if (opts.fmt == 'npz'):
io.write_df(new_X, os.path.join(spdr.DATA_PATH, 'extmesh_X.npz'), with_idx=True, sparse_fmt=opts.spfmt, compress=True)
else:
new_X.to_csv(os.path.join(spdr.DATA_PATH, 'extmesh_X.csv'), encoding='utf8')
def expand_data(ft_type='binary', db_name='mesh2016', db_type='LevelDB', store_path='store'):
from rdflib import Graph
from bionlp.util import ontology
X, Y = spdr.get_data(None, ft_type=opts.type, max_df=ast.literal_eval(opts.maxdf), min_df=ast.literal_eval(opts.mindf), from_file=True, fmt=opts.fmt, spfmt=opts.spfmt)
mesh_cols = filter(lambda x: x.startswith('mesh_') or x.startswith('extmesh_'), X.columns)
mesh_X = X.loc[:,mesh_cols]
exp_meshx = set([])
ext_meshx_dict = {}
g = Graph(store=db_type, identifier=db_name)
g.open(store_path)
for col in mesh_X.columns:
        # str.strip() removes a character *set*, not a prefix, so peel the
        # prefix off explicitly before escaping quotes.
        if col.startswith('extmesh_'):
            mesh_lb = col[len('extmesh_'):]
        else:
            mesh_lb = col[len('mesh_'):]
        mesh_lb = mesh_lb.replace('"', '\\"')
# Get similar MeSH terms
em_set = set(ontology.slct_sim_terms(g, mesh_lb, prdns=[('meshv',ontology.MESHV)], eqprds=ontology.MESH_EQPRDC_MAP))
# Overall extended MeSH terms
exp_meshx |= em_set
# Extended MeSH terms per column
ext_meshx_dict[col] = em_set
g.close()
exp_mesh_X = pd.DataFrame(np.zeros((mesh_X.shape[0], len(exp_meshx)), dtype='int8'), index=X.index, columns=['expmesh_%s' % w for w in exp_meshx])
# Append the similar MeSH terms of each column to the final matrix
for col, sim_mesh in ext_meshx_dict.iteritems():
if (len(sim_mesh) == 0): continue
sim_cols = ['expmesh_%s' % w for w in sim_mesh]
if (ft_type == 'binary'):
exp_mesh_X.loc[:,sim_cols] = np.logical_or(exp_mesh_X.loc[:,sim_cols], mesh_X.loc[:,col].reshape((-1,1))).astype('int')
elif (ft_type == 'numeric'):
exp_mesh_X.loc[:,sim_cols] += mesh_X.loc[:,col].reshape((-1,1))
elif (ft_type == 'tfidf'):
pass
new_X = pd.concat([X, exp_mesh_X], axis=1, join_axes=[X.index])
print 'The size of data has been changed from %s to %s.' % (X.shape, new_X.shape)
if (opts.fmt == 'npz'):
io.write_df(new_X, os.path.join(spdr.DATA_PATH, 'exp_X.npz'), with_idx=True, sparse_fmt=opts.spfmt, compress=True)
else:
new_X.to_csv(os.path.join(spdr.DATA_PATH, 'exp_X.csv'), encoding='utf8')
def decomp_data(method='LDA', n_components=100):
X, Y = spdr.get_data(None, ft_type=opts.type, max_df=ast.literal_eval(opts.maxdf), min_df=ast.literal_eval(opts.mindf), from_file=True, fmt=opts.fmt, spfmt=opts.spfmt)
method = method.upper()
n_components = min(n_components, X.shape[1])
if (method == 'LDA'):
model = make_pipeline(LatentDirichletAllocation(n_topics=n_components, learning_method='online', learning_offset=50., max_iter=5, n_jobs=opts.np, random_state=0), Normalizer(copy=False))
elif (method == 'NMF'):
model = make_pipeline(NMF(n_components=n_components, random_state=0, alpha=.1, l1_ratio=.5), Normalizer(copy=False))
elif (method == 'LSI'):
model = make_pipeline(TruncatedSVD(n_components), Normalizer(copy=False))
elif (method == 'TSNE'):
model = make_pipeline(ftdecomp.DecompTransformer(n_components, ftdecomp.t_sne, initial_dims=15*n_components, perplexity=30.0))
if (opts.prefix == 'all'):
td_cols = X.columns
else:
# Only apply dimension reduction on specific columns
td_cols = np.array(map(lambda x: True if any(x.startswith(prefix) for prefix in opts.prefix.split(SC)) else False, X.columns))
td_X = X.loc[:,td_cols]
new_td_X = model.fit_transform(td_X.as_matrix())
if (opts.prefix == 'all'):
columns = range(new_td_X.shape[1]) if not hasattr(model.steps[0][1], 'components_') else td_X.columns[model.steps[0][1].components_.argmax(axis=1)]
new_X = pd.DataFrame(new_td_X, index=X.index, columns=['tp_%s' % x for x in columns])
else:
columns = range(new_td_X.shape[1]) if not hasattr(model.steps[0][1], 'components_') else td_X.columns[model.steps[0][1].components_.argmax(axis=1)]
# Concatenate the components and the columns are not applied dimension reduction on
new_X = pd.concat([pd.DataFrame(new_td_X, index=X.index, columns=['tp_%s' % x for x in columns]), X.loc[:,np.logical_not(td_cols)]], axis=1)
if (opts.fmt == 'npz'):
io.write_df(new_X, os.path.join(spdr.DATA_PATH, '%s%i_X.npz' % (method.lower(), n_components)), with_idx=True, sparse_fmt=opts.spfmt, compress=True)
else:
new_X.to_csv(os.path.join(spdr.DATA_PATH, '%s%i_X.csv' % (method.lower(), n_components)), encoding='utf8')
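# For example, decomp_data(method='NMF', n_components=100) is expected to write
# nmf100_X.npz (or .csv) next to the source matrix, per the naming scheme above.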
def add_d2v(n_components=100, win_size=8, min_t=5, mdl_fname='d2v.mdl'):
from gensim.parsing.preprocessing import preprocess_string
from gensim.models.doc2vec import TaggedDocument, Doc2Vec
def read_files(fpaths, code='ascii'):
for fpath in fpaths:
try:
yield TaggedDocument(words=preprocess_string('\n'.join(fs.read_file(fpath, code))), tags=[os.path.splitext(os.path.basename(fpath))[0]])
except Exception as e:
continue
def read_prcsed_files(fpaths, code='ascii'):
for fpath in fpaths:
try:
words = []
for line in fs.read_file(fpath, code):
if (line == '~~~'):
continue
if (line == '. . .' or line == '~~~ ~~~' or line == ', , ,'):
continue
items = line.split()
if (len(items) < 3): # Skip the unrecognized words
continue
words.append(items[2].lower())
yield TaggedDocument(words=words, tags=[os.path.splitext(os.path.basename(fpath))[0]])
except Exception as e:
continue
mdl_fpath = os.path.join(spdr.DATA_PATH, mdl_fname)
if (os.path.exists(mdl_fpath)):
model = Doc2Vec.load(mdl_fpath)
else:
# model = Doc2Vec(read_files(fs.listf(spdr.ABS_PATH, full_path=True)), size=n_components, window=8, min_count=5, workers=opts.np)
model = Doc2Vec(read_prcsed_files(fs.listf(os.path.join(spdr.DATA_PATH, 'lem'), full_path=True)), size=n_components, window=8, min_count=5, workers=opts.np)
model.save(os.path.join(spdr.DATA_PATH, mdl_fname))
X, Y = spdr.get_data(None, ft_type=opts.type, max_df=ast.literal_eval(opts.maxdf), min_df=ast.literal_eval(opts.mindf), from_file=True, fmt=opts.fmt, spfmt=opts.spfmt)
# Map the index of original matrix to that of the paragraph vectors
    # str.rstrip() strips a character set, so trim the '.lem' suffix explicitly.
    doctags = [model.docvecs.index_to_doctag(i) for i in range(model.docvecs.count)]
    d2v_idx = [t[:-len('.lem')] if t.endswith('.lem') else t for t in doctags]
mms = MinMaxScaler()
d2v_X = pd.DataFrame(mms.fit_transform(model.docvecs[range(model.docvecs.count)]), index=d2v_idx, columns=['d2v_%i' % i for i in range(model.docvecs[0].shape[0])])
# d2v_X = pd.DataFrame(model.docvecs[range(model.docvecs.count)], index=d2v_idx, columns=['d2v_%i' % i for i in range(model.docvecs[0].shape[0])])
new_X = pd.concat([X, d2v_X], axis=1, join_axes=[X.index])
print 'The size of data has been changed from %s to %s.' % (X.shape, new_X.shape)
if (opts.fmt == 'npz'):
io.write_df(d2v_X, os.path.join(spdr.DATA_PATH, 'd2v_X.npz'), with_idx=True, sparse_fmt=opts.spfmt, compress=True)
io.write_df(new_X, os.path.join(spdr.DATA_PATH, 'cmb_d2v_X.npz'), with_idx=True, sparse_fmt=opts.spfmt, compress=True)
else:
d2v_X.to_csv(os.path.join(spdr.DATA_PATH, 'd2v_X.csv'), encoding='utf8')
new_X.to_csv(os.path.join(spdr.DATA_PATH, 'cmb_d2v_X.csv'), encoding='utf8')
def main():
if (opts.method is None):
return
elif (opts.method == 'gen'):
gen_data()
elif (opts.method == 'samp'):
samp_data()
elif (opts.method == 'extend'):
extend_mesh()
elif (opts.method == 'expand'):
expand_data(store_path=os.path.join(spdr.DATA_PATH, 'store'))
elif (opts.method == 'decomp'):
decomp_data(method=opts.decomp.upper(), n_components=opts.cmpn)
elif (opts.method == 'd2v'):
add_d2v(n_components=opts.cmpn)
if __name__ == '__main__':
# Logging setting
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s')
# Parse commandline arguments
op = OptionParser()
op.add_option('-p', '--pid', action='store', type='int', dest='pid', help='indicate the process ID')
op.add_option('-n', '--np', default=-1, action='store', type='int', dest='np', help='indicate the number of processes used for training')
op.add_option('-f', '--fmt', default='npz', help='data stored format: csv or npz [default: %default]')
op.add_option('-s', '--spfmt', default='csr', help='sparse data stored format: csr or csc [default: %default]')
op.add_option('-l', '--local', default=False, action='store_true', dest='local', help='read data from the preprocessed data matrix file')
op.add_option('-t', '--type', default='binary', help='feature type: binary, numeric, tfidf [default: %default]')
op.add_option('-a', '--mindf', default='1', type='str', dest='mindf', help='lower document frequency threshold for term ignorance')
op.add_option('-b', '--maxdf', default='1.0', type='str', dest='maxdf', help='upper document frequency threshold for term ignorance')
op.add_option('-r', '--thrshd', default='0.05', type='str', dest='thrshd', help='feature frequency threshold for filtering')
op.add_option('-d', '--decomp', default='LDA', help='decomposition method to use: LDA, NMF, LSI or TSNE [default: %default]')
op.add_option('-c', '--cmpn', default=100, type='int', dest='cmpn', help='number of components that used in clustering model')
op.add_option('-j', '--prefix', default='all', type='str', dest='prefix', help='prefixes of the column names that the decomposition method acts on, for example, \'-j lem;;nn;;ner\' means columns that starts with \'lem_\', \'nn_\', or \'ner_\'')
op.add_option('-i', '--input', default='hoc', help='input source: hoc or pbmd [default: %default]')
op.add_option('-m', '--method', help='main method to run')
(opts, args) = op.parse_args()
if len(args) > 0:
op.print_help()
op.error('Please input options instead of arguments.')
sys.exit(1)
spdr = SPDR_MAP[opts.input]
# Parse config file
if (os.path.exists(CONFIG_FILE)):
cfgr = io.cfg_reader(CONFIG_FILE)
spdr_cfg = cfgr('bionlp.spider.%s' % opts.input, 'init')
if (len(spdr_cfg) > 0 and spdr_cfg['DATA_PATH'] is not None and os.path.exists(spdr_cfg['DATA_PATH'])):
spdr.DATA_PATH = spdr_cfg['DATA_PATH']
main() | apache-2.0 | -7,199,348,447,836,130,000 | 46.71 | 245 | 0.666015 | false |
gawel/panoramisk | examples/fast_agi_server_ivr.py | 1 | 1407 | from pprint import pprint
import asyncio
from panoramisk import fast_agi
loop = asyncio.get_event_loop()
async def call_waiting(request):
pprint(['AGI variables:', request.headers])
pprint((await request.send_command('ANSWER')))
pprint((await request.send_command('SAY DIGITS 1 \"\"')))
# To Raise a 510 error - 510 Invalid or unknown command
pprint((await request.send_command('INVALID-COMMAND')))
# To Raise a 520 error - 520-Invalid command syntax. Proper usage follows:
pprint((await request.send_command('SAY PHONETIC Hello world .')))
pprint((await request.send_command('SAY NUMBER 100 \"\"')))
pprint((await request.send_command('GET DATA hello-world 5000 2')))
pprint((await request.send_command('EXEC StartMusicOnHold')))
pprint((await request.send_command('EXEC Wait 30')))
def main():
fa_app = fast_agi.Application(loop=loop)
fa_app.add_route('call_waiting', call_waiting)
coro = asyncio.start_server(fa_app.handler, '0.0.0.0', 4574, loop=loop)
server = loop.run_until_complete(coro)
# Serve requests until CTRL+c is pressed
print('Serving on {}'.format(server.sockets[0].getsockname()))
try:
loop.run_forever()
except KeyboardInterrupt:
pass
# Close the server
server.close()
loop.run_until_complete(server.wait_closed())
loop.close()
if __name__ == '__main__':
main()
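# To route a call into this FastAGI server from Asterisk, a dialplan entry along
# these lines should work (host/port must match the server above; the URL path
# selects the registered 'call_waiting' route):
#   exten => 100,1,AGI(agi://127.0.0.1:4574/call_waiting)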
| mit | -8,811,220,548,279,284,000 | 28.93617 | 78 | 0.673063 | false |
NeuroRoboticTech/Jetduino | Software/Python/grove_analog_read.py | 1 | 2116 | #!/usr/bin/env python
#
# Jetduino Example for using the analog read command to read analog sensor values
#
# The Jetduino connects the Jetson and Grove sensors. You can learn more about the Jetduino here: http://www.NeuroRoboticTech.com/Projects/Jetduino
#
# Have a question about this example? Ask on the forums here: http://www.NeuroRoboticTech.com/Forum
#
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2015 Dexter Industries
Jetduino for the Jetson TK1/TX1: an open source platform for connecting
Grove Sensors to the Jetson embedded supercomputers.
Copyright (C) 2016 NeuroRobotic Technologies
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import time
import jetduino
from jetduino_pins import *
sensor = ARD_A0
jetduino.pinMode(sensor, INPUT_PIN)
jetduino.setAnalogReadResolution(10)
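# With a 10-bit resolution the analog read below should return values in the
# 0-1023 range (assuming a Grove analog sensor is wired to A0).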
while True:
try:
sensor_value = jetduino.analogRead(sensor)
#if sensor_value >= 0:
print ("sensor_value =", sensor_value)
time.sleep(.5)
except IOError:
print ("Error")
| mit | 5,633,382,517,186,987,000 | 34.266667 | 149 | 0.768431 | false |
cdman/hnarchive | app.py | 1 | 8451 | import datetime
import logging
import re
import webapp2
from google.appengine.ext import ndb
from google.appengine.api import urlfetch
import bs4
class Node(ndb.Model):
parent = ndb.KeyProperty('Node', indexed=False)
title = ndb.StringProperty(indexed=False)
url = ndb.TextProperty(indexed=False)
user = ndb.StringProperty(indexed=False)
body = ndb.TextProperty(indexed=False)
score = ndb.IntegerProperty(indexed=False)
comment_count = ndb.IntegerProperty(indexed=False)
added_at = ndb.DateTimeProperty(indexed=False)
retrieved_at = ndb.DateTimeProperty(auto_now_add=True, indexed=False)
class PendingNode(ndb.Model):
added_at = ndb.DateTimeProperty(auto_now_add=True, indexed=True)
class MinMax(ndb.Model):
INSTANCE_KEY = ndb.Key('MinMax', 1)
low_bound = ndb.IntegerProperty(default=0, indexed=False)
upper_bound = ndb.IntegerProperty(default=1, indexed=False)
processed_nodes = ndb.IntegerProperty(default=0, indexed=False)
class Webpage(ndb.Model):
url = ndb.StringProperty(indexed=False)
fetched_at = ndb.DateTimeProperty(auto_now_add=True, indexed=False)
html = ndb.BlobProperty(indexed=False, compressed=True)
def get(url):
assert url.startswith('https://news.ycombinator.com/')
result = urlfetch.fetch(url=url,
headers={'User-Agent': 'HNArchive - [email protected] / https://github.com/cdman/hnarchive'})
logging.info('Retrieved %s', url)
assert result.status_code == 200
assert 'Hacker News' in result.content
ndb.non_transactional(Webpage(url=url, html=result.content).put)()
return bs4.BeautifulSoup(result.content, 'lxml')
@ndb.non_transactional(allow_existing=True)
def skipExisting(ids):
nodes = ndb.get_multi([ndb.Key(Node, i) for i in ids if i > 0])
keys = set([0] + [n.key.id() for n in nodes if n])
return [i for i in ids if not i in keys]
def extractUniqueIds(page):
return set([
long(re.sub(r'.*?(\d+)', r'\1', link['href']))
for link in page.find_all(href=re.compile(r'item\?id=\d+'))
])
@ndb.transactional(xg=True, propagation=ndb.TransactionOptions.INDEPENDENT)
def fetchListing(url):
listing = get(url)
minMax = MinMax.INSTANCE_KEY.get()
if not minMax:
minMax = MinMax(key=MinMax.INSTANCE_KEY)
ids = extractUniqueIds(listing)
new_ids = skipExisting(ids)
if not new_ids:
logging.info('No new ids found')
return
if max(new_ids) > minMax.upper_bound:
minMax.upper_bound = max(new_ids)
minMax.put()
logging.info('New upper bound: %d', max(new_ids))
ndb.non_transactional(ndb.put_multi)([
PendingNode(key=ndb.Key(PendingNode, i)) for i in new_ids])
logging.info('Discovered new nodes: %s', new_ids)
def fetchFrontPage():
fetchListing('https://news.ycombinator.com/')
def fetchNewest():
fetchListing('https://news.ycombinator.com/newest')
@ndb.transactional(xg=True, propagation=ndb.TransactionOptions.INDEPENDENT)
def fetchMin():
minMax = MinMax.INSTANCE_KEY.get()
if not minMax:
minMax = MinMax(key=MinMax.INSTANCE_KEY)
while True:
minMax.low_bound += 1
if minMax.low_bound >= minMax.upper_bound:
return
if ndb.non_transactional(ndb.Key(Node, minMax.low_bound).get)() is None:
break
ndb.put_multi([minMax, PendingNode(key=ndb.Key(PendingNode, minMax.low_bound))])
def extractMatch(text, pattern):
match = re.search(pattern, text)
if match is None: return
return match.group(1)
def populateFromMeta(node, meta, parent_id):
meta_text = meta.text
node.user = meta.find(href=re.compile(r'^user\?id=.+'))['href'].replace('user?id=', '')
node.key = ndb.Key(Node, long(
meta.find(href=re.compile(r'^item\?id=.+'))['href'].replace('item?id=', '')))
if extractMatch(meta_text, r'(\d+) points?'):
node.score = long(extractMatch(meta_text, r'(\d+) points?'))
if extractMatch(meta_text, r'(\d+) (?:minute|hour|day)s? ago'):
qty = long(extractMatch(meta_text, r'(\d+) (?:minute|hour|day)s? ago'))
metric = extractMatch(meta_text, r'\d+ (minute|hour|day)s? ago')
node.added_at = datetime.datetime.utcnow()
if metric == 'minute':
node.added_at -= datetime.timedelta(minutes=qty)
elif metric == 'hour':
node.added_at -= datetime.timedelta(hours=qty)
elif metric == 'day':
node.added_at -= datetime.timedelta(days=qty)
else:
assert False
if extractMatch(meta_text, r'(\d+) comments?'):
node.comment_count = long(extractMatch(meta_text, r'(\d+) comments?'))
parent = meta.find('a', text='parent')
if parent:
node.parent = ndb.Key(Node, long(parent['href'].replace('item?id=', '')))
else:
node.parent = ndb.Key(Node, parent_id)
@ndb.non_transactional
def parseTable(t, parent_id):
head = t.find('td', class_='title')
ids = []
if head is not None:
node = Node()
node.title = head.text
node.url = head.find('a')['href']
populateFromMeta(node, head.parent.parent.find_all('tr')[1], parent_id)
text = ''.join([unicode(n) for n in head.parent.parent.find_all('tr')[2:] if n.text.strip()])
text, _ = re.subn(r'</?t[dr]>', '', text)
if text:
node.body = text
node.put()
ids.append(node.key.id())
logging.info('Saved %d', node.key.id())
for comment in t.find_all('td', class_='default'):
parent_table = comment
while parent_table and parent_table.name != 'table':
parent_table = parent_table.parent
if parent_table and parent_table.find('a', text='link'):
pparent_id = long(parent_table.find('a', text='link')['href'].replace('item?id=', ''))
else:
pparent_id = parent_id
node = Node()
populateFromMeta(node, comment.find('span', class_='comhead'), pparent_id)
node.body = ''.join(
[unicode(c).strip() for c in comment.find('span', class_='comment').contents])
node.body = node.body.replace('<font color="#000000">', '').replace('</font>', '')
node.put()
ids.append(node.key.id())
logging.info('Saved %d', node.key.id())
return ids
@ndb.transactional(xg=True, propagation=ndb.TransactionOptions.INDEPENDENT)
def processsOneNode(pending_node):
page = get('https://news.ycombinator.com/item?id=%d' % pending_node.id())
ids = extractUniqueIds(page)
node_count = 0
for t in page.find_all('table'):
try:
table_ids = parseTable(t, pending_node.id())
ids -= set(table_ids)
node_count += len(table_ids)
except Exception:
logging.exception('Parsing failed')
new_ids = skipExisting(ids)
ndb.non_transactional(ndb.put_multi)([
PendingNode(key=ndb.Key(PendingNode, i)) for i in new_ids])
logging.info('Discovered new nodes: %s', new_ids)
pending_node.delete()
logging.info('Processed %d', pending_node.id())
minMax = MinMax.INSTANCE_KEY.get()
if not minMax:
minMax = MinMax(key=MinMax.INSTANCE_KEY)
minMax.processed_nodes += node_count
minMax.put()
@ndb.non_transactional
def fetchNode():
pending_node = PendingNode.query().order(PendingNode.added_at).fetch(1, keys_only=True)
if len(pending_node) == 0: return
pending_node = pending_node[0]
processsOneNode(pending_node)
class CrawlingPhase(ndb.Model):
INSTANCE_KEY = ndb.Key('CrawlingPhase', 1)
_STEPS = [fetchFrontPage, fetchNewest, fetchMin] + [fetchNode for _ in xrange(0, 7)]
state = ndb.IntegerProperty(default=0, indexed=False)
@staticmethod
@ndb.transactional(xg=True, propagation=ndb.TransactionOptions.INDEPENDENT)
def runNext():
instance = CrawlingPhase.INSTANCE_KEY.get()
if not instance:
instance = CrawlingPhase(key=CrawlingPhase.INSTANCE_KEY)
step = CrawlingPhase._STEPS[instance.state]
instance.state = (instance.state + 1) % len(CrawlingPhase._STEPS)
instance.put()
try:
step()
except Exception:
logging.exception('Step %s failed', step)
class Crawler(webapp2.RequestHandler):
def get(self):
CrawlingPhase.runNext()
self.response.write('Done')
app = webapp2.WSGIApplication([
('/task/crawl', Crawler),
])
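# The '/task/crawl' handler is meant to be hit periodically; a minimal App Engine
# cron.yaml entry (assumed, not part of this file) could look like:
#   cron:
#   - description: advance the crawler one phase
#     url: /task/crawl
#     schedule: every 1 minutes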
| gpl-3.0 | 3,507,052,515,939,519,000 | 33.076613 | 102 | 0.636611 | false |
jdemon519/cfme_tests | sprout/appliances/tasks.py | 1 | 83964 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import diaper
import fauxfactory
import hashlib
import iso8601
import random
import re
import command
import yaml
from contextlib import closing
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django.core.mail import send_mail
from django.db import transaction
from django.db.models import Q
from django.utils import timezone
from celery import chain, chord, shared_task
from celery.exceptions import MaxRetriesExceededError
from datetime import datetime, timedelta
from functools import wraps
from lxml import etree
from novaclient.exceptions import OverLimit as OSOverLimit
from paramiko import SSHException
from urllib2 import urlopen, HTTPError
import socket
from appliances.models import (
Provider, Group, Template, Appliance, AppliancePool, DelayedProvisionTask,
MismatchVersionMailer, User, GroupShepherd)
from sprout import settings, redis
from sprout.irc_bot import send_message
from sprout.log import create_logger
from utils import conf
from utils.appliance import Appliance as CFMEAppliance
from utils.path import project_path
from utils.providers import get_mgmt
from utils.timeutil import parsetime
from utils.trackerbot import api, depaginate, parse_template
from utils.version import Version
from utils.wait import wait_for
LOCK_EXPIRE = 60 * 15 # 15 minutes
VERSION_REGEXPS = [
r"^cfme-(\d)(\d)(\d)(\d)(\d{2})", # 1.2.3.4.11
# newer format
r"cfme-(\d)(\d)(\d)[.](\d{2})-", # cfme-524.02- -> 5.2.4.2
r"cfme-(\d)(\d)(\d)[.](\d{2})[.](\d)-", # cfme-524.02.1- -> 5.2.4.2.1
# 4 digits
r"cfme-(?:nightly-)?(\d)(\d)(\d)(\d)-", # cfme-5242- -> 5.2.4.2
r"cfme-(\d)(\d)(\d)-(\d)-", # cfme-520-1- -> 5.2.0.1
# 5 digits (not very intelligent but no better solution so far)
r"cfme-(?:nightly-)?(\d)(\d)(\d)(\d{2})-", # cfme-53111- -> 5.3.1.11, cfme-53101 -> 5.3.1.1
]
VERSION_REGEXPS = map(re.compile, VERSION_REGEXPS)
VERSION_REGEXP_UPSTREAM = re.compile(r'^miq-stable-([^-]+)-')
TRACKERBOT_PAGINATE = 20
def retrieve_cfme_appliance_version(template_name):
"""If possible, retrieve the appliance's version from template's name."""
for regexp in VERSION_REGEXPS:
match = regexp.search(template_name)
if match is not None:
return ".".join(map(str, map(int, match.groups())))
else:
match = VERSION_REGEXP_UPSTREAM.search(template_name)
if match is not None:
return match.groups()[0]
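# Illustration (hypothetical template names): 'cfme-5242-0214' yields '5.2.4.2'
# via the 4-digit pattern above, while 'miq-stable-euwe-20170101' yields 'euwe'
# via the upstream pattern.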
def trackerbot():
return api()
def none_dict(l):
""""If the parameter passed is None, returns empty dict. Otherwise it passes through"""
if l is None:
return {}
else:
return l
def provider_error_logger():
return create_logger("provider_errors")
def logged_task(*args, **kwargs):
kwargs["bind"] = True
def f(task):
@wraps(task)
def wrapped_task(self, *args, **kwargs):
self.logger = create_logger(task)
try:
return task(self, *args, **kwargs)
except Exception as e:
self.logger.error(
"An exception occured when executing with args: %r kwargs: %r",
args, kwargs)
self.logger.exception(e)
raise
return shared_task(*args, **kwargs)(wrapped_task)
return f
def singleton_task(*args, **kwargs):
kwargs["bind"] = True
wait = kwargs.pop('wait', False)
wait_countdown = kwargs.pop('wait_countdown', 10)
wait_retries = kwargs.pop('wait_retries', 30)
def f(task):
@wraps(task)
def wrapped_task(self, *args, **kwargs):
self.logger = create_logger(task)
# Create hash of all args
digest_base = "/".join(str(arg) for arg in args)
keys = sorted(kwargs.keys())
digest_base += "//" + "/".join("{}={}".format(key, kwargs[key]) for key in keys)
digest = hashlib.sha256(digest_base).hexdigest()
lock_id = '{0}-lock-{1}'.format(self.name, digest)
if cache.add(lock_id, 'true', LOCK_EXPIRE):
try:
return task(self, *args, **kwargs)
except Exception as e:
self.logger.error(
"An exception occured when executing with args: %r kwargs: %r",
args, kwargs)
self.logger.exception(e)
raise
finally:
cache.delete(lock_id)
elif wait:
self.logger.info("Waiting for another instance of the task to end.")
self.retry(args=args, countdown=wait_countdown, max_retries=wait_retries)
return shared_task(*args, **kwargs)(wrapped_task)
return f
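# Sketch of the wait flag (illustrative task name): with wait=True, concurrent
# invocations retry until the lock is free instead of silently returning.
#
# @singleton_task(wait=True, wait_countdown=5, wait_retries=10)
# def refresh_provider(self, provider_id):
#     ...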
@singleton_task()
def kill_unused_appliances(self):
"""This is the watchdog, that guards the appliances that were given to users. If you forget
to prolong the lease time, this is the thing that will take the appliance off your hands
and kill it."""
with transaction.atomic():
for appliance in Appliance.objects.filter(marked_for_deletion=False):
if appliance.leased_until is not None and appliance.leased_until <= timezone.now():
self.logger.info("Watchdog found an appliance that is to be deleted: {}/{}".format(
appliance.id, appliance.name))
kill_appliance.delay(appliance.id)
@singleton_task()
def kill_appliance(self, appliance_id, replace_in_pool=False, minutes=60):
"""As-reliable-as-possible appliance deleter. Turns off, deletes the VM and deletes the object.
If the appliance was assigned to pool and we want to replace it, redo the provisioning.
"""
self.logger.info("Initiated kill of appliance {}".format(appliance_id))
appliance = Appliance.objects.get(id=appliance_id)
workflow = [
disconnect_direct_lun.si(appliance_id),
appliance_power_off.si(appliance_id),
kill_appliance_delete.si(appliance_id),
]
if replace_in_pool:
if appliance.appliance_pool is not None:
workflow.append(
replace_clone_to_pool.si(
appliance.template.version, appliance.template.date,
appliance.appliance_pool.id, minutes, appliance.template.id))
workflow = chain(*workflow)
workflow()
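# Note on the construction above: each ``.si()`` call builds an immutable Celery
# signature, so every step receives only the arguments given here and ignores the
# return value of the previous step; ``chain(*workflow)()`` then executes the steps
# strictly in order (disconnect LUN -> power off -> delete).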
@singleton_task()
def kill_appliance_delete(self, appliance_id, _delete_already_issued=False):
delete_issued = False
try:
appliance = Appliance.objects.get(id=appliance_id)
if appliance.provider_api.does_vm_exist(appliance.name):
appliance.set_status("Deleting the appliance from provider")
# If we haven't issued the delete order, do it now
if not _delete_already_issued:
appliance.provider_api.delete_vm(appliance.name)
delete_issued = True
# In any case, retry and wait for the VM to be deleted; next time, do not issue the delete again
self.retry(args=(appliance_id, True), countdown=5, max_retries=60)
appliance.delete()
except ObjectDoesNotExist:
# Appliance object already not there
return
except Exception as e:
try:
appliance.set_status("Could not delete appliance. Retrying.")
except UnboundLocalError:
return # The appliance is not there any more
# In case of error retry, and also specify whether the delete order was already issued
self.retry(
args=(appliance_id, _delete_already_issued or delete_issued),
exc=e, countdown=5, max_retries=60)
@singleton_task()
def poke_trackerbot(self):
"""This beat-scheduled task periodically polls the trackerbot if there are any new templates.
"""
template_usability = []
# Extract data from trackerbot
tbapi = trackerbot()
objects = depaginate(tbapi, tbapi.providertemplate().get(limit=TRACKERBOT_PAGINATE))["objects"]
per_group = {}
for obj in objects:
if obj["template"]["group"]["name"] == 'unknown':
continue
if obj["template"]["group"]["name"] not in per_group:
per_group[obj["template"]["group"]["name"]] = []
per_group[obj["template"]["group"]["name"]].append(obj)
# Sort them using the build date
for group in per_group.iterkeys():
per_group[group] = sorted(
per_group[group],
reverse=True, key=lambda o: o["template"]["datestamp"])
objects = []
# And interleave the groups
while any(per_group.values()):
for key in per_group.iterkeys():
if per_group[key]:
objects.append(per_group[key].pop(0))
for template in objects:
if template["provider"]["key"] not in conf.cfme_data.management_systems.keys():
# If we don't use that provider in yamls, set the template as not usable
# 1) It will prevent adding this template if not added
# 2) It'll mark the template as unusable if it already exists
template["usable"] = False
template_usability.append(
(
template["provider"]["key"],
template["template"]["name"],
template["usable"]
)
)
if not template["usable"]:
continue
group, create = Group.objects.get_or_create(id=template["template"]["group"]["name"])
# Check if the template is already obsolete
if group.template_obsolete_days is not None:
build_date = parsetime.from_iso_date(template["template"]["datestamp"])
if build_date <= (parsetime.today() - timedelta(days=group.template_obsolete_days)):
# It is already obsolete, so ignore it
continue
provider, create = Provider.objects.get_or_create(id=template["provider"]["key"])
if not provider.is_working:
continue
if "sprout" not in provider.provider_data:
continue
if not provider.provider_data.get("use_for_sprout", False):
continue
template_name = template["template"]["name"]
ga_released = template['template']['ga_released']
date = parse_template(template_name).datestamp
if not date:
# Not a CFME/MIQ template, ignore it.
continue
# Original one
original_template = None
try:
original_template = Template.objects.get(
provider=provider, template_group=group, original_name=template_name,
name=template_name, preconfigured=False)
if original_template.ga_released != ga_released:
original_template.ga_released = ga_released
original_template.save()
except ObjectDoesNotExist:
if template_name in provider.templates:
date = parse_template(template_name).datestamp
if date is None:
self.logger.warning(
"Ignoring template {} because it does not have a date!".format(
template_name))
continue
template_version = retrieve_cfme_appliance_version(template_name)
if template_version is None:
# Make up a faux version
# First 3 fields of version get parsed as a zstream
# therefore ... makes it a "nil" stream
template_version = "...{}".format(date.strftime("%Y%m%d"))
with transaction.atomic():
tpl = Template(
provider=provider, template_group=group, original_name=template_name,
name=template_name, preconfigured=False, date=date,
version=template_version, ready=True, exists=True, usable=True)
tpl.save()
original_template = tpl
self.logger.info("Created a new template #{}".format(tpl.id))
# If the provider is set to not preconfigure templates, do not bother even doing it.
if provider.num_simultaneous_configuring > 0:
# Preconfigured one
try:
preconfigured_template = Template.objects.get(
provider=provider, template_group=group, original_name=template_name,
preconfigured=True)
if preconfigured_template.ga_released != ga_released:
preconfigured_template.ga_released = ga_released
preconfigured_template.save()
except ObjectDoesNotExist:
if template_name in provider.templates:
original_id = original_template.id if original_template is not None else None
create_appliance_template.delay(
provider.id, group.id, template_name, source_template_id=original_id)
# If any of the templates becomes unusable, let sprout know about it
# Similarly if some of them become usable ...
for provider_id, template_name, usability in template_usability:
provider, create = Provider.objects.get_or_create(id=provider_id)
with transaction.atomic():
for template in Template.objects.filter(provider=provider, original_name=template_name):
template.usable = usability
template.save()
# Kill all shepherd appliances if they were accidentally spun up
if not usability:
for appliance in Appliance.objects.filter(
template=template, marked_for_deletion=False,
appliance_pool=None):
Appliance.kill(appliance)
@logged_task()
def create_appliance_template(self, provider_id, group_id, template_name, source_template_id=None):
"""This task creates a template from a fresh CFME template. In case of fatal error during the
operation, the template object is deleted to make sure the operation will be retried next time
when poke_trackerbot runs."""
provider = Provider.objects.get(id=provider_id)
provider.cleanup() # Precaution
group = Group.objects.get(id=group_id)
with transaction.atomic():
# Limit the number of concurrent template configurations
if provider.remaining_configuring_slots == 0:
return False # It will be kicked again when trackerbot gets poked
try:
Template.objects.get(
template_group=group, provider=provider, original_name=template_name,
preconfigured=True)
return False
except ObjectDoesNotExist:
pass
# Fire off the template preparation
date = parse_template(template_name).datestamp
if not date:
return
template_version = retrieve_cfme_appliance_version(template_name)
if template_version is None:
# Make up a faux version
# First 3 fields of version get parsed as a zstream
# therefore ... makes it a "nil" stream
template_version = "...{}".format(date.strftime("%Y%m%d"))
new_template_name = settings.TEMPLATE_FORMAT.format(
group=group.id, date=date.strftime("%y%m%d"), rnd=fauxfactory.gen_alphanumeric(8))
if provider.template_name_length is not None:
allowed_length = provider.template_name_length
# There is some limit
if len(new_template_name) > allowed_length:
# Cut it down
randoms_length = len(new_template_name.rsplit("_", 1)[-1])
minimum_length = (len(new_template_name) - randoms_length) + 1  # One random char must remain
if minimum_length <= allowed_length:
# Just cut it
new_template_name = new_template_name[:allowed_length]
else:
# Another solution
new_template_name = settings.TEMPLATE_FORMAT.format(
group=group.id[:2], date=date.strftime("%y%m%d"), # Use only first 2 of grp
rnd=fauxfactory.gen_alphanumeric(2)) # And just 2 chars random
# TODO: If anything larger comes, do fix that!
if source_template_id is not None:
try:
source_template = Template.objects.get(id=source_template_id)
except ObjectDoesNotExist:
source_template = None
else:
source_template = None
template = Template(
provider=provider, template_group=group, name=new_template_name, date=date,
version=template_version, original_name=template_name, parent_template=source_template)
template.save()
workflow = chain(
prepare_template_deploy.si(template.id),
prepare_template_verify_version.si(template.id),
prepare_template_configure.si(template.id),
prepare_template_seal.si(template.id),
prepare_template_poweroff.si(template.id),
prepare_template_finish.si(template.id),
)
workflow.link_error(prepare_template_delete_on_error.si(template.id))
workflow()
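# ``link_error`` attaches a failure callback to the whole chain: if any of the
# preparation steps above fails permanently, prepare_template_delete_on_error tears
# down the half-built template so that the next poke_trackerbot run can retry the
# preparation from scratch (see the docstring of create_appliance_template).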
@singleton_task()
def prepare_template_deploy(self, template_id):
template = Template.objects.get(id=template_id)
try:
if not template.exists_in_provider:
template.set_status("Deploying the template.")
provider_data = template.provider.provider_data
kwargs = provider_data["sprout"]
kwargs["power_on"] = True
if "allowed_datastores" not in kwargs and "allowed_datastores" in provider_data:
kwargs["allowed_datastores"] = provider_data["allowed_datastores"]
self.logger.info("Deployment kwargs: {}".format(repr(kwargs)))
template.provider_api.deploy_template(
template.original_name, vm_name=template.name, **kwargs)
else:
template.set_status("Waiting for deployment to be finished.")
template.provider_api.wait_vm_running(template.name)
except Exception as e:
template.set_status(
"Could not properly deploy the template. Retrying. {}: {}".format(
type(e).__name__, str(e)))
self.logger.exception(e)
self.retry(args=(template_id,), exc=e, countdown=10, max_retries=5)
else:
template.set_status("Template deployed.")
@singleton_task()
def prepare_template_verify_version(self, template_id):
template = Template.objects.get(id=template_id)
template.set_status("Verifying version.")
appliance = CFMEAppliance(template.provider_name, template.name, container=template.container)
appliance.ipapp.wait_for_ssh()
try:
true_version = appliance.version
except Exception as e:
template.set_status("Some SSH error happened during appliance version check.")
self.retry(args=(template_id,), exc=e, countdown=20, max_retries=5)
supposed_version = Version(template.version)
if true_version is None or true_version.vstring == 'master':
return
if true_version != supposed_version:
# Check if the difference is not just in the suffixes, which can be the case ...
t = str(true_version)
s = str(supposed_version)
if supposed_version.version == true_version.version or t.startswith(s):
# The two have the same version but different suffixes, so apply the suffix to the
# template object. This also covers the case when the supposed version is incomplete,
# in which case we use the detected version.
with transaction.atomic():
template.version = t
template.save()
if template.parent_template is not None:
# In case we have a parent template, update the version there too.
if template.version != template.parent_template.version:
pt = template.parent_template
pt.version = template.version
pt.save()
return # no need to continue with spamming process
# SPAM SPAM SPAM!
with transaction.atomic():
mismatch_in_db = MismatchVersionMailer.objects.filter(
provider=template.provider,
template_name=template.original_name,
supposed_version=supposed_version,
actual_version=true_version)
if not mismatch_in_db:
mismatch = MismatchVersionMailer(
provider=template.provider,
template_name=template.original_name,
supposed_version=supposed_version,
actual_version=true_version)
mismatch.save()
# Run the task to mail the problem
mailer_version_mismatch.delay()
raise Exception("Detected version mismatch!")
@singleton_task()
def prepare_template_configure(self, template_id):
template = Template.objects.get(id=template_id)
template.set_status("Customization started.")
appliance = CFMEAppliance(template.provider_name, template.name, container=template.container)
try:
appliance.configure(
setup_fleece=False,
log_callback=lambda s: template.set_status("Customization progress: {}".format(s)))
except Exception as e:
template.set_status("Could not properly configure the CFME. Retrying.")
self.retry(args=(template_id,), exc=e, countdown=10, max_retries=5)
else:
template.set_status("Template configuration was done.")
@singleton_task()
def prepare_template_seal(self, template_id):
template = Template.objects.get(id=template_id)
template.set_status("Sealing template.")
try:
template.cfme.ipapp.seal_for_templatizing()
except Exception as e:
template.set_status("Could not seal the template. Retrying.")
self.retry(
args=(template_id,), exc=e, countdown=10, max_retries=5)
else:
template.set_status("Template sealed.")
@singleton_task()
def prepare_template_poweroff(self, template_id):
template = Template.objects.get(id=template_id)
template.set_status("Powering off")
try:
template.provider_api.stop_vm(template.name)
template.provider_api.wait_vm_stopped(template.name)
except Exception as e:
template.set_status("Could not power off the appliance. Retrying.")
self.retry(args=(template_id,), exc=e, countdown=10, max_retries=5)
else:
template.set_status("Powered off.")
@singleton_task()
def prepare_template_finish(self, template_id):
template = Template.objects.get(id=template_id)
template.set_status("Finishing template creation.")
try:
if template.temporary_name is None:
tmp_name = "templatize_{}".format(fauxfactory.gen_alphanumeric(8))
Template.objects.get(id=template_id).temporary_name = tmp_name # metadata, autosave
else:
tmp_name = template.temporary_name
template.provider_api.mark_as_template(
template.name, temporary_name=tmp_name, delete_on_error=False)
with transaction.atomic():
template = Template.objects.get(id=template_id)
template.ready = True
template.exists = True
template.save()
del template.temporary_name
except Exception as e:
template.set_status("Could not mark the appliance as template. Retrying.")
self.retry(args=(template_id,), exc=e, countdown=10, max_retries=5)
else:
template.set_status("Template preparation finished.")
@singleton_task()
def prepare_template_delete_on_error(self, template_id):
try:
template = Template.objects.get(id=template_id)
except ObjectDoesNotExist:
return True
template.set_status("Template creation failed. Deleting it.")
try:
if template.provider_api.does_vm_exist(template.name):
template.provider_api.delete_vm(template.name)
wait_for(template.provider_api.does_vm_exist, [template.name], timeout='5m', delay=10)
if template.provider_api.does_template_exist(template.name):
template.provider_api.delete_template(template.name)
wait_for(
template.provider_api.does_template_exist, [template.name], timeout='5m', delay=10)
if (template.temporary_name is not None and
template.provider_api.does_template_exist(template.temporary_name)):
template.provider_api.delete_template(template.temporary_name)
wait_for(
template.provider_api.does_template_exist,
[template.temporary_name], timeout='5m', delay=10)
template.delete()
except Exception as e:
self.retry(args=(template_id,), exc=e, countdown=60, max_retries=5)
@logged_task()
def request_appliance_pool(self, appliance_pool_id, time_minutes):
"""This task gives maximum possible amount of spinned-up appliances to the specified pool and
then if there is need to spin up another appliances, it spins them up via clone_template_to_pool
task."""
self.logger.info(
"Appliance pool {} requested for {} minutes.".format(appliance_pool_id, time_minutes))
pool = AppliancePool.objects.get(id=appliance_pool_id)
n = Appliance.give_to_pool(pool)
for _ in range(pool.total_count - n):
tpls = pool.possible_provisioning_templates
if tpls:
template_id = tpls[0].id
clone_template_to_pool(template_id, pool.id, time_minutes)
else:
with transaction.atomic():
task = DelayedProvisionTask(pool=pool, lease_time=time_minutes)
task.save()
apply_lease_times_after_pool_fulfilled.delay(appliance_pool_id, time_minutes)
@singleton_task()
def apply_lease_times_after_pool_fulfilled(self, appliance_pool_id, time_minutes):
pool = AppliancePool.objects.get(id=appliance_pool_id)
if pool.fulfilled:
for appliance in pool.appliances:
apply_lease_times.delay(appliance.id, time_minutes)
rename_appliances_for_pool.delay(pool.id)
with transaction.atomic():
pool.finished = True
pool.save()
else:
# Look whether we can swap any provisioning appliance with some in shepherd
unfinished = list(Appliance.objects.filter(appliance_pool=pool, ready=False).all())
random.shuffle(unfinished)
if len(unfinished) > 0:
n = Appliance.give_to_pool(pool, len(unfinished))
with transaction.atomic():
for _ in range(n):
appl = unfinished.pop()
appl.appliance_pool = None
appl.save()
try:
self.retry(args=(appliance_pool_id, time_minutes), countdown=30, max_retries=120)
except MaxRetriesExceededError: # Bad luck, pool fulfillment failed. So destroy it.
pool.logger.error("Waiting for fulfillment failed. Initiating the destruction process.")
pool.kill()
@singleton_task()
def process_delayed_provision_tasks(self):
"""This picks up the provisioning tasks that were delayed due to ocncurrency limit of provision.
Goes one task by one and when some of them can be provisioned, it starts the provisioning and
then deletes the task.
"""
for task in DelayedProvisionTask.objects.order_by("id"):
if task.pool.not_needed_anymore:
task.delete()
continue
# Try retrieve from shepherd
appliances_given = Appliance.give_to_pool(task.pool, 1)
if appliances_given == 0:
# No free appliance in shepherd, so do it on our own
tpls = task.pool.possible_provisioning_templates
if task.provider_to_avoid is not None:
filtered_tpls = filter(lambda tpl: tpl.provider != task.provider_to_avoid, tpls)
if filtered_tpls:
# There are other providers to provision on, so try one of them
tpls = filtered_tpls
# If there is no other provider to provision on, we will use the original list.
# This will cause additional rejects until the provider quota is met
if tpls:
clone_template_to_pool(tpls[0].id, task.pool.id, task.lease_time)
task.delete()
else:
# Try freeing up some space in provider
for provider in task.pool.possible_providers:
appliances = provider.free_shepherd_appliances.exclude(
task.pool.appliance_container_q,
**task.pool.appliance_filter_params)
if appliances:
Appliance.kill(random.choice(appliances))
break # Just one
else:
# There was a free appliance in shepherd, so we took it and we don't need this task any more
task.delete()
@logged_task()
def replace_clone_to_pool(
self, version, date, appliance_pool_id, time_minutes, exclude_template_id):
appliance_pool = AppliancePool.objects.get(id=appliance_pool_id)
if appliance_pool.not_needed_anymore:
return
exclude_template = Template.objects.get(id=exclude_template_id)
templates = appliance_pool.possible_templates
templates_excluded = filter(lambda tpl: tpl != exclude_template, templates)
if templates_excluded:
template = random.choice(templates_excluded)
else:
template = exclude_template # :( no other template to use
clone_template_to_pool(template.id, appliance_pool_id, time_minutes)
def clone_template_to_pool(template_id, appliance_pool_id, time_minutes):
template = Template.objects.get(id=template_id)
new_appliance_name = settings.APPLIANCE_FORMAT.format(
group=template.template_group.id,
date=template.date.strftime("%y%m%d"),
rnd=fauxfactory.gen_alphanumeric(8))
with transaction.atomic():
pool = AppliancePool.objects.get(id=appliance_pool_id)
if pool.not_needed_anymore:
return
# Apply also username
new_appliance_name = "{}_{}".format(pool.owner.username, new_appliance_name)
appliance = Appliance(template=template, name=new_appliance_name, appliance_pool=pool)
appliance.save()
# Set the pool to these params to keep the appliances with the same versions/dates
pool.version = template.version
pool.date = template.date
pool.save()
clone_template_to_appliance.delay(appliance.id, time_minutes, pool.yum_update)
@logged_task()
def apply_lease_times(self, appliance_id, time_minutes):
self.logger.info(
"Applying lease time {} minutes on appliance {}".format(time_minutes, appliance_id))
with transaction.atomic():
appliance = Appliance.objects.get(id=appliance_id)
appliance.datetime_leased = timezone.now()
appliance.leased_until = appliance.datetime_leased + timedelta(minutes=int(time_minutes))
appliance.save()
@logged_task()
def clone_template(self, template_id):
self.logger.info("Cloning template {}".format(template_id))
template = Template.objects.get(id=template_id)
new_appliance_name = settings.APPLIANCE_FORMAT.format(
group=template.template_group.id,
date=template.date.strftime("%y%m%d"),
rnd=fauxfactory.gen_alphanumeric(8))
appliance = Appliance(template=template, name=new_appliance_name)
appliance.save()
clone_template_to_appliance.delay(appliance.id)
@singleton_task()
def clone_template_to_appliance(self, appliance_id, lease_time_minutes=None, yum_update=False):
appliance = Appliance.objects.get(id=appliance_id)
appliance.set_status("Beginning deployment process")
tasks = [
clone_template_to_appliance__clone_template.si(appliance_id, lease_time_minutes),
clone_template_to_appliance__wait_present.si(appliance_id),
appliance_power_on.si(appliance_id),
]
if yum_update:
tasks.append(appliance_yum_update.si(appliance_id))
tasks.append(appliance_reboot.si(appliance_id, if_needs_restarting=True))
if appliance.preconfigured:
tasks.append(wait_appliance_ready.si(appliance_id))
else:
tasks.append(mark_appliance_ready.si(appliance_id))
workflow = chain(*tasks)
if Appliance.objects.get(id=appliance_id).appliance_pool is not None:
# Case of the appliance pool
if Appliance.objects.get(id=appliance_id).appliance_pool.not_needed_anymore:
return
# TODO: Make replace_in_pool work again
workflow.link_error(
kill_appliance.si(appliance_id, replace_in_pool=False, minutes=lease_time_minutes))
else:
# Case of shepherd
workflow.link_error(kill_appliance.si(appliance_id))
workflow()
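# Shape of the assembled deployment chain (bracketed steps are conditional):
#   clone_template -> wait_present -> power_on [-> yum_update -> reboot]
#     -> wait_appliance_ready (preconfigured) | mark_appliance_ready (bare template)
# On failure the error link kills the appliance, so no half-deployed VM is leaked.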
@singleton_task()
def clone_template_to_appliance__clone_template(self, appliance_id, lease_time_minutes):
try:
appliance = Appliance.objects.get(id=appliance_id)
except ObjectDoesNotExist:
# source objects are not present, terminating the chain
self.request.callbacks[:] = []
return
if appliance.appliance_pool is not None:
if appliance.appliance_pool.not_needed_anymore:
# Terminate task chain
self.request.callbacks[:] = []
kill_appliance.delay(appliance_id)
return
appliance.provider.cleanup()
try:
if not appliance.provider_api.does_vm_exist(appliance.name):
appliance.set_status("Beginning template clone.")
provider_data = appliance.template.provider.provider_data
kwargs = provider_data["sprout"]
kwargs["power_on"] = False
if "allowed_datastores" not in kwargs and "allowed_datastores" in provider_data:
kwargs["allowed_datastores"] = provider_data["allowed_datastores"]
if appliance.appliance_pool is not None:
if appliance.appliance_pool.override_memory is not None:
kwargs['ram'] = appliance.appliance_pool.override_memory
if appliance.appliance_pool.override_cpu is not None:
kwargs['cpu'] = appliance.appliance_pool.override_cpu
appliance.provider_api.deploy_template(
appliance.template.name, vm_name=appliance.name,
progress_callback=lambda progress: appliance.set_status(
"Deploy progress: {}".format(progress)),
**kwargs)
except Exception as e:
messages = {"limit", "cannot add", "quota"}
if isinstance(e, OSOverLimit):
appliance.set_status("Hit OpenStack provisioning quota, trying putting it aside ...")
elif any(message in str(e).lower() for message in messages):
appliance.set_status("Provider has some troubles, putting it aside ... {}/{}".format(
type(e).__name__, str(e)
))
provider_error_logger().exception(e)
else:
# Something got screwed really bad
appliance.set_status("Error happened: {}({})".format(type(e).__name__, str(e)))
self.retry(args=(appliance_id, lease_time_minutes), exc=e, countdown=60, max_retries=5)
# Ignore that and provision it somewhere else
if appliance.appliance_pool:
# We can put it aside for a while - a delayed provision task will retry it on another provider
self.request.callbacks[:] = [] # Quit this chain
pool = appliance.appliance_pool
try:
if appliance.provider_api.does_vm_exist(appliance.name):
# Better to check it, you never know when does that fail
appliance.provider_api.delete_vm(appliance.name)
wait_for(
appliance.provider_api.does_vm_exist,
[appliance.name], timeout='5m', delay=10)
except Exception:
pass # Diaper here
appliance.delete(do_not_touch_ap=True)
with transaction.atomic():
new_task = DelayedProvisionTask(
pool=pool, lease_time=lease_time_minutes,
provider_to_avoid=appliance.template.provider)
new_task.save()
return
else:
# We cannot put it aside, so just try that again
self.retry(args=(appliance_id, lease_time_minutes), exc=e, countdown=60, max_retries=5)
else:
appliance.set_status("Template cloning finished. Refreshing provider VMs to get UUID.")
refresh_appliances_provider.delay(appliance.provider.id)
@singleton_task()
def clone_template_to_appliance__wait_present(self, appliance_id):
try:
appliance = Appliance.objects.get(id=appliance_id)
except ObjectDoesNotExist:
# source objects are not present, terminating the chain
self.request.callbacks[:] = []
return
if appliance.appliance_pool is not None:
if appliance.appliance_pool.not_needed_anymore:
# Terminate task chain
self.request.callbacks[:] = []
kill_appliance.delay(appliance_id)
return
try:
appliance.set_status("Waiting for the appliance to become visible in provider.")
if not appliance.provider_api.does_vm_exist(appliance.name):
self.retry(args=(appliance_id,), countdown=20, max_retries=30)
except Exception as e:
provider_error_logger().error("Exception {}: {}".format(type(e).__name__, str(e)))
self.retry(args=(appliance_id,), exc=e, countdown=20, max_retries=30)
else:
appliance.set_status("Template was successfully cloned.")
with diaper:
appliance.synchronize_metadata()
@singleton_task()
def mark_appliance_ready(self, appliance_id):
with transaction.atomic():
appliance = Appliance.objects.get(id=appliance_id)
appliance.ready = True
appliance.save()
Appliance.objects.get(id=appliance_id).set_status("Appliance was marked as ready")
@singleton_task()
def appliance_power_on(self, appliance_id):
try:
appliance = Appliance.objects.get(id=appliance_id)
except ObjectDoesNotExist:
# source objects are not present
return
try:
if appliance.provider_api.is_vm_running(appliance.name):
try:
current_ip = appliance.provider_api.current_ip_address(appliance.name)
except Exception:
current_ip = None
if current_ip is not None:
# IP present
Appliance.objects.get(id=appliance_id).set_status("Appliance was powered on.")
with transaction.atomic():
appliance = Appliance.objects.get(id=appliance_id)
appliance.ip_address = current_ip
appliance.set_power_state(Appliance.Power.ON)
appliance.save()
if appliance.containerized:
with appliance.ipapp.ssh_client as ssh:
# Fire up the container
ssh.run_command('cfme-start', ensure_host=True)
# VM is running now.
sync_appliance_hw.delay(appliance.id)
sync_provider_hw.delay(appliance.template.provider.id)
return
else:
# IP not present yet
Appliance.objects.get(id=appliance_id).set_status("Appliance waiting for IP.")
self.retry(args=(appliance_id, ), countdown=20, max_retries=40)
elif not appliance.provider_api.in_steady_state(appliance.name):
appliance.set_status("Waiting for appliance to be steady (current state: {}).".format(
appliance.provider_api.vm_status(appliance.name)))
self.retry(args=(appliance_id, ), countdown=20, max_retries=40)
else:
appliance.set_status("Powering on.")
appliance.provider_api.start_vm(appliance.name)
self.retry(args=(appliance_id, ), countdown=20, max_retries=40)
except Exception as e:
provider_error_logger().error("Exception {}: {}".format(type(e).__name__, str(e)))
self.retry(args=(appliance_id, ), exc=e, countdown=20, max_retries=30)
@singleton_task()
def appliance_reboot(self, appliance_id, if_needs_restarting=False):
try:
appliance = Appliance.objects.get(id=appliance_id)
except ObjectDoesNotExist:
# source objects are not present
return
try:
if if_needs_restarting:
with appliance.ssh as ssh:
if int(ssh.run_command("needs-restarting | wc -l").output.strip()) == 0:
return # No reboot needed
with transaction.atomic():
appliance = Appliance.objects.get(id=appliance_id)
appliance.set_power_state(Appliance.Power.REBOOTING)
appliance.save()
appliance.ipapp.reboot(wait_for_web_ui=False, log_callback=appliance.set_status)
with transaction.atomic():
appliance = Appliance.objects.get(id=appliance_id)
appliance.set_power_state(Appliance.Power.ON)
appliance.save()
except Exception as e:
provider_error_logger().error("Exception {}: {}".format(type(e).__name__, str(e)))
self.retry(args=(appliance_id, ), exc=e, countdown=20, max_retries=30)
@singleton_task()
def appliance_power_off(self, appliance_id):
try:
appliance = Appliance.objects.get(id=appliance_id)
except ObjectDoesNotExist:
# source objects are not present
return
try:
if appliance.provider_api.is_vm_stopped(appliance.name):
Appliance.objects.get(id=appliance_id).set_status("Appliance was powered off.")
with transaction.atomic():
appliance = Appliance.objects.get(id=appliance_id)
appliance.set_power_state(Appliance.Power.OFF)
appliance.ready = False
appliance.save()
sync_provider_hw.delay(appliance.template.provider.id)
return
elif appliance.provider_api.is_vm_suspended(appliance.name):
appliance.set_status("Starting appliance from suspended state to properly off it.")
appliance.provider_api.start_vm(appliance.name)
self.retry(args=(appliance_id,), countdown=20, max_retries=40)
elif not appliance.provider_api.in_steady_state(appliance.name):
appliance.set_status("Waiting for appliance to be steady (current state: {}).".format(
appliance.provider_api.vm_status(appliance.name)))
self.retry(args=(appliance_id,), countdown=20, max_retries=40)
else:
appliance.set_status("Powering off.")
appliance.provider_api.stop_vm(appliance.name)
self.retry(args=(appliance_id,), countdown=20, max_retries=40)
except Exception as e:
provider_error_logger().error("Exception {}: {}".format(type(e).__name__, str(e)))
self.retry(args=(appliance_id,), exc=e, countdown=20, max_retries=40)
@singleton_task()
def appliance_suspend(self, appliance_id):
try:
appliance = Appliance.objects.get(id=appliance_id)
except ObjectDoesNotExist:
# source objects are not present
return
try:
if appliance.provider_api.is_vm_suspended(appliance.name):
Appliance.objects.get(id=appliance_id).set_status("Appliance was suspended.")
with transaction.atomic():
appliance = Appliance.objects.get(id=appliance_id)
appliance.set_power_state(Appliance.Power.SUSPENDED)
appliance.ready = False
appliance.save()
sync_provider_hw.delay(appliance.template.provider.id)
return
elif not appliance.provider_api.in_steady_state(appliance.name):
appliance.set_status("Waiting for appliance to be steady (current state: {}).".format(
appliance.provider_api.vm_status(appliance.name)))
self.retry(args=(appliance_id,), countdown=20, max_retries=30)
else:
appliance.set_status("Suspending.")
appliance.provider_api.suspend_vm(appliance.name)
self.retry(args=(appliance_id,), countdown=20, max_retries=30)
except Exception as e:
provider_error_logger().error("Exception {}: {}".format(type(e).__name__, str(e)))
self.retry(args=(appliance_id,), exc=e, countdown=20, max_retries=30)
@singleton_task()
def retrieve_appliance_ip(self, appliance_id):
"""Updates appliance's IP address."""
try:
appliance = Appliance.objects.get(id=appliance_id)
appliance.set_status("Retrieving IP address.")
ip_address = appliance.provider_api.current_ip_address(appliance.name)
if ip_address is None:
self.retry(args=(appliance_id,), countdown=30, max_retries=20)
with transaction.atomic():
appliance = Appliance.objects.get(id=appliance_id)
appliance.ip_address = ip_address
appliance.save()
except ObjectDoesNotExist:
# source object is not present, terminating
return
else:
appliance.set_status("IP address retrieved.")
@singleton_task()
def refresh_appliances(self):
"""Dispatches the appliance refresh process among the providers"""
self.logger.info("Initiating regular appliance provider refresh")
for provider in Provider.objects.filter(working=True, disabled=False):
refresh_appliances_provider.delay(provider.id)
@singleton_task(soft_time_limit=180)
def refresh_appliances_provider(self, provider_id):
"""Downloads the list of VMs from the provider, then matches them by name or UUID with
appliances stored in database.
"""
self.logger.info("Refreshing appliances in {}".format(provider_id))
provider = Provider.objects.get(id=provider_id)
if not hasattr(provider.api, "all_vms"):
# Ignore this provider
return
vms = provider.api.all_vms()
dict_vms = {}
uuid_vms = {}
for vm in vms:
dict_vms[vm.name] = vm
if vm.uuid:
uuid_vms[vm.uuid] = vm
for appliance in Appliance.objects.filter(template__provider=provider):
if appliance.uuid is not None and appliance.uuid in uuid_vms:
vm = uuid_vms[appliance.uuid]
# Matched by UUID; update the name in case it changed
appliance.name = vm.name
appliance.ip_address = vm.ip
appliance.set_power_state(Appliance.POWER_STATES_MAPPING.get(
vm.power_state, Appliance.Power.UNKNOWN))
appliance.save()
elif appliance.name in dict_vms:
vm = dict_vms[appliance.name]
# Matched by name; retrieve and store the UUID
appliance.uuid = vm.uuid
appliance.ip_address = vm.ip
appliance.set_power_state(Appliance.POWER_STATES_MAPPING.get(
vm.power_state, Appliance.Power.UNKNOWN))
appliance.save()
self.logger.info("Retrieved UUID for appliance {}/{}: {}".format(
appliance.id, appliance.name, appliance.uuid))
else:
# Orphaned :(
appliance.set_power_state(Appliance.Power.ORPHANED)
appliance.save()
@singleton_task()
def check_templates(self):
self.logger.info("Initiated a periodic template check")
for provider in Provider.objects.all():
check_templates_in_provider.delay(provider.id)
@singleton_task(soft_time_limit=180)
def check_templates_in_provider(self, provider_id):
self.logger.info("Initiated a periodic template check for {}".format(provider_id))
provider = Provider.objects.get(id=provider_id)
# Get templates and update metadata
try:
templates = map(str, provider.api.list_template())
except Exception:
provider.working = False
provider.save()
else:
provider.working = True
provider.save()
with provider.edit_metadata as metadata:
metadata["templates"] = templates
if not provider.working:
return
# Check Sprout template existence
# expiration_time = (timezone.now() - timedelta(**settings.BROKEN_APPLIANCE_GRACE_TIME))
for template in Template.objects.filter(provider=provider):
with transaction.atomic():
tpl = Template.objects.get(pk=template.pk)
exists = tpl.name in templates
tpl.exists = exists
tpl.save()
# if not exists:
# if len(Appliance.objects.filter(template=template).all()) == 0\
# and template.status_changed < expiration_time:
# # No other appliance is made from this template so no need to keep it
# with transaction.atomic():
# tpl = Template.objects.get(pk=template.pk)
# tpl.delete()
@singleton_task()
def delete_nonexistent_appliances(self):
"""Goes through orphaned appliances' objects and deletes them from the database."""
expiration_time = (timezone.now() - timedelta(**settings.ORPHANED_APPLIANCE_GRACE_TIME))
for appliance in Appliance.objects.filter(ready=True).all():
if appliance.name in redis.renaming_appliances:
continue
if appliance.power_state == Appliance.Power.ORPHANED:
if appliance.power_state_changed > expiration_time:
# Ignore it for now
continue
self.logger.info(
"I will delete orphaned appliance {}/{}".format(appliance.id, appliance.name))
try:
appliance.delete()
except ObjectDoesNotExist as e:
if "AppliancePool" in str(e):
# Someone managed to delete the appliance pool before
appliance.appliance_pool = None
appliance.save()
appliance.delete()
else:
raise # No diaper pattern here!
# If something happened to the appliance provisioning process, just delete it to remove
# the garbage. It will be respinned again by shepherd.
# Grace time is specified in BROKEN_APPLIANCE_GRACE_TIME
expiration_time = (timezone.now() - timedelta(**settings.BROKEN_APPLIANCE_GRACE_TIME))
for appliance in Appliance.objects.filter(ready=False, marked_for_deletion=False).all():
if appliance.status_changed < expiration_time:
self.logger.info("Killing broken appliance {}/{}".format(appliance.id, appliance.name))
Appliance.kill(appliance) # Use kill because the appliance may still exist
# And now - if something happened during appliance deletion, call kill again
for appliance in Appliance.objects.filter(
marked_for_deletion=True, status_changed__lt=expiration_time).all():
with transaction.atomic():
appl = Appliance.objects.get(pk=appliance.pk)
appl.marked_for_deletion = False
appl.save()
self.logger.info(
"Trying to kill unkilled appliance {}/{}".format(appliance.id, appliance.name))
Appliance.kill(appl)
def generic_shepherd(self, preconfigured):
"""This task takes care of having the required templates spinned into required number of
appliances. For each template group, it keeps the last template's appliances spinned up in
required quantity. If new template comes out of the door, it automatically kills the older
running template's appliances and spins up new ones. Sorts the groups by the fulfillment."""
for gs in sorted(
GroupShepherd.objects.all(), key=lambda g: g.get_fulfillment_percentage(preconfigured)):
prov_filter = {'provider__user_groups': gs.user_group}
group_versions = Template.get_versions(
template_group=gs.template_group, ready=True, usable=True, preconfigured=preconfigured,
container=None, **prov_filter)
group_dates = Template.get_dates(
template_group=gs.template_group, ready=True, usable=True, preconfigured=preconfigured,
container=None, **prov_filter)
if group_versions:
# Downstream - by version (downstream releases)
version = group_versions[0]
# Find the latest date (one version can have new build)
dates = Template.get_dates(
template_group=gs.template_group, ready=True, usable=True,
version=group_versions[0], preconfigured=preconfigured, container=None,
**prov_filter)
if not dates:
# No template yet?
continue
date = dates[0]
filter_keep = {"version": version, "date": date, 'container': None}
filters_kill = []
for kill_date in dates[1:]:
filters_kill.append({"version": version, "date": kill_date})
for kill_version in group_versions[1:]:
filters_kill.append({"version": kill_version})
elif group_dates:
# Upstream - by date (upstream nightlies)
filter_keep = {"date": group_dates[0], 'container': None}
filters_kill = [{"date": v} for v in group_dates[1:]]
else:
continue # Ignore this group, no templates detected yet
filter_keep.update(prov_filter)
for filt in filters_kill:
filt.update(prov_filter)
# Keeping current appliances
# Retrieve list of all templates for given group
# I know joins might be a bit better solution but I'll leave that for later.
possible_templates = list(
Template.objects.filter(
usable=True, ready=True, template_group=gs.template_group,
preconfigured=preconfigured, **filter_keep).all())
# If it can be deployed, it must exist
possible_templates_for_provision = filter(lambda tpl: tpl.exists, possible_templates)
appliances = []
for template in possible_templates:
appliances.extend(
Appliance.objects.filter(
template=template, appliance_pool=None, marked_for_deletion=False))
# If we then need to kill some appliances, better kill the eldest. status_changed
# says which one was provisioned when, because nothing else touches that field.
appliances.sort(key=lambda appliance: appliance.status_changed)
pool_size = gs.template_pool_size if preconfigured else gs.unconfigured_template_pool_size
if len(appliances) < pool_size and possible_templates_for_provision:
# There must be some templates in order to run the provisioning
# Provision ONE appliance at a time for each group; that way it is possible to maintain
# reasonable balancing
with transaction.atomic():
# Now look for templates that are on non-busy providers
tpl_free = filter(
lambda t: t.provider.free,
possible_templates_for_provision)
if tpl_free:
# Pick the template on the least loaded provider and derive the appliance
# name from the chosen template rather than from a stale loop variable
chosen_template = sorted(tpl_free, key=lambda t: t.provider.appliance_load)[0]
new_appliance_name = settings.APPLIANCE_FORMAT.format(
group=chosen_template.template_group.id,
date=chosen_template.date.strftime("%y%m%d"),
rnd=fauxfactory.gen_alphanumeric(8))
appliance = Appliance(template=chosen_template, name=new_appliance_name)
appliance.save()
if tpl_free:
self.logger.info(
"Adding an appliance to shepherd: {}/{}".format(appliance.id, appliance.name))
clone_template_to_appliance.delay(appliance.id, None)
elif len(appliances) > pool_size:
# Too many appliances, kill the surplus
# Only kill those that are visible only for one group. This is necessary so the groups
# don't "fight"
for appliance in appliances[:len(appliances) - pool_size]:
if appliance.is_visible_only_in_group(gs.user_group):
self.logger.info("Killing an extra appliance {}/{} in shepherd".format(
appliance.id, appliance.name))
Appliance.kill(appliance)
# Killing old appliances
for filter_kill in filters_kill:
for template in Template.objects.filter(
ready=True, usable=True, template_group=gs.template_group,
preconfigured=preconfigured, container=None, **filter_kill):
for a in Appliance.objects.filter(
template=template, appliance_pool=None, marked_for_deletion=False):
self.logger.info(
"Killing appliance {}/{} in shepherd because it is obsolete now".format(
a.id, a.name))
Appliance.kill(a)
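# Illustrative shape of the filters computed by generic_shepherd (the concrete values
# are made-up assumptions):
#   downstream (versioned) group:
#     filter_keep  = {"version": "5.6.0.1", "date": <newest build date>, "container": None}
#     filters_kill = [{"version": "5.6.0.1", "date": <older build>}, {"version": "5.5.4.2"}, ...]
#   upstream (nightly) group:
#     filter_keep  = {"date": <newest date>, "container": None}
#     filters_kill = [{"date": <older date>}, ...]
# i.e. only the newest template's appliances are kept; everything older is culled.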
@singleton_task()
def free_appliance_shepherd(self):
generic_shepherd(self, True)
generic_shepherd(self, False)
@singleton_task()
def wait_appliance_ready(self, appliance_id):
"""This task checks for appliance's readiness for use. The checking loop is designed as retrying
the task to free up the queue."""
try:
appliance = Appliance.objects.get(id=appliance_id)
if appliance.appliance_pool is not None:
if appliance.appliance_pool.not_needed_anymore:
# Terminate task chain
self.request.callbacks[:] = []
kill_appliance.delay(appliance_id)
return
if appliance.power_state == Appliance.Power.UNKNOWN or appliance.ip_address is None:
self.retry(args=(appliance_id,), countdown=30, max_retries=45)
if Appliance.objects.get(id=appliance_id).cfme.ipapp.is_web_ui_running():
with transaction.atomic():
appliance = Appliance.objects.get(id=appliance_id)
appliance.ready = True
appliance.save()
appliance.set_status("The appliance is ready.")
with diaper:
appliance.synchronize_metadata()
else:
with transaction.atomic():
appliance = Appliance.objects.get(id=appliance_id)
appliance.ready = False
appliance.save()
appliance.set_status("Waiting for UI to appear.")
self.retry(args=(appliance_id,), countdown=30, max_retries=45)
except ObjectDoesNotExist:
# source object is not present, terminating
return
@singleton_task()
def anyvm_power_on(self, provider, vm):
provider = get_mgmt(provider)
provider.start_vm(vm)
@singleton_task()
def anyvm_power_off(self, provider, vm):
provider = get_mgmt(provider)
provider.stop_vm(vm)
@singleton_task()
def anyvm_suspend(self, provider, vm):
provider = get_mgmt(provider)
provider.suspend_vm(vm)
@singleton_task()
def anyvm_delete(self, provider, vm):
provider = get_mgmt(provider)
provider.delete_vm(vm)
@singleton_task()
def delete_template_from_provider(self, template_id):
template = Template.objects.get(id=template_id)
try:
template.provider_api.delete_template(template.name)
except Exception as e:
self.logger.exception(e)
return False
with transaction.atomic():
template = Template.objects.get(pk=template.pk)
template.exists = False
template.save()
return True
@singleton_task()
def appliance_rename(self, appliance_id, new_name):
try:
appliance = Appliance.objects.get(id=appliance_id)
except ObjectDoesNotExist:
return None
if not appliance.provider.allow_renaming:
return None
if appliance.name == new_name:
return None
with redis.appliances_ignored_when_renaming(appliance.name, new_name):
self.logger.info("Renaming {}/{} to {}".format(appliance_id, appliance.name, new_name))
appliance.name = appliance.provider_api.rename_vm(appliance.name, new_name)
appliance.save()
return appliance.name
@singleton_task()
def rename_appliances_for_pool(self, pool_id):
with transaction.atomic():
try:
appliance_pool = AppliancePool.objects.get(id=pool_id)
except ObjectDoesNotExist:
return
appliances = [
appliance
for appliance
in appliance_pool.appliances
if appliance.provider_api.can_rename
]
for appliance in appliances:
if not appliance.provider.allow_renaming:
continue
new_name = '{}_'.format(appliance_pool.owner.username)
if appliance.version and not appliance.version.startswith('...'):
# CFME
new_name += 'cfme_{}_'.format(appliance.version.replace('.', ''))
else:
# MIQ
new_name += 'miq_'
new_name += '{}_{}'.format(
appliance.template.date.strftime("%y%m%d"),
fauxfactory.gen_alphanumeric(length=4))
appliance_rename.apply_async(
countdown=10, # To prevent clogging with the transaction.atomic
args=(appliance.id, new_name))
@singleton_task(soft_time_limit=60)
def check_update(self):
sprout_sh = project_path.join("sprout").join("sprout.sh")
try:
result = command.run([sprout_sh.strpath, "check-update"])
except command.CommandException as e:
result = e
needs_update = result.output.strip().lower() != "up-to-date"
redis.set("sprout-needs-update", needs_update)
@singleton_task()
def scavenge_managed_providers(self):
chord_tasks = []
for appliance in Appliance.objects.exclude(appliance_pool=None):
chord_tasks.append(scavenge_managed_providers_from_appliance.si(appliance.id))
chord(chord_tasks)(calculate_provider_management_usage.s())
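# The ``chord`` above fans out one scavenge task per appliance and, once all of them
# have finished, passes the list of their return values (appliance ids, or None on
# failure) as a single argument to calculate_provider_management_usage.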
@singleton_task(soft_time_limit=180)
def scavenge_managed_providers_from_appliance(self, appliance_id):
try:
appliance = Appliance.objects.get(id=appliance_id)
except ObjectDoesNotExist:
return None
try:
managed_providers = appliance.ipapp.managed_known_providers
appliance.managed_providers = [prov.key for prov in managed_providers]
except Exception as e:
# To prevent single appliance messing up whole result
provider_error_logger().error("{}: {}".format(type(e).__name__, str(e)))
return None
return appliance.id
@singleton_task()
def calculate_provider_management_usage(self, appliance_ids):
results = {}
for appliance_id in filter(lambda id: id is not None, appliance_ids):
try:
appliance = Appliance.objects.get(id=appliance_id)
except ObjectDoesNotExist:
# Deleted in the meantime
continue
for provider_key in appliance.managed_providers:
if provider_key not in results:
results[provider_key] = []
results[provider_key].append(appliance.id)
for provider in Provider.objects.all():
provider.appliances_manage_this_provider = results.get(provider.id, [])
@singleton_task()
def mailer_version_mismatch(self):
"""This is usually called per-mismatch, but as the mismatches are stored in database and the
mail can fail sending, so this can send the mismatches in a batch in this case."""
with transaction.atomic():
mismatches = MismatchVersionMailer.objects.filter(sent=False)
if not mismatches:
return
email_body = """\
Hello,
I am Sprout template version mismatch spammer. I think there are some version mismatches.
Here is the list:
{}
Sincerely,
Sprout template version mismatch spammer™
""".format(
"\n".join(
"* {} @ {} : supposed {} , true {}".format(
mismatch.template_name, mismatch.provider.id, mismatch.supposed_version,
mismatch.actual_version)
for mismatch in mismatches
)
)
user_mails = []
for user in User.objects.filter(is_superuser=True):
if user.email:
user_mails.append(user.email)
result = send_mail(
"Template version mismatches detected",
email_body,
"[email protected]",
user_mails,
)
if result > 0:
for mismatch in mismatches:
mismatch.sent = True
mismatch.save()
@singleton_task()
def obsolete_template_deleter(self):
for group in Group.objects.all():
if group.template_obsolete_days_delete:
# We can delete based on the template age
obsolete_templates = group.obsolete_templates
if obsolete_templates is not None:
for template in obsolete_templates:
if template.can_be_deleted:
delete_template_from_provider.delay(template.id)
@singleton_task()
def connect_direct_lun(self, appliance_id):
appliance = Appliance.objects.get(id=appliance_id)
if not hasattr(appliance.provider_api, "connect_direct_lun_to_appliance"):
return False
try:
appliance.provider_api.connect_direct_lun_to_appliance(appliance.name, False)
except Exception as e:
appliance.set_status("LUN: {}: {}".format(type(e).__name__, str(e)))
return False
else:
appliance.reload()
with transaction.atomic():
appliance.lun_disk_connected = True
appliance.save()
return True
@singleton_task()
def disconnect_direct_lun(self, appliance_id):
appliance = Appliance.objects.get(id=appliance_id)
if not appliance.lun_disk_connected:
return False
if not hasattr(appliance.provider_api, "connect_direct_lun_to_appliance"):
return False
try:
appliance.provider_api.connect_direct_lun_to_appliance(appliance.name, True)
except Exception as e:
appliance.set_status("LUN: {}: {}".format(type(e).__name__, str(e)))
return False
else:
appliance.reload()
with transaction.atomic():
appliance.lun_disk_connected = False
appliance.save()
return True
@singleton_task()
def appliance_yum_update(self, appliance_id):
appliance = Appliance.objects.get(id=appliance_id)
appliance.ipapp.update_rhel(reboot=False)
@singleton_task()
def pick_templates_for_deletion(self):
"""Applies some heuristics to guess templates that might be candidates to deletion."""
to_mail = {}
for group in Group.objects.all():
for zstream, versions in group.pick_versions_to_delete().iteritems():
for version in versions:
for template in Template.objects.filter(
template_group=group, version=version, exists=True, suggested_delete=False):
template.suggested_delete = True
template.save()
if group.id not in to_mail:
to_mail[group.id] = {}
if zstream not in to_mail[group.id]:
to_mail[group.id][zstream] = {}
if version not in to_mail[group.id][zstream]:
to_mail[group.id][zstream][version] = []
to_mail[group.id][zstream][version].append(
"{} @ {}".format(template.name, template.provider.id))
# TODO: Figure out why it was spamming
if to_mail and False:
data = yaml.safe_dump(to_mail, default_flow_style=False)
email_body = """\
Hello,
just letting you know that there are some templates that you might like to delete:
{}
Visit Sprout's Templates page for more information.
Sincerely,
Sprout.
""".format(data)
user_mails = []
for user in User.objects.filter(is_superuser=True):
if user.email:
user_mails.append(user.email)
send_mail(
"Possible candidates for template deletion",
email_body,
"[email protected]",
user_mails,
)
@singleton_task()
def check_swap_in_appliances(self):
chord_tasks = []
for appliance in Appliance.objects.filter(
ready=True, power_state=Appliance.Power.ON, marked_for_deletion=False).exclude(
power_state=Appliance.Power.ORPHANED):
chord_tasks.append(check_swap_in_appliance.si(appliance.id))
chord(chord_tasks)(notify_owners.s())
@singleton_task()
def check_swap_in_appliance(self, appliance_id):
appliance = Appliance.objects.get(id=appliance_id)
try:
swap_amount = appliance.ipapp.swap
except (SSHException, socket.error, Exception) as e:
if type(e) is Exception and 'SSH is unavailable' not in str(e):
# Because otherwise it might not be an SSH error
raise
ssh_failed = True
swap_amount = None
else:
ssh_failed = False
went_up = (
(appliance.swap is not None and swap_amount > appliance.swap) or
(appliance.swap is None and swap_amount is not None and swap_amount > 0))
ssh_failed_changed = ssh_failed and not appliance.ssh_failed
appliance.swap = swap_amount
appliance.ssh_failed = ssh_failed
appliance.save()
# Returns a tuple - (appliance_id, went_up?, current_amount, ssh_failed?)
return appliance.id, went_up, swap_amount, ssh_failed_changed
@singleton_task()
def notify_owners(self, results):
# Filter out any errors
results = [x for x in results if isinstance(x, (list, tuple)) and len(x) == 4]
per_user = {}
for appliance_id, went_up, current_swap, ssh_failed_changed in results:
if not went_up and not ssh_failed_changed:
# Not interested
continue
appliance = Appliance.objects.get(id=appliance_id)
if appliance.appliance_pool is not None:
username = appliance.appliance_pool.owner.username
user = appliance.appliance_pool.owner
else:
username = 'SHEPHERD'
user = None
issues = []
if went_up:
issues.append('swap++ {}M'.format(current_swap))
if ssh_failed_changed:
issues.append('ssh unreachable')
message = '{}/{} {}'.format(
appliance.name, appliance.ip_address, ', '.join(issues))
if user is None:
# No email
continue
if not user.email:
# Same here
continue
# We assume that "living" users have an e-mail set therefore we will not nag about bots'
# appliances.
send_message('{}: {}'.format(username, message))
# Add the message to be sent
if user not in per_user:
per_user[user] = []
per_user[user].append(message)
# Send out the e-mails
for user, messages in per_user.iteritems():
appliance_list = '\n'.join('* {}'.format(message) for message in messages)
email_body = """\
Hello,
I discovered that some of your appliances are behaving badly. Please check them out:
{}
Best regards,
The Sprout™
""".format(appliance_list)
send_mail(
"[Sprout] Appliance swap report",
email_body,
"[email protected]",
[user.email],
)
@singleton_task()
def appliances_synchronize_metadata(self):
for appliance in Appliance.objects.all():
try:
appliance.synchronize_metadata()
except ObjectDoesNotExist:
return
@singleton_task()
def synchronize_untracked_vms(self):
for provider in Provider.objects.filter(working=True, disabled=False):
synchronize_untracked_vms_in_provider.delay(provider.id)
def parsedate(d):
if d is None:
return d
else:
return iso8601.parse_date(d)
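# Example (illustrative): parsedate('2016-06-24T22:13:08Z') yields a timezone-aware
# datetime via iso8601, while parsedate(None) passes None through - matching the
# optional sprout_* time metadata read back in synchronize_untracked_vms_in_provider.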
@singleton_task()
def synchronize_untracked_vms_in_provider(self, provider_id):
"""'re'-synchronizes any vms that might be lost during outages."""
provider = Provider.objects.get(id=provider_id)
provider_api = provider.api
if not hasattr(provider_api, 'list_vm'):
# This provider does not have VMs (e.g. Hawkular or OpenShift)
return
for vm_name in sorted(map(str, provider_api.list_vm())):
if Appliance.objects.filter(name=vm_name, template__provider=provider).count() != 0:
continue
# We have an untracked VM. Let's investigate
try:
appliance_id = provider_api.get_meta_value(vm_name, 'sprout_id')
except KeyError:
continue
except NotImplementedError:
# Do not bother if not implemented in the API
return
# just check it again ...
if Appliance.objects.filter(id=appliance_id).count() == 1:
# For some reason it is already in
continue
# Now it appears that this is a VM that was in Sprout
construct = {'id': appliance_id}
# Retrieve appliance data
try:
self.logger.info('Trying to reconstruct appliance %d/%s', appliance_id, vm_name)
construct['name'] = vm_name
template_id = provider_api.get_meta_value(vm_name, 'sprout_source_template_id')
# Templates are not deleted from the DB so this should be OK.
construct['template'] = Template.objects.get(id=template_id)
construct['ready'] = provider_api.get_meta_value(vm_name, 'sprout_ready')
construct['description'] = provider_api.get_meta_value(vm_name, 'sprout_description')
construct['lun_disk_connected'] = provider_api.get_meta_value(
vm_name, 'sprout_lun_disk_connected')
construct['swap'] = provider_api.get_meta_value(vm_name, 'sprout_swap')
construct['ssh_failed'] = provider_api.get_meta_value(vm_name, 'sprout_ssh_failed')
# Time fields
construct['datetime_leased'] = parsedate(
provider_api.get_meta_value(vm_name, 'sprout_datetime_leased'))
construct['leased_until'] = parsedate(
provider_api.get_meta_value(vm_name, 'sprout_leased_until'))
construct['status_changed'] = parsedate(
provider_api.get_meta_value(vm_name, 'sprout_status_changed'))
construct['created_on'] = parsedate(
provider_api.get_meta_value(vm_name, 'sprout_created_on'))
construct['modified_on'] = parsedate(
provider_api.get_meta_value(vm_name, 'sprout_modified_on'))
except KeyError as e:
self.logger.error('Failed to reconstruct %d/%s', appliance_id, vm_name)
self.logger.exception(e)
continue
# Retrieve pool data if applicable
try:
pool_id = provider_api.get_meta_value(vm_name, 'sprout_pool_id')
pool_construct = {'id': pool_id}
pool_construct['total_count'] = provider_api.get_meta_value(
vm_name, 'sprout_pool_total_count')
group_id = provider_api.get_meta_value(
vm_name, 'sprout_pool_group')
pool_construct['group'] = Group.objects.get(id=group_id)
try:
construct_provider_id = provider_api.get_meta_value(
vm_name, 'sprout_pool_provider')
pool_construct['provider'] = Provider.objects.get(id=construct_provider_id)
except (KeyError, ObjectDoesNotExist):
# optional
pool_construct['provider'] = None
pool_construct['version'] = provider_api.get_meta_value(
vm_name, 'sprout_pool_version')
pool_construct['date'] = parsedate(provider_api.get_meta_value(
vm_name, 'sprout_pool_appliance_date'))
owner_id = provider_api.get_meta_value(
vm_name, 'sprout_pool_owner_id')
try:
owner = User.objects.get(id=owner_id)
except ObjectDoesNotExist:
owner_username = provider_api.get_meta_value(
vm_name, 'sprout_pool_owner_username')
owner = User(id=owner_id, username=owner_username)
owner.save()
pool_construct['owner'] = owner
pool_construct['preconfigured'] = provider_api.get_meta_value(
vm_name, 'sprout_pool_preconfigured')
pool_construct['description'] = provider_api.get_meta_value(
vm_name, 'sprout_pool_description')
pool_construct['not_needed_anymore'] = provider_api.get_meta_value(
vm_name, 'sprout_pool_not_needed_anymore')
pool_construct['finished'] = provider_api.get_meta_value(
vm_name, 'sprout_pool_finished')
pool_construct['yum_update'] = provider_api.get_meta_value(
vm_name, 'sprout_pool_yum_update')
try:
construct['appliance_pool'] = AppliancePool.objects.get(id=pool_id)
except ObjectDoesNotExist:
pool = AppliancePool(**pool_construct)
pool.save()
construct['appliance_pool'] = pool
except KeyError as e:
pass
appliance = Appliance(**construct)
appliance.save()
# And now, refresh!
refresh_appliances_provider.delay(provider.id)
@singleton_task()
def read_docker_images_from_url(self):
for group in Group.objects.exclude(Q(templates_url=None) | Q(templates_url='')):
read_docker_images_from_url_group.delay(group.id)
@singleton_task()
def read_docker_images_from_url_group(self, group_id):
group = Group.objects.get(id=group_id)
with closing(urlopen(group.templates_url)) as http:
root = etree.parse(http, parser=etree.HTMLParser()).getroot()
result = set()
for link in root.xpath('//a[../../td/img[contains(@src, "folder")]]'):
try:
href = link.attrib['href']
except KeyError:
continue
url = group.templates_url + href
version_with_suffix = href.rstrip('/') # Does not contain the last digit
try:
with closing(urlopen(url + 'cfme-docker')) as http:
cfme_docker = http.read().strip()
except HTTPError:
self.logger.info('Skipping {} (no docker)'.format(url))
continue
try:
with closing(urlopen(url + 'version')) as http:
cfme_version = http.read().strip()
if '-' in version_with_suffix:
# Use the suffix from the folder name
suffix = version_with_suffix.rsplit('-', 1)[-1]
cfme_version = '{}-{}'.format(cfme_version, suffix)
except HTTPError:
self.logger.info('Skipping {} (no version)'.format(url))
continue
cfme_docker = re.split(r'\s+', cfme_docker)
if len(cfme_docker) == 2:
pull_url, latest_mapping = cfme_docker
latest = re.sub(r'^\(latest=([^)]+)\)$', '\\1', latest_mapping)
proper_pull_url = re.sub(r':latest$', ':{}'.format(latest), pull_url)
elif cfme_docker and cfme_docker[0].lower().strip() == 'tags:':
# Multiple tags, take the longest
proper_pull_url = sorted(filter(None, cfme_docker[1:]), key=len, reverse=True)[0]
latest = proper_pull_url.rsplit(':', 1)[-1]
else:
self.logger.info('Skipping: unknown format: {!r}'.format(cfme_docker))
continue
if cfme_version in result:
continue
process_docker_images_from_url_group.delay(group.id, cfme_version, latest, proper_pull_url)
result.add(cfme_version)
@singleton_task()
def process_docker_images_from_url_group(self, group_id, version, docker_version, pull_url):
group = Group.objects.get(id=group_id)
# "-20160624221308"
date = docker_version.rsplit('-', 1)[-1]
try:
date = datetime.strptime(date, '%Y%m%d%H%M%S').date() # noqa
except AttributeError:
raise ValueError('Could not parse date from {}'.format(docker_version))
for provider in Provider.objects.exclude(container_base_template=None):
try:
Template.objects.get(
~Q(container=None), template_group=group, provider=provider, version=version,
date=date, preconfigured=True)
except ObjectDoesNotExist:
create_docker_vm.delay(group.id, provider.id, version, date, pull_url)
def docker_vm_name(version, date):
return 'docker-{}-{}-{}'.format(
re.sub(r'[^0-9a-z]', '', version.lower()),
re.sub(r'[^0-9]', '', str(date)),
fauxfactory.gen_alphanumeric(length=4).lower())
@singleton_task()
def create_docker_vm(self, group_id, provider_id, version, date, pull_url):
group = Group.objects.get(id=group_id)
provider = Provider.objects.get(id=provider_id)
with transaction.atomic():
if provider.remaining_configuring_slots < 1:
self.retry(
args=(group_id, provider_id, version, date, pull_url), countdown=60, max_retries=60)
new_name = docker_vm_name(version, date)
new_template = Template(
template_group=group, provider=provider,
container='cfme', name=new_name, original_name=provider.container_base_template,
version=version, date=date,
ready=False, exists=False, usable=True, preconfigured=True)
new_template.save()
workflow = chain(
prepare_template_deploy.si(new_template.id),
configure_docker_template.si(new_template.id, pull_url),
prepare_template_seal.si(new_template.id),
prepare_template_poweroff.si(new_template.id),
prepare_template_finish.si(new_template.id),
)
workflow.link_error(prepare_template_delete_on_error.si(new_template.id))
workflow()
@singleton_task()
def configure_docker_template(self, template_id, pull_url):
template = Template.objects.get(id=template_id)
template.set_status("Waiting for SSH.")
appliance = CFMEAppliance(template.provider_name, template.name, container=template.container)
appliance.ipapp.wait_for_ssh()
with appliance.ipapp.ssh_client as ssh:
template.set_status("Setting the pull URL.")
ssh.run_command(
'echo "export CFME_URL={}" > /etc/cfme_pull_url'.format(pull_url), ensure_host=True)
template.set_status("Pulling the {}.".format(pull_url))
ssh.run_command('docker pull {}'.format(pull_url), ensure_host=True)
template.set_status('Pulling finished.')
@singleton_task()
def sync_appliance_hw(self, appliance_id):
Appliance.objects.get(id=appliance_id).sync_hw()
@singleton_task()
def sync_provider_hw(self, provider_id):
Provider.objects.get(id=provider_id).perf_sync()
@singleton_task()
def sync_quotas_perf(self):
for provider in Provider.objects.all():
sync_provider_hw.delay(provider.id)
for appliance in provider.currently_managed_appliances:
sync_appliance_hw.delay(appliance.id)
| gpl-2.0 | 6,819,446,563,095,956,000 | 41.771268 | 100 | 0.617997 | false |
explorigin/Rocket | tests/test_threadpool.py | 1 | 4024 | # -*- coding: utf-8 -*-
# This file is part of the Rocket Web Server
# Copyright (c) 2012 Timothy Farrell
#
# See the included LICENSE.txt file for licensing details.
# Import System Modules
import time
import unittest
try:
from queue import Queue
except ImportError:
from Queue import Queue
try:
from functools import reduce
except ImportError:
pass
# Import Custom Modules
from rocket import threadpool, worker
# Constants
# Define Tests
class ThreadPoolTest(unittest.TestCase):
def setUp(self):
self.min_threads = 10
self.max_threads = 20
self.active_queue = Queue()
self.monitor_queue = Queue()
w = worker.Worker
self.tp = threadpool.ThreadPool(w,
dict(),
self.active_queue,
self.monitor_queue,
self.min_threads,
self.max_threads)
def aliveConnections(self):
return reduce(lambda x, y: x+y,
[1 if x.isAlive() else 0 for x in self.tp.threads],
0)
def testThreadPoolStart(self):
self.assertEqual(self.aliveConnections(), 0)
self.tp.start()
self.assertEqual(self.aliveConnections(), self.min_threads)
def testThreadPoolStop(self):
self.assertEqual(self.aliveConnections(), 0)
self.tp.start()
self.assertEqual(self.aliveConnections(), self.min_threads)
self.tp.stop()
self.assertEqual(len(self.tp.threads), 0)
def testThreadPoolShrink(self):
self.assertEqual(self.aliveConnections(), 0)
self.tp.start()
self.assertEqual(self.aliveConnections(), self.min_threads)
self.tp.shrink(1)
# Give the other threads some time to process the death threat
time.sleep(0.5)
self.assertEqual(self.aliveConnections(), self.min_threads - 1)
def testThreadPoolGrow(self):
self.assertEqual(self.aliveConnections(), 0)
self.tp.start()
self.assertEqual(self.aliveConnections(), self.min_threads)
self.tp.grow(1)
self.assertEqual(self.aliveConnections(), self.min_threads + 1)
def testThreadPoolDeadThreadCleanup(self):
self.assertEqual(self.aliveConnections(), 0)
self.tp.start()
self.assertEqual(self.aliveConnections(), self.min_threads)
self.tp.shrink(1)
# Give the other threads some time to process the death threat
time.sleep(0.5)
self.assertEqual(self.aliveConnections(), self.min_threads - 1)
self.assertEqual(len(self.tp.threads), self.min_threads)
self.tp.bring_out_your_dead()
self.assertEqual(len(self.tp.threads), self.min_threads - 1)
def testThreadPoolDynamicResizeDown(self):
self.assertEqual(self.aliveConnections(), 0)
self.tp.start()
self.assertEqual(self.aliveConnections(), self.min_threads)
self.tp.grow(1)
self.assertEqual(self.aliveConnections(), self.min_threads + 1)
self.assertEqual(len(self.tp.threads), self.min_threads + 1)
self.tp.dynamic_resize()
# Give the other threads some time to process the death threat
time.sleep(0.5)
self.tp.bring_out_your_dead()
self.assertEqual(self.aliveConnections(), self.min_threads)
self.assertEqual(len(self.tp.threads), self.min_threads)
def testThreadPoolDynamicResizeUp(self):
self.assertEqual(self.aliveConnections(), 0)
for x in range(self.max_threads * 3):
self.active_queue.put(None)
self.tp.alive = True
self.tp.dynamic_resize()
self.assertTrue(self.min_threads < len(self.tp.threads) < self.max_threads + 1)
def tearDown(self):
try:
self.tp.stop()
except:
pass
del self.tp
if __name__ == '__main__':
unittest.main()
| mit | 5,738,040,594,529,807,000 | 24.468354 | 87 | 0.608847 | false |
jeff-99/toolbox | toolbox/config.py | 1 | 5198 | import json
import collections
from .mixins import ConfigMixin
from .defaults import *
class ConfigManager(object):
"""
The config manager has the responsibility of persisting plugin configs.
On initialisation it creates a default file structure in the user's home directory
"""
FILE_EXT = '.json'
def __init__(self):
if not os.path.isdir(TOOLBOX_DIR):
os.mkdir(TOOLBOX_DIR)
if not os.path.isdir(CONF_DIR):
os.mkdir(CONF_DIR)
if not os.path.isdir(LOCAL_PLUGIN_DIR):
os.mkdir(LOCAL_PLUGIN_DIR)
self.config_dir = CONF_DIR
def load_plugin(self, name):
"""
Load the plugin config file by name and return an py:class:`toolbox.config.PluginConfig`
:param str name:
:return: an instance of PluginConfig for given plugin name
:rtype: toolbox.config.PluginConfig
"""
file_name = name + ConfigManager.FILE_EXT
path = os.path.join(self.config_dir, file_name)
if not os.path.exists(path):
plugin_config = PluginConfig()
elif os.path.exists(path) and not os.path.isfile(path):
raise TypeError('{} is not a file'.format(path))
else:
with open(path, 'r') as f:
try:
config = json.load(f)
plugin_config = PluginConfig.create_from_dict(config)
except ValueError:
plugin_config = PluginConfig()
return plugin_config
def save_plugin(self, name, config):
"""
save a plugin config by name
before saving the global config key is deleted
:param str name: Name of the plugin
:param config: instance of an py:class:`toolbox.config.PluginConfig`
:return:
"""
file_name = name + ConfigManager.FILE_EXT
path = os.path.join(self.config_dir, file_name)
if os.path.exists(path) and not os.path.isfile(path):
raise Exception('path exists but it ain\'t a file Brah')
if PluginConfig.GLOBAL_KEY in config:
del config[PluginConfig.GLOBAL_KEY]
with open(path, 'w') as f:
f.write(config.to_json())
def save(self, plugins):
"""
Convenience method to save a list of plugins. Only configs that have been modified since loading will be saved.
:param iterable plugins: list of instances of base class py:class:`toolbox.plugin.ToolboxPlugin`
:return:
"""
for plugin in plugins:
if isinstance(plugin, ConfigMixin):
conf = plugin.get_config()
if conf.modified:
self.save_plugin(plugin.name, conf)
class PluginConfig(object):
"""
Config container for plugin configs. Acts like a dictionary with some extra convenience methods.
The config has a special key for global configs which can be accessed with the 'get_global_config' method
"""
GLOBAL_KEY = '__GLOBAL__'
def __init__(self):
self._config = collections.defaultdict(lambda: None)
self.modified = False
def __getitem__(self, item):
return self._config[item]
def __setitem__(self, key, value):
self.modified = True if key != PluginConfig.GLOBAL_KEY else False
self._config[key] = value
def __delitem__(self, key):
self.modified = True if key != PluginConfig.GLOBAL_KEY else False
del self._config[key]
def __contains__(self, item):
return item in self._config
def __add__(self, other):
if not isinstance(other, PluginConfig):
return self
for key in other.keys():
self.modified = True if key != PluginConfig.GLOBAL_KEY else False
self[key] = other[key]
return self
def __sub__(self, other):
"""
Remove the keys of the other config
:param other:
:return:
"""
if self is other or not isinstance(other, PluginConfig):
return self
for key in other.keys():
if key in self:
self.modified = True if key != PluginConfig.GLOBAL_KEY else False
del self[key]
return self
def __len__(self):
return len(list(filter(lambda x: x != PluginConfig.GLOBAL_KEY,
self._config.keys())))
def set_global_config(self, config):
self[PluginConfig.GLOBAL_KEY] = config
def get_global_config(self):
return self[PluginConfig.GLOBAL_KEY]
def keys(self):
return self._config.keys()
def to_json(self):
"""
Converts the config values to a JSON string
:return: JSON string
:rtype: str
"""
return json.dumps(self._config, indent=True)
@classmethod
def create_from_dict(cls, dict):
"""
Factory method to create a PluginConfig from a python dictionary
:param dict:
:return: a PluginConfig
:rtype: py:class:`toolbox.config.PluginConfig`
"""
config = cls()
for k in dict:
config[k] = dict[k]
return config
| isc | 3,188,271,387,001,703,400 | 29.397661 | 119 | 0.585995 | false |
google/ml_collections | ml_collections/config_flags/tests/valueerror_config.py | 1 | 1190 | # Copyright 2021 The ML Collections Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python 3
"""Config file that raises ValueError on import.
When trying loading the configuration file as a flag, the flags library catches
ValueError exceptions then recasts them as a IllegalFlagValueError and rethrows
(b/63877430). The rethrow does not include the stacktrace from the original
exception, so we manually add the stracktrace in configflags.parse(). This is
tested in `ConfigFlagTest.testValueError` in `config_overriding_test.py`.
"""
def value_error_function():
raise ValueError('This is a ValueError.')
def get_config():
return {'item': value_error_function()}
| apache-2.0 | -911,633,976,046,960,600 | 37.387097 | 79 | 0.768908 | false |
apagac/cfme_tests | cfme/infrastructure/virtual_machines.py | 1 | 64368 | # -*- coding: utf-8 -*-
"""A model of Infrastructure Virtual Machines area of CFME. This includes the VMs explorer tree,
quadicon lists, and VM details page.
"""
import re
from collections import namedtuple
from copy import copy
import attr
import fauxfactory
from navmazing import NavigateToAttribute
from navmazing import NavigateToSibling
from navmazing import NavigationDestinationNotFound
from widgetastic.utils import Parameter
from widgetastic.utils import partial_match
from widgetastic.widget import NoSuchElementException
from widgetastic.widget import ParametrizedView
from widgetastic.widget import Table as WTable
from widgetastic.widget import Text
from widgetastic.widget import TextInput
from widgetastic.widget import View
from widgetastic_patternfly import BootstrapSelect
from widgetastic_patternfly import BootstrapSwitch
from widgetastic_patternfly import Button
from widgetastic_patternfly import CheckableBootstrapTreeview
from widgetastic_patternfly import Dropdown
from widgetastic_patternfly import Input as WInput
from cfme.base.login import BaseLoggedInPage
from cfme.common import TimelinesView
from cfme.common.vm import Template
from cfme.common.vm import TemplateCollection
from cfme.common.vm import VM
from cfme.common.vm import VMCollection
from cfme.common.vm_views import CloneVmView
from cfme.common.vm_views import EditView
from cfme.common.vm_views import ManagementEngineView
from cfme.common.vm_views import MigrateVmView
from cfme.common.vm_views import PolicySimulationView
from cfme.common.vm_views import ProvisionView
from cfme.common.vm_views import PublishVmView
from cfme.common.vm_views import RenameVmView
from cfme.common.vm_views import RetirementViewWithOffset
from cfme.common.vm_views import SetOwnershipView
from cfme.common.vm_views import VMDetailsEntities
from cfme.common.vm_views import VMEntities
from cfme.common.vm_views import VMToolbar
from cfme.exceptions import DestinationNotFound
from cfme.exceptions import displayed_not_implemented
from cfme.exceptions import ItemNotFound
from cfme.services.requests import RequestsView
from cfme.utils.appliance.implementations.ui import CFMENavigateStep
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.appliance.implementations.ui import navigator
from cfme.utils.conf import cfme_data
from cfme.utils.log import logger
from cfme.utils.pretty import Pretty
from cfme.utils.providers import get_crud_by_name
from cfme.utils.wait import wait_for
from widgetastic_manageiq import Accordion
from widgetastic_manageiq import CompareToolBarActionsView
from widgetastic_manageiq import ConditionalSwitchableView
from widgetastic_manageiq import ManageIQTree
from widgetastic_manageiq import PaginationPane
from widgetastic_manageiq import Search
from widgetastic_manageiq import SnapshotMemorySwitch
from widgetastic_manageiq import SummaryTable
from widgetastic_manageiq import Table
from widgetastic_manageiq.vm_reconfigure import DisksTable
def has_child(tree, text, parent_item=None):
"""Check if a tree has an item with text"""
if not parent_item:
parent_item = tree.root_item
if tree.child_items_with_text(parent_item, text):
return True
else:
for item in tree.child_items(parent_item):
if has_child(tree, text, item):
return True
return False
def find_path(tree, text, parent_item=None):
"""Find the path to an item with text"""
if not parent_item:
parent_item = tree.root_item
path = [parent_item.text]
tree.expand_node(tree.get_nodeid(parent_item))
children = tree.child_items_with_text(parent_item, text)
if children:
for child in children:
if child.text:
return path + [child.text]
return []
else:
for item in tree.child_items(parent_item):
child_path = find_path(tree, text, item)
if child_path:
return path + child_path
return []
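# Illustrative usage of the two tree helpers above (a sketch; ``vm`` and the
# snapshot title are placeholders) -- this mirrors how Snapshot.delete() below
# locates a tree node before acting on it:
#     view = navigate_to(vm, 'SnapshotsAll')
#     root_item = view.tree.expand_path(vm.name)
#     if has_child(view.tree, 'my-snapshot', root_item):
#         path = find_path(view.tree, 'my-snapshot', root_item)
#         view.tree.click_path(*path)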
class InfraGenericDetailsToolbar(View):
reload = Button(title='Refresh this page')
history = Dropdown('History')
configuration = Dropdown('Configuration')
policy = Dropdown('Policy')
monitoring = Dropdown("Monitoring")
download = Button(title='Print or export summary')
lifecycle = Dropdown('Lifecycle')
@ParametrizedView.nested
class custom_button(ParametrizedView): # noqa
PARAMETERS = ("button_group", )
_dropdown = Dropdown(text=Parameter("button_group"))
def item_select(self, button, handle_alert=None):
self._dropdown.item_select(button, handle_alert=handle_alert)
class InfraVmDetailsToolbar(InfraGenericDetailsToolbar):
"""Toolbar for VM details differs from All VMs&TemplatesView
"""
access = Dropdown("Access")
power = Dropdown(text='Power')
class VmsTemplatesAccordion(View):
"""
The accordion on the Virtual Machines page
"""
@View.nested
class vmstemplates(Accordion): # noqa
ACCORDION_NAME = 'VMs & Templates'
tree = ManageIQTree()
@View.nested
class vms(Accordion): # noqa
ACCORDION_NAME = 'VMs'
tree = ManageIQTree()
@View.nested
class templates(Accordion): # noqa
ACCORDION_NAME = 'Templates'
tree = ManageIQTree()
class InfraVmView(BaseLoggedInPage):
"""Base view for header/nav check, inherit for navigatable views"""
@property
def in_infra_vms(self):
return (
self.logged_in_as_current_user and
self.navigation.currently_selected == ['Compute', 'Infrastructure', 'Virtual Machines'])
class VmsTemplatesAllView(InfraVmView):
"""
    The collection page for VMs and templates
"""
actions = View.nested(CompareToolBarActionsView)
toolbar = View.nested(VMToolbar)
sidebar = View.nested(VmsTemplatesAccordion)
including_entities = View.include(VMEntities, use_parent=True)
pagination = PaginationPane
@property
def is_displayed(self):
return (
self.in_infra_vms and
self.sidebar.vmstemplates.tree.currently_selected == ['All VMs & Templates'] and
self.entities.title.text == 'All VMs & Templates')
def reset_page(self):
"""It resets the 'search filter' to empty or removes the value of 'search filter' if already
present"""
self.entities.search.remove_search_filters()
class OrphanedVmsAllView(VmsTemplatesAllView):
"""This view is for all Orphaned Vms page"""
@property
def is_displayed(self):
selected = self.sidebar.vmstemplates.tree.currently_selected
return (
self.in_infra_vms
and selected == ["All VMs & Templates", "<Orphaned>"]
and self.entities.title.text == "Orphaned VM or Templates"
)
class ArchivedVmsAllView(VmsTemplatesAllView):
"""This view is for all Archived Vms page"""
@property
def is_displayed(self):
selected = self.sidebar.vmstemplates.tree.currently_selected
return (
self.in_infra_vms
and selected == ["All VMs & Templates", "<Archived>"]
and self.entities.title.text == "Archived VM or Templates"
)
class VmTemplatesAllForProviderView(InfraVmView):
toolbar = View.nested(VMToolbar)
sidebar = View.nested(VmsTemplatesAccordion)
including_entities = View.include(VMEntities, use_parent=True)
@property
def is_displayed(self):
expected_provider = None
# Could be collection or entity
# If entity it will have provider attribute
if getattr(self.context['object'], 'provider', False):
expected_provider = self.context['object'].provider.name
# if collection will have provider filter
elif 'provider' in getattr(self.context['object'], 'filters', {}):
expected_provider = self.context['object'].filters.get('provider').name
if expected_provider is None:
self.logger.warning('No provider available on context for is_displayed: %s',
self.context['object'])
return False
else:
return (
self.in_infra_vms and
str(self.entities.title.text) ==
'VM or Templates under Provider "{}"'.format(expected_provider)
)
def reset_page(self):
self.entities.search.remove_search_filters()
class VmsOnlyAllView(InfraVmView):
toolbar = View.nested(VMToolbar)
sidebar = View.nested(VmsTemplatesAccordion)
search = View.nested(Search)
including_entities = View.include(VMEntities, use_parent=True)
@View.nested
class filters(Accordion): # noqa
ACCORDION_NAME = "All VMs"
tree = ManageIQTree()
@property
def is_displayed(self):
return (
self.in_infra_vms and
self.sidebar.vms.tree.currently_selected == ['All VMs'] and
self.entities.title.text == 'All VMs')
def reset_page(self):
self.entities.search.remove_search_filters()
class TemplatesOnlyAllView(InfraVmView):
toolbar = View.nested(VMToolbar)
sidebar = View.nested(VmsTemplatesAccordion)
search = View.nested(Search)
including_entities = View.include(VMEntities, use_parent=True)
@property
def is_displayed(self):
return (
self.in_infra_vms and
self.sidebar.templates.tree.currently_selected == ['All Templates'] and
self.entities.title.text == 'All Templates')
class ProviderTemplatesOnlyAllView(TemplatesOnlyAllView):
@property
def is_displayed(self):
data = {
'provider': self.context['object'].name,
'images': ' and Images' if self.browser.product_version > '5.10' else ''
}
title = '{provider} (All VM Templates{images})'.format(**data)
return (
self.logged_in_as_current_user and
self.navigation.currently_selected == ['Compute', 'Infrastructure', 'Providers'] and
self.entities.title.text == title
)
class HostTemplatesOnlyAllView(TemplatesOnlyAllView):
@property
def is_displayed(self):
if self.browser.product_version < "5.10":
title = "{} (All VM Templates)".format(self.context["object"].name)
else:
title = "{} (All VM Templates and Images)".format(self.context["object"].name)
return (
self.logged_in_as_current_user and
self.navigation.currently_selected == ['Compute', 'Infrastructure', 'Hosts'] and
self.entities.title.text == title
)
class InfraVmSummaryView(VMDetailsEntities):
operating_ranges = SummaryTable(title="Normal Operating Ranges (over 30 days)")
datastore_allocation = SummaryTable(title="Datastore Allocation Summary")
datastore_usage = SummaryTable(title="Datastore Actual Usage Summary")
class InfraVmDetailsView(InfraVmView):
title = Text('#explorer_title_text')
toolbar = ConditionalSwitchableView(reference='entities.title')
@toolbar.register(lambda title: "VM and Instance" in title or "Virtual Machine" in title)
class VmsToolbar(InfraVmDetailsToolbar):
pass
@toolbar.register(lambda title: "Template" in title)
class TemplatesToolbar(InfraGenericDetailsToolbar):
pass
sidebar = View.nested(VmsTemplatesAccordion)
entities = View.nested(VMDetailsEntities)
@property
def is_displayed(self):
if isinstance(self.context['object'], InfraVm):
expected_titles = ['VM and Instance "{}"'.format(self.context["object"].name),
'Virtual Machine "{}"'.format(self.context["object"].name)]
elif isinstance(self.context['object'], InfraTemplate):
expected_titles = ['VM Template and Image "{}"'.format(self.context["object"].name)]
else:
return False
expected_provider = self.context['object'].provider.name
try:
relationships = self.entities.summary('Relationships')
relationship_provider_name = relationships.get_text_of('Infrastructure Provider')
except NameError:
currently_selected = self.sidebar.vmstemplates.tree.currently_selected[-1]
if currently_selected in ['<Archived>', '<Orphaned>']:
return (
self.in_infra_vms and
self.entities.title.text in expected_titles)
self.logger.warning(
'No "Infrastructure Provider" Relationship, VM details view not displayed'
)
return False
return (
self.in_infra_vms and
self.entities.title.text in expected_titles and
relationship_provider_name == expected_provider)
class InfraVmTimelinesView(TimelinesView, InfraVmView):
@property
def is_displayed(self):
if self.breadcrumb.is_displayed:
check_object = self.breadcrumb.locations
else:
# since in 5.10 there is no breadcrumb
check_object = self.title.text
return (
self.context['object'].name in check_object and
# this last check is less specific due to BZ 1732517
"Timeline" in self.title.text
)
class InfraVmReconfigureView(BaseLoggedInPage):
title = Text('#explorer_title_text')
memory = BootstrapSwitch(name='cb_memory')
# memory set to True unlocks the following (order matters - first type then value!):
mem_size_unit = BootstrapSelect(id='mem_type')
mem_size = WInput(id='memory_value')
cpu = BootstrapSwitch(name='cb_cpu')
# cpu set to True unlocks the following:
sockets = BootstrapSelect(id='socket_count')
cores_per_socket = BootstrapSelect(id='cores_per_socket_count')
cpu_total = WInput() # read-only, TODO widgetastic
disks_table = DisksTable('//div/table[./../h3[normalize-space(text())="Disks"]//button]',
column_widgets={
"Type": BootstrapSelect(id="hdType"),
"Mode": BootstrapSelect(id="hdMode"),
"Size": WInput(id="dvcSize"),
"ControllerType": BootstrapSelect(id="Controller"),
"Unit": BootstrapSelect(id="hdUnit"),
"Dependent": BootstrapSwitch(name="vm.cb_dependent"),
"Delete Backing": BootstrapSwitch(name="cb_deletebacking"),
"Actions": Button(),
# second action button, 'Cancel Add' or 'Delete' depending on context of row
# https://github.com/RedHatQE/widgetastic.core/issues/95
9: Button(),
}
)
cd_dvd_table = WTable('//div/table[./../h3[normalize-space(text())="CD/DVD Drives"]]',
column_widgets={
"Host File": BootstrapSelect(id="isoName"),
"Actions": Button(),
# second action button, 'Cancel Add' or 'Delete' depending on context of row
# https://github.com/RedHatQE/widgetastic.core/issues/95
3: Button('Connect'),
}
)
affected_vms = Table('.//div[@id="records_div" or @id="miq-gtl-view"]//table')
submit_button = Button('Submit', classes=[Button.PRIMARY])
cancel_button = Button('Cancel', classes=[Button.DEFAULT])
@property
def is_displayed(self):
return (self.title.text == 'Reconfigure Virtual Machine' and
len([row for row in self.affected_vms.rows()]) == 1 and
self.context['object'].name in [row.name.text for row in self.affected_vms.rows()])
class InfraVmSnapshotToolbar(View):
"""The toolbar on the snapshots page"""
history = Dropdown('History')
reload = Button(title='Refresh this page')
create = Button(title='Create a new snapshot for this VM')
delete = Dropdown('Delete Snapshots')
revert = Button(title='Revert to selected snapshot')
class InfraVmSnapshotView(InfraVmView):
"""The Snapshots page"""
toolbar = View.nested(InfraVmSnapshotToolbar)
sidebar = View.nested(VmsTemplatesAccordion)
title = Text('#explorer_title_text')
description = Text('//label[normalize-space(.)="Description"]/../div/p|'
'//td[@class="key" and normalize-space(.)="Description"]/..'
'/td[not(contains(@class, "key"))]')
size = Text('.//label[normalize-space(.)="Size"]/../div/p')
tree = ManageIQTree('snapshot_treebox')
@property
def is_displayed(self):
"""Is this view being displayed"""
expected_title = '"Snapshots" for Virtual Machine "{}"'.format(self.context['object'].name)
return self.in_infra_vms and self.title.text == expected_title
class InfraVmSnapshotAddView(InfraVmView):
"""Add a snapshot"""
title = Text('#explorer_title_text')
name = TextInput('name')
description = TextInput('description')
snapshot_vm_memory = SnapshotMemorySwitch()
create = Button('Create')
cancel = Button('Cancel')
is_displayed = displayed_not_implemented
class InfraVmGenealogyToolbar(View):
"""The toolbar on the genalogy page"""
history = Dropdown(title='History')
reload = Button(title='Refresh this page')
edit_tags = Button(title='Edit Tags for this VM')
compare = Button(title='Compare selected VMs')
class InfraVmGenealogyView(InfraVmView):
"""The Genealogy page"""
toolbar = View.nested(InfraVmGenealogyToolbar)
sidebar = View.nested(VmsTemplatesAccordion)
title = Text('#explorer_title_text')
tree = CheckableBootstrapTreeview('genealogy_treebox')
@property
def is_displayed(self):
"""Is this view being displayed"""
expected_title = '"Genealogy" for Virtual Machine "{}"'.format(self.context['object'].name)
return self.in_infra_vms and self.title.text == expected_title
class VMDisk(
namedtuple('VMDisk', ['filename', 'size', 'size_unit', 'type', 'mode'])):
"""Represents a single VM disk
Note:
Cannot be changed once created.
"""
EQUAL_ATTRS = {'type', 'mode', 'size_mb'}
def __eq__(self, other):
# If both have filename, it's easy
if self.filename and other.filename:
return self.filename == other.filename
# If one of filenames is None (before disk is created), compare the rest
for eq_attr in self.EQUAL_ATTRS:
if getattr(self, eq_attr) != getattr(other, eq_attr):
return False
return True
@property
def size_mb(self):
return self.size * 1024 if self.size_unit == 'GB' else self.size
class VMHardware(object):
"""Represents VM's hardware, i.e. CPU (cores, sockets) and memory
"""
EQUAL_ATTRS = {'cores_per_socket', 'sockets', 'mem_size_mb'}
def __init__(self, cores_per_socket=None, sockets=None, mem_size=None, mem_size_unit='MB'):
self.cores_per_socket = cores_per_socket
self.sockets = sockets
self.mem_size = mem_size
self.mem_size_unit = mem_size_unit
def __eq__(self, other):
for eq_attr in self.EQUAL_ATTRS:
if getattr(self, eq_attr) != getattr(other, eq_attr):
return False
return True
@property
def mem_size_mb(self):
return self.mem_size * 1024 if self.mem_size_unit == 'GB' else self.mem_size
class VMConfiguration(Pretty):
"""Represents VM's full configuration - hardware, disks and so forth
Args:
vm: VM that exists within current appliance
Note:
        It can only be instantiated by fetching an existing VM's configuration, as it is
        designed to be used to reconfigure an existing VM.
"""
pretty_attrs = ['hw', 'num_disks']
def __init__(self, vm):
self.hw = VMHardware()
self.disks = []
self.vm = vm
self._load()
def __eq__(self, other):
return (
(self.hw == other.hw) and (self.num_disks == other.num_disks) and
all(disk in other.disks for disk in self.disks))
def _load(self):
"""Loads the configuration from the VM object's appliance (through DB)
"""
appl_db = self.vm.appliance.db.client
# Hardware
ems = appl_db['ext_management_systems']
vms = appl_db['vms']
hws = appl_db['hardwares']
hw_data = appl_db.session.query(ems, vms, hws).filter(
ems.name == self.vm.provider.name).filter(
vms.ems_id == ems.id).filter(
vms.name == self.vm.name).filter(
hws.vm_or_template_id == vms.id
).first().hardwares
self.hw = VMHardware(
hw_data.cpu_cores_per_socket, hw_data.cpu_sockets, hw_data.memory_mb, 'MB')
hw_id = hw_data.id
# Disks
disks = appl_db['disks']
disks_data = appl_db.session.query(disks).filter(
disks.hardware_id == hw_id).filter(
disks.device_type == 'disk'
).all()
for disk_data in disks_data:
# In DB stored in bytes, but UI default is GB
size_gb = disk_data.size / (1024 ** 3)
self.disks.append(
VMDisk(
filename=disk_data.filename,
size=size_gb,
size_unit='GB',
type=disk_data.disk_type,
mode=disk_data.mode
))
def copy(self):
"""Returns a copy of this configuration
"""
config = VMConfiguration.__new__(VMConfiguration)
config.hw = copy(self.hw)
# We can just make shallow copy here because disks can be only added or deleted, not edited
config.disks = self.disks[:]
config.vm = self.vm
return config
def add_disk(self, size, size_unit='GB', type='thin', mode='persistent'):
"""Adds a disk to the VM
Args:
size: Size of the disk
size_unit: Unit of size ('MB' or 'GB')
type: Type of the disk ('thin' or 'thick')
mode: Mode of the disk ('persistent', 'independent_persistent' or
'independent_nonpersistent')
Note:
This method is designed to correspond with the DB, not with the UI.
In the UI, dependency is represented by a separate Yes / No option which is _incorrect_
design that we don't follow. Correctly, mode should be a selectbox of 3 items:
Persistent, Independent Persistent and Independent Nonpersistent.
Just Nonpersistent is an invalid setup that UI currently (5.8) allows.
"""
# New disk doesn't have a filename, until actually added
disk = VMDisk(filename=None, size=size, size_unit=size_unit, type=type, mode=mode)
self.disks.append(disk)
return disk
def delete_disk(self, filename=None, index=None):
"""Removes a disk of given filename or index"""
if filename:
disk = [disk for disk in self.disks if disk.filename == filename][0]
self.disks.remove(disk)
        elif index is not None:  # explicit check so that index 0 is also usable
            del self.disks[index]
else:
raise TypeError("Either filename or index must be specified")
@property
def num_disks(self):
return len(self.disks)
def get_changes_to_fill(self, other_configuration):
""" Returns changes to be applied to this config to reach the other config
Note:
Result of this method is used for form filling by VM's reconfigure method.
"""
changes = {}
changes['disks'] = []
for key in ['cores_per_socket', 'sockets']:
if getattr(self.hw, key) != getattr(other_configuration.hw, key):
changes[key] = str(getattr(other_configuration.hw, key))
changes['cpu'] = True
if (self.hw.mem_size != other_configuration.hw.mem_size or
self.hw.mem_size_unit != other_configuration.hw.mem_size_unit):
changes['memory'] = True
changes['mem_size'] = other_configuration.hw.mem_size
changes['mem_size_unit'] = other_configuration.hw.mem_size_unit
for disk in self.disks + other_configuration.disks:
if disk in self.disks and disk not in other_configuration.disks:
changes['disks'].append({'action': 'delete', 'disk': disk, 'delete_backing': None})
elif disk not in self.disks and disk in other_configuration.disks:
changes['disks'].append({'action': 'add', 'disk': disk})
return changes
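# Minimal sketch of driving a reconfiguration through the VMConfiguration class
# above (assumes ``vm`` is an existing InfraVm; the values are placeholders):
#     current = vm.configuration          # loaded from the appliance DB
#     desired = current.copy()
#     desired.hw.sockets = 2
#     desired.add_disk(size=1, size_unit='GB', type='thin', mode='persistent')
#     vm.reconfigure(new_configuration=desired)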
@attr.s
class InfraVm(VM):
"""Represents an infrastructure provider's virtual machine in CFME
Note the args are defined at common.BaseVM|VM class
Args:
name: Name of the virtual machine resource
provider: :py:class:`cfme.infrastructure.provider.InfraProvider` object
template_name: Name of the template to use for provisioning
"""
@attr.s
# TODO snapshot collections
class Snapshot(object):
snapshot_tree = ManageIQTree('snapshot_treebox')
name = attr.ib(default=None)
description = attr.ib(default=None)
memory = attr.ib(default=None)
parent_vm = attr.ib(default=None)
@property
def exists(self):
title = getattr(self, self.parent_vm.provider.SNAPSHOT_TITLE)
view = navigate_to(self.parent_vm, 'SnapshotsAll')
if view.tree.is_displayed:
root_item = view.tree.expand_path(self.parent_vm.name)
return has_child(view.tree, title, root_item)
else:
return False
@property
def active(self):
"""Check if the snapshot is active.
Returns:
bool: True if snapshot is active, False otherwise.
"""
title = getattr(self, self.parent_vm.provider.SNAPSHOT_TITLE)
view = navigate_to(self.parent_vm, 'SnapshotsAll')
root_item = view.tree.expand_path(self.parent_vm.name)
from cfme.infrastructure.provider.rhevm import RHEVMProvider
if self.parent_vm.provider.one_of(RHEVMProvider):
child = view.tree.child_items(root_item)
last_snapshot = view.tree.child_items(child[0])[0]
return (len(child) == 1 and
child[0].text == 'Active VM (Active)' and
last_snapshot.text == title)
else:
return has_child(view.tree, '{} (Active)'.format(title), root_item)
@property
def size(self):
"""
            Check the snapshot size in the UI. So far available only for RHV and CFME >= 5.11.
            :returns: the size of the snapshot
"""
from cfme.infrastructure.provider.rhevm import RHEVMProvider
if not self.parent_vm.provider.one_of(RHEVMProvider):
raise Exception("Provider is not RHV, this feature is not available")
if self.parent_vm.appliance.version < "5.11":
raise Exception("This feature is available only starting in CFME 5.11")
title = getattr(self, self.parent_vm.provider.SNAPSHOT_TITLE)
view = navigate_to(self.parent_vm, 'SnapshotsAll')
root_item = view.tree.expand_path(self.parent_vm.name)
snapshot_path = find_path(view.tree, title, root_item)
if not snapshot_path:
raise Exception('Could not find snapshot with name "{}"'.format(title))
else:
view.tree.click_path(*snapshot_path)
return view.size.text
def create(self, force_check_memory=False):
"""Create a snapshot"""
view = navigate_to(self.parent_vm, 'SnapshotsAdd')
snapshot_dict = {'description': self.description}
if self.name is not None:
snapshot_dict['name'] = self.name
if (force_check_memory or self.parent_vm.mgmt.is_running):
snapshot_dict["snapshot_vm_memory"] = self.memory
if force_check_memory and not view.snapshot_vm_memory.is_displayed:
raise NoSuchElementException('Snapshot VM memory checkbox not present')
view.fill(snapshot_dict)
view.create.click()
view.flash.assert_no_error()
list_view = self.parent_vm.create_view(InfraVmSnapshotView)
wait_for(lambda: self.exists, num_sec=300, delay=20,
fail_func=list_view.toolbar.reload.click, handle_exception=True,
message="Waiting for snapshot create")
def delete(self, cancel=False):
title = getattr(self, self.parent_vm.provider.SNAPSHOT_TITLE)
view = navigate_to(self.parent_vm, 'SnapshotsAll')
root_item = view.tree.expand_path(self.parent_vm.name)
snapshot_path = find_path(view.tree, title, root_item)
if not snapshot_path:
raise Exception('Could not find snapshot with name "{}"'.format(title))
else:
view.tree.click_path(*snapshot_path)
view.toolbar.delete.item_select('Delete Selected Snapshot', handle_alert=not cancel)
if not cancel:
# TODO: test this in test_snapshot_crud, just assert_no_error here
view.flash.assert_message(
"Delete Snapshot initiated for 1 VM and Instance from the "
"{} Database".format(self.parent_vm.appliance.product_name)
)
wait_for(lambda: not self.exists, num_sec=300, delay=20, fail_func=view.browser.refresh,
message="Waiting for snapshot delete")
def delete_all(self, cancel=False):
view = navigate_to(self.parent_vm, 'SnapshotsAll')
view.toolbar.delete.item_select('Delete All Existing Snapshots',
handle_alert=not cancel)
if not cancel:
# TODO: test this in test_snapshot_crud, just assert_no_error here
view.flash.assert_message(
"Delete All Snapshots initiated for 1 VM and Instance from the "
"{} Database".format(self.parent_vm.appliance.product_name)
)
def revert_to(self, cancel=False):
title = getattr(self, self.parent_vm.provider.SNAPSHOT_TITLE)
view = navigate_to(self.parent_vm, 'SnapshotsAll')
root_item = view.tree.expand_path(self.parent_vm.name)
snapshot_path = find_path(view.tree, title, root_item)
if not snapshot_path:
raise Exception('Could not find snapshot with name "{}"'.format(title))
else:
view.tree.click_path(*snapshot_path)
view.toolbar.revert.click(handle_alert=not cancel)
if not cancel:
# TODO: test this in test_snapshot_crud, just assert_no_error here
view.flash.assert_message(
"Revert to a Snapshot initiated for 1 VM and Instance "
"from the {} Database".format(self.parent_vm.appliance.product_name)
)
def refresh(self):
view = navigate_to(self.parent_vm, 'SnapshotsAll')
view.toolbar.reload.click()
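    # Sketch of the snapshot lifecycle exercised through the nested Snapshot class
    # above (``vm`` is an InfraVm; name and description are placeholders):
    #     snap = InfraVm.Snapshot(name='snap1', description='before upgrade',
    #                             parent_vm=vm)
    #     snap.create()
    #     snap.revert_to()
    #     snap.delete()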
# POWER CONTROL OPTIONS
SUSPEND = "Suspend"
POWER_ON = "Power On"
POWER_OFF = "Power Off"
GUEST_RESTART = "Restart Guest"
GUEST_SHUTDOWN = "Shutdown Guest"
RESET = "Reset"
# POWER STATE
STATE_ON = "on"
STATE_OFF = "off"
STATE_SUSPENDED = "suspended"
ALL_LIST_LOCATION = "infra_vms"
TO_OPEN_EDIT = "Edit this VM"
TO_OPEN_RECONFIGURE = "Reconfigure this VM"
TO_RETIRE = "Retire this VM"
VM_TYPE = "Virtual Machine"
DETAILS_VIEW_CLASS = InfraVmDetailsView
def migrate_vm(self, email=None, first_name=None, last_name=None,
host=None, datastore=None):
view = navigate_to(self, 'Migrate')
first_name = first_name or fauxfactory.gen_alphanumeric()
last_name = last_name or fauxfactory.gen_alphanumeric()
email = email or "{}@{}.test".format(first_name, last_name)
try:
prov_data = cfme_data["management_systems"][self.provider.key]["provisioning"]
host_name = host or prov_data.get("host")
datastore_name = datastore or prov_data.get("datastore")
except (KeyError, IndexError):
raise ValueError("You have to specify the correct options in cfme_data.yaml")
request_data = {
'request': {
'email': email,
'first_name': first_name,
'last_name': last_name,
},
'environment': {
'host_name': {'name': host_name}
},
}
from cfme.infrastructure.provider.rhevm import RHEVMProvider
if not self.provider.one_of(RHEVMProvider):
request_data['environment'].update({'datastore_name': {'name': datastore_name}})
view.form.fill_with(request_data, on_change=view.form.submit)
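    # Sketch of a migration call; the host/datastore names are placeholders and
    # any argument left out falls back to cfme_data or generated requester data:
    #     vm.migrate_vm(host='host2.example.com', datastore='datastore2')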
def clone_vm(self, email=None, first_name=None, last_name=None,
vm_name=None, provision_type=None):
view = navigate_to(self, 'Clone')
first_name = first_name or fauxfactory.gen_alphanumeric()
last_name = last_name or fauxfactory.gen_alphanumeric()
email = email or "{}@{}.test".format(first_name, last_name)
try:
prov_data = cfme_data["management_systems"][self.provider.key]["provisioning"]
except (KeyError, IndexError):
raise ValueError("You have to specify the correct options in cfme_data.yaml")
provisioning_data = {
'catalog': {'vm_name': vm_name,
'provision_type': provision_type},
'request': {
'email': email,
'first_name': first_name,
'last_name': last_name},
'environment': {"host_name": {'name': prov_data.get("host")},
"datastore_name": {"name": prov_data.get("datastore")}},
'network': {'vlan': partial_match(prov_data.get("vlan"))},
}
view.form.fill_with(provisioning_data, on_change=view.form.submit_button)
def publish_to_template(self, template_name, email=None, first_name=None, last_name=None):
view = navigate_to(self, 'Publish')
first_name = first_name or fauxfactory.gen_alphanumeric()
last_name = last_name or fauxfactory.gen_alphanumeric()
email = email or "{}@{}.test".format(first_name, last_name)
try:
prov_data = cfme_data["management_systems"][self.provider.key]["provisioning"]
except (KeyError, IndexError):
raise ValueError("You have to specify the correct options in cfme_data.yaml")
provisioning_data = {
'catalog': {'vm_name': template_name},
'request': {
'email': email,
'first_name': first_name,
'last_name': last_name},
'environment': {'host_name': {'name': prov_data.get('host')},
'datastore_name': {'name': prov_data.get('datastore')}},
}
from cfme.infrastructure.provider.rhevm import RHEVMProvider
if self.provider.one_of(RHEVMProvider):
provisioning_data['environment'] = {'automatic_placement': True}
view.form.fill_with(provisioning_data, on_change=view.form.submit_button)
cells = {'Description': 'Publish from [{}] to [{}]'.format(self.name, template_name)}
provision_request = self.appliance.collections.requests.instantiate(cells=cells)
provision_request.wait_for_request()
return self.appliance.collections.infra_templates.instantiate(template_name, self.provider)
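    # Sketch: publishing blocks on the provision request and hands back the
    # resulting InfraTemplate entity (the template name is a placeholder):
    #     template = vm.publish_to_template('published-from-vm')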
@property
def total_snapshots(self):
"""Returns the number of snapshots for this VM. If it says ``None``, returns ``0``."""
view = navigate_to(self, "Details")
snapshots = view.entities.summary("Properties").get_text_of("Snapshots").strip().lower()
if snapshots == "none":
return 0
else:
return int(snapshots)
@property
def current_snapshot_name(self):
"""Returns the current snapshot name."""
view = navigate_to(self, 'SnapshotsAll')
active_snapshot = view.tree.selected_item if view.tree.is_displayed else None
if active_snapshot:
return active_snapshot.text.split(' (Active')[0]
@property
def current_snapshot_description(self):
"""Returns the current snapshot description."""
view = navigate_to(self, 'SnapshotsAll')
active_snapshot = view.tree.selected_item if view.tree.is_displayed else None
if active_snapshot:
active_snapshot.click()
return view.description.text
@property
def genealogy(self):
return Genealogy(self)
def get_vm_via_rest(self):
return self.appliance.rest_api.collections.vms.get(name=self.name)
def get_collection_via_rest(self):
return self.appliance.rest_api.collections.vms
@property
def cluster_id(self):
"""returns id of cluster current vm belongs to"""
vm = self.get_vm_via_rest()
return int(vm.ems_cluster_id)
@attr.s
class CfmeRelationship(object):
vm = attr.ib()
def navigate(self):
return navigate_to(self.vm, 'EditManagementEngineRelationship', wait_for_view=0)
def is_relationship_set(self):
return '<Not a Server>' not in self.get_relationship()
def get_relationship(self):
view = self.navigate()
rel = str(view.form.server.all_selected_options[0])
view.form.cancel_button.click()
return rel
def set_relationship(self, server_name, server_id, click_cancel=False):
view = self.navigate()
view.form.fill({'server': '{} ({})'.format(server_name, server_id)})
if click_cancel:
view.form.cancel_button.click()
else:
view.form.save_button.click()
view.flash.assert_success_message('Management Engine Relationship saved')
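    # Sketch of wiring a VM to a CFME server record via the nested class above
    # (the server name and id are placeholders):
    #     rel = InfraVm.CfmeRelationship(vm)
    #     if not rel.is_relationship_set():
    #         rel.set_relationship('EVM', 1)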
@property
def configuration(self):
return VMConfiguration(self)
def reconfigure(self, new_configuration=None, changes=None, cancel=False):
"""Reconfigures the VM based on given configuration or set of changes
Args:
new_configuration: VMConfiguration object with desired configuration
changes: Set of changes to request; alternative to new_configuration
See VMConfiguration.get_changes_to_fill to see expected format of the data
cancel: `False` if we want to submit the changes, `True` otherwise
"""
from cfme.infrastructure.provider.rhevm import RHEVMProvider
if not new_configuration and not changes:
raise TypeError(
"You must provide either new configuration or changes to apply.")
if new_configuration:
changes = self.configuration.get_changes_to_fill(new_configuration)
any_changes = any(v not in [None, []] for v in changes.values())
if not any_changes and not cancel:
raise ValueError("No changes specified - cannot reconfigure VM.")
vm_recfg = navigate_to(self, 'Reconfigure')
# We gotta add disks separately
fill_data = {k: v for k, v in changes.items() if k != 'disks'}
vm_recfg.fill(fill_data)
# Helpers for VM Reconfigure request
cpu_message = "Processor Sockets: {}{}".format(
changes.get("sockets", "1"), ", Processor Cores Per Socket: {}".format(
changes.get("cores_per_socket", "1"))) if changes.get("cpu", False) else None
ram_message = "Memory: {} {}".format(
changes.get("mem_size", "0"), changes.get("mem_size_unit", "MB")) if changes.get(
"memory", False) else None
disk_message = None
for disk_change in changes['disks']:
action, disk = disk_change['action'], disk_change['disk']
if action == 'add':
# TODO This conditional has to go, once the 'Dependent' switch is removed from UI
if 'independent' in disk.mode:
mode = disk.mode.split('independent_')[1]
dependent = False
else:
mode = disk.mode
dependent = True
row = vm_recfg.disks_table.click_add_disk()
row.type.fill(disk.type)
if not self.provider.one_of(RHEVMProvider):
row.mode.fill(mode)
row.dependent.fill(dependent)
row.size.fill(disk.size)
row.unit.fill(disk.size_unit)
row.actions.widget.click()
disk_message = 'Add Disks'
elif action == 'delete':
row = vm_recfg.disks_table.row(name=disk.filename)
# `delete_backing` removes disk from the env
row.delete_backing.fill(True)
if not self.provider.one_of(RHEVMProvider):
# second action button, delete, is column 9 on colspan
# https://github.com/RedHatQE/widgetastic.core/issues/95
row[9].widget.click()
else:
# for RHV there's only one action button
row.actions.widget.click()
disk_message = 'Remove Disks'
else:
raise ValueError("Unknown disk change action; must be one of: add, delete")
message = ", ".join([_f for _f in [ram_message, cpu_message, disk_message] if _f])
if cancel:
vm_recfg.cancel_button.click()
view = self.appliance.browser.create_view(InfraVmDetailsView)
view.flash.assert_success_message('VM Reconfigure Request was cancelled by the user')
else:
vm_recfg.submit_button.click()
view = self.appliance.browser.create_view(RequestsView)
view.flash.assert_success_message("VM Reconfigure Request was saved")
return self.appliance.collections.requests.instantiate(description="{} - {}".format(
self.name, message), partial_check=True)
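    # The ``changes`` alternative to reconfigure() takes the shape produced by
    # VMConfiguration.get_changes_to_fill(), e.g. (a sketch; note that the
    # 'disks' key must be present even when empty):
    #     vm.reconfigure(changes={'cpu': True, 'sockets': '2', 'disks': []})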
@property
def cluster(self):
vm_api = self.appliance.rest_api.collections.vms.get(name=self.name)
cluster_api = self.appliance.rest_api.collections.clusters.get(id=vm_api.ems_cluster_id)
cluster_api.reload(attributes='name')
return self.appliance.collections.clusters.instantiate(
name=cluster_api.name,
provider=self.provider
)
@property
def host(self):
vm_api = self.appliance.rest_api.collections.vms.get(name=self.name)
try:
vm_host = vm_api.host
except AttributeError:
logger.exception('No host attribute on rest_api vm entity')
return None
vm_host.reload(attributes='name')
host = self.appliance.collections.hosts.instantiate(name=vm_host.name,
provider=self.provider)
return host
@property
def datastore(self):
vm_api = self.appliance.rest_api.collections.vms.get(name=self.name)
vm_api.reload(attributes=['v_datastore_path'])
datastore_name = vm_api.v_datastore_path.split('/')[0]
return self.appliance.collections.datastores.instantiate(
name=datastore_name, provider=self.provider
)
@property
def vm_default_args(self):
"""Represents dictionary used for Vm/Instance provision with mandatory default args"""
provisioning = self.provider.data['provisioning']
inst_args = {
'request': {
'email': '[email protected]'},
'catalog': {
'vm_name': self.name},
'environment': {
'host_name': {'name': provisioning['host']},
'datastore_name': {'name': provisioning['datastore']}},
'network': {
'vlan': partial_match(provisioning['vlan'])}
}
return inst_args
@property
def vm_default_args_rest(self):
"""Represents dictionary used for REST API Vm/Instance provision with minimum required
default args
"""
from cfme.infrastructure.provider.rhevm import RHEVMProvider
if not self.provider.is_refreshed():
self.provider.refresh_provider_relationships()
wait_for(self.provider.is_refreshed, func_kwargs=dict(refresh_delta=10), timeout=600)
provisioning = self.provider.data['provisioning']
template_name = provisioning['template']
template = self.appliance.rest_api.collections.templates.get(name=template_name,
ems_id=self.provider.id)
host_id = self.appliance.rest_api.collections.hosts.get(name=provisioning['host']).id
ds_id = self.appliance.rest_api.collections.data_stores.get(
name=provisioning['datastore']).id
inst_args = {
"version": "1.1",
"template_fields": {
"guid": template.guid,
},
"vm_fields": {
"placement_auto": False,
"vm_name": self.name,
"request_type": "template",
"placement_ds_name": ds_id,
"placement_host_name": host_id,
"vlan": provisioning["vlan"],
},
"requester": {
"user_name": "admin",
"owner_email": "[email protected]",
"auto_approve": True,
},
"tags": {
},
"additional_values": {
},
"ems_custom_attributes": {
},
"miq_custom_attributes": {
}
}
if self.provider.one_of(RHEVMProvider):
inst_args['vm_fields']['provision_type'] = 'native_clone'
cluster_id = self.appliance.rest_api.collections.clusters.get(name='Default').id
inst_args['vm_fields']['placement_cluster_name'] = cluster_id
# BZ 1541036/1449157. <Template> uses template vnic_profile
return inst_args
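    # Sketch of consuming the payload above; ``provision_requests`` is assumed to
    # be the ManageIQ REST collection for provisioning (not exercised in this
    # module):
    #     payload = vm.vm_default_args_rest
    #     vm.appliance.rest_api.collections.provision_requests.action.create(**payload)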
@attr.s
class InfraVmCollection(VMCollection):
ENTITY = InfraVm
def all(self):
"""Return entities for all items in collection"""
# provider filter means we're viewing vms through provider details relationships
# provider filtered 'All' view includes vms and templates, can't be used
provider = self.filters.get('provider') # None if no filter, need for entity instantiation
view = navigate_to(provider or self,
'ProviderVms' if provider else 'VMsOnly')
# iterate pages here instead of use surf_pages=True because data is needed
entities = []
for _ in view.entities.paginator.pages(): # auto-resets to first page
page_entities = [entity for entity in view.entities.get_all(surf_pages=False)]
entities.extend(
# when provider filtered view, there's no provider data value
[self.instantiate(e.data['name'], provider or get_crud_by_name(e.data['provider']))
for e in page_entities
if e.data.get('provider') != ''] # safe provider check, orphaned shows no provider
)
# filtering
if self.filters.get("names"):
names = self.filters["names"]
entities = [e for e in entities if e.name in names]
if self.filters.get("name"):
name = self.filters["name"]
entities = [e for e in entities if e.name == name]
return entities
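    # Sketch: a 'provider' filter scopes all() to the provider-relationship view
    # (``provider`` stands in for an InfraProvider object):
    #     vms = appliance.collections.infra_vms.filter({'provider': provider}).all()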
@attr.s
class InfraTemplate(Template):
REMOVE_MULTI = "Remove Templates from the VMDB"
VM_TYPE = "Template"
@property
def genealogy(self):
return Genealogy(self)
@attr.s
class InfraTemplateCollection(TemplateCollection):
ENTITY = InfraTemplate
def all(self):
"""Return entities for all items in collection"""
# provider filter means we're viewing templates through provider details relationships
# provider filtered 'All' view includes vms and templates, can't be used
provider = self.filters.get('provider') # None if no filter, need for entity instantiation
view = navigate_to(provider or self,
'ProviderTemplates' if provider else 'TemplatesOnly')
# iterate pages here instead of use surf_pages=True because data is needed
entities = []
for _ in view.entities.paginator.pages(): # auto-resets to first page
page_entities = [entity for entity in view.entities.get_all(surf_pages=False)]
entities.extend(
# when provider filtered view, there's no provider data value
[self.instantiate(e.data['name'], provider or get_crud_by_name(e.data['provider']))
for e in page_entities
if e.data.get('provider') != ''] # safe provider check, orphaned shows no provider
)
return entities
@attr.s
class Genealogy(object):
"""Class, representing genealogy of an infra object with possibility of data retrieval
and comparison.
Args:
o: The :py:class:`InfraVm` or :py:class:`Template` object.
"""
mode_mapping = {
'exists': 'Exists Mode',
'details': 'Details Mode',
}
attr_mapping = {
'all': 'All Attributes',
'different': 'Attributes with different values',
'same': 'Attributes with same values',
}
obj = attr.ib()
def navigate(self):
return navigate_to(self.obj, 'GenealogyAll')
def compare(self, *objects, **kwargs):
"""Compares two or more objects in the genealogy.
Args:
            *objects: :py:class:`InfraVm` or :py:class:`InfraTemplate` or :py:class:`str` with name.
Keywords:
sections: Which sections to compare.
attributes: `all`, `different` or `same`. Default: `all`.
mode: `exists` or `details`. Default: `exists`."""
sections = kwargs.get('sections')
attributes = kwargs.get('attributes', 'all').lower()
mode = kwargs.get('mode', 'exists').lower()
assert len(objects) >= 2, 'You must specify at least two objects'
objects = [o.name if isinstance(o, (InfraVm, InfraTemplate)) else o for o in objects]
view = self.navigate()
for obj in objects:
if not isinstance(obj, list):
path = find_path(view.tree, obj)
view.tree.check_node(*path)
view.toolbar.compare.click()
view.flash.assert_no_errors()
# COMPARE PAGE
compare_view = self.obj.create_view('Compare')
if sections is not None:
            for section_path in sections:
                compare_view.tree.check_node(*section_path)
compare_view.apply.click()
compare_view.flash.assert_no_errors()
# Set requested attributes sets
getattr(compare_view.toolbar, self.attr_mapping[attributes]).click()
# Set the requested mode
getattr(compare_view.toolbar, self.mode_mapping[mode]).click()
@property
def tree(self):
"""Returns contents of the tree with genealogy"""
view = self.navigate()
return view.tree.read_contents()
@property
def ancestors(self):
"""Returns list of ancestors of the represented object."""
view = self.navigate()
path = find_path(view.tree, '(Selected)')
if not path:
raise ValueError("Something wrong happened, path not found!")
processed_path = []
for step in path[:-1]:
# We will remove the (parent) and (Selected) suffixes
processed_path.append(re.sub(r"\s*(?:\(Current\)|\(Parent\))$", "", step))
return processed_path
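# Sketch of a genealogy comparison; at least two objects (or plain name strings)
# must be passed, and mixing InfraVm/InfraTemplate instances with strings is allowed:
#     vm.genealogy.compare(vm, parent_template, attributes='different', mode='details')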
@navigator.register(Template, 'All')
@navigator.register(InfraVm, 'All')
@navigator.register(InfraTemplateCollection, 'All')
@navigator.register(InfraVmCollection, 'All')
class VmAllWithTemplates(CFMENavigateStep):
VIEW = VmsTemplatesAllView
prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')
def step(self, *args, **kwargs):
self.prerequisite_view.navigation.select('Compute', 'Infrastructure', 'Virtual Machines')
self.view.sidebar.vmstemplates.tree.click_path('All VMs & Templates')
def resetter(self, *args, **kwargs):
if self.view.pagination.is_displayed:
self.view.pagination.set_items_per_page(1000)
self.view.reset_page()
@navigator.register(InfraTemplateCollection, 'OrphanedAll')
@navigator.register(InfraVmCollection, 'OrphanedAll')
class OrphanedVms(CFMENavigateStep):
VIEW = OrphanedVmsAllView
prerequisite = NavigateToSibling('All')
def step(self, *args, **kwargs):
self.view.sidebar.vmstemplates.tree.click_path('All VMs & Templates', '<Orphaned>')
@navigator.register(InfraTemplateCollection, 'ArchivedAll')
@navigator.register(InfraVmCollection, 'ArchivedAll')
class ArchivedVms(CFMENavigateStep):
VIEW = ArchivedVmsAllView
prerequisite = NavigateToSibling('All')
def step(self, *args, **kwargs):
self.view.sidebar.vmstemplates.tree.click_path('All VMs & Templates', '<Archived>')
@navigator.register(InfraTemplateCollection, 'AllForProvider')
@navigator.register(InfraTemplate, 'AllForProvider')
@navigator.register(InfraVmCollection, 'AllForProvider')
@navigator.register(InfraVm, 'AllForProvider')
class VmAllWithTemplatesForProvider(CFMENavigateStep):
VIEW = VmTemplatesAllForProviderView
def prerequisite(self):
try:
navigate_to(self.obj, 'All')
except NavigationDestinationNotFound:
navigate_to(self.obj.parent, 'All')
def step(self, *args, **kwargs):
# provider has been passed, TODO remove this usage
if 'provider' in kwargs:
provider_name = kwargs['provider'].name
# the collection is navigation target, use its filter value
elif (isinstance(self.obj, (InfraTemplateCollection, InfraVmCollection)) and
self.obj.filters.get('provider')):
provider_name = self.obj.filters['provider'].name
elif isinstance(self.obj, (InfraTemplate, InfraVm)):
provider_name = self.obj.provider.name
else:
raise DestinationNotFound("Unable to identify a provider for AllForProvider navigation")
self.view.sidebar.vmstemplates.tree.click_path('All VMs & Templates', provider_name)
def resetter(self, *args, **kwargs):
self.view.reset_page()
@navigator.register(InfraTemplate, 'Details')
@navigator.register(InfraVm, 'Details')
class VmAllWithTemplatesDetails(CFMENavigateStep):
VIEW = InfraVmDetailsView
prerequisite = NavigateToSibling('AllForProvider')
def step(self, *args, **kwargs):
try:
entity_item = self.prerequisite_view.entities.get_entity(
name=self.obj.name, surf_pages=True)
except ItemNotFound:
raise ItemNotFound('Failed to locate VM/Template with name "{}"'.format(self.obj.name))
entity_item.click()
def resetter(self, *args, **kwargs):
self.view.toolbar.reload.click()
@navigator.register(InfraTemplate, 'ArchiveDetails')
@navigator.register(InfraVm, 'ArchiveDetails')
class ArchiveDetails(CFMENavigateStep):
VIEW = InfraVmDetailsView
prerequisite = NavigateToSibling('All')
def step(self, *args, **kwargs):
try:
entity_item = self.prerequisite_view.entities.get_entity(
name=self.obj.name, surf_pages=True)
except ItemNotFound:
raise ItemNotFound('Failed to locate VM/Template with name "{}"'.format(self.obj.name))
entity_item.click()
def resetter(self, *args, **kwargs):
self.view.toolbar.reload.click()
@navigator.register(InfraTemplate, 'AnyProviderDetails')
@navigator.register(InfraVm, 'AnyProviderDetails')
class VmAllWithTemplatesDetailsAnyProvider(VmAllWithTemplatesDetails):
"""
Page with details for VM or template.
This is required in case you want to get details about archived/orphaned VM/template.
In such case, you cannot get to the detail page by navigating from list of VMs for a provider
since archived/orphaned VMs has lost its relationship with the original provider.
"""
prerequisite = NavigateToSibling('All')
@navigator.register(InfraVmCollection, 'VMsOnly')
class VmAll(CFMENavigateStep):
VIEW = VmsOnlyAllView
prerequisite = NavigateToSibling('All')
def step(self, *args, **kwargs):
if 'filter_folder' not in kwargs:
self.view.sidebar.vms.tree.click_path('All VMs')
elif 'filter_folder' in kwargs and 'filter_name' in kwargs:
self.view.sidebar.vms.tree.click_path('All VMs', kwargs['filter_folder'],
kwargs['filter_name'])
else:
raise DestinationNotFound("the destination isn't found")
def resetter(self, *args, **kwargs):
self.view.reset_page()
@navigator.register(InfraVm, 'VMsOnlyDetails')
class VmDetails(CFMENavigateStep):
VIEW = InfraVmDetailsView
prerequisite = NavigateToAttribute('parent', 'VMsOnly')
def step(self, *args, **kwargs):
try:
row = self.prerequisite_view.entities.get_entity(name=self.obj.name,
surf_pages=True)
except ItemNotFound:
raise ItemNotFound('Failed to locate VM/Template with name "{}"'.format(self.obj.name))
row.click()
def resetter(self, *args, **kwargs):
self.view.toolbar.reload.click()
@navigator.register(InfraVm, 'SnapshotsAll')
class VmSnapshotsAll(CFMENavigateStep):
VIEW = InfraVmSnapshotView
prerequisite = NavigateToSibling('Details')
def step(self, *args, **kwargs):
self.prerequisite_view.entities.summary('Properties').click_at('Snapshots')
@navigator.register(InfraVm, 'SnapshotsAdd')
class VmSnapshotsAdd(CFMENavigateStep):
VIEW = InfraVmSnapshotAddView
prerequisite = NavigateToSibling('SnapshotsAll')
def step(self, *args, **kwargs):
if self.prerequisite_view.tree.is_displayed:
self.prerequisite_view.tree.click_path(self.obj.name)
self.prerequisite_view.toolbar.create.click()
@navigator.register(InfraTemplate, 'GenealogyAll')
@navigator.register(InfraVm, 'GenealogyAll')
class VmGenealogyAll(CFMENavigateStep):
VIEW = InfraVmGenealogyView
prerequisite = NavigateToSibling('Details')
def step(self, *args, **kwargs):
self.prerequisite_view.entities.summary('Relationships').click_at('Genealogy')
@navigator.register(InfraVm, 'Migrate')
class VmMigrate(CFMENavigateStep):
VIEW = MigrateVmView
prerequisite = NavigateToSibling('Details')
def step(self, *args, **kwargs):
self.prerequisite_view.toolbar.lifecycle.item_select("Migrate this VM")
@navigator.register(InfraVm, 'Publish')
class VmPublish(CFMENavigateStep):
VIEW = PublishVmView
prerequisite = NavigateToSibling('Details')
def step(self, *args, **kwargs):
self.prerequisite_view.toolbar.lifecycle.item_select("Publish this VM to a Template")
@navigator.register(InfraVm, 'Clone')
class VmClone(CFMENavigateStep):
VIEW = CloneVmView
prerequisite = NavigateToSibling('Details')
def step(self, *args, **kwargs):
self.prerequisite_view.toolbar.lifecycle.item_select("Clone this VM")
@navigator.register(InfraVm, 'SetRetirement')
class SetRetirement(CFMENavigateStep):
VIEW = RetirementViewWithOffset
prerequisite = NavigateToSibling('Details')
def step(self, *args, **kwargs):
self.prerequisite_view.toolbar.lifecycle.item_select('Set Retirement Date')
@navigator.register(InfraTemplateCollection, 'TemplatesOnly')
class TemplatesAll(CFMENavigateStep):
VIEW = TemplatesOnlyAllView
prerequisite = NavigateToSibling('All')
def step(self, *args, **kwargs):
if 'filter_folder' not in kwargs:
self.view.sidebar.templates.tree.click_path('All Templates')
elif 'filter_folder' in kwargs and 'filter_name' in kwargs:
self.view.sidebar.templates.tree.click_path('All Templates', kwargs['filter_folder'],
kwargs['filter_name'])
else:
raise DestinationNotFound("the destination isn't found")
@navigator.register(InfraVmCollection, 'Provision')
class ProvisionVM(CFMENavigateStep):
VIEW = ProvisionView
prerequisite = NavigateToSibling('All')
def step(self, *args, **kwargs):
self.prerequisite_view.toolbar.lifecycle.item_select('Provision VMs')
@navigator.register(InfraVm, 'Timelines')
class Timelines(CFMENavigateStep):
VIEW = InfraVmTimelinesView
prerequisite = NavigateToSibling('Details')
def step(self, *args, **kwargs):
self.prerequisite_view.toolbar.monitoring.item_select('Timelines')
@navigator.register(InfraVm, 'Reconfigure')
class VmReconfigure(CFMENavigateStep):
VIEW = InfraVmReconfigureView
prerequisite = NavigateToSibling('Details')
def step(self, *args, **kwargs):
self.prerequisite_view.toolbar.configuration.item_select('Reconfigure this VM')
@navigator.register(InfraVm, 'Edit')
class VmEdit(CFMENavigateStep):
VIEW = EditView
prerequisite = NavigateToSibling('Details')
def step(self, *args, **kwargs):
self.prerequisite_view.toolbar.configuration.item_select('Edit this VM')
@navigator.register(InfraVm, 'EditManagementEngineRelationship')
class VmEngineRelationship(CFMENavigateStep):
VIEW = ManagementEngineView
prerequisite = NavigateToSibling('Details')
def step(self, *args, **kwargs):
self.prerequisite_view.toolbar.configuration.item_select(
'Edit Management Engine Relationship')
@navigator.register(InfraTemplate, 'SetOwnership')
@navigator.register(InfraVm, 'SetOwnership')
class SetOwnership(CFMENavigateStep):
VIEW = SetOwnershipView
prerequisite = NavigateToSibling('Details')
# No am_i_here because the page only indicates name and not provider
def step(self, *args, **kwargs):
self.prerequisite_view.toolbar.configuration.item_select('Set Ownership')
@navigator.register(InfraVm, 'Rename')
class Rename(CFMENavigateStep):
VIEW = RenameVmView
prerequisite = NavigateToSibling('Details')
def step(self, *args, **kwargs):
self.prerequisite_view.toolbar.configuration.item_select('Rename this VM')
@navigator.register(InfraVm, 'candu')
class VmUtilization(CFMENavigateStep):
@property
def VIEW(self): # noqa
return self.obj.provider.vm_utilization_view
prerequisite = NavigateToSibling('Details')
def step(self, *args, **kwargs):
self.prerequisite_view.toolbar.monitoring.item_select('Utilization')
@navigator.register(InfraVm, 'PolicySimulation')
class PolicySimulation(CFMENavigateStep):
VIEW = PolicySimulationView
prerequisite = NavigateToSibling('Details')
def step(self, *args, **kwargs):
self.prerequisite_view.toolbar.policy.item_select('Policy Simulation')
@navigator.register(InfraVmCollection, "PolicySimulation") # noqa
class PolicySimulationOnCollection(CFMENavigateStep):
VIEW = PolicySimulationView
def prerequisite(self):
provider = self.obj.filters.get("provider") # None if no filter
if provider:
return navigate_to(provider, "ProviderVms")
else:
return navigate_to(self.obj, "All")
def step(self, *args, **kwargs):
# click the checkbox of every object in the filtered collection
for entity in self.obj.all():
self.prerequisite_view.entities.get_entity(name=entity.name, surf_pages=True).check()
self.prerequisite_view.toolbar.policy.item_select("Policy Simulation")
| gpl-2.0 | 6,130,493,969,100,131,000 | 38.058252 | 100 | 0.630406 | false |
flexpeace/btb | scanblog/scanning/views.py | 1 | 31871 | import json
import datetime
import tempfile
import logging
logger = logging.getLogger("django.request")
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required, permission_required
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.db import transaction
from django.db.models import Q
from django.shortcuts import get_object_or_404, render, redirect
from django.utils.translation import ugettext as _
from django.http import Http404, HttpResponseBadRequest
from django.contrib.auth.views import logout
from celery.result import AsyncResult
from scanblog.celery import app
from btb.utils import args_method_decorator, permission_required_or_deny, JSONView
from scanning import utils, tasks
from scanning.models import *
from scanning.forms import LockForm, TranscriptionForm, ScanUploadForm, \
FlagForm, get_org_upload_form
from annotations.models import Tag, Note, ReplyCode, handle_flag_spam
from annotations.tasks import send_flag_notification_email
from profiles.models import Organization, Profile, Affiliation
from comments.forms import CommentForm
def get_boolean(val):
return bool(val == "true" or val == "1")
class Scans(JSONView):
@permission_required_or_deny("scanning.change_scan")
def get(self, request, obj_id=None):
if obj_id:
scans = Scan.objects.filter(pk=obj_id)
else:
scans = Scan.objects.all().order_by('-created')
if request.GET.get("processing_complete"):
scans = scans.filter(
processing_complete=
get_boolean(request.GET.get("processing_complete"))
)
if request.GET.get("managed"):
try:
managed = bool(int(request.GET.get('managed')))
except ValueError:
managed = False
if managed:
scans = scans.filter(
Q(author__isnull=True) |
Q(author__profile__managed=True)
)
else:
scans = scans.filter(author__profile__managed=False)
if request.GET.get("editlock__isnull"):
scans = scans.filter(
editlock__isnull=
get_boolean(request.GET.get("editlock__isnull"))
)
# A scan can be valid two ways: 1 -- the author is in the moderator's
# orgs. 2 -- the scan's selected org is one of the author's orgs.
# Hence the "extra_q".
scans = scans.org_filter(request.user)
return self.paginated_response(request, scans)
class ScanSplits(JSONView):
"""
{
"scan": scan
"documents": [{
"id": document id
"type": type
"title": title or ""
"pages": [id,id,id,...]
}]
}
"""
def clean_params(self, request):
kw = json.loads(request.body)
return kw
@permission_required_or_deny("scanning.change_scan")
def get(self, request, obj_id=None):
try:
scan = Scan.objects.org_filter(request.user, pk=obj_id).get()
except Scan.DoesNotExist:
raise PermissionDenied
try:
lock = EditLock.objects.get(scan__pk=scan.pk)
if lock.user == request.user:
lock.save()
except EditLock.DoesNotExist:
lock = EditLock.objects.create(user=request.user, scan=scan)
tasks.expire_editlock.apply_async(args=[lock.id], countdown=60*5)
split = {
"scan": scan.to_dict(),
"documents": [],
"lock": lock.to_dict() if lock.user_id != request.user.id else None
}
        # This query returns one document row per scan page it contains, so multi-page documents show up more than once.
documents = Document.objects.order_by(
'documentpage__scan_page__order'
).distinct().filter(scan__pk=scan.pk)
# Since we got duplicates, filter them down here.
visited = set()
for doc in documents:
if doc.id in visited:
continue
visited.add(doc.id)
split['documents'].append({
"id": doc.pk,
"type": doc.type,
"title": doc.title,
"status": doc.status,
"pages": list(doc.documentpage_set.order_by("order").values_list("scan_page__pk", flat=True))
})
return self.json_response(split)
@permission_required_or_deny("scanning.change_scan", "scanning.add_document",
"scanning.change_document", "scanning.delete_document")
def post(self, request, obj_id=None):
"""
        Execute splits for a scan. This may update existing models or create
        new ones.
"""
logger.debug("Starting split")
with transaction.atomic():
try:
scan = Scan.objects.org_filter(request.user, pk=obj_id).get()
except Scan.DoesNotExist:
raise PermissionDenied
params = self.clean_params(request)
#
# Save scan.
#
try:
scan.author = Profile.objects.org_filter(request.user,
pk=params["scan"]["author"]["id"]
).get().user
scan.processing_complete = params["scan"]["processing_complete"]
except (KeyError, TypeError, Profile.DoesNotExist):
# Processing complete is always False if there is no author; hence
# two cases in the try block.
scan.author = None
scan.processing_complete = False
scan.save()
# Set pending scan.
ps_code = (params['scan'].pop("pendingscan_code", None) or "").strip()
try:
has_ps = bool(scan.pendingscan)
except PendingScan.DoesNotExist:
has_ps = False
if not ps_code and has_ps:
# Remove the cached pendingscan reference. Ugh. (simply setting
# scan.pendingscan to None raises an error)
ps = scan.pendingscan
ps.scan = None
ps.completed = None
ps.save()
scan = Scan.objects.get(pk=scan.pk)
elif ps_code:
try:
ps = PendingScan.objects.org_filter(
request.user, code=ps_code.strip()
).get()
except PendingScan.DoesNotExist:
pass
else:
if ps.scan_id != scan.id:
ps.scan = scan
ps.completed = datetime.datetime.now()
ps.save()
#
# Split documents
#
docs = []
for doc in params["documents"]:
if ("pages" not in doc) or (len(doc["pages"]) == 0):
# Delete stale document.
if "id" in doc:
try:
Document.objects.org_filter(
request.user, pk=doc["id"]
).get().full_delete()
except Document.DoesNotExist:
pass
continue
if "id" in doc:
# Retrieve existing document.
try:
document = Document.objects.org_filter(
request.user, pk=doc["id"]
).get()
except Document.DoesNotExist:
raise PermissionDenied
else:
# Create new document.
if doc["type"] in ("request", "license"):
status = "unpublishable"
else:
status = "unknown"
document = Document.objects.create(
scan=scan,
editor=request.user,
author=scan.author,
type=doc["type"],
status=status,
)
# Create tickets
if doc["type"] == "request":
Note.objects.create(
text="Request from scan.",
document=document,
resolved=None,
creator=request.user,
)
elif doc["type"] == "license" and \
not document.author.profile.consent_form_received:
Note.objects.create(
text="Please check this license agreement, then update the user's license status accordingly.",
document=document,
resolved=None,
creator=request.user,
)
# Apportion pages.
pages = []
# We need to transfer old page transforms to new document pages,
# indexed by the scanpage_id, which persists.
old_page_transformations = {}
# ... and do the same for highlight_transform -- we need to change
# the documentpage_id to the new documentpage_id.
if document.highlight_transform:
old_highlight_transform = json.loads(document.highlight_transform)
else:
old_highlight_transform = ""
highlight_scan_page_id = None
# Loop through current pages to get info to transfer to new pages,
# and delete the old pages.
for page in document.documentpage_set.all():
old_page_transformations[page.scan_page_id] = page.transformations
# Capture the old highlight transform's scan page ID.
if old_highlight_transform and \
page.pk == old_highlight_transform["document_page_id"]:
highlight_scan_page_id = page.scan_page_id
page.full_delete()
# Clear the highlight transform so that it remains 'valid' even if
# something goes wrong in identifying it with an old scan_page_id.
document.highlight_transform = ""
# Recreate the new pages, reusing the old transforms.
for order,scanpage_id in enumerate(doc["pages"]):
documentpage = DocumentPage.objects.create(
document=document,
scan_page=ScanPage.objects.get(pk=scanpage_id),
order=order,
transformations=old_page_transformations.get(scanpage_id, "{}"),
)
# Reuse the old highlight transform, if it matches.
if scanpage_id == highlight_scan_page_id:
old_highlight_transform["document_page_id"] = documentpage.pk
document.highlight_transform = json.dumps(old_highlight_transform)
document.save()
document.documentpage_set = pages
docs.append(document)
scan.document_set = docs
# Must do update_document_images outside transaction.atomic
for document in docs:
if document.status in ("published", "ready"):
# Persist any changes to highlight_transform.
tasks.update_document_images.delay(document.pk).get()
#XXX Shouldn't be necessary but seems to be.
scan.save()
return self.get(request, obj_id=scan.pk)
class MissingHighlight(Exception):
pass
class Documents(JSONView):
def clean_params(self, request):
kw = json.loads(request.body)
return kw
@permission_required_or_deny("scanning.change_document")
def get(self, request, obj_id=None):
docs = Document.objects.org_filter(request.user)
g = request.GET.get
if g("author__profile__managed", 0) == "1":
docs = docs.filter(author__profile__managed=True)
if g("author_id", None):
docs = docs.filter(author__pk=g("author_id"))
if g("type", None):
docs = docs.filter(type=g("type"))
if g("idlist", None):
ids = [a for a in g("idlist").split(".") if a]
if not ids:
raise Http404
docs = [b for a,b in sorted(docs.in_bulk(ids).items())]
if g("status", None):
docs = docs.filter(status=g("status"))
#TODO: EditLock's for documents.
return self.paginated_response(request, docs)
@permission_required_or_deny("scanning.change_document")
def put(self, request, obj_id=None):
try:
with transaction.atomic():
try:
doc = Document.objects.org_filter(request.user, pk=obj_id).get()
except Document.DoesNotExist:
raise PermissionDenied
kw = self.clean_params(request)
try:
doc.author = Profile.objects.org_filter(
request.user,
pk=kw['author']['id']
).get().user
except Profile.DoesNotExist:
raise PermissionDenied
doc.editor = request.user
doc.title = kw['title']
if doc.type == "post":
try:
assert len(kw['highlight_transform']['crop']) > 0
except (AssertionError, KeyError):
raise MissingHighlight
doc.highlight_transform = json.dumps(kw['highlight_transform'])
if not kw['in_reply_to']:
doc.in_reply_to = None
else:
reply_code = ReplyCode.objects.get(code__iexact=kw['in_reply_to'])
# Avoid recursion.
if reply_code.pk != doc.reply_code.pk:
doc.in_reply_to = reply_code
else:
doc.in_reply_to = None
# Set affiliation, if any
try:
doc.affiliation = Affiliation.objects.org_filter(request.user).get(
pk=kw['affiliation']['id'])
except (TypeError, KeyError, Affiliation.DoesNotExist):
doc.affiliation = None
doc.adult = kw['adult']
                doc.date_written = kw['date_written']
                # Mark the status unknown so other processes won't try to serve
                # this document until we're done rebuilding it.
                doc.status = "unknown"
doc.save()
# tags
tags = []
for name in kw['tags'].split(';'):
name = name.strip()
if name:
tag, created = Tag.objects.get_or_create(name=name.strip().lower())
tags.append(tag)
doc.tags = tags
# pages
order_changed = []
for page in kw['pages']:
docpage = doc.documentpage_set.get(pk=page['id'])
transformations = json.dumps(page['transformations'] or "")
if docpage.transformations != transformations:
docpage.transformations = transformations
docpage.save()
if page['order'] != docpage.order:
                        # Two-phase reorder: first park the page at a nonsensical
                        # negative order so orders can't clash, then set the
                        # correct order but don't save it until we're all done.
docpage.order = -docpage.order - 1
docpage.save()
docpage.order = page['order']
order_changed.append(docpage)
for page in order_changed:
page.save()
except MissingHighlight:
return HttpResponseBadRequest("Missing highlight.")
#XXX this additional save should not be needed, but seems to be. Issue
# with transaction.atomic() ?
doc.save()
# Split images.
result = tasks.update_document_images.delay(
document_id=doc.pk, status=kw['status']).get()
logger.debug(u"post image update {}".format(doc.highlight_transform))
# Update to get current status after task finishes.
doc = Document.objects.get(pk=doc.pk)
response = self.json_response(doc.to_dict())
return response
#
# Pending scan CRUD
#
class PendingScans(JSONView):
@permission_required_or_deny("scanning.change_pendingscan")
def get(self, request, obj_id=None):
if obj_id:
pendingscans = PendingScan.objects.filter(pk=obj_id)
elif "missing" in request.GET:
pendingscans = PendingScan.objects.missing()
elif "pending" in request.GET:
pendingscans = PendingScan.objects.pending()
elif "fulfilled" in request.GET:
pendingscans = PendingScan.objects.fulfilled()
else:
pendingscans = PendingScan.objects.all()
if "author_id" in request.GET:
pendingscans = pendingscans.filter(author__pk=request.GET["author_id"])
pendingscans = pendingscans.org_filter(request.user)
return self.paginated_response(request, pendingscans)
@permission_required_or_deny("scanning.add_pendingscan")
def post(self, request, obj_id=None):
params = json.loads(request.body)
try:
org = Organization.objects.org_filter(
request.user, pk=params["org_id"]
).get()
except Organization.DoesNotExist:
raise PermissionDenied
try:
author = Profile.objects.org_filter(
request.user, pk=params["author_id"]
).get().user
except Profile.DoesNotExist:
raise PermissionDenied
pendingscan = PendingScan.objects.create(
editor=self.request.user,
author=author,
org=org,
)
return self.json_response(pendingscan.to_dict())
@permission_required_or_deny("scanning.change_pendingscan")
def put(self, request, obj_id=None):
try:
ps = PendingScan.objects.org_filter(
request.user, pk=obj_id
).get()
except PendingScan.DoesNotExist:
raise PermissionDenied
params = json.loads(request.body)
if 'missing' in params:
if params['missing'] == 1:
ps.completed = datetime.datetime.now()
else:
ps.completed = None
ps.save()
return self.json_response(ps.to_dict())
@permission_required_or_deny("scanning.delete_scan")
def delete(self, request, obj_id=None):
try:
ps = PendingScan.objects.org_filter(
request.user, pk=obj_id
).get()
except PendingScan.DoesNotExist:
raise PermissionDenied
ps.delete()
return self.json_response(ps.to_dict())
class ScanCodes(JSONView):
def get(self, request):
if "term" not in request.GET:
raise Http404
pss = PendingScan.objects.org_filter(
request.user,
code__icontains=request.GET.get("term"),
scan__isnull=True,
)
return self.json_response([ps.to_dict() for ps in pss])
@permission_required("scanning.add_scan")
def scan_add(request):
"""Displays a form for uploading a scan."""
FormClass = get_org_upload_form(request.user)
form = FormClass(request.POST or None, request.FILES or None, types={
"pdf": "application/pdf",
"zip": "application/zip",
})
if form.is_valid():
if request.FILES['file'].name.lower().endswith(".zip"):
with tempfile.NamedTemporaryFile(delete=False, suffix="scans.zip") as fh:
for chunk in request.FILES['file'].chunks():
fh.write(chunk)
fh.flush()
task_id = tasks.process_zip.delay(filename=fh.name,
uploader_id=request.user.pk,
org_id=form.cleaned_data['organization'].pk,
redirect=reverse("moderation.home")
)
else:
path = tasks.move_scan_file(uploaded_file=request.FILES['file'])
scan = Scan.objects.create(
uploader=request.user,
pdf=os.path.relpath(path, settings.MEDIA_ROOT),
under_construction=True,
org=form.cleaned_data['organization'])
task_id = tasks.split_scan.delay(scan_id=scan.pk,
redirect=reverse("moderation.edit_scan", args=[scan.pk]))
return redirect('moderation.wait_for_processing', task_id)
return render(request, "scanning/upload.html", {'form': form})
@permission_required("scanning.change_scan")
def scan_merge(request, scan_id):
""" Merge an existing scan with a new file """
try:
scan = Scan.objects.org_filter(request.user, pk=scan_id).get()
except Scan.DoesNotExist:
raise Http404
form = ScanUploadForm(request.POST or None, request.FILES or None, types={
'pdf': 'application/pdf',
})
if form.is_valid():
with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as fh:
for chunk in request.FILES['file'].chunks():
fh.write(chunk)
name = fh.name
task_id = tasks.merge_scans.delay(
scan_id=scan_id,
filename=name,
redirect=reverse("moderation.edit_scan", args=[scan.pk])
)
return redirect("moderation.wait_for_processing", task_id)
return render(request, "scanning/merge.html", {'form': form})
@permission_required("scanning.change_scan")
def scan_replace(request, scan_id=None):
try:
scan = Scan.objects.org_filter(request.user, pk=scan_id).get()
except Scan.DoesNotExist:
raise PermissionDenied
form = ScanUploadForm(request.POST or None, request.FILES or None, types={
"pdf": "application/pdf",
})
if form.is_valid():
filepath = tasks.move_scan_file(uploaded_file=request.FILES['file'])
scan.full_delete(filesonly=True)
scan.uploader = request.user
scan.pdf = os.path.relpath(filepath, settings.MEDIA_ROOT)
scan.save()
task_id = tasks.split_scan.delay(
scan_id=scan.pk,
redirect=reverse("moderation.edit_scan", args=[scan.pk])
)
return redirect('moderation.wait_for_processing', task_id)
return render(request, "scanning/replace.html", {'form': form})
@permission_required("scanning.delete_scan")
def scan_delete(request, scan_id=None):
try:
scan = Scan.objects.org_filter(request.user, pk=scan_id).get()
except Scan.DoesNotExist:
raise PermissionDenied
if request.method != "POST":
return render(request, "scanning/delete.html", {
'scan': scan
})
scan.full_delete()
messages.info(request, "Scan deleted.")
return redirect(reverse("moderation.home") + "#/process")
@permission_required("scanning.delete_document")
def doc_delete(request, document_id=None):
try:
doc = Document.objects.org_filter(request.user, pk=document_id).get()
except Document.DoesNotExist:
raise PermissionDenied
if request.method != 'POST':
return redirect(reverse("moderation.edit_doc", document_id))
doc.full_delete()
messages.info(request, "Document deleted.")
return redirect(reverse("moderation.home") + "#/process")
@permission_required('scanning.change_scan')
def scan_reimport(request, scan_id=None):
try:
scan = Scan.objects.org_filter(request.user, pk=scan_id).get()
except Scan.DoesNotExist:
raise PermissionDenied
if request.method != "POST":
return render(request, "scanning/reimport.html", {
'scan': scan
})
task_id = tasks.process_scan.delay(
scan_id=scan_id,
redirect=reverse("moderation.home") + \
"#/process/scan/%s" % scan.id
).task_id
return redirect("moderation.wait_for_processing", task_id)
#
# Transcriptions
#
@permission_required('scanning.change_transcription')
def transcribe_document(request, document_id):
"""Show and process the form for editing a transcription."""
if not settings.TRANSCRIPTION_OPEN:
raise Http404
document = get_object_or_404(Document, pk=document_id)
if not document.scan_id:
raise Http404
can_lock = request.user.has_perm('scanning.change_locked_transcription')
try:
transcription = document.transcription
except Transcription.DoesNotExist:
transcription = Transcription(document=document)
if transcription.locked and not can_lock:
raise PermissionDenied
if can_lock:
lockform = LockForm(request.POST or None)
else:
lockform = ''
current = transcription.current()
if current:
initial = {'body': current.body, 'complete': transcription.complete}
else:
initial = None
form = TranscriptionForm(request.POST or None, initial=initial)
if form.is_valid():
if lockform and lockform.is_valid():
transcription.locked = lockform.cleaned_data['lock_transcription']
transcription.save()
# "sugar" is a honeypot for spam
if form.has_changed() and not request.POST.get("sugar", None):
# Don't add a revision for rapid changes.
cutoff = datetime.datetime.now() - datetime.timedelta(seconds=120)
transcription.complete = form.cleaned_data.get('complete', False)
transcription.save()
if (current and current.editor == request.user and
cutoff < current.modified):
current.body = form.cleaned_data['body']
current.save()
else:
if not current or current.body != form.cleaned_data['body']:
current = TranscriptionRevision.objects.create(
revision=current.revision + 1 if current else 0,
transcription=transcription,
body=form.cleaned_data['body'],
editor=request.user
)
messages.success(request, _("Thanks for your attention to detail. Transcription updated."))
if document.type == "post":
return redirect("scanning.after_transcribe_comment", document_id=document.pk)
return redirect(document.get_absolute_url() + "#transcription")
pages = document.documentpage_set.all()
return render(request, "scanning/transcription_edit.html", {
'lockform': lockform,
'transcription': transcription,
'document': document,
'documentpages': pages,
'documentpage_count': pages.count(),
'form': form,
'cancel_link': document.get_absolute_url(),
})
@permission_required("scanning.change_transcription")
def after_transcribe_comment(request, document_id):
"""
Prompt for a comment after a transcription is done.
"""
document = get_object_or_404(Document, pk=document_id,
type="post",
scan__isnull=False,
transcription__isnull=False)
# Don't prompt for comment if they've already commented on this post.
if document.comments.filter(user=request.user).exists() or \
(not settings.COMMENTS_OPEN) or \
document.author.profile.comments_disabled:
return redirect(document.get_absolute_url() + "#transcription")
if document.transcription.complete:
prompt_text = "Thanks for writing! I finished the transcription for your post."
else:
prompt_text = "Thanks for writing! I worked on the transcription for your post."
form = CommentForm(request.POST or None, initial={
'comment': prompt_text
})
if form.is_valid():
comment, created = Comment.objects.get_or_create(
document=document,
comment=form.cleaned_data['comment'],
user=request.user,
)
if created:
comment.document = document
return redirect("%s#%s" % (request.path, comment.pk))
return render(request, "scanning/after_transcribe_comment.html", {
'document': document,
'form': form,
})
def revision_list(request, document_id):
"""
Main revision display.
"""
doc = get_object_or_404(Document, pk=document_id)
if doc.status != "published":
raise Http404
try:
revisions = list(doc.transcription.revisions.all())
except Transcription.DoesNotExist:
revisions = []
return render(request, "scanning/revision_list.html", {
'document' : doc,
'revisions': revisions,
})
def revision_compare(request, document_id):
"""
AJAX comparison between two revisions
"""
try:
document = Document.objects.get(pk=document_id)
earliest = TranscriptionRevision.objects.get(
transcription__document=document,
revision=int(request.GET['earliest']))
latest = TranscriptionRevision.objects.get(
transcription__document=document,
revision=int(request.GET['latest']))
except (KeyError, Document.DoesNotExist, TranscriptionRevision.DoesNotExist):
raise
return render(request, "scanning/_column_diff.html", {
'document': document, 'earliest': earliest, 'latest': latest,
})
@login_required
def flag_document(request, document_id):
"""
Flag a post.
"""
if not request.user.is_active:
raise PermissionDenied
doc = get_object_or_404(Document, pk=document_id)
form = FlagForm(request.POST or None)
if form.is_valid():
if handle_flag_spam(request.user, form.cleaned_data['reason']):
messages.info(request, _(u"Your account has been suspended due to behavior that looks like spam. If this is an error, please contact us using the contact link at the bottom of the page."))
logout(request)
return redirect("/")
ticket, created = Note.objects.get_or_create(
creator=request.user,
text="FLAG from user. \n %s" % form.cleaned_data['reason'],
resolved=None,
important=form.cleaned_data['urgent'],
document=doc,
)
# Queue up an async process to send notification email in 2 minutes (we
# delay to trap spam floods).
if created:
send_flag_notification_email.apply_async(
args=[ticket.pk], countdown=120)
messages.info(request, _(u"A moderator will review that post shortly. Thanks for helping us run a tight ship."))
return redirect(doc.get_absolute_url())
# redirect to confirmation.
return render(request, "scanning/flag.html", {
'form': form,
})
| agpl-3.0 | -3,211,643,835,057,357,300 | 38.739401 | 200 | 0.555395 | false |
yuntae1000/SoftwareDesignFall15 | Mini_project1/MP1_postfeedback.py | 1 | 3420 | #code to mining the urls from google and save it to local .txt files
# using patter. to search from Google
# I integrated all files in one single file after the feedback
# Using functions and readable documents
from pattern.web import Google
import indicoio
indicoio.config.api_key = '8d05933c4c2ca769d1e064dfbea1fe8a'
# print the averaged results of the political analysis for each journal
# declare arrays which hold the raw urls mined with pattern.web
# new york times urls, cbs news urls, wall street journal urls, fox news urls
rawurl_nytimes=[]
rawurl_cbsnews=[]
rawurl_wsj=[]
rawurl_foxnews=[]
journal_names=['nytimes', 'cbsnews', 'wsj', 'foxnews']
rawurls=[rawurl_nytimes, rawurl_cbsnews, rawurl_wsj, rawurl_foxnews]
result_page=4
ny_analysis=[]
cbs_analysis=[]
wsj_analysis=[]
foxnews_analysis=[]
analysis=[ny_analysis,cbs_analysis, wsj_analysis,foxnews_analysis]
folders=["url_nytimes.txt", "url_cbsnews.txt", "url_wsj.txt", "url_foxnews.txt"]
g=Google()
#get the New York Times url
def get_articles(journal_num):
for i in range(1,result_page):
		# search google results corresponding to the following keyword
for result in g.search('Donald Trump opinion site:'+journal_names[journal_num]+'.com', start=i):
rawurls[journal_num].append(result.url)
# save the urls to a local file in order to reduce queries;
# we will use this file for the analysis later on
def saveinfile(journal_num):
f=open('url_'+journal_names[journal_num]+'.txt', "w")
print >>f, rawurls[journal_num]
f.close()
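# the saved file holds a single list repr line such as
# [u'http://example.com/a', u'http://example.com/b'] (hypothetical urls);
# analyze_text below re-parses it by splitting on commas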
def get_save_articles(journal_num):
get_articles(journal_num)
saveinfile(journal_num)
# get_save_articles chains the mining and saving steps; the loop at the
# bottom of the file acts as the "master function" from the feedback,
# chaining all of the steps in the process together
## get and save articles from all 4 media outlets
# open the url files which we saved while harvesting and run the political analysis
##########################################
# for each file, split the saved string by comma into an array of urls
def analyze_text(journal_num):
f= open(folders[journal_num], 'r')
line=f.readline()
	url_dummy=line.split(',') # split the saved list repr on commas to get all urls from the file
for i in range(len(url_dummy)-1):
		# strip the list-repr punctuation ([u' and ') around each url
		url_dummy[i]=url_dummy[i][3:-1]
	url_dummy[-1]=url_dummy[-1][3:-2] ## the last url carries one more trailing character, strip it too
	## run the political analysis through the indicoio API and append each result to the array
for j in range(len(url_dummy)):
analysis[journal_num].append(indicoio.political(url_dummy[j]))
f.close()
## get the average of the analysis:
## add up the stats from all the urls and divide by the number of urls
def political_analysis(journal_num):
sum_stats=[0,0,0,0] #sum of all stats gained from indicoio
	for i in range(len(analysis[journal_num])):
sum_stats[0]=sum_stats[0]+analysis[journal_num][i]["Libertarian"]
sum_stats[1]=sum_stats[1]+analysis[journal_num][i]["Green"]
sum_stats[2]=sum_stats[2]+analysis[journal_num][i]["Liberal"]
sum_stats[3]=sum_stats[3]+analysis[journal_num][i]["Conservative"]
aver_stats=[0,0,0,0]
for i in range(4):
		aver_stats[i]=sum_stats[i]/float(len(analysis[journal_num])) # divide by the number of urls to get the average
print journal_names[journal_num]+" [Libertarian , Green , Liberal , Conservative]"
print aver_stats
# get_save_articles(0)
# get_save_articles(1)
# get_save_articles(2)
# get_save_articles(3)
for i in range(4):
get_save_articles(i)
analyze_text(i)
political_analysis(i)
| mit | -1,029,171,061,040,329,300 | 30.376147 | 98 | 0.713158 | false |
arielalmendral/ert | python/tests/gui/plottery/test_plot_style.py | 1 | 6924 | import datetime
from ert.test import ExtendedTestCase
from ert_gui.plottery import PlotStyle, PlotConfig, PlotLimits
class PlotStyleTest(ExtendedTestCase):
def test_plot_style_test_defaults(self):
style = PlotStyle("Test")
self.assertEqual(style.name, "Test")
self.assertEqual(style.color, "#000000")
self.assertEqual(style.line_style, "-")
self.assertEqual(style.alpha, 1.0)
self.assertEqual(style.marker, "")
self.assertEqual(style.width, 1.0)
self.assertEqual(style.size, 7.5)
self.assertTrue(style.isEnabled())
style.line_style = None
style.marker = None
self.assertEqual(style.line_style, "")
self.assertEqual(style.marker, "")
def test_plot_style_builtin_checks(self):
style = PlotStyle("Test")
style.name = None
self.assertIsNone(style.name)
style.color = "notacolor"
self.assertEqual(style.color, "notacolor") # maybe make this a proper check in future ?
style.line_style = None
self.assertEqual(style.line_style, "")
style.marker = None
self.assertEqual(style.marker, "")
style.width = -1
self.assertEqual(style.width, 0.0)
style.size = -1
self.assertEqual(style.size, 0.0)
style.alpha = 1.1
self.assertEqual(style.alpha, 1.0)
style.alpha = -0.1
self.assertEqual(style.alpha, 0.0)
style.setEnabled(False)
self.assertFalse(style.isEnabled())
def test_plot_style_copy_style(self):
style = PlotStyle("Test", "red", 0.5, ".", "o", 2.5)
style.setEnabled(False)
copy_style = PlotStyle("Copy")
copy_style.copyStyleFrom(style)
self.assertNotEqual(style.name, copy_style.name)
self.assertEqual(style.color, copy_style.color)
self.assertEqual(style.alpha, copy_style.alpha)
self.assertEqual(style.line_style, copy_style.line_style)
self.assertEqual(style.marker, copy_style.marker)
self.assertEqual(style.width, copy_style.width)
self.assertEqual(style.size, copy_style.size)
self.assertNotEqual(style.isEnabled(), copy_style.isEnabled())
another_copy_style = PlotStyle("Another Copy")
another_copy_style.copyStyleFrom(style, copy_enabled_state=True)
self.assertEqual(style.isEnabled(), another_copy_style.isEnabled())
def test_plot_config(self):
plot_config = PlotConfig("Golden Sample", x_label="x", y_label="y")
limits = PlotLimits()
limits.count_limits = 1, 2
limits.depth_limits = 3, 4
limits.density_limits = 5, 6
limits.date_limits = datetime.date(2005, 2, 5), datetime.date(2006, 2, 6)
limits.index_limits = 7, 8
limits.value_limits = 9.0, 10.0
plot_config.limits = limits
self.assertEqual(plot_config.limits, limits)
plot_config.setDistributionLineEnabled(True)
plot_config.setLegendEnabled(False)
plot_config.setGridEnabled(False)
plot_config.setRefcaseEnabled(False)
plot_config.setObservationsEnabled(False)
style = PlotStyle("test_style", line_style=".", marker="g", width=2.5, size=7.5)
plot_config.setDefaultStyle(style)
plot_config.setRefcaseStyle(style)
plot_config.setStatisticsStyle("mean", style)
plot_config.setStatisticsStyle("min-max", style)
plot_config.setStatisticsStyle("p50", style)
plot_config.setStatisticsStyle("p10-p90", style)
plot_config.setStatisticsStyle("p33-p67", style)
plot_config.setStatisticsStyle("std", style)
copy_of_plot_config = PlotConfig("Copy of Golden Sample")
copy_of_plot_config.copyConfigFrom(plot_config)
self.assertEqual(plot_config.isLegendEnabled(), copy_of_plot_config.isLegendEnabled())
self.assertEqual(plot_config.isGridEnabled(), copy_of_plot_config.isGridEnabled())
self.assertEqual(plot_config.isObservationsEnabled(), copy_of_plot_config.isObservationsEnabled())
self.assertEqual(plot_config.isDistributionLineEnabled(), copy_of_plot_config.isDistributionLineEnabled())
self.assertEqual(plot_config.refcaseStyle(), copy_of_plot_config.refcaseStyle())
self.assertEqual(plot_config.observationsStyle(), copy_of_plot_config.observationsStyle())
self.assertEqual(plot_config.histogramStyle(), copy_of_plot_config.histogramStyle())
self.assertEqual(plot_config.defaultStyle(), copy_of_plot_config.defaultStyle())
self.assertEqual(plot_config.currentColor(), copy_of_plot_config.currentColor())
self.assertEqual(plot_config.getStatisticsStyle("mean"), copy_of_plot_config.getStatisticsStyle("mean"))
self.assertEqual(plot_config.getStatisticsStyle("min-max"), copy_of_plot_config.getStatisticsStyle("min-max"))
self.assertEqual(plot_config.getStatisticsStyle("p50"), copy_of_plot_config.getStatisticsStyle("p50"))
self.assertEqual(plot_config.getStatisticsStyle("p10-p90"), copy_of_plot_config.getStatisticsStyle("p10-p90"))
self.assertEqual(plot_config.getStatisticsStyle("p33-p67"), copy_of_plot_config.getStatisticsStyle("p33-p67"))
self.assertEqual(plot_config.getStatisticsStyle("std"), copy_of_plot_config.getStatisticsStyle("std"))
self.assertEqual(plot_config.title(), copy_of_plot_config.title())
self.assertEqual(plot_config.limits, copy_of_plot_config.limits)
plot_config.currentColor() # cycle state will not be copied
plot_config.nextColor()
copy_of_plot_config = PlotConfig("Another Copy of Golden Sample")
copy_of_plot_config.copyConfigFrom(plot_config)
self.assertEqual(plot_config.refcaseStyle(), copy_of_plot_config.refcaseStyle())
self.assertEqual(plot_config.observationsStyle(), copy_of_plot_config.observationsStyle())
self.assertNotEqual(plot_config.histogramStyle(), copy_of_plot_config.histogramStyle())
self.assertNotEqual(plot_config.defaultStyle(), copy_of_plot_config.defaultStyle())
self.assertNotEqual(plot_config.currentColor(), copy_of_plot_config.currentColor())
self.assertNotEqual(plot_config.getStatisticsStyle("mean"), copy_of_plot_config.getStatisticsStyle("mean"))
self.assertNotEqual(plot_config.getStatisticsStyle("min-max"), copy_of_plot_config.getStatisticsStyle("min-max"))
self.assertNotEqual(plot_config.getStatisticsStyle("p50"), copy_of_plot_config.getStatisticsStyle("p50"))
self.assertNotEqual(plot_config.getStatisticsStyle("p10-p90"), copy_of_plot_config.getStatisticsStyle("p10-p90"))
self.assertNotEqual(plot_config.getStatisticsStyle("p33-p67"), copy_of_plot_config.getStatisticsStyle("p33-p67"))
self.assertNotEqual(plot_config.getStatisticsStyle("std"), copy_of_plot_config.getStatisticsStyle("std"))
| gpl-3.0 | -1,916,162,239,338,717,200 | 41.740741 | 121 | 0.687031 | false |
ecotux/objectDetection | 04saveSVM.py | 1 | 1784 |
import cv2
import numpy as np
import os
import re
#############################################
#
# Gray magic:
# - the value of "C"
# - the value of "gamma"
# - the functions "preprocess*"
#
C = 20
gamma = 0.0005
#
# blurring image
#
def preprocess1(data):
img = cv2.GaussianBlur(data, (5,5), 0)
img = cv2.bilateralFilter(img,9,75,75)
img = cv2.fastNlMeansDenoisingColored(img,None,10,10,7,21)
return img
#
# data feature extraction
#
def preprocess2(data):
	# convert to YCrCb, flatten, and scale the values into [0,1]
	YCrCb = cv2.cvtColor(data, cv2.COLOR_BGR2YCR_CB)
	normalized = np.ravel(np.float32(YCrCb)) / 255.0
	# drop the first 76800 values, i.e. the top 80 rows (80*320*3) of the
	# interleaved 320x240 YCrCb image
	return normalized[76800:]
#############################################
#
# Main
#
#############################################
if __name__ == '__main__':
xRes = 320
yRes = 240
dirTrain = 'trainData/'
params = dict( kernel_type = cv2.SVM_RBF, svm_type = cv2.SVM_C_SVC, C = C, gamma = gamma )
# Loading Training Set
print "Loading Training Set..."
numTrainSamples = len([name for name in os.listdir(dirTrain)])
trainSamples = np.empty( (numTrainSamples, yRes, xRes, 3), dtype = np.uint8 )
targets = np.empty( numTrainSamples, dtype = np.float32 )
for i, nameFile in enumerate(os.listdir(dirTrain)):
match1=re.search(r"object(\d+)",nameFile)
if match1:
trainSamples[i] = cv2.imread(dirTrain+nameFile)
targets[i] = np.float32(match1.group(1))
# Preprocessing Training Set
print 'Preprocessing Training Set...'
trainSet = np.array([preprocess2(preprocess1(trainSamples[i])) for i in np.ndindex(trainSamples.shape[:1])])
# Training
print 'Training SVM...'
model = cv2.SVM()
model.train(trainSet, targets, params = params)
# Saving
print 'saving SVM...'
model.save("objectSVM.xml")
| mit | -5,204,423,392,927,158,000 | 20.582278 | 109 | 0.598094 | false |
eunchong/build | scripts/master/buildbucket/unittests/common_test.py | 1 | 1268 | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for common module"""
import unittest
import test_env # pylint: disable=W0611
from master.buildbucket import common
class CommonUnitTest(unittest.TestCase):
def test_parse_info_property_succeed(self):
value, expected = {'build': {'foo': 'bar'}}, {'build': {'foo': 'bar'}}
self.assertDictEqual(common.parse_info_property(value), expected)
value, expected = {'build': '{"foo": "bar"}'}, {'build': {'foo': 'bar'}}
self.assertDictEqual(common.parse_info_property(value), expected)
value, expected = '{"build": {"foo": "bar"}}', {'build': {'foo': 'bar'}}
self.assertDictEqual(common.parse_info_property(value), expected)
def test_parse_info_property_fail(self):
value = 'invalid json'
self.assertRaises(ValueError, lambda: common.parse_info_property(value))
value = {'build': "invalid json"}
self.assertRaises(ValueError, lambda: common.parse_info_property(value))
value = '42' # not a dict
self.assertRaises(ValueError, lambda: common.parse_info_property(value))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -5,558,177,832,832,482,000 | 32.368421 | 76 | 0.684543 | false |
tinkerinestudio/Tinkerine-Suite | TinkerineSuite/Cura/cura_sf/skeinforge_application/skeinforge_plugins/craft_plugins/stretch.py | 1 | 21092 | """
This page is in the table of contents.
Stretch is a very important Skeinforge plugin that allows you to partially compensate for the fact that extruded holes are smaller than they should be. It stretches the threads to partially compensate for filament shrinkage when extruded.
The stretch manual page is at:
http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Stretch
Extruded holes are smaller than the model because while printing an arc the head is depositing filament on both sides of the arc, but on the inside of the arc you actually need less material than on the outside of the arc. You can read more about this on the RepRap ArcCompensation page:
http://reprap.org/bin/view/Main/ArcCompensation
In general, stretch will widen holes and push corners out. In practice the filament contraction will not be identical to the algorithm, so even once the optimal parameters are determined, the stretch script will not be able to eliminate the inaccuracies caused by contraction, but it should reduce them.
All the defaults assume that the thread sequence choice setting in fill is the edge being extruded first, then the loops, then the infill. If the thread sequence choice is different, the optimal thread parameters will also be different. In general, if the infill is extruded first, the infill would have to be stretched more so that even after the filament shrinkage, it would still be long enough to connect to the loop or edge.
Holes should be made with the correct area for their radius. In other words, for example if your modeling program approximates a hole of radius one (area = pi) by making a square with the points at [(1,0), (0,1), (-1,0), (0,-1)] (area = 2), the radius should be increased by sqrt(pi/2). This can be done in fabmetheus xml by writing:
radiusAreal='True'
in the attributes of the object or any parent of that object. In other modeling programs, you'll have to do this manually or make a script. If area compensation is not done, then changing the stretch parameters to overcompensate for too-small hole areas will lead to incorrect compensation in other shapes.
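For example, that square has area 2 while the intended circle of radius one has area pi (about 3.1416), so multiplying the radius by sqrt(pi/2) (about 1.2533) restores the intended hole area.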
==Operation==
The default 'Activate Stretch' checkbox is off. When it is on, the functions described below will work, when it is off, the functions will not be called.
==Settings==
===Loop Stretch Over Perimeter Width===
Default is 0.1.
Defines the ratio of the maximum amount the loop aka inner shell threads will be stretched compared to the edge width, in general this value should be the same as the 'Perimeter Outside Stretch Over Perimeter Width' setting.
===Path Stretch Over Perimeter Width===
Default is zero.
Defines the ratio of the maximum amount the threads which are not loops, like the infill threads, will be stretched compared to the edge width.
===Perimeter===
====Perimeter Inside Stretch Over Perimeter Width====
Default is 0.32.
Defines the ratio of the maximum amount the inside edge thread will be stretched compared to the edge width; this is the most important setting in stretch. The higher the value, the more it will stretch the edge and the wider holes will be. If the value is too small, the holes could be drilled out after fabrication; if the value is too high, the holes would be too wide and the part would have to be junked.
====Perimeter Outside Stretch Over Perimeter Width====
Default is 0.1.
Defines the ratio of the maximum amount the outside edge thread will be stretched compared to the edge width, in general this value should be around a third of the 'Perimeter Inside Stretch Over Perimeter Width' setting.
===Stretch from Distance over Perimeter Width===
Default is two.
The stretch algorithm works by checking, at each turning point on the extrusion path, what the direction of the thread is at a distance of 'Stretch from Distance over Perimeter Width' times the edge width, on both sides, and moving the thread in the opposite direction. So it takes the current turning point, goes "Stretch from Distance over Perimeter Width" * "Perimeter Width" ahead, and reads the direction at that point. Then it goes the same distance back, and reads the direction at that other point. It then moves the thread in the opposite direction, away from the center of the arc formed by these 2 points+directions.
The magnitude of the stretch increases with:
the amount that the direction of the two threads is similar and
by the '..Stretch Over Perimeter Width' ratio.
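As a rough sketch of the idea (simplified pseudocode, not the exact implementation below; normalized is a hypothetical unit-vector helper):

	def stretched_point(point, ahead, behind, relative_stretch):
		# ahead and behind are sampled along the path at the stretch-from
		# distance on each side of the turning point
		away_from_arc_center = normalized((point - ahead) + (point - behind))
		return point + away_from_arc_center * relative_stretch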
==Examples==
The following examples stretch the file Screw Holder Bottom.stl. The examples are run in a terminal in the folder which contains Screw Holder Bottom.stl and stretch.py.
> python stretch.py
This brings up the stretch dialog.
> python stretch.py Screw Holder Bottom.stl
The stretch tool is parsing the file:
Screw Holder Bottom.stl
..
The stretch tool has created the file:
.. Screw Holder Bottom_stretch.gcode
"""
from __future__ import absolute_import
from fabmetheus_utilities.fabmetheus_tools import fabmetheus_interpret
from fabmetheus_utilities import archive
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import gcodec
from fabmetheus_utilities import settings
from skeinforge_application.skeinforge_utilities import skeinforge_craft
from skeinforge_application.skeinforge_utilities import skeinforge_polyfile
from skeinforge_application.skeinforge_utilities import skeinforge_profile
import sys
__author__ = 'Enrique Perez ([email protected])'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
#maybe speed up feedRate option
def getCraftedText( fileName, gcodeText, stretchRepository = None ):
"Stretch a gcode linear move text."
return getCraftedTextFromText( archive.getTextIfEmpty(fileName, gcodeText), stretchRepository )
def getCraftedTextFromText( gcodeText, stretchRepository = None ):
"Stretch a gcode linear move text."
if gcodec.isProcedureDoneOrFileIsEmpty( gcodeText, 'stretch'):
return gcodeText
if stretchRepository == None:
stretchRepository = settings.getReadRepository( StretchRepository() )
if not stretchRepository.activateStretch.value:
return gcodeText
return StretchSkein().getCraftedGcode( gcodeText, stretchRepository )
def getNewRepository():
'Get new repository.'
return StretchRepository()
def writeOutput(fileName, shouldAnalyze=True):
"Stretch a gcode linear move file. Chain stretch the gcode if it is not already stretched."
skeinforge_craft.writeChainTextWithNounMessage(fileName, 'stretch', shouldAnalyze)
class LineIteratorBackward(object):
"Backward line iterator class."
def __init__( self, isLoop, lineIndex, lines ):
self.firstLineIndex = None
self.isLoop = isLoop
self.lineIndex = lineIndex
self.lines = lines
def getIndexBeforeNextDeactivate(self):
"Get index two lines before the deactivate command."
for lineIndex in xrange( self.lineIndex + 1, len(self.lines) ):
line = self.lines[lineIndex]
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
firstWord = gcodec.getFirstWord(splitLine)
if firstWord == 'M103':
return lineIndex - 2
print('This should never happen in stretch, no deactivate command was found for this thread.')
raise StopIteration, "You've reached the end of the line."
def getNext(self):
"Get next line going backward or raise exception."
while self.lineIndex > 3:
if self.lineIndex == self.firstLineIndex:
raise StopIteration, "You've reached the end of the line."
if self.firstLineIndex == None:
self.firstLineIndex = self.lineIndex
nextLineIndex = self.lineIndex - 1
line = self.lines[self.lineIndex]
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
firstWord = gcodec.getFirstWord(splitLine)
if firstWord == 'M103':
if self.isLoop:
nextLineIndex = self.getIndexBeforeNextDeactivate()
else:
raise StopIteration, "You've reached the end of the line."
if firstWord == 'G1':
if self.isBeforeExtrusion():
if self.isLoop:
nextLineIndex = self.getIndexBeforeNextDeactivate()
else:
raise StopIteration, "You've reached the end of the line."
else:
self.lineIndex = nextLineIndex
return line
self.lineIndex = nextLineIndex
raise StopIteration, "You've reached the end of the line."
def isBeforeExtrusion(self):
"Determine if index is two or more before activate command."
linearMoves = 0
for lineIndex in xrange( self.lineIndex + 1, len(self.lines) ):
line = self.lines[lineIndex]
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
firstWord = gcodec.getFirstWord(splitLine)
if firstWord == 'G1':
linearMoves += 1
if firstWord == 'M101':
return linearMoves > 0
if firstWord == 'M103':
return False
print('This should never happen in isBeforeExtrusion in stretch, no activate command was found for this thread.')
return False
class LineIteratorForward(object):
"Forward line iterator class."
def __init__( self, isLoop, lineIndex, lines ):
self.firstLineIndex = None
self.isLoop = isLoop
self.lineIndex = lineIndex
self.lines = lines
def getIndexJustAfterActivate(self):
"Get index just after the activate command."
for lineIndex in xrange( self.lineIndex - 1, 3, - 1 ):
line = self.lines[lineIndex]
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
firstWord = gcodec.getFirstWord(splitLine)
if firstWord == 'M101':
return lineIndex + 1
print('This should never happen in stretch, no activate command was found for this thread.')
raise StopIteration, "You've reached the end of the line."
def getNext(self):
"Get next line or raise exception."
while self.lineIndex < len(self.lines):
if self.lineIndex == self.firstLineIndex:
raise StopIteration, "You've reached the end of the line."
if self.firstLineIndex == None:
self.firstLineIndex = self.lineIndex
nextLineIndex = self.lineIndex + 1
line = self.lines[self.lineIndex]
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
firstWord = gcodec.getFirstWord(splitLine)
if firstWord == 'M103':
if self.isLoop:
nextLineIndex = self.getIndexJustAfterActivate()
else:
raise StopIteration, "You've reached the end of the line."
self.lineIndex = nextLineIndex
if firstWord == 'G1':
return line
raise StopIteration, "You've reached the end of the line."
class StretchRepository(object):
"A class to handle the stretch settings."
def __init__(self):
"Set the default settings, execute title & settings fileName."
skeinforge_profile.addListsToCraftTypeRepository('skeinforge_application.skeinforge_plugins.craft_plugins.stretch.html', self )
self.fileNameInput = settings.FileNameInput().getFromFileName( fabmetheus_interpret.getGNUTranslatorGcodeFileTypeTuples(), 'Open File for Stretch', self, '')
self.openWikiManualHelpPage = settings.HelpPage().getOpenFromAbsolute('http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Stretch')
self.activateStretch = settings.BooleanSetting().getFromValue('Activate Stretch', self, False )
self.crossLimitDistanceOverEdgeWidth = settings.FloatSpin().getFromValue( 3.0, 'Cross Limit Distance Over Perimeter Width (ratio):', self, 10.0, 5.0 )
self.loopStretchOverEdgeWidth = settings.FloatSpin().getFromValue( 0.05, 'Loop Stretch Over Perimeter Width (ratio):', self, 0.25, 0.11 )
self.pathStretchOverEdgeWidth = settings.FloatSpin().getFromValue( 0.0, 'Path Stretch Over Perimeter Width (ratio):', self, 0.2, 0.0 )
settings.LabelSeparator().getFromRepository(self)
settings.LabelDisplay().getFromName('- Perimeter -', self )
self.edgeInsideStretchOverEdgeWidth = settings.FloatSpin().getFromValue( 0.12, 'Perimeter Inside Stretch Over Perimeter Width (ratio):', self, 0.52, 0.32 )
self.edgeOutsideStretchOverEdgeWidth = settings.FloatSpin().getFromValue( 0.05, 'Perimeter Outside Stretch Over Perimeter Width (ratio):', self, 0.25, 0.1 )
settings.LabelSeparator().getFromRepository(self)
self.stretchFromDistanceOverEdgeWidth = settings.FloatSpin().getFromValue( 1.0, 'Stretch From Distance Over Perimeter Width (ratio):', self, 3.0, 2.0 )
self.executeTitle = 'Stretch'
def execute(self):
"Stretch button has been clicked."
fileNames = skeinforge_polyfile.getFileOrDirectoryTypesUnmodifiedGcode(self.fileNameInput.value, fabmetheus_interpret.getImportPluginFileNames(), self.fileNameInput.wasCancelled)
for fileName in fileNames:
writeOutput(fileName)
class StretchSkein(object):
"A class to stretch a skein of extrusions."
def __init__(self):
self.distanceFeedRate = gcodec.DistanceFeedRate()
self.edgeWidth = 0.4
self.extruderActive = False
self.feedRateMinute = 959.0
self.isLoop = False
self.layerCount = settings.LayerCount()
self.lineIndex = 0
self.lines = None
self.oldLocation = None
def getCraftedGcode( self, gcodeText, stretchRepository ):
"Parse gcode text and store the stretch gcode."
self.lines = archive.getTextLines(gcodeText)
self.stretchRepository = stretchRepository
self.parseInitialization()
for self.lineIndex in xrange(self.lineIndex, len(self.lines)):
line = self.lines[self.lineIndex]
self.parseStretch(line)
return self.distanceFeedRate.output.getvalue()
def getCrossLimitedStretch( self, crossLimitedStretch, crossLineIterator, locationComplex ):
"Get cross limited relative stretch for a location."
try:
line = crossLineIterator.getNext()
except StopIteration:
return crossLimitedStretch
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
pointComplex = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine).dropAxis()
pointMinusLocation = locationComplex - pointComplex
pointMinusLocationLength = abs( pointMinusLocation )
if pointMinusLocationLength <= self.crossLimitDistanceFraction:
return crossLimitedStretch
parallelNormal = pointMinusLocation / pointMinusLocationLength
parallelStretch = euclidean.getDotProduct( parallelNormal, crossLimitedStretch ) * parallelNormal
if pointMinusLocationLength > self.crossLimitDistance:
return parallelStretch
crossNormal = complex( parallelNormal.imag, - parallelNormal.real )
crossStretch = euclidean.getDotProduct( crossNormal, crossLimitedStretch ) * crossNormal
crossPortion = ( self.crossLimitDistance - pointMinusLocationLength ) / self.crossLimitDistanceRemainder
return parallelStretch + crossStretch * crossPortion
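	# Geometry note for the cross limiting above (informal): the stretch s is
	# decomposed against the unit vector n pointing from the neighbor point to
	# the location,
	#   parallel = (n . s) * n,   cross = s - parallel
	# the cross part is kept in full for close neighbors, dropped entirely
	# beyond crossLimitDistance, and faded linearly in between via crossPortion.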
def getRelativeStretch( self, locationComplex, lineIterator ):
"Get relative stretch for a location."
lastLocationComplex = locationComplex
oldTotalLength = 0.0
pointComplex = locationComplex
totalLength = 0.0
while 1:
try:
line = lineIterator.getNext()
except StopIteration:
locationMinusPoint = locationComplex - pointComplex
locationMinusPointLength = abs( locationMinusPoint )
if locationMinusPointLength > 0.0:
return locationMinusPoint / locationMinusPointLength
return complex()
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
firstWord = splitLine[0]
pointComplex = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine).dropAxis()
locationMinusPoint = lastLocationComplex - pointComplex
locationMinusPointLength = abs( locationMinusPoint )
totalLength += locationMinusPointLength
if totalLength >= self.stretchFromDistance:
distanceFromRatio = ( self.stretchFromDistance - oldTotalLength ) / locationMinusPointLength
totalPoint = distanceFromRatio * pointComplex + ( 1.0 - distanceFromRatio ) * lastLocationComplex
locationMinusTotalPoint = locationComplex - totalPoint
return locationMinusTotalPoint / self.stretchFromDistance
lastLocationComplex = pointComplex
oldTotalLength = totalLength
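	# Informal reading of the return value: for one direction this is
	# (location - p) / stretchFromDistance, where p sits stretchFromDistance
	# along the thread, so summing the forward and backward directions in
	# getStretchedLineFromIndexLocation yields a curvature-like vector that
	# points away from the chord of the bend.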
def getStretchedLine( self, splitLine ):
"Get stretched gcode line."
location = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine)
self.feedRateMinute = gcodec.getFeedRateMinute( self.feedRateMinute, splitLine )
self.oldLocation = location
if self.extruderActive and self.threadMaximumAbsoluteStretch > 0.0:
return self.getStretchedLineFromIndexLocation( self.lineIndex - 1, self.lineIndex + 1, location )
if self.isJustBeforeExtrusion() and self.threadMaximumAbsoluteStretch > 0.0:
return self.getStretchedLineFromIndexLocation( self.lineIndex - 1, self.lineIndex + 1, location )
return self.lines[self.lineIndex]
def getStretchedLineFromIndexLocation( self, indexPreviousStart, indexNextStart, location ):
"Get stretched gcode line from line index and location."
crossIteratorForward = LineIteratorForward( self.isLoop, indexNextStart, self.lines )
crossIteratorBackward = LineIteratorBackward( self.isLoop, indexPreviousStart, self.lines )
iteratorForward = LineIteratorForward( self.isLoop, indexNextStart, self.lines )
iteratorBackward = LineIteratorBackward( self.isLoop, indexPreviousStart, self.lines )
locationComplex = location.dropAxis()
relativeStretch = self.getRelativeStretch( locationComplex, iteratorForward ) + self.getRelativeStretch( locationComplex, iteratorBackward )
relativeStretch *= 0.8
relativeStretch = self.getCrossLimitedStretch( relativeStretch, crossIteratorForward, locationComplex )
relativeStretch = self.getCrossLimitedStretch( relativeStretch, crossIteratorBackward, locationComplex )
relativeStretchLength = abs( relativeStretch )
if relativeStretchLength > 1.0:
relativeStretch /= relativeStretchLength
absoluteStretch = relativeStretch * self.threadMaximumAbsoluteStretch
stretchedPoint = location.dropAxis() + absoluteStretch
return self.distanceFeedRate.getLinearGcodeMovementWithFeedRate( self.feedRateMinute, stretchedPoint, location.z )
def isJustBeforeExtrusion(self):
"Determine if activate command is before linear move command."
for lineIndex in xrange( self.lineIndex + 1, len(self.lines) ):
line = self.lines[lineIndex]
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
firstWord = gcodec.getFirstWord(splitLine)
if firstWord == 'G1' or firstWord == 'M103':
return False
if firstWord == 'M101':
return True
# print('This should never happen in isJustBeforeExtrusion in stretch, no activate or deactivate command was found for this thread.')
return False
def parseInitialization(self):
'Parse gcode initialization and store the parameters.'
for self.lineIndex in xrange(len(self.lines)):
line = self.lines[self.lineIndex]
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
firstWord = gcodec.getFirstWord(splitLine)
self.distanceFeedRate.parseSplitLine(firstWord, splitLine)
if firstWord == '(</extruderInitialization>)':
self.distanceFeedRate.addTagBracketedProcedure('stretch')
return
elif firstWord == '(<edgeWidth>':
				# store the parsed width on self so the derived distances below
				# use the file's edge width rather than the 0.4 default
				self.edgeWidth = float(splitLine[1])
self.crossLimitDistance = self.edgeWidth * self.stretchRepository.crossLimitDistanceOverEdgeWidth.value
self.loopMaximumAbsoluteStretch = self.edgeWidth * self.stretchRepository.loopStretchOverEdgeWidth.value
self.pathAbsoluteStretch = self.edgeWidth * self.stretchRepository.pathStretchOverEdgeWidth.value
self.edgeInsideAbsoluteStretch = self.edgeWidth * self.stretchRepository.edgeInsideStretchOverEdgeWidth.value
self.edgeOutsideAbsoluteStretch = self.edgeWidth * self.stretchRepository.edgeOutsideStretchOverEdgeWidth.value
				self.stretchFromDistance = self.stretchRepository.stretchFromDistanceOverEdgeWidth.value * self.edgeWidth
self.threadMaximumAbsoluteStretch = self.pathAbsoluteStretch
self.crossLimitDistanceFraction = 0.333333333 * self.crossLimitDistance
self.crossLimitDistanceRemainder = self.crossLimitDistance - self.crossLimitDistanceFraction
self.distanceFeedRate.addLine(line)
def parseStretch(self, line):
"Parse a gcode line and add it to the stretch skein."
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
if len(splitLine) < 1:
return
firstWord = splitLine[0]
if firstWord == 'G1':
line = self.getStretchedLine(splitLine)
elif firstWord == 'M101':
self.extruderActive = True
elif firstWord == 'M103':
self.extruderActive = False
self.setStretchToPath()
elif firstWord == '(<layer>':
self.layerCount.printProgressIncrement('stretch')
elif firstWord == '(<loop>':
self.isLoop = True
self.threadMaximumAbsoluteStretch = self.loopMaximumAbsoluteStretch
elif firstWord == '(</loop>)':
self.setStretchToPath()
elif firstWord == '(<edge>':
self.isLoop = True
self.threadMaximumAbsoluteStretch = self.edgeInsideAbsoluteStretch
if splitLine[1] == 'outer':
self.threadMaximumAbsoluteStretch = self.edgeOutsideAbsoluteStretch
elif firstWord == '(</edge>)':
self.setStretchToPath()
self.distanceFeedRate.addLine(line)
def setStretchToPath(self):
"Set the thread stretch to path stretch and is loop false."
self.isLoop = False
self.threadMaximumAbsoluteStretch = self.pathAbsoluteStretch
def main():
"Display the stretch dialog."
if len(sys.argv) > 1:
writeOutput(' '.join(sys.argv[1 :]))
else:
settings.startMainLoopFromConstructor(getNewRepository())
if __name__ == "__main__":
main()
| agpl-3.0 | 618,423,446,586,803,700 | 48.862884 | 630 | 0.775081 | false |
zeroq/amun | vuln_modules/vuln-mydoom/mydoom_modul.py | 1 | 3864 | """
[Amun - low interaction honeypot]
Copyright (C) [2014] [Jan Goebel]
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this program; if not, see <http://www.gnu.org/licenses/>
"""
try:
import psyco ; psyco.full()
from psyco.classes import *
except ImportError:
pass
import struct
import random
import mydoom_shellcodes
class vuln:
def __init__(self):
try:
self.vuln_name = "MYDOOM Vulnerability"
self.stage = "MYDOOM_STAGE1"
self.welcome_message = ""
self.shellcode = []
except KeyboardInterrupt:
raise
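	# Stage sketch: the module starts in MYDOOM_STAGE1 and accepts either the
	# 5 byte backdoor handshake or a first payload chunk (1024/541/645 bytes),
	# then moves to SHELLCODE and buffers everything received; once a zero
	# byte read arrives the collected buffer is handed back with isFile True.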
def print_message(self, data):
print "\n"
counter = 1
for byte in data:
if counter==16:
ausg = hex(struct.unpack('B',byte)[0])
if len(ausg) == 3:
list = str(ausg).split('x')
ausg = "%sx0%s" % (list[0],list[1])
print ausg
else:
print ausg
counter = 0
else:
ausg = hex(struct.unpack('B',byte)[0])
if len(ausg) == 3:
list = str(ausg).split('x')
ausg = "%sx0%s" % (list[0],list[1])
print ausg,
else:
print ausg,
counter += 1
print "\n>> Incoming Codesize: %s\n\n" % (len(data))
def getVulnName(self):
return self.vuln_name
def getCurrentStage(self):
return self.stage
def getWelcomeMessage(self):
return self.welcome_message
def incoming(self, message, bytes, ip, vuLogger, random_reply, ownIP):
try:
self.reply = []
for i in range(0,62):
try:
self.reply.append("\x00")
except KeyboardInterrupt:
raise
resultSet = {}
resultSet['vulnname'] = self.vuln_name
resultSet['result'] = False
resultSet['accept'] = False
resultSet['shutdown'] = False
resultSet['reply'] = "None"
resultSet['stage'] = self.stage
resultSet['shellcode'] = "None"
resultSet['isFile'] = False
if self.stage=="MYDOOM_STAGE1" and bytes==5:
if mydoom_shellcodes.mydoom_request_stage1==message:
resultSet['result'] = True
resultSet['accept'] = True
self.stage = "SHELLCODE"
return resultSet
elif self.stage=="MYDOOM_STAGE1" and (bytes==1024 or bytes==541 or bytes==645):
resultSet['result'] = True
resultSet['accept'] = True
#resultSet['reply'] = "".join(self.reply)
self.shellcode.append(message)
self.stage = "SHELLCODE"
#resultSet['shellcode'] = "".join(self.shellcode)
return resultSet
elif self.stage=="MYDOOM_STAGE1" and message.startswith('GET'):
resultSet['result'] = False
resultSet['accept'] = True
resultSet['shutdown'] = True
self.stage = "SHELLCODE"
return resultSet
elif self.stage=="SHELLCODE":
if bytes>0:
resultSet['result'] = True
resultSet['accept'] = True
#resultSet['reply'] = "".join(self.reply)
self.shellcode.append(message)
self.stage = "SHELLCODE"
#resultSet['shellcode'] = "".join(self.shellcode)
return resultSet
else:
resultSet['result'] = False
resultSet['accept'] = True
resultSet['isFile'] = True
resultSet['reply'] = "None"
self.shellcode.append(message)
resultSet['shellcode'] = "".join(self.shellcode)
return resultSet
else:
resultSet['result'] = False
resultSet['accept'] = False
resultSet['reply'] = "None"
return resultSet
return resultSet
except KeyboardInterrupt:
raise
except StandardError, e:
print e
return resultSet
except:
print "MYDOOM FATAL ERROR!"
| gpl-2.0 | -7,833,382,891,770,390,000 | 28.272727 | 239 | 0.659161 | false |
HorizonXP/python-react-router | react_router/bundle.py | 1 | 4196 | import json
import os
import re
from optional_django import staticfiles
from webpack.compiler import webpack
from webpack.config_file import ConfigFile, JS
from js_host.conf import settings as js_host_settings
from react.exceptions import ComponentSourceFileNotFound
from react_router.conf import settings
def bundle_component(path, client_path, translate=None, path_to_react=None, devtool=None, client=False):
if not os.path.isabs(path):
abs_path = staticfiles.find(path)
if not abs_path:
raise ComponentSourceFileNotFound(path)
path = abs_path
if not os.path.exists(path):
raise ComponentSourceFileNotFound(path)
if not os.path.isabs(client_path):
abs_client_path = staticfiles.find(client_path)
if not abs_client_path:
raise ComponentSourceFileNotFound(client_path)
client_path = abs_client_path
if not os.path.exists(client_path):
raise ComponentSourceFileNotFound(client_path)
config = generate_config_for_component(path, client_path, translate=translate, path_to_react=path_to_react, devtool=devtool)
config_file = generate_config_file(config)
var = generate_var_from_path(client_path)
path_to_config_file = get_path_to_config_file(config_file, prefix=var + '.')
return webpack(path_to_config_file)
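# Usage sketch (the component paths are hypothetical; relative paths are
# resolved through staticfiles as shown above):
#
#   bundle = bundle_component('components/App.jsx', 'components/client.jsx',
#                             translate=True)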
def get_path_to_config_file(config_file, prefix=None):
path = config_file.generate_path_to_file(prefix=prefix)
return config_file.write(path, force=False)
def generate_config_file(config):
return ConfigFile(
JS('var path = require("path");\n'),
JS('module.exports = '),
config,
JS(';'),
)
def generate_config_for_component(path, client_path, translate=None, path_to_react=None, devtool=None):
"""
Generates a webpack config object to bundle a component
"""
var = generate_var_from_path(path)
client_var = generate_var_from_path(client_path)
node_modules = os.path.join(js_host_settings.SOURCE_ROOT, 'node_modules')
if path_to_react is None:
path_to_react = settings.PATH_TO_REACT or os.path.join(node_modules, 'react')
config = {
'context': js_path_join(os.path.dirname(path)),
'entry': {
'server': './{}'.format(os.path.basename(path)),
'client': './{}'.format(os.path.basename(client_path)),
},
'output': {
'path': js_path_join(os.path.join('[bundle_dir]', 'components')),
'filename': '[name]-[hash].js',
'libraryTarget': 'umd',
'library': '[name]',
},
}
if translate:
config.update({
'module': {
'loaders': [{
'test': JS(settings.TRANSLATE_TEST),
'exclude': JS('/node_modules/'),
'loader': 'babel-loader',
'query': {
'stage': 0,
}
}]
},
'resolveLoader': {
'root': js_path_join(node_modules)
}
})
if devtool:
config['devtool'] = devtool
return config
def split_path(path):
"""
Splits a path into the various parts and returns a list
"""
parts = []
drive, path = os.path.splitdrive(path)
while True:
newpath, tail = os.path.split(path)
if newpath == path:
assert not tail
if path:
parts.append(path)
break
parts.append(tail)
path = newpath
if drive:
parts.append(drive)
parts.reverse()
return parts
def js_path_join(path):
"""
Splits a path so that it can be rejoined by the JS engine. Helps to avoid
OS compatibility issues due to string encoding
"""
return JS('path.join.apply(path, ' + json.dumps(split_path(path)) + ')')
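# For example (assuming a POSIX path), js_path_join('/a/b/c') renders as
# JS('path.join.apply(path, ["/", "a", "b", "c"])'), letting the JS engine
# rebuild the path with its own separators.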
def generate_var_from_path(path):
"""
Infer a variable name from a path
"""
var = '{parent_dir}__{filename}'.format(
parent_dir=os.path.basename(os.path.dirname(path)),
filename=os.path.splitext(os.path.basename(path))[0]
)
return re.sub(r'\W+', '_', var)
| mit | 2,315,873,355,505,756,000 | 26.788079 | 128 | 0.593422 | false |
repotvsupertuga/tvsupertuga.repository | plugin.video.youtube/resources/lib/youtube_plugin/youtube/client/login_client.py | 1 | 14799 | __author__ = 'bromix'
import time
import urlparse
import requests
from ...youtube.youtube_exceptions import LoginException
from ...kodion import Context
from __config__ import api, youtube_tv, keys_changed
context = Context()
class LoginClient(object):
api_keys_changed = keys_changed
CONFIGS = {
'youtube-tv': {
'system': 'YouTube TV',
'key': youtube_tv['key'],
'id': youtube_tv['id'],
'secret': youtube_tv['secret']
},
'main': {
'system': 'All',
'key': api['key'],
'id': api['id'],
'secret': api['secret']
}
}
def __init__(self, config=None, language='en-US', region='', access_token='', access_token_tv=''):
self._config = self.CONFIGS['main'] if config is None else config
self._config_tv = self.CONFIGS['youtube-tv']
self._verify = context.get_settings().verify_ssl()
# the default language is always en_US (like YouTube on the WEB)
if not language:
language = 'en_US'
language = language.replace('-', '_')
self._language = language
self._region = region
self._access_token = access_token
self._access_token_tv = access_token_tv
self._log_error_callback = None
def set_log_error(self, callback):
self._log_error_callback = callback
def log_error(self, text):
if self._log_error_callback:
self._log_error_callback(text)
else:
print text
def revoke(self, refresh_token):
# https://developers.google.com/youtube/v3/guides/auth/devices
headers = {'Host': 'accounts.google.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
'Content-Type': 'application/x-www-form-urlencoded'}
post_data = {'token': refresh_token}
# url
url = 'https://accounts.google.com/o/oauth2/revoke'
result = requests.post(url, data=post_data, headers=headers, verify=self._verify)
try:
json_data = result.json()
if 'error' in json_data:
context.log_error('Revoke failed: Code: |%s| JSON: |%s|' % (str(result.status_code), json_data))
json_data.update({'code': str(result.status_code)})
raise LoginException(json_data)
except ValueError:
json_data = None
if result.status_code != requests.codes.ok:
response_dump = self._get_response_dump(result, json_data)
context.log_error('Revoke failed: Code: |%s| Response dump: |%s|' % (str(result.status_code), response_dump))
raise LoginException('Logout Failed')
def refresh_token_tv(self, refresh_token, grant_type=''):
client_id = str(self.CONFIGS['youtube-tv']['id'])
client_secret = str(self.CONFIGS['youtube-tv']['secret'])
return self.refresh_token(refresh_token, client_id=client_id,
client_secret=client_secret, grant_type=grant_type)
def refresh_token(self, refresh_token, client_id='', client_secret='', grant_type=''):
# https://developers.google.com/youtube/v3/guides/auth/devices
headers = {'Host': 'www.googleapis.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
'Content-Type': 'application/x-www-form-urlencoded'}
client_id = client_id or self._config['id']
client_secret = client_secret or self._config['secret']
post_data = {'client_id': client_id,
'client_secret': client_secret,
'refresh_token': refresh_token,
'grant_type': 'refresh_token'}
# url
url = 'https://www.googleapis.com/oauth2/v4/token'
config_type = self._get_config_type(client_id, client_secret)
context.log_debug('Refresh token: Config: |%s| Client id [:5]: |%s| Client secret [:5]: |%s|' %
(config_type, client_id[:5], client_secret[:5]))
result = requests.post(url, data=post_data, headers=headers, verify=self._verify)
try:
json_data = result.json()
if 'error' in json_data:
context.log_error('Refresh Failed: Code: |%s| JSON: |%s|' % (str(result.status_code), json_data))
json_data.update({'code': str(result.status_code)})
raise LoginException(json_data)
except ValueError:
json_data = None
if result.status_code != requests.codes.ok:
response_dump = self._get_response_dump(result, json_data)
context.log_error('Refresh failed: Config: |%s| Client id [:5]: |%s| Client secret [:5]: |%s| Code: |%s| Response dump |%s|' %
(config_type, client_id[:5], client_secret[:5], str(result.status_code), response_dump))
raise LoginException('Login Failed')
if result.headers.get('content-type', '').startswith('application/json'):
if not json_data:
json_data = result.json()
access_token = json_data['access_token']
expires_in = time.time() + int(json_data.get('expires_in', 3600))
return access_token, expires_in
return '', ''
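    # Refresh sketch (identifiers below are illustrative): callers persist the
    # refresh token and trade it here for a new access token plus an absolute
    # expiry (time.time() + expires_in):
    #
    #   access_token, expires = client.refresh_token_tv(saved_refresh_token)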
def request_access_token_tv(self, code, client_id='', client_secret='', grant_type=''):
client_id = client_id or self.CONFIGS['youtube-tv']['id']
client_secret = client_secret or self.CONFIGS['youtube-tv']['secret']
return self.request_access_token(code, client_id=client_id, client_secret=client_secret, grant_type=grant_type)
def request_access_token(self, code, client_id='', client_secret='', grant_type=''):
# https://developers.google.com/youtube/v3/guides/auth/devices
headers = {'Host': 'www.googleapis.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
'Content-Type': 'application/x-www-form-urlencoded'}
client_id = client_id or self._config['id']
client_secret = client_secret or self._config['secret']
post_data = {'client_id': client_id,
'client_secret': client_secret,
'code': code,
'grant_type': 'http://oauth.net/grant_type/device/1.0'}
# url
url = 'https://www.googleapis.com/oauth2/v4/token'
config_type = self._get_config_type(client_id, client_secret)
context.log_debug('Requesting access token: Config: |%s| Client id [:5]: |%s| Client secret [:5]: |%s|' %
(config_type, client_id[:5], client_secret[:5]))
result = requests.post(url, data=post_data, headers=headers, verify=self._verify)
authorization_pending = False
try:
json_data = result.json()
if 'error' in json_data:
if json_data['error'] != u'authorization_pending':
context.log_error('Requesting access token: Code: |%s| JSON: |%s|' % (str(result.status_code), json_data))
json_data.update({'code': str(result.status_code)})
raise LoginException(json_data)
else:
authorization_pending = True
except ValueError:
json_data = None
if (result.status_code != requests.codes.ok) and not authorization_pending:
response_dump = self._get_response_dump(result, json_data)
context.log_error('Requesting access token: Config: |%s| Client id [:5]: |%s| Client secret [:5]: |%s| Code: |%s| Response dump |%s|' %
(config_type, client_id[:5], client_secret[:5], str(result.status_code), response_dump))
raise LoginException('Login Failed: Code %s' % str(result.status_code))
if result.headers.get('content-type', '').startswith('application/json'):
if json_data:
return json_data
else:
return result.json()
else:
response_dump = self._get_response_dump(result, json_data)
context.log_error('Requesting access token: Config: |%s| Client id [:5]: |%s| Client secret [:5]: |%s| Code: |%s| Response dump |%s|' %
(config_type, client_id[:5], client_secret[:5], str(result.status_code), response_dump))
raise LoginException('Login Failed: Unknown response')
def request_device_and_user_code_tv(self):
client_id = str(self.CONFIGS['youtube-tv']['id'])
return self.request_device_and_user_code(client_id=client_id)
def request_device_and_user_code(self, client_id=''):
# https://developers.google.com/youtube/v3/guides/auth/devices
headers = {'Host': 'accounts.google.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
'Content-Type': 'application/x-www-form-urlencoded'}
client_id = client_id or self._config['id']
post_data = {'client_id': client_id,
'scope': 'https://www.googleapis.com/auth/youtube'}
# url
url = 'https://accounts.google.com/o/oauth2/device/code'
config_type = self._get_config_type(client_id)
context.log_debug('Requesting device and user code: Config: |%s| Client id [:5]: |%s|' %
(config_type, client_id[:5]))
result = requests.post(url, data=post_data, headers=headers, verify=self._verify)
try:
json_data = result.json()
if 'error' in json_data:
context.log_error('Requesting device and user code failed: Code: |%s| JSON: |%s|' % (str(result.status_code), json_data))
json_data.update({'code': str(result.status_code)})
raise LoginException(json_data)
except ValueError:
json_data = None
if result.status_code != requests.codes.ok:
response_dump = self._get_response_dump(result, json_data)
context.log_error('Requesting device and user code failed: Config: |%s| Client id [:5]: |%s| Code: |%s| Response dump |%s|' %
(config_type, client_id[:5], str(result.status_code), response_dump))
raise LoginException('Login Failed')
if result.headers.get('content-type', '').startswith('application/json'):
if json_data:
return json_data
else:
return result.json()
else:
response_dump = self._get_response_dump(result, json_data)
context.log_error('Requesting access token: Config: |%s| Client id [:5]: |%s| Code: |%s| Response dump |%s|' %
(config_type, client_id[:5], str(result.status_code), response_dump))
raise LoginException('Login Failed: Unknown response')
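    # Device-flow sketch (the polling loop is illustrative, not part of this
    # client):
    #
    #   client = LoginClient()
    #   codes = client.request_device_and_user_code_tv()
    #   # show codes['user_code'] / codes['verification_url'] to the user,
    #   # then poll request_access_token_tv(codes['device_code']) until it
    #   # stops returning 'authorization_pending'.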
def get_access_token(self):
return self._access_token
def authenticate(self, username, password):
headers = {'device': '38c6ee9a82b8b10a',
'app': 'com.google.android.youtube',
'User-Agent': 'GoogleAuth/1.4 (GT-I9100 KTU84Q)',
'content-type': 'application/x-www-form-urlencoded',
'Host': 'android.clients.google.com',
'Connection': 'Keep-Alive',
'Accept-Encoding': 'gzip'}
post_data = {'device_country': self._region.lower(),
'operatorCountry': self._region.lower(),
'lang': self._language.replace('-', '_'),
'sdk_version': '19',
# 'google_play_services_version': '6188034',
'accountType': 'HOSTED_OR_GOOGLE',
'Email': username.encode('utf-8'),
'service': 'oauth2:https://www.googleapis.com/auth/youtube https://www.googleapis.com/auth/youtube.force-ssl https://www.googleapis.com/auth/plus.me https://www.googleapis.com/auth/emeraldsea.mobileapps.doritos.cookie https://www.googleapis.com/auth/plus.stream.read https://www.googleapis.com/auth/plus.stream.write https://www.googleapis.com/auth/plus.pages.manage https://www.googleapis.com/auth/identity.plus.page.impersonation',
'source': 'android',
'androidId': '38c6ee9a82b8b10a',
'app': 'com.google.android.youtube',
# 'client_sig': '24bb24c05e47e0aefa68a58a766179d9b613a600',
'callerPkg': 'com.google.android.youtube',
# 'callerSig': '24bb24c05e47e0aefa68a58a766179d9b613a600',
'Passwd': password.encode('utf-8')}
# url
url = 'https://android.clients.google.com/auth'
result = requests.post(url, data=post_data, headers=headers, verify=self._verify)
if result.status_code != requests.codes.ok:
raise LoginException('Login Failed')
lines = result.text.replace('\n', '&')
params = dict(urlparse.parse_qsl(lines))
token = params.get('Auth', '')
expires = int(params.get('Expiry', -1))
if not token or expires == -1:
raise LoginException('Failed to get token')
return token, expires
def _get_config_type(self, client_id, client_secret=None):
"""used for logging"""
if client_secret is None:
using_conf_tv = (client_id == self.CONFIGS['youtube-tv'].get('id'))
using_conf_main = (client_id == self.CONFIGS['main'].get('id'))
else:
using_conf_tv = ((client_id == self.CONFIGS['youtube-tv'].get('id')) and (client_secret == self.CONFIGS['youtube-tv'].get('secret')))
using_conf_main = ((client_id == self.CONFIGS['main'].get('id')) and (client_secret == self.CONFIGS['main'].get('secret')))
if not using_conf_main and not using_conf_tv:
return 'None'
elif using_conf_tv:
return 'YouTube-TV'
elif using_conf_main:
return 'YouTube-Kodi'
else:
return 'Unknown'
@staticmethod
def _get_response_dump(response, json_data=None):
if json_data:
return json_data
else:
try:
return response.json()
except ValueError:
try:
return response.text
except:
return 'None'
| gpl-2.0 | 7,997,651,502,358,368,000 | 45.832278 | 454 | 0.568552 | false |
jgeskens/django | django/core/management/validation.py | 1 | 23298 | import collections
import sys
from django.conf import settings
from django.core.management.color import color_style
from django.utils.encoding import force_str
from django.utils.itercompat import is_iterable
from django.utils import six
class ModelErrorCollection:
def __init__(self, outfile=sys.stdout):
self.errors = []
self.outfile = outfile
self.style = color_style()
def add(self, context, error):
self.errors.append((context, error))
self.outfile.write(self.style.ERROR(force_str("%s: %s\n" % (context, error))))
def get_validation_errors(outfile, app=None):
"""
Validates all models that are part of the specified app. If no app name is provided,
validates all models of all installed apps. Writes errors, if any, to outfile.
Returns number of errors.
"""
from django.db import models, connection
from django.db.models.loading import get_app_errors
from django.db.models.deletion import SET_NULL, SET_DEFAULT
e = ModelErrorCollection(outfile)
for (app_name, error) in get_app_errors().items():
e.add(app_name, error)
for cls in models.get_models(app, include_swapped=True):
opts = cls._meta
# Check swappable attribute.
if opts.swapped:
try:
app_label, model_name = opts.swapped.split('.')
except ValueError:
e.add(opts, "%s is not of the form 'app_label.app_name'." % opts.swappable)
continue
if not models.get_model(app_label, model_name):
e.add(opts, "Model has been swapped out for '%s' which has not been installed or is abstract." % opts.swapped)
# No need to perform any other validation checks on a swapped model.
continue
# If this is the current User model, check known validation problems with User models
if settings.AUTH_USER_MODEL == '%s.%s' % (opts.app_label, opts.object_name):
# Check that the USERNAME FIELD isn't included in REQUIRED_FIELDS.
if cls.USERNAME_FIELD in cls.REQUIRED_FIELDS:
e.add(opts, 'The field named as the USERNAME_FIELD should not be included in REQUIRED_FIELDS on a swappable User model.')
# Check that the username field is unique
if not opts.get_field(cls.USERNAME_FIELD).unique:
e.add(opts, 'The USERNAME_FIELD must be unique. Add unique=True to the field parameters.')
# Model isn't swapped; do field-specific validation.
for f in opts.local_fields:
if f.name == 'id' and not f.primary_key and opts.pk.name == 'id':
e.add(opts, '"%s": You can\'t use "id" as a field name, because each model automatically gets an "id" field if none of the fields have primary_key=True. You need to either remove/rename your "id" field or add primary_key=True to a field.' % f.name)
if f.name.endswith('_'):
e.add(opts, '"%s": Field names cannot end with underscores, because this would lead to ambiguous queryset filters.' % f.name)
if (f.primary_key and f.null and
not connection.features.interprets_empty_strings_as_nulls):
# We cannot reliably check this for backends like Oracle which
# consider NULL and '' to be equal (and thus set up
# character-based fields a little differently).
e.add(opts, '"%s": Primary key fields cannot have null=True.' % f.name)
if isinstance(f, models.CharField):
try:
max_length = int(f.max_length)
if max_length <= 0:
e.add(opts, '"%s": CharFields require a "max_length" attribute that is a positive integer.' % f.name)
except (ValueError, TypeError):
e.add(opts, '"%s": CharFields require a "max_length" attribute that is a positive integer.' % f.name)
if isinstance(f, models.DecimalField):
decimalp_ok, mdigits_ok = False, False
decimalp_msg = '"%s": DecimalFields require a "decimal_places" attribute that is a non-negative integer.'
try:
decimal_places = int(f.decimal_places)
if decimal_places < 0:
e.add(opts, decimalp_msg % f.name)
else:
decimalp_ok = True
except (ValueError, TypeError):
e.add(opts, decimalp_msg % f.name)
mdigits_msg = '"%s": DecimalFields require a "max_digits" attribute that is a positive integer.'
try:
max_digits = int(f.max_digits)
if max_digits <= 0:
e.add(opts, mdigits_msg % f.name)
else:
mdigits_ok = True
except (ValueError, TypeError):
e.add(opts, mdigits_msg % f.name)
invalid_values_msg = '"%s": DecimalFields require a "max_digits" attribute value that is greater than or equal to the value of the "decimal_places" attribute.'
if decimalp_ok and mdigits_ok:
if decimal_places > max_digits:
e.add(opts, invalid_values_msg % f.name)
if isinstance(f, models.FileField) and not f.upload_to:
e.add(opts, '"%s": FileFields require an "upload_to" attribute.' % f.name)
if isinstance(f, models.ImageField):
try:
from django.utils.image import Image
except ImportError:
e.add(opts, '"%s": To use ImageFields, you need to install Pillow. Get it at https://pypi.python.org/pypi/Pillow.' % f.name)
if isinstance(f, models.BooleanField) and getattr(f, 'null', False):
e.add(opts, '"%s": BooleanFields do not accept null values. Use a NullBooleanField instead.' % f.name)
if isinstance(f, models.FilePathField) and not (f.allow_files or f.allow_folders):
e.add(opts, '"%s": FilePathFields must have either allow_files or allow_folders set to True.' % f.name)
if f.choices:
if isinstance(f.choices, six.string_types) or not is_iterable(f.choices):
e.add(opts, '"%s": "choices" should be iterable (e.g., a tuple or list).' % f.name)
else:
for c in f.choices:
if not isinstance(c, (list, tuple)) or len(c) != 2:
e.add(opts, '"%s": "choices" should be a sequence of two-tuples.' % f.name)
if f.db_index not in (None, True, False):
e.add(opts, '"%s": "db_index" should be either None, True or False.' % f.name)
# Perform any backend-specific field validation.
connection.validation.validate_field(e, opts, f)
# Check if the on_delete behavior is sane
if f.rel and hasattr(f.rel, 'on_delete'):
if f.rel.on_delete == SET_NULL and not f.null:
e.add(opts, "'%s' specifies on_delete=SET_NULL, but cannot be null." % f.name)
elif f.rel.on_delete == SET_DEFAULT and not f.has_default():
e.add(opts, "'%s' specifies on_delete=SET_DEFAULT, but has no default value." % f.name)
# Check to see if the related field will clash with any existing
# fields, m2m fields, m2m related objects or related objects
if f.rel:
if f.rel.to not in models.get_models():
# If the related model is swapped, provide a hint;
# otherwise, the model just hasn't been installed.
if not isinstance(f.rel.to, six.string_types) and f.rel.to._meta.swapped:
e.add(opts, "'%s' defines a relation with the model '%s.%s', which has been swapped out. Update the relation to point at settings.%s." % (f.name, f.rel.to._meta.app_label, f.rel.to._meta.object_name, f.rel.to._meta.swappable))
else:
e.add(opts, "'%s' has a relation with model %s, which has either not been installed or is abstract." % (f.name, f.rel.to))
# it is a string and we could not find the model it refers to
# so skip the next section
if isinstance(f.rel.to, six.string_types):
continue
# Make sure the related field specified by a ForeignKey is unique
if f.requires_unique_target:
if len(f.foreign_related_fields) > 1:
has_unique_field = False
for rel_field in f.foreign_related_fields:
has_unique_field = has_unique_field or rel_field.unique
if not has_unique_field:
e.add(opts, "Field combination '%s' under model '%s' must have a unique=True constraint" % (','.join([rel_field.name for rel_field in f.foreign_related_fields]), f.rel.to.__name__))
else:
if not f.foreign_related_fields[0].unique:
e.add(opts, "Field '%s' under model '%s' must have a unique=True constraint." % (f.foreign_related_fields[0].name, f.rel.to.__name__))
rel_opts = f.rel.to._meta
rel_name = f.related.get_accessor_name()
rel_query_name = f.related_query_name()
if not f.rel.is_hidden():
for r in rel_opts.fields:
if r.name == rel_name:
e.add(opts, "Accessor for field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.local_many_to_many:
if r.name == rel_name:
e.add(opts, "Accessor for field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.get_all_related_many_to_many_objects():
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
for r in rel_opts.get_all_related_objects():
if r.field is not f:
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
seen_intermediary_signatures = []
for i, f in enumerate(opts.local_many_to_many):
# Check to see if the related m2m field will clash with any
# existing fields, m2m fields, m2m related objects or related
# objects
if f.rel.to not in models.get_models():
# If the related model is swapped, provide a hint;
# otherwise, the model just hasn't been installed.
if not isinstance(f.rel.to, six.string_types) and f.rel.to._meta.swapped:
e.add(opts, "'%s' defines a relation with the model '%s.%s', which has been swapped out. Update the relation to point at settings.%s." % (f.name, f.rel.to._meta.app_label, f.rel.to._meta.object_name, f.rel.to._meta.swappable))
else:
e.add(opts, "'%s' has an m2m relation with model %s, which has either not been installed or is abstract." % (f.name, f.rel.to))
# it is a string and we could not find the model it refers to
# so skip the next section
if isinstance(f.rel.to, six.string_types):
continue
# Check that the field is not set to unique. ManyToManyFields do not support unique.
if f.unique:
e.add(opts, "ManyToManyFields cannot be unique. Remove the unique argument on '%s'." % f.name)
if f.rel.through is not None and not isinstance(f.rel.through, six.string_types):
from_model, to_model = cls, f.rel.to
if from_model == to_model and f.rel.symmetrical and not f.rel.through._meta.auto_created:
e.add(opts, "Many-to-many fields with intermediate tables cannot be symmetrical.")
seen_from, seen_to, seen_self = False, False, 0
for inter_field in f.rel.through._meta.fields:
rel_to = getattr(inter_field.rel, 'to', None)
if from_model == to_model: # relation to self
if rel_to == from_model:
seen_self += 1
if seen_self > 2:
e.add(opts, "Intermediary model %s has more than "
"two foreign keys to %s, which is ambiguous "
"and is not permitted." % (
f.rel.through._meta.object_name,
from_model._meta.object_name
)
)
else:
if rel_to == from_model:
if seen_from:
e.add(opts, "Intermediary model %s has more "
"than one foreign key to %s, which is "
"ambiguous and is not permitted." % (
f.rel.through._meta.object_name,
from_model._meta.object_name
)
)
else:
seen_from = True
elif rel_to == to_model:
if seen_to:
e.add(opts, "Intermediary model %s has more "
"than one foreign key to %s, which is "
"ambiguous and is not permitted." % (
f.rel.through._meta.object_name,
rel_to._meta.object_name
)
)
else:
seen_to = True
if f.rel.through not in models.get_models(include_auto_created=True):
e.add(opts, "'%s' specifies an m2m relation through model "
"%s, which has not been installed." % (f.name, f.rel.through)
)
signature = (f.rel.to, cls, f.rel.through)
if signature in seen_intermediary_signatures:
e.add(opts, "The model %s has two manually-defined m2m "
"relations through the model %s, which is not "
"permitted. Please consider using an extra field on "
"your intermediary model instead." % (
cls._meta.object_name,
f.rel.through._meta.object_name
)
)
else:
seen_intermediary_signatures.append(signature)
if not f.rel.through._meta.auto_created:
seen_related_fk, seen_this_fk = False, False
for field in f.rel.through._meta.fields:
if field.rel:
if not seen_related_fk and field.rel.to == f.rel.to:
seen_related_fk = True
elif field.rel.to == cls:
seen_this_fk = True
if not seen_related_fk or not seen_this_fk:
e.add(opts, "'%s' is a manually-defined m2m relation "
"through model %s, which does not have foreign keys "
"to %s and %s" % (f.name, f.rel.through._meta.object_name,
f.rel.to._meta.object_name, cls._meta.object_name)
)
elif isinstance(f.rel.through, six.string_types):
e.add(opts, "'%s' specifies an m2m relation through model %s, "
"which has not been installed" % (f.name, f.rel.through)
)
rel_opts = f.rel.to._meta
rel_name = f.related.get_accessor_name()
rel_query_name = f.related_query_name()
# If rel_name is none, there is no reverse accessor (this only
# occurs for symmetrical m2m relations to self). If this is the
# case, there are no clashes to check for this field, as there are
# no reverse descriptors for this field.
if rel_name is not None:
for r in rel_opts.fields:
if r.name == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.local_many_to_many:
if r.name == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.get_all_related_many_to_many_objects():
if r.field is not f:
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
for r in rel_opts.get_all_related_objects():
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
# Check ordering attribute.
if opts.ordering:
for field_name in opts.ordering:
if field_name == '?':
continue
if field_name.startswith('-'):
field_name = field_name[1:]
if opts.order_with_respect_to and field_name == '_order':
continue
# Skip ordering in the format field1__field2 (FIXME: checking
# this format would be nice, but it's a little fiddly).
if '__' in field_name:
continue
# Skip ordering on pk. This is always a valid order_by field
# but is an alias and therefore won't be found by opts.get_field.
if field_name == 'pk':
continue
try:
opts.get_field(field_name, many_to_many=False)
except models.FieldDoesNotExist:
e.add(opts, '"ordering" refers to "%s", a field that doesn\'t exist.' % field_name)
# Check unique_together.
for ut in opts.unique_together:
validate_local_fields(e, opts, "unique_together", ut)
if not isinstance(opts.index_together, collections.Sequence):
e.add(opts, '"index_together" must a sequence')
else:
for it in opts.index_together:
validate_local_fields(e, opts, "index_together", it)
return len(e.errors)
def validate_local_fields(e, opts, field_name, fields):
from django.db import models
if not isinstance(fields, collections.Sequence):
e.add(opts, 'all %s elements must be sequences' % field_name)
else:
for field in fields:
try:
f = opts.get_field(field, many_to_many=True)
except models.FieldDoesNotExist:
e.add(opts, '"%s" refers to %s, a field that doesn\'t exist.' % (field_name, field))
else:
if isinstance(f.rel, models.ManyToManyRel):
e.add(opts, '"%s" refers to %s. ManyToManyFields are not supported in %s.' % (field_name, f.name, field_name))
if f not in opts.local_fields:
e.add(opts, '"%s" refers to %s. This is not in the same model as the %s statement.' % (field_name, f.name, field_name))
| bsd-3-clause | -6,676,675,528,759,319,000 | 62.655738 | 264 | 0.533565 | false |
tqchen/tvm | python/tvm/auto_scheduler/auto_schedule.py | 1 | 7844 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
User interface for TVM Auto-scheduler.
The basic schedule search process for TVM Auto-scheduler is designed to be:
`Program sampling` -> `Performance Tuning`.
In `Program sampling`, we use some predefined precise or heuristic rules to generate several
initial schedules. Based on these initial starting points, we perform `Performance Tuning` which
uses cost model based evolutionary search to select schedules with the best performance.
Candidate schedules are measured against the specific hardware target.
"""
import tvm._ffi
from tvm.runtime import Object
from .measure import LocalBuilder, LocalRunner
from .workload_registry import make_workload_key
from .compute_dag import ComputeDAG
from .cost_model import XGBModel
from .search_policy import SketchPolicy
from . import _ffi_api
@tvm._ffi.register_object("auto_scheduler.HardwareParams")
class HardwareParams(Object):
"""The parameters of target hardware used to guide the search policy
TODO(jcf94): This is considered to be merged with the new Target specification:
https://discuss.tvm.ai/t/rfc-tvm-target-specification/6844
Parameters
----------
num_cores : int
The number of device cores.
vector_unit_bytes : int
The width of vector units in bytes.
cache_line_bytes : int
The size of cache line in bytes.
"""
def __init__(self, num_cores, vector_unit_bytes, cache_line_bytes):
self.__init_handle_by_constructor__(
_ffi_api.HardwareParams, num_cores, vector_unit_bytes, cache_line_bytes
)
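# Example construction (illustrative values roughly matching a small CPU):
#
#   hardware_params = HardwareParams(num_cores=4, vector_unit_bytes=64,
#                                    cache_line_bytes=64)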
@tvm._ffi.register_object("auto_scheduler.SearchTask")
class SearchTask(Object):
"""The computation information and hardware parameters for a schedule search task.
Parameters
----------
dag : ComputeDAG
The ComputeDAG for the corresponding compute declaration.
workload_key : str
The workload key for the corresponding compute declaration.
target : tvm.target.Target
The target device of this search task.
target_host : Optional[tvm.target.Target]
The target host device of this search task.
hardware_params : Optional[HardwareParams]
Hardware parameters used in this search task.
"""
def __init__(self, dag, workload_key, target, target_host=None, hardware_params=None):
self.__init_handle_by_constructor__(
_ffi_api.SearchTask, dag, workload_key, target, target_host, hardware_params
)
@tvm._ffi.register_object("auto_scheduler.TuningOptions")
class TuningOptions(Object):
"""This controls the options of performance tuning.
Parameters
----------
num_measure_trials: int = 0
The number of measurement trials.
The search policy measures `num_measure_trials` schedules in total and returns the best one
among them.
With `num_measure_trials` == 0, the policy will do the schedule search but won't involve
measurement. This can be used to get a runnable schedule quickly without auto-tuning.
early_stopping: Optional[int]
Stop the tuning early if getting no improvement after n measurements.
num_measures_per_round: int = 64
The number of schedules to be measured at each search round.
        The whole schedule search process will try a total of `num_measure_trials`
        schedules over several rounds.
verbose: int = 1
Verbosity level. 0 for silent, 1 to output information during schedule search.
builder: Union[ProgramBuilder, str] = 'local'
ProgramBuilder which builds the program.
runner: Union[ProgramRunner, str] = 'local'
ProgramRunner which runs the program and measures time costs.
measure_callbacks: Optional[List[MeasureCallback]]
Callback functions called after each measurement.
Candidates:
- auto_scheduler.RecordToFile
"""
def __init__(
self,
num_measure_trials=0,
early_stopping=None,
num_measures_per_round=64,
verbose=1,
builder="local",
runner="local",
measure_callbacks=None,
):
if isinstance(builder, str):
if builder == "local":
builder = LocalBuilder()
else:
raise ValueError("Invalid builder: " + builder)
elif not isinstance(builder, tvm.auto_scheduler.measure.ProgramBuilder):
raise ValueError(
"Invalid builder: "
                + str(builder)
+ " . TuningOptions expects a ProgramBuilder or string."
)
if isinstance(runner, str):
if runner == "local":
runner = LocalRunner()
else:
raise ValueError("Invalid runner: " + runner)
elif not isinstance(runner, tvm.auto_scheduler.measure.ProgramRunner):
raise ValueError(
"Invalid runner: " + runner + " . TuningOptions expects a ProgramRunner or string."
)
self.__init_handle_by_constructor__(
_ffi_api.TuningOptions,
num_measure_trials,
early_stopping or -1,
num_measures_per_round,
verbose,
builder,
runner,
measure_callbacks,
)
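# Example: run 64 measurement trials locally and append results to a log file
# (RecordToFile lives in this package; the file name is illustrative):
#
#   tune_option = TuningOptions(
#       num_measure_trials=64,
#       measure_callbacks=[auto_scheduler.RecordToFile("matmul_tuning.json")],
#   )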
def create_task(func, args, target, target_host=None, hardware_params=None):
"""Create a search task
Parameters
----------
func : Union[Function, str]
The function that returns the compute declaration Tensors.
Can be the a function or the function name.
args : Union[Tuple[Any, ...], List[Any]]
The args of the function.
target : tvm.target.Target
The target device of this search task.
target_host : Optional[tvm.target.Target]
The target host device of this search task.
hardware_params : Optional[HardwareParams]
Hardware parameters used in this search task.
Returns
-------
SearchTask: the created task
"""
workload_key = make_workload_key(func, args)
dag = ComputeDAG(workload_key)
return SearchTask(dag, workload_key, target, target_host, hardware_params)
def auto_schedule(task, search_policy=None, tuning_options=TuningOptions()):
"""Run auto scheduling search for a task
Parameters
----------
task : SearchTask
The SearchTask for the computation declaration.
search_policy : Optional[SearchPolicy]
The search policy to be used for schedule search.
tuning_options : Optional[TuningOptions]
Tuning and measurement options.
Returns
-------
A `te.schedule` and the a list of `te.Tensor` to be used in `tvm.lower` or `tvm.build`.
"""
if not isinstance(task, SearchTask):
raise ValueError(
"Invalid task: " + task + " . `auto_scheduler.auto_schedule` expects a SearchTask."
)
if search_policy is None:
cost_model = XGBModel()
search_policy = SketchPolicy(task, cost_model)
sch, tensors = _ffi_api.AutoSchedule(search_policy, tuning_options)
return sch, tensors
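# End-to-end sketch (assumes a compute function `matmul` registered with
# @auto_scheduler.register_workload; names are illustrative):
#
#   task = create_task(matmul, (128, 128, 128), tvm.target.Target("llvm"))
#   sch, args = auto_schedule(task, tuning_options=TuningOptions(num_measure_trials=10))
#   mod = tvm.build(sch, args)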
| apache-2.0 | 6,643,239,991,650,872,000 | 35.654206 | 100 | 0.666624 | false |
abhinavsingh/proxy.py | tests/http/test_web_server.py | 1 | 10582 | # -*- coding: utf-8 -*-
"""
proxy.py
~~~~~~~~
⚡⚡⚡ Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on
Network monitoring, controls & Application development, testing, debugging.
:copyright: (c) 2013-present by Abhinav Singh and contributors.
:license: BSD, see LICENSE for more details.
"""
import gzip
import os
import tempfile
import unittest
import selectors
from unittest import mock
from proxy.proxy import Proxy
from proxy.core.connection import TcpClientConnection
from proxy.http.handler import HttpProtocolHandler
from proxy.http.parser import httpParserStates
from proxy.common.utils import build_http_response, build_http_request, bytes_, text_
from proxy.common.constants import CRLF, PLUGIN_HTTP_PROXY, PLUGIN_PAC_FILE, PLUGIN_WEB_SERVER, PROXY_PY_DIR
from proxy.http.server import HttpWebServerPlugin
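# The tests below drive HttpProtocolHandler directly: the client socket and
# the selector are mocked, so run_once() consumes the canned recv() bytes and
# the assertions inspect what the handler queued or sent on the mocked
# connection.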
class TestWebServerPlugin(unittest.TestCase):
@mock.patch('selectors.DefaultSelector')
@mock.patch('socket.fromfd')
def setUp(self, mock_fromfd: mock.Mock, mock_selector: mock.Mock) -> None:
self.fileno = 10
self._addr = ('127.0.0.1', 54382)
self._conn = mock_fromfd.return_value
self.mock_selector = mock_selector
self.flags = Proxy.initialize()
self.flags.plugins = Proxy.load_plugins([
bytes_(PLUGIN_HTTP_PROXY),
bytes_(PLUGIN_WEB_SERVER),
])
self.protocol_handler = HttpProtocolHandler(
TcpClientConnection(self._conn, self._addr),
flags=self.flags)
self.protocol_handler.initialize()
@mock.patch('selectors.DefaultSelector')
@mock.patch('socket.fromfd')
def test_pac_file_served_from_disk(
self, mock_fromfd: mock.Mock, mock_selector: mock.Mock) -> None:
pac_file = os.path.join(
os.path.dirname(PROXY_PY_DIR),
'helper',
'proxy.pac')
self._conn = mock_fromfd.return_value
self.mock_selector_for_client_read(mock_selector)
self.init_and_make_pac_file_request(pac_file)
self.protocol_handler.run_once()
self.assertEqual(
self.protocol_handler.request.state,
httpParserStates.COMPLETE)
with open(pac_file, 'rb') as f:
            self._conn.send.assert_called_once_with(build_http_response(
200, reason=b'OK', headers={
b'Content-Type': b'application/x-ns-proxy-autoconfig',
b'Connection': b'close'
}, body=f.read()
))
@mock.patch('selectors.DefaultSelector')
@mock.patch('socket.fromfd')
def test_pac_file_served_from_buffer(
self, mock_fromfd: mock.Mock, mock_selector: mock.Mock) -> None:
self._conn = mock_fromfd.return_value
self.mock_selector_for_client_read(mock_selector)
pac_file_content = b'function FindProxyForURL(url, host) { return "PROXY localhost:8899; DIRECT"; }'
self.init_and_make_pac_file_request(text_(pac_file_content))
self.protocol_handler.run_once()
self.assertEqual(
self.protocol_handler.request.state,
httpParserStates.COMPLETE)
        self._conn.send.assert_called_once_with(build_http_response(
200, reason=b'OK', headers={
b'Content-Type': b'application/x-ns-proxy-autoconfig',
b'Connection': b'close'
}, body=pac_file_content
))
@mock.patch('selectors.DefaultSelector')
@mock.patch('socket.fromfd')
def test_default_web_server_returns_404(
self, mock_fromfd: mock.Mock, mock_selector: mock.Mock) -> None:
self._conn = mock_fromfd.return_value
mock_selector.return_value.select.return_value = [(
selectors.SelectorKey(
fileobj=self._conn,
fd=self._conn.fileno,
events=selectors.EVENT_READ,
data=None), selectors.EVENT_READ), ]
flags = Proxy.initialize()
flags.plugins = Proxy.load_plugins([
bytes_(PLUGIN_HTTP_PROXY),
bytes_(PLUGIN_WEB_SERVER),
])
self.protocol_handler = HttpProtocolHandler(
TcpClientConnection(self._conn, self._addr),
flags=flags)
self.protocol_handler.initialize()
self._conn.recv.return_value = CRLF.join([
b'GET /hello HTTP/1.1',
CRLF,
])
self.protocol_handler.run_once()
self.assertEqual(
self.protocol_handler.request.state,
httpParserStates.COMPLETE)
self.assertEqual(
self.protocol_handler.client.buffer[0],
HttpWebServerPlugin.DEFAULT_404_RESPONSE)
@unittest.skipIf(os.environ.get('GITHUB_ACTIONS', False),
'Disabled on GitHub actions because this test is flaky on GitHub infrastructure.')
@mock.patch('selectors.DefaultSelector')
@mock.patch('socket.fromfd')
def test_static_web_server_serves(
self, mock_fromfd: mock.Mock, mock_selector: mock.Mock) -> None:
# Setup a static directory
static_server_dir = os.path.join(tempfile.gettempdir(), 'static')
index_file_path = os.path.join(static_server_dir, 'index.html')
html_file_content = b'''<html><head></head><body><h1>Proxy.py Testing</h1></body></html>'''
os.makedirs(static_server_dir, exist_ok=True)
with open(index_file_path, 'wb') as f:
f.write(html_file_content)
self._conn = mock_fromfd.return_value
self._conn.recv.return_value = build_http_request(
b'GET', b'/index.html')
mock_selector.return_value.select.side_effect = [
[(selectors.SelectorKey(
fileobj=self._conn,
fd=self._conn.fileno,
events=selectors.EVENT_READ,
data=None), selectors.EVENT_READ)],
[(selectors.SelectorKey(
fileobj=self._conn,
fd=self._conn.fileno,
events=selectors.EVENT_WRITE,
data=None), selectors.EVENT_WRITE)], ]
flags = Proxy.initialize(
enable_static_server=True,
static_server_dir=static_server_dir)
flags.plugins = Proxy.load_plugins([
bytes_(PLUGIN_HTTP_PROXY),
bytes_(PLUGIN_WEB_SERVER),
])
self.protocol_handler = HttpProtocolHandler(
TcpClientConnection(self._conn, self._addr),
flags=flags)
self.protocol_handler.initialize()
self.protocol_handler.run_once()
self.protocol_handler.run_once()
self.assertEqual(mock_selector.return_value.select.call_count, 2)
self.assertEqual(self._conn.send.call_count, 1)
encoded_html_file_content = gzip.compress(html_file_content)
self.assertEqual(self._conn.send.call_args[0][0], build_http_response(
200, reason=b'OK', headers={
b'Content-Type': b'text/html',
b'Cache-Control': b'max-age=86400',
b'Content-Encoding': b'gzip',
b'Connection': b'close',
b'Content-Length': bytes_(len(encoded_html_file_content)),
},
body=encoded_html_file_content
))
@mock.patch('selectors.DefaultSelector')
@mock.patch('socket.fromfd')
def test_static_web_server_serves_404(
self,
mock_fromfd: mock.Mock,
mock_selector: mock.Mock) -> None:
self._conn = mock_fromfd.return_value
self._conn.recv.return_value = build_http_request(
b'GET', b'/not-found.html')
mock_selector.return_value.select.side_effect = [
[(selectors.SelectorKey(
fileobj=self._conn,
fd=self._conn.fileno,
events=selectors.EVENT_READ,
data=None), selectors.EVENT_READ)],
[(selectors.SelectorKey(
fileobj=self._conn,
fd=self._conn.fileno,
events=selectors.EVENT_WRITE,
data=None), selectors.EVENT_WRITE)], ]
flags = Proxy.initialize(enable_static_server=True)
flags.plugins = Proxy.load_plugins([
bytes_(PLUGIN_HTTP_PROXY),
bytes_(PLUGIN_WEB_SERVER),
])
self.protocol_handler = HttpProtocolHandler(
TcpClientConnection(self._conn, self._addr),
flags=flags)
self.protocol_handler.initialize()
self.protocol_handler.run_once()
self.protocol_handler.run_once()
self.assertEqual(mock_selector.return_value.select.call_count, 2)
self.assertEqual(self._conn.send.call_count, 1)
self.assertEqual(self._conn.send.call_args[0][0],
HttpWebServerPlugin.DEFAULT_404_RESPONSE)
@mock.patch('socket.fromfd')
def test_on_client_connection_called_on_teardown(
self, mock_fromfd: mock.Mock) -> None:
flags = Proxy.initialize()
plugin = mock.MagicMock()
flags.plugins = {b'HttpProtocolHandlerPlugin': [plugin]}
self._conn = mock_fromfd.return_value
self.protocol_handler = HttpProtocolHandler(
TcpClientConnection(self._conn, self._addr),
flags=flags)
self.protocol_handler.initialize()
plugin.assert_called()
with mock.patch.object(self.protocol_handler, 'run_once') as mock_run_once:
mock_run_once.return_value = True
self.protocol_handler.run()
self.assertTrue(self._conn.closed)
plugin.return_value.on_client_connection_close.assert_called()
def init_and_make_pac_file_request(self, pac_file: str) -> None:
flags = Proxy.initialize(pac_file=pac_file)
flags.plugins = Proxy.load_plugins([
bytes_(PLUGIN_HTTP_PROXY),
bytes_(PLUGIN_WEB_SERVER),
bytes_(PLUGIN_PAC_FILE),
])
self.protocol_handler = HttpProtocolHandler(
TcpClientConnection(self._conn, self._addr),
flags=flags)
self.protocol_handler.initialize()
self._conn.recv.return_value = CRLF.join([
b'GET / HTTP/1.1',
CRLF,
])
def mock_selector_for_client_read(self, mock_selector: mock.Mock) -> None:
mock_selector.return_value.select.return_value = [(
selectors.SelectorKey(
fileobj=self._conn,
fd=self._conn.fileno,
events=selectors.EVENT_READ,
data=None), selectors.EVENT_READ), ]
| bsd-3-clause | 6,437,683,575,565,306,000 | 39.521073 | 108 | 0.602307 | false |
jpwarren/holideck | examples/soundlevel.py | 1 | 1766 | #!/usr/bin/python
"""
A sound level meter for the MooresCloud Holiday.
Requires PyAudio.
Copyright (c) 2013, Josh Deprez
License: MIT (see LICENSE for details)
"""
__author__ = 'Josh Deprez'
__version__ = '0.01-dev'
__license__ = 'MIT'
import pyaudio
import audioop
import struct
import math
import holiday
import sys
import time
def render(hol, value):
for i in xrange(value):
alpha = i / 50.0
beta = 1.0 - alpha
hol.setglobe(i, alpha * 0xFF, beta * 0xFF, 0x00) # Green -> Red
# Black remaining lights
for i in xrange(value,50):
hol.setglobe(i, 0x00, 0x00, 0x00)
hol.render()
return
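# Illustrative behaviour: render(hol, 25) lights globes 0-24 on a green-to-red
# gradient and blanks globes 25-49; render(hol, 0) blanks the whole string.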
if __name__ == '__main__':
if len(sys.argv) > 1:
the_hostname = sys.argv[1]
print the_hostname
else:
# Assume the holiday is the simulator
the_hostname = 'localhost:8080'
hol = holiday.Holiday(remote=True,addr=the_hostname)
render(hol, 0)
# Do PyAudio stuff
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
INPUT_BLOCK_TIME = 0.02
BUFFER = 1024 #Seems to work...
# How do we select the appropriate input device?
#input_device = 0 #Built-in Microphone (seems good for OSX)
#input_device = 3 # this seems to be correct for juno
input_device = 15 # Ubuntu default
pa = pyaudio.PyAudio()
stream = pa.open(format = FORMAT,
channels = CHANNELS,
rate = RATE,
input = True,
#input_device_index = input_device,
frames_per_buffer = BUFFER)
SCALE = 300 # Probably need to tweak this
MAX_LIGHT = 50
errorcount = 0
print "Press Ctrl-C to quit"
while True:
try:
block = stream.read(BUFFER)
        except IOError, e:
            errorcount += 1
            print( "(%d) Error recording: %s"%(errorcount,e))
            continue  # skip this block; `block` would be stale or undefined
amplitude = audioop.rms(block, 2)
#print amplitude
render(hol, min(amplitude / SCALE, MAX_LIGHT))
| mit | 655,493,103,852,588,200 | 20.277108 | 65 | 0.665345 | false |
beni55/Sublime-BracketGuard | BracketGuard.py | 1 | 2769 | import sublime, sublime_plugin
import re
from collections import namedtuple
BracketPosition = namedtuple("BracketPosition", "position opener")
BracketResult = namedtuple("BracketResult", "success start end")
# House keeping for the async beast
activeChecks = 0
dismissedChecks = 0
# scopeNames is used to avoid a weird memory leak with Sublime Text which occurs
# when calling view.scope_name within an async routine
scopeNames = []
class SelectionListener(sublime_plugin.EventListener):
def on_modified(self, view):
global scopeNames
scopeNames = [view.scope_name(i) for i in range(len(self.getBufferContent(view)))]
if view.settings().get("is_test", False):
self.on_modified_async(view)
def on_modified_async(self, view):
global activeChecks, dismissedChecks
if activeChecks > 0:
dismissedChecks += 1
return
bufferContent = self.getBufferContent(view)
activeChecks += 1
bracketResult = getFirstBracketError(bufferContent, view)
if dismissedChecks > 0:
dismissedChecks = 0
bracketResult = getFirstBracketError(bufferContent, view)
activeChecks -= 1
if bracketResult.success:
view.erase_regions("BracketGuardRegions")
else:
openerRegion = sublime.Region(bracketResult.start, bracketResult.start + 1)
closerRegion = sublime.Region(bracketResult.end, bracketResult.end + 1)
view.add_regions("BracketGuardRegions", [openerRegion, closerRegion], "invalid")
def getBufferContent(self, view):
contentRegion = sublime.Region(0, view.size())
return view.substr(contentRegion)
def getFirstBracketError(codeStr, view):
global scopeNames
opener = list("({[")
closer = list(")}]")
matchingStack = []
for index, char in enumerate(codeStr):
if dismissedChecks > 0:
# we will have to start over
return BracketResult(True, -1, -1)
scopeName = scopeNames[index]
if "string" in scopeName or "comment" in scopeName:
# ignore unmatched brackets in strings and comments
continue
if char in opener:
matchingStack.append(BracketPosition(index, char))
elif char in closer:
matchingOpener = opener[closer.index(char)]
if len(matchingStack) == 0:
return BracketResult(False, -1, index)
poppedOpener = matchingStack.pop()
if matchingOpener != poppedOpener.opener:
return BracketResult(False, poppedOpener.position, index)
if len(matchingStack) == 0:
return BracketResult(True, -1, -1)
else:
poppedOpener = matchingStack.pop()
return BracketResult(False, poppedOpener.position, -1)
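# Illustrative behaviour (hypothetical inputs; in practice the function relies
# on the global scopeNames being populated so string/comment scopes are
# skipped):
#
#   getFirstBracketError("(a[b)c]", view) -> BracketResult(False, 2, 4)
#       the '[' opened at index 2 is mismatched by ')' at index 4
#   getFirstBracketError("(abc)", view)   -> BracketResult(True, -1, -1)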
| mit | -2,855,513,871,807,219,000 | 25.969697 | 86 | 0.681835 | false |
kotoroshinoto/TCGA_MAF_Analysis | gooch_maf_tools/commands/analysis/get_stats/mutcount_length_linreg.py | 1 | 4685 | import rpy2.robjects.packages as rpackages
import rpy2.robjects as ro
import click
import csv
import sys
from typing import List
from typing import Dict
from ..get_stats.util import *
class GeneLinregEntry:
def __init__(self, symbol: str):
self.symbol = symbol
self.symbol_key = symbol.casefold()
self.count = 0
self.length = 0
class GeneLinregData:
def __init__(self):
self.data_dict = dict() # type: Dict[str, GeneLinregEntry]
self.symbol_list = list() #type: List[str]
def read_count_file(self, filehandle, name_col: int, count_col: int, has_header: bool):
reader = csv.reader(filehandle, dialect='excel-tab')
if has_header:
next(reader) # skip first line
for row in reader:
symbol = row[name_col]
symbol_key = symbol.casefold()
if symbol_key not in self.data_dict:
entry = GeneLinregEntry(symbol) # type: GeneLinregEntry
self.data_dict[symbol_key] = entry
self.symbol_list.append(symbol_key)
else:
entry = self.data_dict[symbol_key] # type: GeneLinregEntry
entry.count = int(row[count_col])
def read_length_file(self, filehandle, name_col: int, length_col: int, has_header: bool):
reader = csv.reader(filehandle, dialect='excel-tab')
if has_header:
next(reader) # skip first line
for row in reader:
symbol = row[name_col]
symbol_key = symbol.casefold()
if (symbol_key not in self.symbol_list) or (symbol_key not in self.data_dict):
continue
entry = self.data_dict[symbol_key]
entry.length = int(row[length_col])
def generate_count_vector(self) -> ro.IntVector:
counts = list()
for symbol in self.symbol_list:
counts.append(self.data_dict[symbol].count)
return ro.IntVector(counts)
def generate_length_vector(self) -> ro.IntVector:
lengths = list()
for symbol in self.symbol_list:
lengths.append(self.data_dict[symbol].length)
return ro.IntVector(lengths)
def get_symbol_list(self):
return self.symbol_list
@click.command(name='Gene_Outliers', help="compute studentized residuals for list of gene counts")
@click.option('--count_file', type=(click.File('r'), int, int), default=(None, None, None), required=True, help="count file, symbol column, count column")
@click.option('--length_file', type=(click.File('r'), int, int), default=(None, None, None), required=True, help="length file, symbol column, length column")
@click.option('--header_count/--noheader_count', default=True)
@click.option('--header_length/--noheader_length', default=True)
@click.option('--header_name_map/--noheader_name_map', default=True)
@click.option('--output', required=False, default=None, type=click.Path(dir_okay=False, writable=True), help="output file path")
@click.pass_context
def cli(ctx, count_file, length_file, output, header_count, header_length, header_name_map):
#TODO find out why some lengths are not matching and are being given a size of zero
errormsg=list()
if count_file[0] is None:
errormsg.append("--count_file is required")
if length_file[0] is None:
errormsg.append("--length_file is required")
# if name_map_file[0] is None:
# errormsg.append("--name_map_file is required")
if len(errormsg) > 0:
print(cli.get_help(ctx))
raise click.UsageError(', '.join(errormsg))
check_and_install_R_dependency('MASS')
rpackages.importr('MASS')
linreg_data = GeneLinregData()
#read in counts file
linreg_data.read_count_file(count_file[0], count_file[1], count_file[2], header_count)
#read in length file
linreg_data.read_length_file(length_file[0], length_file[1], length_file[2], header_length)
length_vector = linreg_data.generate_length_vector()
count_vctr = linreg_data.generate_count_vector()
ro.r('x=' + str(length_vector.r_repr()))
ro.r('y=' + str(count_vctr.r_repr()))
linreg_result = ro.r('lm(y~x)')
studres_func = ro.r('studres')
studres_result = studres_func(linreg_result)
if output is None:
output_file = sys.stdout
else:
output_file = open(output, newline='', mode='w')
fieldnames = list()
fieldnames.append('Gene_Symbol')
fieldnames.append('Length')
fieldnames.append('Mutation_Count')
fieldnames.append('Studentized_Residual')
output_writer = csv.writer(output_file, dialect='excel-tab')
output_writer.writerow(fieldnames)
symbol_list = linreg_data.symbol_list
for i in range(0, len(symbol_list)):
symbol = symbol_list[i]
if (symbol not in linreg_data.symbol_list) or (symbol not in linreg_data.data_dict):
continue
dataentry = linreg_data.data_dict[symbol] # type: GeneLinregEntry
row = list()
row.append(dataentry.symbol)
row.append(dataentry.length)
row.append(dataentry.count)
row.append(studres_result[i])
output_writer.writerow(row)
if __name__ == "__main__":
cli()
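# Example invocation (a hypothetical sketch; the actual entry point depends on
# how this click command group is wired up in gooch_maf_tools). Each file
# option takes three values: path, symbol column index, and data column index:
#
#   ... Gene_Outliers \
#       --count_file mutation_counts.tsv 0 1 \
#       --length_file gene_lengths.tsv 0 1 \
#       --output studentized_residuals.tsv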
| unlicense | -2,991,018,952,282,382,000 | 33.962687 | 158 | 0.710566 | false |
jackrzhang/zulip | zerver/tests/test_realm_filters.py | 1 | 4054 | # -*- coding: utf-8 -*-
from zerver.lib.actions import get_realm, do_add_realm_filter
from zerver.lib.test_classes import ZulipTestCase
from zerver.models import RealmFilter
class RealmFilterTest(ZulipTestCase):
def test_list(self) -> None:
email = self.example_email('iago')
self.login(email)
realm = get_realm('zulip')
do_add_realm_filter(
realm,
"#(?P<id>[123])",
"https://realm.com/my_realm_filter/%(id)s")
result = self.client_get("/json/realm/filters")
self.assert_json_success(result)
self.assertEqual(200, result.status_code)
self.assertEqual(len(result.json()["filters"]), 1)
def test_create(self) -> None:
email = self.example_email('iago')
self.login(email)
data = {"pattern": "", "url_format_string": "https://realm.com/my_realm_filter/%(id)s"}
result = self.client_post("/json/realm/filters", info=data)
self.assert_json_error(result, 'This field cannot be blank.')
data['pattern'] = '$a'
result = self.client_post("/json/realm/filters", info=data)
self.assert_json_error(result, 'Invalid filter pattern, you must use the following format OPTIONAL_PREFIX(?P<id>.+)')
data['pattern'] = r'ZUL-(?P<id>\d++)'
result = self.client_post("/json/realm/filters", info=data)
self.assert_json_error(result, 'Invalid filter pattern, you must use the following format OPTIONAL_PREFIX(?P<id>.+)')
data['pattern'] = r'ZUL-(?P<id>\d+)'
data['url_format_string'] = '$fgfg'
result = self.client_post("/json/realm/filters", info=data)
self.assert_json_error(result, 'Enter a valid URL.')
data['pattern'] = r'ZUL-(?P<id>\d+)'
data['url_format_string'] = 'https://realm.com/my_realm_filter/'
result = self.client_post("/json/realm/filters", info=data)
self.assert_json_error(result, 'URL format string must be in the following format: `https://example.com/%(\\w+)s`')
data['url_format_string'] = 'https://realm.com/my_realm_filter/#hashtag/%(id)s'
result = self.client_post("/json/realm/filters", info=data)
self.assert_json_success(result)
data['pattern'] = r'ZUL2-(?P<id>\d+)'
data['url_format_string'] = 'https://realm.com/my_realm_filter/?value=%(id)s'
result = self.client_post("/json/realm/filters", info=data)
self.assert_json_success(result)
# This is something we'd like to support, but don't currently;
# this test is a reminder of something we should allow in the
# future.
data['pattern'] = r'(?P<org>[a-z]+)/(?P<repo>[a-z]+)#(?P<id>[0-9]+)'
data['url_format_string'] = 'https://github.com/%(org)/%(repo)/issue/%(id)'
result = self.client_post("/json/realm/filters", info=data)
self.assert_json_error(result, 'URL format string must be in the following format: `https://example.com/%(\\w+)s`')
def test_not_realm_admin(self) -> None:
email = self.example_email('hamlet')
self.login(email)
result = self.client_post("/json/realm/filters")
self.assert_json_error(result, 'Must be an organization administrator')
result = self.client_delete("/json/realm/filters/15")
self.assert_json_error(result, 'Must be an organization administrator')
def test_delete(self) -> None:
email = self.example_email('iago')
self.login(email)
realm = get_realm('zulip')
filter_id = do_add_realm_filter(
realm,
"#(?P<id>[123])",
"https://realm.com/my_realm_filter/%(id)s")
filters_count = RealmFilter.objects.count()
result = self.client_delete("/json/realm/filters/{0}".format(filter_id + 1))
self.assert_json_error(result, 'Filter not found')
result = self.client_delete("/json/realm/filters/{0}".format(filter_id))
self.assert_json_success(result)
self.assertEqual(RealmFilter.objects.count(), filters_count - 1)
| apache-2.0 | 7,356,531,176,840,872,000 | 46.139535 | 125 | 0.614455 | false |