repo_name (string, 5–92) | path (string, 4–221) | copies (19 classes) | size (string, 4–6) | content (string, 766–896k) | license (15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 32–997) | alpha_frac (float64, 0.25–0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5–13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
vhakulinen/bruno-server
|
bruno/commands/decorators.py
|
1
|
1424
|
from bruno.send_utils import send_error
from bruno.env import inputs
class Args:
"""
Decorator for validating number of args. You can also pass in help
message.
"""
def __init__(self, no_args, msg=None):
self.no_args = no_args
if msg:
self.msg = '(' + msg + ')'
else:
self.msg = None
def __call__(self, func):
def wrapper(socket, args):
if self.no_args == len(args):
func(socket, args)
else:
send_error(socket, 102,
           'Invalid arguments ' + (self.msg if self.msg else ''))
return wrapper
class auth_required:
"""
Decorator for checking if the client has logged in. If not, sends an auth
error to the client.
"""
def __init__(self, func):
self.func = func
def __call__(self, socket, *args, **kwargs):
if inputs[socket].profile:
self.func(socket, *args, **kwargs)
else:
send_error(socket, 203)
class udp_required:
"""
Decorator for checking if the client has given us a UDP connection. If not,
sends an error to the client.
"""
def __init__(self, func):
self.func = func
def __call__(self, socket, *args, **kwargs):
if inputs[socket].udp_addr:
self.func(socket, *args, **kwargs)
else:
send_error(socket, 103)
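# --- Hedged usage sketch (not part of the original module; the command name
# and its arguments are hypothetical). It only illustrates how the decorators
# are meant to stack on a plain (socket, args) handler: Args is instantiated
# with the expected argument count, while auth_required wraps the function
# directly.
@Args(2, msg='MSG <profile> <text>')
@auth_required
def _example_msg_handler(socket, args):
    """Illustration only: forward args[1] to the profile named in args[0]."""
    pass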
|
gpl-2.0
| -6,546,121,015,751,768,000 | 25.37037 | 79 | 0.525983 | false | 3.988796 | false | false | false |
FireWalkerX/eyeOS-FOSS-V.2.0
|
devtools/qooxdoo-sdk/tool/bin/create-application.py
|
1
|
12157
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# qooxdoo - the new era of web development
#
# http://qooxdoo.org
#
# Copyright:
# 2008 - 2010 1&1 Internet AG, Germany, http://www.1und1.de
#
# License:
# LGPL: http://www.gnu.org/licenses/lgpl.html
# EPL: http://www.eclipse.org/org/documents/epl-v10.php
# See the LICENSE file in the project's top-level directory for details.
#
# Authors:
# * Fabian Jakobs (fjakobs)
# * Andreas Ecker (ecker)
#
################################################################################
import re, os, sys, optparse, shutil, errno, stat, codecs, glob
from string import Template
import qxenviron
from ecmascript.frontend import lang
from generator.runtime.Log import Log
from misc import Path
SCRIPT_DIR = qxenviron.scriptDir
FRAMEWORK_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, os.pardir, os.pardir))
SKELETON_DIR = unicode(os.path.normpath(os.path.join(FRAMEWORK_DIR, "component", "skeleton")))
APP_DIRS = [x for x in os.listdir(SKELETON_DIR) if not re.match(r'^\.',x)]
R_ILLEGAL_NS_CHAR = re.compile(r'(?u)[^\.\w]') # allow unicode, but disallow $
R_SHORT_DESC = re.compile(r'(?m)^short:: (.*)$') # to search "short:: ..." in skeleton's 'readme.txt'
QOOXDOO_VERSION = '' # will be filled later
def getAppInfos():
appInfos = {}
for dir in APP_DIRS:
readme = os.path.join(SKELETON_DIR, dir, "readme.txt")
appinfo = ""
if os.path.isfile(readme):
cont = open(readme, "r").read()
mo = R_SHORT_DESC.search(cont)
if mo:
appinfo = mo.group(1)
appInfos[dir] = appinfo
return appInfos
APP_INFOS = getAppInfos()
def getQxVersion():
global QOOXDOO_VERSION
versionFile = os.path.join(FRAMEWORK_DIR, "version.txt")
version = codecs.open(versionFile,"r", "utf-8").read()
version = version.strip()
QOOXDOO_VERSION = version
return
def createApplication(options):
out = options.out
if sys.platform == 'win32' and re.match( r'^[a-zA-Z]:$', out):
out = out + '\\'
else:
out = os.path.expanduser(out)
if not os.path.isdir(out):
if os.path.isdir(normalizePath(out)):
out = normalizePath(out)
else:
console.error("Output directory '%s' does not exist" % out)
sys.exit(1)
outDir = os.path.join(out, options.name)
copySkeleton(options.skeleton_path, options.type, outDir, options.namespace)
if options.type == "contribution":
patchSkeleton(os.path.join(outDir, "trunk"), FRAMEWORK_DIR, options)
else:
patchSkeleton(outDir, FRAMEWORK_DIR, options)
return
def copySkeleton(skeleton_path, app_type, dir, namespace):
console.log("Copy skeleton into the output directory: %s" % dir)
def rename_folders(root_dir):
# rename name space parts of paths
# rename in class path
source_dir = os.path.join(root_dir, "source", "class", "custom")
out_dir = os.path.join(root_dir, "source", "class")
expand_dir(source_dir, out_dir, namespace)
# rename in resource path
resource_dir = os.path.join(root_dir, "source", "resource", "custom")
out_dir = os.path.join(root_dir, "source", "resource")
expand_dir(resource_dir, out_dir, namespace)
# rename in script path
script_dir = os.path.join(root_dir, "source", "script")
script_files = glob.glob(os.path.join(script_dir, "custom.*js"))
if script_files:
for script_file in script_files:
os.rename(script_file, script_file.replace("custom", namespace))
template = os.path.join(skeleton_path, app_type)
if not os.path.isdir(template):
console.error("Unknown application type '%s'." % app_type)
sys.exit(1)
try:
shutil.copytree(template, dir)
except OSError:
console.error("Failed to copy skeleton, maybe the directory already exists")
sys.exit(1)
if app_type == "contribution":
app_dir = os.path.join(dir, "trunk")
else:
app_dir = dir
rename_folders(app_dir)
if app_type == "contribution":
rename_folders(os.path.join(app_dir, "demo", "default"))
#clean svn directories
for root, dirs, files in os.walk(dir, topdown=False):
if ".svn" in dirs:
filename = os.path.join(root, ".svn")
shutil.rmtree(filename, ignore_errors=False, onerror=handleRemoveReadonly)
def expand_dir(indir, outroot, namespace):
"appends namespace parts to outroot, and renames indir to the last part"
if not (os.path.isdir(indir) and os.path.isdir(outroot)):
return
ns_parts = namespace.split('.')
target = outroot
for part in ns_parts:
target = os.path.join(target, part)
if part == ns_parts[-1]: # it's the last part
os.rename(indir, target)
else:
os.mkdir(target)
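# Hedged example (paths and namespace are illustrative, not from a real run):
# with namespace "foo.bar", expand_dir("source/class/custom", "source/class",
# "foo.bar") creates source/class/foo and renames the custom directory to
# source/class/foo/bar.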
def patchSkeleton(dir, framework_dir, options):
absPath = normalizePath(framework_dir)
if absPath[-1] == "/":
absPath = absPath[:-1]
if sys.platform == 'cygwin':
if re.match( r'^\.{1,2}\/', dir ):
relPath = Path.rel_from_to(normalizePath(dir), framework_dir)
elif re.match( r'^/cygdrive\b', dir):
relPath = Path.rel_from_to(dir, framework_dir)
else:
relPath = Path.rel_from_to(normalizePath(dir), normalizePath(framework_dir))
else:
relPath = Path.rel_from_to(normalizePath(dir), normalizePath(framework_dir))
relPath = re.sub(r'\\', "/", relPath)
if relPath[-1] == "/":
relPath = relPath[:-1]
if not os.path.isdir(os.path.join(dir, relPath)):
console.error("Relative path to qooxdoo directory is not correct: '%s'" % relPath)
sys.exit(1)
if options.type == "contribution":
relPath = os.path.join(os.pardir, os.pardir, "qooxdoo", QOOXDOO_VERSION)
relPath = re.sub(r'\\', "/", relPath)
for root, dirs, files in os.walk(dir):
for file in files:
split = file.split(".")
if len(split) >= 3 and split[1] == "tmpl":
outFile = os.path.join(root, split[0] + "." + ".".join(split[2:]))
inFile = os.path.join(root, file)
console.log("Patching file '%s'" % outFile)
config = Template(open(inFile).read())
out = open(outFile, "w")
out.write(
config.substitute({
"Name": options.name,
"Namespace": options.namespace,
"NamespacePath" : (options.namespace).replace('.', '/'),
"REL_QOOXDOO_PATH": relPath,
"ABS_QOOXDOO_PATH": absPath,
"QOOXDOO_VERSION": QOOXDOO_VERSION,
"Cache" : options.cache,
}).encode('utf-8')
)
out.close()
os.remove(inFile)
for root, dirs, files in os.walk(dir):
for file in [file for file in files if file.endswith(".py")]:
os.chmod(os.path.join(root, file), (stat.S_IRWXU
|stat.S_IRGRP |stat.S_IXGRP
|stat.S_IROTH |stat.S_IXOTH)) # 0755
def handleRemoveReadonly(func, path, exc):
# For Windows the 'readonly' must not be set for resources to be removed
excvalue = exc[1]
if func in (os.rmdir, os.remove) and excvalue.errno == errno.EACCES:
os.chmod(path, stat.S_IRWXU| stat.S_IRWXG| stat.S_IRWXO) # 0777
func(path)
else:
raise
def normalizePath(path):
# Fix Windows annoyance to randomly return drive letters uppercase or lowercase.
# Under Cygwin the user could also supply a lowercase drive letter. For those
# two systems, the drive letter is always converted to uppercase, the remaining
# path to lowercase
if not sys.platform == 'win32' and not sys.platform == 'cygwin':
return path
path = re.sub(r'\\+', "/", path)
if sys.platform == 'cygwin':
search = re.match( r'^/cygdrive/([a-zA-Z])(/.*)$', path)
if search:
return search.group(1).upper() + ":" + search.group(2).lower()
search = re.match( r'^([a-zA-Z])(:.*)$', path )
if search:
return search.group(1).upper() + search.group(2).lower()
return path
def checkNamespace(options):
# check availability and spelling
if not options.namespace:
if R_ILLEGAL_NS_CHAR.search(options.name):
convertedName = R_ILLEGAL_NS_CHAR.sub("_", options.name)
console.log("WARNING: Converted illegal characters in name (from %s to %s)" % (options.name, convertedName))
options.name = convertedName
options.namespace = convertedName.lower()
else:
options.namespace = options.name.lower()
else:
options.namespace = options.namespace.decode('utf-8')
if R_ILLEGAL_NS_CHAR.search(options.namespace):
convertedNamespace = R_ILLEGAL_NS_CHAR.sub("_", options.namespace)
console.log("WARNING: Converted illegal characters in namespace (from %s to %s)" % (options.namespace, convertedNamespace))
options.namespace = convertedNamespace
# check reserved words
if options.namespace in lang.GLOBALS:
console.error("JS reserved word '%s' is not allowed as name space" % options.namespace)
sys.exit(1)
def main():
parser = optparse.OptionParser()
parser.set_usage('''\
%prog --name APPLICATIONNAME [--out DIRECTORY]
[--namespace NAMESPACE] [--type TYPE]
[--logfile LOGFILE] [--skeleton-path PATH]
Script to create a new qooxdoo application.
Example: For creating a regular GUI application \'myapp\' you could execute:
%prog --name myapp''')
parser.add_option(
"-n", "--name", dest="name", metavar="APPLICATIONNAME",
help="Name of the application. An application folder with identical name will be created. (Required)"
)
parser.add_option(
"-o", "--out", dest="out", metavar="DIRECTORY", default=".",
help="Output directory for the application folder. (Default: %default)"
)
parser.add_option(
"-s", "--namespace", dest="namespace", metavar="NAMESPACE", default=None,
help="Applications's top-level namespace. (Default: APPLICATIONNAME)"
)
parser.add_option(
"-t", "--type", dest="type", metavar="TYPE", default="gui",
help="Type of the application to create, one of: "+str(map(str, sorted(APP_INFOS.keys())))+"." +
str(", ".join(["'%s' %s" % (x, y) for x,y in sorted(APP_INFOS.items())])) +
". (Default: %default)"
)
parser.add_option(
"-l", "--logfile", dest="logfile", metavar="LOGFILE",
default=None, type="string", help="Log file"
)
parser.add_option(
"-p", "--skeleton-path", dest="skeleton_path", metavar="PATH", default=SKELETON_DIR,
help="(Advanced) Path where the script looks for skeletons. " +
"The directory must contain sub directories named by " +
"the application types. (Default: %default)"
)
parser.add_option(
"--cache", dest="cache", metavar="PATH", default="${TMPDIR}/cache",
help="Path to the cache directory; will be entered into config.json's CACHE macro (Default: %default)"
)
(options, args) = parser.parse_args(sys.argv[1:])
if not options.name:
parser.print_help()
sys.exit(1)
else:
options.name = options.name.decode('utf-8')
# Initialize console
global console
console = Log(options.logfile, "info")
checkNamespace(options)
getQxVersion()
createApplication(options)
console.log("DONE")
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print
print "Keyboard interrupt!"
sys.exit(1)
|
agpl-3.0
| 4,756,107,839,994,583,000 | 33.933908 | 135 | 0.586329 | false | 3.601007 | false | false | false |
gtesei/fast-furious
|
competitions/santander-customer-transaction-prediction/base_light_gbm1.py
|
1
|
2556
|
import lightgbm as lgb
import pandas as pd
import numpy as np
import sys
from datetime import datetime
from pathlib import Path
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import RepeatedStratifiedKFold
path=Path("data/")
train=pd.read_csv(path/"train.csv").drop("ID_code",axis=1)
test=pd.read_csv(path/"test.csv").drop("ID_code",axis=1)
param = {
'boost_from_average':'false',
'bagging_fraction': 0.5,
'boost': 'gbdt',
'feature_fraction': 0.02,
'learning_rate': 0.001,
'max_depth': 6,
'metric':'auc',
'min_data_in_leaf': 100,
'min_sum_hessian_in_leaf': 10.0,
'num_leaves': 13,
'n_jobs': 30,
'tree_learner': 'serial',
'objective': 'binary',
'verbosity': -1
}
result=np.zeros(test.shape[0])
rskf = RepeatedStratifiedKFold(n_splits=5, n_repeats=5,random_state=10)
best_iteration , best_valid_auc = 0, 0
for counter,(train_index, valid_index) in enumerate(rskf.split(train, train.target),1):
print ("Rep-Fold:",counter)
sys.stdout.flush()
#Train data
t=train.iloc[train_index]
trn_data = lgb.Dataset(t.drop("target",axis=1), label=t.target)
#Validation data
v=train.iloc[valid_index]
val_data = lgb.Dataset(v.drop("target",axis=1), label=v.target)
#Training
model = lgb.train(param, trn_data, 1000000, feature_name=train.columns.tolist()[1:], valid_sets = [trn_data, val_data], verbose_eval=500, early_stopping_rounds = 4000)
result +=model.predict(test)
## feat imp
gain = model.feature_importance('gain')
ft = pd.DataFrame({'feature':train.columns.tolist()[1:],'split':model.feature_importance('split'),'gain':100 * gain / gain.sum()}).sort_values('gain', ascending=False)
print("************ FEAT IMPORTANCE *****************")
print(ft.head(25))
print()
##
_best_valid_auc = model.best_score['valid_1']['auc']
_best_iteration = model.best_iteration
print("best_iteration:",_best_iteration,"- best_valid_auc:",_best_valid_auc )
best_valid_auc +=_best_valid_auc
best_iteration += _best_iteration
submission = pd.read_csv(path/'sample_submission.csv')
submission['target'] = result/counter
filename="{:%Y-%m-%d_%H_%M}_sub_after_tune.csv".format(datetime.now())
submission.to_csv(filename, index=False)
## average best iteration and validation AUC over all rep-folds
best_valid_auc = best_valid_auc/counter
best_iteration = best_iteration/counter
fh = open("base_light_gbm1.log","w")
print("best_iteration_avg:",best_iteration,"- best_valid_auc_avg:",best_valid_auc,file=fh)
fh.close()
|
mit
| -557,002,843,498,613,570 | 31.769231 | 171 | 0.664319 | false | 3.028436 | false | false | false |
GarrettArm/TheDjangoBook
|
mysite_project/blog/views.py
|
1
|
1361
|
from django.shortcuts import render
from django.utils import timezone
from django.views import generic
from .models import Post
class IndexView(generic.ListView):
template_name = "blog/post_list.html"
context_object_name = "posts"
def get_queryset(self):
return Post.objects.filter(published_date__lte=timezone.now()).order_by(
"published_date"
)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
for i in context["object_list"]:
i.text_as_list = self.split_to_paragraphs(i.text)
description = ["A description"]
context["description"] = description
return context
def split_to_paragraphs(self, text):
text_list = f"{text[:300]}...".split("\n")
return text_list
class DetailView(generic.DetailView):
template_name = "blog/detail.html"
model = Post
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
print(context)
context["object"].text_as_list = self.split_to_paragraphs(
context["object"].text
)
description = ["Another description"]
context["description"] = description
return context
def split_to_paragraphs(self, text):
text_list = text.split("\n")
return text_list
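# Hedged usage sketch (URL names and app_name are assumptions, not taken from
# this repository): class-based views like these are normally wired up in a
# companion blog/urls.py, roughly:
#
#     from django.urls import path
#     from . import views
#
#     app_name = "blog"
#     urlpatterns = [
#         path("", views.IndexView.as_view(), name="post_list"),
#         path("<int:pk>/", views.DetailView.as_view(), name="detail"),
#     ]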
|
gpl-3.0
| -2,322,962,383,464,944,600 | 28.586957 | 80 | 0.626745 | false | 3.991202 | false | false | false |
Great-Li-Xin/PythonDev
|
Games/Chap8/Zombie Mob.py
|
1
|
9550
|
# MyLibrary.py helpers (print_text, MySprite, Point, calc_velocity) inlined below, followed by the Zombie Mob game
import sys, time, random, math, pygame
from pygame.locals import *
# prints text using the supplied font
def print_text(font, x, y, text, color=(255, 255, 255)):
imgText = font.render(text, True, color)
screen = pygame.display.get_surface() # req'd when function moved into MyLibrary
screen.blit(imgText, (x, y))
# MySprite class extends pygame.sprite.Sprite
class MySprite(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self) # extend the base Sprite class
self.master_image = None
self.frame = 0
self.old_frame = -1
self.frame_width = 1
self.frame_height = 1
self.first_frame = 0
self.last_frame = 0
self.columns = 1
self.last_time = 0
self.direction = 0
self.velocity = Point(0.0, 0.0)
# X property
def _getx(self):
return self.rect.x
def _setx(self, value):
self.rect.x = value
X = property(_getx, _setx)
# Y property
def _gety(self):
return self.rect.y
def _sety(self, value):
self.rect.y = value
Y = property(_gety, _sety)
# position property
def _getpos(self):
return self.rect.topleft
def _setpos(self, pos):
self.rect.topleft = pos
position = property(_getpos, _setpos)
def load(self, filename, width, height, columns):
self.master_image = pygame.image.load(filename).convert_alpha()
self.frame_width = width
self.frame_height = height
self.rect = Rect(0, 0, width, height)
self.columns = columns
# try to auto-calculate total frames
rect = self.master_image.get_rect()
self.last_frame = (rect.width // width) * (rect.height // height) - 1
def update(self, current_time, rate=30):
# update animation frame number
if current_time > self.last_time + rate:
self.frame += 1
if self.frame > self.last_frame:
self.frame = self.first_frame
self.last_time = current_time
# build current frame only if it changed
if self.frame != self.old_frame:
frame_x = (self.frame % self.columns) * self.frame_width
frame_y = (self.frame // self.columns) * self.frame_height
rect = Rect(frame_x, frame_y, self.frame_width, self.frame_height)
self.image = self.master_image.subsurface(rect)
self.old_frame = self.frame
def __str__(self):
return str(self.frame) + "," + str(self.first_frame) + \
"," + str(self.last_frame) + "," + str(self.frame_width) + \
"," + str(self.frame_height) + "," + str(self.columns) + \
"," + str(self.rect)
# Point class
class Point(object):
def __init__(self, x, y):
self.__x = x
self.__y = y
# X property
def getx(self): return self.__x
def setx(self, x): self.__x = x
x = property(getx, setx)
# Y property
def gety(self): return self.__y
def sety(self, y): self.__y = y
y = property(gety, sety)
def __str__(self):
return "{X:" + "{:.0f}".format(self.__x) + ",Y:" + "{:.0f}".format(self.__y) + "}"
def calc_velocity(direction, vel=1.0):
velocity = Point(0, 0)
if direction == 0: # north
velocity.y = -vel
elif direction == 2: # east
velocity.x = vel
elif direction == 4: # south
velocity.y = vel
elif direction == 6: # west
velocity.x = -vel
return velocity
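# Hedged example (values chosen for illustration): with the 0/2/4/6 direction
# codes above and pygame's screen coordinates (y grows downward),
# calc_velocity(0) gives a Point with x=0, y=-1.0 (one step north) and
# calc_velocity(2, 1.5) gives x=1.5, y=0 (one and a half steps east).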
def reverse_direction(sprite):
if sprite.direction == 0:
sprite.direction = 4
elif sprite.direction == 2:
sprite.direction = 6
elif sprite.direction == 4:
sprite.direction = 0
elif sprite.direction == 6:
sprite.direction = 2
if __name__ == '__main__':
# main program begins
pygame.init()
screen = pygame.display.set_mode((800, 600))
pygame.display.set_caption("Collision Demo")
font = pygame.font.Font(None, 36)
timer = pygame.time.Clock()
# create sprite groups
player_group = pygame.sprite.Group()
zombie_group = pygame.sprite.Group()
health_group = pygame.sprite.Group()
# create the player sprite
player = MySprite()
player.load("farmer walk.png", 96, 96, 8)
player.position = 80, 80
player.direction = 4
player_group.add(player)
# create the zombie sprite
zombie_image = pygame.image.load("zombie walk.png").convert_alpha()
for counter in range(0, 10):
zombie = MySprite()
zombie.load("zombie walk.png", 96, 96, 8)
zombie.position = random.randint(0, 700), random.randint(0, 500)
zombie.direction = random.randint(0, 3) * 2
zombie_group.add(zombie)
# create heath sprite
health = MySprite()
health.load("health.png", 32, 32, 1)
health.position = 400, 300
health_group.add(health)
game_over = False
player_moving = False
player_health = 100
# repeating loop
while True:
timer.tick(30)
ticks = pygame.time.get_ticks()
for event in pygame.event.get():
if event.type == QUIT: sys.exit()
keys = pygame.key.get_pressed()
if keys[K_ESCAPE]:
sys.exit()
elif keys[K_UP] or keys[K_w]:
player.direction = 0
player_moving = True
elif keys[K_RIGHT] or keys[K_d]:
player.direction = 2
player_moving = True
elif keys[K_DOWN] or keys[K_s]:
player.direction = 4
player_moving = True
elif keys[K_LEFT] or keys[K_a]:
player.direction = 6
player_moving = True
else:
player_moving = False
if not game_over:
# set animation frames based on player's direction
player.first_frame = player.direction * player.columns
player.last_frame = player.first_frame + player.columns - 1
if player.frame < player.first_frame:
player.frame = player.first_frame
if not player_moving:
# stop animating when player is not pressing a key
player.frame = player.first_frame = player.last_frame
else:
# move player in direction
player.velocity = calc_velocity(player.direction, 1.5)
player.velocity.x *= 1.5
player.velocity.y *= 1.5
# update player sprite
player_group.update(ticks, 50)
# manually move the player
if player_moving:
player.X += player.velocity.x
player.Y += player.velocity.y
if player.X < 0:
player.X = 0
elif player.X > 700:
player.X = 700
if player.Y < 0:
player.Y = 0
elif player.Y > 500:
player.Y = 500
# update zombie sprites
zombie_group.update(ticks, 50)
# manually iterate through all the zombies
for single_zombie in zombie_group:
# set the zombie's animation range
single_zombie.first_frame = single_zombie.direction * single_zombie.columns
single_zombie.last_frame = single_zombie.first_frame + single_zombie.columns - 1
if single_zombie.frame < single_zombie.first_frame:
single_zombie.frame = single_zombie.first_frame
single_zombie.velocity = calc_velocity(single_zombie.direction)
# keep the zombie on the screen
single_zombie.X += single_zombie.velocity.x
single_zombie.Y += single_zombie.velocity.y
if single_zombie.X < 0 or single_zombie.X > 700 or single_zombie.Y < 0 or single_zombie.Y > 500:
reverse_direction(single_zombie)
# check for collision with zombies
attacker = None
attacker = pygame.sprite.spritecollideany(player, zombie_group)
if attacker is not None:
# we got a hit, now do a more precise check
if pygame.sprite.collide_rect_ratio(0.5)(player, attacker):
player_health -= 10
if attacker.X < player.X:
attacker.X -= 10
elif attacker.X > player.X:
attacker.X += 10
else:
attacker = None
# update the health drop
health_group.update(ticks, 50)
# check for collision with health
if pygame.sprite.collide_rect_ratio(0.5)(player, health):
player_health += 30
if player_health > 100: player_health = 100
health.X = random.randint(0, 700)
health.Y = random.randint(0, 500)
# is player dead?
if player_health <= 0:
game_over = True
# clear the screen
screen.fill((50, 50, 100))
# draw sprites
health_group.draw(screen)
zombie_group.draw(screen)
player_group.draw(screen)
# draw energy bar
pygame.draw.rect(screen, (50, 150, 50, 180), Rect(300, 570, player_health * 2, 25))
pygame.draw.rect(screen, (100, 200, 100, 180), Rect(300, 570, 200, 25), 2)
if game_over:
print_text(font, 300, 100, "G A M E O V E R")
pygame.display.update()
|
mit
| 5,144,566,735,357,413,000 | 31.263514 | 112 | 0.553298 | false | 3.750982 | false | false | false |
oculusstorystudio/kraken
|
Python/kraken/ui/HAppkit_Editors/editor_widgets/integer_editor.py
|
1
|
2518
|
import sys
from kraken.ui.Qt import QtWidgets, QtGui, QtCore
from ..fe import FE
from ..widget_factory import EditorFactory
from ..base_editor import BaseValueEditor
class IntegerEditor(BaseValueEditor):
def __init__(self, valueController, parent=None):
super(IntegerEditor, self).__init__(valueController, parent=parent)
hbox = QtWidgets.QHBoxLayout()
self._editor = QtWidgets.QSpinBox(self)
if(self._dataType == 'UInt8' or
self._dataType == 'UInt16' or
self._dataType == 'UInt32' or
self._dataType == 'UInt64' or
self._dataType == 'Index' or
self._dataType == 'Size' or
self._dataType == 'Byte'):
self._editor.setMinimum(0)
else:
self._editor.setMinimum(-100000000)
self._editor.setMaximum(100000000)
self._editor.setSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
hbox.addWidget(self._editor, 1)
hbox.addStretch(0)
hbox.setContentsMargins(0, 0, 0, 0)
self.setLayout(hbox)
self.setSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
self.updateEditorValue()
self._editor.valueChanged.connect(self._setValueToController)
self.setEditable( valueController.isEditable() )
def setEditable(self, editable):
self._editor.setReadOnly( not editable )
def getEditorValue(self):
value = self._editor.value()
return value#self._klType(value)
def setEditorValue(self, value):
# Clamp values to avoid OverflowError
if value > sys.maxint:
value = sys.maxint
elif value < -sys.maxint:
value = -sys.maxint
self._editor.setValue(value)
@classmethod
def canDisplay(cls, valueController):
dataType = valueController.getDataType()
return (dataType == 'Integer' or
dataType == 'UInt8' or
dataType == 'SInt8' or
dataType == 'UInt16' or
dataType == 'SInt16' or
dataType == 'UInt32' or
dataType == 'SInt32' or
dataType == 'UInt64' or
dataType == 'SInt64' or
dataType == 'Index' or
dataType == 'Size' or
dataType == 'Byte')
EditorFactory.registerEditorClass(IntegerEditor)
|
bsd-3-clause
| -3,679,933,215,878,452,700 | 30.475 | 96 | 0.575854 | false | 4.319039 | false | false | false |
bvesperman/Sector67RaspberryPiAccessControl
|
space_machines/door.py
|
1
|
5509
|
import logging
import time
import Queue
import threading
from Tkinter import *
from pystates import StateMachine
class DoorState(StateMachine):
def CLOSED_LOCKED(self):
self.generate_message({"event": self.name + "_CLOSED_LOCKED"})
if self.show_gui: self.v.set("CLOSED_LOCKED")
self.log.debug("turn off solenoid")
self.generate_message({"event": self.name + "_LOCK_DOOR"})
while True:
ev = yield
if ev['event'] == "VALID_KEY":
self.transition(self.CLOSED_UNLOCKING)
if ev['event'] == "DOOR_OPENED":
self.transition(self.FORCED_OPEN)
def CLOSED_UNLOCKING(self):
self.generate_message({"event": self.name + "_CLOSED_UNLOCKING", "timeout": self.unlock_timeout})
if self.show_gui: self.v.set("CLOSED_UNLOCKING")
self.log.debug("turn on solenoid")
self.generate_message({"event": self.name + "_UNLOCK_DOOR"})
self.log.debug("waiting up to " + str(self.unlock_timeout) + " seconds")
while True:
ev = yield
if ev['event'] == "DOOR_OPENED":
self.log.debug('Unlocked and opened')
self.transition(self.OPEN_UNLOCKING)
if self.duration() > self.unlock_timeout:
self.log.debug('Unlocked but not opened')
self.transition(self.CLOSED_LOCKED)
def OPEN_UNLOCKING(self):
self.generate_message({"event": self.name + "_OPEN_UNLOCKING"})
if self.show_gui: self.v.set("OPEN_UNLOCKING")
self.log.debug("waiting up to " + str(self.open_unlock_timeout) + " seconds")
while True:
ev = yield
if ev['event'] == "DOOR_CLOSED":
self.log.debug('Door closed')
self.transition(self.CLOSED_LOCKED)
if self.duration() > self.open_unlock_timeout:
self.transition(self.OPEN_LOCKED)
def OPEN_LOCKED(self):
self.generate_message({"event": self.name + "_OPEN_LOCKED"})
if self.show_gui: self.v.set("OPEN_LOCKED")
self.log.debug("turn off solenoid")
self.generate_message({"event": self.name + "_LOCK_DOOR"})
self.log.debug("waiting up to " + str(self.stuck_open_timeout) + "seconds")
while True:
ev = yield
if ev['event'] == "DOOR_CLOSED":
self.log.debug('Door closed')
self.transition(self.CLOSED_LOCKED)
if self.duration() > self.stuck_open_timeout:
self.log.debug("timeout!")
self.transition(self.STUCK_OPEN)
def STUCK_OPEN(self):
self.generate_message({"event": self.name + "_STUCK_OPEN"})
if self.show_gui: self.v.set("STUCK_OPEN")
self.log.debug("door stuck open")
while True:
ev = yield
if ev['event'] == "DOOR_CLOSED":
self.log.debug('Door finally closed')
self.transition(self.CLOSED_LOCKED)
def FORCED_OPEN(self):
if self.show_gui: self.v.set("FORCED_OPEN")
self.generate_message({"event": self.name + "_FORCED_OPEN"})
self.log.debug("door forced open")
while True:
ev = yield
if ev['event'] == "DOOR_CLOSED":
self.log.debug('Door closed')
self.transition(self.CLOSED_LOCKED)
if self.duration() > self.stuck_open_timeout:
self.log.debug("timeout!")
self.transition(self.STUCK_OPEN)
def setup(self, out_queue, name, unlock_timeout=5, open_unlock_timeout=1, stuck_open_timeout=15):
self.log = logging.getLogger("DoorState")
self.out_queue = out_queue
self.name = name
self.unlock_timeout = int(unlock_timeout)
self.open_unlock_timeout = int(open_unlock_timeout)
self.stuck_open_timeout = int(stuck_open_timeout)
""" Perform initialization here, detect the current state and send that
to the super class start.
"""
def start(self):
# assume a starting state of CLOSED_LOCKED and appropriate messages will send it to the correct state
super(DoorState, self).start(self.CLOSED_LOCKED)
def config_gui(self, root):
self.show_gui = True
# Set up the GUI part
frame = LabelFrame(root, text=self.name, padx=5, pady=5)
frame.pack(fill=X)
self.v = StringVar()
self.v.set("UNKNOWN")
w = Label(frame, textvariable=self.v)
w.pack(side=LEFT)
def main():
out_queue = Queue.Queue()
logging.basicConfig(level=logging.DEBUG)
name = "TEST_DOOR"
doorstate = DoorState(name=name)
doorstate.setup(out_queue, name=name)
doorstate.start()
doorstate.send_message({"event": "VALID_KEY"})
logging.info('unlock the door, open then close it')
doorstate.send_message({"event":"VALID_KEY"})
time.sleep(2)
doorstate.send_message({"event":"DOOR_OPENED"})
time.sleep(2)
doorstate.send_message({"event":"DOOR_CLOSED"})
time.sleep(2)
logging.info('current state:' + doorstate.current_state())
logging.info('unlock the door but do not open it')
time.sleep(2)
doorstate.send_message({"event":"VALID_KEY"})
time.sleep(10)
logging.info('open the door and close it quickly')
time.sleep(0.1)
doorstate.send_message({"event":"VALID_KEY"})
doorstate.send_message({"event":"DOOR_OPENED"})
doorstate.send_message({"event":"DOOR_CLOSED"})
time.sleep(2)
logging.info('open the door and leave it open for 30 seconds')
time.sleep(2)
doorstate.send_message({"event":"VALID_KEY"})
doorstate.send_message({"event":"DOOR_OPENED"})
time.sleep(30)
time.sleep(2)
doorstate.send_message({"event":"DOOR_CLOSED"})
time.sleep(2)
logging.info('force the door open')
time.sleep(2)
doorstate.send_message({"event":"DOOR_OPENED"})
time.sleep(2)
doorstate.send_message({"event":"DOOR_CLOSED"})
time.sleep(2)
if __name__=='__main__':
main()
|
mit
| 492,777,001,027,727,100 | 32.186747 | 105 | 0.656744 | false | 3.208503 | false | false | false |
rmvanhees/pynadc
|
pynadc/scia/hk.py
|
1
|
6316
|
"""
This file is part of pynadc
https://github.com/rmvanhees/pynadc
Routines to convert Sciamachy house-keeping data from raw counts
to physical units.
Copyright (c) 2018 SRON - Netherlands Institute for Space Research
All Rights Reserved
License: BSD-3-Clause
"""
from datetime import timedelta
import numpy as np
from bitstring import BitArray
def get_det_temp(channel, raw_tm):
"""
convert raw temperature counts to Kelvin
"""
nch = channel - 1
if nch < 0 or nch > 7:
raise ValueError('channel must be between 1 and 8')
tab_tm = [
(0, 17876, 18312, 18741, 19161, 19574, 19980, 20379,
20771, 21157, 21908, 22636, 24684, 26550, 28259, 65535),
(0, 18018, 18456, 18886, 19309, 19724, 20131, 20532,
20926, 21313, 22068, 22798, 24852, 26724, 28436, 65535),
(0, 20601, 20996, 21384, 21765, 22140, 22509, 22872,
23229, 23581, 23927, 24932, 26201, 27396, 28523, 65535),
(0, 20333, 20725, 21110, 21490, 21863, 22230, 22591,
22946, 23295, 23640, 24640, 25905, 27097, 28222, 65535),
(0, 20548, 20942, 21330, 21711, 22086, 22454, 22817,
23174, 23525, 23871, 24875, 26144, 27339, 28466, 65535),
(0, 17893, 18329, 18758, 19179, 19593, 20000, 20399,
20792, 21178, 21931, 22659, 24709, 26578, 28289, 65535),
(0, 12994, 13526, 14046, 14555, 15054, 15543, 16022,
16492, 17850, 20352, 22609, 24656, 26523, 28232, 65535),
(0, 13129, 13664, 14188, 14702, 15204, 15697, 16180,
16653, 18019, 20536, 22804, 24860, 26733, 28447, 65535)
] # shape (8, 16)
tab_temp = [
(179., 180., 185., 190., 195., 200., 205., 210.,
215., 220., 230., 240., 270., 300., 330., 331.),
(179., 180., 185., 190., 195., 200., 205., 210.,
215., 220., 230., 240., 270., 300., 330., 331.),
(209., 210., 215., 220., 225., 230., 235., 240.,
245., 250., 255., 270., 290., 310., 330., 331.),
(209., 210., 215., 220., 225., 230., 235., 240.,
245., 250., 255., 270., 290., 310., 330., 331.),
(209., 210., 215., 220., 225., 230., 235., 240.,
245., 250., 255., 270., 290., 310., 330., 331.),
(179., 180., 185., 190., 195., 200., 205., 210.,
215., 220., 230., 240., 270., 300., 330., 331.),
(129., 130., 135., 140., 145., 150., 155., 160.,
165., 180., 210., 240., 270., 300., 330., 331.),
(129., 130., 135., 140., 145., 150., 155., 160.,
165., 180., 210., 240., 270., 300., 330., 331.)
] # shape (8, 16)
# use linear interpolation (nothing fancy)
return np.interp(raw_tm, tab_tm[nch], tab_temp[nch])
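# Hedged usage sketch (the raw counts are made-up illustrations): the
# conversion is a plain table lookup with linear interpolation, so a channel-8
# count of 20351 falls between the 18019 and 20536 calibration points and maps
# to a temperature between 180 K and 210 K; np.interp also accepts array input.
#
#     temp_k = get_det_temp(8, 20351)                        # scalar -> Kelvin
#     temps = get_det_temp(8, np.array([13129, 20351]))      # vectorised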
def get_det_vis_pet(chan_hdr):
"""
convert raw detector timing data to pixel exposure time (VIS channels 1--5)
"""
# The layout of the detector command word for channels 1--5
# 14 bits: exposure time factor (ETF)
# ETF >= 1: pet = etf * 62.5 ms * ratio
# ETF == 0: pet = 31.25 ms
# 2 bits: mode
# 0: Normal Mode
# 1: Normal Mode
# 2: Test Mode
# 3: ADC calibration
# 9 bits: section address (2 pixels resolution)
# start of virtual channel b at 2 * section
# 5 bits: ratio
# ratio of exposure time between virtual channels
# 2 bits: control
# 1: restart of readout cycle
# 3: hardware reset of detector module electronics
#
command = BitArray(uintbe=chan_hdr['command'], length=32)
etf = int(command.bin[0:14], 2)
section = int(command.bin[16:25], 2)
ratio = int(command.bin[25:30], 2)
vir_chan_b = 2 * section
if etf == 0:
return (1 / 32, vir_chan_b)
pet = etf / 16
if section > 0 and ratio > 1:
return ([pet * ratio, pet], vir_chan_b)
return (pet, vir_chan_b)
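# Hedged worked example (the command word is constructed here purely for
# illustration): packing ETF=2, mode=0, section=10, ratio=2 into the layout
# documented above gives the 32-bit word 0x00080508, and
#
#     get_det_vis_pet({'command': 0x00080508})
#
# returns ([0.25, 0.125], 20): the two virtual-channel pixel exposure times
# (the 0.125 s base PET scaled by ratio=2 for the first entry) and
# 2 * section = 20 as the start of virtual channel b.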
def get_det_ir_pet(chan_hdr):
"""
convert raw detector timing data to pixel exposure time (IR channels 6--8)
"""
# The layout of the detector command word for channels 6--8
# 14 bits: exposure time factor (ETF)
# ETF >= 1: pet = etf * 62.5 ms * ratio
# ETF == 0: pet = 31.25 ms
# 2 bits: mode
# 0: Normal Mode
# 1: Hot Mode
# 2: Test Mode
# 3: ADC calibration
# 2 bits: comp. mode, sets the offset compensation
# 0: no offset compensation
# 1: store offset compensation
# 2: use stored offset
# 3: continuous offset
# 3 bits: not used
# 3 bits: fine bias settings [mV]
# (-16, 10, -5, -3, -2, -1, 0, 2)
# 2 bits: not used
# 4 bits: short pixel exposure time for Hot mode
# pet = 28.125e-6 * 2^x with x <= 10
# 2 bits: control
# 1: restart of readout cycle
# 3: hardware reset of detector module electronics
#
command = BitArray(uintbe=chan_hdr['command'], length=32)
etf = int(command.bin[0:14], 2)
mode = int(command.bin[14:16], 2)
spet = int(command.bin[26:30], 2)
# hot mode
if mode == 1:
return 28.125e-6 * 2 ** min(spet, 10)
# normal mode
if etf == 0:
return 1 / 32
return etf / 16
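# Hedged worked example (field values chosen only for illustration): in hot
# mode the short PET dominates, e.g. mode=1 with spet=3 gives
# 28.125e-6 * 2**3 = 2.25e-4 s regardless of the ETF field; in normal mode the
# same ETF rule as the VIS channels applies (ETF=0 -> 1/32 s, otherwise ETF/16).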
def mjd_to_datetime(state_id, det_isp):
"""
Calculates datetime at end of each integration time
"""
# BCPS enable delay per instrument state
ri_delay = (0,
86, 86, 86, 86, 86, 86, 86, 86, 86, 86,
86, 86, 86, 86, 86, 86, 86, 86, 86, 86,
86, 86, 86, 86, 86, 86, 86, 86, 86, 86,
86, 86, 86, 86, 86, 86, 86, 86, 86, 86,
86, 86, 86, 86, 86, 86, 86, 86, 86, 86,
86, 86, 86, 86, 86, 86, 86, 86, 111, 86,
303, 86, 86, 86, 86, 86, 86, 86, 111, 303)
# Add BCPS H/W delay (92.765 ms)
_ri = 0.092765 + ri_delay[state_id] / 256
# the function datetime.timedelta only accepts Python integers
mst_time = np.full(det_isp.size, np.datetime64('2000', 'us'))
for ni, dsr in enumerate(det_isp):
days = int(dsr['mjd']['days'])
secnds = int(dsr['mjd']['secnds'])
musec = int(dsr['mjd']['musec']
+ 1e6 * (dsr['chan_data']['hdr']['bcps'][0] / 16 + _ri))
mst_time[ni] += np.timedelta64(timedelta(days, secnds, musec))
return mst_time
|
bsd-3-clause
| -2,944,959,474,597,993,500 | 34.483146 | 76 | 0.542749 | false | 2.993365 | false | false | false |
40223233/2015cd_midterm
|
wsgi.py
|
1
|
26126
|
#@+leo-ver=5-thin
#@+node:2014fall.20141212095015.1775: * @file wsgi.py
# coding=utf-8
# 上面的程式內容編碼必須在程式的第一或者第二行才會有作用
################# (1) 模組導入區
# 導入 cherrypy 模組, 為了在 OpenShift 平台上使用 cherrypy 模組, 必須透過 setup.py 安裝
#@@language python
#@@tabwidth -4
#@+<<declarations>>
#@+node:2014fall.20141212095015.1776: ** <<declarations>> (wsgi)
import cherrypy
# 導入 Python 內建的 os 模組, 因為 os 模組為 Python 內建, 所以無需透過 setup.py 安裝
import os
# 導入 random 模組
import random
# 導入 gear 模組
import gear
################# (2) 廣域變數設定區
# 確定程式檔案所在目錄, 在 Windows 下有最後的反斜線
_curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
# 設定在雲端與近端的資料儲存目錄
if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
# 表示程式在雲端執行
download_root_dir = os.environ['OPENSHIFT_DATA_DIR']
data_dir = os.environ['OPENSHIFT_DATA_DIR']
else:
# 表示程式在近端執行
download_root_dir = _curdir + "/local_data/"
data_dir = _curdir + "/local_data/"
'''以下為近端 input() 與 for 迴圈應用的程式碼, 若要將程式送到 OpenShift 執行, 除了採用 CherryPy 網際框架外, 還要轉為 html 列印
# 利用 input() 取得的資料型別為字串
toprint = input("要印甚麼內容?")
# 若要將 input() 取得的字串轉為整數使用, 必須利用 int() 轉換
repeat_no = int(input("重複列印幾次?"))
for i in range(repeat_no):
print(toprint)
'''
#@-<<declarations>>
#@+others
#@+node:2014fall.20141212095015.1777: ** class Hello
################# (3) 程式類別定義區
# 以下改用 CherryPy 網際框架程式架構
# 以下為 Hello 類別的設計內容, 其中的 object 使用, 表示 Hello 類別繼承 object 的所有特性, 包括方法與屬性設計
class Hello(object):
# Hello 類別的啟動設定
_cp_config = {
'tools.encode.encoding': 'utf-8',
'tools.sessions.on' : True,
'tools.sessions.storage_type' : 'file',
#'tools.sessions.locking' : 'explicit',
# session 以檔案儲存, 而且位於 data_dir 下的 tmp 目錄
'tools.sessions.storage_path' : data_dir+'/tmp',
# session 有效時間設為 60 分鐘
'tools.sessions.timeout' : 60
}
#@+others
#@+node:2014fall.20141212095015.2004: *3* __init__
def __init__(self):
# 配合透過案例啟始建立所需的目錄
if not os.path.isdir(data_dir+'/tmp'):
os.mkdir(data_dir+'/tmp')
if not os.path.isdir(data_dir+"/downloads"):
os.mkdir(data_dir+"/downloads")
if not os.path.isdir(data_dir+"/images"):
os.mkdir(data_dir+"/images")
#@+node:2014fall.20141212095015.1778: *3* index_orig
# 以 @ 開頭的 cherrypy.expose 為 decorator, 用來表示隨後的成員方法, 可以直接讓使用者以 URL 連結執行
@cherrypy.expose
# index 方法為 CherryPy 各類別成員方法中的內建(default)方法, 當使用者執行時未指定方法, 系統將會優先執行 index 方法
# 有 self 的方法為類別中的成員方法, Python 程式透過此一 self 在各成員方法間傳遞物件內容
def index_orig(self, toprint="Hello World!"):
return toprint
#@+node:2014fall.20141212095015.1779: *3* hello
@cherrypy.expose
def hello(self, toprint="Hello World!"):
return toprint
#@+node:2014fall.20141215194146.1791: *3* index
@cherrypy.expose
def twoDgear(self, guess=None):
# 將標準答案存入 answer session 對應區
theanswer = random.randint(1, 100)
thecount = 0
# 將答案與計算次數變數存進 session 對應變數
cherrypy.session['answer'] = theanswer
cherrypy.session['count'] = thecount
# 印出讓使用者輸入的超文件表單
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<form method=POST action=doCheck>
請輸入您所猜的整數:<input type=text name=guess><br />
<input type=submit value=send>
</form>
<hr>
<!-- 以下在網頁內嵌 Brython 程式 -->
<script type="text/python">
from browser import document, alert
def echo(ev):
alert(document["zone"].value)
# 將文件中名稱為 mybutton 的物件, 透過 click 事件與 echo 函式 bind 在一起
document['mybutton'].bind('click',echo)
</script>
<input id="zone"><button id="mybutton">click !</button>
<hr>
<!-- 以下為 canvas 畫圖程式 -->
<script type="text/python">
# 從 browser 導入 document
from browser import document
import math
# 畫布指定在名稱為 plotarea 的 canvas 上
# 以下使用中文變數名稱
畫布 = document["plotarea"]
ctx = 畫布.getContext("2d")
# 用紅色畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(0, 500)
ctx.strokeStyle = "red"
ctx.stroke()
# 用藍色再畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 0)
ctx.strokeStyle = "blue"
ctx.stroke()
# 用綠色再畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 500)
ctx.strokeStyle = "green"
ctx.stroke()
# 用黑色畫一個圓
ctx.beginPath()
ctx.lineWidth = 3
ctx.strokeStyle = "black"
ctx.arc(250,250,50,0,2*math.pi)
ctx.stroke()
</script>
<canvas id="plotarea" width="800" height="600"></canvas>
</body>
</html>
'''
return outstring
#@+node:2015.20150330144929.1713: *3* twoDgear
@cherrypy.expose
# N 為齒數, M 為模數, P 為壓力角
def index(self, N=None, M=None, P=None):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<form method=POST action=mygeartest>
<p>學號:40223233 姓名:彭于亮
<p>齒數:
<select name=N>
<option>10
<option>20
<option>30
<option>40
<option>50
</select>
<p>模數:
<select name=M>
<option>2
<option>4
<option>6
<option>8
<option>10
</select>
<p>壓力角:
<select name=P>
<option>10
<option>12
<option>14
<option>16
<option>18
<option>20
</select>
</br>
<input type=submit value=送出>
</form>
</body>
</html>
'''
return outstring
#@+node:2015.20150331094055.1733: *3* threeDgear
@cherrypy.expose
# N 為齒數, M 為模數, P 為壓力角
def threeDgear(self, N=20, M=5, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<form method=POST action=do3Dgear>
齒數:<input type=text name=N><br />
模數:<input type=text name=M><br />
壓力角:<input type=text name=P><br />
<input type=submit value=send>
</form>
</body>
</html>
'''
return outstring
#@+node:2015.20150330144929.1762: *3* do2Dgear
@cherrypy.expose
# N 為齒數, M 為模數, P 為壓力角
def do2Dgear(self, N=20, M=5, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<!-- 以下為 canvas 畫圖程式 -->
<script type="text/python">
# 從 browser 導入 document
from browser import document
import math
# 畫布指定在名稱為 plotarea 的 canvas 上
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
# 用紅色畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
'''
outstring += '''
ctx.moveTo('''+str(N)+","+str(M)+")"
outstring += '''
ctx.lineTo(0, 500)
ctx.strokeStyle = "red"
ctx.stroke()
# 用藍色再畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 0)
ctx.strokeStyle = "blue"
ctx.stroke()
# 用綠色再畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 500)
ctx.strokeStyle = "green"
ctx.stroke()
# 用黑色畫一個圓
ctx.beginPath()
ctx.lineWidth = 3
ctx.strokeStyle = "black"
ctx.arc(250,250,50,0,2*math.pi)
ctx.stroke()
</script>
<canvas id="plotarea" width="800" height="600"></canvas>
</body>
</html>
'''
return outstring
#@+node:2015.20150331094055.1735: *3* do3Dgear
@cherrypy.expose
# N 為齒數, M 為模數, P 為壓力角
def do3Dgear(self, N=20, M=5, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<!-- 以下為 canvas 畫圖程式 -->
<script type="text/python">
# 從 browser 導入 document
from browser import document
import math
# 畫布指定在名稱為 plotarea 的 canvas 上
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
# 用紅色畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
'''
outstring += '''
ctx.moveTo('''+str(N)+","+str(M)+")"
outstring += '''
ctx.lineTo(0, 500)
ctx.strokeStyle = "red"
ctx.stroke()
# 用藍色再畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 0)
ctx.strokeStyle = "blue"
ctx.stroke()
# 用綠色再畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 500)
ctx.strokeStyle = "green"
ctx.stroke()
# 用黑色畫一個圓
ctx.beginPath()
ctx.lineWidth = 3
ctx.strokeStyle = "black"
ctx.arc(250,250,50,0,2*math.pi)
ctx.stroke()
</script>
<canvas id="plotarea" width="800" height="600"></canvas>
</body>
</html>
'''
return outstring
#@+node:2015.20150330144929.1765: *3* mygeartest
@cherrypy.expose
# N 為齒數, M 為模數, P 為壓力角
def mygeartest(self, N=50, M=5, P=20):
D = int(N)*int(M)
outstring = '''
<!DOCTYPE html>
<html>
<head>
齒輪已完成。
<a href='index'>返回上一頁</a>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<!-- 以下為 canvas 畫圖程式 -->
<script type="text/python">
# 從 browser 導入 document
from browser import document
from math import *
# 準備在 id="plotarea" 的 canvas 中繪圖
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
def create_line(x1, y1, x2, y2, width=3, fill="red"):
ctx.beginPath()
ctx.lineWidth = width
ctx.moveTo(x1, y1)
ctx.lineTo(x2, y2)
ctx.strokeStyle = fill
ctx.stroke()
# 導入數學函式後, 圓周率為 pi
# deg 為角度轉為徑度的轉換因子
deg = pi/180.
#
# 以下分別為正齒輪繪圖與主 tkinter 畫布繪圖
#
# 定義一個繪正齒輪的繪圖函式
# midx 為齒輪圓心 x 座標
# midy 為齒輪圓心 y 座標
# rp 為節圓半徑, n 為齒數
def 齒輪(midx, midy, rp, n, 顏色):
# 將角度轉換因子設為全域變數
global deg
# 齒輪漸開線分成 15 線段繪製
imax = 15
# 在輸入的畫布上繪製直線, 由圓心到節圓 y 軸頂點畫一直線
create_line(midx, midy, midx, midy-rp)
# 畫出 rp 圓, 畫圓函式尚未定義
#create_oval(midx-rp, midy-rp, midx+rp, midy+rp, width=2)
# a 為模數 (代表公制中齒的大小), 模數為節圓直徑(稱為節徑)除以齒數
# 模數也就是齒冠大小
a=2*rp/n
# d 為齒根大小, 為模數的 1.157 或 1.25倍, 這裡採 1.25 倍
d=2.5*rp/n
# ra 為齒輪的外圍半徑
ra=rp+a
print("ra:", ra)
# 畫出 ra 圓, 畫圓函式尚未定義
#create_oval(midx-ra, midy-ra, midx+ra, midy+ra, width=1)
# rb 則為齒輪的基圓半徑
# 基圓為漸開線長齒之基準圓
rb=rp*cos('''+str(P)+'''*deg)
print("rp:", rp)
print("rb:", rb)
# 畫出 rb 圓 (基圓), 畫圓函式尚未定義
#create_oval(midx-rb, midy-rb, midx+rb, midy+rb, width=1)
# rd 為齒根圓半徑
rd=rp-d
# 當 rd 大於 rb 時
print("rd:", rd)
# 畫出 rd 圓 (齒根圓), 畫圓函式尚未定義
#create_oval(midx-rd, midy-rd, midx+rd, midy+rd, width=1)
# dr 則為基圓到齒頂圓半徑分成 imax 段後的每段半徑增量大小
# 將圓弧分成 imax 段來繪製漸開線
dr=(ra-rb)/imax
# tan(20*deg)-20*deg 為漸開線函數
sigma=pi/(2*n)+tan('''+str(P)+'''*deg)-'''+str(P)+'''*deg
for j in range(n):
ang=-2.*j*pi/n+sigma
ang2=2.*j*pi/n+sigma
lxd=midx+rd*sin(ang2-2.*pi/n)
lyd=midy-rd*cos(ang2-2.*pi/n)
#for(i=0;i<=imax;i++):
for i in range(imax+1):
r=rb+i*dr
theta=sqrt((r*r)/(rb*rb)-1.)
alpha=theta-atan(theta)
xpt=r*sin(alpha-ang)
ypt=r*cos(alpha-ang)
xd=rd*sin(-ang)
yd=rd*cos(-ang)
# i=0 時, 繪線起點由齒根圓上的點, 作為起點
if(i==0):
last_x = midx+xd
last_y = midy-yd
# 由左側齒根圓作為起點, 除第一點 (xd,yd) 齒根圓上的起點外, 其餘的 (xpt,ypt)則為漸開線上的分段點
create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色)
# 最後一點, 則為齒頂圓
if(i==imax):
lfx=midx+xpt
lfy=midy-ypt
last_x = midx+xpt
last_y = midy-ypt
# the line from last end of dedendum point to the recent
# end of dedendum point
# lxd 為齒根圓上的左側 x 座標, lyd 則為 y 座標
# 下列為齒根圓上用來近似圓弧的直線
create_line((lxd),(lyd),(midx+xd),(midy-yd),fill=顏色)
#for(i=0;i<=imax;i++):
for i in range(imax+1):
r=rb+i*dr
theta=sqrt((r*r)/(rb*rb)-1.)
alpha=theta-atan(theta)
xpt=r*sin(ang2-alpha)
ypt=r*cos(ang2-alpha)
xd=rd*sin(ang2)
yd=rd*cos(ang2)
# i=0 時, 繪線起點由齒根圓上的點, 作為起點
if(i==0):
last_x = midx+xd
last_y = midy-yd
# 由右側齒根圓作為起點, 除第一點 (xd,yd) 齒根圓上的起點外, 其餘的 (xpt,ypt)則為漸開線上的分段點
create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色)
# 最後一點, 則為齒頂圓
if(i==imax):
rfx=midx+xpt
rfy=midy-ypt
last_x = midx+xpt
last_y = midy-ypt
# lfx 為齒頂圓上的左側 x 座標, lfy 則為 y 座標
# 下列為齒頂圓上用來近似圓弧的直線
create_line(lfx,lfy,rfx,rfy,fill=顏色)
齒輪(500,500,'''+str(D)+''','''+str(N)+''',"black")
</script>
<canvas id="plotarea" width="2000" height="1000"></canvas>
</body>
</html>
'''
return outstring
#@+node:2015.20150331094055.1737: *3* my3Dgeartest
@cherrypy.expose
# N 為齒數, M 為模數, P 為壓力角
def my3Dgeartest(self, N=20, M=5, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<!-- 以下為 canvas 畫圖程式 -->
<script type="text/python">
# 從 browser 導入 document
from browser import document
from math import *
# 準備在 id="plotarea" 的 canvas 中繪圖
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
def create_line(x1, y1, x2, y2, width=3, fill="red"):
ctx.beginPath()
ctx.lineWidth = width
ctx.moveTo(x1, y1)
ctx.lineTo(x2, y2)
ctx.strokeStyle = fill
ctx.stroke()
# 導入數學函式後, 圓周率為 pi
# deg 為角度轉為徑度的轉換因子
deg = pi/180.
#
# 以下分別為正齒輪繪圖與主 tkinter 畫布繪圖
#
# 定義一個繪正齒輪的繪圖函式
# midx 為齒輪圓心 x 座標
# midy 為齒輪圓心 y 座標
# rp 為節圓半徑, n 為齒數
def 齒輪(midx, midy, rp, n, 顏色):
# 將角度轉換因子設為全域變數
global deg
# 齒輪漸開線分成 15 線段繪製
imax = 15
# 在輸入的畫布上繪製直線, 由圓心到節圓 y 軸頂點畫一直線
create_line(midx, midy, midx, midy-rp)
# 畫出 rp 圓, 畫圓函式尚未定義
#create_oval(midx-rp, midy-rp, midx+rp, midy+rp, width=2)
# a 為模數 (代表公制中齒的大小), 模數為節圓直徑(稱為節徑)除以齒數
# 模數也就是齒冠大小
a=2*rp/n
# d 為齒根大小, 為模數的 1.157 或 1.25倍, 這裡採 1.25 倍
d=2.5*rp/n
# ra 為齒輪的外圍半徑
ra=rp+a
print("ra:", ra)
# 畫出 ra 圓, 畫圓函式尚未定義
#create_oval(midx-ra, midy-ra, midx+ra, midy+ra, width=1)
# rb 則為齒輪的基圓半徑
# 基圓為漸開線長齒之基準圓
rb=rp*cos(20*deg)
print("rp:", rp)
print("rb:", rb)
# 畫出 rb 圓 (基圓), 畫圓函式尚未定義
#create_oval(midx-rb, midy-rb, midx+rb, midy+rb, width=1)
# rd 為齒根圓半徑
rd=rp-d
# 當 rd 大於 rb 時
print("rd:", rd)
# 畫出 rd 圓 (齒根圓), 畫圓函式尚未定義
#create_oval(midx-rd, midy-rd, midx+rd, midy+rd, width=1)
# dr 則為基圓到齒頂圓半徑分成 imax 段後的每段半徑增量大小
# 將圓弧分成 imax 段來繪製漸開線
dr=(ra-rb)/imax
# tan(20*deg)-20*deg 為漸開線函數
sigma=pi/(2*n)+tan(20*deg)-20*deg
for j in range(n):
ang=-2.*j*pi/n+sigma
ang2=2.*j*pi/n+sigma
lxd=midx+rd*sin(ang2-2.*pi/n)
lyd=midy-rd*cos(ang2-2.*pi/n)
#for(i=0;i<=imax;i++):
for i in range(imax+1):
r=rb+i*dr
theta=sqrt((r*r)/(rb*rb)-1.)
alpha=theta-atan(theta)
xpt=r*sin(alpha-ang)
ypt=r*cos(alpha-ang)
xd=rd*sin(-ang)
yd=rd*cos(-ang)
# i=0 時, 繪線起點由齒根圓上的點, 作為起點
if(i==0):
last_x = midx+xd
last_y = midy-yd
# 由左側齒根圓作為起點, 除第一點 (xd,yd) 齒根圓上的起點外, 其餘的 (xpt,ypt)則為漸開線上的分段點
create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色)
# 最後一點, 則為齒頂圓
if(i==imax):
lfx=midx+xpt
lfy=midy-ypt
last_x = midx+xpt
last_y = midy-ypt
# the line from last end of dedendum point to the recent
# end of dedendum point
# lxd 為齒根圓上的左側 x 座標, lyd 則為 y 座標
# 下列為齒根圓上用來近似圓弧的直線
create_line((lxd),(lyd),(midx+xd),(midy-yd),fill=顏色)
#for(i=0;i<=imax;i++):
for i in range(imax+1):
r=rb+i*dr
theta=sqrt((r*r)/(rb*rb)-1.)
alpha=theta-atan(theta)
xpt=r*sin(ang2-alpha)
ypt=r*cos(ang2-alpha)
xd=rd*sin(ang2)
yd=rd*cos(ang2)
# i=0 時, 繪線起點由齒根圓上的點, 作為起點
if(i==0):
last_x = midx+xd
last_y = midy-yd
# 由右側齒根圓作為起點, 除第一點 (xd,yd) 齒根圓上的起點外, 其餘的 (xpt,ypt)則為漸開線上的分段點
create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色)
# 最後一點, 則為齒頂圓
if(i==imax):
rfx=midx+xpt
rfy=midy-ypt
last_x = midx+xpt
last_y = midy-ypt
# lfx 為齒頂圓上的左側 x 座標, lfy 則為 y 座標
# 下列為齒頂圓上用來近似圓弧的直線
create_line(lfx,lfy,rfx,rfy,fill=顏色)
齒輪(400,400,300,41,"blue")
</script>
<canvas id="plotarea" width="800" height="800"></canvas>
</body>
</html>
'''
return outstring
#@+node:2014fall.20141215194146.1793: *3* doCheck
@cherrypy.expose
def doCheck(self, guess=None):
# 假如使用者直接執行 doCheck, 則設法轉回根方法
if guess is None:
raise cherrypy.HTTPRedirect("/")
# 從 session 取出 answer 對應資料, 且處理直接執行 doCheck 時無法取 session 值情況
try:
theanswer = int(cherrypy.session.get('answer'))
except:
raise cherrypy.HTTPRedirect("/")
# 經由表單所取得的 guess 資料型別為 string
try:
theguess = int(guess)
except:
return "error " + self.guessform()
# 每執行 doCheck 一次,次數增量一次
cherrypy.session['count'] += 1
# 答案與所猜數字進行比對
if theanswer < theguess:
return "big " + self.guessform()
elif theanswer > theguess:
return "small " + self.guessform()
else:
# 已經猜對, 從 session 取出累計猜測次數
thecount = cherrypy.session.get('count')
return "exact: <a href=''>再猜</a>"
#@+node:2014fall.20141215194146.1789: *3* guessform
def guessform(self):
# 印出讓使用者輸入的超文件表單
outstring = str(cherrypy.session.get('answer')) + "/" + str(cherrypy.session.get('count')) + '''<form method=POST action=doCheck>
請輸入您所猜的整數:<input type=text name=guess><br />
<input type=submit value=send>
</form>'''
return outstring
#@-others
#@-others
################# (4) 程式啟動區
# 配合程式檔案所在目錄設定靜態目錄或靜態檔案
application_conf = {'/static':{
'tools.staticdir.on': True,
# 程式執行目錄下, 必須自行建立 static 目錄
'tools.staticdir.dir': _curdir+"/static"},
'/downloads':{
'tools.staticdir.on': True,
'tools.staticdir.dir': data_dir+"/downloads"},
'/images':{
'tools.staticdir.on': True,
'tools.staticdir.dir': data_dir+"/images"}
}
root = Hello()
root.gear = gear.Gear()
if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
# 表示在 OpenSfhit 執行
application = cherrypy.Application(root, config=application_conf)
else:
# 表示在近端執行
cherrypy.quickstart(root, config=application_conf)
#@-leo
|
gpl-2.0
| -7,574,877,212,437,480,000 | 28.628342 | 137 | 0.549093 | false | 2.210232 | false | false | false |
opentrials/processors
|
processors/base/writers/fda_application.py
|
1
|
1620
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
from . import write_organisation
from .. import helpers
logger = logging.getLogger(__name__)
# Module API
def write_fda_application(conn, fda_application, source_id):
"""Write fda_application to database.
Args:
conn (dict): connection dict
fda_application (dict): normalized data
source_id (str): data source id
Raises:
KeyError: if data structure is not valid
Returns:
str/None: object identifier/if not written (skipped)
"""
if 'organisation' in fda_application:
organisation_name = fda_application['organisation']
del fda_application['organisation']
slug = helpers.slugify_string(organisation_name)
organisation = conn['database']['organisations'].find_one(slug=slug)
if not organisation:
organisation = {
'name': organisation_name,
'slug': slug,
}
organisation_id = write_organisation(conn, organisation, source_id)
else:
organisation_id = organisation['id']
fda_application['organisation_id'] = organisation_id
conn['database']['fda_applications'].upsert(fda_application,
['id'],
ensure=False)
# Log debug
logger.debug('FDA Application upserted: %s', fda_application['id'])
return fda_application['id']
|
mit
| -2,835,677,600,094,138,400 | 29.566038 | 79 | 0.605556 | false | 4.390244 | false | false | false |
jparyani/Mailpile
|
mailpile/plugins/setup_magic.py
|
1
|
40822
|
import os
import random
import sys
import datetime
from urllib import urlencode
import mailpile.auth
from mailpile.defaults import CONFIG_RULES
from mailpile.i18n import ListTranslations, ActivateTranslation, gettext
from mailpile.i18n import gettext as _
from mailpile.i18n import ngettext as _n
from mailpile.plugins import PluginManager
from mailpile.plugins import PLUGINS
from mailpile.plugins.contacts import AddProfile
from mailpile.plugins.contacts import ListProfiles
from mailpile.plugins.migrate import Migrate
from mailpile.plugins.tags import AddTag
from mailpile.commands import Command
from mailpile.config import SecurePassphraseStorage
from mailpile.crypto.gpgi import GnuPG, SignatureInfo, EncryptionInfo
from mailpile.crypto.gpgi import GnuPGKeyGenerator, GnuPGKeyEditor
from mailpile.httpd import BLOCK_HTTPD_LOCK, Idle_HTTPD
from mailpile.smtp_client import SendMail, SendMailError
from mailpile.urlmap import UrlMap
from mailpile.ui import Session, SilentInteraction
from mailpile.util import *
_ = lambda s: s
_plugins = PluginManager(builtin=__file__)
##[ Commands ]################################################################
class SetupMagic(Command):
"""Perform initial setup"""
SYNOPSIS = (None, None, None, None)
ORDER = ('Internals', 0)
LOG_PROGRESS = True
TAGS = {
'New': {
'type': 'unread',
'label': False,
'display': 'invisible',
'icon': 'icon-new',
'label_color': '03-gray-dark',
'name': _('New'),
},
'Inbox': {
'type': 'inbox',
'display': 'priority',
'display_order': 2,
'icon': 'icon-inbox',
'label_color': '06-blue',
'name': _('Inbox'),
},
'Blank': {
'type': 'blank',
'flag_editable': True,
'display': 'invisible',
'name': _('Blank'),
},
'Drafts': {
'type': 'drafts',
'flag_editable': True,
'display': 'priority',
'display_order': 1,
'icon': 'icon-compose',
'label_color': '03-gray-dark',
'name': _('Drafts'),
},
'Outbox': {
'type': 'outbox',
'display': 'priority',
'display_order': 3,
'icon': 'icon-outbox',
'label_color': '06-blue',
'name': _('Outbox'),
},
'Sent': {
'type': 'sent',
'display': 'priority',
'display_order': 4,
'icon': 'icon-sent',
'label_color': '03-gray-dark',
'name': _('Sent'),
},
'Spam': {
'type': 'spam',
'flag_hides': True,
'display': 'priority',
'display_order': 5,
'icon': 'icon-spam',
'label_color': '10-orange',
'name': _('Spam'),
},
'MaybeSpam': {
'display': 'invisible',
'icon': 'icon-spam',
'label_color': '10-orange',
'name': _('MaybeSpam'),
},
'Ham': {
'type': 'ham',
'display': 'invisible',
'name': _('Ham'),
},
'Trash': {
'type': 'trash',
'flag_hides': True,
'display': 'priority',
'display_order': 6,
'icon': 'icon-trash',
'label_color': '13-brown',
'name': _('Trash'),
},
# These are magical tags that perform searches and show
# messages in contextual views.
'All Mail': {
'type': 'tag',
'icon': 'icon-logo',
'label_color': '06-blue',
'search_terms': 'all:mail',
'name': _('All Mail'),
'display_order': 1000,
},
'Photos': {
'type': 'tag',
'icon': 'icon-photos',
'label_color': '08-green',
'search_terms': 'att:jpg',
'name': _('Photos'),
'template': 'photos',
'display_order': 1001,
},
'Files': {
'type': 'tag',
'icon': 'icon-document',
'label_color': '06-blue',
'search_terms': 'has:attachment',
'name': _('Files'),
'template': 'files',
'display_order': 1002,
},
'Links': {
'type': 'tag',
'icon': 'icon-links',
'label_color': '12-red',
'search_terms': 'http',
'name': _('Links'),
'display_order': 1003,
},
# These are internal tags, used for tracking user actions on
# messages, as input for machine learning algorithms. These get
# automatically added, and may be automatically removed as well
# to keep the working sets reasonably small.
'mp_rpl': {'type': 'replied', 'label': False, 'display': 'invisible'},
'mp_fwd': {'type': 'fwded', 'label': False, 'display': 'invisible'},
'mp_tag': {'type': 'tagged', 'label': False, 'display': 'invisible'},
'mp_read': {'type': 'read', 'label': False, 'display': 'invisible'},
'mp_ham': {'type': 'ham', 'label': False, 'display': 'invisible'},
}
def basic_app_config(self, session,
save_and_update_workers=True,
want_daemons=True):
# Create local mailboxes
session.config.open_local_mailbox(session)
# Create standard tags and filters
created = []
for t in self.TAGS:
if not session.config.get_tag_id(t):
AddTag(session, arg=[t]).run(save=False)
created.append(t)
session.config.get_tag(t).update(self.TAGS[t])
for stype, statuses in (('sig', SignatureInfo.STATUSES),
('enc', EncryptionInfo.STATUSES)):
for status in statuses:
tagname = 'mp_%s-%s' % (stype, status)
if not session.config.get_tag_id(tagname):
AddTag(session, arg=[tagname]).run(save=False)
created.append(tagname)
session.config.get_tag(tagname).update({
'type': 'attribute',
'display': 'invisible',
'label': False,
})
if 'New' in created:
session.ui.notify(_('Created default tags'))
# Import all the basic plugins
reload_config = False
for plugin in PLUGINS:
if plugin not in session.config.sys.plugins:
session.config.sys.plugins.append(plugin)
reload_config = True
for plugin in session.config.plugins.WANTED:
if plugin in session.config.plugins.available():
session.config.sys.plugins.append(plugin)
if reload_config:
with session.config._lock:
session.config.save()
session.config.load(session)
try:
# If spambayes is not installed, this will fail
import mailpile.plugins.autotag_sb
if 'autotag_sb' not in session.config.sys.plugins:
session.config.sys.plugins.append('autotag_sb')
session.ui.notify(_('Enabling spambayes autotagger'))
except ImportError:
session.ui.warning(_('Please install spambayes '
'for super awesome spam filtering'))
vcard_importers = session.config.prefs.vcard.importers
if not vcard_importers.gravatar:
vcard_importers.gravatar.append({'active': True})
session.ui.notify(_('Enabling gravatar image importer'))
gpg_home = os.path.expanduser('~/.gnupg')
if os.path.exists(gpg_home) and not vcard_importers.gpg:
vcard_importers.gpg.append({'active': True,
'gpg_home': gpg_home})
session.ui.notify(_('Importing contacts from GPG keyring'))
if ('autotag_sb' in session.config.sys.plugins and
len(session.config.prefs.autotag) == 0):
session.config.prefs.autotag.append({
'match_tag': 'spam',
'unsure_tag': 'maybespam',
'tagger': 'spambayes',
'trainer': 'spambayes'
})
session.config.prefs.autotag[0].exclude_tags[0] = 'ham'
if save_and_update_workers:
session.config.save()
session.config.prepare_workers(session, daemons=want_daemons)
def setup_command(self, session, do_gpg_stuff=False):
do_gpg_stuff = do_gpg_stuff or ('do_gpg_stuff' in self.args)
# Stop the workers...
want_daemons = session.config.cron_worker is not None
session.config.stop_workers()
# Perform any required migrations
Migrate(session).run(before_setup=True, after_setup=False)
# Basic app config, tags, plugins, etc.
self.basic_app_config(session,
save_and_update_workers=False,
want_daemons=want_daemons)
# Assumption: If you already have secret keys, you want to
# use the associated addresses for your e-mail.
# If you don't already have secret keys, you should have
# one made for you, if GnuPG is available.
# If GnuPG is not available, you should be warned.
if do_gpg_stuff:
gnupg = GnuPG(None)
accepted_keys = []
if gnupg.is_available():
keys = gnupg.list_secret_keys()
cutoff = (datetime.date.today() + datetime.timedelta(days=365)
).strftime("%Y-%m-%d")
for key, details in keys.iteritems():
# Ignore revoked/expired/disabled keys.
revoked = details.get('revocation_date')
expired = details.get('expiration_date')
if (details.get('disabled') or
(revoked and revoked <= cutoff) or
(expired and expired <= cutoff)):
continue
accepted_keys.append(key)
for uid in details["uids"]:
if "email" not in uid or uid["email"] == "":
continue
if uid["email"] in [x["email"]
for x in session.config.profiles]:
# Don't set up the same e-mail address twice.
continue
# FIXME: Add route discovery mechanism.
profile = {
"email": uid["email"],
"name": uid["name"],
}
session.config.profiles.append(profile)
if (session.config.prefs.gpg_recipient in (None, '', '!CREATE')
and details["capabilities_map"]["encrypt"]):
session.config.prefs.gpg_recipient = key
session.ui.notify(_('Encrypting config to %s') % key)
if session.config.prefs.crypto_policy == 'none':
session.config.prefs.crypto_policy = 'openpgp-sign'
if len(accepted_keys) == 0:
# FIXME: Start background process generating a key once a
# user has supplied a name and e-mail address.
pass
else:
session.ui.warning(_('Oh no, PGP/GPG support is unavailable!'))
# If we have a GPG key, but no master key, create it
self.make_master_key()
# Perform any required migrations
Migrate(session).run(before_setup=False, after_setup=True)
session.config.save()
session.config.prepare_workers(session, daemons=want_daemons)
return self._success(_('Performed initial Mailpile setup'))
def make_master_key(self):
session = self.session
if (session.config.prefs.gpg_recipient not in (None, '', '!CREATE')
and not session.config.master_key
and not session.config.prefs.obfuscate_index):
#
# This secret is arguably the most critical bit of data in the
# app, it is used as an encryption key and to seed hashes in
# a few places. As such, the user may need to type this in
# manually as part of data recovery, so we keep it reasonably
# sized and devoid of confusing chars.
#
# The strategy below should give about 281 bits of randomness:
#
# import math
# math.log((25 + 25 + 8) ** (12 * 4), 2) == 281.183...
#
secret = ''
chars = 12 * 4
while len(secret) < chars:
secret = sha512b64(os.urandom(1024),
'%s' % session.config,
'%s' % time.time())
secret = CleanText(secret,
banned=CleanText.NONALNUM + 'O01l'
).clean[:chars]
session.config.master_key = secret
if self._idx() and self._idx().INDEX:
session.ui.warning(_('Unable to obfuscate search index '
'without losing data. Not indexing '
'encrypted mail.'))
else:
session.config.prefs.obfuscate_index = True
session.config.prefs.index_encrypted = True
session.ui.notify(_('Obfuscating search index and enabling '
'indexing of encrypted e-mail. Yay!'))
return True
else:
return False
def command(self, *args, **kwargs):
session = self.session
if session.config.sys.lockdown:
return self._error(_('In lockdown, doing nothing.'))
return self.setup_command(session, *args, **kwargs)
class TestableWebbable(SetupMagic):
HTTP_AUTH_REQUIRED = 'Maybe'
HTTP_CALLABLE = ('GET', )
HTTP_QUERY_VARS = {
'_path': 'Redirect path'
}
HTTP_POST_VARS = {
'testing': 'Yes or No, if testing',
'advance': 'Yes or No, advance setup flow',
}
TRUTHY = {
'0': False, 'no': False, 'fuckno': False, 'false': False,
'1': True, 'yes': True, 'hellyeah': True, 'true': True,
}
def _advance(self):
path = self.data.get('_path', [None])[0]
data = dict([(k, v) for k, v in self.data.iteritems()
if k not in self.HTTP_POST_VARS
and k not in ('_method',)])
nxt = Setup.Next(self.session.config, None, needed_auth=False)
if nxt:
url = '/%s/' % nxt.SYNOPSIS[2]
elif path and path != '/%s/' % Setup.SYNOPSIS[2]:
# Use the same redirection logic as the Authenticator
mailpile.auth.Authenticate.RedirectBack(path, data)
else:
url = '/'
qs = urlencode([(k, v) for k, vl in data.iteritems() for v in vl])
raise UrlRedirectException(''.join([url, '?%s' % qs if qs else '']))
def _success(self, message, result=True, advance=False):
if (advance or
self.TRUTHY.get(self.data.get('advance', ['no'])[0].lower())):
self._advance()
return SetupMagic._success(self, message, result=result)
def _testing(self):
self._testing_yes(lambda: True)
return (self.testing is not None)
def _testing_yes(self, method, *args, **kwargs):
testination = self.data.get('testing')
if testination:
self.testing = random.randint(0, 1)
if testination[0].lower() in self.TRUTHY:
self.testing = self.TRUTHY[testination[0].lower()]
return self.testing
self.testing = None
return method(*args, **kwargs)
def _testing_data(self, method, tdata, *args, **kwargs):
result = self._testing_yes(method, *args, **kwargs) or []
return (result
if (self.testing is None) else
(self.testing and tdata or []))
def setup_command(self, session):
raise Exception('FIXME')
class SetupGetEmailSettings(TestableWebbable):
"""Guess server details for an e-mail address"""
SYNOPSIS = (None, 'setup/email_servers', 'setup/email_servers', None)
HTTP_CALLABLE = ('GET', )
HTTP_QUERY_VARS = dict_merge(TestableWebbable.HTTP_QUERY_VARS, {
'email': 'E-mail address'
})
TEST_DATA = {
'imap_host': 'imap.wigglebonk.com',
'imap_port': 993,
'imap_tls': True,
'pop3_host': 'pop3.wigglebonk.com',
'pop3_port': 110,
'pop3_tls': False,
'smtp_host': 'smtp.wigglebonk.com',
'smtp_port': 465,
'smtp_tls': False
}
def _get_domain_settings(self, domain):
raise Exception('FIXME')
def setup_command(self, session):
results = {}
        for email in list(self.args) + self.data.get('email', []):
settings = self._testing_data(self._get_domain_settings,
self.TEST_DATA, email)
if settings:
results[email] = settings
if results:
            self._success(_('Found settings for %d addresses') % len(results), results)
else:
self._error(_('No settings found'))
class SetupWelcome(TestableWebbable):
SYNOPSIS = (None, None, 'setup/welcome', None)
HTTP_CALLABLE = ('GET', 'POST')
HTTP_POST_VARS = dict_merge(TestableWebbable.HTTP_POST_VARS, {
'language': 'Language selection'
})
def bg_setup_stage_1(self):
        # Wait a bit, so the user has something to look at before we
        # block the web server and do real work.
        time.sleep(2)
        # Initial configuration of the app goes here...
if not self.session.config.tags:
with BLOCK_HTTPD_LOCK, Idle_HTTPD(allowed=0):
self.basic_app_config(self.session)
# Next, if we have any secret GPG keys, extract all the e-mail
# addresses and create a profile for each one.
with BLOCK_HTTPD_LOCK, Idle_HTTPD(allowed=0):
SetupProfiles(self.session).auto_create_profiles()
def setup_command(self, session):
config = session.config
if self.data.get('_method') == 'POST' or self._testing():
language = self.data.get('language', [''])[0]
if language:
try:
i18n = lambda: ActivateTranslation(session, config,
language)
if not self._testing_yes(i18n):
raise ValueError('Failed to configure i18n')
config.prefs.language = language
if not self._testing():
self._background_save(config=True)
except ValueError:
return self._error(_('Invalid language: %s') % language)
config.slow_worker.add_unique_task(
session, 'Setup, Stage 1', lambda: self.bg_setup_stage_1())
results = {
'languages': ListTranslations(config),
'language': config.prefs.language
}
return self._success(_('Welcome to Mailpile!'), results)
class SetupCrypto(TestableWebbable):
SYNOPSIS = (None, None, 'setup/crypto', None)
HTTP_CALLABLE = ('GET', 'POST')
HTTP_POST_VARS = dict_merge(TestableWebbable.HTTP_POST_VARS, {
'choose_key': 'Select an existing key to use',
'passphrase': 'Specify a passphrase',
'passphrase_confirm': 'Confirm the passphrase',
'index_encrypted': 'y/n: index encrypted mail?',
        # 'obfuscate_index': 'y/n: obfuscate keywords?',  # Omitted due to DANGER
'encrypt_mail': 'y/n: encrypt locally stored mail?',
'encrypt_index': 'y/n: encrypt search index?',
'encrypt_vcards': 'y/n: encrypt vcards?',
'encrypt_events': 'y/n: encrypt event log?',
'encrypt_misc': 'y/n: encrypt plugin and misc data?'
})
TEST_DATA = {}
def list_secret_keys(self):
cutoff = (datetime.date.today() + datetime.timedelta(days=365)
).strftime("%Y-%m-%d")
keylist = {}
for key, details in self._gnupg().list_secret_keys().iteritems():
# Ignore (soon to be) revoked/expired/disabled keys.
revoked = details.get('revocation_date')
expired = details.get('expiration_date')
if (details.get('disabled') or
(revoked and revoked <= cutoff) or
(expired and expired <= cutoff)):
continue
# Ignore keys that cannot both encrypt and sign
caps = details["capabilities_map"]
if not caps["encrypt"] or not caps["sign"]:
continue
keylist[key] = details
return keylist
def gpg_key_ready(self, gpg_keygen):
if not gpg_keygen.failed:
self.session.config.prefs.gpg_recipient = gpg_keygen.generated_key
self.make_master_key()
self._background_save(config=True)
self.save_profiles_to_key()
def save_profiles_to_key(self, key_id=None, add_all=False, now=False,
profiles=None):
if key_id is None:
if (Setup.KEY_CREATING_THREAD and
not Setup.KEY_CREATING_THREAD.failed):
key_id = Setup.KEY_CREATING_THREAD.generated_key
add_all = True
if not add_all:
self.session.ui.warning('FIXME: Not updating GPG key!')
return
if key_id is not None:
uids = []
data = ListProfiles(self.session).run().result
for profile in data['profiles']:
uids.append({
'name': profile["fn"],
'email': profile["email"][0]["email"],
'comment': profile.get('note', '')
})
if not uids:
return
editor = GnuPGKeyEditor(key_id, set_uids=uids,
sps=self.session.config.gnupg_passphrase,
deletes=max(10, 2*len(uids)))
def start_editor(*unused_args):
with Setup.KEY_WORKER_LOCK:
Setup.KEY_EDITING_THREAD = editor
editor.start()
with Setup.KEY_WORKER_LOCK:
if now:
start_editor()
elif Setup.KEY_EDITING_THREAD is not None:
Setup.KEY_EDITING_THREAD.on_complete('edit keys',
start_editor)
elif Setup.KEY_CREATING_THREAD is not None:
Setup.KEY_CREATING_THREAD.on_complete('edit keys',
start_editor)
else:
start_editor()
def setup_command(self, session):
changed = authed = False
results = {
'secret_keys': self.list_secret_keys(),
}
error_info = None
if self.data.get('_method') == 'POST' or self._testing():
# 1st, are we choosing or creating a new key?
choose_key = self.data.get('choose_key', [''])[0]
if choose_key and not error_info:
if (choose_key not in results['secret_keys'] and
choose_key != '!CREATE'):
error_info = (_('Invalid key'), {
'invalid_key': True,
'chosen_key': choose_key
})
# 2nd, check authentication...
#
# FIXME: Creating a new key will allow a malicious actor to
# bypass authentication and change settings.
#
try:
passphrase = self.data.get('passphrase', [''])[0]
passphrase2 = self.data.get('passphrase_confirm', [''])[0]
chosen_key = ((not error_info) and choose_key
) or session.config.prefs.gpg_recipient
if not error_info:
assert(passphrase == passphrase2)
if chosen_key == '!CREATE':
assert(passphrase != '')
sps = SecurePassphraseStorage(passphrase)
elif chosen_key:
sps = mailpile.auth.VerifyAndStorePassphrase(
session.config,
passphrase=passphrase,
key=chosen_key)
else:
sps = mailpile.auth.VerifyAndStorePassphrase(
session.config, passphrase=passphrase)
if not chosen_key:
choose_key = '!CREATE'
results['updated_passphrase'] = True
session.config.gnupg_passphrase.data = sps.data
mailpile.auth.SetLoggedIn(self)
except AssertionError:
error_info = (_('Invalid passphrase'), {
'invalid_passphrase': True,
'chosen_key': session.config.prefs.gpg_recipient
})
            # 3rd, if necessary, create the master key and/or GPG key
with BLOCK_HTTPD_LOCK, Idle_HTTPD():
if choose_key and not error_info:
session.config.prefs.gpg_recipient = choose_key
# FIXME: This should probably only happen if the GPG
# key was successfully created.
self.make_master_key()
changed = True
with Setup.KEY_WORKER_LOCK:
if ((not error_info) and
(session.config.prefs.gpg_recipient
== '!CREATE') and
(Setup.KEY_CREATING_THREAD is None or
Setup.KEY_CREATING_THREAD.failed)):
gk = GnuPGKeyGenerator(
sps=session.config.gnupg_passphrase,
on_complete=('notify',
lambda: self.gpg_key_ready(gk)))
Setup.KEY_CREATING_THREAD = gk
Setup.KEY_CREATING_THREAD.start()
# Finally we update misc. settings
for key in self.HTTP_POST_VARS.keys():
# FIXME: This should probably only happen if the GPG
# key was successfully created.
# Continue iff all is well...
if error_info:
break
if key in (['choose_key', 'passphrase', 'passphrase_confirm'] +
TestableWebbable.HTTP_POST_VARS.keys()):
continue
try:
val = self.data.get(key, [''])[0]
if val:
session.config.prefs[key] = self.TRUTHY[val.lower()]
changed = True
except (ValueError, KeyError):
error_info = (_('Invalid preference'), {
'invalid_setting': True,
'variable': key
})
results.update({
'creating_key': (Setup.KEY_CREATING_THREAD is not None and
Setup.KEY_CREATING_THREAD.running),
'creating_failed': (Setup.KEY_CREATING_THREAD is not None and
Setup.KEY_CREATING_THREAD.failed),
'chosen_key': session.config.prefs.gpg_recipient,
'prefs': {
'index_encrypted': session.config.prefs.index_encrypted,
'obfuscate_index': session.config.prefs.obfuscate_index,
'encrypt_mail': session.config.prefs.encrypt_mail,
'encrypt_index': session.config.prefs.encrypt_index,
'encrypt_vcards': session.config.prefs.encrypt_vcards,
'encrypt_events': session.config.prefs.encrypt_events,
'encrypt_misc': session.config.prefs.encrypt_misc
}
})
if changed:
self._background_save(config=True)
if error_info:
return self._error(error_info[0],
info=error_info[1], result=results)
elif changed:
return self._success(_('Updated crypto preferences'), results)
else:
return self._success(_('Configure crypto preferences'), results)
class SetupProfiles(SetupCrypto):
SYNOPSIS = (None, None, 'setup/profiles', None)
HTTP_AUTH_REQUIRED = True
HTTP_CALLABLE = ('GET', 'POST')
HTTP_QUERY_VARS = dict_merge(TestableWebbable.HTTP_QUERY_VARS, {
})
HTTP_POST_VARS = dict_merge(TestableWebbable.HTTP_POST_VARS, {
'email': 'Create a profile for this e-mail address',
'name': 'Name associated with this e-mail',
'note': 'Profile note',
'pass': 'Password for remote accounts',
'route_id': 'Route ID for sending mail',
})
TEST_DATA = {}
# This is where we cache the passwords we are given, for use later.
# This is deliberately made a singleton on the class.
PASSWORD_CACHE = {}
def _auto_configurable(self, email):
# FIXME: Actually look things up, this is super lame
return email.endswith('@gmail.com')
def get_profiles(self, secret_keys=None):
data = ListProfiles(self.session).run().result
profiles = {}
for rid, ofs in data["rids"].iteritems():
profile = data["profiles"][ofs]
email = profile["email"][0]["email"]
name = profile["fn"]
note = profile.get('note', '')
profiles[rid] = {
"name": name,
"note": note,
"pgp_keys": [], # FIXME
"email": email,
"route_id": profile.get('x-mailpile-profile-route', ''),
"photo": profile.get('photo', [{}])[0].get('photo', ''),
"auto_configurable": self._auto_configurable(email)
}
for key, info in (secret_keys or {}).iteritems():
for uid in info['uids']:
email = uid.get('email')
if email in profiles:
profiles[email]["pgp_keys"].append(key)
return profiles
def discover_new_email_addresses(self, profiles):
addresses = {}
existing = set([p['email'] for p in profiles.values()])
for key, info in self.list_secret_keys().iteritems():
for uid in info['uids']:
email = uid.get('email')
note = uid.get('comment')
if email:
if email in existing:
continue
if email not in addresses:
addresses[email] = {'pgp_keys': [],
'name': '', 'note': ''}
ai = addresses[email]
name = uid.get('name')
ai['name'] = name if name else ai['name']
ai['note'] = note if note else ai['note']
ai['pgp_keys'].append(key)
# FIXME: Scan Thunderbird and MacMail for e-mails, other apps...
return addresses
def auto_create_profiles(self):
new_emails = self.discover_new_email_addresses(self.get_profiles())
for email, info in new_emails.iteritems():
AddProfile(self.session, data={
'_method': 'POST',
'email': [email],
'note': [info["note"]],
'name': [info['name']]
}).run()
def _result(self):
profiles = self.get_profiles()
return {
'new_emails': self.discover_new_email_addresses(profiles),
'profiles': profiles,
'routes': self.session.config.routes,
'default_email': self.session.config.prefs.default_email
}
def setup_command(self, session):
changed = False
if self.data.get('_method') == 'POST' or self._testing():
name, email, note, pwd = (self.data.get(k, [None])[0] for k in
('name', 'email', 'note', 'pass'))
if email:
rv = AddProfile(session, data=self.data).run()
if rv.status == 'success':
#
# FIXME: We need to fire off a background process to
# try and auto-discover routes and sources.
#
if not session.config.prefs.default_email:
session.config.prefs.default_email = email
changed = True
self.save_profiles_to_key()
else:
return self._error(_('Failed to add profile'),
info=rv.error_info,
result=self._result())
if email and pwd:
sps = SecurePassphraseStorage(pwd)
SetupProfiles.PASSWORD_CACHE[email] = sps
result = self._result()
if not result['default_email']:
profiles = result['profiles'].values()
profiles.sort(key=lambda p: (len(p['pgp_keys']),
len(p['name'])))
e = result['default_email'] = profiles[-1]['email']
session.config.prefs.default_email = e
changed = True
else:
result = self._result()
if changed:
self._background_save(config=True)
return self._success(_('Your profiles'), result)
class SetupConfigureKey(SetupProfiles):
SYNOPSIS = (None, None, 'setup/configure_key', None)
HTTP_AUTH_REQUIRED = True
HTTP_CALLABLE = ('GET', 'POST')
HTTP_QUERY_VARS = dict_merge(TestableWebbable.HTTP_QUERY_VARS, {
})
HTTP_POST_VARS = dict_merge(TestableWebbable.HTTP_POST_VARS, {
})
TEST_DATA = {}
def _result(self):
keylist = self.list_secret_keys()
profiles = self.get_profiles(secret_keys=keylist)
return {
'secret_keys': keylist,
'profiles': profiles,
}
def setup_command(self, session):
# FIXME!
return self._success(_('Configuring a key'), self._result())
class SetupTestRoute(SetupProfiles):
SYNOPSIS = (None, None, 'setup/test_route', None)
HTTP_AUTH_REQUIRED = True
HTTP_CALLABLE = ('POST', )
HTTP_POST_VARS = dict_merge(TestableWebbable.HTTP_POST_VARS,
dict((k, v[0]) for k, v in
CONFIG_RULES['routes'][1].iteritems()),
{'route_id': 'ID of existing route'})
TEST_DATA = {}
def setup_command(self, session):
if self.args:
route_id = self.args[0]
elif 'route_id' in self.data:
route_id = self.data['route_id'][0]
else:
route_id = None
if route_id:
route = self.session.config.routes[route_id]
assert(route)
else:
route = {}
for k in CONFIG_RULES['routes'][1]:
if k not in self.data:
pass
elif CONFIG_RULES['routes'][1][k][1] in (int, 'int'):
route[k] = int(self.data[k][0])
else:
route[k] = self.data[k][0]
fromaddr = route.get('username', '')
if '@' not in fromaddr:
fromaddr = self.session.config.get_profile()['email']
if not fromaddr or '@' not in fromaddr:
fromaddr = '%s@%s' % (route.get('username', 'test'),
route.get('host', 'example.com'))
assert(fromaddr)
error_info = {'error': _('Unknown error')}
try:
assert(SendMail(self.session, None,
[(fromaddr,
[fromaddr, '[email protected]'],
None,
[self.event])],
test_only=True, test_route=route))
return self._success(_('Route is working'),
result=route)
except OSError:
error_info = {'error': _('Invalid command'),
'invalid_command': True}
except SendMailError, e:
error_info = {'error': e.message,
'sendmail_error': True}
error_info.update(e.error_info)
except:
import traceback
traceback.print_exc()
return self._error(_('Route is not working'),
result=route, info=error_info)
class Setup(TestableWebbable):
"""Enter setup flow"""
SYNOPSIS = (None, 'setup', 'setup', '[do_gpg_stuff]')
ORDER = ('Internals', 0)
LOG_PROGRESS = True
HTTP_CALLABLE = ('GET',)
HTTP_AUTH_REQUIRED = True
# These are a global, may be modified...
KEY_WORKER_LOCK = CryptoRLock()
KEY_CREATING_THREAD = None
KEY_EDITING_THREAD = None
@classmethod
def _check_profiles(self, config):
session = Session(config)
session.ui = SilentInteraction(config)
session.ui.block()
data = ListProfiles(session).run().result
okay = routes = bad = 0
for rid, ofs in data["rids"].iteritems():
profile = data["profiles"][ofs]
if profile.get('email', None):
okay += 1
route_id = profile.get('x-mailpile-profile-route', '')
if route_id:
if route_id in config.routes:
routes += 1
else:
bad += 1
else:
bad += 1
return (routes > 0) and (okay > 0) and (bad == 0)
@classmethod
def _CHECKPOINTS(self, config):
return [
# Stage 0: Welcome: Choose app language
('language', lambda: config.prefs.language, SetupWelcome),
# Stage 1: Crypto: Configure our master key stuff
('crypto', lambda: config.prefs.gpg_recipient, SetupCrypto),
            # Stage 2: Identity (via single-page install flow)
            ('profiles', lambda: self._check_profiles(config), Setup),
            # Stage 3: Routes (via single-page install flow)
            ('routes', lambda: config.routes, Setup),
            # Stage 4: Sources (via single-page install flow)
('sources', lambda: config.sources, Setup),
# Stage 5: Is All Complete
('complete', lambda: config.web.setup_complete, Setup),
# FIXME: Check for this too?
#(lambda: config.prefs.crypto_policy != 'none', SetupConfigureKey),
]
@classmethod
def Next(cls, config, default, needed_auth=True):
if not config.loaded_config:
return default
for name, guard, step in cls._CHECKPOINTS(config):
auth_required = (step.HTTP_AUTH_REQUIRED is True
or (config.prefs.gpg_recipient and
step.HTTP_AUTH_REQUIRED == 'Maybe'))
if not guard():
if (not needed_auth) or (not auth_required):
return step
return default
def setup_command(self, session):
if '_method' in self.data:
return self._success(_('Entering setup flow'), result=dict(
((c[0], c[1]() and True or False)
for c in self._CHECKPOINTS(session.config)
)))
else:
return SetupMagic.setup_command(self, session)
_ = gettext
_plugins.register_commands(SetupMagic,
SetupGetEmailSettings,
SetupWelcome,
SetupCrypto,
SetupProfiles,
SetupConfigureKey,
SetupTestRoute,
Setup)
|
apache-2.0
| -7,887,133,988,458,968,000 | 37.95229 | 83 | 0.503748 | false | 4.329409 | true | false | false |
munhanha/mtg-random
|
djangoappengine/boot.py
|
1
|
8121
|
import logging
import os
import sys
PROJECT_DIR = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
DATA_ROOT = os.path.join(PROJECT_DIR, '.gaedata')
# Overrides for os.environ
env_ext = {'DJANGO_SETTINGS_MODULE': 'settings'}
def setup_env():
"""Configures app engine environment for command-line apps."""
# Try to import the appengine code from the system path.
try:
from google.appengine.api import apiproxy_stub_map
except ImportError:
for k in [k for k in sys.modules if k.startswith('google')]:
del sys.modules[k]
# Not on the system path. Build a list of alternative paths where it
# may be. First look within the project for a local copy, then look for
# where the Mac OS SDK installs it.
paths = [os.path.join(PROJECT_DIR, '.google_appengine'),
os.environ.get('APP_ENGINE_SDK'),
'/usr/local/google_appengine',
'/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine']
for path in os.environ.get('PATH', '').split(os.pathsep):
path = path.rstrip(os.sep)
if path.endswith('google_appengine'):
paths.append(path)
if os.name in ('nt', 'dos'):
path = r'%(PROGRAMFILES)s\Google\google_appengine' % os.environ
paths.append(path)
# Loop through all possible paths and look for the SDK dir.
sdk_path = None
for path in paths:
if not path:
continue
path = os.path.expanduser(path)
path = os.path.realpath(path)
if os.path.exists(path):
sdk_path = path
break
if sdk_path is None:
# The SDK could not be found in any known location.
sys.stderr.write('The Google App Engine SDK could not be found!\n'
"Make sure it's accessible via your PATH "
"environment and called google_appengine.\n")
sys.exit(1)
# Add the SDK and the libraries within it to the system path.
extra_paths = [sdk_path]
lib = os.path.join(sdk_path, 'lib')
# Automatically add all packages in the SDK's lib folder:
for name in os.listdir(lib):
root = os.path.join(lib, name)
subdir = name
# Package can be under 'lib/<pkg>/<pkg>/' or 'lib/<pkg>/lib/<pkg>/'
detect = (os.path.join(root, subdir), os.path.join(root, 'lib', subdir))
for path in detect:
if os.path.isdir(path):
extra_paths.append(os.path.dirname(path))
break
else:
if name == 'webapp2':
extra_paths.append(root)
elif name == 'webob_0_9':
extra_paths.append(root)
sys.path = extra_paths + sys.path
from google.appengine.api import apiproxy_stub_map
setup_project()
from .utils import have_appserver
if have_appserver:
# App Engine's threading.local is broken
setup_threading()
elif not os.path.exists(DATA_ROOT):
os.mkdir(DATA_ROOT)
setup_logging()
if not have_appserver:
# Patch Django to support loading management commands from zip files
from django.core import management
management.find_commands = find_commands
def find_commands(management_dir):
"""
Given a path to a management directory, returns a list of all the command
names that are available.
This version works for django deployments which are file based or
contained in a ZIP (in sys.path).
Returns an empty list if no commands are defined.
"""
import pkgutil
return [modname for importer, modname, ispkg in pkgutil.iter_modules(
[os.path.join(management_dir, 'commands')]) if not ispkg]
def setup_threading():
if sys.version_info >= (2, 7):
return
# XXX: On Python 2.5 GAE's threading.local doesn't work correctly with subclassing
try:
from django.utils._threading_local import local
import threading
threading.local = local
except ImportError:
pass
def setup_logging():
# Fix Python 2.6 logging module
logging.logMultiprocessing = 0
# Enable logging
level = logging.DEBUG
from .utils import have_appserver
if have_appserver:
# We can't import settings at this point when running a normal
# manage.py command because this module gets imported from settings.py
from django.conf import settings
if not settings.DEBUG:
level = logging.INFO
logging.getLogger().setLevel(level)
def setup_project():
from .utils import have_appserver, on_production_server
if have_appserver:
# This fixes a pwd import bug for os.path.expanduser()
env_ext['HOME'] = PROJECT_DIR
# The dev_appserver creates a sandbox which restricts access to certain
# modules and builtins in order to emulate the production environment.
# Here we get the subprocess module back into the dev_appserver sandbox.
# This module is just too important for development.
# Also we add the compiler/parser module back and enable https connections
# (seem to be broken on Windows because the _ssl module is disallowed).
if not have_appserver:
from google.appengine.tools import dev_appserver
try:
# Backup os.environ. It gets overwritten by the dev_appserver,
# but it's needed by the subprocess module.
env = dev_appserver.DEFAULT_ENV
dev_appserver.DEFAULT_ENV = os.environ.copy()
dev_appserver.DEFAULT_ENV.update(env)
            # Backup the buffer() builtin. The subprocess module in Python 2.5
            # on Linux and OS X needs it, but the dev_appserver removes it.
dev_appserver.buffer = buffer
except AttributeError:
logging.warn('Could not patch the default environment. '
'The subprocess module will not work correctly.')
try:
# Allow importing compiler/parser, _ssl (for https),
# _io for Python 2.7 io support on OS X
dev_appserver.HardenedModulesHook._WHITE_LIST_C_MODULES.extend(
('parser', '_ssl', '_io'))
except AttributeError:
logging.warn('Could not patch modules whitelist. '
'The compiler and parser modules will not work and '
'SSL support is disabled.')
elif not on_production_server:
try:
# Restore the real subprocess module
from google.appengine.api.mail_stub import subprocess
sys.modules['subprocess'] = subprocess
# Re-inject the buffer() builtin into the subprocess module
from google.appengine.tools import dev_appserver
subprocess.buffer = dev_appserver.buffer
except Exception, e:
logging.warn('Could not add the subprocess module to the sandbox: %s' % e)
os.environ.update(env_ext)
extra_paths = [PROJECT_DIR, os.path.join(os.path.dirname(__file__), 'lib')]
zip_packages_dir = os.path.join(PROJECT_DIR, 'zip-packages')
# We support zipped packages in the common and project folders.
if os.path.isdir(zip_packages_dir):
for zip_package in os.listdir(zip_packages_dir):
extra_paths.append(os.path.join(zip_packages_dir, zip_package))
# App Engine causes main.py to be reloaded if an exception gets raised
# on the first request of a main.py instance, so don't call setup_project()
# multiple times. We ensure this indirectly by checking if we've already
    # modified sys.path.
if len(sys.path) < len(extra_paths) or \
sys.path[:len(extra_paths)] != extra_paths:
for path in extra_paths:
while path in sys.path:
sys.path.remove(path)
sys.path = extra_paths + sys.path
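# --- Illustrative usage sketch (not part of the original module) ---
# A project's manage.py is expected to bootstrap the SDK before Django runs.
# The exact manage.py contents below are an assumption, not part of this
# package.
def _example_manage_py():
    from djangoappengine.boot import setup_env
    setup_env()
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)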
|
bsd-3-clause
| 4,427,875,777,957,635,000 | 41.742105 | 147 | 0.619751 | false | 4.262992 | false | false | false |
TheWebMonks/equipo
|
app/freelancers/viewsets.py
|
1
|
4110
|
from .serializers import serializers
from .models import *
from django.contrib.auth.models import User, Group
from rest_framework import viewsets, permissions, renderers
class ProjectViewSet(viewsets.ModelViewSet):
"""
    API endpoint that allows projects to be viewed or edited.
"""
queryset = Project.objects.all()
serializer_class = serializers.ProjectSerializer
class CompanyViewSet(viewsets.ModelViewSet):
"""
    API endpoint that allows companies to be viewed or edited.
"""
queryset = Company.objects.all()
serializer_class = serializers.CompanySerializer
class SkillViewSet(viewsets.ModelViewSet):
"""
    API endpoint that allows skills to be viewed or edited.
"""
queryset = Skill.objects.all()
serializer_class = serializers.SkillSerializer
class TypeOfContractViewSet(viewsets.ModelViewSet):
"""
    API endpoint that allows contract types to be viewed or edited.
"""
queryset = TypeOfContract.objects.all()
serializer_class = serializers.TypeOfContractSerializer
class SocialNetworkViewSet(viewsets.ModelViewSet):
"""
    API endpoint that allows social networks to be viewed or edited.
"""
queryset = SocialNetwork.objects.all()
serializer_class = serializers.SocialNetworkSerializer
class SocialAccountViewSet(viewsets.ModelViewSet):
"""
    API endpoint that allows social accounts to be viewed or edited.
"""
queryset = SocialAccount.objects.all()
serializer_class = serializers.SocialAccountsSerializer
class ProfileViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows profiles to be viewed or edited.
"""
queryset = Profile.objects.all()
serializer_class = serializers.ProfileSerializer
# renderer_classes = (TemplateHTMLRenderer,)
class ExperienceViewSet(viewsets.ModelViewSet):
"""
    API endpoint that allows experiences to be viewed or edited.
"""
queryset = Experience.objects.all()
serializer_class = serializers.ExperienceSerializer
# renderer_classes = (TemplateHTMLRenderer,)
class EducationViewSet(viewsets.ModelViewSet):
"""
    API endpoint that allows education entries to be viewed or edited.
"""
queryset = Education.objects.all()
serializer_class = serializers.EducationSerializer
# renderer_classes = (TemplateHTMLRenderer,)
class UserViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows users to be viewed or edited.
"""
queryset = User.objects.all().order_by('-date_joined')
serializer_class = serializers.UserSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
class GroupViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows groups to be viewed or edited.
"""
queryset = Group.objects.all()
serializer_class = serializers.GroupSerializer
class CategoryViewSet(viewsets.ModelViewSet):
"""
    API endpoint that allows categories to be viewed or edited.
"""
queryset = Category.objects.all()
serializer_class = serializers.CategorySerializer
class KindOfTaskViewSet(viewsets.ModelViewSet):
"""
    API endpoint that allows task kinds to be viewed or edited.
"""
queryset = KindOfTask.objects.all()
serializer_class = serializers.KindOfTaskSerializer
class ExpenseViewSet(viewsets.ModelViewSet):
"""
    API endpoint that allows expenses to be viewed or edited.
"""
queryset = Expense.objects.all()
serializer_class = serializers.ExpenseSerializer
class ExpendedTimeViewSet(viewsets.ModelViewSet):
"""
    API endpoint that allows expended time entries to be viewed or edited.
"""
queryset = ExpendedTime.objects.all()
serializer_class = serializers.ExpendedTimeSerializer
class ContractViewSet(viewsets.ModelViewSet):
"""
    API endpoint that allows contracts to be viewed or edited.
"""
queryset = Contract.objects.all()
serializer_class = serializers.ContractSerializer
class InvoiceViewSet(viewsets.ModelViewSet):
"""
    API endpoint that allows invoices to be viewed or edited.
"""
queryset = Invoice.objects.all()
serializer_class = serializers.InvoiceSerializer
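# --- Illustrative usage sketch (not part of the original module) ---
# These viewsets are meant to be exposed through a DRF router in urls.py;
# the URL prefixes below are assumptions.
def _example_router():
    from rest_framework import routers
    router = routers.DefaultRouter()
    router.register(r'projects', ProjectViewSet)
    router.register(r'profiles', ProfileViewSet)
    router.register(r'invoices', InvoiceViewSet)
    return router.urls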
|
apache-2.0
| -1,411,435,979,445,298,400 | 27.541667 | 65 | 0.725304 | false | 4.363057 | false | false | false |
srossross/stable.world
|
stable_world/managers/base.py
|
1
|
1825
|
from __future__ import unicode_literals
import os
import click
from pipes import quote
from .push_file import push_file, pull_file
from stable_world.py_helpers import urlparse, urlunparse
class BaseManager(object):
NAME = None
PROGRAM = None
@classmethod
def enabled(cls):
if cls.PROGRAM is None:
return True
for path in os.getenv('PATH', '').split(os.pathsep):
if os.path.isfile(os.path.join(path, cls.PROGRAM)):
return True
def __init__(self, site_url, urls, bucket, token, dryrun):
self.site_url = site_url
self.bucket = bucket
self.token = token
self.dryrun = dryrun
self.cache_name = self.NAME
self.cache_info = urls[self.NAME]
@property
def config_file(self):
raise NotImplementedError()
@property
def cache_dir(self):
cache_dir = os.path.join('~', '.cache', 'stable.world', self.bucket)
return os.path.expanduser(cache_dir)
def get_base_url(self, basicAuthRequired=False):
site_url = self.site_url
if basicAuthRequired:
site_uri = urlparse(self.site_url)
site_url = urlunparse(site_uri._replace(netloc='{}:{}@{}'.format(
'token',
self.token,
site_uri.netloc
)))
return '%s/cache/%s/%s/' % (site_url, self.bucket, self.cache_name)
def use(self):
if not self.dryrun:
push_file(self.config_file)
return self.update_config_file()
@classmethod
def unuse(cls, info):
if not info:
return
for config_file in info.get('config_files', []):
click.echo('Removing {} config file {}'.format(cls.NAME, quote(config_file)))
pull_file(config_file)
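# --- Illustrative subclass sketch (not part of the original module) ---
# BaseManager leaves NAME/PROGRAM, `config_file` and `update_config_file` to
# concrete managers. The pip-flavoured subclass below is an assumption about
# how such a manager could look (it also assumes `urls` contains a 'pypi'
# entry), not the project's real implementation.
class ExamplePipManager(BaseManager):
    NAME = 'pypi'
    PROGRAM = 'pip'
    @property
    def config_file(self):
        return os.path.expanduser(os.path.join('~', '.pip', 'pip.conf'))
    def update_config_file(self):
        # Point pip's index-url at the cache URL for this bucket.
        index_url = self.get_base_url(basicAuthRequired=True)
        if not self.dryrun:
            with open(self.config_file, 'w') as fd:
                fd.write('[global]\nindex-url = %s\n' % index_url)
        return {'config_files': [self.config_file]}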
|
bsd-2-clause
| -4,610,486,736,509,769,000 | 25.071429 | 89 | 0.580274 | false | 3.882979 | true | false | false |
avedaee/DIRAC
|
DataManagementSystem/Service/FileCatalogProxyHandler.py
|
1
|
3492
|
########################################################################
# $HeadURL $
# File: FileCatalogProxyHandler.py
########################################################################
""" :mod: FileCatalogProxyHandler
================================
    .. module:: FileCatalogProxyHandler
:synopsis: This is a service which represents a DISET proxy to the File Catalog
"""
## imports
import os
from types import StringTypes, DictType, TupleType
## from DIRAC
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Core.Utilities.Subprocess import pythonCall
from DIRAC.FrameworkSystem.Client.ProxyManagerClient import gProxyManager
__RCSID__ = "$Id$"
def initializeFileCatalogProxyHandler( serviceInfo ):
""" service initalisation """
return S_OK()
class FileCatalogProxyHandler( RequestHandler ):
"""
.. class:: FileCatalogProxyHandler
"""
types_callProxyMethod = [ StringTypes, StringTypes, TupleType, DictType ]
def export_callProxyMethod( self, fcName, methodName, args, kargs ):
""" A generic method to call methods of the Storage Element.
"""
res = pythonCall( 120, self.__proxyWrapper, fcName, methodName, args, kargs )
if res['OK']:
return res['Value']
else:
return res
def __proxyWrapper( self, fcName, methodName, args, kwargs ):
""" The wrapper will obtain the client proxy and set it up in the environment.
The required functionality is then executed and returned to the client.
:param self: self reference
:param str name: fcn name
:param tuple args: fcn args
:param dict kwargs: fcn keyword args
"""
result = self.__prepareSecurityDetails()
if not result['OK']:
return result
    proxyLocation = result['Value']
try:
fileCatalog = FileCatalog( [fcName] )
method = getattr( fileCatalog, methodName )
except AttributeError, error:
errStr = "%s proxy: no method named %s" % ( fcName, methodName )
gLogger.exception( errStr, methodName, error )
return S_ERROR( errStr )
try:
result = method( *args, **kwargs )
if os.path.exists(proxyLocation):
os.remove(proxyLocation)
return result
except Exception, error:
if os.path.exists(proxyLocation):
os.remove(proxyLocation)
errStr = "%s proxy: Exception while performing %s" % ( fcName, methodName )
gLogger.exception( errStr, error )
return S_ERROR( errStr )
def __prepareSecurityDetails( self, vomsFlag = True ):
""" Obtains the connection details for the client """
try:
credDict = self.getRemoteCredentials()
clientDN = credDict[ 'DN' ]
clientUsername = credDict['username']
clientGroup = credDict['group']
gLogger.debug( "Getting proxy for %s@%s (%s)" % ( clientUsername, clientGroup, clientDN ) )
if vomsFlag:
result = gProxyManager.downloadVOMSProxyToFile( clientDN, clientGroup )
else:
result = gProxyManager.downloadProxyToFile( clientDN, clientGroup )
if not result['OK']:
return result
gLogger.debug( "Updating environment." )
os.environ['X509_USER_PROXY'] = result['Value']
return result
except Exception, error:
exStr = "__getConnectionDetails: Failed to get client connection details."
gLogger.exception( exStr, '', error )
return S_ERROR( exStr )
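# --- Illustrative client-side sketch (not part of the original module) ---
# A client would normally reach this handler through DISET; the service path
# and the catalog/method/arguments below are assumptions.
def _exampleClientCall():
  from DIRAC.Core.DISET.RPCClient import RPCClient
  rpc = RPCClient( "DataManagement/FileCatalogProxy" )
  return rpc.callProxyMethod( "FileCatalog", "listDirectory", ( "/some/lfn/dir", ), {} )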
|
gpl-3.0
| 5,032,431,279,644,278,000 | 35.757895 | 97 | 0.645762 | false | 4.279412 | false | false | false |
googleapis/googleapis-gen
|
grafeas/v1/grafeas-v1-py/grafeas/grafeas_v1/types/build.py
|
1
|
2488
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from grafeas.grafeas_v1.types import provenance as g_provenance
__protobuf__ = proto.module(
package='grafeas.v1',
manifest={
'BuildNote',
'BuildOccurrence',
},
)
class BuildNote(proto.Message):
r"""Note holding the version of the provider's builder and the
signature of the provenance message in the build details
occurrence.
Attributes:
builder_version (str):
Required. Immutable. Version of the builder
which produced this build.
"""
builder_version = proto.Field(
proto.STRING,
number=1,
)
class BuildOccurrence(proto.Message):
r"""Details of a build occurrence.
Attributes:
provenance (grafeas.grafeas_v1.types.BuildProvenance):
Required. The actual provenance for the
build.
provenance_bytes (str):
Serialized JSON representation of the provenance, used in
generating the build signature in the corresponding build
note. After verifying the signature, ``provenance_bytes``
can be unmarshalled and compared to the provenance to
confirm that it is unchanged. A base64-encoded string
representation of the provenance bytes is used for the
signature in order to interoperate with openssl which
expects this format for signature verification.
The serialized form is captured both to avoid ambiguity in
how the provenance is marshalled to json as well to prevent
incompatibilities with future changes.
"""
provenance = proto.Field(
proto.MESSAGE,
number=1,
message=g_provenance.BuildProvenance,
)
provenance_bytes = proto.Field(
proto.STRING,
number=2,
)
__all__ = tuple(sorted(__protobuf__.manifest))
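# --- Illustrative usage sketch (not part of the original module) ---
# proto-plus messages accept keyword arguments; the field values below are
# placeholders.
def _example_messages():
    note = BuildNote(builder_version='v1.2.3')
    occurrence = BuildOccurrence(
        provenance=g_provenance.BuildProvenance(id='build-123'),
        provenance_bytes='e30=',  # base64-encoded "{}"
    )
    return note, occurrence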
|
apache-2.0
| -2,803,864,520,464,966,700 | 30.493671 | 74 | 0.668408 | false | 4.252991 | false | false | false |
TuKo/brainiak
|
examples/fcma/classification.py
|
1
|
9141
|
# Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from brainiak.fcma.classifier import Classifier
from brainiak.fcma.preprocessing import prepare_fcma_data
from brainiak.io import dataset
from sklearn import svm
#from sklearn.linear_model import LogisticRegression
import sys
import logging
import numpy as np
from scipy.spatial.distance import hamming
from sklearn import model_selection
#from sklearn.externals import joblib
format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
# if want to output log to a file instead of outputting log to the console,
# replace "stream=sys.stdout" with "filename='fcma.log'"
logging.basicConfig(level=logging.INFO, format=format, stream=sys.stdout)
logger = logging.getLogger(__name__)
def example_of_aggregating_sim_matrix(raw_data, labels, num_subjects, num_epochs_per_subj):
# aggregate the kernel matrix to save memory
svm_clf = svm.SVC(kernel='precomputed', shrinking=False, C=1)
clf = Classifier(svm_clf, num_processed_voxels=1000, epochs_per_subj=num_epochs_per_subj)
rearranged_data = raw_data[num_epochs_per_subj:] + raw_data[0:num_epochs_per_subj]
rearranged_labels = labels[num_epochs_per_subj:] + labels[0:num_epochs_per_subj]
clf.fit(list(zip(rearranged_data, rearranged_data)), rearranged_labels,
num_training_samples=num_epochs_per_subj*(num_subjects-1))
predict = clf.predict()
print(predict)
print(clf.decision_function())
test_labels = labels[0:num_epochs_per_subj]
incorrect_predict = hamming(predict, np.asanyarray(test_labels)) * num_epochs_per_subj
logger.info(
'when aggregating the similarity matrix to save memory, '
'the accuracy is %d / %d = %.2f' %
(num_epochs_per_subj-incorrect_predict, num_epochs_per_subj,
(num_epochs_per_subj-incorrect_predict) * 1.0 / num_epochs_per_subj)
)
# when the kernel matrix is computed in portion, the test data is already in
print(clf.score(None, test_labels))
def example_of_cross_validation_with_detailed_info(raw_data, labels, num_subjects, num_epochs_per_subj):
# no shrinking, set C=1
svm_clf = svm.SVC(kernel='precomputed', shrinking=False, C=1)
#logit_clf = LogisticRegression()
clf = Classifier(svm_clf, epochs_per_subj=num_epochs_per_subj)
# doing leave-one-subject-out cross validation
for i in range(num_subjects):
leave_start = i * num_epochs_per_subj
leave_end = (i+1) * num_epochs_per_subj
training_data = raw_data[0:leave_start] + raw_data[leave_end:]
test_data = raw_data[leave_start:leave_end]
training_labels = labels[0:leave_start] + labels[leave_end:]
test_labels = labels[leave_start:leave_end]
clf.fit(list(zip(training_data, training_data)), training_labels)
# joblib can be used for saving and loading models
#joblib.dump(clf, 'model/logistic.pkl')
#clf = joblib.load('model/svm.pkl')
predict = clf.predict(list(zip(test_data, test_data)))
print(predict)
print(clf.decision_function(list(zip(test_data, test_data))))
incorrect_predict = hamming(predict, np.asanyarray(test_labels)) * num_epochs_per_subj
logger.info(
'when leaving subject %d out for testing, the accuracy is %d / %d = %.2f' %
(i, num_epochs_per_subj-incorrect_predict, num_epochs_per_subj,
(num_epochs_per_subj-incorrect_predict) * 1.0 / num_epochs_per_subj)
)
print(clf.score(list(zip(test_data, test_data)), test_labels))
def example_of_cross_validation_using_model_selection(raw_data, labels, num_subjects, num_epochs_per_subj):
# NOTE: this method does not work for sklearn.svm.SVC with precomputed kernel
# when the kernel matrix is computed in portions; also, this method only works
# for self-correlation, i.e. correlation between the same data matrix.
# no shrinking, set C=1
svm_clf = svm.SVC(kernel='precomputed', shrinking=False, C=1)
#logit_clf = LogisticRegression()
clf = Classifier(svm_clf, epochs_per_subj=num_epochs_per_subj)
# doing leave-one-subject-out cross validation
# no shuffling in cv
skf = model_selection.StratifiedKFold(n_splits=num_subjects,
shuffle=False)
scores = model_selection.cross_val_score(clf, list(zip(raw_data, raw_data)),
y=labels,
cv=skf)
print(scores)
logger.info(
'the overall cross validation accuracy is %.2f' %
np.mean(scores)
)
def example_of_correlating_two_components(raw_data, raw_data2, labels, num_subjects, num_epochs_per_subj):
# aggregate the kernel matrix to save memory
svm_clf = svm.SVC(kernel='precomputed', shrinking=False, C=1)
clf = Classifier(svm_clf, epochs_per_subj=num_epochs_per_subj)
num_training_samples=num_epochs_per_subj*(num_subjects-1)
clf.fit(list(zip(raw_data[0:num_training_samples], raw_data2[0:num_training_samples])),
labels[0:num_training_samples])
X = list(zip(raw_data[num_training_samples:], raw_data2[num_training_samples:]))
predict = clf.predict(X)
print(predict)
print(clf.decision_function(X))
test_labels = labels[num_training_samples:]
incorrect_predict = hamming(predict, np.asanyarray(test_labels)) * num_epochs_per_subj
logger.info(
'when aggregating the similarity matrix to save memory, '
'the accuracy is %d / %d = %.2f' %
(num_epochs_per_subj-incorrect_predict, num_epochs_per_subj,
(num_epochs_per_subj-incorrect_predict) * 1.0 / num_epochs_per_subj)
)
# when the kernel matrix is computed in portion, the test data is already in
print(clf.score(X, test_labels))
def example_of_correlating_two_components_aggregating_sim_matrix(raw_data, raw_data2, labels,
num_subjects, num_epochs_per_subj):
# aggregate the kernel matrix to save memory
svm_clf = svm.SVC(kernel='precomputed', shrinking=False, C=1)
clf = Classifier(svm_clf, num_processed_voxels=1000, epochs_per_subj=num_epochs_per_subj)
num_training_samples=num_epochs_per_subj*(num_subjects-1)
clf.fit(list(zip(raw_data, raw_data2)), labels,
num_training_samples=num_training_samples)
predict = clf.predict()
print(predict)
print(clf.decision_function())
test_labels = labels[num_training_samples:]
incorrect_predict = hamming(predict, np.asanyarray(test_labels)) * num_epochs_per_subj
logger.info(
'when aggregating the similarity matrix to save memory, '
'the accuracy is %d / %d = %.2f' %
(num_epochs_per_subj-incorrect_predict, num_epochs_per_subj,
(num_epochs_per_subj-incorrect_predict) * 1.0 / num_epochs_per_subj)
)
# when the kernel matrix is computed in portion, the test data is already in
print(clf.score(None, test_labels))
# python3 classification.py face_scene bet.nii.gz face_scene/prefrontal_top_mask.nii.gz face_scene/fs_epoch_labels.npy
if __name__ == '__main__':
if len(sys.argv) != 5:
logger.error('the number of input argument is not correct')
sys.exit(1)
data_dir = sys.argv[1]
extension = sys.argv[2]
mask_file = sys.argv[3]
epoch_file = sys.argv[4]
epoch_list = np.load(epoch_file)
num_subjects = len(epoch_list)
num_epochs_per_subj = epoch_list[0].shape[1]
images = dataset.load_images_from_dir(data_dir, extension)
mask = dataset.load_boolean_mask(mask_file)
conditions = dataset.load_labels(epoch_file)
raw_data, _, labels = prepare_fcma_data(images, conditions, mask)
example_of_aggregating_sim_matrix(raw_data, labels, num_subjects, num_epochs_per_subj)
example_of_cross_validation_with_detailed_info(raw_data, labels, num_subjects, num_epochs_per_subj)
example_of_cross_validation_using_model_selection(raw_data, labels, num_subjects, num_epochs_per_subj)
# test of two different components for correlation computation
# images = dataset.load_images_from_dir(data_dir, extension)
# mask2 = dataset.load_boolean_mask('face_scene/visual_top_mask.nii.gz')
# raw_data, raw_data2, labels = prepare_fcma_data(images, conditions, mask,
# mask2)
#example_of_correlating_two_components(raw_data, raw_data2, labels, num_subjects, num_epochs_per_subj)
#example_of_correlating_two_components_aggregating_sim_matrix(raw_data, raw_data2, labels, num_subjects, num_epochs_per_subj)
|
apache-2.0
| 8,559,069,756,804,332,000 | 49.502762 | 129 | 0.68067 | false | 3.34223 | true | false | false |
praekelt/vumi-go
|
go/routers/app_multiplexer/forms.py
|
1
|
1271
|
from django import forms
class ApplicationMultiplexerTitleForm(forms.Form):
content = forms.CharField(
label="Menu title",
max_length=100
)
class ApplicationMultiplexerForm(forms.Form):
application_label = forms.CharField(
label="Application label"
)
endpoint_name = forms.CharField(
label="Endpoint name"
)
class BaseApplicationMultiplexerFormSet(forms.formsets.BaseFormSet):
@staticmethod
def initial_from_config(data):
initial_data = []
for entry in data:
initial_data.append({
'application_label': entry['label'],
'endpoint_name': entry['endpoint'],
})
return initial_data
def to_config(self):
entries = []
for form in self.ordered_forms:
if not form.is_valid():
continue
entries.append({
"label": form.cleaned_data['application_label'],
"endpoint": form.cleaned_data['endpoint_name'],
})
return entries
ApplicationMultiplexerFormSet = forms.formsets.formset_factory(
ApplicationMultiplexerForm,
can_delete=True,
can_order=True,
extra=1,
formset=BaseApplicationMultiplexerFormSet)
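# --- Illustrative usage sketch (not part of the original module) ---
# Shows how router config round-trips through the formset; the label and
# endpoint values are placeholders.
def _example_round_trip():
    config = [{'label': 'My app', 'endpoint': 'default'}]
    initial = BaseApplicationMultiplexerFormSet.initial_from_config(config)
    formset = ApplicationMultiplexerFormSet(initial=initial)
    # After binding submitted data and validating, `formset.to_config()`
    # rebuilds the [{'label': ..., 'endpoint': ...}] structure.
    return formset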
|
bsd-3-clause
| 8,391,352,842,481,823,000 | 24.938776 | 68 | 0.610543 | false | 4.539286 | false | false | false |
xmnlab/minilab
|
arch/socket_arch/simple_socket/client.py
|
1
|
1045
|
# Echo client program
from __future__ import print_function, division, unicode_literals
from random import randint
import socket
import pickle
import sys
import platform
if platform.system() == 'Linux':
sys.path.insert(0, '/var/www/mswim')
else:
sys.path.insert(0, '/mswim/')
from mswim import settings
HOST = 'localhost' # The remote host
PORT = 50007 # The same port as used by the server
def acquisition_data(device_type):
    conn_id = str(randint(0, 10**10))
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((HOST, PORT))
    s.sendall(pickle.dumps((settings.DEVICES[device_type], conn_id)))
breaker = '\n\n\n\n'
cache = ''
while True:
while True:
cache += s.recv(1024)
if breaker in cache:
i = cache.index(breaker)
data = pickle.loads(cache[:i])
cache = cache[i+len(breaker):]
print('Received %s registers.' % len(data))
del data
s.close()
acquisition_data('ceramic')
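# --- Illustrative counterpart sketch (not part of the original module) ---
# A minimal local server speaking the same pickle + '\n\n\n\n' breaker
# protocol, for testing the client above. The real MSWIM server also consults
# settings.DEVICES, so this is only an assumption. Defined but never called.
def _example_server():
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.bind((HOST, PORT))
    srv.listen(1)
    conn, addr = srv.accept()
    device, conn_id = pickle.loads(conn.recv(1024))
    while True:
        payload = pickle.dumps(['reading-%d' % i for i in range(10)])
        conn.sendall(payload + '\n\n\n\n')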
|
gpl-3.0
| 7,342,832,662,091,720,000 | 23.325581 | 65 | 0.6 | false | 3.530405 | false | false | false |
penkin/python-dockercloud
|
dockercloud/api/http.py
|
1
|
2612
|
from __future__ import absolute_import
import logging
from requests import Request, Session
from requests import utils
from urllib.parse import urljoin
import dockercloud
from .exceptions import ApiError, AuthError
logger = logging.getLogger("python-dockercloud")
global_session = Session()
def get_session():
return global_session
def close_session():
try:
global global_session
global_session.close()
except:
pass
def new_session():
close_session()
global global_session
global_session = Session()
def send_request(method, path, inject_header=True, **kwargs):
json = None
url = urljoin(dockercloud.rest_host.encode(), path.strip("/").encode())
if not url.endswith(b"/"):
url = b"%s/" % url
user_agent = 'python-dockercloud/%s' % dockercloud.__version__
if dockercloud.user_agent:
user_agent = "%s %s" % (dockercloud.user_agent, user_agent)
# construct headers
headers = {'Content-Type': 'application/json', 'User-Agent': user_agent}
headers.update(dockercloud.auth.get_auth_header())
# construct request
s = get_session()
request = Request(method, url, headers=headers, **kwargs)
# get environment proxies
env_proxies = utils.get_environ_proxies(url) or {}
kw_args = {'proxies': env_proxies}
# make the request
req = s.prepare_request(request)
logger.info("Prepared Request: %s, %s, %s, %s" % (req.method, req.url, req.headers, kwargs))
response = s.send(req, **kw_args)
status_code = getattr(response, 'status_code', None)
logger.info("Response: Status %s, %s, %s" % (str(status_code), response.headers, response.text))
# handle the response
if not status_code:
# Most likely network trouble
raise ApiError("No Response (%s %s)" % (method, url))
elif 200 <= status_code <= 299:
# Success
if status_code != 204:
# Try to parse the response.
try:
json = response.json()
if response.headers and inject_header:
json["dockercloud_action_uri"] = response.headers.get("X-DockerCloud-Action-URI", "")
except TypeError:
raise ApiError("JSON Parse Error (%s %s). Response: %s" % (method, url, response.text))
else:
json = None
else:
# Server returned an error
if status_code == 401:
raise AuthError("Not authorized")
else:
raise ApiError("Status %s (%s %s). Response: %s" % (str(status_code), method, url, response.text))
return json
|
apache-2.0
| 2,378,145,631,790,428,000 | 30.095238 | 110 | 0.620597 | false | 3.835536 | false | false | false |
rawdigits/wee-slack
|
wee_slack.py
|
1
|
170611
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from collections import OrderedDict
from functools import wraps
from itertools import islice, count
import textwrap
import time
import json
import pickle
import sha
import os
import re
import urllib
import sys
import traceback
import collections
import ssl
import random
import string
try:
from cStringIO import StringIO
except:
from StringIO import StringIO
from websocket import create_connection, WebSocketConnectionClosedException
# hack to make tests possible.. better way?
try:
import weechat
except:
pass
SCRIPT_NAME = "slack"
SCRIPT_AUTHOR = "Ryan Huber <[email protected]>"
SCRIPT_VERSION = "2.2.0"
SCRIPT_LICENSE = "MIT"
SCRIPT_DESC = "Extends weechat for typing notification/search/etc on slack.com"
BACKLOG_SIZE = 200
SCROLLBACK_SIZE = 500
RECORD_DIR = "/tmp/weeslack-debug"
SLACK_API_TRANSLATOR = {
"channel": {
"history": "channels.history",
"join": "conversations.join",
"leave": "conversations.leave",
"mark": "channels.mark",
"info": "channels.info",
},
"im": {
"history": "im.history",
"join": "conversations.open",
"leave": "conversations.close",
"mark": "im.mark",
},
"mpim": {
"history": "mpim.history",
"join": "mpim.open", # conversations.open lacks unread_count_display
"leave": "conversations.close",
"mark": "mpim.mark",
"info": "groups.info",
},
"group": {
"history": "groups.history",
"join": "conversations.join",
"leave": "conversations.leave",
"mark": "groups.mark",
"info": "groups.info"
},
"shared": {
"history": "conversations.history",
"join": "conversations.join",
"leave": "conversations.leave",
"mark": "channels.mark",
"info": "conversations.info",
},
"thread": {
"history": None,
"join": None,
"leave": None,
"mark": None,
}
}
###### Decorators have to be up here
def slack_buffer_or_ignore(f):
"""
Only run this function if we're in a slack buffer, else ignore
"""
@wraps(f)
def wrapper(data, current_buffer, *args, **kwargs):
if current_buffer not in EVENTROUTER.weechat_controller.buffers:
return w.WEECHAT_RC_OK
return f(data, current_buffer, *args, **kwargs)
return wrapper
def slack_buffer_required(f):
"""
Only run this function if we're in a slack buffer, else print error
"""
@wraps(f)
def wrapper(data, current_buffer, *args, **kwargs):
if current_buffer not in EVENTROUTER.weechat_controller.buffers:
return w.WEECHAT_RC_ERROR
return f(data, current_buffer, *args, **kwargs)
return wrapper
def utf8_decode(f):
"""
Decode all arguments from byte strings to unicode strings. Use this for
functions called from outside of this script, e.g. callbacks from weechat.
"""
@wraps(f)
def wrapper(*args, **kwargs):
return f(*decode_from_utf8(args), **decode_from_utf8(kwargs))
return wrapper
NICK_GROUP_HERE = "0|Here"
NICK_GROUP_AWAY = "1|Away"
NICK_GROUP_EXTERNAL = "2|External"
sslopt_ca_certs = {}
if hasattr(ssl, "get_default_verify_paths") and callable(ssl.get_default_verify_paths):
ssl_defaults = ssl.get_default_verify_paths()
if ssl_defaults.cafile is not None:
sslopt_ca_certs = {'ca_certs': ssl_defaults.cafile}
EMOJI = []
###### Unicode handling
def encode_to_utf8(data):
if isinstance(data, unicode):
return data.encode('utf-8')
if isinstance(data, bytes):
return data
elif isinstance(data, collections.Mapping):
return type(data)(map(encode_to_utf8, data.iteritems()))
elif isinstance(data, collections.Iterable):
return type(data)(map(encode_to_utf8, data))
else:
return data
def decode_from_utf8(data):
if isinstance(data, bytes):
return data.decode('utf-8')
if isinstance(data, unicode):
return data
elif isinstance(data, collections.Mapping):
return type(data)(map(decode_from_utf8, data.iteritems()))
elif isinstance(data, collections.Iterable):
return type(data)(map(decode_from_utf8, data))
else:
return data
class WeechatWrapper(object):
def __init__(self, wrapped_class):
self.wrapped_class = wrapped_class
# Helper method used to encode/decode method calls.
def wrap_for_utf8(self, method):
def hooked(*args, **kwargs):
result = method(*encode_to_utf8(args), **encode_to_utf8(kwargs))
# Prevent wrapped_class from becoming unwrapped
if result == self.wrapped_class:
return self
return decode_from_utf8(result)
return hooked
# Encode and decode everything sent to/received from weechat. We use the
    # unicode type internally in wee-slack, but have to send utf8 to weechat.
def __getattr__(self, attr):
orig_attr = self.wrapped_class.__getattribute__(attr)
if callable(orig_attr):
return self.wrap_for_utf8(orig_attr)
else:
return decode_from_utf8(orig_attr)
    # Ensure all lines sent to weechat specify a prefix. For lines after the
# first, we want to disable the prefix, which is done by specifying a space.
def prnt_date_tags(self, buffer, date, tags, message):
message = message.replace("\n", "\n \t")
return self.wrap_for_utf8(self.wrapped_class.prnt_date_tags)(buffer, date, tags, message)
class ProxyWrapper(object):
def __init__(self):
self.proxy_name = w.config_string(weechat.config_get('weechat.network.proxy_curl'))
self.proxy_string = ""
self.proxy_type = ""
self.proxy_address = ""
self.proxy_port = ""
self.proxy_user = ""
self.proxy_password = ""
self.has_proxy = False
if self.proxy_name:
self.proxy_string = "weechat.proxy.{}".format(self.proxy_name)
self.proxy_type = w.config_string(weechat.config_get("{}.type".format(self.proxy_string)))
if self.proxy_type == "http":
self.proxy_address = w.config_string(weechat.config_get("{}.address".format(self.proxy_string)))
self.proxy_port = w.config_integer(weechat.config_get("{}.port".format(self.proxy_string)))
self.proxy_user = w.config_string(weechat.config_get("{}.username".format(self.proxy_string)))
self.proxy_password = w.config_string(weechat.config_get("{}.password".format(self.proxy_string)))
self.has_proxy = True
else:
w.prnt("", "\nWarning: weechat.network.proxy_curl is set to {} type (name : {}, conf string : {}). Only HTTP proxy is supported.\n\n".format(self.proxy_type, self.proxy_name, self.proxy_string))
def curl(self):
if not self.has_proxy:
return ""
if self.proxy_user and self.proxy_password:
user = "{}:{}@".format(self.proxy_user, self.proxy_password)
else:
user = ""
if self.proxy_port:
port = ":{}".format(self.proxy_port)
else:
port = ""
return "--proxy {}{}{}".format(user, self.proxy_address, port)
##### Helpers
def format_exc_tb():
return decode_from_utf8(traceback.format_exc())
def format_exc_only():
etype, value, _ = sys.exc_info()
return ''.join(decode_from_utf8(traceback.format_exception_only(etype, value)))
def get_nick_color_name(nick):
info_name_prefix = "irc_" if int(weechat_version) < 0x1050000 else ""
return w.info_get(info_name_prefix + "nick_color_name", nick)
def get_functions_with_prefix(prefix):
return {name[len(prefix):]: ref for name, ref in globals().items()
if name.startswith(prefix)}
###### New central Event router
class EventRouter(object):
def __init__(self):
"""
complete
Eventrouter is the central hub we use to route:
1) incoming websocket data
2) outgoing http requests and incoming replies
3) local requests
It has a recorder that, when enabled, logs most events
to the location specified in RECORD_DIR.
"""
self.queue = []
self.slow_queue = []
self.slow_queue_timer = 0
self.teams = {}
self.context = {}
self.weechat_controller = WeechatController(self)
self.previous_buffer = ""
self.reply_buffer = {}
self.cmds = get_functions_with_prefix("command_")
self.proc = get_functions_with_prefix("process_")
self.handlers = get_functions_with_prefix("handle_")
self.local_proc = get_functions_with_prefix("local_process_")
self.shutting_down = False
self.recording = False
self.recording_path = "/tmp"
self.handle_next_hook = None
self.handle_next_hook_interval = -1
def record(self):
"""
complete
Toggles the event recorder and creates a directory for data if enabled.
"""
self.recording = not self.recording
if self.recording:
if not os.path.exists(RECORD_DIR):
os.makedirs(RECORD_DIR)
def record_event(self, message_json, file_name_field, subdir=None):
"""
complete
Called each time you want to record an event.
message_json is a json in dict form
file_name_field is the json key whose value you want to be part of the file name
"""
now = time.time()
if subdir:
directory = "{}/{}".format(RECORD_DIR, subdir)
else:
directory = RECORD_DIR
if not os.path.exists(directory):
os.makedirs(directory)
mtype = message_json.get(file_name_field, 'unknown')
f = open('{}/{}-{}.json'.format(directory, now, mtype), 'w')
f.write("{}".format(json.dumps(message_json)))
f.close()
def store_context(self, data):
"""
A place to store data and vars needed by callback returns. We need this because
weechat's "callback_data" has a limited size and weechat will crash if you exceed
this size.
"""
identifier = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(40))
self.context[identifier] = data
dbg("stored context {} {} ".format(identifier, data.url))
return identifier
def retrieve_context(self, identifier):
"""
A place to retrieve data and vars needed by callback returns. We need this because
weechat's "callback_data" has a limited size and weechat will crash if you exceed
this size.
"""
data = self.context.get(identifier, None)
if data:
# dbg("retrieved context {} ".format(identifier))
return data
def delete_context(self, identifier):
"""
        An interaction can span multiple requests, so we may need to delete the stored context only as a last step
"""
if identifier in self.context:
# dbg("deleted eontext {} ".format(identifier))
del self.context[identifier]
def shutdown(self):
"""
complete
This toggles shutdown mode. Shutdown mode tells us not to
talk to Slack anymore. Without this, typing /quit will trigger
a race with the buffer close callback and may result in you
leaving every slack channel.
"""
self.shutting_down = not self.shutting_down
def register_team(self, team):
"""
complete
Adds a team to the list of known teams for this EventRouter.
"""
if isinstance(team, SlackTeam):
self.teams[team.get_team_hash()] = team
else:
raise InvalidType(type(team))
def reconnect_if_disconnected(self):
for team_id, team in self.teams.iteritems():
if not team.connected:
team.connect()
dbg("reconnecting {}".format(team))
def receive_ws_callback(self, team_hash):
"""
This is called by the global method of the same name.
It is triggered when we have incoming data on a websocket,
which needs to be read. Once it is read, we will ensure
the data is valid JSON, add metadata, and place it back
on the queue for processing as JSON.
"""
team = self.teams[team_hash]
try:
# Read the data from the websocket associated with this team.
data = team.ws.recv()
except WebSocketConnectionClosedException:
w.prnt(team.channel_buffer,
'Lost connection to slack team {} (on receive), reconnecting.'.format(team.domain))
dbg('receive_ws_callback failed with exception:\n{}'.format(format_exc_tb()), level=5)
team.set_disconnected()
return w.WEECHAT_RC_OK
except ssl.SSLWantReadError:
# Expected to happen occasionally on SSL websockets.
return w.WEECHAT_RC_OK
message_json = json.loads(decode_from_utf8(data))
metadata = WeeSlackMetadata({
"team": team_hash,
}).jsonify()
message_json["wee_slack_metadata"] = metadata
if self.recording:
self.record_event(message_json, 'type', 'websocket')
self.receive_json(json.dumps(message_json))
def receive_httprequest_callback(self, data, command, return_code, out, err):
"""
complete
Receives the result of an http request we previously handed
off to weechat (weechat bundles libcurl). Weechat can fragment
replies, so it buffers them until the reply is complete.
It is then populated with metadata here so we can identify
where the request originated and route properly.
"""
request_metadata = self.retrieve_context(data)
try:
dbg("RECEIVED CALLBACK with request of {} id of {} and code {} of length {}".format(request_metadata.request, request_metadata.response_id, return_code, len(out)))
except:
dbg(request_metadata)
return
if return_code == 0:
if len(out) > 0:
if request_metadata.response_id not in self.reply_buffer:
self.reply_buffer[request_metadata.response_id] = StringIO()
self.reply_buffer[request_metadata.response_id].write(out)
try:
j = json.loads(self.reply_buffer[request_metadata.response_id].getvalue())
except:
pass
# dbg("Incomplete json, awaiting more", True)
try:
j["wee_slack_process_method"] = request_metadata.request_normalized
j["wee_slack_request_metadata"] = pickle.dumps(request_metadata)
self.reply_buffer.pop(request_metadata.response_id)
if self.recording:
self.record_event(j, 'wee_slack_process_method', 'http')
self.receive_json(json.dumps(j))
self.delete_context(data)
except:
dbg("HTTP REQUEST CALLBACK FAILED", True)
pass
# We got an empty reply and this is weird so just ditch it and retry
else:
dbg("length was zero, probably a bug..")
self.delete_context(data)
self.receive(request_metadata)
elif return_code != -1:
self.reply_buffer.pop(request_metadata.response_id, None)
self.delete_context(data)
if request_metadata.request == 'rtm.start':
w.prnt('', ('Failed connecting to slack team with token starting with {}, ' +
'retrying. If this persists, try increasing slack_timeout.')
.format(request_metadata.token[:15]))
dbg('rtm.start failed with return_code {}. stack:\n{}'
.format(return_code, ''.join(traceback.format_stack())), level=5)
self.receive(request_metadata)
else:
if request_metadata.response_id not in self.reply_buffer:
self.reply_buffer[request_metadata.response_id] = StringIO()
self.reply_buffer[request_metadata.response_id].write(out)
def receive_json(self, data):
"""
complete
        Receives a raw JSON string and unmarshals it
        as a dict, then places it back on the queue for processing.
"""
dbg("RECEIVED JSON of len {}".format(len(data)))
message_json = json.loads(data)
self.queue.append(message_json)
def receive(self, dataobj):
"""
complete
Receives a raw object and places it on the queue for
processing. Object must be known to handle_next or
be JSON.
"""
dbg("RECEIVED FROM QUEUE")
self.queue.append(dataobj)
def receive_slow(self, dataobj):
"""
complete
Receives a raw object and places it on the slow queue for
processing. Object must be known to handle_next or
be JSON.
"""
dbg("RECEIVED FROM QUEUE")
self.slow_queue.append(dataobj)
def handle_next(self):
"""
complete
Main handler of the EventRouter. This is called repeatedly
via callback to drain events from the queue. It also attaches
useful metadata and context to events as they are processed.
"""
wanted_interval = 100
if len(self.slow_queue) > 0 or len(self.queue) > 0:
wanted_interval = 10
if self.handle_next_hook is None or wanted_interval != self.handle_next_hook_interval:
if self.handle_next_hook:
w.unhook(self.handle_next_hook)
self.handle_next_hook = w.hook_timer(wanted_interval, 0, 0, "handle_next", "")
self.handle_next_hook_interval = wanted_interval
if len(self.slow_queue) > 0 and ((self.slow_queue_timer + 1) < time.time()):
# for q in self.slow_queue[0]:
dbg("from slow queue", 0)
self.queue.append(self.slow_queue.pop())
# self.slow_queue = []
self.slow_queue_timer = time.time()
if len(self.queue) > 0:
j = self.queue.pop(0)
# Reply is a special case of a json reply from websocket.
kwargs = {}
if isinstance(j, SlackRequest):
if j.should_try():
if j.retry_ready():
local_process_async_slack_api_request(j, self)
else:
self.slow_queue.append(j)
else:
dbg("Max retries for Slackrequest")
else:
if "reply_to" in j:
dbg("SET FROM REPLY")
function_name = "reply"
elif "type" in j:
dbg("SET FROM type")
function_name = j["type"]
elif "wee_slack_process_method" in j:
dbg("SET FROM META")
function_name = j["wee_slack_process_method"]
else:
dbg("SET FROM NADA")
function_name = "unknown"
# Here we are passing the actual objects. No more lookups.
meta = j.get("wee_slack_metadata", None)
if meta:
try:
if isinstance(meta, basestring):
dbg("string of metadata")
team = meta.get("team", None)
if team:
kwargs["team"] = self.teams[team]
if "user" in j:
kwargs["user"] = self.teams[team].users[j["user"]]
if "channel" in j:
kwargs["channel"] = self.teams[team].channels[j["channel"]]
except:
dbg("metadata failure")
dbg("running {}".format(function_name))
if function_name.startswith("local_") and function_name in self.local_proc:
self.local_proc[function_name](j, self, **kwargs)
elif function_name in self.proc:
self.proc[function_name](j, self, **kwargs)
elif function_name in self.handlers:
self.handlers[function_name](j, self, **kwargs)
else:
dbg("Callback not implemented for event: {}".format(function_name))
def handle_next(*args):
"""
complete
This is just a place to call the event router globally.
This is a dirty hack. There must be a better way.
"""
try:
EVENTROUTER.handle_next()
except:
if config.debug_mode:
traceback.print_exc()
else:
pass
return w.WEECHAT_RC_OK
class WeechatController(object):
"""
Encapsulates our interaction with weechat
"""
def __init__(self, eventrouter):
self.eventrouter = eventrouter
self.buffers = {}
self.previous_buffer = None
self.buffer_list_stale = False
def iter_buffers(self):
for b in self.buffers:
yield (b, self.buffers[b])
def register_buffer(self, buffer_ptr, channel):
"""
complete
Adds a weechat buffer to the list of handled buffers for this EventRouter
"""
if isinstance(buffer_ptr, basestring):
self.buffers[buffer_ptr] = channel
else:
raise InvalidType(type(buffer_ptr))
def unregister_buffer(self, buffer_ptr, update_remote=False, close_buffer=False):
"""
complete
        Removes a weechat buffer from the list of handled buffers for this EventRouter
"""
channel = self.buffers.get(buffer_ptr)
if channel:
channel.destroy_buffer(update_remote)
del self.buffers[buffer_ptr]
if close_buffer:
w.buffer_close(buffer_ptr)
def get_channel_from_buffer_ptr(self, buffer_ptr):
return self.buffers.get(buffer_ptr, None)
def get_all(self, buffer_ptr):
return self.buffers
def get_previous_buffer_ptr(self):
return self.previous_buffer
def set_previous_buffer(self, data):
self.previous_buffer = data
def check_refresh_buffer_list(self):
return self.buffer_list_stale and self.last_buffer_list_update + 1 < time.time()
def set_refresh_buffer_list(self, setting):
self.buffer_list_stale = setting
###### New Local Processors
def local_process_async_slack_api_request(request, event_router):
"""
complete
Sends an API request to Slack. You'll need to give this a well formed SlackRequest object.
DEBUGGING!!! The context here cannot be very large. Weechat will crash.
"""
if not event_router.shutting_down:
weechat_request = 'url:{}'.format(request.request_string())
weechat_request += '&nonce={}'.format(''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(4)))
params = {'useragent': 'wee_slack {}'.format(SCRIPT_VERSION)}
request.tried()
context = event_router.store_context(request)
# TODO: let flashcode know about this bug - i have to 'clear' the hashtable or retry requests fail
w.hook_process_hashtable('url:', params, config.slack_timeout, "", context)
w.hook_process_hashtable(weechat_request, params, config.slack_timeout, "receive_httprequest_callback", context)
###### New Callbacks
@utf8_decode
def receive_httprequest_callback(data, command, return_code, out, err):
"""
complete
This is a dirty hack. There must be a better way.
"""
# def url_processor_cb(data, command, return_code, out, err):
EVENTROUTER.receive_httprequest_callback(data, command, return_code, out, err)
return w.WEECHAT_RC_OK
@utf8_decode
def receive_ws_callback(*args):
"""
complete
The first arg is all we want here. It contains the team
hash which is set when we _hook the descriptor.
This is a dirty hack. There must be a better way.
"""
EVENTROUTER.receive_ws_callback(args[0])
return w.WEECHAT_RC_OK
@utf8_decode
def ws_ping_cb(data, remaining_calls):
for team in EVENTROUTER.teams.values():
if team.ws:
team.ws.ping()
return w.WEECHAT_RC_OK
@utf8_decode
def reconnect_callback(*args):
EVENTROUTER.reconnect_if_disconnected()
return w.WEECHAT_RC_OK
@utf8_decode
def buffer_closing_callback(signal, sig_type, data):
"""
Receives a callback from weechat when a buffer is being closed.
"""
EVENTROUTER.weechat_controller.unregister_buffer(data, True, False)
return w.WEECHAT_RC_OK
@utf8_decode
def buffer_input_callback(signal, buffer_ptr, data):
"""
incomplete
Handles everything a user types in the input bar. In our case
this includes add/remove reactions, modifying messages, and
sending messages.
"""
eventrouter = eval(signal)
channel = eventrouter.weechat_controller.get_channel_from_buffer_ptr(buffer_ptr)
if not channel:
return w.WEECHAT_RC_ERROR
def get_id(message_id):
if not message_id:
return 1
elif message_id[0] == "$":
return message_id[1:]
else:
return int(message_id)
message_id_regex = "(\d*|\$[0-9a-fA-F]{3,})"
reaction = re.match("^{}(\+|-):(.*):\s*$".format(message_id_regex), data)
substitute = re.match("^{}s/".format(message_id_regex), data)
if reaction:
if reaction.group(2) == "+":
channel.send_add_reaction(get_id(reaction.group(1)), reaction.group(3))
elif reaction.group(2) == "-":
channel.send_remove_reaction(get_id(reaction.group(1)), reaction.group(3))
elif substitute:
msg_id = get_id(substitute.group(1))
try:
old, new, flags = re.split(r'(?<!\\)/', data)[1:]
except ValueError:
pass
else:
# Replacement string in re.sub() is a string, not a regex, so get
# rid of escapes.
new = new.replace(r'\/', '/')
old = old.replace(r'\/', '/')
channel.edit_nth_previous_message(msg_id, old, new, flags)
else:
if data.startswith(('//', ' ')):
data = data[1:]
channel.send_message(data)
# this is probably wrong channel.mark_read(update_remote=True, force=True)
return w.WEECHAT_RC_OK
# Workaround for supporting multiline messages. It intercepts before the input
# callback is called, as this is called with the whole message, while it is
# normally split on newline before being sent to buffer_input_callback
def input_text_for_buffer_cb(data, modifier, current_buffer, string):
if current_buffer not in EVENTROUTER.weechat_controller.buffers:
return string
message = decode_from_utf8(string)
if not message.startswith("/") and "\n" in message:
buffer_input_callback("EVENTROUTER", current_buffer, message)
return ""
return string
@utf8_decode
def buffer_switch_callback(signal, sig_type, data):
"""
incomplete
Every time we change channels in weechat, we call this to:
1) set read marker 2) determine if we have already populated
channel history data
"""
eventrouter = eval(signal)
prev_buffer_ptr = eventrouter.weechat_controller.get_previous_buffer_ptr()
# this is to see if we need to gray out things in the buffer list
prev = eventrouter.weechat_controller.get_channel_from_buffer_ptr(prev_buffer_ptr)
if prev:
prev.mark_read()
new_channel = eventrouter.weechat_controller.get_channel_from_buffer_ptr(data)
if new_channel:
if not new_channel.got_history:
new_channel.get_history()
eventrouter.weechat_controller.set_previous_buffer(data)
return w.WEECHAT_RC_OK
@utf8_decode
def buffer_list_update_callback(data, somecount):
"""
incomplete
A simple timer-based callback that will update the buffer list
if needed. We only do this max 1x per second, as otherwise it
uses a lot of cpu for minimal changes. We use buffer short names
to indicate typing via "#channel" <-> ">channel" and
user presence via " name" <-> "+name".
"""
eventrouter = eval(data)
# global buffer_list_update
for b in eventrouter.weechat_controller.iter_buffers():
b[1].refresh()
# buffer_list_update = True
# if eventrouter.weechat_controller.check_refresh_buffer_list():
# # gray_check = False
# # if len(servers) > 1:
# # gray_check = True
# eventrouter.weechat_controller.set_refresh_buffer_list(False)
return w.WEECHAT_RC_OK
def quit_notification_callback(signal, sig_type, data):
stop_talking_to_slack()
return w.WEECHAT_RC_OK
@utf8_decode
def typing_notification_cb(data, signal, current_buffer):
msg = w.buffer_get_string(current_buffer, "input")
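    # Only send a typing indicator for non-command input longer than 8
    # characters, and at most once every 4 seconds.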
if len(msg) > 8 and msg[:1] != "/":
global typing_timer
now = time.time()
if typing_timer + 4 < now:
channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer, None)
if channel and channel.type != "thread":
identifier = channel.identifier
request = {"type": "typing", "channel": identifier}
channel.team.send_to_websocket(request, expect_reply=False)
typing_timer = now
return w.WEECHAT_RC_OK
@utf8_decode
def typing_update_cb(data, remaining_calls):
w.bar_item_update("slack_typing_notice")
return w.WEECHAT_RC_OK
@utf8_decode
def slack_never_away_cb(data, remaining_calls):
if config.never_away:
for t in EVENTROUTER.teams.values():
slackbot = t.get_channel_map()['Slackbot']
channel = t.channels[slackbot]
request = {"type": "typing", "channel": channel.identifier}
channel.team.send_to_websocket(request, expect_reply=False)
return w.WEECHAT_RC_OK
@utf8_decode
def typing_bar_item_cb(data, item, current_window, current_buffer, extra_info):
"""
    Provides a bar item indicating who is typing in the current channel AND
    who is typing a DM to you globally.
"""
typers = []
current_channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer, None)
# first look for people typing in this channel
if current_channel:
        # this try is mostly because server buffers don't implement is_someone_typing
try:
if current_channel.type != 'im' and current_channel.is_someone_typing():
typers += current_channel.get_typing_list()
except:
pass
# here is where we notify you that someone is typing in DM
# regardless of which buffer you are in currently
for t in EVENTROUTER.teams.values():
for channel in t.channels.values():
if channel.type == "im":
if channel.is_someone_typing():
typers.append("D/" + channel.slack_name)
pass
typing = ", ".join(typers)
if typing != "":
typing = w.color('yellow') + "typing: " + typing
return typing
@utf8_decode
def nick_completion_cb(data, completion_item, current_buffer, completion):
"""
Adds all @-prefixed nicks to completion list
"""
current_channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer, None)
if current_channel is None or current_channel.members is None:
return w.WEECHAT_RC_OK
for m in current_channel.members:
u = current_channel.team.users.get(m, None)
if u:
w.hook_completion_list_add(completion, "@" + u.name, 1, w.WEECHAT_LIST_POS_SORT)
return w.WEECHAT_RC_OK
@utf8_decode
def emoji_completion_cb(data, completion_item, current_buffer, completion):
"""
Adds all :-prefixed emoji to completion list
"""
current_channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer, None)
if current_channel is None:
return w.WEECHAT_RC_OK
for e in current_channel.team.emoji_completions:
w.hook_completion_list_add(completion, ":" + e + ":", 0, w.WEECHAT_LIST_POS_SORT)
return w.WEECHAT_RC_OK
@utf8_decode
def complete_next_cb(data, current_buffer, command):
"""Extract current word, if it is equal to a nick, prefix it with @ and
rely on nick_completion_cb adding the @-prefixed versions to the
completion lists, then let Weechat's internal completion do its
thing
"""
current_channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer, None)
# channel = channels.find(current_buffer)
if not hasattr(current_channel, 'members') or current_channel is None or current_channel.members is None:
return w.WEECHAT_RC_OK
line_input = w.buffer_get_string(current_buffer, "input")
current_pos = w.buffer_get_integer(current_buffer, "input_pos") - 1
input_length = w.buffer_get_integer(current_buffer, "input_length")
word_start = 0
word_end = input_length
# If we're on a non-word, look left for something to complete
while current_pos >= 0 and line_input[current_pos] != '@' and not line_input[current_pos].isalnum():
current_pos = current_pos - 1
if current_pos < 0:
current_pos = 0
for l in range(current_pos, 0, -1):
if line_input[l] != '@' and not line_input[l].isalnum():
word_start = l + 1
break
for l in range(current_pos, input_length):
if not line_input[l].isalnum():
word_end = l
break
word = line_input[word_start:word_end]
for m in current_channel.members:
u = current_channel.team.users.get(m, None)
if u and u.name == word:
            # Here, we cheat. Insert a @ in front and rely on the @
# nicks being in the completion list
w.buffer_set(current_buffer, "input", line_input[:word_start] + "@" + line_input[word_start:])
w.buffer_set(current_buffer, "input_pos", str(w.buffer_get_integer(current_buffer, "input_pos") + 1))
return w.WEECHAT_RC_OK_EAT
return w.WEECHAT_RC_OK
def script_unloaded():
stop_talking_to_slack()
return w.WEECHAT_RC_OK
def stop_talking_to_slack():
"""
complete
Prevents a race condition where quitting closes buffers
which triggers leaving the channel because of how close
buffer is handled
"""
EVENTROUTER.shutdown()
return w.WEECHAT_RC_OK
##### New Classes
class SlackRequest(object):
"""
complete
Encapsulates a Slack api request. Valuable as an object that we can add to the queue and/or retry.
    Makes a SHA of the request url and current time so we can re-tag this on the way back through.
"""
def __init__(self, token, request, post_data={}, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
self.tries = 0
self.start_time = time.time()
self.domain = 'api.slack.com'
self.request = request
self.request_normalized = re.sub(r'\W+', '', request)
self.token = token
post_data["token"] = token
self.post_data = post_data
self.params = {'useragent': 'wee_slack {}'.format(SCRIPT_VERSION)}
self.url = 'https://{}/api/{}?{}'.format(self.domain, request, urllib.urlencode(encode_to_utf8(post_data)))
self.response_id = sha.sha("{}{}".format(self.url, self.start_time)).hexdigest()
self.retries = kwargs.get('retries', 3)
# def __repr__(self):
# return "URL: {} Tries: {} ID: {}".format(self.url, self.tries, self.response_id)
def request_string(self):
return "{}".format(self.url)
def tried(self):
self.tries += 1
self.response_id = sha.sha("{}{}".format(self.url, time.time())).hexdigest()
def should_try(self):
return self.tries < self.retries
def retry_ready(self):
return (self.start_time + (self.tries**2)) < time.time()
class SlackTeam(object):
"""
incomplete
    Team object under which users and channels live. Does lots.
"""
def __init__(self, eventrouter, token, websocket_url, team_info, nick, myidentifier, users, bots, channels, **kwargs):
self.identifier = team_info["id"]
self.ws_url = websocket_url
self.connected = False
self.connecting = False
self.ws = None
self.ws_counter = 0
self.ws_replies = {}
self.eventrouter = eventrouter
self.token = token
self.team = self
self.subdomain = team_info["domain"]
self.domain = self.subdomain + ".slack.com"
self.preferred_name = self.domain
self.nick = nick
self.myidentifier = myidentifier
try:
if self.channels:
for c in channels.keys():
if not self.channels.get(c):
self.channels[c] = channels[c]
except:
self.channels = channels
self.users = users
self.bots = bots
self.team_hash = SlackTeam.generate_team_hash(self.nick, self.subdomain)
self.name = self.domain
self.channel_buffer = None
self.got_history = True
self.create_buffer()
self.set_muted_channels(kwargs.get('muted_channels', ""))
for c in self.channels.keys():
channels[c].set_related_server(self)
channels[c].check_should_open()
# self.channel_set_related_server(c)
# Last step is to make sure my nickname is the set color
self.users[self.myidentifier].force_color(w.config_string(w.config_get('weechat.color.chat_nick_self')))
# This highlight step must happen after we have set related server
self.set_highlight_words(kwargs.get('highlight_words', ""))
self.load_emoji_completions()
self.type = "team"
def __repr__(self):
return "domain={} nick={}".format(self.subdomain, self.nick)
def __eq__(self, compare_str):
if compare_str == self.token or compare_str == self.domain or compare_str == self.subdomain:
return True
else:
return False
@property
def members(self):
return self.users.viewkeys()
def load_emoji_completions(self):
self.emoji_completions = list(EMOJI)
if self.emoji_completions:
s = SlackRequest(self.token, "emoji.list", {}, team_hash=self.team_hash)
self.eventrouter.receive(s)
def add_channel(self, channel):
self.channels[channel["id"]] = channel
channel.set_related_server(self)
# def connect_request_generate(self):
# return SlackRequest(self.token, 'rtm.start', {})
# def close_all_buffers(self):
# for channel in self.channels:
# self.eventrouter.weechat_controller.unregister_buffer(channel.channel_buffer, update_remote=False, close_buffer=True)
# #also close this server buffer
# self.eventrouter.weechat_controller.unregister_buffer(self.channel_buffer, update_remote=False, close_buffer=True)
def create_buffer(self):
if not self.channel_buffer:
alias = config.server_aliases.get(self.subdomain)
if alias:
self.preferred_name = alias
elif config.short_buffer_names:
self.preferred_name = self.subdomain
else:
self.preferred_name = self.domain
self.channel_buffer = w.buffer_new(self.preferred_name, "buffer_input_callback", "EVENTROUTER", "", "")
self.eventrouter.weechat_controller.register_buffer(self.channel_buffer, self)
w.buffer_set(self.channel_buffer, "localvar_set_type", 'server')
w.buffer_set(self.channel_buffer, "localvar_set_nick", self.nick)
w.buffer_set(self.channel_buffer, "localvar_set_server", self.preferred_name)
if w.config_string(w.config_get('irc.look.server_buffer')) == 'merge_with_core':
w.buffer_merge(self.channel_buffer, w.buffer_search_main())
def destroy_buffer(self, update_remote):
pass
def set_muted_channels(self, muted_str):
self.muted_channels = {x for x in muted_str.split(',') if x}
for channel in self.channels.itervalues():
channel.set_highlights()
def set_highlight_words(self, highlight_str):
self.highlight_words = {x for x in highlight_str.split(',') if x}
for channel in self.channels.itervalues():
channel.set_highlights()
def formatted_name(self, **kwargs):
return self.domain
def buffer_prnt(self, data, message=False):
tag_name = "team_message" if message else "team_info"
w.prnt_date_tags(self.channel_buffer, SlackTS().major, tag(tag_name), data)
def send_message(self, message, subtype=None, request_dict_ext={}):
w.prnt("", "ERROR: Sending a message in the team buffer is not supported")
def find_channel_by_members(self, members, channel_type=None):
for channel in self.channels.itervalues():
if channel.get_members() == members and (
channel_type is None or channel.type == channel_type):
return channel
def get_channel_map(self):
return {v.slack_name: k for k, v in self.channels.iteritems()}
def get_username_map(self):
return {v.name: k for k, v in self.users.iteritems()}
def get_team_hash(self):
return self.team_hash
@staticmethod
def generate_team_hash(nick, subdomain):
return str(sha.sha("{}{}".format(nick, subdomain)).hexdigest())
def refresh(self):
self.rename()
def rename(self):
pass
# def attach_websocket(self, ws):
# self.ws = ws
def is_user_present(self, user_id):
user = self.users.get(user_id)
if user and user.presence == 'active':
return True
else:
return False
def mark_read(self, ts=None, update_remote=True, force=False):
pass
def connect(self):
if not self.connected and not self.connecting:
self.connecting = True
if self.ws_url:
try:
# only http proxy is currently supported
proxy = ProxyWrapper()
if proxy.has_proxy == True:
ws = create_connection(self.ws_url, sslopt=sslopt_ca_certs, http_proxy_host=proxy.proxy_address, http_proxy_port=proxy.proxy_port, http_proxy_auth=(proxy.proxy_user, proxy.proxy_password))
else:
ws = create_connection(self.ws_url, sslopt=sslopt_ca_certs)
self.hook = w.hook_fd(ws.sock._sock.fileno(), 1, 0, 0, "receive_ws_callback", self.get_team_hash())
ws.sock.setblocking(0)
self.ws = ws
# self.attach_websocket(ws)
self.set_connected()
self.connecting = False
except:
w.prnt(self.channel_buffer,
'Failed connecting to slack team {}, retrying.'.format(self.domain))
dbg('connect failed with exception:\n{}'.format(format_exc_tb()), level=5)
self.connecting = False
return False
else:
# The fast reconnect failed, so start over-ish
for chan in self.channels:
self.channels[chan].got_history = False
s = initiate_connection(self.token, retries=999)
self.eventrouter.receive(s)
self.connecting = False
# del self.eventrouter.teams[self.get_team_hash()]
self.set_reconnect_url(None)
def set_connected(self):
self.connected = True
def set_disconnected(self):
w.unhook(self.hook)
self.connected = False
def set_reconnect_url(self, url):
self.ws_url = url
def next_ws_transaction_id(self):
self.ws_counter += 1
return self.ws_counter
def send_to_websocket(self, data, expect_reply=True):
data["id"] = self.next_ws_transaction_id()
message = json.dumps(data)
try:
if expect_reply:
self.ws_replies[data["id"]] = data
self.ws.send(encode_to_utf8(message))
dbg("Sent {}...".format(message[:100]))
except:
w.prnt(self.channel_buffer,
'Lost connection to slack team {} (on send), reconnecting.'.format(self.domain))
dbg('send_to_websocket failed with data: `{}` and exception:\n{}'
.format(message, format_exc_tb()), level=5)
self.set_disconnected()
def update_member_presence(self, user, presence):
user.presence = presence
for c in self.channels:
c = self.channels[c]
if user.id in c.members:
c.update_nicklist(user.id)
def subscribe_users_presence(self):
# FIXME: There is a limitation in the API to the size of the
# json we can send.
# We should try to be smarter to fetch the users whom we want to
# subscribe to.
users = self.users.keys()[0:750]
self.send_to_websocket({
"type": "presence_sub",
"ids": users,
}, expect_reply=False)
class SlackChannelCommon(object):
def send_add_reaction(self, msg_id, reaction):
self.send_change_reaction("reactions.add", msg_id, reaction)
def send_remove_reaction(self, msg_id, reaction):
self.send_change_reaction("reactions.remove", msg_id, reaction)
def send_change_reaction(self, method, msg_id, reaction):
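        # msg_id is either a short message hash (string) or a 1-based index
        # counting back from the most recent message.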
if type(msg_id) is not int:
if msg_id in self.hashed_messages:
timestamp = str(self.hashed_messages[msg_id].ts)
else:
return
elif 0 < msg_id <= len(self.messages):
keys = self.main_message_keys_reversed()
timestamp = next(islice(keys, msg_id - 1, None))
else:
return
data = {"channel": self.identifier, "timestamp": timestamp, "name": reaction}
s = SlackRequest(self.team.token, method, data)
self.eventrouter.receive(s)
def edit_nth_previous_message(self, msg_id, old, new, flags):
message = self.my_last_message(msg_id)
if message is None:
return
if new == "" and old == "":
s = SlackRequest(self.team.token, "chat.delete", {"channel": self.identifier, "ts": message['ts']}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
else:
num_replace = 1
if 'g' in flags:
num_replace = 0
new_message = re.sub(old, new, message["text"], num_replace)
if new_message != message["text"]:
s = SlackRequest(self.team.token, "chat.update", {"channel": self.identifier, "ts": message['ts'], "text": new_message}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
def my_last_message(self, msg_id):
if type(msg_id) is not int:
m = self.hashed_messages.get(msg_id)
if m is not None and m.message_json.get("user") == self.team.myidentifier:
return m.message_json
else:
for key in self.main_message_keys_reversed():
m = self.messages[key]
if m.message_json.get("user") == self.team.myidentifier:
msg_id -= 1
if msg_id == 0:
return m.message_json
def change_message(self, ts, message_json=None, text=None):
ts = SlackTS(ts)
m = self.messages.get(ts)
if not m:
return
if message_json:
m.message_json.update(message_json)
if text:
m.change_text(text)
if type(m) == SlackMessage or config.thread_messages_in_channel:
new_text = self.render(m, force=True)
modify_buffer_line(self.channel_buffer, ts, new_text)
if type(m) == SlackThreadMessage:
thread_channel = m.parent_message.thread_channel
if thread_channel and thread_channel.active:
new_text = thread_channel.render(m, force=True)
modify_buffer_line(thread_channel.channel_buffer, ts, new_text)
def hash_message(self, ts):
ts = SlackTS(ts)
def calc_hash(msg):
return sha.sha(str(msg.ts)).hexdigest()
if ts in self.messages and not self.messages[ts].hash:
message = self.messages[ts]
tshash = calc_hash(message)
hl = 3
shorthash = tshash[:hl]
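            # Grow the prefix until no already-assigned hash starts with it.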
while any(x.startswith(shorthash) for x in self.hashed_messages):
hl += 1
shorthash = tshash[:hl]
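            # If a message already owns the one-character-shorter prefix, move
            # it to a longer hash so message-hash prefixes stay unambiguous.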
if shorthash[:-1] in self.hashed_messages:
col_msg = self.hashed_messages.pop(shorthash[:-1])
col_new_hash = calc_hash(col_msg)[:hl]
col_msg.hash = col_new_hash
self.hashed_messages[col_new_hash] = col_msg
self.change_message(str(col_msg.ts))
if col_msg.thread_channel:
col_msg.thread_channel.rename()
self.hashed_messages[shorthash] = message
message.hash = shorthash
return shorthash
elif ts in self.messages:
return self.messages[ts].hash
class SlackChannel(SlackChannelCommon):
"""
Represents an individual slack channel.
"""
def __init__(self, eventrouter, **kwargs):
# We require these two things for a valid object,
# the rest we can just learn from slack
self.active = False
for key, value in kwargs.items():
setattr(self, key, value)
self.eventrouter = eventrouter
self.slack_name = kwargs["name"]
self.slack_purpose = kwargs.get("purpose", {"value": ""})
self.topic = kwargs.get("topic", {"value": ""})
self.identifier = kwargs["id"]
self.last_read = SlackTS(kwargs.get("last_read", SlackTS()))
self.channel_buffer = None
self.team = kwargs.get('team', None)
self.got_history = False
self.messages = OrderedDict()
self.hashed_messages = {}
self.new_messages = False
self.typing = {}
self.type = 'channel'
self.set_name(self.slack_name)
# short name relates to the localvar we change for typing indication
self.current_short_name = self.name
self.set_members(kwargs.get('members', []))
self.unread_count_display = 0
self.last_line_from = None
def __eq__(self, compare_str):
if compare_str == self.slack_name or compare_str == self.formatted_name() or compare_str == self.formatted_name(style="long_default"):
return True
else:
return False
def __repr__(self):
return "Name:{} Identifier:{}".format(self.name, self.identifier)
@property
def muted(self):
return self.identifier in self.team.muted_channels
def set_name(self, slack_name):
self.name = "#" + slack_name
def refresh(self):
return self.rename()
def rename(self):
if self.channel_buffer:
new_name = self.formatted_name(typing=self.is_someone_typing(), style="sidebar")
if self.current_short_name != new_name:
self.current_short_name = new_name
w.buffer_set(self.channel_buffer, "short_name", new_name)
return True
return False
def set_members(self, members):
self.members = set(members)
self.update_nicklist()
def get_members(self):
return self.members
def set_unread_count_display(self, count):
self.unread_count_display = count
self.new_messages = bool(self.unread_count_display)
if self.muted and config.muted_channels_activity != "all":
return
for c in range(self.unread_count_display):
if self.type in ["im", "mpim"]:
w.buffer_set(self.channel_buffer, "hotlist", "2")
else:
w.buffer_set(self.channel_buffer, "hotlist", "1")
def formatted_name(self, style="default", typing=False, **kwargs):
if typing and config.channel_name_typing_indicator:
prepend = ">"
elif self.type == "group":
prepend = config.group_name_prefix
elif self.type == "shared":
prepend = config.shared_name_prefix
else:
prepend = "#"
sidebar_color = w.color(config.color_buflist_muted_channels) if self.muted else ""
select = {
"default": prepend + self.slack_name,
"sidebar": sidebar_color + prepend + self.slack_name,
"base": self.slack_name,
"long_default": "{}.{}{}".format(self.team.preferred_name, prepend, self.slack_name),
"long_base": "{}.{}".format(self.team.preferred_name, self.slack_name),
}
return select[style]
def render_topic(self):
if self.channel_buffer:
topic = self.topic['value'] or self.slack_purpose['value']
topic = unhtmlescape(unfurl_refs(topic, ignore_alt_text=False))
w.buffer_set(self.channel_buffer, "title", topic)
def set_topic(self, value):
self.topic = {"value": value}
self.render_topic()
def update_from_message_json(self, message_json):
for key, value in message_json.items():
setattr(self, key, value)
def open(self, update_remote=True):
if update_remote:
if "join" in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["join"], {"channel": self.identifier}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
self.create_buffer()
self.active = True
self.get_history()
def check_should_open(self, force=False):
if hasattr(self, "is_archived") and self.is_archived:
return
if force:
self.create_buffer()
return
# Only check is_member if is_open is not set, because in some cases
# (e.g. group DMs), is_member should be ignored in favor of is_open.
is_open = self.is_open if hasattr(self, "is_open") else self.is_member
if is_open or self.unread_count_display:
self.create_buffer()
if config.background_load_all_history:
self.get_history(slow_queue=True)
def set_related_server(self, team):
self.team = team
def mentions(self):
return {'@' + self.team.nick, self.team.myidentifier}
def highlights(self):
personal_highlights = self.team.highlight_words.union(self.mentions())
if self.muted and config.muted_channels_activity == "personal_highlights":
return personal_highlights
else:
return personal_highlights.union({"!here", "!channel", "!everyone"})
def set_highlights(self):
# highlight my own name and any set highlights
if self.channel_buffer:
h_str = ",".join(self.highlights())
w.buffer_set(self.channel_buffer, "highlight_words", h_str)
def create_buffer(self):
"""
Creates the weechat buffer where the channel magic happens.
"""
if not self.channel_buffer:
self.active = True
self.channel_buffer = w.buffer_new(self.formatted_name(style="long_default"), "buffer_input_callback", "EVENTROUTER", "", "")
self.eventrouter.weechat_controller.register_buffer(self.channel_buffer, self)
if self.type == "im":
w.buffer_set(self.channel_buffer, "localvar_set_type", 'private')
else:
w.buffer_set(self.channel_buffer, "localvar_set_type", 'channel')
w.buffer_set(self.channel_buffer, "localvar_set_channel", self.formatted_name())
w.buffer_set(self.channel_buffer, "localvar_set_nick", self.team.nick)
w.buffer_set(self.channel_buffer, "short_name", self.formatted_name(style="sidebar", enable_color=True))
self.render_topic()
self.eventrouter.weechat_controller.set_refresh_buffer_list(True)
if self.channel_buffer:
# if self.team.server_alias:
# w.buffer_set(self.channel_buffer, "localvar_set_server", self.team.server_alias)
# else:
w.buffer_set(self.channel_buffer, "localvar_set_server", self.team.preferred_name)
# else:
# self.eventrouter.weechat_controller.register_buffer(self.channel_buffer, self)
self.update_nicklist()
if "info" in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["info"], {"channel": self.identifier}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
if self.type == "im":
if "join" in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["join"], {"users": self.user, "return_im": True}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
def clear_messages(self):
w.buffer_clear(self.channel_buffer)
self.messages = OrderedDict()
self.hashed_messages = {}
self.got_history = False
def destroy_buffer(self, update_remote):
self.clear_messages()
self.channel_buffer = None
self.active = False
if update_remote and not self.eventrouter.shutting_down:
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["leave"], {"channel": self.identifier}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
def buffer_prnt(self, nick, text, timestamp=str(time.time()), tagset=None, tag_nick=None, **kwargs):
data = "{}\t{}".format(format_nick(nick, self.last_line_from), text)
self.last_line_from = nick
ts = SlackTS(timestamp)
last_read = SlackTS(self.last_read)
# without this, DMs won't open automatically
if not self.channel_buffer and ts > last_read:
self.open(update_remote=False)
if self.channel_buffer:
# backlog messages - we will update the read marker as we print these
backlog = True if ts <= last_read else False
if tagset:
self.new_messages = True
# we have to infer the tagset because we weren't told
elif ts <= last_read:
tagset = "backlog"
elif self.type in ["im", "mpim"]:
if tag_nick != self.team.nick:
tagset = "dm"
self.new_messages = True
else:
tagset = "dmfromme"
else:
tagset = "default"
self.new_messages = True
tags = tag(tagset, user=tag_nick, muted=self.muted)
try:
if (config.unhide_buffers_with_activity
and not self.is_visible() and not self.muted):
w.buffer_set(self.channel_buffer, "hidden", "0")
w.prnt_date_tags(self.channel_buffer, ts.major, tags, data)
modify_last_print_time(self.channel_buffer, ts.minor)
if backlog or tag_nick == self.team.nick:
self.mark_read(ts, update_remote=False, force=True)
except:
dbg("Problem processing buffer_prnt")
def send_message(self, message, subtype=None, request_dict_ext={}):
message = linkify_text(message, self.team, self)
dbg(message)
if subtype == 'me_message':
s = SlackRequest(self.team.token, "chat.meMessage",
{"channel": self.identifier, "text": message},
team_hash=self.team.team_hash,
channel_identifier=self.identifier)
self.eventrouter.receive(s)
else:
request = {"type": "message", "channel": self.identifier,
"text": message, "user": self.team.myidentifier}
request.update(request_dict_ext)
self.team.send_to_websocket(request)
def store_message(self, message, team, from_me=False):
if not self.active:
return
if from_me:
message.message_json["user"] = team.myidentifier
self.messages[SlackTS(message.ts)] = message
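        # Trim the local cache to the newest SCROLLBACK_SIZE messages and drop
        # the short hashes of anything older.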
sorted_messages = sorted(self.messages.items())
messages_to_delete = sorted_messages[:-SCROLLBACK_SIZE]
messages_to_keep = sorted_messages[-SCROLLBACK_SIZE:]
for message_hash in [m[1].hash for m in messages_to_delete]:
if message_hash in self.hashed_messages:
del self.hashed_messages[message_hash]
self.messages = OrderedDict(messages_to_keep)
def is_visible(self):
return w.buffer_get_integer(self.channel_buffer, "hidden") == 0
def get_history(self, slow_queue=False):
if not self.got_history:
# we have probably reconnected. flush the buffer
if self.team.connected:
self.clear_messages()
self.buffer_prnt('', 'getting channel history...', tagset='backlog')
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["history"], {"channel": self.identifier, "count": BACKLOG_SIZE}, team_hash=self.team.team_hash, channel_identifier=self.identifier, clear=True)
if not slow_queue:
self.eventrouter.receive(s)
else:
self.eventrouter.receive_slow(s)
self.got_history = True
def main_message_keys_reversed(self):
return (key for key in reversed(self.messages)
if type(self.messages[key]) == SlackMessage)
# Typing related
def set_typing(self, user):
if self.channel_buffer and self.is_visible():
self.typing[user] = time.time()
self.eventrouter.weechat_controller.set_refresh_buffer_list(True)
def unset_typing(self, user):
if self.channel_buffer and self.is_visible():
u = self.typing.get(user, None)
if u:
self.eventrouter.weechat_controller.set_refresh_buffer_list(True)
def is_someone_typing(self):
"""
        Walks through the dict of typing folks in a channel and returns
        quickly if any of them is actively typing. If none are,
        clears the dict and returns false.
"""
for user, timestamp in self.typing.iteritems():
if timestamp + 4 > time.time():
return True
if len(self.typing) > 0:
self.typing = {}
self.eventrouter.weechat_controller.set_refresh_buffer_list(True)
return False
def get_typing_list(self):
"""
Returns the names of everyone in the channel who is currently typing.
"""
typing = []
for user, timestamp in self.typing.iteritems():
if timestamp + 4 > time.time():
typing.append(user)
else:
del self.typing[user]
return typing
def mark_read(self, ts=None, update_remote=True, force=False):
if self.new_messages or force:
if self.channel_buffer:
w.buffer_set(self.channel_buffer, "unread", "")
w.buffer_set(self.channel_buffer, "hotlist", "-1")
if not ts:
ts = next(reversed(self.messages), SlackTS())
if ts > self.last_read:
self.last_read = ts
if update_remote:
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["mark"], {"channel": self.identifier, "ts": ts}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
self.new_messages = False
def user_joined(self, user_id):
# ugly hack - for some reason this gets turned into a list
self.members = set(self.members)
self.members.add(user_id)
self.update_nicklist(user_id)
def user_left(self, user_id):
self.members.discard(user_id)
self.update_nicklist(user_id)
def update_nicklist(self, user=None):
if not self.channel_buffer:
return
if self.type not in ["channel", "group", "mpim", "shared"]:
return
w.buffer_set(self.channel_buffer, "nicklist", "1")
# create nicklists for the current channel if they don't exist
# if they do, use the existing pointer
here = w.nicklist_search_group(self.channel_buffer, '', NICK_GROUP_HERE)
if not here:
here = w.nicklist_add_group(self.channel_buffer, '', NICK_GROUP_HERE, "weechat.color.nicklist_group", 1)
afk = w.nicklist_search_group(self.channel_buffer, '', NICK_GROUP_AWAY)
if not afk:
afk = w.nicklist_add_group(self.channel_buffer, '', NICK_GROUP_AWAY, "weechat.color.nicklist_group", 1)
# Add External nicklist group only for shared channels
if self.type == 'shared':
external = w.nicklist_search_group(self.channel_buffer, '', NICK_GROUP_EXTERNAL)
if not external:
external = w.nicklist_add_group(self.channel_buffer, '', NICK_GROUP_EXTERNAL, 'weechat.color.nicklist_group', 2)
if user and len(self.members) < 1000:
user = self.team.users.get(user)
# External users that have left shared channels won't exist
if not user or user.deleted:
return
nick = w.nicklist_search_nick(self.channel_buffer, "", user.name)
# since this is a change just remove it regardless of where it is
w.nicklist_remove_nick(self.channel_buffer, nick)
# now add it back in to whichever..
nick_group = afk
if user.is_external:
nick_group = external
elif self.team.is_user_present(user.identifier):
nick_group = here
if user.identifier in self.members:
w.nicklist_add_nick(self.channel_buffer, nick_group, user.name, user.color_name, "", "", 1)
# if we didn't get a user, build a complete list. this is expensive.
else:
if len(self.members) < 1000:
try:
for user in self.members:
user = self.team.users.get(user)
if user.deleted:
continue
nick_group = afk
if user.is_external:
nick_group = external
elif self.team.is_user_present(user.identifier):
nick_group = here
w.nicklist_add_nick(self.channel_buffer, nick_group, user.name, user.color_name, "", "", 1)
except:
dbg("DEBUG: {} {} {}".format(self.identifier, self.name, format_exc_only()))
else:
w.nicklist_remove_all(self.channel_buffer)
for fn in ["1| too", "2| many", "3| users", "4| to", "5| show"]:
w.nicklist_add_group(self.channel_buffer, '', fn, w.color('white'), 1)
def render(self, message, force=False):
text = message.render(force)
if isinstance(message, SlackThreadMessage):
return '{}[{}]{} {}'.format(
w.color(config.color_thread_suffix),
message.parent_message.hash or message.parent_message.ts,
w.color('reset'),
text)
return text
class SlackDMChannel(SlackChannel):
"""
Subclass of a normal channel for person-to-person communication, which
has some important differences.
"""
def __init__(self, eventrouter, users, **kwargs):
dmuser = kwargs["user"]
kwargs["name"] = users[dmuser].name if dmuser in users else dmuser
super(SlackDMChannel, self).__init__(eventrouter, **kwargs)
self.type = 'im'
self.update_color()
self.set_name(self.slack_name)
if dmuser in users:
self.set_topic(create_user_status_string(users[dmuser].profile))
def set_related_server(self, team):
super(SlackDMChannel, self).set_related_server(team)
if self.user not in self.team.users:
s = SlackRequest(self.team.token, 'users.info', {'user': self.slack_name}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
def set_name(self, slack_name):
self.name = slack_name
def get_members(self):
return {self.user}
def create_buffer(self):
if not self.channel_buffer:
super(SlackDMChannel, self).create_buffer()
w.buffer_set(self.channel_buffer, "localvar_set_type", 'private')
def update_color(self):
if config.colorize_private_chats:
self.color_name = get_nick_color_name(self.name)
self.color = w.color(self.color_name)
else:
self.color = ""
self.color_name = ""
def formatted_name(self, style="default", typing=False, present=True, enable_color=False, **kwargs):
if config.colorize_private_chats and enable_color:
print_color = self.color
else:
print_color = ""
prepend = ""
if config.show_buflist_presence:
prepend = "+" if present else " "
select = {
"default": self.slack_name,
"sidebar": prepend + self.slack_name,
"base": self.slack_name,
"long_default": "{}.{}".format(self.team.preferred_name, self.slack_name),
"long_base": "{}.{}".format(self.team.preferred_name, self.slack_name),
}
return print_color + select[style]
def open(self, update_remote=True):
self.create_buffer()
self.get_history()
if "info" in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["info"], {"name": self.identifier}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
if update_remote:
if "join" in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["join"], {"users": self.user, "return_im": True}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
def rename(self):
if self.channel_buffer:
new_name = self.formatted_name(style="sidebar", present=self.team.is_user_present(self.user), enable_color=config.colorize_private_chats)
if self.current_short_name != new_name:
self.current_short_name = new_name
w.buffer_set(self.channel_buffer, "short_name", new_name)
return True
return False
def refresh(self):
return self.rename()
class SlackGroupChannel(SlackChannel):
"""
A group channel is a private discussion group.
"""
def __init__(self, eventrouter, **kwargs):
super(SlackGroupChannel, self).__init__(eventrouter, **kwargs)
self.type = "group"
self.set_name(self.slack_name)
def set_name(self, slack_name):
self.name = config.group_name_prefix + slack_name
# def formatted_name(self, prepend="#", enable_color=True, basic=False):
# return prepend + self.slack_name
class SlackMPDMChannel(SlackChannel):
"""
An MPDM channel is a special instance of a 'group' channel.
We change the name to look less terrible in weechat.
"""
def __init__(self, eventrouter, **kwargs):
super(SlackMPDMChannel, self).__init__(eventrouter, **kwargs)
n = kwargs.get('name')
self.set_name(n)
self.type = "mpim"
def open(self, update_remote=True):
self.create_buffer()
self.active = True
self.get_history()
if "info" in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["info"], {"channel": self.identifier}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
if update_remote and 'join' in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]['join'], {'users': ','.join(self.members)}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
@staticmethod
def adjust_name(n):
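        # Turn the raw mpim name into something readable, e.g.
        # "mpdm-alice--bob--carol-1" -> "alice|bob|carol": drop the leading
        # "mpdm" and the trailing counter, then use "--" as the separator.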
return "|".join("-".join(n.split("-")[1:-1]).split("--"))
def set_name(self, n):
self.name = self.adjust_name(n)
def formatted_name(self, style="default", typing=False, **kwargs):
adjusted_name = self.adjust_name(self.slack_name)
if typing and config.channel_name_typing_indicator:
prepend = ">"
else:
prepend = "@"
select = {
"default": adjusted_name,
"sidebar": prepend + adjusted_name,
"base": adjusted_name,
"long_default": "{}.{}".format(self.team.preferred_name, adjusted_name),
"long_base": "{}.{}".format(self.team.preferred_name, adjusted_name),
}
return select[style]
def rename(self):
pass
class SlackSharedChannel(SlackChannel):
def __init__(self, eventrouter, **kwargs):
super(SlackSharedChannel, self).__init__(eventrouter, **kwargs)
self.type = 'shared'
def set_related_server(self, team):
super(SlackSharedChannel, self).set_related_server(team)
# Fetch members here (after the team is known) since they aren't
# included in rtm.start
s = SlackRequest(team.token, 'conversations.members', {'channel': self.identifier}, team_hash=team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
def get_history(self, slow_queue=False):
# Get info for external users in the channel
for user in self.members - set(self.team.users.keys()):
s = SlackRequest(self.team.token, 'users.info', {'user': user}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
super(SlackSharedChannel, self).get_history(slow_queue)
def set_name(self, slack_name):
self.name = config.shared_name_prefix + slack_name
class SlackThreadChannel(SlackChannelCommon):
"""
A thread channel is a virtual channel. We don't inherit from
SlackChannel, because most of how it operates will be different.
"""
def __init__(self, eventrouter, parent_message):
self.eventrouter = eventrouter
self.parent_message = parent_message
self.hashed_messages = {}
self.channel_buffer = None
# self.identifier = ""
# self.name = "#" + kwargs['name']
self.type = "thread"
self.got_history = False
self.label = None
self.members = self.parent_message.channel.members
self.team = self.parent_message.team
self.last_line_from = None
# self.set_name(self.slack_name)
# def set_name(self, slack_name):
# self.name = "#" + slack_name
@property
def identifier(self):
return self.parent_message.channel.identifier
@property
def messages(self):
return self.parent_message.channel.messages
@property
def muted(self):
return self.parent_message.channel.muted
def formatted_name(self, style="default", **kwargs):
hash_or_ts = self.parent_message.hash or self.parent_message.ts
styles = {
"default": " +{}".format(hash_or_ts),
"long_default": "{}.{}".format(self.parent_message.channel.formatted_name(style="long_default"), hash_or_ts),
"sidebar": " +{}".format(hash_or_ts),
}
return styles[style]
def refresh(self):
self.rename()
def mark_read(self, ts=None, update_remote=True, force=False):
if self.channel_buffer:
w.buffer_set(self.channel_buffer, "unread", "")
w.buffer_set(self.channel_buffer, "hotlist", "-1")
def buffer_prnt(self, nick, text, timestamp, tag_nick=None, **kwargs):
data = "{}\t{}".format(format_nick(nick, self.last_line_from), text)
self.last_line_from = nick
ts = SlackTS(timestamp)
if self.channel_buffer:
# backlog messages - we will update the read marker as we print these
# backlog = False
# if ts <= SlackTS(self.last_read):
# tags = tag("backlog")
# backlog = True
# elif self.type in ["im", "mpdm"]:
# tags = tag("dm")
# self.new_messages = True
# else:
tags = tag("default", thread=True, muted=self.muted)
# self.new_messages = True
w.prnt_date_tags(self.channel_buffer, ts.major, tags, data)
modify_last_print_time(self.channel_buffer, ts.minor)
if tag_nick == self.team.nick:
self.mark_read(ts, update_remote=False, force=True)
def get_history(self):
self.got_history = True
for message in self.parent_message.submessages:
text = self.render(message)
self.buffer_prnt(message.sender, text, message.ts)
def main_message_keys_reversed(self):
return (message.ts for message in reversed(self.parent_message.submessages))
def send_message(self, message, subtype=None):
if subtype == 'me_message':
w.prnt("", "ERROR: /me is not supported in threads")
return w.WEECHAT_RC_ERROR
message = linkify_text(message, self.team, self)
dbg(message)
request = {"type": "message", "text": message,
"channel": self.parent_message.channel.identifier,
"thread_ts": str(self.parent_message.ts),
"user": self.team.myidentifier}
self.team.send_to_websocket(request)
def open(self, update_remote=True):
self.create_buffer()
self.active = True
self.get_history()
# if "info" in SLACK_API_TRANSLATOR[self.type]:
# s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["info"], {"name": self.identifier}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
# self.eventrouter.receive(s)
# if update_remote:
# if "join" in SLACK_API_TRANSLATOR[self.type]:
# s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["join"], {"name": self.name}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
# self.eventrouter.receive(s)
def rename(self):
if self.channel_buffer and not self.label:
w.buffer_set(self.channel_buffer, "short_name", self.formatted_name(style="sidebar", enable_color=True))
def create_buffer(self):
"""
Creates the weechat buffer where the thread magic happens.
"""
if not self.channel_buffer:
self.channel_buffer = w.buffer_new(self.formatted_name(style="long_default"), "buffer_input_callback", "EVENTROUTER", "", "")
self.eventrouter.weechat_controller.register_buffer(self.channel_buffer, self)
w.buffer_set(self.channel_buffer, "localvar_set_type", 'channel')
w.buffer_set(self.channel_buffer, "localvar_set_nick", self.team.nick)
w.buffer_set(self.channel_buffer, "localvar_set_channel", self.formatted_name())
w.buffer_set(self.channel_buffer, "short_name", self.formatted_name(style="sidebar", enable_color=True))
time_format = w.config_string(w.config_get("weechat.look.buffer_time_format"))
parent_time = time.localtime(SlackTS(self.parent_message.ts).major)
topic = '{} {} | {}'.format(time.strftime(time_format, parent_time), self.parent_message.sender, self.render(self.parent_message) )
w.buffer_set(self.channel_buffer, "title", topic)
# self.eventrouter.weechat_controller.set_refresh_buffer_list(True)
# try:
# if self.unread_count != 0:
# for c in range(1, self.unread_count):
# if self.type == "im":
# w.buffer_set(self.channel_buffer, "hotlist", "2")
# else:
# w.buffer_set(self.channel_buffer, "hotlist", "1")
# else:
# pass
# #dbg("no unread in {}".format(self.name))
# except:
# pass
# dbg("exception no unread count")
# if self.unread_count != 0 and not self.muted:
# w.buffer_set(self.channel_buffer, "hotlist", "1")
def destroy_buffer(self, update_remote):
self.channel_buffer = None
self.got_history = False
self.active = False
def render(self, message, force=False):
return message.render(force)
class SlackUser(object):
"""
    Represents an individual slack user. Also where you set their name formatting.
"""
def __init__(self, originating_team_id, **kwargs):
self.identifier = kwargs["id"]
# These attributes may be missing in the response, so we have to make
# sure they're set
self.profile = {}
self.presence = kwargs.get("presence", "unknown")
self.deleted = kwargs.get("deleted", False)
self.is_external = (not kwargs.get("is_bot") and
kwargs.get("team_id") != originating_team_id)
for key, value in kwargs.items():
setattr(self, key, value)
if self.profile.get("display_name"):
self.slack_name = self.profile["display_name"]
self.name = self.profile["display_name"].replace(' ', '')
else:
# No display name set. Fall back to the deprecated username field.
self.slack_name = kwargs["name"]
self.name = self.slack_name
self.update_color()
def __repr__(self):
return "Name:{} Identifier:{}".format(self.name, self.identifier)
def force_color(self, color_name):
self.color_name = color_name
self.color = w.color(self.color_name)
def update_color(self):
# This will automatically be none/"" if the user has disabled nick
# colourization.
self.color_name = get_nick_color_name(self.name)
self.color = w.color(self.color_name)
def update_status(self, status_emoji, status_text):
self.profile["status_emoji"] = status_emoji
self.profile["status_text"] = status_text
def formatted_name(self, prepend="", enable_color=True):
if enable_color:
return self.color + prepend + self.name + w.color("reset")
else:
return prepend + self.name
class SlackBot(SlackUser):
"""
    Basically the same as a user, but kept as a separate class so bots can be
    identified and handled differently if needed in the future
"""
def __init__(self, originating_team_id, **kwargs):
super(SlackBot, self).__init__(originating_team_id, is_bot=True, **kwargs)
class SlackMessage(object):
"""
Represents a single slack message and associated context/metadata.
These are modifiable and can be rerendered to change a message,
delete a message, add a reaction, add a thread.
Note: these can't be tied to a SlackUser object because users
can be deleted, so we have to store sender in each one.
"""
def __init__(self, message_json, team, channel, override_sender=None):
self.team = team
self.channel = channel
self.message_json = message_json
self.submessages = []
self.thread_channel = None
self.hash = None
if override_sender:
self.sender = override_sender
self.sender_plain = override_sender
else:
senders = self.get_sender()
self.sender, self.sender_plain = senders[0], senders[1]
self.ts = SlackTS(message_json['ts'])
def __hash__(self):
return hash(self.ts)
def open_thread(self, switch=False):
if not self.thread_channel or not self.thread_channel.active:
self.thread_channel = SlackThreadChannel(EVENTROUTER, self)
self.thread_channel.open()
if switch:
w.buffer_set(self.thread_channel.channel_buffer, "display", "1")
def render(self, force=False):
text = render(self.message_json, self.team, force)
if (self.message_json.get('subtype') == 'me_message' and
not self.message_json['text'].startswith(self.sender)):
text = "{} {}".format(self.sender, text)
if (self.message_json.get('subtype') in ('channel_join', 'group_join') and
self.message_json.get('inviter')):
inviter_id = self.message_json.get('inviter')
inviter_nick = unfurl_refs("<@{}>".format(inviter_id))
text += " by invitation from {}".format(inviter_nick)
if len(self.submessages) > 0:
text += " {}[ Thread: {} Replies: {} ]".format(
w.color(config.color_thread_suffix),
self.hash or self.ts,
len(self.submessages))
return text
def change_text(self, new_text):
self.message_json["text"] = new_text
dbg(self.message_json)
def get_sender(self):
name = ""
name_plain = ""
user = self.team.users.get(self.message_json.get('user'))
if user:
name = "{}".format(user.formatted_name())
name_plain = "{}".format(user.formatted_name(enable_color=False))
if user.is_external:
name += config.external_user_suffix
name_plain += config.external_user_suffix
elif 'username' in self.message_json:
username = self.message_json["username"]
if self.message_json.get("subtype") == "bot_message":
name = "{} :]".format(username)
name_plain = "{}".format(username)
else:
name = "-{}-".format(username)
name_plain = "{}".format(username)
elif 'service_name' in self.message_json:
name = "-{}-".format(self.message_json["service_name"])
name_plain = "{}".format(self.message_json["service_name"])
elif self.message_json.get('bot_id') in self.team.bots:
name = "{} :]".format(self.team.bots[self.message_json["bot_id"]].formatted_name())
name_plain = "{}".format(self.team.bots[self.message_json["bot_id"]].formatted_name(enable_color=False))
return (name, name_plain)
def add_reaction(self, reaction, user):
m = self.message_json.get('reactions', None)
if m:
found = False
for r in m:
if r["name"] == reaction and user not in r["users"]:
r["users"].append(user)
found = True
if not found:
self.message_json["reactions"].append({"name": reaction, "users": [user]})
else:
self.message_json["reactions"] = [{"name": reaction, "users": [user]}]
def remove_reaction(self, reaction, user):
m = self.message_json.get('reactions', None)
if m:
for r in m:
if r["name"] == reaction and user in r["users"]:
r["users"].remove(user)
else:
pass
def has_mention(self):
return w.string_has_highlight(self.message_json.get('text'), ",".join(self.channel.mentions()))
def notify_thread(self, action=None, sender_id=None):
if config.auto_open_threads:
self.open_thread()
elif sender_id != self.team.myidentifier:
if action == "mention":
template = "You were mentioned in thread {hash}, channel {channel}"
elif action == "participant":
template = "New message in thread {hash}, channel {channel} in which you participated"
elif action == "response":
template = "New message in thread {hash} in response to own message in {channel}"
else:
template = "Notification for message in thread {hash}, channel {channel}"
message = template.format(hash=self.hash, channel=self.channel.formatted_name())
self.team.buffer_prnt(message, message=True)
class SlackThreadMessage(SlackMessage):
def __init__(self, parent_message, *args):
super(SlackThreadMessage, self).__init__(*args)
self.parent_message = parent_message
class WeeSlackMetadata(object):
"""
A simple container that we pickle/unpickle to hold data.
"""
def __init__(self, meta):
self.meta = meta
def jsonify(self):
return self.meta
class Hdata(object):
def __init__(self, w):
self.buffer = w.hdata_get('buffer')
self.line = w.hdata_get('line')
self.line_data = w.hdata_get('line_data')
self.lines = w.hdata_get('lines')
class SlackTS(object):
def __init__(self, ts=None):
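        # A Slack ts looks like "1483975919.000631": seconds since the epoch
        # (major) plus a per-channel uniqueness counter (minor), e.g.
        # SlackTS("1483975919.000631") -> major=1483975919, minor=631.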
if ts:
self.major, self.minor = [int(x) for x in ts.split('.', 1)]
else:
self.major = int(time.time())
self.minor = 0
def __cmp__(self, other):
if isinstance(other, SlackTS):
if self.major < other.major:
return -1
elif self.major > other.major:
return 1
elif self.major == other.major:
if self.minor < other.minor:
return -1
elif self.minor > other.minor:
return 1
else:
return 0
else:
s = self.__str__()
if s < other:
return -1
elif s > other:
return 1
elif s == other:
return 0
def __hash__(self):
return hash("{}.{}".format(self.major, self.minor))
def __repr__(self):
return str("{0}.{1:06d}".format(self.major, self.minor))
def split(self, *args, **kwargs):
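        # Mimic str.split() so a SlackTS can be used where a raw ts string
        # would otherwise be split on ".".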
return [self.major, self.minor]
def majorstr(self):
return str(self.major)
def minorstr(self):
return str(self.minor)
###### New handlers
def handle_rtmstart(login_data, eventrouter):
"""
This handles the main entry call to slack, rtm.start
"""
metadata = pickle.loads(login_data["wee_slack_request_metadata"])
if not login_data["ok"]:
w.prnt("", "ERROR: Failed connecting to Slack with token starting with {}: {}"
.format(metadata.token[:15], login_data["error"]))
if not re.match(r"^xo\w\w(-\d+){3}-[0-9a-f]+$", metadata.token):
w.prnt("", "ERROR: Token does not look like a valid Slack token. "
"Ensure it is a valid token and not just a OAuth code.")
return
# Let's reuse a team if we have it already.
th = SlackTeam.generate_team_hash(login_data['self']['name'], login_data['team']['domain'])
if not eventrouter.teams.get(th):
users = {}
for item in login_data["users"]:
users[item["id"]] = SlackUser(login_data['team']['id'], **item)
bots = {}
for item in login_data["bots"]:
bots[item["id"]] = SlackBot(login_data['team']['id'], **item)
channels = {}
for item in login_data["channels"]:
if item["is_shared"]:
channels[item["id"]] = SlackSharedChannel(eventrouter, **item)
else:
channels[item["id"]] = SlackChannel(eventrouter, **item)
for item in login_data["ims"]:
channels[item["id"]] = SlackDMChannel(eventrouter, users, **item)
for item in login_data["groups"]:
if item["name"].startswith('mpdm-'):
channels[item["id"]] = SlackMPDMChannel(eventrouter, **item)
else:
channels[item["id"]] = SlackGroupChannel(eventrouter, **item)
t = SlackTeam(
eventrouter,
metadata.token,
login_data['url'],
login_data["team"],
login_data["self"]["name"],
login_data["self"]["id"],
users,
bots,
channels,
muted_channels=login_data["self"]["prefs"]["muted_channels"],
highlight_words=login_data["self"]["prefs"]["highlight_words"],
)
eventrouter.register_team(t)
else:
t = eventrouter.teams.get(th)
t.set_reconnect_url(login_data['url'])
t.connect()
t.buffer_prnt('Connected to Slack team {} ({}) with username {}'.format(
login_data["team"]["name"], t.domain, t.nick))
dbg("connected to {}".format(t.domain))
def handle_emojilist(emoji_json, eventrouter, **kwargs):
if emoji_json["ok"]:
request_metadata = pickle.loads(emoji_json["wee_slack_request_metadata"])
team = eventrouter.teams[request_metadata.team_hash]
team.emoji_completions.extend(emoji_json["emoji"].keys())
def handle_channelsinfo(channel_json, eventrouter, **kwargs):
request_metadata = pickle.loads(channel_json["wee_slack_request_metadata"])
team = eventrouter.teams[request_metadata.team_hash]
channel = team.channels[request_metadata.channel_identifier]
channel.set_unread_count_display(channel_json['channel'].get('unread_count_display', 0))
channel.set_members(channel_json['channel']['members'])
def handle_groupsinfo(group_json, eventrouter, **kwargs):
request_metadata = pickle.loads(group_json["wee_slack_request_metadata"])
team = eventrouter.teams[request_metadata.team_hash]
group = team.channels[request_metadata.channel_identifier]
group.set_unread_count_display(group_json['group'].get('unread_count_display', 0))
def handle_conversationsopen(conversation_json, eventrouter, object_name='channel', **kwargs):
request_metadata = pickle.loads(conversation_json["wee_slack_request_metadata"])
# Set unread count if the channel isn't new (channel_identifier exists)
if hasattr(request_metadata, 'channel_identifier'):
team = eventrouter.teams[request_metadata.team_hash]
conversation = team.channels[request_metadata.channel_identifier]
unread_count_display = conversation_json[object_name].get('unread_count_display', 0)
conversation.set_unread_count_display(unread_count_display)
def handle_mpimopen(mpim_json, eventrouter, object_name='group', **kwargs):
handle_conversationsopen(mpim_json, eventrouter, object_name, **kwargs)
def handle_groupshistory(message_json, eventrouter, **kwargs):
handle_history(message_json, eventrouter, **kwargs)
def handle_channelshistory(message_json, eventrouter, **kwargs):
handle_history(message_json, eventrouter, **kwargs)
def handle_imhistory(message_json, eventrouter, **kwargs):
handle_history(message_json, eventrouter, **kwargs)
def handle_mpimhistory(message_json, eventrouter, **kwargs):
handle_history(message_json, eventrouter, **kwargs)
def handle_conversationshistory(message_json, eventrouter, **kwargs):
handle_history(message_json, eventrouter, **kwargs)
def handle_history(message_json, eventrouter, **kwargs):
request_metadata = pickle.loads(message_json["wee_slack_request_metadata"])
kwargs['team'] = eventrouter.teams[request_metadata.team_hash]
kwargs['channel'] = kwargs['team'].channels[request_metadata.channel_identifier]
if getattr(request_metadata, 'clear', False):
kwargs['channel'].clear_messages()
kwargs['channel'].got_history = True
for message in reversed(message_json["messages"]):
# Don't download historical files, considering that
# background_load_all_history might be on.
process_message(message, eventrouter, download=False, **kwargs)
def handle_conversationsmembers(members_json, eventrouter, **kwargs):
request_metadata = pickle.loads(members_json['wee_slack_request_metadata'])
team = eventrouter.teams[request_metadata.team_hash]
channel = team.channels[request_metadata.channel_identifier]
channel.members = set(members_json['members'])
def handle_usersinfo(user_json, eventrouter, **kwargs):
request_metadata = pickle.loads(user_json['wee_slack_request_metadata'])
team = eventrouter.teams[request_metadata.team_hash]
channel = team.channels[request_metadata.channel_identifier]
user_info = user_json['user']
user = SlackUser(team.identifier, **user_info)
team.users[user_info['id']] = user
if channel.type == 'shared':
channel.update_nicklist(user_info['id'])
elif channel.type == 'im':
channel.slack_name = user.name
channel.set_topic(create_user_status_string(user.profile))
###### New/converted process_ and subprocess_ methods
def process_hello(message_json, eventrouter, **kwargs):
kwargs['team'].subscribe_users_presence()
def process_reconnect_url(message_json, eventrouter, **kwargs):
kwargs['team'].set_reconnect_url(message_json['url'])
def process_manual_presence_change(message_json, eventrouter, **kwargs):
process_presence_change(message_json, eventrouter, **kwargs)
def process_presence_change(message_json, eventrouter, **kwargs):
if "user" in kwargs:
# TODO: remove once it's stable
user = kwargs["user"]
team = kwargs["team"]
team.update_member_presence(user, message_json["presence"])
if "users" in message_json:
team = kwargs["team"]
for user_id in message_json["users"]:
user = team.users[user_id]
team.update_member_presence(user, message_json["presence"])
def process_pref_change(message_json, eventrouter, **kwargs):
team = kwargs["team"]
if message_json['name'] == 'muted_channels':
team.set_muted_channels(message_json['value'])
elif message_json['name'] == 'highlight_words':
team.set_highlight_words(message_json['value'])
else:
dbg("Preference change not implemented: {}\n".format(message_json['name']))
def process_user_change(message_json, eventrouter, **kwargs):
"""
    Currently only used to update a user's status, but there is a lot more we could do here.
"""
user = message_json['user']
profile = user.get('profile')
team = kwargs['team']
team_user = team.users.get(user['id'])
if team_user:
team_user.update_status(profile.get('status_emoji'), profile.get('status_text'))
dmchannel = team.find_channel_by_members({user['id']}, channel_type='im')
if dmchannel:
dmchannel.set_topic(create_user_status_string(profile))
def process_user_typing(message_json, eventrouter, **kwargs):
channel = kwargs["channel"]
team = kwargs["team"]
if channel:
channel.set_typing(team.users.get(message_json["user"]).name)
w.bar_item_update("slack_typing_notice")
def process_team_join(message_json, eventrouter, **kwargs):
user = message_json['user']
team = kwargs["team"]
team.users[user["id"]] = SlackUser(team.identifier, **user)
def process_pong(message_json, eventrouter, **kwargs):
pass
def process_message(message_json, eventrouter, store=True, download=True, **kwargs):
channel = kwargs["channel"]
team = kwargs["team"]
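    # Don't process messages we already have for this channel.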
if SlackTS(message_json["ts"]) in channel.messages:
return
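    # Thread replies carry a thread_ts but no reply_count (the thread parent
    # has both), so give them a synthetic subtype and let the subprocess_
    # dispatch below route them.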
if "thread_ts" in message_json and "reply_count" not in message_json:
message_json["subtype"] = "thread_message"
subtype = message_json.get("subtype")
subtype_functions = get_functions_with_prefix("subprocess_")
if subtype in subtype_functions:
subtype_functions[subtype](message_json, eventrouter, channel, team)
else:
message = SlackMessage(message_json, team, channel)
text = channel.render(message)
dbg("Rendered message: %s" % text)
dbg("Sender: %s (%s)" % (message.sender, message.sender_plain))
if subtype == 'me_message':
prefix = w.prefix("action").rstrip()
else:
prefix = message.sender
channel.buffer_prnt(prefix, text, message.ts,
tag_nick=message.sender_plain, **kwargs)
channel.unread_count_display += 1
if store:
channel.store_message(message, team)
dbg("NORMAL REPLY {}".format(message_json))
if download:
download_files(message_json, **kwargs)
def download_files(message_json, **kwargs):
team = kwargs["team"]
download_location = config.files_download_location
if not download_location:
return
if not os.path.exists(download_location):
try:
os.makedirs(download_location)
except:
w.prnt('', 'ERROR: Failed to create directory at files_download_location: {}'
.format(format_exc_only()))
def fileout_iter(path):
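        # Yield the unmodified path first, then "name-1.ext", "name-2.ext", ...
        # so an existing download is never overwritten.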
yield path
main, ext = os.path.splitext(path)
for i in count(start=1):
yield main + "-{}".format(i) + ext
for f in message_json.get('files', []):
if f.get('mode') == 'tombstone':
continue
filetype = '' if f['title'].endswith(f['filetype']) else '.' + f['filetype']
filename = '{}_{}{}'.format(team.preferred_name, f['title'], filetype)
for fileout in fileout_iter(os.path.join(download_location, filename)):
if os.path.isfile(fileout):
continue
weechat.hook_process_hashtable(
"url:" + f['url_private'],
{
'file_out': fileout,
'httpheader': 'Authorization: Bearer ' + team.token
},
config.slack_timeout, "", "")
break
def subprocess_thread_message(message_json, eventrouter, channel, team):
# print ("THREADED: " + str(message_json))
parent_ts = message_json.get('thread_ts', None)
if parent_ts:
parent_message = channel.messages.get(SlackTS(parent_ts), None)
if parent_message:
message = SlackThreadMessage(
parent_message, message_json, team, channel)
parent_message.submessages.append(message)
channel.hash_message(parent_ts)
channel.store_message(message, team)
channel.change_message(parent_ts)
if parent_message.thread_channel and parent_message.thread_channel.active:
parent_message.thread_channel.buffer_prnt(message.sender, parent_message.thread_channel.render(message), message.ts, tag_nick=message.sender_plain)
elif message.ts > channel.last_read and message.has_mention():
parent_message.notify_thread(action="mention", sender_id=message_json["user"])
if config.thread_messages_in_channel:
channel.buffer_prnt(
message.sender, channel.render(message), message.ts, tag_nick=message.sender_plain)
# channel = channels.find(message_json["channel"])
# server = channel.server
# #threadinfo = channel.get_message(message_json["thread_ts"])
# message = Message(message_json, server=server, channel=channel)
# dbg(message, main_buffer=True)
#
# orig = channel.get_message(message_json['thread_ts'])
# if orig[0]:
# channel.get_message(message_json['thread_ts'])[2].add_thread_message(message)
# else:
# dbg("COULDN'T find orig message {}".format(message_json['thread_ts']), main_buffer=True)
# if threadinfo[0]:
# channel.messages[threadinfo[1]].become_thread()
# message_json["item"]["ts"], message_json)
# channel.change_message(message_json["thread_ts"], None, message_json["text"])
# channel.become_thread(message_json["item"]["ts"], message_json)
def subprocess_channel_join(message_json, eventrouter, channel, team):
joinprefix = w.prefix("join").strip()
message = SlackMessage(message_json, team, channel, override_sender=joinprefix)
channel.buffer_prnt(joinprefix, channel.render(message), message_json["ts"], tagset='joinleave')
channel.user_joined(message_json['user'])
def subprocess_channel_leave(message_json, eventrouter, channel, team):
leaveprefix = w.prefix("quit").strip()
message = SlackMessage(message_json, team, channel, override_sender=leaveprefix)
channel.buffer_prnt(leaveprefix, channel.render(message), message_json["ts"], tagset='joinleave')
channel.user_left(message_json['user'])
# channel.update_nicklist(message_json['user'])
# channel.update_nicklist()
subprocess_group_join = subprocess_channel_join
subprocess_group_leave = subprocess_channel_leave
def subprocess_message_replied(message_json, eventrouter, channel, team):
parent_ts = message_json["message"].get("thread_ts")
parent_message = channel.messages.get(SlackTS(parent_ts))
# Thread exists but is not open yet
if parent_message is not None \
and not (parent_message.thread_channel and parent_message.thread_channel.active):
channel.hash_message(parent_ts)
last_message = max(message_json["message"]["replies"], key=lambda x: x["ts"])
if message_json["message"].get("user") == team.myidentifier:
parent_message.notify_thread(action="response", sender_id=last_message["user"])
elif any(team.myidentifier == r["user"] for r in message_json["message"]["replies"]):
parent_message.notify_thread(action="participant", sender_id=last_message["user"])
def subprocess_message_changed(message_json, eventrouter, channel, team):
new_message = message_json.get("message", None)
channel.change_message(new_message["ts"], message_json=new_message)
def subprocess_message_deleted(message_json, eventrouter, channel, team):
message = "{}{}{}".format(
w.color("red"), '(deleted)', w.color("reset"))
channel.change_message(message_json["deleted_ts"], text=message)
def subprocess_channel_topic(message_json, eventrouter, channel, team):
text = unhtmlescape(unfurl_refs(message_json["text"], ignore_alt_text=False))
channel.buffer_prnt(w.prefix("network").rstrip(), text, message_json["ts"], tagset="topic")
channel.set_topic(message_json["topic"])
def process_reply(message_json, eventrouter, **kwargs):
team = kwargs["team"]
reply_to = int(message_json["reply_to"])
original_message_json = team.ws_replies.pop(reply_to, None)
if original_message_json:
original_message_json.update(message_json)
channel = team.channels[original_message_json.get('channel')]
process_message(original_message_json, eventrouter,
channel=channel, team=team)
dbg("REPLY {}".format(message_json))
else:
dbg("Unexpected reply {}".format(message_json))
def process_channel_marked(message_json, eventrouter, **kwargs):
"""
complete
"""
channel = kwargs["channel"]
ts = message_json.get("ts", None)
if ts:
channel.mark_read(ts=ts, force=True, update_remote=False)
else:
dbg("tried to mark something weird {}".format(message_json))
def process_group_marked(message_json, eventrouter, **kwargs):
process_channel_marked(message_json, eventrouter, **kwargs)
def process_im_marked(message_json, eventrouter, **kwargs):
process_channel_marked(message_json, eventrouter, **kwargs)
def process_mpim_marked(message_json, eventrouter, **kwargs):
process_channel_marked(message_json, eventrouter, **kwargs)
def process_channel_joined(message_json, eventrouter, **kwargs):
item = message_json["channel"]
kwargs['team'].channels[item["id"]].update_from_message_json(item)
kwargs['team'].channels[item["id"]].open()
def process_channel_created(message_json, eventrouter, **kwargs):
item = message_json["channel"]
c = SlackChannel(eventrouter, team=kwargs["team"], **item)
kwargs['team'].channels[item["id"]] = c
kwargs['team'].buffer_prnt('Channel created: {}'.format(c.slack_name))
def process_channel_rename(message_json, eventrouter, **kwargs):
item = message_json["channel"]
channel = kwargs['team'].channels[item["id"]]
channel.slack_name = message_json['channel']['name']
def process_im_created(message_json, eventrouter, **kwargs):
team = kwargs['team']
item = message_json["channel"]
c = SlackDMChannel(eventrouter, team=team, users=team.users, **item)
team.channels[item["id"]] = c
kwargs['team'].buffer_prnt('IM channel created: {}'.format(c.name))
def process_im_open(message_json, eventrouter, **kwargs):
channel = kwargs['channel']
item = message_json
kwargs['team'].channels[item["channel"]].check_should_open(True)
w.buffer_set(channel.channel_buffer, "hotlist", "2")
def process_im_close(message_json, eventrouter, **kwargs):
channel = kwargs['team'].channels[message_json["channel"]]
if channel.channel_buffer:
w.prnt(kwargs['team'].channel_buffer,
'IM {} closed by another client or the server'.format(channel.name))
eventrouter.weechat_controller.unregister_buffer(channel.channel_buffer, False, True)
def process_group_joined(message_json, eventrouter, **kwargs):
item = message_json["channel"]
if item["name"].startswith("mpdm-"):
c = SlackMPDMChannel(eventrouter, team=kwargs["team"], **item)
else:
c = SlackGroupChannel(eventrouter, team=kwargs["team"], **item)
kwargs['team'].channels[item["id"]] = c
kwargs['team'].channels[item["id"]].open()
def process_reaction_added(message_json, eventrouter, **kwargs):
channel = kwargs['team'].channels.get(message_json["item"].get("channel"))
if message_json["item"].get("type") == "message":
ts = SlackTS(message_json['item']["ts"])
message = channel.messages.get(ts, None)
if message:
message.add_reaction(message_json["reaction"], message_json["user"])
channel.change_message(ts)
else:
dbg("reaction to item type not supported: " + str(message_json))
def process_reaction_removed(message_json, eventrouter, **kwargs):
channel = kwargs['team'].channels.get(message_json["item"].get("channel"))
if message_json["item"].get("type") == "message":
ts = SlackTS(message_json['item']["ts"])
message = channel.messages.get(ts, None)
if message:
message.remove_reaction(message_json["reaction"], message_json["user"])
channel.change_message(ts)
else:
dbg("Reaction to item type not supported: " + str(message_json))
def process_emoji_changed(message_json, eventrouter, **kwargs):
team = kwargs['team']
team.load_emoji_completions()
###### New module/global methods
def render_formatting(text):
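    # Translate Slack's *bold* and _italic_ markers into the configured
    # weechat attributes; spans containing backticks or newlines are left alone.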
text = re.sub(r'(^| )\*([^*\n`]+)\*(?=[^\w]|$)',
r'\1{}*\2*{}'.format(w.color(config.render_bold_as),
w.color('-' + config.render_bold_as)),
text,
flags=re.UNICODE)
text = re.sub(r'(^| )_([^_\n`]+)_(?=[^\w]|$)',
r'\1{}_\2_{}'.format(w.color(config.render_italic_as),
w.color('-' + config.render_italic_as)),
text,
flags=re.UNICODE)
return text
def render(message_json, team, force=False):
# If we already have a rendered version in the object, just return that.
if not force and message_json.get("_rendered_text", ""):
return message_json["_rendered_text"]
else:
# server = servers.find(message_json["_server"])
if "fallback" in message_json:
text = message_json["fallback"]
elif "text" in message_json:
if message_json['text'] is not None:
text = message_json["text"]
else:
text = ""
else:
text = ""
text = unfurl_refs(text)
if "edited" in message_json:
text += "{}{}{}".format(
w.color(config.color_edited_suffix), ' (edited)', w.color("reset"))
text += unfurl_refs(unwrap_attachments(message_json, text))
text += unfurl_refs(unwrap_files(message_json, text))
text = text.lstrip()
text = unhtmlescape(text.replace("\t", " "))
if message_json.get('mrkdwn', True):
text = render_formatting(text)
text += create_reaction_string(message_json.get("reactions", ""))
message_json["_rendered_text"] = text
return text
def linkify_text(message, team, channel):
# The get_username_map function is a bit heavy, but this whole
# function is only called on message send..
usernames = team.get_username_map()
channels = team.get_channel_map()
message = (message
# Replace IRC formatting chars with Slack formatting chars.
.replace('\x02', '*')
.replace('\x1D', '_')
.replace('\x1F', config.map_underline_to)
# Escape chars that have special meaning to Slack. Note that we do not
# (and should not) perform full HTML entity-encoding here.
# See https://api.slack.com/docs/message-formatting for details.
.replace('&', '&')
.replace('<', '<')
.replace('>', '>')
.split(' '))
for item in enumerate(message):
        targets = re.match(r'^\s*([@#])([\w\(\)\'.-]+)(\W*)', item[1], re.UNICODE)
if targets and targets.groups()[0] == '@':
named = targets.groups()
if named[1] in ["group", "channel", "here"]:
message[item[0]] = "<!{}>".format(named[1])
else:
try:
if usernames[named[1]]:
message[item[0]] = "<@{}>{}".format(usernames[named[1]], named[2])
except:
message[item[0]] = "@{}{}".format(named[1], named[2])
if targets and targets.groups()[0] == '#':
named = targets.groups()
try:
if channels[named[1]]:
message[item[0]] = "<#{}|{}>{}".format(channels[named[1]], named[1], named[2])
except:
message[item[0]] = "#{}{}".format(named[1], named[2])
# dbg(message)
return " ".join(message)
def unfurl_refs(text, ignore_alt_text=None, auto_link_display=None):
"""
input : <@U096Q7CQM|someuser> has joined the channel
    output : someuser has joined the channel
"""
# Find all strings enclosed by <>
# - <https://example.com|example with spaces>
# - <#C2147483705|#otherchannel>
# - <@U2147483697|@othernick>
# Test patterns lives in ./_pytest/test_unfurl.py
if ignore_alt_text is None:
ignore_alt_text = config.unfurl_ignore_alt_text
if auto_link_display is None:
auto_link_display = config.unfurl_auto_link_display
matches = re.findall(r"(<[@#]?(?:[^>]*)>)", text)
for m in matches:
# Replace them with human readable strings
text = text.replace(
m, unfurl_ref(m[1:-1], ignore_alt_text, auto_link_display))
return text
def unfurl_ref(ref, ignore_alt_text, auto_link_display):
id = ref.split('|')[0]
display_text = ref
if ref.find('|') > -1:
if ignore_alt_text:
display_text = resolve_ref(id)
else:
if id.startswith("#C"):
display_text = "#{}".format(ref.split('|')[1])
elif id.startswith("@U"):
display_text = ref.split('|')[1]
else:
url, desc = ref.split('|', 1)
match_url = r"^\w+:(//)?{}$".format(re.escape(desc))
url_matches_desc = re.match(match_url, url)
if url_matches_desc and auto_link_display == "text":
display_text = desc
elif url_matches_desc and auto_link_display == "url":
display_text = url
else:
display_text = "{} ({})".format(url, desc)
else:
display_text = resolve_ref(ref)
return display_text
def unhtmlescape(text):
return text.replace("<", "<") \
.replace(">", ">") \
.replace("&", "&")
def unwrap_attachments(message_json, text_before):
text_before_unescaped = unhtmlescape(text_before)
attachment_texts = []
a = message_json.get("attachments", None)
if a:
if text_before:
attachment_texts.append('')
for attachment in a:
# Attachments should be rendered roughly like:
#
# $pretext
# $author: (if rest of line is non-empty) $title ($title_link) OR $from_url
# $author: (if no $author on previous line) $text
# $fields
t = []
prepend_title_text = ''
if 'author_name' in attachment:
prepend_title_text = attachment['author_name'] + ": "
if 'pretext' in attachment:
t.append(attachment['pretext'])
title = attachment.get('title', None)
title_link = attachment.get('title_link', '')
if title_link in text_before_unescaped:
title_link = ''
if title and title_link:
t.append('%s%s (%s)' % (prepend_title_text, title, title_link,))
prepend_title_text = ''
elif title and not title_link:
t.append('%s%s' % (prepend_title_text, title,))
prepend_title_text = ''
from_url = attachment.get('from_url', '')
if from_url not in text_before_unescaped and from_url != title_link:
t.append(from_url)
atext = attachment.get("text", None)
if atext:
tx = re.sub(r' *\n[\n ]+', '\n', atext)
t.append(prepend_title_text + tx)
prepend_title_text = ''
image_url = attachment.get('image_url', '')
if image_url not in text_before_unescaped and image_url != title_link:
t.append(image_url)
fields = attachment.get("fields", None)
if fields:
for f in fields:
if f['title'] != '':
t.append('%s %s' % (f['title'], f['value'],))
else:
t.append(f['value'])
fallback = attachment.get("fallback", None)
if t == [] and fallback:
t.append(fallback)
attachment_texts.append("\n".join([x.strip() for x in t if x]))
return "\n".join(attachment_texts)
def unwrap_files(message_json, text_before):
files_texts = []
for f in message_json.get('files', []):
if f.get('mode', '') != 'tombstone':
text = '{} ({})'.format(f['url_private'], f['title'])
else:
text = '{}(This file was deleted.){}'.format(
w.color("red"),
w.color("reset"))
files_texts.append(text)
if text_before:
files_texts.insert(0, '')
return "\n".join(files_texts)
def resolve_ref(ref):
# TODO: This hack to use eventrouter needs to go
# this resolver should probably move to the slackteam or eventrouter itself
# global EVENTROUTER
if 'EVENTROUTER' in globals():
e = EVENTROUTER
if ref.startswith('@U') or ref.startswith('@W'):
for t in e.teams.keys():
user = e.teams[t].users.get(ref[1:])
if user:
name = '@{}'.format(user.name)
if user.is_external:
name += config.external_user_suffix
return name
elif ref.startswith('#C'):
for t in e.teams.keys():
if ref[1:] in e.teams[t].channels:
# try:
return "{}".format(e.teams[t].channels[ref[1:]].name)
# except:
# dbg("CHANNEL: {}".format(ref))
# Something else, just return as-is
return ref
def create_user_status_string(profile):
real_name = profile.get("real_name")
status_emoji = profile.get("status_emoji")
status_text = profile.get("status_text")
if status_emoji or status_text:
return "{} | {} {}".format(real_name, status_emoji, status_text)
else:
return real_name
def create_reaction_string(reactions):
count = 0
if not isinstance(reactions, list):
reaction_string = " {}[{}]{}".format(
w.color(config.color_reaction_suffix), reactions, w.color("reset"))
else:
reaction_string = ' {}['.format(w.color(config.color_reaction_suffix))
for r in reactions:
if len(r["users"]) > 0:
count += 1
if config.show_reaction_nicks:
nicks = [resolve_ref("@{}".format(user)) for user in r["users"]]
users = "({})".format(",".join(nicks))
else:
users = len(r["users"])
reaction_string += ":{}:{} ".format(r["name"], users)
reaction_string = reaction_string[:-1] + ']'
if count == 0:
reaction_string = ''
return reaction_string
def hdata_line_ts(line_pointer):
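    # A printed line stores the Slack ts major part in 'date' and the minor
    # part in 'date_printed' (see modify_last_print_time).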
data = w.hdata_pointer(hdata.line, line_pointer, 'data')
ts_major = w.hdata_time(hdata.line_data, data, 'date')
ts_minor = w.hdata_time(hdata.line_data, data, 'date_printed')
return (ts_major, ts_minor)
def modify_buffer_line(buffer_pointer, ts, new_text):
own_lines = w.hdata_pointer(hdata.buffer, buffer_pointer, 'own_lines')
line_pointer = w.hdata_pointer(hdata.lines, own_lines, 'last_line')
# Find the last line with this ts
while line_pointer and hdata_line_ts(line_pointer) != (ts.major, ts.minor):
line_pointer = w.hdata_move(hdata.line, line_pointer, -1)
# Find all lines for the message
pointers = []
while line_pointer and hdata_line_ts(line_pointer) == (ts.major, ts.minor):
pointers.append(line_pointer)
line_pointer = w.hdata_move(hdata.line, line_pointer, -1)
pointers.reverse()
# Split the message into at most the number of existing lines as we can't insert new lines
lines = new_text.split('\n', len(pointers) - 1)
# Replace newlines to prevent garbled lines in bare display mode
lines = [line.replace('\n', ' | ') for line in lines]
# Extend lines in case the new message is shorter than the old as we can't delete lines
lines += [''] * (len(pointers) - len(lines))
for pointer, line in zip(pointers, lines):
data = w.hdata_pointer(hdata.line, pointer, 'data')
w.hdata_update(hdata.line_data, data, {"message": line})
return w.WEECHAT_RC_OK
def modify_last_print_time(buffer_pointer, ts_minor):
"""
    This overloads the 'date_printed' field of each line to let us store the
    per-message unique id that comes after the "." in a slack ts
"""
own_lines = w.hdata_pointer(hdata.buffer, buffer_pointer, 'own_lines')
line_pointer = w.hdata_pointer(hdata.lines, own_lines, 'last_line')
while line_pointer:
data = w.hdata_pointer(hdata.line, line_pointer, 'data')
w.hdata_update(hdata.line_data, data, {"date_printed": str(ts_minor)})
if w.hdata_string(hdata.line_data, data, 'prefix'):
# Reached the first line of the message, so stop here
break
# Move one line backwards so all lines of the message are set
line_pointer = w.hdata_move(hdata.line, line_pointer, -1)
return w.WEECHAT_RC_OK
def format_nick(nick, previous_nick=None):
if nick == previous_nick:
nick = w.config_string(w.config_get('weechat.look.prefix_same_nick')) or nick
nick_prefix = w.config_string(w.config_get('weechat.look.nick_prefix'))
nick_prefix_color_name = w.config_string(w.config_get('weechat.color.chat_nick_prefix'))
nick_prefix_color = w.color(nick_prefix_color_name)
nick_suffix = w.config_string(w.config_get('weechat.look.nick_suffix'))
    nick_suffix_color_name = w.config_string(w.config_get('weechat.color.chat_nick_suffix'))
nick_suffix_color = w.color(nick_suffix_color_name)
return nick_prefix_color + nick_prefix + w.color("reset") + nick + nick_suffix_color + nick_suffix + w.color("reset")
def tag(tagset, user=None, thread=False, muted=False):
tagsets = {
# messages in the team/server buffer, e.g. "new channel created"
"team_info": {"no_highlight", "log3"},
"team_message": {"irc_privmsg", "notify_message", "log1"},
# when replaying something old
"backlog": {"irc_privmsg", "no_highlight", "notify_none", "logger_backlog"},
# when receiving a direct message
"dm": {"irc_privmsg", "notify_private", "log1"},
"dmfromme": {"irc_privmsg", "no_highlight", "notify_none", "log1"},
# when this is a join/leave, attach for smart filter ala:
# if user in [x.strip() for x in w.prefix("join"), w.prefix("quit")]
"joinleave": {"irc_smart_filter", "no_highlight", "log4"},
"topic": {"irc_topic", "no_highlight", "log3"},
# catchall ?
"default": {"irc_privmsg", "notify_message", "log1"},
}
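    # Tag the line with the sending nick, replacing spaces so the tag stays a
    # single token.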
nick_tag = {"nick_{}".format(user or "unknown").replace(" ", "_")}
slack_tag = {"slack_{}".format(tagset)}
tags = nick_tag | slack_tag | tagsets[tagset]
if muted:
tags.add("slack_muted_channel")
if not thread and config.muted_channels_activity != "all":
tags -= {"notify_highlight", "notify_message", "notify_private"}
tags.add("notify_none")
if config.muted_channels_activity == "none":
tags.add("no_highlight")
return ",".join(tags)
###### New/converted command_ commands
@slack_buffer_or_ignore
@utf8_decode
def part_command_cb(data, current_buffer, args):
e = EVENTROUTER
args = args.split()
if len(args) > 1:
team = e.weechat_controller.buffers[current_buffer].team
cmap = team.get_channel_map()
channel = "".join(args[1:])
if channel in cmap:
buffer_ptr = team.channels[cmap[channel]].channel_buffer
e.weechat_controller.unregister_buffer(buffer_ptr, update_remote=True, close_buffer=True)
else:
e.weechat_controller.unregister_buffer(current_buffer, update_remote=True, close_buffer=True)
return w.WEECHAT_RC_OK_EAT
def parse_topic_command(command):
args = command.split()[1:]
channel_name = None
topic = None
if args:
if args[0].startswith('#'):
channel_name = args[0][1:]
topic = args[1:]
else:
topic = args
if topic == []:
topic = None
if topic:
topic = ' '.join(topic)
if topic == '-delete':
topic = ''
return channel_name, topic
@slack_buffer_or_ignore
@utf8_decode
def topic_command_cb(data, current_buffer, command):
"""
Change the topic of a channel
/topic [<channel>] [<topic>|-delete]
"""
channel_name, topic = parse_topic_command(command)
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
if channel_name:
channel = team.channels.get(team.get_channel_map().get(channel_name))
else:
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
if not channel:
w.prnt(team.channel_buffer, "#{}: No such channel".format(channel_name))
return w.WEECHAT_RC_OK_EAT
if topic is None:
w.prnt(channel.channel_buffer, 'Topic for {} is "{}"'.format(channel.name, channel.topic))
else:
s = SlackRequest(team.token, "channels.setTopic", {"channel": channel.identifier, "topic": topic}, team_hash=team.team_hash)
EVENTROUTER.receive(s)
return w.WEECHAT_RC_OK_EAT
@slack_buffer_or_ignore
@utf8_decode
def whois_command_cb(data, current_buffer, command):
"""
Get real name of user
/whois <display_name>
"""
args = command.split()
if len(args) < 2:
w.prnt(current_buffer, "Not enough arguments")
return w.WEECHAT_RC_OK_EAT
user = args[1]
if (user.startswith('@')):
user = user[1:]
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
u = team.users.get(team.get_username_map().get(user))
if u:
team.buffer_prnt("[{}]: {}".format(user, u.real_name))
if u.profile.get("status_text"):
team.buffer_prnt("[{}]: {} {}".format(user, u.profile.get('status_emoji', ''), u.profile.get('status_text', '')))
team.buffer_prnt("[{}]: Real name: {}".format(user, u.profile.get('real_name_normalized', '')))
team.buffer_prnt("[{}]: Title: {}".format(user, u.profile.get('title', '')))
team.buffer_prnt("[{}]: Email: {}".format(user, u.profile.get('email', '')))
team.buffer_prnt("[{}]: Phone: {}".format(user, u.profile.get('phone', '')))
else:
team.buffer_prnt("[{}]: No such user".format(user))
return w.WEECHAT_RC_OK_EAT
@slack_buffer_or_ignore
@utf8_decode
def me_command_cb(data, current_buffer, args):
channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer)
message = args.split(' ', 1)[1]
channel.send_message(message, subtype='me_message')
return w.WEECHAT_RC_OK_EAT
@utf8_decode
def command_register(data, current_buffer, args):
"""
/slack register
Register a Slack team in wee-slack.
"""
CLIENT_ID = "2468770254.51917335286"
CLIENT_SECRET = "dcb7fe380a000cba0cca3169a5fe8d70" # Not really a secret.
if not args:
message = textwrap.dedent("""
#### Retrieving a Slack token via OAUTH ####
1) Paste this into a browser: https://slack.com/oauth/authorize?client_id=2468770254.51917335286&scope=client
2) Select the team you wish to access from wee-slack in your browser.
3) Click "Authorize" in the browser **IMPORTANT: the redirect will fail, this is expected**
If you get a message saying you are not authorized to install wee-slack, the team has restricted Slack app installation and you will have to request it from an admin. To do that, go to https://my.slack.com/apps/A1HSZ9V8E-wee-slack and click "Request to Install".
4) Copy the "code" portion of the URL to your clipboard
5) Return to weechat and run `/slack register [code]`
""").strip()
w.prnt("", message)
return w.WEECHAT_RC_OK_EAT
uri = (
"https://slack.com/api/oauth.access?"
"client_id={}&client_secret={}&code={}"
).format(CLIENT_ID, CLIENT_SECRET, args)
params = {'useragent': 'wee_slack {}'.format(SCRIPT_VERSION)}
w.hook_process_hashtable('url:', params, config.slack_timeout, "", "")
w.hook_process_hashtable("url:{}".format(uri), params, config.slack_timeout, "register_callback", "")
return w.WEECHAT_RC_OK_EAT
@utf8_decode
def register_callback(data, command, return_code, out, err):
if return_code != 0:
w.prnt("", "ERROR: problem when trying to get Slack OAuth token. Got return code {}. Err: ".format(return_code, err))
w.prnt("", "Check the network or proxy settings")
return w.WEECHAT_RC_OK_EAT
if len(out) <= 0:
w.prnt("", "ERROR: problem when trying to get Slack OAuth token. Got 0 length answer. Err: ".format(err))
w.prnt("", "Check the network or proxy settings")
return w.WEECHAT_RC_OK_EAT
d = json.loads(out)
if not d["ok"]:
w.prnt("",
"ERROR: Couldn't get Slack OAuth token: {}".format(d['error']))
return w.WEECHAT_RC_OK_EAT
if config.is_default('slack_api_token'):
w.config_set_plugin('slack_api_token', d['access_token'])
else:
# Add new token to existing set, joined by comma.
tok = config.get_string('slack_api_token')
w.config_set_plugin('slack_api_token',
','.join([tok, d['access_token']]))
w.prnt("", "Success! Added team \"%s\"" % (d['team_name'],))
w.prnt("", "Please reload wee-slack with: /python reload slack")
return w.WEECHAT_RC_OK_EAT
@slack_buffer_or_ignore
@utf8_decode
def msg_command_cb(data, current_buffer, args):
aargs = args.split(None, 2)
who = aargs[1]
if who == "*":
who = EVENTROUTER.weechat_controller.buffers[current_buffer].slack_name
else:
join_query_command_cb(data, current_buffer, '/query ' + who)
if len(aargs) > 2:
message = aargs[2]
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
cmap = team.get_channel_map()
if who in cmap:
channel = team.channels[cmap[who]]
channel.send_message(message)
return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
@utf8_decode
def command_channels(data, current_buffer, args):
"""
/slack channels
List the channels in the current team.
"""
e = EVENTROUTER
team = e.weechat_controller.buffers[current_buffer].team
team.buffer_prnt("Channels:")
for channel in team.get_channel_map():
team.buffer_prnt(" {}".format(channel))
return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
@utf8_decode
def command_users(data, current_buffer, args):
"""
/slack users
List the users in the current team.
"""
e = EVENTROUTER
team = e.weechat_controller.buffers[current_buffer].team
team.buffer_prnt("Users:")
for user in team.users.values():
team.buffer_prnt(" {:<25}({})".format(user.name, user.presence))
return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
@utf8_decode
def command_talk(data, current_buffer, args):
"""
/slack talk <user>[,<user2>[,<user3>...]]
Open a chat with the specified user(s).
"""
if not args:
w.prnt('', 'Usage: /slack talk <user>[,<user2>[,<user3>...]]')
return w.WEECHAT_RC_ERROR
return join_query_command_cb(data, current_buffer, '/query ' + args)
@slack_buffer_or_ignore
@utf8_decode
def join_query_command_cb(data, current_buffer, args):
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
split_args = args.split(' ', 1)
if len(split_args) < 2 or not split_args[1]:
w.prnt('', 'Too few arguments for command "{}" (help on command: /help {})'
.format(split_args[0], split_args[0].lstrip('/')))
return w.WEECHAT_RC_OK_EAT
query = split_args[1]
# Try finding the channel by name
channel = team.channels.get(team.get_channel_map().get(query.lstrip('#')))
# If the channel doesn't exist, try finding a DM or MPDM instead
if not channel:
if query.startswith('#'):
w.prnt('', 'ERROR: Unknown channel: {}'.format(query))
return w.WEECHAT_RC_OK_EAT
# Get the IDs of the users
all_users = team.get_username_map()
users = set()
for username in query.split(','):
user = all_users.get(username.lstrip('@'))
if not user:
w.prnt('', 'ERROR: Unknown user: {}'.format(username))
return w.WEECHAT_RC_OK_EAT
users.add(user)
if users:
if len(users) > 1:
channel_type = 'mpim'
# Add the current user since MPDMs include them as a member
users.add(team.myidentifier)
else:
channel_type = 'im'
channel = team.find_channel_by_members(users, channel_type=channel_type)
# If the DM or MPDM doesn't exist, create it
if not channel:
s = SlackRequest(team.token, SLACK_API_TRANSLATOR[channel_type]['join'],
{'users': ','.join(users)}, team_hash=team.team_hash)
EVENTROUTER.receive(s)
if channel:
channel.open()
if config.switch_buffer_on_join:
w.buffer_set(channel.channel_buffer, "display", "1")
return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
@utf8_decode
def command_showmuted(data, current_buffer, args):
"""
/slack showmuted
List the muted channels in the current team.
"""
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
muted_channels = [team.channels[key].name
for key in team.muted_channels if key in team.channels]
team.buffer_prnt("Muted channels: {}".format(', '.join(muted_channels)))
return w.WEECHAT_RC_OK_EAT
def get_msg_from_id(channel, msg_id):
if msg_id[0] == '$':
msg_id = msg_id[1:]
return channel.hashed_messages.get(msg_id)
@slack_buffer_required
@utf8_decode
def command_thread(data, current_buffer, args):
"""
/thread [message_id]
Open the thread for the message.
If no message id is specified the last thread in channel will be opened.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
if args:
msg = get_msg_from_id(channel, args)
if not msg:
w.prnt('', 'ERROR: Invalid id given, must be an existing id')
return w.WEECHAT_RC_OK_EAT
else:
for message in reversed(channel.messages.values()):
if type(message) == SlackMessage and len(message.submessages) > 0:
msg = message
break
else:
w.prnt('', 'ERROR: No threads found in channel')
return w.WEECHAT_RC_OK_EAT
msg.open_thread(switch=config.switch_buffer_on_join)
return w.WEECHAT_RC_OK_EAT
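# Illustrative usage (not part of the original script): with a message hash
# obtained from cursor/mouse mode (e.g. "$af1e"), the corresponding thread
# buffer can be opened with
#   /thread $af1e
# while a bare /thread opens the most recent thread in the channel.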
@slack_buffer_required
@utf8_decode
def command_reply(data, current_buffer, args):
"""
/reply <count/message_id> <text>
Reply in a thread on the message. Specify either the message id
or a count upwards to the message from the last message.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
try:
msg_id, text = args.split(None, 1)
except ValueError:
w.prnt('', 'Usage: /reply <count/id> <message>')
return w.WEECHAT_RC_OK_EAT
msg = get_msg_from_id(channel, msg_id)
if msg:
parent_id = str(msg.ts)
elif msg_id.isdigit() and int(msg_id) >= 1:
mkeys = channel.main_message_keys_reversed()
parent_id = str(next(islice(mkeys, int(msg_id) - 1, None)))
else:
w.prnt('', 'ERROR: Invalid id given, must be a number greater than 0 or an existing id')
return w.WEECHAT_RC_OK_EAT
channel.send_message(text, request_dict_ext={'thread_ts': parent_id})
return w.WEECHAT_RC_OK_EAT
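# Illustrative usage (not part of the original script): assuming the second
# newest message in the channel should get a threaded reply, either of
#   /reply 2 sounds good!
#   /reply $af1e sounds good!
# works; the count form walks upwards from the newest message, while the
# $-prefixed form targets an explicit message id from cursor/mouse mode.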
@slack_buffer_required
@utf8_decode
def command_rehistory(data, current_buffer, args):
"""
/rehistory
Reload the history in the current channel.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
channel.clear_messages()
channel.get_history()
return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
@utf8_decode
def command_hide(data, current_buffer, args):
"""
/hide
Hide the current channel if it is marked as distracting.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
name = channel.formatted_name(style='long_default')
if name in config.distracting_channels:
w.buffer_set(channel.channel_buffer, "hidden", "1")
return w.WEECHAT_RC_OK_EAT
@utf8_decode
def slack_command_cb(data, current_buffer, args):
split_args = args.split(' ', 1)
cmd_name = split_args[0]
cmd_args = split_args[1] if len(split_args) > 1 else ''
cmd = EVENTROUTER.cmds.get(cmd_name or 'help')
if not cmd:
w.prnt('', 'Command not found: ' + cmd_name)
return w.WEECHAT_RC_OK
return cmd(data, current_buffer, cmd_args)
@utf8_decode
def command_help(data, current_buffer, args):
"""
/slack help
Print help for /slack commands.
"""
if args:
cmd = EVENTROUTER.cmds.get(args)
if cmd:
cmds = {args: cmd}
else:
w.prnt('', 'Command not found: ' + args)
return w.WEECHAT_RC_OK
else:
cmds = EVENTROUTER.cmds
w.prnt('', 'Slack commands:')
for name, cmd in sorted(cmds.items()):
helptext = (cmd.__doc__ or '').rstrip()
w.prnt('', '{}:{}'.format(name, helptext))
return w.WEECHAT_RC_OK
@slack_buffer_required
@utf8_decode
def command_distracting(data, current_buffer, args):
"""
/slack distracting
Add or remove the current channel from distracting channels. You can hide
or unhide these channels with /slack nodistractions.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
fullname = channel.formatted_name(style="long_default")
if fullname in config.distracting_channels:
config.distracting_channels.remove(fullname)
else:
config.distracting_channels.append(fullname)
w.config_set_plugin('distracting_channels', ','.join(config.distracting_channels))
return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
@utf8_decode
def command_slash(data, current_buffer, args):
"""
/slack slash /customcommand arg1 arg2 arg3
Run a custom slack command.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
team = channel.team
split_args = args.split(' ', 1)
command = split_args[0]
text = split_args[1] if len(split_args) > 1 else ""
s = SlackRequest(team.token, "chat.command",
{"command": command, "text": text, 'channel': channel.identifier},
team_hash=team.team_hash, channel_identifier=channel.identifier)
EVENTROUTER.receive(s)
return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
@utf8_decode
def command_mute(data, current_buffer, args):
"""
/slack mute
Toggle mute on the current channel.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
team = channel.team
team.muted_channels ^= {channel.identifier}
muted_str = "Muted" if channel.identifier in team.muted_channels else "Unmuted"
team.buffer_prnt("{} channel {}".format(muted_str, channel.name))
s = SlackRequest(team.token, "users.prefs.set",
{"name": "muted_channels", "value": ",".join(team.muted_channels)},
team_hash=team.team_hash, channel_identifier=channel.identifier)
EVENTROUTER.receive(s)
return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
@utf8_decode
def command_linkarchive(data, current_buffer, args):
"""
/slack linkarchive [message_id]
Place a link to the channel or message in the input bar.
Use cursor or mouse mode to get the id.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
url = 'https://{}/'.format(channel.team.domain)
if isinstance(channel, SlackChannelCommon):
url += 'archives/{}/'.format(channel.identifier)
if args:
if args[0] == '$':
message_id = args[1:]
else:
message_id = args
message = channel.hashed_messages.get(message_id)
if message:
url += 'p{}{:0>6}'.format(message.ts.majorstr(), message.ts.minorstr())
if isinstance(message, SlackThreadMessage):
url += "?thread_ts={}&cid={}".format(message.parent_message.ts, channel.identifier)
else:
w.prnt('', 'ERROR: Invalid id given, must be an existing id')
return w.WEECHAT_RC_OK_EAT
w.command(current_buffer, "/input insert {}".format(url))
return w.WEECHAT_RC_OK_EAT
@utf8_decode
def command_nodistractions(data, current_buffer, args):
"""
/slack nodistractions
Hide or unhide all channels marked as distracting.
"""
global hide_distractions
hide_distractions = not hide_distractions
channels = [channel for channel in EVENTROUTER.weechat_controller.buffers.itervalues()
if channel in config.distracting_channels]
for channel in channels:
w.buffer_set(channel.channel_buffer, "hidden", str(int(hide_distractions)))
return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
@utf8_decode
def command_upload(data, current_buffer, args):
"""
/slack upload <filename>
Uploads a file to the current buffer.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
url = 'https://slack.com/api/files.upload'
file_path = os.path.expanduser(args)
if ' ' in file_path:
file_path = file_path.replace(' ', '\ ')
    # only http proxy is currently supported
proxy = ProxyWrapper()
proxy_string = proxy.curl()
form_fields = {
'file': '@' + file_path,
'channels': channel.identifier,
'token': channel.team.token,
}
if isinstance(channel, SlackThreadChannel):
form_fields['thread_ts'] = channel.parent_message.ts
curl_options = ' '.join(
'-F {}={}'.format(*field) for field in form_fields.iteritems())
command = 'curl {} {} {}'.format(curl_options, proxy_string, url)
w.hook_process(command, config.slack_timeout, '', '')
return w.WEECHAT_RC_OK_EAT
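# Illustrative usage (not part of the original script): a file is uploaded to
# the channel behind the current buffer with, for example,
#   /slack upload ~/Pictures/screenshot.png
# The path goes through os.path.expanduser, so "~" works as shown above.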
@utf8_decode
def away_command_cb(data, current_buffer, args):
all_servers, message = re.match('^/away( -all)? ?(.*)', args).groups()
if all_servers:
team_buffers = [team.channel_buffer for team in EVENTROUTER.teams.values()]
else:
team_buffers = [current_buffer]
for team_buffer in team_buffers:
if message:
command_away(data, team_buffer, args)
else:
command_back(data, team_buffer, args)
return w.WEECHAT_RC_OK
@slack_buffer_required
@utf8_decode
def command_away(data, current_buffer, args):
"""
/slack away
Sets your status as 'away'.
"""
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
s = SlackRequest(team.token, "users.setPresence", {"presence": "away"}, team_hash=team.team_hash)
EVENTROUTER.receive(s)
return w.WEECHAT_RC_OK
@slack_buffer_required
@utf8_decode
def command_status(data, current_buffer, args):
"""
/slack status [emoji [status_message]]
Lets you set your Slack Status (not to be confused with away/here).
"""
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
split_args = args.split(' ', 1)
emoji = split_args[0]
text = split_args[1] if len(split_args) > 1 else ""
profile = {"status_text": text, "status_emoji": emoji}
s = SlackRequest(team.token, "users.profile.set", {"profile": profile}, team_hash=team.team_hash)
EVENTROUTER.receive(s)
return w.WEECHAT_RC_OK
@utf8_decode
def line_event_cb(data, signal, hashtable):
buffer_pointer = hashtable["_buffer"]
line_timestamp = hashtable["_chat_line_date"]
line_time_id = hashtable["_chat_line_date_printed"]
channel = EVENTROUTER.weechat_controller.buffers.get(buffer_pointer)
if line_timestamp and line_time_id and isinstance(channel, SlackChannelCommon):
ts = SlackTS("{}.{}".format(line_timestamp, line_time_id))
message_hash = channel.hash_message(ts)
if message_hash is None:
return w.WEECHAT_RC_OK
message_hash = "$" + message_hash
if data == "message":
w.command(buffer_pointer, "/cursor stop")
w.command(buffer_pointer, "/input insert {}".format(message_hash))
elif data == "delete":
w.command(buffer_pointer, "/input send {}s///".format(message_hash))
elif data == "linkarchive":
w.command(buffer_pointer, "/cursor stop")
w.command(buffer_pointer, "/slack linkarchive {}".format(message_hash[1:]))
elif data == "reply":
w.command(buffer_pointer, "/cursor stop")
w.command(buffer_pointer, "/input insert /reply {}\\x20".format(message_hash))
elif data == "thread":
w.command(buffer_pointer, "/cursor stop")
w.command(buffer_pointer, "/thread {}".format(message_hash))
return w.WEECHAT_RC_OK
@slack_buffer_required
@utf8_decode
def command_back(data, current_buffer, args):
"""
/slack back
Sets your status as 'back'.
"""
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
s = SlackRequest(team.token, "users.setPresence", {"presence": "auto"}, team_hash=team.team_hash)
EVENTROUTER.receive(s)
return w.WEECHAT_RC_OK
@slack_buffer_required
@utf8_decode
def command_label(data, current_buffer, args):
"""
/label <name>
Rename a thread buffer. Note that this is not permanent. It will only last
as long as you keep the buffer and wee-slack open.
"""
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
if channel.type == 'thread':
new_name = " +" + args
channel.label = new_name
w.buffer_set(channel.channel_buffer, "short_name", new_name)
return w.WEECHAT_RC_OK
@utf8_decode
def set_unread_cb(data, current_buffer, command):
for channel in EVENTROUTER.weechat_controller.buffers.values():
channel.mark_read()
return w.WEECHAT_RC_OK
@slack_buffer_or_ignore
@utf8_decode
def set_unread_current_buffer_cb(data, current_buffer, command):
channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer)
channel.mark_read()
return w.WEECHAT_RC_OK
###### NEW EXCEPTIONS
class InvalidType(Exception):
"""
    Raised when type checking detects that an object of the wrong type
    is being used where it should not be.
"""
def __init__(self, type_str):
super(InvalidType, self).__init__(type_str)
###### New but probably old and needs to be migrated
def closed_slack_debug_buffer_cb(data, buffer):
global slack_debug
slack_debug = None
return w.WEECHAT_RC_OK
def create_slack_debug_buffer():
global slack_debug, debug_string
if slack_debug is not None:
w.buffer_set(slack_debug, "display", "1")
else:
debug_string = None
slack_debug = w.buffer_new("slack-debug", "", "", "closed_slack_debug_buffer_cb", "")
w.buffer_set(slack_debug, "notify", "0")
def load_emoji():
try:
DIR = w.info_get("weechat_dir", "")
with open('{}/weemoji.json'.format(DIR), 'r') as ef:
return json.loads(ef.read())["emoji"]
except:
dbg("Couldn't load emoji list: {}".format(format_exc_only()), 5)
return []
def setup_hooks():
w.bar_item_new('slack_typing_notice', '(extra)typing_bar_item_cb', '')
w.hook_timer(5000, 0, 0, "ws_ping_cb", "")
w.hook_timer(1000, 0, 0, "typing_update_cb", "")
w.hook_timer(1000, 0, 0, "buffer_list_update_callback", "EVENTROUTER")
w.hook_timer(3000, 0, 0, "reconnect_callback", "EVENTROUTER")
w.hook_timer(1000 * 60 * 5, 0, 0, "slack_never_away_cb", "")
w.hook_signal('buffer_closing', "buffer_closing_callback", "")
w.hook_signal('buffer_switch', "buffer_switch_callback", "EVENTROUTER")
w.hook_signal('window_switch', "buffer_switch_callback", "EVENTROUTER")
w.hook_signal('quit', "quit_notification_callback", "")
if config.send_typing_notice:
w.hook_signal('input_text_changed', "typing_notification_cb", "")
w.hook_command(
# Command name and description
'slack', 'Plugin to allow typing notification and sync of read markers for slack.com',
# Usage
'[command] [command options]',
# Description of arguments
'Commands:\n' +
'\n'.join(sorted(EVENTROUTER.cmds.keys())) +
'\nUse /slack help [command] to find out more\n',
# Completions
'|'.join(EVENTROUTER.cmds.keys()),
# Function name
'slack_command_cb', '')
w.hook_command_run('/me', 'me_command_cb', '')
w.hook_command_run('/query', 'join_query_command_cb', '')
w.hook_command_run('/join', 'join_query_command_cb', '')
w.hook_command_run('/part', 'part_command_cb', '')
w.hook_command_run('/topic', 'topic_command_cb', '')
w.hook_command_run('/msg', 'msg_command_cb', '')
w.hook_command_run("/input complete_next", "complete_next_cb", "")
w.hook_command_run("/input set_unread", "set_unread_cb", "")
w.hook_command_run("/input set_unread_current_buffer", "set_unread_current_buffer_cb", "")
w.hook_command_run('/away', 'away_command_cb', '')
w.hook_command_run('/whois', 'whois_command_cb', '')
for cmd in ['hide', 'label', 'rehistory', 'reply', 'thread']:
doc = EVENTROUTER.cmds[cmd].__doc__.strip().split('\n', 1)
args = ' '.join(doc[0].split()[1:])
description = textwrap.dedent(doc[1])
w.hook_command(cmd, description, args, '', '', 'command_' + cmd, '')
w.hook_completion("nicks", "complete @-nicks for slack", "nick_completion_cb", "")
w.hook_completion("emoji", "complete :emoji: for slack", "emoji_completion_cb", "")
w.key_bind("mouse", {
"@chat(python.*):button2": "hsignal:slack_mouse",
})
w.key_bind("cursor", {
"@chat(python.*):D": "hsignal:slack_cursor_delete",
"@chat(python.*):L": "hsignal:slack_cursor_linkarchive",
"@chat(python.*):M": "hsignal:slack_cursor_message",
"@chat(python.*):R": "hsignal:slack_cursor_reply",
"@chat(python.*):T": "hsignal:slack_cursor_thread",
})
w.hook_hsignal("slack_mouse", "line_event_cb", "message")
w.hook_hsignal("slack_cursor_delete", "line_event_cb", "delete")
w.hook_hsignal("slack_cursor_linkarchive", "line_event_cb", "linkarchive")
w.hook_hsignal("slack_cursor_message", "line_event_cb", "message")
w.hook_hsignal("slack_cursor_reply", "line_event_cb", "reply")
w.hook_hsignal("slack_cursor_thread", "line_event_cb", "thread")
# Hooks to fix/implement
# w.hook_signal('buffer_opened', "buffer_opened_cb", "")
# w.hook_signal('window_scrolled', "scrolled_cb", "")
# w.hook_timer(3000, 0, 0, "slack_connection_persistence_cb", "")
##### END NEW
def dbg(message, level=0, main_buffer=False, fout=False):
"""
    Send debug output to the slack-debug buffer and optionally write to a file.
"""
# TODO: do this smarter
# return
if level >= config.debug_level:
global debug_string
message = "DEBUG: {}".format(message)
if fout:
file('/tmp/debug.log', 'a+').writelines(message + '\n')
if main_buffer:
# w.prnt("", "---------")
w.prnt("", "slack: " + message)
else:
if slack_debug and (not debug_string or debug_string in message):
# w.prnt(slack_debug, "---------")
w.prnt(slack_debug, message)
###### Config code
class PluginConfig(object):
Setting = collections.namedtuple('Setting', ['default', 'desc'])
# Default settings.
# These are, initially, each a (default, desc) tuple; the former is the
# default value of the setting, in the (string) format that weechat
# expects, and the latter is the user-friendly description of the setting.
# At __init__ time these values are extracted, the description is used to
# set or update the setting description for use with /help, and the default
# value is used to set the default for any settings not already defined.
# Following this procedure, the keys remain the same, but the values are
# the real (python) values of the settings.
default_settings = {
'auto_open_threads': Setting(
default='false',
            desc='Automatically open threads when mentioned or in'
                 ' response to own messages.'),
'background_load_all_history': Setting(
default='false',
desc='Load history for each channel in the background as soon as it'
' opens, rather than waiting for the user to look at it.'),
'channel_name_typing_indicator': Setting(
default='true',
desc='Change the prefix of a channel from # to > when someone is'
' typing in it. Note that this will (temporarily) affect the sort'
' order if you sort buffers by name rather than by number.'),
'color_buflist_muted_channels': Setting(
default='darkgray',
desc='Color to use for muted channels in the buflist'),
'color_edited_suffix': Setting(
default='095',
desc='Color to use for (edited) suffix on messages that have been edited.'),
'color_reaction_suffix': Setting(
default='darkgray',
desc='Color to use for the [:wave:(@user)] suffix on messages that'
' have reactions attached to them.'),
'color_thread_suffix': Setting(
default='lightcyan',
desc='Color to use for the [thread: XXX] suffix on messages that'
' have threads attached to them.'),
'colorize_private_chats': Setting(
default='false',
desc='Whether to use nick-colors in DM windows.'),
'debug_mode': Setting(
default='false',
desc='Open a dedicated buffer for debug messages and start logging'
                 ' to it. How verbose the logging is depends on debug_level.'),
'debug_level': Setting(
default='3',
desc='Show only this level of debug info (or higher) when'
' debug_mode is on. Lower levels -> more messages.'),
'distracting_channels': Setting(
default='',
desc='List of channels to hide.'),
'external_user_suffix': Setting(
default='*',
desc='The suffix appended to nicks to indicate external users.'),
'files_download_location': Setting(
default='',
desc='If set, file attachments will be automatically downloaded'
' to this location.'),
'group_name_prefix': Setting(
default='&',
desc='The prefix of buffer names for groups (private channels).'),
'map_underline_to': Setting(
default='_',
desc='When sending underlined text to slack, use this formatting'
' character for it. The default ("_") sends it as italics. Use'
' "*" to send bold instead.'),
'muted_channels_activity': Setting(
default='personal_highlights',
desc="Control which activity you see from muted channels, either"
" none, personal_highlights, all_highlights or all. none: Don't"
" show any activity. personal_highlights: Only show personal"
" highlights, i.e. not @channel and @here. all_highlights: Show"
" all highlights, but not other messages. all: Show all activity,"
" like other channels."),
'never_away': Setting(
default='false',
desc='Poke Slack every five minutes so that it never marks you "away".'),
'record_events': Setting(
default='false',
desc='Log all traffic from Slack to disk as JSON.'),
'render_bold_as': Setting(
default='bold',
desc='When receiving bold text from Slack, render it as this in weechat.'),
'render_italic_as': Setting(
default='italic',
            desc='When receiving italic text from Slack, render it as this in weechat.'
' If your terminal lacks italic support, consider using "underline" instead.'),
'send_typing_notice': Setting(
default='true',
desc='Alert Slack users when you are typing a message in the input bar '
'(Requires reload)'),
'server_aliases': Setting(
default='',
desc='A comma separated list of `subdomain:alias` pairs. The alias'
' will be used instead of the actual name of the slack (in buffer'
' names, logging, etc). E.g `work:no_fun_allowed` would make your'
' work slack show up as `no_fun_allowed` rather than `work.slack.com`.'),
'shared_name_prefix': Setting(
default='%',
desc='The prefix of buffer names for shared channels.'),
'short_buffer_names': Setting(
default='false',
desc='Use `foo.#channel` rather than `foo.slack.com.#channel` as the'
' internal name for Slack buffers.'),
'show_buflist_presence': Setting(
default='true',
desc='Display a `+` character in the buffer list for present users.'),
'show_reaction_nicks': Setting(
default='false',
desc='Display the name of the reacting user(s) alongside each reactji.'),
'slack_api_token': Setting(
default='INSERT VALID KEY HERE!',
desc='List of Slack API tokens, one per Slack instance you want to'
' connect to. See the README for details on how to get these.'),
'slack_timeout': Setting(
default='20000',
desc='How long (ms) to wait when communicating with Slack.'),
'switch_buffer_on_join': Setting(
default='true',
desc='When /joining a channel, automatically switch to it as well.'),
'thread_messages_in_channel': Setting(
default='false',
desc='When enabled shows thread messages in the parent channel.'),
'unfurl_ignore_alt_text': Setting(
default='false',
desc='When displaying ("unfurling") links to channels/users/etc,'
' ignore the "alt text" present in the message and instead use the'
' canonical name of the thing being linked to.'),
'unfurl_auto_link_display': Setting(
default='both',
desc='When displaying ("unfurling") links to channels/users/etc,'
' determine what is displayed when the text matches the url'
' without the protocol. This happens when Slack automatically'
' creates links, e.g. from words separated by dots or email'
' addresses. Set it to "text" to only display the text written by'
' the user, "url" to only display the url or "both" (the default)'
' to display both.'),
'unhide_buffers_with_activity': Setting(
default='false',
desc='When activity occurs on a buffer, unhide it even if it was'
' previously hidden (whether by the user or by the'
' distracting_channels setting).'),
}
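    # Illustrative example (not part of the original class): every key above
    # becomes a regular WeeChat plugin option, so, assuming SCRIPT_NAME is
    # "slack", a value can be changed at runtime with e.g.
    #   /set plugins.var.python.slack.switch_buffer_on_join false
    # and config_changed() below is invoked through the hook_config callback
    # registered in the __main__ section.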
# Set missing settings to their defaults. Load non-missing settings from
# weechat configs.
def __init__(self):
self.settings = {}
# Set all descriptions, replace the values in the dict with the
# default setting value rather than the (setting,desc) tuple.
# Use items() rather than iteritems() so we don't need to worry about
# invalidating the iterator.
for key, (default, desc) in self.default_settings.items():
w.config_set_desc_plugin(key, desc)
self.settings[key] = default
# Migrate settings from old versions of Weeslack...
self.migrate()
# ...and then set anything left over from the defaults.
for key, default in self.settings.iteritems():
if not w.config_get_plugin(key):
w.config_set_plugin(key, default)
self.config_changed(None, None, None)
def __str__(self):
return "".join([x + "\t" + str(self.settings[x]) + "\n" for x in self.settings.keys()])
def config_changed(self, data, key, value):
for key in self.settings:
self.settings[key] = self.fetch_setting(key)
if self.debug_mode:
create_slack_debug_buffer()
return w.WEECHAT_RC_OK
def fetch_setting(self, key):
if hasattr(self, 'get_' + key):
try:
return getattr(self, 'get_' + key)(key)
except:
return self.settings[key]
else:
# Most settings are on/off, so make get_boolean the default
return self.get_boolean(key)
def __getattr__(self, key):
return self.settings[key]
def get_boolean(self, key):
return w.config_string_to_boolean(w.config_get_plugin(key))
def get_string(self, key):
return w.config_get_plugin(key)
def get_int(self, key):
return int(w.config_get_plugin(key))
def is_default(self, key):
default = self.default_settings.get(key).default
return w.config_get_plugin(key) == default
get_color_buflist_muted_channels = get_string
get_color_edited_suffix = get_string
get_color_reaction_suffix = get_string
get_color_thread_suffix = get_string
get_debug_level = get_int
get_external_user_suffix = get_string
get_files_download_location = get_string
get_group_name_prefix = get_string
get_map_underline_to = get_string
get_muted_channels_activity = get_string
get_render_bold_as = get_string
get_render_italic_as = get_string
get_shared_name_prefix = get_string
get_slack_timeout = get_int
get_unfurl_auto_link_display = get_string
def get_distracting_channels(self, key):
return [x.strip() for x in w.config_get_plugin(key).split(',') if x]
def get_server_aliases(self, key):
alias_list = w.config_get_plugin(key)
return dict(item.split(":") for item in alias_list.split(",") if ':' in item)
def get_slack_api_token(self, key):
token = w.config_get_plugin("slack_api_token")
if token.startswith('${sec.data'):
return w.string_eval_expression(token, {}, {}, {})
else:
return token
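    # Illustrative example (not part of the original class): the token can be
    # kept out of plain-text configuration by using WeeChat's secure data
    # storage and referencing it here, e.g.
    #   /secure set slack_token xoxp-...
    #   /set plugins.var.python.slack.slack_api_token ${sec.data.slack_token}
    # string_eval_expression() above then resolves the ${sec.data...} value.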
def migrate(self):
"""
This is to migrate the extension name from slack_extension to slack
"""
if not w.config_get_plugin("migrated"):
for k in self.settings.keys():
if not w.config_is_set_plugin(k):
p = w.config_get("plugins.var.python.slack_extension.{}".format(k))
data = w.config_string(p)
if data != "":
w.config_set_plugin(k, data)
w.config_set_plugin("migrated", "true")
old_thread_color_config = w.config_get_plugin("thread_suffix_color")
new_thread_color_config = w.config_get_plugin("color_thread_suffix")
if old_thread_color_config and not new_thread_color_config:
w.config_set_plugin("color_thread_suffix", old_thread_color_config)
# To trace execution, call `setup_trace()` at startup
# and add sys.settrace(trace_calls) to the function you want to trace
def setup_trace():
global f
now = time.time()
f = open('{}/{}-trace.json'.format(RECORD_DIR, now), 'w')
def trace_calls(frame, event, arg):
global f
if event != 'call':
return
co = frame.f_code
func_name = co.co_name
if func_name == 'write':
# Ignore write() calls from print statements
return
func_line_no = frame.f_lineno
func_filename = co.co_filename
caller = frame.f_back
caller_line_no = caller.f_lineno
caller_filename = caller.f_code.co_filename
print >> f, 'Call to %s on line %s of %s from line %s of %s' % \
(func_name, func_line_no, func_filename,
caller_line_no, caller_filename)
f.flush()
return
def initiate_connection(token, retries=3):
return SlackRequest(token,
'rtm.start',
{"batch_presence_aware": 1},
retries=retries)
if __name__ == "__main__":
w = WeechatWrapper(weechat)
if w.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE,
SCRIPT_DESC, "script_unloaded", ""):
weechat_version = w.info_get("version_number", "") or 0
if int(weechat_version) < 0x1030000:
w.prnt("", "\nERROR: Weechat version 1.3+ is required to use {}.\n\n".format(SCRIPT_NAME))
else:
global EVENTROUTER
EVENTROUTER = EventRouter()
# setup_trace()
# WEECHAT_HOME = w.info_get("weechat_dir", "")
# Global var section
slack_debug = None
config = PluginConfig()
config_changed_cb = config.config_changed
typing_timer = time.time()
# domain = None
# previous_buffer = None
# slack_buffer = None
# never_away = False
hide_distractions = False
# hotlist = w.infolist_get("hotlist", "", "")
# main_weechat_buffer = w.info_get("irc_buffer", "{}.{}".format(domain, "DOESNOTEXIST!@#$"))
w.hook_config("plugins.var.python." + SCRIPT_NAME + ".*", "config_changed_cb", "")
w.hook_modifier("input_text_for_buffer", "input_text_for_buffer_cb", "")
EMOJI.extend(load_emoji())
setup_hooks()
# attach to the weechat hooks we need
tokens = map(string.strip, config.slack_api_token.split(','))
w.prnt('', 'Connecting to {} slack team{}.'
.format(len(tokens), '' if len(tokens) == 1 else 's'))
for t in tokens:
s = initiate_connection(t)
EVENTROUTER.receive(s)
if config.record_events:
EVENTROUTER.record()
EVENTROUTER.handle_next()
# END attach to the weechat hooks we need
hdata = Hdata(w)
|
mit
| -8,762,472,178,675,239,000 | 37.339551 | 277 | 0.599475 | false | 3.764751 | true | false | false |
Brocade-OpenSource/OpenStack-DNRM-Nova
|
nova/compute/api.py
|
1
|
152761
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012-2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles all requests relating to compute resources (e.g. guest VMs,
networking and storage of VMs, and compute hosts on which they run)."""
import base64
import functools
import re
import string
import uuid
from oslo.config import cfg
from nova import availability_zones
from nova import block_device
from nova.cells import opts as cells_opts
from nova.compute import flavors
from nova.compute import instance_actions
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import crypto
from nova import db
from nova.db import base
from nova import exception
from nova import hooks
from nova.image import glance
from nova import network
from nova.network.security_group import openstack_driver
from nova.network.security_group import security_group_base
from nova import notifications
from nova.objects import base as obj_base
from nova.objects import instance as instance_obj
from nova.openstack.common import excutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common.notifier import api as notifier
from nova.openstack.common import strutils
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
import nova.policy
from nova import quota
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova import servicegroup
from nova import utils
from nova import volume
LOG = logging.getLogger(__name__)
def publisher_id(aggregate_identify=None):
return notifier.publisher_id("aggregate", aggregate_identify)
compute_opts = [
cfg.BoolOpt('allow_resize_to_same_host',
default=False,
help='Allow destination machine to match source for resize. '
'Useful when testing in single-host environments.'),
cfg.BoolOpt('allow_migrate_to_same_host',
default=False,
help='Allow migrate machine to the same host. '
'Useful when testing in single-host environments.'),
cfg.StrOpt('default_schedule_zone',
default=None,
help='availability zone to use when user doesn\'t specify one'),
cfg.ListOpt('non_inheritable_image_properties',
default=['cache_in_nova',
'bittorrent'],
help='These are image properties which a snapshot should not'
' inherit from an instance'),
cfg.StrOpt('null_kernel',
default='nokernel',
help='kernel image that indicates not to use a kernel, but to '
'use a raw disk image instead'),
cfg.StrOpt('multi_instance_display_name_template',
default='%(name)s-%(uuid)s',
help='When creating multiple instances with a single request '
'using the os-multiple-create API extension, this '
'template will be used to build the display name for '
'each instance. The benefit is that the instances '
'end up with different hostnames. To restore legacy '
'behavior of every instance having the same name, set '
'this option to "%(name)s". Valid keys for the '
'template are: name, uuid, count.'),
]
CONF = cfg.CONF
CONF.register_opts(compute_opts)
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
CONF.import_opt('enable', 'nova.cells.opts', group='cells')
MAX_USERDATA_SIZE = 65535
QUOTAS = quota.QUOTAS
RO_SECURITY_GROUPS = ['default']
SM_IMAGE_PROP_PREFIX = "image_"
def check_instance_state(vm_state=None, task_state=(None,),
must_have_launched=True):
"""Decorator to check VM and/or task state before entry to API functions.
    If the instance is in the wrong state, or has not been successfully started
    at least once, the wrapper will raise an exception.
"""
if vm_state is not None and not isinstance(vm_state, set):
vm_state = set(vm_state)
if task_state is not None and not isinstance(task_state, set):
task_state = set(task_state)
def outer(f):
@functools.wraps(f)
def inner(self, context, instance, *args, **kw):
if vm_state is not None and instance['vm_state'] not in vm_state:
raise exception.InstanceInvalidState(
attr='vm_state',
instance_uuid=instance['uuid'],
state=instance['vm_state'],
method=f.__name__)
if (task_state is not None and
instance['task_state'] not in task_state):
raise exception.InstanceInvalidState(
attr='task_state',
instance_uuid=instance['uuid'],
state=instance['task_state'],
method=f.__name__)
if must_have_launched and not instance['launched_at']:
raise exception.InstanceInvalidState(
attr=None,
not_launched=True,
instance_uuid=instance['uuid'],
state=instance['vm_state'],
method=f.__name__)
return f(self, context, instance, *args, **kw)
return inner
return outer
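# Illustrative sketch (not part of the original module): API methods defined
# further down are typically guarded like
#
#   @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED],
#                         task_state=[None])
#   def resize(self, context, instance, *args, **kwargs):
#       ...
#
# so that a call against an instance that is mid-task, or in a disallowed
# vm_state, raises InstanceInvalidState instead of proceeding.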
def check_instance_host(function):
@functools.wraps(function)
def wrapped(self, context, instance, *args, **kwargs):
if not instance['host']:
raise exception.InstanceNotReady(instance_id=instance['uuid'])
return function(self, context, instance, *args, **kwargs)
return wrapped
def check_instance_lock(function):
@functools.wraps(function)
def inner(self, context, instance, *args, **kwargs):
if instance['locked'] and not context.is_admin:
raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
return function(self, context, instance, *args, **kwargs)
return inner
def policy_decorator(scope):
"""Check corresponding policy prior of wrapped method to execution."""
def outer(func):
@functools.wraps(func)
def wrapped(self, context, target, *args, **kwargs):
check_policy(context, func.__name__, target, scope)
return func(self, context, target, *args, **kwargs)
return wrapped
return outer
wrap_check_policy = policy_decorator(scope='compute')
wrap_check_security_groups_policy = policy_decorator(
scope='compute:security_groups')
def check_policy(context, action, target, scope='compute'):
_action = '%s:%s' % (scope, action)
nova.policy.enforce(context, _action, target)
def check_instance_cell(fn):
def _wrapped(self, context, instance, *args, **kwargs):
self._validate_cell(instance, fn.__name__)
return fn(self, context, instance, *args, **kwargs)
_wrapped.__name__ = fn.__name__
return _wrapped
class API(base.Base):
"""API for interacting with the compute manager."""
def __init__(self, image_service=None, network_api=None, volume_api=None,
security_group_api=None, **kwargs):
self.image_service = (image_service or
glance.get_default_image_service())
self.network_api = network_api or network.API()
self.volume_api = volume_api or volume.API()
self.security_group_api = (security_group_api or
openstack_driver.get_openstack_security_group_driver())
self.consoleauth_rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self._compute_task_api = None
self.servicegroup_api = servicegroup.API()
super(API, self).__init__(**kwargs)
@property
def compute_task_api(self):
if self._compute_task_api is None:
# TODO(alaski): Remove calls into here from conductor manager so
# that this isn't necessary. #1180540
from nova import conductor
self._compute_task_api = conductor.ComputeTaskAPI()
return self._compute_task_api
@property
def cell_type(self):
try:
return getattr(self, '_cell_type')
except AttributeError:
self._cell_type = cells_opts.get_cell_type()
return self._cell_type
def _cell_read_only(self, cell_name):
"""Is the target cell in a read-only mode?"""
# FIXME(comstud): Add support for this.
return False
def _validate_cell(self, instance, method):
if self.cell_type != 'api':
return
cell_name = instance['cell_name']
if not cell_name:
raise exception.InstanceUnknownCell(
instance_uuid=instance['uuid'])
if self._cell_read_only(cell_name):
raise exception.InstanceInvalidState(
attr="vm_state",
instance_uuid=instance['uuid'],
state="temporary_readonly",
method=method)
def _instance_update(self, context, instance_uuid, **kwargs):
"""Update an instance in the database using kwargs as value."""
(old_ref, instance_ref) = self.db.instance_update_and_get_original(
context, instance_uuid, kwargs)
notifications.send_update(context, old_ref, instance_ref, 'api')
return instance_ref
def _record_action_start(self, context, instance, action):
act = compute_utils.pack_action_start(context, instance['uuid'],
action)
self.db.action_start(context, act)
def _check_injected_file_quota(self, context, injected_files):
"""Enforce quota limits on injected files.
Raises a QuotaError if any limit is exceeded.
"""
if injected_files is None:
return
# Check number of files first
try:
QUOTAS.limit_check(context, injected_files=len(injected_files))
except exception.OverQuota:
raise exception.OnsetFileLimitExceeded()
# OK, now count path and content lengths; we're looking for
# the max...
max_path = 0
max_content = 0
for path, content in injected_files:
max_path = max(max_path, len(path))
max_content = max(max_content, len(content))
try:
QUOTAS.limit_check(context, injected_file_path_bytes=max_path,
injected_file_content_bytes=max_content)
except exception.OverQuota as exc:
# Favor path limit over content limit for reporting
# purposes
if 'injected_file_path_bytes' in exc.kwargs['overs']:
raise exception.OnsetFilePathLimitExceeded()
else:
raise exception.OnsetFileContentLimitExceeded()
def _check_num_instances_quota(self, context, instance_type, min_count,
max_count):
"""Enforce quota limits on number of instances created."""
# Determine requested cores and ram
req_cores = max_count * instance_type['vcpus']
req_ram = max_count * instance_type['memory_mb']
# Check the quota
try:
reservations = QUOTAS.reserve(context, instances=max_count,
cores=req_cores, ram=req_ram)
except exception.OverQuota as exc:
# OK, we exceeded quota; let's figure out why...
quotas = exc.kwargs['quotas']
usages = exc.kwargs['usages']
overs = exc.kwargs['overs']
headroom = dict((res, quotas[res] -
(usages[res]['in_use'] + usages[res]['reserved']))
for res in quotas.keys())
allowed = headroom['instances']
# Reduce 'allowed' instances in line with the cores & ram headroom
if instance_type['vcpus']:
allowed = min(allowed,
headroom['cores'] // instance_type['vcpus'])
if instance_type['memory_mb']:
allowed = min(allowed,
headroom['ram'] // instance_type['memory_mb'])
# Convert to the appropriate exception message
if allowed <= 0:
msg = _("Cannot run any more instances of this type.")
allowed = 0
elif min_count <= allowed <= max_count:
# We're actually OK, but still need reservations
return self._check_num_instances_quota(context, instance_type,
min_count, allowed)
else:
msg = (_("Can only run %s more instances of this type.") %
allowed)
resource = overs[0]
used = quotas[resource] - headroom[resource]
total_allowed = used + headroom[resource]
overs = ','.join(overs)
LOG.warn(_("%(overs)s quota exceeded for %(pid)s,"
" tried to run %(min_count)s instances. %(msg)s"),
{'overs': overs, 'pid': context.project_id,
'min_count': min_count, 'msg': msg})
requested = dict(instances=min_count, cores=req_cores, ram=req_ram)
raise exception.TooManyInstances(overs=overs,
req=requested[resource],
used=used, allowed=total_allowed,
resource=resource)
return max_count, reservations
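    # Worked example (illustrative, not part of the original module): with
    # quotas of instances=10, cores=20 and ram=51200, 8 instances already
    # using 16 cores / 32768 MB, a request for a 4-vcpu, 8192 MB flavor
    # leaves headroom of 2 instances, 4 cores and 18432 MB, so 'allowed'
    # becomes min(2, 4 // 4, 18432 // 8192) = 1 in the over-quota branch.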
def _check_metadata_properties_quota(self, context, metadata=None):
"""Enforce quota limits on metadata properties."""
if not metadata:
metadata = {}
num_metadata = len(metadata)
try:
QUOTAS.limit_check(context, metadata_items=num_metadata)
except exception.OverQuota as exc:
LOG.warn(_("Quota exceeded for %(pid)s, tried to set "
"%(num_metadata)s metadata properties"),
{'pid': context.project_id,
'num_metadata': num_metadata})
quota_metadata = exc.kwargs['quotas']['metadata_items']
raise exception.MetadataLimitExceeded(allowed=quota_metadata)
# Because metadata is stored in the DB, we hard-code the size limits
# In future, we may support more variable length strings, so we act
# as if this is quota-controlled for forwards compatibility
for k, v in metadata.iteritems():
if len(k) == 0:
msg = _("Metadata property key blank")
LOG.warn(msg)
raise exception.InvalidMetadata(reason=msg)
if len(k) > 255:
msg = _("Metadata property key greater than 255 characters")
LOG.warn(msg)
raise exception.InvalidMetadataSize(reason=msg)
if len(v) > 255:
msg = _("Metadata property value greater than 255 characters")
LOG.warn(msg)
raise exception.InvalidMetadataSize(reason=msg)
def _check_requested_secgroups(self, context, secgroups):
"""
Check if the security group requested exists and belongs to
the project.
"""
for secgroup in secgroups:
# NOTE(sdague): default is handled special
if secgroup == "default":
continue
if not self.security_group_api.get(context, secgroup):
raise exception.SecurityGroupNotFoundForProject(
project_id=context.project_id, security_group_id=secgroup)
def _check_requested_networks(self, context, requested_networks):
"""
        Check if the requested networks belong to the project
        and the fixed IP address for each network provided is within
        the same network block
"""
self.network_api.validate_networks(context, requested_networks)
@staticmethod
def _handle_kernel_and_ramdisk(context, kernel_id, ramdisk_id, image):
"""Choose kernel and ramdisk appropriate for the instance.
The kernel and ramdisk can be chosen in one of three ways:
1. Passed in with create-instance request.
2. Inherited from image.
3. Forced to None by using `null_kernel` FLAG.
"""
# Inherit from image if not specified
image_properties = image.get('properties', {})
if kernel_id is None:
kernel_id = image_properties.get('kernel_id')
if ramdisk_id is None:
ramdisk_id = image_properties.get('ramdisk_id')
# Force to None if using null_kernel
if kernel_id == str(CONF.null_kernel):
kernel_id = None
ramdisk_id = None
# Verify kernel and ramdisk exist (fail-fast)
if kernel_id is not None:
image_service, kernel_id = glance.get_remote_image_service(
context, kernel_id)
image_service.show(context, kernel_id)
if ramdisk_id is not None:
image_service, ramdisk_id = glance.get_remote_image_service(
context, ramdisk_id)
image_service.show(context, ramdisk_id)
return kernel_id, ramdisk_id
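    # Illustrative note (not part of the original module): an explicitly
    # passed kernel_id/ramdisk_id wins over the image's properties, while
    # passing kernel_id equal to CONF.null_kernel (default "nokernel")
    # forces both ids to None, as is usual for whole-disk images.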
@staticmethod
def _handle_availability_zone(availability_zone):
# NOTE(vish): We have a legacy hack to allow admins to specify hosts
# via az using az:host:node. It might be nice to expose an
# api to specify specific hosts to force onto, but for
# now it just supports this legacy hack.
# NOTE(deva): It is also possible to specify az::node, in which case
# the host manager will determine the correct host.
forced_host = None
forced_node = None
if availability_zone and ':' in availability_zone:
c = availability_zone.count(':')
if c == 1:
availability_zone, forced_host = availability_zone.split(':')
elif c == 2:
if '::' in availability_zone:
availability_zone, forced_node = \
availability_zone.split('::')
else:
availability_zone, forced_host, forced_node = \
availability_zone.split(':')
else:
raise exception.InvalidInput(
reason="Unable to parse availability_zone")
if not availability_zone:
availability_zone = CONF.default_schedule_zone
return availability_zone, forced_host, forced_node
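    # Illustrative examples (not part of the original module) of the accepted
    # availability_zone forms and the tuple this helper returns:
    #   "nova"              -> ("nova", None, None)
    #   "nova:host1"        -> ("nova", "host1", None)
    #   "nova::node1"       -> ("nova", None, "node1")
    #   "nova:host1:node1"  -> ("nova", "host1", "node1")
    # An empty zone falls back to CONF.default_schedule_zone.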
@staticmethod
def _inherit_properties_from_image(image, auto_disk_config):
image_properties = image.get('properties', {})
def prop(prop_, prop_type=None):
"""Return the value of an image property."""
value = image_properties.get(prop_)
if value is not None:
if prop_type == 'bool':
value = strutils.bool_from_string(value)
return value
options_from_image = {'os_type': prop('os_type'),
'architecture': prop('architecture'),
'vm_mode': prop('vm_mode')}
# If instance doesn't have auto_disk_config overridden by request, use
# whatever the image indicates
if auto_disk_config is None:
auto_disk_config = prop('auto_disk_config', prop_type='bool')
options_from_image['auto_disk_config'] = auto_disk_config
return options_from_image
def _apply_instance_name_template(self, context, instance, index):
params = {
'uuid': instance['uuid'],
'name': instance['display_name'],
'count': index + 1,
}
try:
new_name = (CONF.multi_instance_display_name_template %
params)
except (KeyError, TypeError):
LOG.exception(_('Failed to set instance name using '
'multi_instance_display_name_template.'))
new_name = instance['display_name']
updates = {'display_name': new_name}
if not instance.get('hostname'):
updates['hostname'] = utils.sanitize_hostname(new_name)
instance = self.db.instance_update(context,
instance['uuid'], updates)
return instance
def _check_config_drive(self, config_drive):
if config_drive:
try:
bool_val = strutils.bool_from_string(config_drive,
strict=True)
except ValueError:
raise exception.ConfigDriveInvalidValue(option=config_drive)
else:
bool_val = False
# FIXME(comstud): Bug ID 1193438 filed for this. This looks silly,
# but this is because the config drive column is a String. False
# is represented by using an empty string. And for whatever
# reason, we rely on the DB to cast True to a String.
return True if bool_val else ''
def _check_requested_image(self, context, image_id, image, instance_type):
if not image:
# Image checks don't apply when building from volume
return
if image['status'] != 'active':
raise exception.ImageNotActive(image_id=image_id)
if instance_type['memory_mb'] < int(image.get('min_ram') or 0):
raise exception.InstanceTypeMemoryTooSmall()
# NOTE(johannes): root_gb is allowed to be 0 for legacy reasons
# since libvirt interpreted the value differently than other
# drivers. A value of 0 means don't check size.
root_gb = instance_type['root_gb']
if root_gb:
if int(image.get('size') or 0) > root_gb * (1024 ** 3):
raise exception.InstanceTypeDiskTooSmall()
if int(image.get('min_disk') or 0) > root_gb:
raise exception.InstanceTypeDiskTooSmall()
def _get_image(self, context, image_href):
if not image_href:
return None, {}
(image_service, image_id) = glance.get_remote_image_service(
context, image_href)
image = image_service.show(context, image_id)
return image_id, image
def _checks_for_create_and_rebuild(self, context, image_id, image,
instance_type, metadata,
files_to_inject):
self._check_metadata_properties_quota(context, metadata)
self._check_injected_file_quota(context, files_to_inject)
if image_id is not None:
self._check_requested_image(context, image_id,
image, instance_type)
def _validate_and_build_base_options(self, context, instance_type,
boot_meta, image_href, image_id,
kernel_id, ramdisk_id, min_count,
max_count, display_name,
display_description, key_name,
key_data, security_groups,
availability_zone, user_data,
metadata, injected_files,
access_ip_v4, access_ip_v6,
requested_networks, config_drive,
block_device_mapping,
auto_disk_config, reservation_id):
"""Verify all the input parameters regardless of the provisioning
strategy being performed.
"""
if min_count > 1 or max_count > 1:
if any(map(lambda bdm: 'volume_id' in bdm, block_device_mapping)):
msg = _('Cannot attach one or more volumes to multiple'
' instances')
raise exception.InvalidRequest(msg)
if instance_type['disabled']:
raise exception.InstanceTypeNotFound(
instance_type_id=instance_type['id'])
if user_data:
l = len(user_data)
if l > MAX_USERDATA_SIZE:
# NOTE(mikal): user_data is stored in a text column, and
# the database might silently truncate if its over length.
raise exception.InstanceUserDataTooLarge(
length=l, maxsize=MAX_USERDATA_SIZE)
try:
base64.decodestring(user_data)
except base64.binascii.Error:
raise exception.InstanceUserDataMalformed()
self._checks_for_create_and_rebuild(context, image_id, boot_meta,
instance_type, metadata, injected_files)
self._check_requested_secgroups(context, security_groups)
self._check_requested_networks(context, requested_networks)
kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk(
context, kernel_id, ramdisk_id, boot_meta)
config_drive = self._check_config_drive(config_drive)
if key_data is None and key_name:
key_pair = self.db.key_pair_get(context, context.user_id,
key_name)
key_data = key_pair['public_key']
root_device_name = block_device.properties_root_device_name(
boot_meta.get('properties', {}))
system_metadata = flavors.save_flavor_info(
dict(), instance_type)
base_options = {
'reservation_id': reservation_id,
'image_ref': image_href,
'kernel_id': kernel_id or '',
'ramdisk_id': ramdisk_id or '',
'power_state': power_state.NOSTATE,
'vm_state': vm_states.BUILDING,
'config_drive': config_drive,
'user_id': context.user_id,
'project_id': context.project_id,
'instance_type_id': instance_type['id'],
'memory_mb': instance_type['memory_mb'],
'vcpus': instance_type['vcpus'],
'root_gb': instance_type['root_gb'],
'ephemeral_gb': instance_type['ephemeral_gb'],
'display_name': display_name,
'display_description': display_description or '',
'user_data': user_data,
'key_name': key_name,
'key_data': key_data,
'locked': False,
'metadata': metadata,
'access_ip_v4': access_ip_v4,
'access_ip_v6': access_ip_v6,
'availability_zone': availability_zone,
'root_device_name': root_device_name,
'progress': 0,
'system_metadata': system_metadata}
options_from_image = self._inherit_properties_from_image(
boot_meta, auto_disk_config)
base_options.update(options_from_image)
return base_options
def _build_filter_properties(self, context, scheduler_hints, forced_host,
forced_node, instance_type):
filter_properties = dict(scheduler_hints=scheduler_hints)
filter_properties['instance_type'] = instance_type
if forced_host:
check_policy(context, 'create:forced_host', {})
filter_properties['force_hosts'] = [forced_host]
if forced_node:
check_policy(context, 'create:forced_host', {})
filter_properties['force_nodes'] = [forced_node]
return filter_properties
def _provision_instances(self, context, instance_type, min_count,
max_count, base_options, boot_meta, security_groups,
block_device_mapping):
# Reserve quotas
num_instances, quota_reservations = self._check_num_instances_quota(
context, instance_type, min_count, max_count)
LOG.debug(_("Going to run %s instances...") % num_instances)
instances = []
try:
for i in xrange(num_instances):
options = base_options.copy()
instance = self.create_db_entry_for_new_instance(
context, instance_type, boot_meta, options,
security_groups, block_device_mapping,
num_instances, i)
instances.append(instance)
self._validate_bdm(context, instance)
# send a state update notification for the initial create to
# show it going from non-existent to BUILDING
notifications.send_update_with_states(context, instance, None,
vm_states.BUILDING, None, None, service="api")
# In the case of any exceptions, attempt DB cleanup and rollback the
# quota reservations.
except Exception:
with excutils.save_and_reraise_exception():
try:
for instance in instances:
self.db.instance_destroy(context, instance['uuid'])
finally:
QUOTAS.rollback(context, quota_reservations)
# Commit the reservations
QUOTAS.commit(context, quota_reservations)
return instances
def _get_volume_image_metadata(self, context, block_device_mapping):
"""If we are booting from a volume, we need to get the
volume details from Cinder and make sure we pass the
metadata back accordingly.
"""
if not block_device_mapping:
return {}
for bdm in block_device_mapping:
if bdm.get('device_name') == "vda":
volume_id = bdm.get('volume_id')
if volume_id is not None:
try:
volume = self.volume_api.get(context,
volume_id)
return volume['volume_image_metadata']
except Exception:
raise exception.InvalidBDMVolume(id=volume_id)
return None
def _create_instance(self, context, instance_type,
image_href, kernel_id, ramdisk_id,
min_count, max_count,
display_name, display_description,
key_name, key_data, security_groups,
availability_zone, user_data, metadata,
injected_files, admin_password,
access_ip_v4, access_ip_v6,
requested_networks, config_drive,
block_device_mapping, auto_disk_config,
reservation_id=None, scheduler_hints=None):
"""Verify all the input parameters regardless of the provisioning
strategy being performed and schedule the instance(s) for
creation.
"""
# Normalize and setup some parameters
if reservation_id is None:
reservation_id = utils.generate_uid('r')
security_groups = security_groups or ['default']
min_count = min_count or 1
max_count = max_count or min_count
block_device_mapping = block_device_mapping or []
if not instance_type:
instance_type = flavors.get_default_flavor()
if image_href:
image_id, boot_meta = self._get_image(context, image_href)
else:
image_id = None
boot_meta = {}
boot_meta['properties'] = \
self._get_volume_image_metadata(context,
block_device_mapping)
handle_az = self._handle_availability_zone
availability_zone, forced_host, forced_node = handle_az(
availability_zone)
base_options = self._validate_and_build_base_options(context,
instance_type, boot_meta, image_href, image_id, kernel_id,
ramdisk_id, min_count, max_count, display_name,
display_description, key_name, key_data, security_groups,
availability_zone, user_data, metadata, injected_files,
access_ip_v4, access_ip_v6, requested_networks, config_drive,
block_device_mapping, auto_disk_config, reservation_id)
instances = self._provision_instances(context, instance_type,
min_count, max_count, base_options, boot_meta, security_groups,
block_device_mapping)
filter_properties = self._build_filter_properties(context,
scheduler_hints, forced_host, forced_node, instance_type)
for instance in instances:
self._record_action_start(context, instance,
instance_actions.CREATE)
self.compute_task_api.build_instances(context,
instances=instances, image=boot_meta,
filter_properties=filter_properties,
admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=block_device_mapping)
return (instances, reservation_id)
@staticmethod
def _volume_size(instance_type, virtual_name):
size = 0
if virtual_name == 'swap':
size = instance_type.get('swap', 0)
elif block_device.is_ephemeral(virtual_name):
num = block_device.ephemeral_num(virtual_name)
# TODO(yamahata): ephemeralN where N > 0
# Only ephemeral0 is allowed for now because InstanceTypes
# table only allows single local disk, ephemeral_gb.
# In order to enhance it, we need to add a new columns to
# instance_types table.
if num > 0:
return 0
size = instance_type.get('ephemeral_gb')
return size
def _update_image_block_device_mapping(self, elevated_context,
instance_type, instance_uuid,
mappings):
"""tell vm driver to create ephemeral/swap device at boot time by
updating BlockDeviceMapping
"""
for bdm in block_device.mappings_prepend_dev(mappings):
LOG.debug(_("bdm %s"), bdm, instance_uuid=instance_uuid)
virtual_name = bdm['virtual']
if virtual_name == 'ami' or virtual_name == 'root':
continue
if not block_device.is_swap_or_ephemeral(virtual_name):
continue
size = self._volume_size(instance_type, virtual_name)
if size == 0:
continue
values = {
'instance_uuid': instance_uuid,
'device_name': bdm['device'],
'virtual_name': virtual_name,
'volume_size': size}
self.db.block_device_mapping_update_or_create(elevated_context,
values)
def _update_block_device_mapping(self, elevated_context,
instance_type, instance_uuid,
block_device_mapping):
"""tell vm driver to attach volume at boot time by updating
BlockDeviceMapping
"""
LOG.debug(_("block_device_mapping %s"), block_device_mapping,
instance_uuid=instance_uuid)
for bdm in block_device_mapping:
assert 'device_name' in bdm
values = {'instance_uuid': instance_uuid}
for key in ('device_name', 'delete_on_termination', 'virtual_name',
'snapshot_id', 'volume_id', 'volume_size',
'no_device'):
values[key] = bdm.get(key)
virtual_name = bdm.get('virtual_name')
if (virtual_name is not None and
block_device.is_swap_or_ephemeral(virtual_name)):
size = self._volume_size(instance_type, virtual_name)
if size == 0:
continue
values['volume_size'] = size
# NOTE(yamahata): NoDevice eliminates devices defined in image
# files by command line option.
# (--block-device-mapping)
if virtual_name == 'NoDevice':
values['no_device'] = True
for k in ('delete_on_termination', 'virtual_name',
'snapshot_id', 'volume_id', 'volume_size'):
values[k] = None
self.db.block_device_mapping_update_or_create(elevated_context,
values)
def _validate_bdm(self, context, instance):
for bdm in block_device.legacy_mapping(
self.db.block_device_mapping_get_all_by_instance(
context, instance['uuid'])):
# NOTE(vish): For now, just make sure the volumes are accessible.
# Additionally, check that the volume can be attached to this
# instance.
snapshot_id = bdm.get('snapshot_id')
volume_id = bdm.get('volume_id')
if volume_id is not None:
try:
volume = self.volume_api.get(context, volume_id)
self.volume_api.check_attach(context,
volume,
instance=instance)
except Exception:
raise exception.InvalidBDMVolume(id=volume_id)
elif snapshot_id is not None:
try:
self.volume_api.get_snapshot(context, snapshot_id)
except Exception:
raise exception.InvalidBDMSnapshot(id=snapshot_id)
def _populate_instance_for_bdm(self, context, instance, instance_type,
image, block_device_mapping):
"""Populate instance block device mapping information."""
instance_uuid = instance['uuid']
image_properties = image.get('properties', {})
mappings = image_properties.get('mappings', [])
if mappings:
self._update_image_block_device_mapping(context,
instance_type, instance_uuid, mappings)
image_bdm = image_properties.get('block_device_mapping', [])
for mapping in (image_bdm, block_device_mapping):
if not mapping:
continue
self._update_block_device_mapping(context,
instance_type, instance_uuid, mapping)
# NOTE(ndipanov): Create an image bdm - at the moment
# this is not used but is done for easier transition
# in the future.
if (instance['image_ref'] and not
self.is_volume_backed_instance(context, instance, None)):
image_bdm = block_device.create_image_bdm(instance['image_ref'])
image_bdm['instance_uuid'] = instance_uuid
self.db.block_device_mapping_update_or_create(context,
image_bdm,
legacy=False)
def _populate_instance_shutdown_terminate(self, instance, image,
block_device_mapping):
"""Populate instance shutdown_terminate information."""
image_properties = image.get('properties', {})
if (block_device_mapping or
image_properties.get('mappings') or
image_properties.get('block_device_mapping')):
instance['shutdown_terminate'] = False
def _populate_instance_names(self, instance, num_instances):
"""Populate instance display_name and hostname."""
display_name = instance.get('display_name')
hostname = instance.get('hostname')
if display_name is None:
display_name = self._default_display_name(instance['uuid'])
instance['display_name'] = display_name
if hostname is None and num_instances == 1:
# NOTE(russellb) In the multi-instance case, we're going to
# overwrite the display_name using the
# multi_instance_display_name_template. We need the default
# display_name set so that it can be used in the template, though.
# Only set the hostname here if we're only creating one instance.
# Otherwise, it will be built after the template based
# display_name.
hostname = display_name
instance['hostname'] = utils.sanitize_hostname(hostname)
def _default_display_name(self, instance_uuid):
return "Server %s" % instance_uuid
def _populate_instance_for_create(self, base_options, image,
security_groups):
"""Build the beginning of a new instance."""
image_properties = image.get('properties', {})
instance = base_options
if not instance.get('uuid'):
# Generate the instance_uuid here so we can use it
# for additional setup before creating the DB entry.
instance['uuid'] = str(uuid.uuid4())
instance['launch_index'] = 0
instance['vm_state'] = vm_states.BUILDING
instance['task_state'] = task_states.SCHEDULING
instance['info_cache'] = {'network_info': '[]'}
# Store image properties so we can use them later
# (for notifications, etc). Only store what we can.
instance.setdefault('system_metadata', {})
# Make sure we have the dict form that we need for instance_update.
instance['system_metadata'] = utils.instance_sys_meta(instance)
prefix_format = SM_IMAGE_PROP_PREFIX + '%s'
for key, value in image_properties.iteritems():
new_value = unicode(value)[:255]
instance['system_metadata'][prefix_format % key] = new_value
# Keep a record of the original base image that this
# image's instance is derived from:
base_image_ref = image_properties.get('base_image_ref')
if not base_image_ref:
# base image ref property not previously set through a snapshot.
# default to using the image ref as the base:
base_image_ref = base_options['image_ref']
instance['system_metadata']['image_base_image_ref'] = base_image_ref
self.security_group_api.populate_security_groups(instance,
security_groups)
return instance
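    # For example (illustrative, assuming SM_IMAGE_PROP_PREFIX is 'image_'
    # as described elsewhere in this module): an image property
    # {'os_type': 'linux'} ends up as
    # instance['system_metadata']['image_os_type'] = 'linux'.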
#NOTE(bcwaldon): No policy check since this is only used by scheduler and
# the compute api. That should probably be cleaned up, though.
def create_db_entry_for_new_instance(self, context, instance_type, image,
base_options, security_group, block_device_mapping, num_instances,
index):
"""Create an entry in the DB for this new instance,
including any related table updates (such as security group,
etc).
This is called by the scheduler after a location for the
instance has been determined.
"""
instance = self._populate_instance_for_create(base_options,
image, security_group)
self._populate_instance_names(instance, num_instances)
self._populate_instance_shutdown_terminate(instance, image,
block_device_mapping)
self.security_group_api.ensure_default(context)
instance = self.db.instance_create(context, instance)
if num_instances > 1:
# NOTE(russellb) We wait until this spot to handle
# multi_instance_display_name_template, because we need
# the UUID from the instance.
instance = self._apply_instance_name_template(context, instance,
index)
self._populate_instance_for_bdm(context, instance,
instance_type, image, block_device_mapping)
return instance
def _check_create_policies(self, context, availability_zone,
requested_networks, block_device_mapping):
"""Check policies for create()."""
target = {'project_id': context.project_id,
'user_id': context.user_id,
'availability_zone': availability_zone}
check_policy(context, 'create', target)
if requested_networks:
check_policy(context, 'create:attach_network', target)
if block_device_mapping:
check_policy(context, 'create:attach_volume', target)
@hooks.add_hook("create_instance")
def create(self, context, instance_type,
image_href, kernel_id=None, ramdisk_id=None,
min_count=None, max_count=None,
display_name=None, display_description=None,
key_name=None, key_data=None, security_group=None,
availability_zone=None, user_data=None, metadata=None,
injected_files=None, admin_password=None,
block_device_mapping=None, access_ip_v4=None,
access_ip_v6=None, requested_networks=None, config_drive=None,
auto_disk_config=None, scheduler_hints=None):
"""
Provision instances, sending instance information to the
scheduler. The scheduler will determine where the instance(s)
go and will handle creating the DB entries.
Returns a tuple of (instances, reservation_id)
"""
self._check_create_policies(context, availability_zone,
requested_networks, block_device_mapping)
return self._create_instance(
context, instance_type,
image_href, kernel_id, ramdisk_id,
min_count, max_count,
display_name, display_description,
key_name, key_data, security_group,
availability_zone, user_data, metadata,
injected_files, admin_password,
access_ip_v4, access_ip_v6,
requested_networks, config_drive,
block_device_mapping, auto_disk_config,
scheduler_hints=scheduler_hints)
def trigger_provider_fw_rules_refresh(self, context):
"""Called when a rule is added/removed from a provider firewall."""
for service in self.db.service_get_all_by_topic(context,
CONF.compute_topic):
host_name = service['host']
self.compute_rpcapi.refresh_provider_fw_rules(context, host_name)
def update_state(self, context, instance, new_state):
"""Updates the state of a compute instance.
        For example, to 'active' or 'error'.
        Also sets 'task_state' to None.
        Used by the admin_actions API.
:param context: The security context
:param instance: The instance to update
        :param new_state: A member of vm_states, e.g. 'active'
"""
self.update(context, instance,
vm_state=new_state,
task_state=None)
@wrap_check_policy
def update(self, context, instance, **kwargs):
"""Updates the instance in the datastore.
:param context: The security context
:param instance: The instance to update
:param kwargs: All additional keyword args are treated
as data fields of the instance to be
updated
:returns: None
"""
_, updated = self._update(context, instance, **kwargs)
return updated
def _update(self, context, instance, **kwargs):
# Update the instance record and send a state update notification
# if task or vm state changed
old_ref, instance_ref = self.db.instance_update_and_get_original(
context, instance['uuid'], kwargs)
notifications.send_update(context, old_ref, instance_ref,
service="api")
return dict(old_ref.iteritems()), dict(instance_ref.iteritems())
def _delete(self, context, instance, cb, **instance_attrs):
if instance['disable_terminate']:
LOG.info(_('instance termination disabled'),
instance=instance)
return
host = instance['host']
bdms = block_device.legacy_mapping(
self.db.block_device_mapping_get_all_by_instance(
context, instance['uuid']))
reservations = None
if context.is_admin and context.project_id != instance['project_id']:
project_id = instance['project_id']
else:
project_id = context.project_id
try:
# NOTE(maoy): no expected_task_state needs to be set
attrs = {'progress': 0}
attrs.update(instance_attrs)
old, updated = self._update(context,
instance,
**attrs)
# NOTE(comstud): If we delete the instance locally, we'll
# commit the reservations here. Otherwise, the manager side
# will commit or rollback the reservations based on success.
reservations = self._create_reservations(context,
old,
updated,
project_id)
if not host:
# Just update database, nothing else we can do
constraint = self.db.constraint(host=self.db.equal_any(host))
try:
self.db.instance_destroy(context, instance['uuid'],
constraint)
if reservations:
QUOTAS.commit(context,
reservations,
project_id=project_id)
return
except exception.ConstraintNotMet:
# Refresh to get new host information
instance = self.get(context, instance['uuid'])
if instance['vm_state'] == vm_states.RESIZED:
# If in the middle of a resize, use confirm_resize to
# ensure the original instance is cleaned up too
get_migration = self.db.migration_get_by_instance_and_status
try:
migration_ref = get_migration(context.elevated(),
instance['uuid'], 'finished')
except exception.MigrationNotFoundByStatus:
migration_ref = None
if migration_ref:
src_host = migration_ref['source_compute']
                # Call synchronously (cast=False) since this can race with
                # the terminate_instance.
# The resize is done but awaiting confirmation/reversion,
# so there are two cases:
# 1. up-resize: here -instance['vcpus'/'memory_mb'] match
# the quota usages accounted for this instance,
# so no further quota adjustment is needed
# 2. down-resize: here -instance['vcpus'/'memory_mb'] are
# shy by delta(old, new) from the quota usages accounted
# for this instance, so we must adjust
deltas = self._downsize_quota_delta(context, instance)
downsize_reservations = self._reserve_quota_delta(context,
deltas)
self._record_action_start(context, instance,
instance_actions.CONFIRM_RESIZE)
self.compute_rpcapi.confirm_resize(context,
instance, migration_ref,
host=src_host, cast=False,
reservations=downsize_reservations)
is_up = False
try:
service = self.db.service_get_by_compute_host(
context.elevated(), instance['host'])
if self.servicegroup_api.service_is_up(service):
is_up = True
self._record_action_start(context, instance,
instance_actions.DELETE)
cb(context, instance, bdms, reservations=reservations)
except exception.ComputeHostNotFound:
pass
if not is_up:
# If compute node isn't up, just delete from DB
self._local_delete(context, instance, bdms)
if reservations:
QUOTAS.commit(context,
reservations,
project_id=project_id)
reservations = None
except exception.InstanceNotFound:
# NOTE(comstud): Race condition. Instance already gone.
if reservations:
QUOTAS.rollback(context,
reservations,
project_id=project_id)
except Exception:
with excutils.save_and_reraise_exception():
if reservations:
QUOTAS.rollback(context,
reservations,
project_id=project_id)
def _create_reservations(self, context, old_instance, new_instance,
project_id):
instance_vcpus = old_instance['vcpus']
instance_memory_mb = old_instance['memory_mb']
# NOTE(wangpan): if the instance is resizing, and the resources
# are updated to new instance type, we should use
# the old instance type to create reservation.
# see https://bugs.launchpad.net/nova/+bug/1099729 for more details
if old_instance['task_state'] in (task_states.RESIZE_MIGRATED,
task_states.RESIZE_FINISH):
get_migration = self.db.migration_get_by_instance_and_status
try:
migration_ref = get_migration(context.elevated(),
old_instance['uuid'], 'post-migrating')
except exception.MigrationNotFoundByStatus:
migration_ref = None
if (migration_ref and
new_instance['instance_type_id'] ==
migration_ref['new_instance_type_id']):
old_inst_type_id = migration_ref['old_instance_type_id']
try:
old_inst_type = flavors.get_flavor(old_inst_type_id)
except exception.InstanceTypeNotFound:
LOG.warning(_("instance type %d not found"),
old_inst_type_id)
else:
instance_vcpus = old_inst_type['vcpus']
instance_memory_mb = old_inst_type['memory_mb']
LOG.debug(_("going to delete a resizing instance"))
reservations = QUOTAS.reserve(context,
project_id=project_id,
instances=-1,
cores=-instance_vcpus,
ram=-instance_memory_mb)
return reservations
def _local_delete(self, context, instance, bdms):
LOG.warning(_("instance's host %s is down, deleting from "
"database") % instance['host'], instance=instance)
instance_uuid = instance['uuid']
self.db.instance_info_cache_delete(context, instance_uuid)
compute_utils.notify_about_instance_usage(
context, instance, "delete.start")
elevated = context.elevated()
self.network_api.deallocate_for_instance(elevated,
instance)
system_meta = self.db.instance_system_metadata_get(context,
instance_uuid)
# cleanup volumes
for bdm in bdms:
if bdm['volume_id']:
# NOTE(vish): We don't have access to correct volume
# connector info, so just pass a fake
# connector. This can be improved when we
# expose get_volume_connector to rpc.
connector = {'ip': '127.0.0.1', 'initiator': 'iqn.fake'}
self.volume_api.terminate_connection(context,
bdm['volume_id'],
connector)
self.volume_api.detach(elevated, bdm['volume_id'])
if bdm['delete_on_termination']:
self.volume_api.delete(context, bdm['volume_id'])
self.db.block_device_mapping_destroy(context, bdm['id'])
instance = self._instance_update(context,
instance_uuid,
vm_state=vm_states.DELETED,
task_state=None,
terminated_at=timeutils.utcnow())
self.db.instance_destroy(context, instance_uuid)
compute_utils.notify_about_instance_usage(
context, instance, "delete.end", system_metadata=system_meta)
# NOTE(maoy): we allow delete to be called no matter what vm_state says.
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=None, task_state=None,
must_have_launched=True)
def soft_delete(self, context, instance):
"""Terminate an instance."""
LOG.debug(_('Going to try to soft delete instance'),
instance=instance)
def soft_delete(context, instance, bdms, reservations=None):
self.compute_rpcapi.soft_delete_instance(context, instance,
reservations=reservations)
self._delete(context, instance, soft_delete,
task_state=task_states.SOFT_DELETING,
deleted_at=timeutils.utcnow())
def _delete_instance(self, context, instance):
def terminate(context, instance, bdms, reservations=None):
self.compute_rpcapi.terminate_instance(context, instance, bdms,
reservations=reservations)
self._delete(context, instance, terminate,
task_state=task_states.DELETING)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=None, task_state=None,
must_have_launched=False)
def delete(self, context, instance):
"""Terminate an instance."""
LOG.debug(_("Going to try to terminate instance"), instance=instance)
self._delete_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SOFT_DELETED])
def restore(self, context, instance):
"""Restore a previously deleted (but not reclaimed) instance."""
# Reserve quotas
instance_type = flavors.extract_flavor(instance)
num_instances, quota_reservations = self._check_num_instances_quota(
context, instance_type, 1, 1)
self._record_action_start(context, instance, instance_actions.RESTORE)
try:
if instance['host']:
instance = self.update(context, instance,
task_state=task_states.RESTORING,
expected_task_state=None,
deleted_at=None)
self.compute_rpcapi.restore_instance(context, instance)
else:
self.update(context,
instance,
vm_state=vm_states.ACTIVE,
task_state=None,
expected_task_state=None,
deleted_at=None)
QUOTAS.commit(context, quota_reservations)
except Exception:
with excutils.save_and_reraise_exception():
QUOTAS.rollback(context, quota_reservations)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SOFT_DELETED],
must_have_launched=False)
def force_delete(self, context, instance):
"""Force delete a previously deleted (but not reclaimed) instance."""
self._delete_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_host
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED,
vm_states.ERROR],
task_state=[None])
def stop(self, context, instance, do_cast=True):
"""Stop an instance."""
LOG.debug(_("Going to try to stop instance"), instance=instance)
instance.task_state = task_states.POWERING_OFF
instance.progress = 0
instance.save(expected_task_state=None)
self._record_action_start(context, instance, instance_actions.STOP)
self.compute_rpcapi.stop_instance(context, instance, do_cast=do_cast)
@wrap_check_policy
@check_instance_lock
@check_instance_host
@check_instance_cell
@check_instance_state(vm_state=[vm_states.STOPPED])
def start(self, context, instance):
"""Start an instance."""
LOG.debug(_("Going to try to start instance"), instance=instance)
instance.task_state = task_states.POWERING_ON
instance.save(expected_task_state=None)
self._record_action_start(context, instance, instance_actions.START)
# TODO(yamahata): injected_files isn't supported right now.
# It is used only for osapi. not for ec2 api.
# availability_zone isn't used by run_instance.
self.compute_rpcapi.start_instance(context, instance)
#NOTE(bcwaldon): no policy check here since it should be rolled in to
# search_opts in get_all
def get_active_by_window(self, context, begin, end=None, project_id=None):
"""Get instances that were continuously active over a window."""
return self.db.instance_get_active_by_window_joined(context, begin,
end, project_id)
#NOTE(bcwaldon): this doesn't really belong in this class
def get_instance_type(self, context, instance_type_id):
"""Get an instance type by instance type id."""
return flavors.get_flavor(instance_type_id, ctxt=context)
def get(self, context, instance_id, want_objects=False):
"""Get a single instance with the given instance_id."""
# NOTE(ameade): we still need to support integer ids for ec2
expected_attrs = ['metadata', 'system_metadata',
'security_groups', 'info_cache']
try:
if uuidutils.is_uuid_like(instance_id):
instance = instance_obj.Instance.get_by_uuid(
context, instance_id, expected_attrs=expected_attrs)
elif utils.is_int_like(instance_id):
instance = instance_obj.Instance.get_by_id(
context, instance_id, expected_attrs=expected_attrs)
else:
raise exception.InstanceNotFound(instance_id=instance_id)
except exception.InvalidID:
raise exception.InstanceNotFound(instance_id=instance_id)
check_policy(context, 'get', instance)
if not want_objects:
instance = obj_base.obj_to_primitive(instance)
return instance
def get_all(self, context, search_opts=None, sort_key='created_at',
sort_dir='desc', limit=None, marker=None, want_objects=False):
"""Get all instances filtered by one of the given parameters.
If there is no filter and the context is an admin, it will retrieve
all instances in the system.
Deleted instances will be returned by default, unless there is a
search option that says otherwise.
The results will be returned sorted in the order specified by the
'sort_dir' parameter using the key specified in the 'sort_key'
parameter.
"""
#TODO(bcwaldon): determine the best argument for target here
target = {
'project_id': context.project_id,
'user_id': context.user_id,
}
check_policy(context, "get_all", target)
if search_opts is None:
search_opts = {}
if 'all_tenants' in search_opts:
check_policy(context, "get_all_tenants", target)
LOG.debug(_("Searching by: %s") % str(search_opts))
# Fixups for the DB call
filters = {}
def _remap_flavor_filter(flavor_id):
instance_type = flavors.get_flavor_by_flavor_id(
flavor_id)
filters['instance_type_id'] = instance_type['id']
def _remap_fixed_ip_filter(fixed_ip):
# Turn fixed_ip into a regexp match. Since '.' matches
# any character, we need to use regexp escaping for it.
filters['ip'] = '^%s$' % fixed_ip.replace('.', '\\.')
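        # For example (illustrative): a fixed_ip filter of '10.0.0.2'
        # becomes the exact-match regexp '^10\.0\.0\.2$'.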
# search_option to filter_name mapping.
filter_mapping = {
'image': 'image_ref',
'name': 'display_name',
'tenant_id': 'project_id',
'flavor': _remap_flavor_filter,
'fixed_ip': _remap_fixed_ip_filter}
# copy from search_opts, doing various remappings as necessary
for opt, value in search_opts.iteritems():
# Do remappings.
# Values not in the filter_mapping table are copied as-is.
# If remapping is None, option is not copied
# If the remapping is a string, it is the filter_name to use
try:
remap_object = filter_mapping[opt]
except KeyError:
filters[opt] = value
else:
# Remaps are strings to translate to, or functions to call
# to do the translating as defined by the table above.
if isinstance(remap_object, basestring):
filters[remap_object] = value
else:
try:
remap_object(value)
# We already know we can't match the filter, so
# return an empty list
except ValueError:
return []
inst_models = self._get_instances_by_filters(context, filters,
sort_key, sort_dir,
limit=limit,
marker=marker)
if want_objects:
return inst_models
# Convert the models to dictionaries
instances = []
for inst_model in inst_models:
instances.append(obj_base.obj_to_primitive(inst_model))
return instances
def _get_instances_by_filters(self, context, filters,
sort_key, sort_dir,
limit=None,
marker=None):
if 'ip6' in filters or 'ip' in filters:
res = self.network_api.get_instance_uuids_by_ip_filter(context,
filters)
# NOTE(jkoelker) It is possible that we will get the same
# instance uuid twice (one for ipv4 and ipv6)
uuids = set([r['instance_uuid'] for r in res])
filters['uuid'] = uuids
fields = ['metadata', 'system_metadata', 'info_cache',
'security_groups']
return instance_obj.InstanceList.get_by_filters(
context, filters=filters, sort_key=sort_key, sort_dir=sort_dir,
limit=limit, marker=marker, expected_attrs=fields)
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED])
def live_snapshot(self, context, instance, name, extra_properties=None,
image_id=None):
"""Live Snapshot the given instance.
:param instance: nova.db.sqlalchemy.models.Instance
:param name: name of the backup or snapshot
:param extra_properties: dict of extra image properties to include
:returns: A dict containing image metadata
"""
if image_id:
# The image entry has already been created, so just pull the
# metadata.
image_meta = self.image_service.show(context, image_id)
else:
image_meta = self._create_image(context, instance, name,
'snapshot', extra_properties=extra_properties)
instance = self.update(context, instance,
task_state=task_states.IMAGE_LIVE_SNAPSHOT,
expected_task_state=None)
self.compute_rpcapi.live_snapshot_instance(context, instance=instance,
image_id=image_meta['id'])
return image_meta
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
def backup(self, context, instance, name, backup_type, rotation,
extra_properties=None, image_id=None):
"""Backup the given instance
:param instance: nova.db.sqlalchemy.models.Instance
        :param name: name of the backup or snapshot
        :param backup_type: daily | weekly; by convention the backup name
            matches the backup_type (e.g. daily backups are named 'daily')
:param rotation: int representing how many backups to keep around;
None if rotation shouldn't be used (as in the case of snapshots)
:param extra_properties: dict of extra image properties to include
"""
if image_id:
# The image entry has already been created, so just pull the
# metadata.
image_meta = self.image_service.show(context, image_id)
else:
image_meta = self._create_image(context, instance, name,
'backup', backup_type=backup_type,
rotation=rotation, extra_properties=extra_properties)
instance = self.update(context, instance,
task_state=task_states.IMAGE_BACKUP,
expected_task_state=None)
self.compute_rpcapi.snapshot_instance(context, instance=instance,
image_id=image_meta['id'], image_type='backup',
backup_type=backup_type, rotation=rotation)
return image_meta
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.PAUSED, vm_states.SUSPENDED])
def snapshot(self, context, instance, name, extra_properties=None,
image_id=None):
"""Snapshot the given instance.
:param instance: nova.db.sqlalchemy.models.Instance
:param name: name of the backup or snapshot
:param extra_properties: dict of extra image properties to include
:returns: A dict containing image metadata
"""
if image_id:
# The image entry has already been created, so just pull the
# metadata.
image_meta = self.image_service.show(context, image_id)
else:
image_meta = self._create_image(context, instance, name,
'snapshot', extra_properties=extra_properties)
instance = self.update(context, instance,
task_state=task_states.IMAGE_SNAPSHOT,
expected_task_state=None)
self.compute_rpcapi.snapshot_instance(context, instance=instance,
image_id=image_meta['id'], image_type='snapshot')
return image_meta
def _create_image(self, context, instance, name, image_type,
backup_type=None, rotation=None, extra_properties=None):
"""Create new image entry in the image service. This new image
will be reserved for the compute manager to upload a snapshot
or backup.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param name: string for name of the snapshot
:param image_type: snapshot | backup
:param backup_type: daily | weekly
:param rotation: int representing how many backups to keep around;
None if rotation shouldn't be used (as in the case of snapshots)
:param extra_properties: dict of extra image properties to include
"""
instance_uuid = instance['uuid']
properties = {
'instance_uuid': instance_uuid,
'user_id': str(context.user_id),
'image_type': image_type,
}
sent_meta = {
'name': name,
'is_public': False,
'properties': properties,
}
# Persist base image ref as a Glance image property
system_meta = self.db.instance_system_metadata_get(
context, instance_uuid)
base_image_ref = system_meta.get('image_base_image_ref')
if base_image_ref:
properties['base_image_ref'] = base_image_ref
if image_type == 'backup':
properties['backup_type'] = backup_type
elif image_type == 'snapshot':
min_ram, min_disk = self._get_minram_mindisk_params(context,
instance)
if min_ram is not None:
sent_meta['min_ram'] = min_ram
if min_disk is not None:
sent_meta['min_disk'] = min_disk
properties.update(extra_properties or {})
# Now inherit image properties from the base image
for key, value in system_meta.items():
# Trim off the image_ prefix
if key.startswith(SM_IMAGE_PROP_PREFIX):
key = key[len(SM_IMAGE_PROP_PREFIX):]
# Skip properties that are non-inheritable
if key in CONF.non_inheritable_image_properties:
continue
# By using setdefault, we ensure that the properties set
# up above will not be overwritten by inherited values
properties.setdefault(key, value)
return self.image_service.create(context, sent_meta)
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
def snapshot_volume_backed(self, context, instance, image_meta, name,
extra_properties=None):
"""Snapshot the given volume-backed instance.
:param instance: nova.db.sqlalchemy.models.Instance
:param image_meta: metadata for the new image
:param name: name of the backup or snapshot
:param extra_properties: dict of extra image properties to include
:returns: the new image metadata
"""
image_meta['name'] = name
properties = image_meta['properties']
if instance['root_device_name']:
properties['root_device_name'] = instance['root_device_name']
properties.update(extra_properties or {})
bdms = self.get_instance_bdms(context, instance)
mapping = []
for bdm in bdms:
if bdm['no_device']:
continue
volume_id = bdm.get('volume_id')
if volume_id:
# create snapshot based on volume_id
volume = self.volume_api.get(context, volume_id)
# NOTE(yamahata): Should we wait for snapshot creation?
# Linux LVM snapshot creation completes in
# short time, it doesn't matter for now.
name = _('snapshot for %s') % image_meta['name']
snapshot = self.volume_api.create_snapshot_force(
context, volume['id'], name, volume['display_description'])
bdm['snapshot_id'] = snapshot['id']
bdm['volume_id'] = None
mapping.append(bdm)
for m in block_device.mappings_prepend_dev(properties.get('mappings',
[])):
virtual_name = m['virtual']
if virtual_name in ('ami', 'root'):
continue
assert block_device.is_swap_or_ephemeral(virtual_name)
device_name = m['device']
if device_name in [b['device_name'] for b in mapping
if not b.get('no_device', False)]:
continue
# NOTE(yamahata): swap and ephemeral devices are specified in
# AMI, but disabled for this instance by user.
# So disable those device by no_device.
mapping.append({'device_name': device_name, 'no_device': True})
if mapping:
properties['block_device_mapping'] = mapping
for attr in ('status', 'location', 'id'):
image_meta.pop(attr, None)
# the new image is simply a bucket of properties (particularly the
# block device mapping, kernel and ramdisk IDs) with no image data,
# hence the zero size
image_meta['size'] = 0
return self.image_service.create(context, image_meta, data='')
def _get_minram_mindisk_params(self, context, instance):
try:
#try to get source image of the instance
orig_image = self.image_service.show(context,
instance['image_ref'])
except exception.ImageNotFound:
return None, None
#disk format of vhd is non-shrinkable
if orig_image.get('disk_format') == 'vhd':
instance_type = flavors.extract_flavor(instance)
min_disk = instance_type['root_gb']
else:
#set new image values to the original image values
min_disk = orig_image.get('min_disk')
min_ram = orig_image.get('min_ram')
return min_ram, min_disk
def _get_block_device_info(self, context, instance_uuid):
bdms = block_device.legacy_mapping(
self.db.block_device_mapping_get_all_by_instance(context,
instance_uuid))
block_device_mapping = []
for bdm in bdms:
if not bdm['volume_id']:
continue
try:
cinfo = jsonutils.loads(bdm['connection_info'])
if cinfo and 'serial' not in cinfo:
cinfo['serial'] = bdm['volume_id']
bdmap = {'connection_info': cinfo,
'mount_device': bdm['device_name'],
'delete_on_termination': bdm['delete_on_termination']}
block_device_mapping.append(bdmap)
except TypeError:
# if the block_device_mapping has no value in connection_info
# (returned as None), don't include in the mapping
pass
return {'block_device_mapping': block_device_mapping}
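    # Illustrative shape of the returned structure (values are examples):
    #   {'block_device_mapping': [{'connection_info': {...},
    #                              'mount_device': '/dev/vdb',
    #                              'delete_on_termination': False}]}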
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.PAUSED, vm_states.SUSPENDED,
vm_states.ERROR],
task_state=[None, task_states.REBOOTING,
task_states.REBOOTING_HARD,
task_states.RESUMING,
task_states.UNPAUSING,
task_states.PAUSING,
task_states.SUSPENDING])
def reboot(self, context, instance, reboot_type):
"""Reboot the given instance."""
if ((reboot_type == 'SOFT' and
instance['task_state'] == task_states.REBOOTING) or
(reboot_type == 'HARD' and
instance['task_state'] == task_states.REBOOTING_HARD)):
raise exception.InstanceInvalidState(
attr='task_state',
instance_uuid=instance['uuid'],
state=instance['task_state'],
method='reboot')
state = {'SOFT': task_states.REBOOTING,
'HARD': task_states.REBOOTING_HARD}[reboot_type]
instance = self.update(context, instance,
task_state=state,
expected_task_state=[None,
task_states.REBOOTING])
elevated = context.elevated()
block_info = self._get_block_device_info(elevated,
instance['uuid'])
self._record_action_start(context, instance, instance_actions.REBOOT)
self.compute_rpcapi.reboot_instance(context, instance=instance,
block_device_info=block_info,
reboot_type=reboot_type)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.ERROR],
task_state=[None])
def rebuild(self, context, instance, image_href, admin_password, **kwargs):
"""Rebuild the given instance with the provided attributes."""
orig_image_ref = instance['image_ref'] or ''
files_to_inject = kwargs.pop('files_to_inject', [])
metadata = kwargs.get('metadata', {})
instance_type = flavors.extract_flavor(instance)
image_id, image = self._get_image(context, image_href)
self._checks_for_create_and_rebuild(context, image_id, image,
instance_type, metadata, files_to_inject)
kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk(
context, None, None, image)
def _reset_image_metadata():
"""
Remove old image properties that we're storing as instance
system metadata. These properties start with 'image_'.
Then add the properties for the new image.
"""
# FIXME(comstud): There's a race condition here in that
# if the system_metadata for this instance is updated
# after we do the get and before we update.. those other
# updates will be lost. Since this problem exists in a lot
# of other places, I think it should be addressed in a DB
# layer overhaul.
sys_metadata = self.db.instance_system_metadata_get(context,
instance['uuid'])
orig_sys_metadata = dict(sys_metadata)
# Remove the old keys
for key in sys_metadata.keys():
if key.startswith(SM_IMAGE_PROP_PREFIX):
del sys_metadata[key]
# Add the new ones
for key, value in image.get('properties', {}).iteritems():
new_value = unicode(value)[:255]
sys_metadata[(SM_IMAGE_PROP_PREFIX + '%s') % key] = new_value
self.db.instance_system_metadata_update(context,
instance['uuid'], sys_metadata, True)
return orig_sys_metadata
instance = self.update(context, instance,
task_state=task_states.REBUILDING,
expected_task_state=None,
# Unfortunately we need to set image_ref early,
# so API users can see it.
image_ref=image_href, kernel_id=kernel_id or "",
ramdisk_id=ramdisk_id or "",
progress=0, **kwargs)
# On a rebuild, since we're potentially changing images, we need to
# wipe out the old image properties that we're storing as instance
# system metadata... and copy in the properties for the new image.
orig_sys_metadata = _reset_image_metadata()
bdms = block_device.legacy_mapping(
self.db.block_device_mapping_get_all_by_instance(
context,
instance['uuid']))
self._record_action_start(context, instance, instance_actions.REBUILD)
self.compute_rpcapi.rebuild_instance(context, instance=instance,
new_pass=admin_password, injected_files=files_to_inject,
image_ref=image_href, orig_image_ref=orig_image_ref,
orig_sys_metadata=orig_sys_metadata, bdms=bdms)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.RESIZED])
def revert_resize(self, context, instance):
"""Reverts a resize, deleting the 'new' instance in the process."""
elevated = context.elevated()
migration_ref = self.db.migration_get_by_instance_and_status(elevated,
instance['uuid'], 'finished')
# reverse quota reservation for increased resource usage
deltas = self._reverse_upsize_quota_delta(context, migration_ref)
reservations = self._reserve_quota_delta(context, deltas)
instance = self.update(context, instance,
task_state=task_states.RESIZE_REVERTING,
expected_task_state=None)
self.db.migration_update(elevated, migration_ref['id'],
{'status': 'reverting'})
# With cells, the best we can do right now is commit the reservations
# immediately...
if CONF.cells.enable and reservations:
QUOTAS.commit(context, reservations)
reservations = []
self._record_action_start(context, instance,
instance_actions.REVERT_RESIZE)
self.compute_rpcapi.revert_resize(context,
instance=instance, migration=migration_ref,
host=migration_ref['dest_compute'], reservations=reservations)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.RESIZED])
def confirm_resize(self, context, instance, migration_ref=None):
"""Confirms a migration/resize and deletes the 'old' instance."""
elevated = context.elevated()
if migration_ref is None:
migration_ref = self.db.migration_get_by_instance_and_status(
elevated, instance['uuid'], 'finished')
# reserve quota only for any decrease in resource usage
deltas = self._downsize_quota_delta(context, instance)
reservations = self._reserve_quota_delta(context, deltas)
self.db.migration_update(elevated, migration_ref['id'],
{'status': 'confirming'})
# With cells, the best we can do right now is commit the reservations
# immediately...
if CONF.cells.enable and reservations:
QUOTAS.commit(context, reservations)
reservations = []
self._record_action_start(context, instance,
instance_actions.CONFIRM_RESIZE)
self.compute_rpcapi.confirm_resize(context,
instance=instance, migration=migration_ref,
host=migration_ref['source_compute'],
reservations=reservations)
@staticmethod
def _resize_quota_delta(context, new_instance_type,
old_instance_type, sense, compare):
"""
Calculate any quota adjustment required at a particular point
in the resize cycle.
:param context: the request context
:param new_instance_type: the target instance type
:param old_instance_type: the original instance type
:param sense: the sense of the adjustment, 1 indicates a
forward adjustment, whereas -1 indicates a
reversal of a prior adjustment
:param compare: the direction of the comparison, 1 indicates
we're checking for positive deltas, whereas
-1 indicates negative deltas
"""
def _quota_delta(resource):
return sense * (new_instance_type[resource] -
old_instance_type[resource])
deltas = {}
if compare * _quota_delta('vcpus') > 0:
deltas['cores'] = _quota_delta('vcpus')
if compare * _quota_delta('memory_mb') > 0:
deltas['ram'] = _quota_delta('memory_mb')
return deltas
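    # Worked example (illustrative flavors): upsizing from vcpus=1,
    # memory_mb=512 to vcpus=2, memory_mb=2048 with sense=1, compare=1
    # yields {'cores': 1, 'ram': 1536}; calling with sense=-1, compare=-1
    # (as _reverse_upsize_quota_delta does) yields the negated deltas.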
@staticmethod
def _upsize_quota_delta(context, new_instance_type, old_instance_type):
"""
Calculate deltas required to adjust quota for an instance upsize.
"""
return API._resize_quota_delta(context, new_instance_type,
old_instance_type, 1, 1)
@staticmethod
def _reverse_upsize_quota_delta(context, migration_ref):
"""
Calculate deltas required to reverse a prior upsizing
quota adjustment.
"""
old_instance_type = flavors.get_flavor(
migration_ref['old_instance_type_id'])
new_instance_type = flavors.get_flavor(
migration_ref['new_instance_type_id'])
return API._resize_quota_delta(context, new_instance_type,
old_instance_type, -1, -1)
@staticmethod
def _downsize_quota_delta(context, instance):
"""
Calculate deltas required to adjust quota for an instance downsize.
"""
old_instance_type = flavors.extract_flavor(instance,
'old_')
new_instance_type = flavors.extract_flavor(instance,
'new_')
return API._resize_quota_delta(context, new_instance_type,
old_instance_type, 1, -1)
@staticmethod
def _reserve_quota_delta(context, deltas, project_id=None):
if not deltas:
return
return QUOTAS.reserve(context, project_id=project_id, **deltas)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED],
task_state=[None])
def resize(self, context, instance, flavor_id=None, **kwargs):
"""Resize (ie, migrate) a running instance.
If flavor_id is None, the process is considered a migration, keeping
the original flavor_id. If flavor_id is not None, the instance should
be migrated to a new host and resized to the new flavor_id.
"""
current_instance_type = flavors.extract_flavor(instance)
# If flavor_id is not provided, only migrate the instance.
if not flavor_id:
LOG.debug(_("flavor_id is None. Assuming migration."),
instance=instance)
new_instance_type = current_instance_type
else:
new_instance_type = flavors.get_flavor_by_flavor_id(
flavor_id, read_deleted="no")
current_instance_type_name = current_instance_type['name']
new_instance_type_name = new_instance_type['name']
LOG.debug(_("Old instance type %(current_instance_type_name)s, "
" new instance type %(new_instance_type_name)s"),
{'current_instance_type_name': current_instance_type_name,
'new_instance_type_name': new_instance_type_name},
instance=instance)
# FIXME(sirp): both of these should raise InstanceTypeNotFound instead
if not new_instance_type:
raise exception.FlavorNotFound(flavor_id=flavor_id)
same_instance_type = (current_instance_type['id'] ==
new_instance_type['id'])
# NOTE(sirp): We don't want to force a customer to change their flavor
# when Ops is migrating off of a failed host.
if not same_instance_type and new_instance_type['disabled']:
raise exception.FlavorNotFound(flavor_id=flavor_id)
# NOTE(markwash): look up the image early to avoid auth problems later
image_ref = instance.get('image_ref')
if image_ref:
image = self.image_service.show(context, image_ref)
else:
image = {}
if same_instance_type and flavor_id:
raise exception.CannotResizeToSameFlavor()
# ensure there is sufficient headroom for upsizes
deltas = self._upsize_quota_delta(context, new_instance_type,
current_instance_type)
try:
reservations = self._reserve_quota_delta(context, deltas,
project_id=instance[
'project_id'])
except exception.OverQuota as exc:
quotas = exc.kwargs['quotas']
usages = exc.kwargs['usages']
overs = exc.kwargs['overs']
headroom = dict((res, quotas[res] -
(usages[res]['in_use'] + usages[res]['reserved']))
for res in quotas.keys())
resource = overs[0]
used = quotas[resource] - headroom[resource]
total_allowed = used + headroom[resource]
overs = ','.join(overs)
LOG.warn(_("%(overs)s quota exceeded for %(pid)s,"
" tried to resize instance."),
{'overs': overs, 'pid': context.project_id})
raise exception.TooManyInstances(overs=overs,
req=deltas[resource],
used=used, allowed=total_allowed,
resource=resource)
instance = self.update(context, instance,
task_state=task_states.RESIZE_PREP,
expected_task_state=None,
progress=0, **kwargs)
request_spec = {
'instance_type': new_instance_type,
'instance_uuids': [instance['uuid']],
'instance_properties': instance,
'image': image}
filter_properties = {'ignore_hosts': []}
if not CONF.allow_resize_to_same_host:
filter_properties['ignore_hosts'].append(instance['host'])
        # When flavor_id is None the operation is a plain migration.
if (not flavor_id and not CONF.allow_migrate_to_same_host):
filter_properties['ignore_hosts'].append(instance['host'])
# With cells, the best we can do right now is commit the reservations
# immediately...
if CONF.cells.enable and reservations:
QUOTAS.commit(context, reservations,
project_id=instance['project_id'])
reservations = []
args = {
"instance": instance,
"instance_type": new_instance_type,
"image": image,
"request_spec": jsonutils.to_primitive(request_spec),
"filter_properties": filter_properties,
"reservations": reservations,
}
self._record_action_start(context, instance, instance_actions.RESIZE)
self.scheduler_rpcapi.prep_resize(context, **args)
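    # Usage sketch (illustrative; 'compute_api' names an instance of this
    # class): compute_api.resize(ctxt, instance) requests a pure migration
    # keeping the current flavor, while
    # compute_api.resize(ctxt, instance, flavor_id='42') resizes to flavor
    # '42', normally on a different host.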
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.PAUSED, vm_states.SUSPENDED],
task_state=[None])
def shelve(self, context, instance):
"""Shelve an instance.
Shuts down an instance and frees it up to be removed from the
hypervisor.
"""
instance.task_state = task_states.SHELVING
instance.save(expected_task_state=None)
self._record_action_start(context, instance, instance_actions.SHELVE)
image_id = None
bdms = self.get_instance_bdms(context, instance)
if not self.is_volume_backed_instance(context, instance, bdms):
name = '%s-shelved' % instance['name']
image_meta = self._create_image(context, instance, name,
'snapshot')
image_id = image_meta['id']
self.compute_rpcapi.shelve_instance(context, instance=instance,
image_id=image_id)
else:
self.compute_rpcapi.shelve_offload_instance(context,
instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SHELVED], task_state=[None])
def shelve_offload(self, context, instance):
"""Remove a shelved instance from the hypervisor."""
instance.task_state = task_states.SHELVING_OFFLOADING
instance.save(expected_task_state=None)
self.compute_rpcapi.shelve_offload_instance(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SHELVED,
vm_states.SHELVED_OFFLOADED], task_state=[None])
def unshelve(self, context, instance):
"""Restore a shelved instance."""
instance.task_state = task_states.UNSHELVING
instance.save(expected_task_state=None)
self._record_action_start(context, instance, instance_actions.UNSHELVE)
self.compute_task_api.unshelve_instance(context, instance)
@wrap_check_policy
@check_instance_lock
def add_fixed_ip(self, context, instance, network_id):
"""Add fixed_ip from specified network to given instance."""
self.compute_rpcapi.add_fixed_ip_to_instance(context,
instance=instance, network_id=network_id)
@wrap_check_policy
@check_instance_lock
def remove_fixed_ip(self, context, instance, address):
"""Remove fixed_ip from specified network to given instance."""
self.compute_rpcapi.remove_fixed_ip_from_instance(context,
instance=instance, address=address)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED])
def pause(self, context, instance):
"""Pause the given instance."""
self.update(context,
instance,
task_state=task_states.PAUSING,
expected_task_state=None)
self._record_action_start(context, instance, instance_actions.PAUSE)
self.compute_rpcapi.pause_instance(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.PAUSED])
def unpause(self, context, instance):
"""Unpause the given instance."""
self.update(context,
instance,
task_state=task_states.UNPAUSING,
expected_task_state=None)
self._record_action_start(context, instance, instance_actions.UNPAUSE)
self.compute_rpcapi.unpause_instance(context, instance=instance)
@wrap_check_policy
def get_diagnostics(self, context, instance):
"""Retrieve diagnostics for the given instance."""
return self.compute_rpcapi.get_diagnostics(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED])
def suspend(self, context, instance):
"""Suspend the given instance."""
self.update(context,
instance,
task_state=task_states.SUSPENDING,
expected_task_state=None)
self._record_action_start(context, instance, instance_actions.SUSPEND)
self.compute_rpcapi.suspend_instance(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SUSPENDED])
def resume(self, context, instance):
"""Resume the given instance."""
self.update(context,
instance,
task_state=task_states.RESUMING,
expected_task_state=None)
self._record_action_start(context, instance, instance_actions.RESUME)
self.compute_rpcapi.resume_instance(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.ERROR])
def rescue(self, context, instance, rescue_password=None):
"""Rescue the given instance."""
bdms = self.get_instance_bdms(context, instance)
for bdm in bdms:
if bdm['volume_id']:
volume = self.volume_api.get(context, bdm['volume_id'])
self.volume_api.check_attached(context, volume)
# TODO(ndipanov): This check can be generalized as a decorator to
# check for valid combinations of src and dests - for now check
# if it's booted from volume only
if self.is_volume_backed_instance(context, instance, None):
reason = _("Cannot rescue a volume-backed instance")
raise exception.InstanceNotRescuable(instance_id=instance['uuid'],
reason=reason)
self.update(context,
instance,
task_state=task_states.RESCUING,
expected_task_state=None)
self._record_action_start(context, instance, instance_actions.RESCUE)
self.compute_rpcapi.rescue_instance(context, instance=instance,
rescue_password=rescue_password)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.RESCUED])
def unrescue(self, context, instance):
"""Unrescue the given instance."""
self.update(context,
instance,
task_state=task_states.UNRESCUING,
expected_task_state=None)
self._record_action_start(context, instance, instance_actions.UNRESCUE)
self.compute_rpcapi.unrescue_instance(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE])
def set_admin_password(self, context, instance, password=None):
"""Set the root/admin password for the given instance."""
self.update(context,
instance,
task_state=task_states.UPDATING_PASSWORD,
expected_task_state=None)
self._record_action_start(context, instance,
instance_actions.CHANGE_PASSWORD)
self.compute_rpcapi.set_admin_password(context,
instance=instance,
new_pass=password)
@wrap_check_policy
@check_instance_lock
def inject_file(self, context, instance, path, file_contents):
"""Write a file to the given instance."""
self.compute_rpcapi.inject_file(context, instance=instance, path=path,
file_contents=file_contents)
@wrap_check_policy
@check_instance_host
def get_vnc_console(self, context, instance, console_type):
"""Get a url to an instance Console."""
connect_info = self.compute_rpcapi.get_vnc_console(context,
instance=instance, console_type=console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type,
connect_info['host'], connect_info['port'],
connect_info['internal_access_path'], instance['uuid'])
return {'url': connect_info['access_url']}
@check_instance_host
def get_vnc_connect_info(self, context, instance, console_type):
"""Used in a child cell to get console info."""
connect_info = self.compute_rpcapi.get_vnc_console(context,
instance=instance, console_type=console_type)
return connect_info
@wrap_check_policy
@check_instance_host
def get_spice_console(self, context, instance, console_type):
"""Get a url to an instance Console."""
connect_info = self.compute_rpcapi.get_spice_console(context,
instance=instance, console_type=console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type,
connect_info['host'], connect_info['port'],
connect_info['internal_access_path'], instance['uuid'])
return {'url': connect_info['access_url']}
@check_instance_host
def get_spice_connect_info(self, context, instance, console_type):
"""Used in a child cell to get console info."""
connect_info = self.compute_rpcapi.get_spice_console(context,
instance=instance, console_type=console_type)
return connect_info
@wrap_check_policy
@check_instance_host
def get_console_output(self, context, instance, tail_length=None):
"""Get console output for an instance."""
return self.compute_rpcapi.get_console_output(context,
instance=instance, tail_length=tail_length)
@wrap_check_policy
def lock(self, context, instance):
"""Lock the given instance."""
context = context.elevated()
instance_uuid = instance['uuid']
LOG.debug(_('Locking'), context=context, instance_uuid=instance_uuid)
self._instance_update(context, instance_uuid, locked=True)
@wrap_check_policy
def unlock(self, context, instance):
"""Unlock the given instance."""
context = context.elevated()
instance_uuid = instance['uuid']
LOG.debug(_('Unlocking'), context=context, instance_uuid=instance_uuid)
self._instance_update(context, instance_uuid, locked=False)
@wrap_check_policy
def get_lock(self, context, instance):
"""Return the boolean state of given instance's lock."""
return self.get(context, instance['uuid'])['locked']
@wrap_check_policy
@check_instance_lock
def reset_network(self, context, instance):
"""Reset networking on the instance."""
self.compute_rpcapi.reset_network(context, instance=instance)
@wrap_check_policy
@check_instance_lock
def inject_network_info(self, context, instance):
"""Inject network info for the instance."""
self.compute_rpcapi.inject_network_info(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED,
vm_states.RESIZED, vm_states.SOFT_DELETED],
task_state=None)
def attach_volume(self, context, instance, volume_id, device=None):
"""Attach an existing volume to an existing instance."""
# NOTE(vish): Fail fast if the device is not going to pass. This
# will need to be removed along with the test if we
# change the logic in the manager for what constitutes
# a valid device.
if device and not block_device.match_device(device):
raise exception.InvalidDevicePath(path=device)
# NOTE(vish): This is done on the compute host because we want
# to avoid a race where two devices are requested at
# the same time. When db access is removed from
# compute, the bdm will be created here and we will
# have to make sure that they are assigned atomically.
device = self.compute_rpcapi.reserve_block_device_name(
context, device=device, instance=instance, volume_id=volume_id)
try:
volume = self.volume_api.get(context, volume_id)
self.volume_api.check_attach(context, volume, instance=instance)
self.volume_api.reserve_volume(context, volume_id)
self.compute_rpcapi.attach_volume(context, instance=instance,
volume_id=volume_id, mountpoint=device)
except Exception:
with excutils.save_and_reraise_exception():
self.db.block_device_mapping_destroy_by_instance_and_device(
context, instance['uuid'], device)
return device
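    # Minimal usage sketch (illustrative; 'compute_api' and 'ctxt' are
    # assumed to be an instance of this class and a request context):
    #   device = compute_api.attach_volume(ctxt, instance, volume_id,
    #                                      device='/dev/vdb')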
def _detach_volume(self, context, instance, volume):
"""Detach volume from instance. This method is separated to make
it easier for cells version to override.
"""
self.volume_api.check_detach(context, volume)
self.volume_api.begin_detaching(context, volume['id'])
self.compute_rpcapi.detach_volume(context, instance=instance,
volume_id=volume['id'])
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED,
vm_states.RESIZED, vm_states.SOFT_DELETED],
task_state=None)
def detach_volume(self, context, instance, volume):
"""Detach a volume from an instance."""
if volume['attach_status'] == 'detached':
msg = _("Volume must be attached in order to detach.")
raise exception.InvalidVolume(reason=msg)
# The caller likely got the instance from volume['instance_uuid']
# in the first place, but let's sanity check.
if volume['instance_uuid'] != instance['uuid']:
raise exception.VolumeUnattached(volume_id=volume['id'])
self._detach_volume(context, instance, volume)
@wrap_check_policy
def attach_interface(self, context, instance, network_id, port_id,
requested_ip):
"""Use hotplug to add an network adapter to an instance."""
return self.compute_rpcapi.attach_interface(context,
instance=instance, network_id=network_id, port_id=port_id,
requested_ip=requested_ip)
@wrap_check_policy
def detach_interface(self, context, instance, port_id):
"""Detach an network adapter from an instance."""
self.compute_rpcapi.detach_interface(context, instance=instance,
port_id=port_id)
@wrap_check_policy
def get_instance_metadata(self, context, instance):
"""Get all metadata associated with an instance."""
rv = self.db.instance_metadata_get(context, instance['uuid'])
return dict(rv.iteritems())
def get_all_instance_metadata(self, context, search_filts):
"""Get all metadata."""
def _filter_metadata(instance, search_filt, input_metadata):
uuids = search_filt.get('resource_id', [])
keys_filter = search_filt.get('key', [])
values_filter = search_filt.get('value', [])
output_metadata = {}
if uuids and instance.get('uuid') not in uuids:
return {}
for (k, v) in input_metadata.iteritems():
# Both keys and value defined -- AND
if ((keys_filter and values_filter) and
(k not in keys_filter) and (v not in values_filter)):
continue
# Only keys or value is defined
elif ((keys_filter and k not in keys_filter) or
(values_filter and v not in values_filter)):
continue
output_metadata[k] = v
return output_metadata
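        # Example (illustrative): with search_filt {'key': ['foo'],
        # 'value': ['bar']}, an item survives the branches above only when
        # its key is 'foo' and its value is 'bar'; a filter naming only
        # keys (or only values) matches on that field alone.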
formatted_metadata_list = []
instances = self._get_instances_by_filters(context, filters={},
sort_key='created_at',
sort_dir='desc')
for instance in instances:
try:
check_policy(context, 'get_all_instance_metadata', instance)
metadata = instance.get('metadata', {})
for filt in search_filts:
# By chaining the input to the output, the filters are
# ANDed together
metadata = _filter_metadata(instance, filt, metadata)
for (k, v) in metadata.iteritems():
formatted_metadata_list.append({'key': k, 'value': v,
'instance_id': instance.get('uuid')})
except exception.PolicyNotAuthorized:
# failed policy check - not allowed to
# read this metadata
continue
return formatted_metadata_list
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED],
task_state=None)
def delete_instance_metadata(self, context, instance, key):
"""Delete the given metadata item from an instance."""
self.db.instance_metadata_delete(context, instance['uuid'], key)
instance['metadata'] = {}
notifications.send_update(context, instance, instance)
self.compute_rpcapi.change_instance_metadata(context,
instance=instance,
diff={key: ['-']})
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED],
task_state=None)
def update_instance_metadata(self, context, instance,
metadata, delete=False):
"""Updates or creates instance metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
"""
orig = self.get_instance_metadata(context, instance)
if delete:
_metadata = metadata
else:
_metadata = orig.copy()
_metadata.update(metadata)
self._check_metadata_properties_quota(context, _metadata)
metadata = self.db.instance_metadata_update(context, instance['uuid'],
_metadata, True)
instance['metadata'] = metadata
notifications.send_update(context, instance, instance)
diff = utils.diff_dict(orig, _metadata)
self.compute_rpcapi.change_instance_metadata(context,
instance=instance,
diff=diff)
return _metadata
def get_instance_faults(self, context, instances):
"""Get all faults for a list of instance uuids."""
if not instances:
return {}
for instance in instances:
check_policy(context, 'get_instance_faults', instance)
uuids = [instance['uuid'] for instance in instances]
return self.db.instance_fault_get_by_instance_uuids(context, uuids)
def get_instance_bdms(self, context, instance, legacy=True):
"""Get all bdm tables for specified instance."""
bdms = self.db.block_device_mapping_get_all_by_instance(context,
instance['uuid'])
if legacy:
return block_device.legacy_mapping(bdms)
return bdms
def is_volume_backed_instance(self, context, instance, bdms):
if not instance['image_ref']:
return True
if bdms is None:
bdms = self.get_instance_bdms(context, instance)
for bdm in bdms:
if ((block_device.strip_dev(bdm['device_name']) ==
block_device.strip_dev(instance['root_device_name']))
and
(bdm['volume_id'] is not None or
bdm['snapshot_id'] is not None)):
return True
return False
@check_instance_state(vm_state=[vm_states.ACTIVE])
def live_migrate(self, context, instance, block_migration,
disk_over_commit, host_name):
"""Migrate a server lively to a new host."""
LOG.debug(_("Going to try to live migrate instance to %s"),
host_name or "another host", instance=instance)
instance = self.update(context, instance,
task_state=task_states.MIGRATING,
expected_task_state=None)
self.compute_task_api.migrate_server(context, instance,
scheduler_hint={'host': host_name},
live=True, rebuild=False, flavor=None,
block_migration=block_migration,
disk_over_commit=disk_over_commit)
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED],
task_state=[None])
def evacuate(self, context, instance, host, on_shared_storage,
admin_password=None):
"""Running evacuate to target host.
Checking vm compute host state, if the host not in expected_state,
raising an exception.
"""
LOG.debug(_('vm evacuation scheduled'))
inst_host = instance['host']
service = self.db.service_get_by_compute_host(context, inst_host)
if self.servicegroup_api.service_is_up(service):
msg = (_('Instance compute service state on %s '
'expected to be down, but it was up.') % inst_host)
LOG.error(msg)
raise exception.ComputeServiceUnavailable(msg)
instance = self.update(context, instance, expected_task_state=None,
task_state=task_states.REBUILDING)
self._record_action_start(context, instance, instance_actions.EVACUATE)
return self.compute_rpcapi.rebuild_instance(context,
instance=instance,
new_pass=admin_password,
injected_files=None,
image_ref=None,
orig_image_ref=None,
orig_sys_metadata=None,
bdms=None,
recreate=True,
on_shared_storage=on_shared_storage,
host=host)
def get_migrations(self, context, filters):
"""Get all migrations for the given filters."""
return self.db.migration_get_all_by_filters(context, filters)
class HostAPI(base.Base):
"""Sub-set of the Compute Manager API for managing host operations."""
def __init__(self, rpcapi=None):
self.rpcapi = rpcapi or compute_rpcapi.ComputeAPI()
self.servicegroup_api = servicegroup.API()
super(HostAPI, self).__init__()
def _assert_host_exists(self, context, host_name, must_be_up=False):
"""Raise HostNotFound if compute host doesn't exist."""
service = self.db.service_get_by_compute_host(context, host_name)
if not service:
raise exception.HostNotFound(host=host_name)
if must_be_up and not self.servicegroup_api.service_is_up(service):
raise exception.ComputeServiceUnavailable(host=host_name)
return service['host']
def set_host_enabled(self, context, host_name, enabled):
"""Sets the specified host's ability to accept new instances."""
host_name = self._assert_host_exists(context, host_name)
return self.rpcapi.set_host_enabled(context, enabled=enabled,
host=host_name)
def get_host_uptime(self, context, host_name):
"""Returns the result of calling "uptime" on the target host."""
host_name = self._assert_host_exists(context, host_name,
must_be_up=True)
return self.rpcapi.get_host_uptime(context, host=host_name)
def host_power_action(self, context, host_name, action):
"""Reboots, shuts down or powers up the host."""
host_name = self._assert_host_exists(context, host_name)
return self.rpcapi.host_power_action(context, action=action,
host=host_name)
def set_host_maintenance(self, context, host_name, mode):
"""Start/Stop host maintenance window. On start, it triggers
        guest VM evacuation.
"""
host_name = self._assert_host_exists(context, host_name)
return self.rpcapi.host_maintenance_mode(context,
host_param=host_name, mode=mode, host=host_name)
def service_get_all(self, context, filters=None, set_zones=False):
"""Returns a list of services, optionally filtering the results.
If specified, 'filters' should be a dictionary containing services
attributes and matching values. Ie, to get a list of services for
the 'compute' topic, use filters={'topic': 'compute'}.
"""
if filters is None:
filters = {}
disabled = filters.pop('disabled', None)
services = self.db.service_get_all(context, disabled=disabled)
if set_zones or 'availability_zone' in filters:
services = availability_zones.set_availability_zones(context,
services)
ret_services = []
for service in services:
for key, val in filters.iteritems():
if service[key] != val:
break
else:
# All filters matched.
ret_services.append(service)
return ret_services
def service_get_by_compute_host(self, context, host_name):
"""Get service entry for the given compute hostname."""
return self.db.service_get_by_compute_host(context, host_name)
def service_update(self, context, host_name, binary, params_to_update):
"""Enable / Disable a service.
For compute services, this stops new builds and migrations going to
the host.
"""
service = db.service_get_by_args(context, host_name, binary)
return db.service_update(context, service['id'], params_to_update)
def instance_get_all_by_host(self, context, host_name):
"""Return all instances on the given host."""
return self.db.instance_get_all_by_host(context, host_name)
def task_log_get_all(self, context, task_name, period_beginning,
period_ending, host=None, state=None):
"""Return the task logs within a given range, optionally
filtering by host and/or state.
"""
return self.db.task_log_get_all(context, task_name,
period_beginning,
period_ending,
host=host,
state=state)
def compute_node_get(self, context, compute_id):
"""Return compute node entry for particular integer ID."""
return self.db.compute_node_get(context, int(compute_id))
def compute_node_get_all(self, context):
return self.db.compute_node_get_all(context)
def compute_node_search_by_hypervisor(self, context, hypervisor_match):
return self.db.compute_node_search_by_hypervisor(context,
hypervisor_match)
def compute_node_statistics(self, context):
return self.db.compute_node_statistics(context)
class InstanceActionAPI(base.Base):
"""Sub-set of the Compute Manager API for managing instance actions."""
def actions_get(self, context, instance):
return self.db.actions_get(context, instance['uuid'])
def action_get_by_request_id(self, context, instance, request_id):
return self.db.action_get_by_request_id(context, instance['uuid'],
request_id)
def action_events_get(self, context, instance, action_id):
return self.db.action_events_get(context, action_id)
class AggregateAPI(base.Base):
"""Sub-set of the Compute Manager API for managing host aggregates."""
def __init__(self, **kwargs):
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
super(AggregateAPI, self).__init__(**kwargs)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def create_aggregate(self, context, aggregate_name, availability_zone):
"""Creates the model for the aggregate."""
aggregate_payload = {}
values = {"name": aggregate_name}
aggregate_payload.update(values)
metadata = None
if availability_zone:
metadata = {'availability_zone': availability_zone}
aggregate_payload.update({'meta_data': metadata})
compute_utils.notify_about_aggregate_update(context,
"create.start",
aggregate_payload)
aggregate = self.db.aggregate_create(context, values,
metadata=metadata)
aggregate = self._get_aggregate_info(context, aggregate)
# To maintain the same API result as before.
del aggregate['hosts']
del aggregate['metadata']
aggregate_payload.update({'aggregate_id': aggregate['id']})
compute_utils.notify_about_aggregate_update(context,
"create.end",
aggregate_payload)
return aggregate
def get_aggregate(self, context, aggregate_id):
"""Get an aggregate by id."""
aggregate = self.db.aggregate_get(context, aggregate_id)
return self._get_aggregate_info(context, aggregate)
def get_aggregate_list(self, context):
"""Get all the aggregates."""
aggregates = self.db.aggregate_get_all(context)
return [self._get_aggregate_info(context, a) for a in aggregates]
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def update_aggregate(self, context, aggregate_id, values):
"""Update the properties of an aggregate."""
aggregate_payload = {'aggregate_id': aggregate_id}
aggregate_payload.update({'meta_data': values})
compute_utils.notify_about_aggregate_update(context,
"updateprop.start",
aggregate_payload)
aggregate = self.db.aggregate_update(context, aggregate_id, values)
compute_utils.notify_about_aggregate_update(context,
"updateprop.end",
aggregate_payload)
return self._get_aggregate_info(context, aggregate)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def update_aggregate_metadata(self, context, aggregate_id, metadata):
"""Updates the aggregate metadata."""
aggregate_payload = {'aggregate_id': aggregate_id}
aggregate_payload.update({'meta_data': metadata})
compute_utils.notify_about_aggregate_update(context,
"updatemetadata.start",
aggregate_payload)
# If a key is set to None, it gets removed from the aggregate metadata.
for key in metadata.keys():
if not metadata[key]:
try:
self.db.aggregate_metadata_delete(context,
aggregate_id, key)
metadata.pop(key)
except exception.AggregateMetadataNotFound as e:
LOG.warn(e.format_message())
self.db.aggregate_metadata_add(context, aggregate_id, metadata)
compute_utils.notify_about_aggregate_update(context,
"updatemetadata.end",
aggregate_payload)
return self.get_aggregate(context, aggregate_id)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def delete_aggregate(self, context, aggregate_id):
"""Deletes the aggregate."""
aggregate_payload = {'aggregate_id': aggregate_id}
compute_utils.notify_about_aggregate_update(context,
"delete.start",
aggregate_payload)
hosts = self.db.aggregate_host_get_all(context, aggregate_id)
if len(hosts) > 0:
raise exception.InvalidAggregateAction(action='delete',
aggregate_id=aggregate_id,
reason='not empty')
self.db.aggregate_delete(context, aggregate_id)
compute_utils.notify_about_aggregate_update(context,
"delete.end",
aggregate_payload)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def add_host_to_aggregate(self, context, aggregate_id, host_name):
"""Adds the host to an aggregate."""
aggregate_payload = {'aggregate_id': aggregate_id,
'host_name': host_name}
compute_utils.notify_about_aggregate_update(context,
"addhost.start",
aggregate_payload)
# validates the host; ComputeHostNotFound is raised if invalid
self.db.service_get_by_compute_host(context, host_name)
aggregate = self.db.aggregate_get(context, aggregate_id)
self.db.aggregate_host_add(context, aggregate_id, host_name)
#NOTE(jogo): Send message to host to support resource pools
self.compute_rpcapi.add_aggregate_host(context,
aggregate=aggregate, host_param=host_name, host=host_name)
aggregate_payload.update({'name': aggregate['name']})
compute_utils.notify_about_aggregate_update(context,
"addhost.end",
aggregate_payload)
return self.get_aggregate(context, aggregate_id)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def remove_host_from_aggregate(self, context, aggregate_id, host_name):
"""Removes host from the aggregate."""
aggregate_payload = {'aggregate_id': aggregate_id,
'host_name': host_name}
compute_utils.notify_about_aggregate_update(context,
"removehost.start",
aggregate_payload)
# validates the host; ComputeHostNotFound is raised if invalid
self.db.service_get_by_compute_host(context, host_name)
aggregate = self.db.aggregate_get(context, aggregate_id)
self.db.aggregate_host_delete(context, aggregate_id, host_name)
self.compute_rpcapi.remove_aggregate_host(context,
aggregate=aggregate, host_param=host_name, host=host_name)
compute_utils.notify_about_aggregate_update(context,
"removehost.end",
aggregate_payload)
return self.get_aggregate(context, aggregate_id)
def _get_aggregate_info(self, context, aggregate):
"""Builds a dictionary with aggregate props, metadata and hosts."""
metadata = self.db.aggregate_metadata_get(context, aggregate['id'])
hosts = self.db.aggregate_host_get_all(context, aggregate['id'])
result = dict(aggregate.iteritems())
# metadetails was not originally included here. We need to pull it
# back out to maintain API stability.
del result['metadetails']
result["metadata"] = metadata
result["hosts"] = hosts
return result
class KeypairAPI(base.Base):
"""Subset of the Compute Manager API for managing key pairs."""
def _validate_new_key_pair(self, context, user_id, key_name):
safe_chars = "_- " + string.digits + string.ascii_letters
clean_value = "".join(x for x in key_name if x in safe_chars)
if clean_value != key_name:
raise exception.InvalidKeypair(
_("Keypair name contains unsafe characters"))
if not 0 < len(key_name) < 256:
raise exception.InvalidKeypair(
_('Keypair name must be between 1 and 255 characters long'))
count = QUOTAS.count(context, 'key_pairs', user_id)
try:
QUOTAS.limit_check(context, key_pairs=count + 1)
except exception.OverQuota:
raise exception.KeypairLimitExceeded()
def import_key_pair(self, context, user_id, key_name, public_key):
"""Import a key pair using an existing public key."""
self._validate_new_key_pair(context, user_id, key_name)
fingerprint = crypto.generate_fingerprint(public_key)
keypair = {'user_id': user_id,
'name': key_name,
'fingerprint': fingerprint,
'public_key': public_key}
self.db.key_pair_create(context, keypair)
return keypair
def create_key_pair(self, context, user_id, key_name):
"""Create a new key pair."""
self._validate_new_key_pair(context, user_id, key_name)
private_key, public_key, fingerprint = crypto.generate_key_pair()
keypair = {'user_id': user_id,
'name': key_name,
'fingerprint': fingerprint,
'public_key': public_key,
'private_key': private_key}
self.db.key_pair_create(context, keypair)
return keypair
def delete_key_pair(self, context, user_id, key_name):
"""Delete a keypair by name."""
self.db.key_pair_destroy(context, user_id, key_name)
def _get_key_pair(self, key_pair):
return {'name': key_pair['name'],
'public_key': key_pair['public_key'],
'fingerprint': key_pair['fingerprint']}
def get_key_pairs(self, context, user_id):
"""List key pairs."""
key_pairs = self.db.key_pair_get_all_by_user(context, user_id)
return [self._get_key_pair(k) for k in key_pairs]
def get_key_pair(self, context, user_id, key_name):
"""Get a keypair by name."""
key_pair = self.db.key_pair_get(context, user_id, key_name)
return self._get_key_pair(key_pair)
class SecurityGroupAPI(base.Base, security_group_base.SecurityGroupBase):
"""
Sub-set of the Compute API related to managing security groups
and security group rules
"""
    # The nova security group api does not use a uuid for the id.
id_is_uuid = False
def __init__(self, **kwargs):
super(SecurityGroupAPI, self).__init__(**kwargs)
self.security_group_rpcapi = compute_rpcapi.SecurityGroupAPI()
def validate_property(self, value, property, allowed):
"""
Validate given security group property.
:param value: the value to validate, as a string or unicode
:param property: the property, either 'name' or 'description'
:param allowed: the range of characters allowed
"""
try:
val = value.strip()
except AttributeError:
msg = _("Security group %s is not a string or unicode") % property
self.raise_invalid_property(msg)
if not val:
msg = _("Security group %s cannot be empty.") % property
self.raise_invalid_property(msg)
if allowed and not re.match(allowed, val):
# Some validation to ensure that values match API spec.
# - Alphanumeric characters, spaces, dashes, and underscores.
# TODO(Daviey): LP: #813685 extend beyond group_name checking, and
# probably create a param validator that can be used elsewhere.
msg = (_("Value (%(value)s) for parameter Group%(property)s is "
"invalid. Content limited to '%(allowed)s'.") %
{'value': value, 'allowed': allowed,
'property': property.capitalize()})
self.raise_invalid_property(msg)
if len(val) > 255:
msg = _("Security group %s should not be greater "
"than 255 characters.") % property
self.raise_invalid_property(msg)
def ensure_default(self, context):
"""Ensure that a context has a security group.
Creates a security group for the security context if it does not
already exist.
:param context: the security context
"""
self.db.security_group_ensure_default(context)
def create_security_group(self, context, name, description):
try:
reservations = QUOTAS.reserve(context, security_groups=1)
except exception.OverQuota:
msg = _("Quota exceeded, too many security groups.")
self.raise_over_quota(msg)
LOG.audit(_("Create Security Group %s"), name, context=context)
try:
self.ensure_default(context)
group = {'user_id': context.user_id,
'project_id': context.project_id,
'name': name,
'description': description}
try:
group_ref = self.db.security_group_create(context, group)
except exception.SecurityGroupExists:
msg = _('Security group %s already exists') % name
self.raise_group_already_exists(msg)
# Commit the reservation
QUOTAS.commit(context, reservations)
except Exception:
with excutils.save_and_reraise_exception():
QUOTAS.rollback(context, reservations)
return group_ref
def update_security_group(self, context, security_group,
name, description):
if security_group['name'] in RO_SECURITY_GROUPS:
msg = (_("Unable to update system group '%s'") %
security_group['name'])
self.raise_invalid_group(msg)
group = {'name': name,
'description': description}
group_ref = self.db.security_group_update(context,
security_group['id'],
group)
return group_ref
def get(self, context, name=None, id=None, map_exception=False):
self.ensure_default(context)
try:
if name:
return self.db.security_group_get_by_name(context,
context.project_id,
name)
elif id:
return self.db.security_group_get(context, id)
except exception.NotFound as exp:
if map_exception:
msg = exp.format_message()
self.raise_not_found(msg)
else:
raise
def list(self, context, names=None, ids=None, project=None,
search_opts=None):
self.ensure_default(context)
groups = []
if names or ids:
if names:
for name in names:
groups.append(self.db.security_group_get_by_name(context,
project,
name))
if ids:
for id in ids:
groups.append(self.db.security_group_get(context, id))
elif context.is_admin:
# TODO(eglynn): support a wider set of search options than just
# all_tenants, at least include the standard filters defined for
# the EC2 DescribeSecurityGroups API for the non-admin case also
if (search_opts and 'all_tenants' in search_opts):
groups = self.db.security_group_get_all(context)
else:
groups = self.db.security_group_get_by_project(context,
project)
elif project:
groups = self.db.security_group_get_by_project(context, project)
return groups
def destroy(self, context, security_group):
if security_group['name'] in RO_SECURITY_GROUPS:
msg = _("Unable to delete system group '%s'") % \
security_group['name']
self.raise_invalid_group(msg)
if self.db.security_group_in_use(context, security_group['id']):
msg = _("Security group is still in use")
self.raise_invalid_group(msg)
# Get reservations
try:
reservations = QUOTAS.reserve(context, security_groups=-1)
except Exception:
reservations = None
LOG.exception(_("Failed to update usages deallocating "
"security group"))
LOG.audit(_("Delete security group %s"), security_group['name'],
context=context)
self.db.security_group_destroy(context, security_group['id'])
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations)
def is_associated_with_server(self, security_group, instance_uuid):
"""Check if the security group is already associated
with the instance. If Yes, return True.
"""
if not security_group:
return False
instances = security_group.get('instances')
if not instances:
return False
for inst in instances:
if (instance_uuid == inst['uuid']):
return True
return False
@wrap_check_security_groups_policy
def add_to_instance(self, context, instance, security_group_name):
"""Add security group to the instance."""
security_group = self.db.security_group_get_by_name(context,
context.project_id,
security_group_name)
instance_uuid = instance['uuid']
#check if the security group is associated with the server
if self.is_associated_with_server(security_group, instance_uuid):
raise exception.SecurityGroupExistsForInstance(
security_group_id=security_group['id'],
instance_id=instance_uuid)
self.db.instance_add_security_group(context.elevated(),
instance_uuid,
security_group['id'])
# NOTE(comstud): No instance_uuid argument to this compute manager
# call
self.security_group_rpcapi.refresh_security_group_rules(context,
security_group['id'], host=instance['host'])
@wrap_check_security_groups_policy
def remove_from_instance(self, context, instance, security_group_name):
"""Remove the security group associated with the instance."""
security_group = self.db.security_group_get_by_name(context,
context.project_id,
security_group_name)
instance_uuid = instance['uuid']
#check if the security group is associated with the server
if not self.is_associated_with_server(security_group, instance_uuid):
raise exception.SecurityGroupNotExistsForInstance(
security_group_id=security_group['id'],
instance_id=instance_uuid)
self.db.instance_remove_security_group(context.elevated(),
instance_uuid,
security_group['id'])
# NOTE(comstud): No instance_uuid argument to this compute manager
# call
self.security_group_rpcapi.refresh_security_group_rules(context,
security_group['id'], host=instance['host'])
def get_rule(self, context, id):
self.ensure_default(context)
try:
return self.db.security_group_rule_get(context, id)
except exception.NotFound:
msg = _("Rule (%s) not found") % id
self.raise_not_found(msg)
def add_rules(self, context, id, name, vals):
"""Add security group rule(s) to security group.
        Note: the Nova security group API doesn't support adding multiple
        security group rules at once but the EC2 one does. Therefore,
        this function is written to support both.
"""
count = QUOTAS.count(context, 'security_group_rules', id)
try:
projected = count + len(vals)
QUOTAS.limit_check(context, security_group_rules=projected)
except exception.OverQuota:
msg = _("Quota exceeded, too many security group rules.")
self.raise_over_quota(msg)
msg = _("Authorize security group ingress %s")
LOG.audit(msg, name, context=context)
rules = [self.db.security_group_rule_create(context, v) for v in vals]
self.trigger_rules_refresh(context, id=id)
return rules
def remove_rules(self, context, security_group, rule_ids):
msg = _("Revoke security group ingress %s")
LOG.audit(msg, security_group['name'], context=context)
for rule_id in rule_ids:
self.db.security_group_rule_destroy(context, rule_id)
# NOTE(vish): we removed some rules, so refresh
self.trigger_rules_refresh(context, id=security_group['id'])
def remove_default_rules(self, context, rule_ids):
for rule_id in rule_ids:
self.db.security_group_default_rule_destroy(context, rule_id)
def add_default_rules(self, context, vals):
rules = [self.db.security_group_default_rule_create(context, v)
for v in vals]
return rules
def default_rule_exists(self, context, values):
"""Indicates whether the specified rule values are already
defined in the default security group rules.
"""
for rule in self.db.security_group_default_rule_list(context):
is_duplicate = True
keys = ('cidr', 'from_port', 'to_port', 'protocol')
for key in keys:
if rule.get(key) != values.get(key):
is_duplicate = False
break
if is_duplicate:
return rule.get('id') or True
return False
def get_all_default_rules(self, context):
try:
rules = self.db.security_group_default_rule_list(context)
except Exception:
msg = 'cannot get default security group rules'
raise exception.SecurityGroupDefaultRuleNotFound(msg)
return rules
def get_default_rule(self, context, id):
try:
return self.db.security_group_default_rule_get(context, id)
except exception.NotFound:
msg = _("Rule (%s) not found") % id
self.raise_not_found(msg)
def validate_id(self, id):
try:
return int(id)
except ValueError:
msg = _("Security group id should be integer")
self.raise_invalid_property(msg)
def trigger_rules_refresh(self, context, id):
"""Called when a rule is added to or removed from a security_group."""
security_group = self.db.security_group_get(
context, id, columns_to_join=['instances'])
for instance in security_group['instances']:
if instance['host'] is not None:
self.security_group_rpcapi.refresh_instance_security_rules(
context, instance['host'], instance)
def trigger_members_refresh(self, context, group_ids):
"""Called when a security group gains a new or loses a member.
Sends an update request to each compute node for each instance for
which this is relevant.
"""
# First, we get the security group rules that reference these groups as
# the grantee..
security_group_rules = set()
for group_id in group_ids:
security_group_rules.update(
self.db.security_group_rule_get_by_security_group_grantee(
context,
group_id))
# ..then we distill the rules into the groups to which they belong..
security_groups = set()
for rule in security_group_rules:
security_group = self.db.security_group_get(
context, rule['parent_group_id'],
columns_to_join=['instances'])
security_groups.add(security_group)
# ..then we find the instances that are members of these groups..
instances = {}
for security_group in security_groups:
for instance in security_group['instances']:
if instance['uuid'] not in instances:
instances[instance['uuid']] = instance
# ..then we send a request to refresh the rules for each instance.
for instance in instances.values():
if instance['host']:
self.security_group_rpcapi.refresh_instance_security_rules(
context, instance['host'], instance)
def get_instance_security_groups(self, context, instance_uuid,
detailed=False):
if detailed:
return self.db.security_group_get_by_instance(context,
instance_uuid)
instance = self.db.instance_get_by_uuid(context, instance_uuid)
groups = instance.get('security_groups')
if groups:
return [{'name': group['name']} for group in groups]
def populate_security_groups(self, instance, security_groups):
instance['security_groups'] = security_groups
|
apache-2.0
| 9,132,316,229,346,046,000 | 42.558882 | 79 | 0.566899 | false | 4.555406 | false | false | false |
deepmind/rlax
|
rlax/_src/policy_gradients.py
|
1
|
10472
|
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""JAX functions implementing policy gradient losses.
Policy gradient algorithms directly update the policy of an agent based on
a stochastic estimate of the direction of steepest ascent in a score function
representing the expected return of that policy. This subpackage provides a
number of utility functions for implementing policy gradient algorithms for
discrete and continuous policies.
"""
from typing import Optional
import chex
import jax
import jax.numpy as jnp
from rlax._src import distributions
from rlax._src import losses
Array = chex.Array
Scalar = chex.Scalar
def _clip_by_l2_norm(x: Array, max_norm: float) -> Array:
"""Clip gradients to maximum l2 norm `max_norm`."""
# Compute the sum of squares and find out where things are zero.
sum_sq = jnp.sum(jnp.vdot(x, x))
nonzero = sum_sq > 0
# Compute the norm wherever sum_sq > 0 and leave it <= 0 otherwise. This makes
  # use of the "double where" trick; see
# https://jax.readthedocs.io/en/latest/faq.html#gradients-contain-nan-where-using-where
  # for more info. In short, this is necessary because although norm ends up
  # computed correctly where nonzero is true, if we ignored this we'd end up
  # with nans on the off-branches which would leak through when computing
  # gradients in the backward pass.
sum_sq_ones = jnp.where(nonzero, sum_sq, jnp.ones_like(sum_sq))
norm = jnp.where(nonzero, jnp.sqrt(sum_sq_ones), sum_sq)
# Normalize by max_norm. Whenever norm < max_norm we're left with x (this
# happens trivially for indices where nonzero is false). Otherwise we're left
# with the desired x * max_norm / norm.
return (x * max_norm) / jnp.maximum(norm, max_norm)
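# Illustrative check (not part of the original module): with the clipping rule
# above, a vector whose l2 norm exceeds ``max_norm`` is rescaled onto that norm,
# e.g. _clip_by_l2_norm(jnp.asarray([3., 4.]), max_norm=1.) ~= [0.6, 0.8],
# while a vector whose norm is already <= ``max_norm`` is returned unchanged.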
def dpg_loss(
a_t: Array,
dqda_t: Array,
dqda_clipping: Optional[Scalar] = None,
use_stop_gradient: bool = True,
) -> Array:
"""Calculates the deterministic policy gradient (DPG) loss.
See "Deterministic Policy Gradient Algorithms" by Silver, Lever, Heess,
Degris, Wierstra, Riedmiller (http://proceedings.mlr.press/v32/silver14.pdf).
Args:
a_t: continuous-valued action at time t.
dqda_t: gradient of Q(s,a) wrt. a, evaluated at time t.
dqda_clipping: clips the gradient to have norm <= `dqda_clipping`.
use_stop_gradient: bool indicating whether or not to apply stop gradient
to targets.
Returns:
DPG loss.
"""
chex.assert_rank([a_t, dqda_t], 1)
chex.assert_type([a_t, dqda_t], float)
if dqda_clipping is not None:
dqda_t = _clip_by_l2_norm(dqda_t, dqda_clipping)
target_tm1 = dqda_t + a_t
target_tm1 = jax.lax.select(use_stop_gradient,
jax.lax.stop_gradient(target_tm1), target_tm1)
return losses.l2_loss(target_tm1 - a_t)
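# Note (illustrative, not part of the original module): with the stop_gradient
# applied, the loss is l2_loss(stop_grad(a_t + dqda_t) - a_t), whose gradient
# with respect to the actor output a_t is -dqda_t at that point, so descending
# this loss moves the action along the (optionally clipped) dQ/da direction,
# as in the DPG actor update.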
def policy_gradient_loss(
logits_t: Array,
a_t: Array,
adv_t: Array,
w_t: Array,
use_stop_gradient: bool = True,
) -> Array:
"""Calculates the policy gradient loss.
See "Simple Gradient-Following Algorithms for Connectionist RL" by Williams.
(http://www-anw.cs.umass.edu/~barto/courses/cs687/williams92simple.pdf)
Args:
logits_t: a sequence of unnormalized action preferences.
a_t: a sequence of actions sampled from the preferences `logits_t`.
adv_t: the observed or estimated advantages from executing actions `a_t`.
w_t: a per timestep weighting for the loss.
use_stop_gradient: bool indicating whether or not to apply stop gradient to
advantages.
Returns:
Loss whose gradient corresponds to a policy gradient update.
"""
chex.assert_rank([logits_t, a_t, adv_t, w_t], [2, 1, 1, 1])
chex.assert_type([logits_t, a_t, adv_t, w_t], [float, int, float, float])
log_pi_a_t = distributions.softmax().logprob(a_t, logits_t)
adv_t = jax.lax.select(use_stop_gradient, jax.lax.stop_gradient(adv_t), adv_t)
loss_per_timestep = -log_pi_a_t * adv_t
return jnp.mean(loss_per_timestep * w_t)
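# Minimal usage sketch (illustrative only, not part of the original module).
# Shapes follow the chex assertions above: T timesteps and A discrete actions
# give logits_t of shape [T, A] and a_t/adv_t/w_t of shape [T], e.g.
#   logits = jnp.zeros((4, 3))            # uniform action preferences
#   actions = jnp.array([0, 1, 2, 0])     # sampled actions (ints)
#   advs = jnp.array([1., -1., .5, 0.])   # estimated advantages
#   w = jnp.ones((4,))                    # per-timestep weights
#   loss = policy_gradient_loss(logits, actions, advs, w)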
def entropy_loss(
logits_t: Array,
w_t: Array,
) -> Array:
"""Calculates the entropy regularization loss.
See "Function Optimization using Connectionist RL Algorithms" by Williams.
(https://www.tandfonline.com/doi/abs/10.1080/09540099108946587)
Args:
logits_t: a sequence of unnormalized action preferences.
w_t: a per timestep weighting for the loss.
Returns:
Entropy loss.
"""
chex.assert_rank([logits_t, w_t], [2, 1])
chex.assert_type([logits_t, w_t], float)
entropy_per_timestep = distributions.softmax().entropy(logits_t)
return -jnp.mean(entropy_per_timestep * w_t)
def _compute_advantages(logits_t: Array,
q_t: Array,
use_stop_gradient=True) -> Array:
"""Computes summed advantage using logits and action values."""
policy_t = jax.nn.softmax(logits_t, axis=1)
# Avoid computing gradients for action_values.
q_t = jax.lax.select(use_stop_gradient, jax.lax.stop_gradient(q_t), q_t)
baseline_t = jnp.sum(policy_t * q_t, axis=1)
adv_t = q_t - jnp.expand_dims(baseline_t, 1)
return policy_t, adv_t
def qpg_loss(
logits_t: Array,
q_t: Array,
use_stop_gradient: bool = True,
) -> Array:
"""Computes the QPG (Q-based Policy Gradient) loss.
See "Actor-Critic Policy Optimization in Partially Observable Multiagent
Environments" by Srinivasan, Lanctot.
(https://papers.nips.cc/paper/7602-actor-critic-policy-optimization-in-partially-observable-multiagent-environments.pdf)
Args:
logits_t: a sequence of unnormalized action preferences.
q_t: the observed or estimated action value from executing actions `a_t` at
time t.
use_stop_gradient: bool indicating whether or not to apply stop gradient to
advantages.
Returns:
QPG Loss.
"""
chex.assert_rank([logits_t, q_t], 2)
chex.assert_type([logits_t, q_t], float)
policy_t, advantage_t = _compute_advantages(logits_t, q_t)
advantage_t = jax.lax.select(use_stop_gradient,
jax.lax.stop_gradient(advantage_t), advantage_t)
policy_advantages = -policy_t * advantage_t
loss = jnp.mean(jnp.sum(policy_advantages, axis=1), axis=0)
return loss
def rm_loss(
logits_t: Array,
q_t: Array,
use_stop_gradient: bool = True,
) -> Array:
"""Computes the RMPG (Regret Matching Policy Gradient) loss.
The gradient of this loss adapts the Regret Matching rule by weighting the
standard PG update with thresholded regret.
See "Actor-Critic Policy Optimization in Partially Observable Multiagent
Environments" by Srinivasan, Lanctot.
(https://papers.nips.cc/paper/7602-actor-critic-policy-optimization-in-partially-observable-multiagent-environments.pdf)
Args:
logits_t: a sequence of unnormalized action preferences.
q_t: the observed or estimated action value from executing actions `a_t` at
time t.
use_stop_gradient: bool indicating whether or not to apply stop gradient to
advantages.
Returns:
RM Loss.
"""
chex.assert_rank([logits_t, q_t], 2)
chex.assert_type([logits_t, q_t], float)
policy_t, advantage_t = _compute_advantages(logits_t, q_t)
action_regret_t = jax.nn.relu(advantage_t)
action_regret_t = jax.lax.select(use_stop_gradient,
jax.lax.stop_gradient(action_regret_t),
action_regret_t)
policy_regret = -policy_t * action_regret_t
loss = jnp.mean(jnp.sum(policy_regret, axis=1), axis=0)
return loss
def rpg_loss(
logits_t: Array,
q_t: Array,
use_stop_gradient: bool = True,
) -> Array:
"""Computes the RPG (Regret Policy Gradient) loss.
The gradient of this loss adapts the Regret Matching rule by weighting the
standard PG update with regret.
See "Actor-Critic Policy Optimization in Partially Observable Multiagent
Environments" by Srinivasan, Lanctot.
(https://papers.nips.cc/paper/7602-actor-critic-policy-optimization-in-partially-observable-multiagent-environments.pdf)
Args:
logits_t: a sequence of unnormalized action preferences.
q_t: the observed or estimated action value from executing actions `a_t` at
time t.
use_stop_gradient: bool indicating whether or not to apply stop gradient to
advantages.
Returns:
RPG Loss.
"""
chex.assert_rank([logits_t, q_t], 2)
chex.assert_type([logits_t, q_t], float)
_, adv_t = _compute_advantages(logits_t, q_t, use_stop_gradient)
regrets_t = jnp.sum(jax.nn.relu(adv_t), axis=1)
total_regret_t = jnp.mean(regrets_t, axis=0)
return total_regret_t
def clipped_surrogate_pg_loss(
prob_ratios_t: Array,
adv_t: Array,
epsilon: Scalar,
use_stop_gradient=True) -> Array:
"""Computes the clipped surrogate policy gradient loss.
L_clipₜ(θ) = - min(rₜ(θ)Âₜ, clip(rₜ(θ), 1-ε, 1+ε)Âₜ)
Where rₜ(θ) = π_θ(aₜ| sₜ) / π_θ_old(aₜ| sₜ) and Âₜ are the advantages.
See Proximal Policy Optimization Algorithms, Schulman et al.:
https://arxiv.org/abs/1707.06347
Args:
prob_ratios_t: Ratio of action probabilities for actions a_t:
rₜ(θ) = π_θ(aₜ| sₜ) / π_θ_old(aₜ| sₜ)
adv_t: the observed or estimated advantages from executing actions a_t.
    epsilon: Scalar value corresponding to how much to clip the objective.
use_stop_gradient: bool indicating whether or not to apply stop gradient to
advantages.
Returns:
Loss whose gradient corresponds to a clipped surrogate policy gradient
update.
"""
chex.assert_rank([prob_ratios_t, adv_t], [1, 1])
chex.assert_type([prob_ratios_t, adv_t], [float, float])
adv_t = jax.lax.select(use_stop_gradient, jax.lax.stop_gradient(adv_t), adv_t)
clipped_ratios_t = jnp.clip(prob_ratios_t, 1. - epsilon, 1. + epsilon)
clipped_objective = jnp.fmin(prob_ratios_t * adv_t, clipped_ratios_t * adv_t)
return -jnp.mean(clipped_objective)
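# Illustrative numeric sketch (not part of the original module), assuming
# epsilon = 0.2: a probability ratio of 1.5 paired with a positive advantage is
# clipped to 1.2 before multiplying the advantage, whereas a ratio of 0.5 with
# a positive advantage stays at 0.5 because jnp.fmin already picks the smaller,
# unclipped objective; the returned loss is the negated mean over the batch.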
|
apache-2.0
| 380,960,590,882,449,540 | 34.569966 | 122 | 0.690942 | false | 3.125037 | false | false | false |
openstack/vitrage
|
vitrage/api/controllers/v1/alarm.py
|
1
|
2404
|
# Copyright 2016 - Nokia Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import pecan
from oslo_log import log
from osprofiler import profiler
from pecan.core import abort
from vitrage.api.controllers.v1.alarm_base import BaseAlarmsController
from vitrage.api.controllers.v1 import count
from vitrage.api.controllers.v1 import history
from vitrage.api.policy import enforce
from vitrage.common.constants import VertexProperties as Vprops
LOG = log.getLogger(__name__)
# noinspection PyBroadException
@profiler.trace_cls("alarm controller",
info={}, hide_args=False, trace_private=False)
class AlarmsController(BaseAlarmsController):
count = count.AlarmCountsController()
history = history.HistoryController()
@pecan.expose('json')
def get_all(self, **kwargs):
kwargs['only_active_alarms'] = True
LOG.info('returns alarms list with vitrage id %s',
kwargs.get(Vprops.VITRAGE_ID))
return self._get_alarms(**kwargs)
@pecan.expose('json')
def get(self, vitrage_id):
enforce("get alarm",
pecan.request.headers,
pecan.request.enforcer,
{})
LOG.info('returns show alarm with vitrage id %s', vitrage_id)
return self._show_alarm(vitrage_id)
@staticmethod
def _show_alarm(vitrage_id):
try:
alarm_json = pecan.request.client.call(pecan.request.context,
'show_alarm',
vitrage_id=vitrage_id)
LOG.info(alarm_json)
if not alarm_json:
abort(404, "Failed to find alarm %s" % vitrage_id)
return json.loads(alarm_json)
except Exception:
LOG.exception('Failed to load JSON.')
abort(404, 'Failed to show alarm.')
|
apache-2.0
| -3,731,942,575,143,812,600 | 30.631579 | 75 | 0.649334 | false | 3.928105 | false | false | false |
neerajvashistha/pa-dude
|
lib/python2.7/site-packages/sphinx/theming.py
|
1
|
8278
|
# -*- coding: utf-8 -*-
"""
sphinx.theming
~~~~~~~~~~~~~~
Theming support for HTML builders.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import shutil
import zipfile
import tempfile
from os import path
from six import string_types, iteritems
from six.moves import configparser
try:
import pkg_resources
except ImportError:
pkg_resources = False
from sphinx import package_dir
from sphinx.errors import ThemeError
NODEFAULT = object()
THEMECONF = 'theme.conf'
class Theme(object):
"""
Represents the theme chosen in the configuration.
"""
themes = {}
@classmethod
def init_themes(cls, confdir, theme_path, warn=None):
"""Search all theme paths for available themes."""
cls.themepath = list(theme_path)
cls.themepath.append(path.join(package_dir, 'themes'))
for themedir in cls.themepath[::-1]:
themedir = path.join(confdir, themedir)
if not path.isdir(themedir):
continue
for theme in os.listdir(themedir):
if theme.lower().endswith('.zip'):
try:
zfile = zipfile.ZipFile(path.join(themedir, theme))
if THEMECONF not in zfile.namelist():
continue
tname = theme[:-4]
tinfo = zfile
except Exception:
if warn:
warn('file %r on theme path is not a valid '
'zipfile or contains no theme' % theme)
continue
else:
if not path.isfile(path.join(themedir, theme, THEMECONF)):
continue
tname = theme
tinfo = None
cls.themes[tname] = (path.join(themedir, theme), tinfo)
@classmethod
def load_extra_theme(cls, name):
themes = ['alabaster']
try:
import sphinx_rtd_theme
themes.append('sphinx_rtd_theme')
except ImportError:
pass
if name in themes:
if name == 'alabaster':
import alabaster
themedir = alabaster.get_path()
# alabaster theme also requires 'alabaster' extension, it will be loaded
# at sphinx.application module.
elif name == 'sphinx_rtd_theme':
themedir = sphinx_rtd_theme.get_html_theme_path()
else:
raise NotImplementedError('Programming Error')
else:
for themedir in load_theme_plugins():
if path.isfile(path.join(themedir, name, THEMECONF)):
break
else:
# specified theme is not found
return
cls.themepath.append(themedir)
cls.themes[name] = (path.join(themedir, name), None)
return
def __init__(self, name, warn=None):
if name not in self.themes:
self.load_extra_theme(name)
if name not in self.themes:
if name == 'sphinx_rtd_theme':
raise ThemeError('sphinx_rtd_theme is no longer a hard dependency '
'since version 1.4.0. Please install it manually.'
'(pip install sphinx_rtd_theme)')
else:
raise ThemeError('no theme named %r found '
'(missing theme.conf?)' % name)
self.name = name
# Do not warn yet -- to be compatible with old Sphinxes, people *have*
# to use "default".
# if name == 'default' and warn:
# warn("'default' html theme has been renamed to 'classic'. "
# "Please change your html_theme setting either to "
# "the new 'alabaster' default theme, or to 'classic' "
# "to keep using the old default.")
tdir, tinfo = self.themes[name]
if tinfo is None:
# already a directory, do nothing
self.themedir = tdir
self.themedir_created = False
else:
# extract the theme to a temp directory
self.themedir = tempfile.mkdtemp('sxt')
self.themedir_created = True
for name in tinfo.namelist():
if name.endswith('/'):
continue
dirname = path.dirname(name)
if not path.isdir(path.join(self.themedir, dirname)):
os.makedirs(path.join(self.themedir, dirname))
fp = open(path.join(self.themedir, name), 'wb')
fp.write(tinfo.read(name))
fp.close()
self.themeconf = configparser.RawConfigParser()
self.themeconf.read(path.join(self.themedir, THEMECONF))
try:
inherit = self.themeconf.get('theme', 'inherit')
except configparser.NoOptionError:
raise ThemeError('theme %r doesn\'t have "inherit" setting' % name)
# load inherited theme automatically #1794, #1884, #1885
self.load_extra_theme(inherit)
if inherit == 'none':
self.base = None
elif inherit not in self.themes:
raise ThemeError('no theme named %r found, inherited by %r' %
(inherit, name))
else:
self.base = Theme(inherit, warn=warn)
def get_confstr(self, section, name, default=NODEFAULT):
"""Return the value for a theme configuration setting, searching the
base theme chain.
"""
try:
return self.themeconf.get(section, name)
except (configparser.NoOptionError, configparser.NoSectionError):
if self.base is not None:
return self.base.get_confstr(section, name, default)
if default is NODEFAULT:
raise ThemeError('setting %s.%s occurs in none of the '
'searched theme configs' % (section, name))
else:
return default
def get_options(self, overrides):
"""Return a dictionary of theme options and their values."""
chain = [self.themeconf]
base = self.base
while base is not None:
chain.append(base.themeconf)
base = base.base
options = {}
for conf in reversed(chain):
try:
options.update(conf.items('options'))
except configparser.NoSectionError:
pass
for option, value in iteritems(overrides):
if option not in options:
raise ThemeError('unsupported theme option %r given' % option)
options[option] = value
return options
def get_dirchain(self):
"""Return a list of theme directories, beginning with this theme's,
then the base theme's, then that one's base theme's, etc.
"""
chain = [self.themedir]
base = self.base
while base is not None:
chain.append(base.themedir)
base = base.base
return chain
def cleanup(self):
"""Remove temporary directories."""
if self.themedir_created:
try:
shutil.rmtree(self.themedir)
except Exception:
pass
if self.base:
self.base.cleanup()
def load_theme_plugins():
"""load plugins by using``sphinx_themes`` section in setuptools entry_points.
This API will return list of directory that contain some theme directory.
"""
if not pkg_resources:
return []
theme_paths = []
for plugin in pkg_resources.iter_entry_points('sphinx_themes'):
func_or_path = plugin.load()
try:
path = func_or_path()
except Exception:
path = func_or_path
if isinstance(path, string_types):
theme_paths.append(path)
else:
            raise ThemeError('Plugin %r does not respond correctly.' %
plugin.module_name)
return theme_paths
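# Illustrative sketch (not part of the original module): a hypothetical theme
# package could expose its theme directory through the ``sphinx_themes`` entry
# point consumed above, e.g. in its setup.py:
#
#     setup(
#         name='my-sphinx-theme',                     # hypothetical package
#         entry_points={
#             'sphinx_themes': [
#                 'path = my_sphinx_theme:get_path',  # callable or path string
#             ],
#         },
#     )
#
# where ``get_path()`` returns a directory containing one or more theme
# folders, each holding its own theme.conf.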
|
mit
| 4,504,715,810,136,291,300 | 33.781513 | 88 | 0.539381 | false | 4.609131 | true | false | false |
rwl/PyCIM
|
CIM14/CDPSM/Balanced/IEC61970/Wires/EnergySource.py
|
1
|
2528
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.CDPSM.Balanced.IEC61970.Core.ConductingEquipment import ConductingEquipment
class EnergySource(ConductingEquipment):
"""A generic equivalent for an energy supplier on a transmission or distribution voltage level.
"""
def __init__(self, x=0.0, voltageMagnitude=0.0, voltageAngle=0.0, nominalVoltage=0.0, *args, **kw_args):
"""Initialises a new 'EnergySource' instance.
@param x: Positive sequence Thevenin reactance.
@param voltageMagnitude: Phase-to-phase open circuit voltage magnitude.
@param voltageAngle: Phase angle of a-phase open circuit.
@param nominalVoltage: Phase-to-phase nominal voltage.
"""
#: Positive sequence Thevenin reactance.
self.x = x
#: Phase-to-phase open circuit voltage magnitude.
self.voltageMagnitude = voltageMagnitude
#: Phase angle of a-phase open circuit.
self.voltageAngle = voltageAngle
#: Phase-to-phase nominal voltage.
self.nominalVoltage = nominalVoltage
super(EnergySource, self).__init__(*args, **kw_args)
_attrs = ["x", "voltageMagnitude", "voltageAngle", "nominalVoltage"]
_attr_types = {"x": float, "voltageMagnitude": float, "voltageAngle": float, "nominalVoltage": float}
_defaults = {"x": 0.0, "voltageMagnitude": 0.0, "voltageAngle": 0.0, "nominalVoltage": 0.0}
_enums = {}
_refs = []
_many_refs = []
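# Illustrative usage sketch (not part of the original module): instantiating
# the equivalent source with the attributes documented above, using values
# chosen purely for illustration.
#
#     source = EnergySource(x=0.1, voltageMagnitude=11000.0,
#                           voltageAngle=0.0, nominalVoltage=11000.0)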
|
mit
| -1,110,787,457,818,717,400 | 44.963636 | 108 | 0.714794 | false | 3.931571 | false | false | false |
trilan/lemon
|
lemon/forms.py
|
1
|
2292
|
from django import forms
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.forms.formsets import formset_factory
from django.forms.models import BaseInlineFormSet
from django.forms.models import ModelFormMetaclass, _get_foreign_key
from django.utils.translation import ugettext_lazy as _
from .fields import ContentTypeChoiceField
class UserChangeForm(forms.ModelForm):
username = forms.RegexField(
label=_('Username'),
max_length=30,
regex=r'^[\w.@+-]+$',
help_text=_(
'Required. 30 characters or fewer. Letters, digits and '
'@/./+/-/_ only.'
),
error_messages={
'invalid': _(
'This value may contain only letters, numbers and @/./+/-/_ '
'characters.'
)
}
)
class Meta:
model = User
class MenuItemForm(forms.ModelForm):
admin_site = None
class Meta(object):
fields = ['content_type', 'name', 'position']
def __init__(self, *args, **kwargs):
qs = ContentType.objects.all()
content_type = ContentTypeChoiceField(
self.admin_site, qs, label=_('content type')
)
self.base_fields['content_type'] = content_type
super(MenuItemForm, self).__init__(*args, **kwargs)
formfield_callback = lambda f: f.formfield()
def contenttype_inlineformset_factory(parent_model, model, admin_site,
formfield_callback,
extra=3, can_order=False,
can_delete=True, max_num=0):
fk = _get_foreign_key(parent_model, model)
Meta = type('Meta', (MenuItemForm.Meta,), {'model': model})
class_name = model.__name__ + 'Form'
form_class_attrs = {
'admin_site': admin_site,
'Meta': Meta,
'formfield_callback': formfield_callback
}
form = ModelFormMetaclass(class_name, (MenuItemForm,), form_class_attrs)
FormSet = formset_factory(form, BaseInlineFormSet, extra=extra,
max_num=max_num,
can_order=can_order, can_delete=can_delete)
FormSet.model = model
FormSet.fk = fk
return FormSet
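# Illustrative usage sketch (not part of the original module): the factory is
# presumably called from admin code roughly as
#
#     MenuItemFormSet = contenttype_inlineformset_factory(
#         Menu, MenuItem, admin_site, formfield_callback, extra=1)
#
# where Menu and MenuItem are hypothetical parent/child models linked by a
# foreign key; the returned class behaves like a standard inline formset.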
|
bsd-3-clause
| 1,137,810,636,647,366,900 | 31.28169 | 77 | 0.58726 | false | 4.228782 | false | false | false |
gmsanchez/nmpc_comparison
|
cstr_startup_colloc.py
|
1
|
9778
|
# Linear and nonlinear control of startup of a CSTR.
import mpctools as mpc
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
import time
# Define some parameters and then the CSTR model.
Nx = 3
Nu = 2
Nd = 1
# Ny = Nx
Delta = .25
# eps = 1e-6 # Use this as a small number.
T0 = 350
c0 = 1
r = .219
k0 = 7.2e10
E = 8750
U = 54.94
rho = 1000
Cp = .239
dH = -5e4
def ode(x,u,d):
# Grab the states, controls, and disturbance. We would like to write
#
# [c, T, h] = x[0:Nx]
# [Tc, F] = u[0:Nu]
# [F0] = d[0:Nd]
#
# but this doesn't work in Casadi 3.0. So, we're stuck with the following:
c = x[0]
T = x[1]
h = x[2]
Tc = u[0]
F = u[1]
F0 = d[0]
# Now create the ODE.
rate = k0*c*np.exp(-E/T)
dxdt = np.array([
F0*(c0 - c)/(np.pi*r**2*h) - rate,
F0*(T0 - T)/(np.pi*r**2*h)
- dH/(rho*Cp)*rate
+ 2*U/(r*rho*Cp)*(Tc - T),
(F0 - F)/(np.pi*r**2)
])
return dxdt
# Turn into casadi function and simulator.
ode_casadi = mpc.getCasadiFunc(ode,[Nx,Nu,Nd],["x","u","d"],funcname="ode")
ode_rk4_casadi = mpc.getCasadiFunc(ode,[Nx,Nu,Nd],["x","u","d"],
funcname="ode_rk4",rk4=False,Delta=Delta)
cstr = mpc.DiscreteSimulator(ode, Delta, [Nx,Nu,Nd], ["x","u","d"])
# Steady-state values.
cs = .878
Ts = 324.5
hs = .659
Fs = .1
Tcs = 300
F0s = .1
# Update the steady-state values a few times to make sure they don't move.
for i in range(10):
[cs,Ts,hs] = cstr.sim([cs,Ts,hs],[Tcs,Fs],[F0s]).tolist()
xs = np.array([cs,Ts,hs])
us = np.array([Tcs,Fs])
ds = np.array([F0s])
# Now get a linearization at this steady state.
#ss = mpc.util.getLinearizedModel(ode_casadi, [xs,us,ds], ["A","B","Bp"], Delta)
#A = ss["A"]
#B = ss["B"]
#Bp = ss["Bp"]
#C = np.eye(Nx)
# Weighting matrices for controller.
Q = .5*np.diag(xs**-2)
R = 2*np.diag(us**-2)
# model_casadi = mpc.getCasadiFunc(ode,[Nx,Nu,Nd],["x","u","d"],funcname="cstr")
#[K, Pi] = mpc.util.dlqr(A,B,Q,R)
# Define casadi functions.
Fnonlinear = ode_rk4_casadi
# def measurement(x,d):
# return x
# h = mpc.getCasadiFunc(measurement,[Nx,Nd],["x","d"],funcname="h")
#def linmodel(x,u,d):
# Ax = mpc.mtimes(A,x-xs) + xs
# Bu = mpc.mtimes(B,u-us)
# Bpd = mpc.mtimes(Bp,d-ds)
# return Ax + Bu + Bpd
#Flinear = mpc.getCasadiFunc(linmodel,[Nx,Nu,Nd],["x","u","d"],funcname="F")
def stagecost(x,u,xsp,usp,Q,R):
# Return deviation variables.
dx = x - xsp
du = u - usp
# Calculate stage cost.
return mpc.mtimes(dx.T,Q,dx) + mpc.mtimes(du.T,R,du)
largs = ["x","u","x_sp","u_sp","Q","R"]
l = mpc.getCasadiFunc(stagecost,[Nx,Nu,Nx,Nu,(Nx,Nx),(Nu,Nu)],largs,
funcname="l")
def costtogo(x,xsp):
# Deviation variables.
dx = x - xsp
# Calculate cost to go.
return mpc.mtimes(dx.T,10*Q,dx)
Pf = mpc.getCasadiFunc(costtogo,[Nx,Nx],["x","s_xp"],funcname="Pf")
# First see what happens if we try to start up the reactor under no control.
Nsim = 100
x0 = np.array([.05*cs,.75*Ts,.5*hs])
xcl = {}
ucl = {}
xcl["uncont"] = np.zeros((Nsim+1,Nx))
xcl["uncont"][0,:] = x0
ucl["uncont"] = np.tile(us,(Nsim,1))
for t in range(Nsim):
xcl["uncont"][t+1,:] = cstr.sim(xcl["uncont"][t,:],ucl["uncont"][t,:],ds)
# Build a solver for the linear and nonlinear models.
Nt = 15
sp = {"x" : np.tile(xs, (Nt+1,1)), "u" : np.tile(us, (Nt,1))}
#xguesslin = np.zeros((Nt+1,Nx))
#xguesslin[0,:] = x0
#for t in range(Nt):
# xguesslin[t+1,:] = A.dot(xguesslin[t,:] - xs) + xs
#guesslin = {"x" : xguesslin, "u" : np.tile(us,(Nt,1))}
guessnonlin = sp.copy()
# Control bounds.
umax = np.array([.05*Tcs,.15*Fs])
dumax = .2*umax # Maximum for rate-of-change.
bounds = dict(uub=[us + umax],ulb=[us - umax])
ub = {"u" : np.tile(us + umax, (Nt,1)), "Du" : np.tile(dumax, (Nt,1))}
lb = {"u" : np.tile(us - umax, (Nt,1)), "Du" : np.tile(-dumax, (Nt,1))}
N = {"x":Nx, "u":Nu, "p":Nd, "t":Nt, "c":3}
p = np.tile(ds, (Nt,1)) # Parameters for system.
nmpc_commonargs = {
"N" : N,
"Delta": Delta,
"x0" : x0,
"lb" : lb,
"ub" : ub,
"p" : p,
"verbosity" : 0,
"Pf" : Pf,
"l" : l,
"sp" : sp,
"uprev" : us,
"funcargs" : {"l" : largs},
"extrapar" : {"Q" : Q, "R" : R}, # In case we want to tune online.
}
solvers = {}
# solvers["lmpc"] = mpc.nmpc(f=Flinear,guess=guesslin,**nmpc_commonargs)
solvers["nmpc"] = mpc.nmpc(f=Fnonlinear,guess=guessnonlin,**nmpc_commonargs)
# Also build steady-state target finders.
contVars = [0,2]
#sstarg_commonargs = {
# "N" : N,
# "lb" : {"u" : np.tile(us - umax, (1,1))},
# "ub" : {"u" : np.tile(us + umax, (1,1))},
# "verbosity" : 0,
## "h" : h,
# "p" : np.array([ds]),
#}
#sstargs = {}
# sstargs["lmpc"] = mpc.sstarg(f=Flinear,**sstarg_commonargs)
# sstargs["nmpc"] = mpc.sstarg(f=Fnonlinear,**sstarg_commonargs)
# Now simulate the process under control.
tcl = {}
for method in solvers.keys():
xcl[method] = np.zeros((Nsim+1,Nx))
xcl[method][0,:] = x0
tcl[method] = np.zeros((Nsim+1,1))
thisx = x0
ucl[method] = np.zeros((Nsim,Nu))
# ysp = np.tile(xs,(Nsim+1,1))
xsp = np.zeros((Nsim+1,Nx))
usp = np.zeros((Nsim,Nu))
# ysp[int(Nsim/3):int(2*Nsim/3),:] = xs*np.array([.85,.75,1.15])
for t in range(Nsim):
# Figure out setpoints.
# if t == 0 or not np.all(ysp[t,:] == ysp[t-1,:]):
# thisysp = ysp[t,:]
# sstargs[method].fixvar("y",0,thisysp[contVars],contVars)
# sstargs[method].guess["u",0] = us
# sstargs[method].guess["x",0] = thisysp
# sstargs[method].guess["y",0] = thisysp
# sstargs[method].solve()
#
# print "%10s %3d: %s" % ("sstarg",t,sstargs[method].stats["status"])
# if sstargs[method].stats["status"] != "Solve_Succeeded":
# print "***Target finder failed!"
# break
#
# xsp[t,:] = np.squeeze(sstargs[method].var["x",0])
# usp[t,:] = np.squeeze(sstargs[method].var["u",0])
#
# solvers[method].par["x_sp"] = [xsp[t,:]]*(Nt + 1)
# solvers[method].par["u_sp"] = [usp[t,:]]*Nt
# Fix initial condition and solve.
t0 = time.time()
solvers[method].fixvar("x",0,thisx)
solvers[method].solve()
print "%10s %3d: %s" % (method,t,solvers[method].stats["status"])
if solvers[method].stats["status"] != "Solve_Succeeded":
print "***Solver failed!"
break
else:
solvers[method].saveguess()
thisu = np.squeeze(solvers[method].var["u"][0])
ucl[method][t,:] = thisu
t1 = time.time()
tcl[method][t] = t1-t0
thisx = cstr.sim(thisx,thisu,ds)
xcl[method][t+1,:] = thisx
# Update previous u.
solvers[method].par["u_prev",0] = ucl[method][t,:]
# Define plotting function.
def cstrplot(x,u,xsp=None,contVars=[],title=None,colors={},labels={},
markers={},keys=None,bounds=None,ilegend=0):
if keys is None:
keys = x.keys()
for k in keys:
u[k] = np.concatenate((u[k],u[k][-1:,:]))
ylabelsx = ["$c$ (mol/L)", "$T$ (K)", "$h$ (m)"]
ylabelsu = ["$T_c$ (K)", "$F$ (kL/min)"]
gs = gridspec.GridSpec(Nx*Nu,2)
fig = plt.figure(figsize=(10,6),facecolor="none")
leglines = []
leglabels = []
for i in range(Nx):
ax = fig.add_subplot(gs[i*Nu:(i+1)*Nu,0])
for k in keys:
t = np.arange(0,x[k].shape[0])*Delta
args = {"color":colors.get(k,"black"), "label":labels.get(k,k),
"marker":markers.get(k,"")}
[line] = ax.plot(t,x[k][:,i],markeredgecolor="none",**args)
if i == ilegend:
leglines.append(line)
leglabels.append(args["label"])
if i in contVars and xsp is not None:
ax.step(t,xsp[:,i],linestyle="--",color="black",where="post")
ax.set_ylabel(ylabelsx[i])
mpc.plots.zoomaxis(ax,yscale=1.1)
mpc.plots.prettyaxesbox(ax)
mpc.plots.prettyaxesbox(ax,
facecolor="white",front=False)
ax.set_xlabel("Time (min)")
for i in range(Nu):
ax = fig.add_subplot(gs[i*Nx:(i+1)*Nx,1])
for k in keys:
t = np.arange(0,u[k].shape[0])*Delta
args = {"color":colors.get(k,"black"), "label":labels.get(k,k)}
ax.step(t,u[k][:,i],where="post",**args)
if bounds is not None:
for b in set(["uub", "ulb"]).intersection(bounds.keys()):
ax.plot(np.array([t[0],t[-1]]),np.ones((2,))*bounds[b][i],
'--k')
ax.set_ylabel(ylabelsu[i])
mpc.plots.zoomaxis(ax,yscale=1.25)
mpc.plots.prettyaxesbox(ax)
mpc.plots.prettyaxesbox(ax,
facecolor="white",front=False)
ax.set_xlabel("Time (min)")
fig.legend(leglines,leglabels,loc="lower center",ncol=len(keys))
fig.tight_layout(pad=.5,rect=(0,.075,1,1))
if title is not None:
fig.canvas.set_window_title(title)
return fig
x = xcl['nmpc']
u = ucl['nmpc']
ptimes = tcl['nmpc']
# Make plots.
keys = ["uncont", "nmpc"]
colors = {"lmpc":"blue", "nmpc":"green", "uncont":"red"}
labels = {"lmpc":"LMPC", "nmpc":"NMPC", "uncont":"Uncontrolled"}
markers = {"lmpc":"s", "nmpc":"o", "uncont":"^"}
plotbounds = dict([(k,bounds[k][0]) for k in ["ulb","uub"]])
fig = cstrplot(xcl, ucl, colors=colors, contVars=contVars, labels=labels,
keys=keys, markers={}, bounds=plotbounds, ilegend=2)
fig.show()
# mpc.plots.showandsave(fig,"cstr_startup.pdf",facecolor="none")
|
gpl-3.0
| 3,749,407,622,004,102,700 | 30.850163 | 80 | 0.54009 | false | 2.509754 | false | false | false |
bmazin/ARCONS-pipeline
|
photonlist/test/testExpTimeWeight.py
|
1
|
1192
|
'''
Author: Julian van Eyken Date: Jul 9 2013
Test code for per-pixel effective exposure time weighting.
'''
import os.path
import photonlist.photlist as pl
import photonlist.RADecImage as rdi
from util.FileName import FileName
def getSimpleImage(fileName=FileName(run='PAL2012',date='20121211',tstamp='20121212-033323').photonList(),
firstSec=0, integrationTime=5, wvlMin=None, wvlMax=None, doWeighted=True):
'''
Get a simple short-exposure time RA/dec-mapped image, for
the purposes of looking at the per-pixel effective integration
time weighting.
'''
virtualImage = rdi.RADecImage(vPlateScale=0.1)
print 'Loading: ',os.path.basename(fileName)
phList = pl.PhotList(fileName)
baseSaveName,ext=os.path.splitext(os.path.basename(fileName))
imSaveName=baseSaveName+'.tif'
virtualImage.loadImage(phList,doStack=True,savePreStackImage=imSaveName,
firstSec=firstSec,integrationTime=integrationTime,
wvlMin=wvlMin, wvlMax=wvlMax, doWeighted=doWeighted)
virtualImage.display(pclip=True)
return virtualImage
if __name__ == "__main__":
getSimpleImage()
|
gpl-2.0
| -3,305,290,240,977,502,700 | 34.058824 | 106 | 0.700503 | false | 3.475219 | false | false | false |
jc6036/LordsAndLadies
|
file_filler.py
|
1
|
2432
|
# Using this to simply fill up a text file with placeholder names
def fill_names(filename, num_of_names, variation, gender = "none"):
if gender == "male":
if variation == "first":
with open("./%s" % str(filename), "w") as opened_file:
for i in range(1, num_of_names + 1):
opened_file.write("Male_Name %d\n" % i)
elif gender == "female":
if variation == "first":
with open("./%s" % str(filename), "w") as opened_file:
for i in range(1, num_of_names + 1):
opened_file.write("Female_Name %d\n" % i)
else:
if variation == "last":
with open("./%s" % str(filename), "w") as opened_file:
for i in range(1, num_of_names + 1):
opened_file.write("Last_Name %d\n" % i)
elif variation == "kingdom":
with open("./%s" % str(filename), "w") as opened_file:
for i in range(1, num_of_names + 1):
opened_file.write("Kingdom_Name %d\n" % i)
def fill_locations(filename, num_of_names):
with open("./%s" % str(filename), "w") as opened_file:
for i in range(1, num_of_names + 1):
opened_file.write("Test_Location %d\n" % i)
def fill_titles(filename, num_of_titles, fix, job_type):
if job_type == "noble":
if fix == "prefix":
with open("./%s" % str(filename), "w") as opened_file:
for i in range(1, num_of_titles + 1):
opened_file.write("Prefix_Title_Noble %s\n" % str(i))
elif fix == "subfix":
with open("./%s" % str(filename), "w") as opened_file:
for i in range(1, num_of_titles + 1):
opened_file.write("Subfix_Title_Noble %s\n" % str(i))
elif job_type == "common":
if fix == "prefix":
with open("./%s" % str(filename), "w") as opened_file:
for i in range(1, num_of_titles + 1):
opened_file.write("Prefix_Title_Common %s\n" % str(i))
elif fix == "subfix":
with open("./%s" % str(filename), "w") as opened_file:
for i in range(1, num_of_titles + 1):
opened_file.write("Subfix_Title_Common %s\n" % str(i))
fill_names("male_names.txt", 500, "first", "male")
fill_names("female_names.txt", 500, "first", "female")
fill_names("last_names.txt", 250, "last", "none")
|
gpl-2.0
| 6,665,909,490,550,261,000 | 34.246377 | 74 | 0.515625 | false | 3.290934 | false | false | false |
nschaetti/EchoTorch
|
echotorch/transforms/text/Character.py
|
1
|
3378
|
# -*- coding: utf-8 -*-
#
# Imports
import torch
from .Transformer import Transformer
# Transform text to character vectors
class Character(Transformer):
"""
Transform text to character vectors
"""
# Constructor
def __init__(self, uppercase=False, gram_to_ix=None, start_ix=0, fixed_length=-1):
"""
Constructor
"""
# Gram to ix
if gram_to_ix is not None:
self.gram_count = len(gram_to_ix.keys())
self.gram_to_ix = gram_to_ix
else:
self.gram_count = start_ix
self.gram_to_ix = dict()
# end if
# Ix to gram
self.ix_to_gram = dict()
if gram_to_ix is not None:
for gram in gram_to_ix.keys():
self.ix_to_gram[gram_to_ix[gram]] = gram
# end for
# end if
# Properties
self.uppercase = uppercase
self.fixed_length = fixed_length
# Super constructor
super(Character, self).__init__()
# end __init__
##############################################
# Public
##############################################
##############################################
# Properties
##############################################
# Get the number of inputs
@property
def input_dim(self):
"""
Get the number of inputs.
:return: The input size.
"""
return 1
# end input_dim
# Vocabulary size
@property
def voc_size(self):
"""
Vocabulary size
:return:
"""
return self.gram_count
# end voc_size
##############################################
# Private
##############################################
# To upper
def to_upper(self, gram):
"""
To upper
:param gram:
:return:
"""
if not self.uppercase:
return gram.lower()
# end if
return gram
# end to_upper
##############################################
# Override
##############################################
# Convert a string
def __call__(self, text):
"""
        Convert a string to an ESN input
:param text: Text to convert
:return: Tensor of word vectors
"""
# Add to voc
for i in range(len(text)):
gram = self.to_upper(text[i])
if gram not in self.gram_to_ix.keys():
self.gram_to_ix[gram] = self.gram_count
self.ix_to_gram[self.gram_count] = gram
self.gram_count += 1
# end if
# end for
# List of character to 2grams
text_idxs = [self.gram_to_ix[self.to_upper(text[i])] for i in range(len(text))]
# To long tensor
text_idxs = torch.LongTensor(text_idxs)
# Check length
if self.fixed_length != -1:
if text_idxs.size(0) > self.fixed_length:
text_idxs = text_idxs[:self.fixed_length]
elif text_idxs.size(0) < self.fixed_length:
zero_idxs = torch.LongTensor(self.fixed_length).fill_(0)
zero_idxs[:text_idxs.size(0)] = text_idxs
text_idxs = zero_idxs
# end if
# end if
return text_idxs, text_idxs.size(0)
# end convert
# end FunctionWord
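# Usage sketch (illustrative values only):
#
#     transformer = Character(fixed_length=100)
#     idxs, length = transformer(u"hello world")
#     # "idxs" is a torch.LongTensor of per-character indices, padded or
#     # truncated to 100 entries; "length" is idxs.size(0), i.e. the length
#     # after padding/truncation.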
|
gpl-3.0
| 8,788,521,190,926,664,000 | 24.78626 | 87 | 0.450266 | false | 4.201493 | false | false | false |
mhils/mitmproxy
|
mitmproxy/flowfilter.py
|
1
|
14034
|
"""
The following operators are understood:
~q Request
~s Response
Headers:
Patterns are matched against "name: value" strings. Field names are
all-lowercase.
~a Asset content-type in response. Asset content types are:
text/javascript
application/x-javascript
application/javascript
text/css
image/*
application/x-shockwave-flash
~h rex Header line in either request or response
~hq rex Header in request
~hs rex Header in response
~b rex Expression in the body of either request or response
~bq rex Expression in the body of request
~bs rex Expression in the body of response
~t rex Shortcut for content-type header.
~d rex Request domain
~m rex Method
~u rex URL
~c CODE Response code.
rex Equivalent to ~u rex
"""
import functools
import re
import sys
from typing import Callable, ClassVar, Optional, Sequence, Type
import pyparsing as pp
from mitmproxy import flow, http, tcp, websocket
from mitmproxy.net.websocket import check_handshake
def only(*types):
def decorator(fn):
@functools.wraps(fn)
def filter_types(self, flow):
if isinstance(flow, types):
return fn(self, flow)
return False
return filter_types
return decorator
class _Token:
def dump(self, indent=0, fp=sys.stdout):
print("{spacing}{name}{expr}".format(
spacing="\t" * indent,
name=self.__class__.__name__,
expr=getattr(self, "expr", "")
), file=fp)
class _Action(_Token):
code: ClassVar[str]
help: ClassVar[str]
@classmethod
def make(klass, s, loc, toks):
return klass(*toks[1:])
class FErr(_Action):
code = "e"
help = "Match error"
def __call__(self, f):
return True if f.error else False
class FMarked(_Action):
code = "marked"
help = "Match marked flows"
def __call__(self, f):
return f.marked
class FHTTP(_Action):
code = "http"
help = "Match HTTP flows"
@only(http.HTTPFlow)
def __call__(self, f):
return True
class FWebSocket(_Action):
code = "websocket"
help = "Match WebSocket flows (and HTTP-WebSocket handshake flows)"
@only(http.HTTPFlow, websocket.WebSocketFlow)
def __call__(self, f):
m = (
(isinstance(f, http.HTTPFlow) and f.request and check_handshake(f.request.headers))
or isinstance(f, websocket.WebSocketFlow)
)
return m
class FTCP(_Action):
code = "tcp"
help = "Match TCP flows"
@only(tcp.TCPFlow)
def __call__(self, f):
return True
class FReq(_Action):
code = "q"
help = "Match request with no response"
@only(http.HTTPFlow)
def __call__(self, f):
if not f.response:
return True
class FResp(_Action):
code = "s"
help = "Match response"
@only(http.HTTPFlow)
def __call__(self, f):
return bool(f.response)
class _Rex(_Action):
flags = 0
is_binary = True
def __init__(self, expr):
self.expr = expr
if self.is_binary:
expr = expr.encode()
try:
self.re = re.compile(expr, self.flags)
except Exception:
raise ValueError("Cannot compile expression.")
def _check_content_type(rex, message):
return any(
name.lower() == b"content-type" and
rex.search(value)
for name, value in message.headers.fields
)
class FAsset(_Action):
code = "a"
help = "Match asset in response: CSS, Javascript, Flash, images."
ASSET_TYPES = [re.compile(x) for x in [
b"text/javascript",
b"application/x-javascript",
b"application/javascript",
b"text/css",
b"image/.*",
b"application/x-shockwave-flash"
]]
@only(http.HTTPFlow)
def __call__(self, f):
if f.response:
for i in self.ASSET_TYPES:
if _check_content_type(i, f.response):
return True
return False
class FContentType(_Rex):
code = "t"
help = "Content-type header"
@only(http.HTTPFlow)
def __call__(self, f):
if _check_content_type(self.re, f.request):
return True
elif f.response and _check_content_type(self.re, f.response):
return True
return False
class FContentTypeRequest(_Rex):
code = "tq"
help = "Request Content-Type header"
@only(http.HTTPFlow)
def __call__(self, f):
return _check_content_type(self.re, f.request)
class FContentTypeResponse(_Rex):
code = "ts"
help = "Response Content-Type header"
@only(http.HTTPFlow)
def __call__(self, f):
if f.response:
return _check_content_type(self.re, f.response)
return False
class FHead(_Rex):
code = "h"
help = "Header"
flags = re.MULTILINE
@only(http.HTTPFlow)
def __call__(self, f):
if f.request and self.re.search(bytes(f.request.headers)):
return True
if f.response and self.re.search(bytes(f.response.headers)):
return True
return False
class FHeadRequest(_Rex):
code = "hq"
help = "Request header"
flags = re.MULTILINE
@only(http.HTTPFlow)
def __call__(self, f):
if f.request and self.re.search(bytes(f.request.headers)):
return True
class FHeadResponse(_Rex):
code = "hs"
help = "Response header"
flags = re.MULTILINE
@only(http.HTTPFlow)
def __call__(self, f):
if f.response and self.re.search(bytes(f.response.headers)):
return True
class FBod(_Rex):
code = "b"
help = "Body"
flags = re.DOTALL
@only(http.HTTPFlow, websocket.WebSocketFlow, tcp.TCPFlow)
def __call__(self, f):
if isinstance(f, http.HTTPFlow):
if f.request and f.request.raw_content:
if self.re.search(f.request.get_content(strict=False)):
return True
if f.response and f.response.raw_content:
if self.re.search(f.response.get_content(strict=False)):
return True
elif isinstance(f, websocket.WebSocketFlow) or isinstance(f, tcp.TCPFlow):
for msg in f.messages:
if self.re.search(msg.content):
return True
return False
class FBodRequest(_Rex):
code = "bq"
help = "Request body"
flags = re.DOTALL
@only(http.HTTPFlow, websocket.WebSocketFlow, tcp.TCPFlow)
def __call__(self, f):
if isinstance(f, http.HTTPFlow):
if f.request and f.request.raw_content:
if self.re.search(f.request.get_content(strict=False)):
return True
elif isinstance(f, websocket.WebSocketFlow) or isinstance(f, tcp.TCPFlow):
for msg in f.messages:
if msg.from_client and self.re.search(msg.content):
return True
class FBodResponse(_Rex):
code = "bs"
help = "Response body"
flags = re.DOTALL
@only(http.HTTPFlow, websocket.WebSocketFlow, tcp.TCPFlow)
def __call__(self, f):
if isinstance(f, http.HTTPFlow):
if f.response and f.response.raw_content:
if self.re.search(f.response.get_content(strict=False)):
return True
elif isinstance(f, websocket.WebSocketFlow) or isinstance(f, tcp.TCPFlow):
for msg in f.messages:
if not msg.from_client and self.re.search(msg.content):
return True
class FMethod(_Rex):
code = "m"
help = "Method"
flags = re.IGNORECASE
@only(http.HTTPFlow)
def __call__(self, f):
return bool(self.re.search(f.request.data.method))
class FDomain(_Rex):
code = "d"
help = "Domain"
flags = re.IGNORECASE
is_binary = False
@only(http.HTTPFlow, websocket.WebSocketFlow)
def __call__(self, f):
if isinstance(f, websocket.WebSocketFlow):
f = f.handshake_flow
return bool(
self.re.search(f.request.host) or
self.re.search(f.request.pretty_host)
)
class FUrl(_Rex):
code = "u"
help = "URL"
is_binary = False
# FUrl is special, because it can be "naked".
@classmethod
def make(klass, s, loc, toks):
if len(toks) > 1:
toks = toks[1:]
return klass(*toks)
@only(http.HTTPFlow, websocket.WebSocketFlow)
def __call__(self, f):
if isinstance(f, websocket.WebSocketFlow):
f = f.handshake_flow
if not f or not f.request:
return False
return self.re.search(f.request.pretty_url)
class FSrc(_Rex):
code = "src"
help = "Match source address"
is_binary = False
def __call__(self, f):
if not f.client_conn or not f.client_conn.address:
return False
r = "{}:{}".format(f.client_conn.address[0], f.client_conn.address[1])
return f.client_conn.address and self.re.search(r)
class FDst(_Rex):
code = "dst"
help = "Match destination address"
is_binary = False
def __call__(self, f):
if not f.server_conn or not f.server_conn.address:
return False
r = "{}:{}".format(f.server_conn.address[0], f.server_conn.address[1])
return f.server_conn.address and self.re.search(r)
class _Int(_Action):
def __init__(self, num):
self.num = int(num)
class FCode(_Int):
code = "c"
help = "HTTP response code"
@only(http.HTTPFlow)
def __call__(self, f):
if f.response and f.response.status_code == self.num:
return True
class FAnd(_Token):
def __init__(self, lst):
self.lst = lst
def dump(self, indent=0, fp=sys.stdout):
super().dump(indent, fp)
for i in self.lst:
i.dump(indent + 1, fp)
def __call__(self, f):
return all(i(f) for i in self.lst)
class FOr(_Token):
def __init__(self, lst):
self.lst = lst
def dump(self, indent=0, fp=sys.stdout):
super().dump(indent, fp)
for i in self.lst:
i.dump(indent + 1, fp)
def __call__(self, f):
return any(i(f) for i in self.lst)
class FNot(_Token):
def __init__(self, itm):
self.itm = itm[0]
def dump(self, indent=0, fp=sys.stdout):
super().dump(indent, fp)
self.itm.dump(indent + 1, fp)
def __call__(self, f):
return not self.itm(f)
filter_unary: Sequence[Type[_Action]] = [
FAsset,
FErr,
FHTTP,
FMarked,
FReq,
FResp,
FTCP,
FWebSocket,
]
filter_rex: Sequence[Type[_Rex]] = [
FBod,
FBodRequest,
FBodResponse,
FContentType,
FContentTypeRequest,
FContentTypeResponse,
FDomain,
FDst,
FHead,
FHeadRequest,
FHeadResponse,
FMethod,
FSrc,
FUrl,
]
filter_int = [
FCode
]
def _make():
# Order is important - multi-char expressions need to come before narrow
# ones.
parts = []
for cls in filter_unary:
f = pp.Literal(f"~{cls.code}") + pp.WordEnd()
f.setParseAction(cls.make)
parts.append(f)
# This is a bit of a hack to simulate Word(pyparsing_unicode.printables),
# which has a horrible performance with len(pyparsing.pyparsing_unicode.printables) == 1114060
unicode_words = pp.CharsNotIn("()~'\"" + pp.ParserElement.DEFAULT_WHITE_CHARS)
unicode_words.skipWhitespace = True
regex = (
unicode_words
| pp.QuotedString('"', escChar='\\')
| pp.QuotedString("'", escChar='\\')
)
for cls in filter_rex:
f = pp.Literal(f"~{cls.code}") + pp.WordEnd() + regex.copy()
f.setParseAction(cls.make)
parts.append(f)
for cls in filter_int:
f = pp.Literal(f"~{cls.code}") + pp.WordEnd() + pp.Word(pp.nums)
f.setParseAction(cls.make)
parts.append(f)
# A naked rex is a URL rex:
f = regex.copy()
f.setParseAction(FUrl.make)
parts.append(f)
atom = pp.MatchFirst(parts)
expr = pp.infixNotation(
atom,
[(pp.Literal("!").suppress(),
1,
pp.opAssoc.RIGHT,
lambda x: FNot(*x)),
(pp.Literal("&").suppress(),
2,
pp.opAssoc.LEFT,
lambda x: FAnd(*x)),
(pp.Literal("|").suppress(),
2,
pp.opAssoc.LEFT,
lambda x: FOr(*x)),
])
expr = pp.OneOrMore(expr)
return expr.setParseAction(lambda x: FAnd(x) if len(x) != 1 else x)
bnf = _make()
TFilter = Callable[[flow.Flow], bool]
def parse(s: str) -> Optional[TFilter]:
try:
flt = bnf.parseString(s, parseAll=True)[0]
flt.pattern = s
return flt
except pp.ParseException:
return None
except ValueError:
return None
def match(flt, flow):
"""
Matches a flow against a compiled filter expression.
Returns True if matched, False if not.
If flt is a string, it will be compiled as a filter expression.
If the expression is invalid, ValueError is raised.
"""
if isinstance(flt, str):
flt = parse(flt)
if not flt:
raise ValueError("Invalid filter expression.")
if flt:
return flt(flow)
return True
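# Usage sketch (illustrative; "f" stands for any flow object handled above):
#
#     flt = parse("~d example\.com & ~s")
#     if flt is not None and match(flt, f):
#         ...  # f has a response and its domain matches example.com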
help = []
for a in filter_unary:
help.append(
(f"~{a.code}", a.help)
)
for b in filter_rex:
help.append(
(f"~{b.code} regex", b.help)
)
for c in filter_int:
help.append(
(f"~{c.code} int", c.help)
)
help.sort()
help.extend(
[
("!", "unary not"),
("&", "and"),
("|", "or"),
("(...)", "grouping"),
]
)
|
mit
| 5,222,736,535,658,669,000 | 23.322357 | 98 | 0.562847 | false | 3.616078 | false | false | false |
rwgdrummer/maskgen
|
hp_tool/hp/ErrorWindow.py
|
1
|
2192
|
from Tkinter import *
from tkSimpleDialog import Dialog
import json
import csv
import tkFileDialog
class ErrorWindow(Dialog):
"""
Provided a list of error messages, shows them in a simple pop-up window.
"""
def __init__(self, master, errors):
self.errors = errors
self.cancelPressed = True
Dialog.__init__(self, master, title='Validation')
def body(self, master):
frame = Frame(self, bd=2, relief=SUNKEN)
frame.pack(fill=BOTH, expand=TRUE)
yscrollbar = Scrollbar(frame)
yscrollbar.pack(side=RIGHT, fill=Y)
xscrollbar = Scrollbar(frame, orient=HORIZONTAL)
xscrollbar.pack(side=BOTTOM, fill=X)
self.listbox = Listbox(frame, width=80, height=15)
self.listbox.pack(fill=BOTH, expand=1)
if type(self.errors) == str:
with open(self.errors) as j:
self.errors = json.load(j)
if type(self.errors) == dict:
for i in self.errors:
for message in self.errors[i]:
self.listbox.insert(END, message[1])
else:
for i in self.errors:
self.listbox.insert(END, i)
# attach listbox to scrollbar
self.listbox.config(yscrollcommand=yscrollbar.set, xscrollcommand=xscrollbar.set)
yscrollbar.config(command=self.listbox.yview)
xscrollbar.config(command=self.listbox.xview)
def buttonbox(self):
box = Frame(self)
exportButton = Button(self, text='Export', width=10, command=self.export)
exportButton.pack(side=RIGHT, padx=5, pady=5)
w = Button(box, text="OK", width=10, command=self.ok, default=ACTIVE)
w.pack(side=LEFT, padx=5, pady=5)
w = Button(box, text="Cancel", width=10, command=self.cancel)
w.pack(side=LEFT, padx=5, pady=5)
self.bind("<Return>", self.ok)
self.bind("<Escape>", self.cancel)
box.pack()
def export(self):
with tkFileDialog.asksaveasfile(mode='w', defaultextension='.txt') as f:
f.write('\n'.join(self.listbox.get(0, END)))
f.write('\n')
def apply(self):
self.cancelPressed = False
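# Usage sketch (illustrative; "master" stands for an existing Tk widget):
#
#     window = ErrorWindow(master, ["first problem", "second problem"])
#     if not window.cancelPressed:
#         pass  # user confirmed with OK
#
# 'errors' may also be a dict or a path to a JSON file, as handled in body().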
|
bsd-3-clause
| -6,608,855,206,167,699,000 | 30.782609 | 89 | 0.605839 | false | 3.593443 | false | false | false |
cyphactor/lifecyclemanager
|
testenv/trac-0.10.4/trac/util/autoreload.py
|
1
|
2888
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import os
import sys
import time
import thread
_SLEEP_TIME = 1
def _reloader_thread(modification_callback):
"""When this function is run from the main thread, it will force other
threads to exit when any modules currently loaded change.
@param modification_callback: Function taking a single argument, the
modified file, and is called after a modification is detected."""
mtimes = {}
while True:
for filename in filter(None, [getattr(module, "__file__", None)
for module in sys.modules.values()]):
while not os.path.isfile(filename): # Probably in an egg or zip file
filename = os.path.dirname(filename)
if not filename:
break
if not filename: # Couldn't map to physical file, so just ignore
continue
if filename.endswith(".pyc"):
filename = filename[:-1]
mtime = os.stat(filename).st_mtime
if filename not in mtimes:
mtimes[filename] = mtime
continue
if mtime > mtimes[filename]:
modification_callback(filename)
sys.exit(3)
time.sleep(_SLEEP_TIME)
def _restart_with_reloader():
while True:
args = [sys.executable] + sys.argv
if sys.platform == "win32":
args = ['"%s"' % arg for arg in args]
new_environ = os.environ.copy()
new_environ["RUN_MAIN"] = 'true'
# This call reinvokes ourself and goes into the other branch of main as
# a new process.
exit_code = os.spawnve(os.P_WAIT, sys.executable,
args, new_environ)
if exit_code != 3:
return exit_code
def main(main_func, modification_callback):
"""Run `main_func` and restart any time modules are changed."""
if os.environ.get("RUN_MAIN"):
        # Launch the actual program as a child thread
thread.start_new_thread(main_func, ())
try:
# Now wait for a file modification and quit
_reloader_thread(modification_callback)
except KeyboardInterrupt:
pass
else:
# Initial invocation just waits around restarting this executable
try:
sys.exit(_restart_with_reloader())
except KeyboardInterrupt:
pass
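# Usage sketch (illustrative; "serve" stands for the caller's long-running entry point):
#
#     def serve():
#         pass  # run the actual application
#
#     def on_change(filename):
#         print "Restarting: %s changed" % filename
#
#     main(serve, on_change)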
|
gpl-3.0
| -7,681,305,959,908,121,000 | 34.219512 | 80 | 0.608033 | false | 4.349398 | false | false | false |
KBNLresearch/iromlab
|
iromlab/testsru.py
|
1
|
1831
|
#! /usr/bin/env python
import io
import xml.etree.ElementTree as ETree
from .kbapi import sru
def main():
"""
Script for testing SRU interface outside Iromlab
(not used by main Iromlab application)
"""
catid = "184556155"
# Lookup catalog identifier
#sruSearchString = '"PPN=' + str(catid) + '"'
sruSearchString = 'OaiPmhIdentifier="GGC:AC:' + str(catid) + '"'
print(sruSearchString)
response = sru.search(sruSearchString, "GGC")
if not response:
noGGCRecords = 0
else:
noGGCRecords = response.sru.nr_of_records
if noGGCRecords == 0:
# No matching record found
msg = ("Search for PPN=" + str(catid) + " returned " +
"no matching record in catalog!")
print("PPN not found", msg)
else:
record = next(response.records)
        # Title can be found in either:
# 1. title element
# 2. title element with maintitle attribute
# 3. title element with intermediatetitle attribute (3 in combination with 2)
titlesMain = record.titlesMain
titlesIntermediate = record.titlesIntermediate
titles = record.titles
if titlesMain != []:
title = titlesMain[0]
if titlesIntermediate != []:
title = title + ", " + titlesIntermediate[0]
else:
title = titles[0]
print("Title: " + title)
# Write XML
recordData = record.record_data
recordAsString = ETree.tostring(recordData, encoding='UTF-8', method='xml')
try:
with io.open("meta-kbmdo.xml", "wb") as fOut:
fOut.write(recordAsString)
fOut.close()
except IOError:
print("Could not write KB-MDO metadata to file")
if __name__ == "__main__":
main()
|
apache-2.0
| 1,240,776,010,216,517,400 | 27.625 | 85 | 0.581103 | false | 3.912393 | false | false | false |
madsmpedersen/MMPE
|
datastructures/dual_key_dict.py
|
1
|
3481
|
'''
Created on 08/11/2013
@author: mmpe
'''
class DualKeyDict(object):
def __init__(self, unique_key_att, additional_key_att):
self._unique_key_att = unique_key_att
self._additional_key_att = additional_key_att
self._dict = {}
self._unique_keys = set()
def __getitem__(self, key):
obj = self._dict[key]
if isinstance(obj, list):
raise AttributeError("More objects associated by key, '%s'. Use 'get' function to get list of objects" % key)
else:
return obj
def __contains__(self, key):
return key in self._dict
def __iter__(self):
return (i for i in self._unique_keys)
def __setitem__(self, key, obj):
self.add(obj)
def __len__(self):
return len(self._unique_keys)
def add(self, obj):
unique_key = getattr(obj, self._unique_key_att)
if unique_key in self._unique_keys:
raise KeyError("Key '%s' already exists in dict" % unique_key)
self._dict[unique_key] = obj
self._unique_keys.add(unique_key)
additional_key = getattr(obj, self._additional_key_att)
if additional_key in self._dict:
existing_obj = self._dict[additional_key]
if isinstance(existing_obj, list):
existing_obj.append(obj)
else:
self._dict[additional_key] = [existing_obj, obj]
else:
self._dict[additional_key] = obj
def get(self, key, default=None, multiple_error=False):
"""
Return <object> or <list of objects> associated by 'key'
If key not exists, 'default' is returned
If multiple_error is true, ValueError is raised if 'key' associates a <list of objects>
"""
if key in self._dict:
obj = self._dict[key]
if multiple_error and isinstance(obj, list):
raise AttributeError("More objects associated by key, '%s'" % key)
return obj
else:
return default
def keys(self):
"""Return list of unique keys"""
return list(self._unique_keys)
def values(self):
return [self._dict[k] for k in self._unique_keys]
def __str__(self):
return "{%s}" % ",".join(["(%s,%s): %s" % (getattr(obj, self._unique_key_att), getattr(obj, self._additional_key_att), obj) for obj in self.values()])
def remove(self, value):
"""
Value may be:
- unique key
- additional key
- object
"""
obj = self._dict.get(value, value)
unique_key = getattr(obj, self._unique_key_att)
del self._dict[unique_key]
self._unique_keys.remove(unique_key)
additional_key = getattr(obj, self._additional_key_att)
value = self._dict[additional_key]
if isinstance(value, list):
value = [v for v in value if v is not obj]
#value.remove(obj)
if len(value) == 1:
self._dict[additional_key] = value[0]
else:
self._dict[additional_key] = value
else:
del self._dict[additional_key]
return obj
def clear(self):
self._dict.clear()
self._unique_keys.clear()
def copy(self):
copy = DualKeyDict(self._unique_key_att, self._additional_key_att)
copy._unique_keys = self._unique_keys.copy()
copy._dict = self._dict.copy()
return copy
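# Usage sketch (illustrative; assumes objects with a unique 'id' attribute and
# a possibly shared 'name' attribute):
#
#     d = DualKeyDict('id', 'name')
#     d.add(obj)          # indexed under both obj.id and obj.name
#     d[obj.id]           # unique-key lookup -> the object itself
#     d.get(obj.name)     # additional-key lookup -> an object or a list of objects
#     d.remove(obj.id)    # accepts a unique key, an additional key, or the object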
|
apache-2.0
| 1,287,717,536,764,929,500 | 29.535088 | 158 | 0.556162 | false | 3.867778 | false | false | false |
zjuchenyuan/BioWeb
|
Lib/Bio/codonalign/codonalphabet.py
|
1
|
2100
|
# Copyright 2013 by Zheng Ruan ([email protected]).
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Code for Codon Alphabet.
CodonAlphabet class is inherited from Alphabet class. It is an
alphabet for CodonSeq class.
"""
import copy
try:
from itertools import izip
except ImportError:
izip = zip
from Bio.Alphabet import IUPAC, Gapped, HasStopCodon, Alphabet
from Bio.Data.CodonTable import generic_by_id
default_codon_table = copy.deepcopy(generic_by_id[1])
def get_codon_alphabet(alphabet, gap="-", stop="*"):
"""Gets alignment alphabet for codon alignment.
Only nucleotide alphabet is accepted. Raise an error when the type of
alphabet is incompatible.
"""
from Bio.Alphabet import NucleotideAlphabet
if isinstance(alphabet, NucleotideAlphabet):
alpha = alphabet
if gap:
alpha = Gapped(alpha, gap_char=gap)
if stop:
alpha = HasStopCodon(alpha, stop_symbol=stop)
else:
        raise TypeError("Only Nucleotide Alphabet is accepted!")
return alpha
default_alphabet = get_codon_alphabet(IUPAC.unambiguous_dna)
class CodonAlphabet(Alphabet):
"""Generic Codon Alphabet with a size of three"""
size = 3
letters = None
name = ''
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, self.names[0])
def get_codon_alphabet(codon_table, gap_char="-"):
letters = list(codon_table.forward_table.keys())
letters.extend(codon_table.stop_codons)
letters.extend(codon_table.start_codons)
if gap_char:
letters.append(gap_char * 3)
generic_codon_alphabet = CodonAlphabet()
generic_codon_alphabet.letters = letters
generic_codon_alphabet.gap_char = '-'
generic_codon_alphabet.names = codon_table.names
return generic_codon_alphabet
default_codon_alphabet = get_codon_alphabet(default_codon_table)
if __name__ == "__main__":
from Bio._utils import run_doctest
run_doctest()
|
mit
| -3,725,978,972,403,675,000 | 28.577465 | 73 | 0.69381 | false | 3.301887 | false | false | false |
saeedghsh/SSRR13
|
Andreas/slam6d/3rdparty/lastools/ArcGIS_toolbox/scripts/lasground.py
|
2
|
6337
|
#
# lasground.py
#
# (c) 2012, Martin Isenburg
# LASSO - rapid tools to catch reality
#
# uses lasground.exe to extracts the bare-earth by classifying LIDAR
# points into ground (class = 2) and non-ground points (class = 1).
#
# The LiDAR input can be in LAS/LAZ/BIN/TXT/SHP/... format.
# The LiDAR output can be in LAS/LAZ/BIN/TXT format.
#
# for licensing details see http://rapidlasso.com/download/LICENSE.txt
#
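#
# example of the kind of command line this script assembles (illustrative
# values; the flags correspond to the options handled below):
#
#   lasground -v -i tile.laz -city -fine -ignore_class 7 -olaz -odir out
#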
import sys, os, arcgisscripting, subprocess
def return_classification(classification):
if (classification == "created, never classified (0)"):
return "0"
if (classification == "unclassified (1)"):
return "1"
if (classification == "ground (2)"):
return "2"
if (classification == "low vegetation (3)"):
return "3"
if (classification == "medium vegetation (4)"):
return "4"
if (classification == "high vegetation (5)"):
return "5"
if (classification == "building (6)"):
return "6"
if (classification == "low point (7)"):
return "7"
if (classification == "keypoint (8)"):
return "8"
if (classification == "water (9)"):
return "9"
if (classification == "high point (10)"):
return "10"
if (classification == "(11)"):
return "11"
if (classification == "overlap point (12)"):
return "12"
if (classification == "(13)"):
return "13"
if (classification == "(14)"):
return "14"
if (classification == "(15)"):
return "15"
if (classification == "(16)"):
return "16"
if (classification == "(17)"):
return "17"
if (classification == "(18)"):
return "18"
return "unknown"
def check_output(command,console):
if console == True:
process = subprocess.Popen(command)
else:
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
output,error = process.communicate()
returncode = process.poll()
return returncode,output
### create the geoprocessor object
gp = arcgisscripting.create(9.3)
### report that something is happening
gp.AddMessage("Starting lasground ...")
### get number of arguments
argc = len(sys.argv)
### report arguments (for debug)
#gp.AddMessage("Arguments:")
#for i in range(0, argc):
# gp.AddMessage("[" + str(i) + "]" + sys.argv[i])
### get the path to the LAStools binaries
lastools_path = os.path.dirname(os.path.dirname(os.path.dirname(sys.argv[0])))+"\\bin"
### check if path exists
if os.path.exists(lastools_path) == False:
gp.AddMessage("Cannot find .\lastools\bin at " + lastools_path)
sys.exit(1)
else:
gp.AddMessage("Found " + lastools_path + " ...")
### create the full path to the lasground executable
lasground_path = lastools_path+"\\lasground.exe"
### check if executable exists
if os.path.exists(lastools_path) == False:
gp.AddMessage("Cannot find lasground.exe at " + lasground_path)
sys.exit(1)
else:
gp.AddMessage("Found " + lasground_path + " ...")
### create the command string for lasground.exe
command = [lasground_path]
### maybe use '-verbose' option
if sys.argv[argc-1] == "true":
command.append("-v")
### add input LiDAR
command.append("-i")
command.append(sys.argv[1])
### maybe horizontal feet
if sys.argv[2] == "true":
command.append("-feet")
### maybe vertical feet
if sys.argv[3] == "true":
command.append("-elevation_feet")
### what type of terrain do we have
if sys.argv[4] == "city or warehouses":
command.append("-city")
elif sys.argv[4] == "towns or flats":
command.append("-town")
elif sys.argv[4] == "metropolis":
command.append("-metro")
### what granularity should we operate with
if sys.argv[5] == "fine":
command.append("-fine")
elif sys.argv[5] == "extra fine":
command.append("-extra_fine")
elif sys.argv[5] == "ultra fine":
command.append("-ultra_fine")
### maybe we should ignore/preserve some existing classifications when classifying
if sys.argv[6] != "#":
command.append("-ignore_class")
command.append(return_classification(sys.argv[6]))
### maybe we should ignore/preserve some more existing classifications when classifying
if sys.argv[7] != "#":
command.append("-ignore_class")
command.append(return_classification(sys.argv[7]))
### this is where the output arguments start
out = 8
### maybe an output format was selected
if sys.argv[out] != "#":
if sys.argv[out] == "las":
command.append("-olas")
elif sys.argv[out] == "laz":
command.append("-olaz")
elif sys.argv[out] == "bin":
command.append("-obin")
elif sys.argv[out] == "xyzc":
command.append("-otxt")
command.append("-oparse")
command.append("xyzc")
elif sys.argv[out] == "xyzci":
command.append("-otxt")
command.append("-oparse")
command.append("xyzci")
elif sys.argv[out] == "txyzc":
command.append("-otxt")
command.append("-oparse")
command.append("txyzc")
elif sys.argv[out] == "txyzci":
command.append("-otxt")
command.append("-oparse")
command.append("txyzci")
### maybe an output file name was selected
if sys.argv[out+1] != "#":
command.append("-o")
command.append(sys.argv[out+1])
### maybe an output directory was selected
if sys.argv[out+2] != "#":
command.append("-odir")
command.append(sys.argv[out+2])
### maybe an output appendix was selected
if sys.argv[out+3] != "#":
command.append("-odix")
command.append(sys.argv[out+3])
### report command string
gp.AddMessage("LAStools command line:")
command_length = len(command)
command_string = str(command[0])
for i in range(1, command_length):
command_string = command_string + " " + str(command[i])
gp.AddMessage(command_string)
### run command
returncode,output = check_output(command, False)
### report output of lasground
gp.AddMessage(str(output))
### check return code
if returncode != 0:
gp.AddMessage("Error. lasground failed.")
sys.exit(1)
### report happy end
gp.AddMessage("Success. lasground done.")
|
bsd-3-clause
| -3,070,467,438,095,268,000 | 28.17619 | 130 | 0.614644 | false | 3.351137 | false | false | false |
jhanley634/testing-tools
|
problem/pixel/volcanic_voxels.py
|
1
|
6745
|
#! /usr/bin/env python
# Copyright 2020 John Hanley.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# The software is provided "AS IS", without warranty of any kind, express or
# implied, including but not limited to the warranties of merchantability,
# fitness for a particular purpose and noninfringement. In no event shall
# the authors or copyright holders be liable for any claim, damages or
# other liability, whether in an action of contract, tort or otherwise,
# arising from, out of or in connection with the software or the use or
# other dealings in the software.
from collections import namedtuple
from operator import itemgetter
from typing import List
import random
import numpy as np
Voxel = namedtuple('Voxel', 'x y z')
class Voxels:
"""Turns text input into a list of Voxels."""
def __init__(self, text: str):
depth = 1 + int(max(text))
lines = text.splitlines()
height = len(lines)
width = max(map(len, lines))
self.model = np.zeros((width, height, depth), int)
voxels = []
for j, line in enumerate(lines):
y = len(lines) - j # Origin is at lower left.
for x, ch in enumerate(line):
if ch.isnumeric(): # We ignore ocean pixels.
assert ch > '0', (ch, line)
for z in range(1, int(ch) + 1):
voxel = Voxel(x, y, z)
self.model[voxel] = True
voxels.append(voxel)
random.shuffle(voxels)
self.voxels = voxels
def render(self) -> str:
for voxel in self.voxels:
assert self.model[voxel], voxel
width, height, depth = self.model.shape
return '\n'.join(self._raster(height - 1 - y) # origin LL
for y in range(height))
def _raster(self, y) -> str:
width = self.model.shape[0]
return ''.join(self._depth_val(x, y)
for x in range(width))
def _depth_val(self, x, y) -> str:
"""Returns blank for ocean, or 1..3 for coast..mountain."""
depth = self.model.shape[2]
val = ' ' # Ocean, by default.
for z in range(depth):
if self.model[(x, y, z)]:
val = str(z)
return val
class PrintedModel:
def __init__(self, voxels: List[Voxel]):
self.model = self._get_zeros(voxels)
self._cur = voxels[0] # 3-D print head position
self.elapsed = 1 # Unit cost to move print head to initial voxel.
self._print(voxels)
def _print(self, voxels: List[Voxel]) -> None:
for voxel in voxels:
self._verify_feasible(*voxel)
self.elapsed += _manhattan_distance(self._cur, voxel)
self.model[voxel] = self.elapsed
self._cur = voxel
def _verify_feasible(self, x, y, z):
"""Ensure there is a foundation to print a mountain top upon."""
for z1 in range(1, z):
if not self.model[(x, y, z1)]:
raise ValueError(f'No support for ({x}, {y}, {z})')
@staticmethod
def _get_zeros(voxels: List[Voxel]):
assert len(voxels)
width = 1 + max(map(itemgetter(0), voxels))
height = 1 + max(map(itemgetter(1), voxels))
depth = 1 + max(map(itemgetter(2), voxels))
return np.zeros((width, height, depth), int)
def render(self):
height = self.model.shape[1]
return '\n'.join(self._raster(height - 1 - y, bool(y % 2))
for y in range(height))
def _raster(self, y, first_bold=False):
bold = first_bold
raster = []
for x in range(self.model.shape[0]):
raster.append(self._cell(x, y, bold))
bold = not bold
return ''.join(raster)
def _cell(self, x, y, bold):
cell = '..' # ocean
for z in range(self.model.shape[2]):
if self.model[(x, y, z)]:
elapsed = self.model[(x, y, z)]
cell = f'{elapsed % 100:02d}'
if bold:
esc = chr(27)
cell = f'{esc}[1m{cell}{esc}[0m'
return cell
def three_d_print(voxels: List[Voxel]) -> str:
pm = PrintedModel(voxels)
return pm.elapsed, pm.model
def _manhattan_distance(a: Voxel, b: Voxel) -> int:
return (abs(a.x - b.x)
+ abs(a.y - b.y)
+ abs(a.z - b.z))
def xyz(coord):
return coord
def xzy(coord):
return coord.x, coord.z, coord.y
def yxz(coord):
return coord.y, coord.x, coord.z
def yzx(coord):
return coord.y, coord.z, coord.x
def zyx(coord):
return tuple(reversed(coord))
def zxy(coord):
return coord.z, coord.x, coord.y
islands = Voxels("""
1
111 1121
1112211 11223211
1112233211 112321
122211 13
1211 1 1 11
1 1211 12321
1123211 121
1121 1
11
""")
# Name these islands:
# A B C D
if __name__ == '__main__':
t1, out1 = three_d_print(sorted(islands.voxels, key=xyz))
t2, out2 = three_d_print(sorted(islands.voxels, key=xzy))
t3, out3 = three_d_print(sorted(islands.voxels, key=zxy))
t4, out4 = three_d_print(sorted(islands.voxels, key=yxz))
t5, out5 = three_d_print(sorted(islands.voxels, key=yzx))
t6, out6 = three_d_print(sorted(islands.voxels, key=zyx))
# output: 246 246 406 542 760 827 False False False False False
print(t1, t2, t3, t4, t5, t6,
np.array_equal(out1, out2),
np.array_equal(out1, out3),
np.array_equal(out1, out4),
np.array_equal(out1, out5),
np.array_equal(out1, out6))
# print(three_d_print(islands.voxels)) # fails due to No Support
pm = PrintedModel(sorted(islands.voxels))
print(pm.render())
# volcanic voxels
#
# Some volcanic islands are depicted above.
# A 3-D printer will create a model of them.
# The input of (x, y, z) voxels is now in a randomly permuted order.
# Write a function that puts the voxels in "better than naïve" order.
|
mit
| 140,680,134,709,253,020 | 31.897561 | 76 | 0.575326 | false | 3.36695 | false | false | false |
erigones/esdc-ce
|
gui/migrations/0004_alerting_fields_initialization.py
|
1
|
1297
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models, transaction
# noinspection PyUnusedLocal
def initialize_alerting_fields(apps, schema_editor):
former_user_profile = apps.get_model('gui', 'UserProfile')
if former_user_profile.objects.count() > 10000:
warning_text = '\n It looks like there is a lot of users in your database and ' \
'it would take a lot of time to update their profiles. This migration is therefore skipped. ' \
'Please perform this operation manually.'
print(warning_text)
else:
with transaction.atomic():
# Cannot user F() expressions to joined tables
for user_profile in former_user_profile.objects.select_related('user__email').iterator():
user_profile.alerting_email = user_profile.user.email
user_profile.alerting_phone = user_profile.phone
user_profile.alerting_jabber = user_profile.jabber
user_profile.save()
class Migration(migrations.Migration):
dependencies = [
('gui', '0003_add_alerting_user_fields'),
]
operations = [
migrations.RunPython(initialize_alerting_fields, reverse_code=lambda x, y: None)
]
|
apache-2.0
| 4,412,531,453,637,234,700 | 39.53125 | 118 | 0.643793 | false | 4.294702 | false | false | false |
PaulWay/insights-core
|
insights/parsers/date.py
|
1
|
3048
|
"""
date - Command
==============
This module provides processing for the output of the ``date`` command.
The specs handled by this command inlude::
"date" : CommandSpec("/bin/date"),
"date_utc" : CommandSpec("/bin/date --utc"),
Class ``Date`` parses the output of the ``date`` command. Sample output of
this command looks like::
Fri Jun 24 09:13:34 CST 2016
Class ``DateUTC`` parses the output of the ``date --utc`` command. Output is
similar to the ``date`` command except that the `Timezone` column uses UTC.
All classes utilize the same base class ``DateParser`` so the following
examples apply to all classes in this module.
Examples:
>>> from insights.parsers.date import Date, DateUTC
>>> from insights.tests import context_wrap
>>> date_content = "Mon May 30 10:49:14 CST 2016"
>>> shared = {Date: Date(context_wrap(date_content))}
>>> date_info = shared[Date]
>>> date_info.data
'Mon May 30 10:49:14 CST 2016'
>>> date_info.datetime is not None
True
>>> date_info.timezone
'CST'
>>> date_content = "Mon May 30 10:49:14 UTC 2016"
>>> shared = {DateUTC: DateUTC(context_wrap(date_content))}
>>> date_info = shared[DateUTC]
>>> date_info.data
'Mon May 30 10:49:14 UTC 2016'
>>> date_info.datetime
datetime.datetime(2016, 5, 30, 10, 49, 14)
>>> date_info.timezone
'UTC'
"""
import sys
from datetime import datetime
from .. import Parser, parser, get_active_lines
class DateParseException(Exception):
pass
class DateParser(Parser):
"""Base class implementing shared code."""
def parse_content(self, content):
"""
Parses the output of the ``date`` and ``date --utc`` command.
Sample: Fri Jun 24 09:13:34 CST 2016
Sample: Fri Jun 24 09:13:34 UTC 2016
Attributes
----------
datetime: datetime.datetime
A native datetime.datetime of the parsed date string
timezone: str
The string portion of the date string containing the timezone
Raises:
DateParseException: Raised if any exception occurs parsing the
content.
"""
self.data = get_active_lines(content, comment_char="COMMAND>")[0]
parts = self.data.split()
if not len(parts) == 6:
msg = "Expected six date parts. Got [%s]"
raise DateParseException(msg % self.data)
try:
self.timezone = parts[4]
no_tz = ' '.join(parts[:4]) + ' ' + parts[-1]
self.datetime = datetime.strptime(no_tz, '%a %b %d %H:%M:%S %Y')
except:
raise DateParseException(self.data), None, sys.exc_info()[2]
@parser("date")
class Date(DateParser):
"""
Class to parse ``date`` command output.
Sample: Fri Jun 24 09:13:34 CST 2016
"""
pass
@parser("date_utc")
class DateUTC(DateParser):
"""
Class to parse ``date --utc`` command output.
Sample: Fri Jun 24 09:13:34 UTC 2016
"""
pass
|
apache-2.0
| -1,107,753,871,417,200,300 | 27.485981 | 77 | 0.603675 | false | 3.767614 | false | false | false |
ihciah/xk-database
|
views/admin.py
|
1
|
5958
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import Blueprint
from flask import g, request, flash
from flask import render_template, redirect
from models import Student,Account,Course,Xk,Teacher
from forms import SearchStudentFrom,adminProfileForm,UserProfileForm,SearchForm,CourseEditForm,UseraddForm
from utils import get_current_user,require_admin,transt2line,transtea2line
__all__ = ['bp']
bp = Blueprint('admin',__name__)
@bp.route('/',methods=['GET'])
@require_admin
def home():
user_count=Account.query.filter(Account.role!=2).count()
course_count=Course.query.count()
return render_template('admin/admin.html',user_count=user_count,course_count=course_count)
@bp.route('/userlist',methods=['GET','POST'])
@require_admin
def userlist():
if request.method == 'GET':
return render_template('admin/user_search.html')
form = SearchStudentFrom(request.form)
if form.validate():
[result_student,result_teacher]=form.dosearch()
return render_template('admin/user_search_result.html',result_student=result_student,result_teacher=result_teacher)
for fieldName, errorMessages in form.errors.iteritems():
for err in errorMessages:
flash(err)
return render_template('admin/user_search.html')
@bp.route('/profile',methods=['GET','POST'])
@require_admin
def profile():
user = Student.query.get(g.user.username)
if request.method == 'GET':
return render_template('admin/profile.html')
form = adminProfileForm(request.form)
if form.validate():
form.save()
flash(u"资料成功更新!")
user = Student.query.get(g.user.username)
render_template('admin/profile.html')
for fieldName, errorMessages in form.errors.iteritems():
for err in errorMessages:
flash(err)
return render_template('admin/profile.html')
@bp.route('/stu-course',methods=['GET'])
@require_admin
def stu_course():
    # view a student's selected courses; enroll in or drop courses
uid=request.args.get('id')
if uid is None or uid=='':
return redirect("/admin/userlist")
return render_template('admin/stu_course.html',result=Student.query.get(uid),uid=uid)
@bp.route('/user-profile',methods=['GET','POST'])
@require_admin
def user_profile():
    # edit teacher/student profile information
if request.method == 'GET':
uid=request.args.get('id')
if uid is None:
return redirect("/admin/userlist")
user=Student.query.get(uid)
if user is None:
user=Teacher.query.get(uid)
if user is None:
return redirect("/admin/userlist")
return render_template('admin/user_profile.html',stu=None,tea=user)
return render_template('admin/user_profile.html',stu=user,tea=None)
form=UserProfileForm(request.form)
if form.validate():
form.save()
flash(u"资料成功更新!")
#current_app.logger.debug(3)
for fieldName, errorMessages in form.errors.iteritems():
for err in errorMessages:
flash(err)
#current_app.logger.debug(2)
if form.stuid is not None and form.stuid.data!='':
user=Student.query.get(form.stuid.data)
return render_template('admin/user_profile.html',stu=user,tea=None)
else:
user=Teacher.query.get(form.teaid.data)
return render_template('admin/user_profile.html',stu=None,tea=user)
@bp.route('/course',methods=['GET','POST'])
@require_admin
def course():
if request.method == 'GET':
return render_template('admin/courselist.html')
form = SearchForm(request.form)
if form.validate():
sres=form.search()
return render_template('admin/courselist.html',result=sres)
return render_template('admin/courselist.html')
@bp.route('/course-edit',methods=['GET','POST'])
@require_admin
def course_edit():
if request.method == 'GET':
code=request.args.get('id')
if code is None or code=='':
course=None
times=None
teas=None
type=1#1:new;0:edit
else:
type=0
course=Course.query.get(code)
if course is None:
return redirect("/admin/course")
times=transt2line(course.ctime)
teas=transtea2line(course.teacher)
return render_template('admin/course_edit.html',type=type,course=course,times=times,teas=teas)
form = CourseEditForm(request.form)
course=times=teas=None
if form.validate():
course=form.save()
flash(u"课程保存成功!")
else:
course=Course.query.get(form.code.data)
if course is not None:
times=transt2line(course.ctime)
teas=transtea2line(course.teacher)
for fieldName, errorMessages in form.errors.iteritems():
for err in errorMessages:
flash(err)
return render_template('admin/course_edit.html',type=0,course=course,times=times,teas=teas)
@bp.route('/useradd',methods=['GET', 'POST'])
@require_admin
def signup():
roles={1:"stu",2:"admin",3:"teacher"}
if request.method == 'GET':
return render_template('admin/useradd.html')
form = UseraddForm(request.form)
if form.validate():
uid,type=form.save()
flash(u"用户添加成功!")
if type==1 or type==3:
return redirect("/admin/user-profile?id="+uid)
return redirect("/admin/useradd")
for fieldName, errorMessages in form.errors.iteritems():
for err in errorMessages:
flash(err)
return render_template('admin/useradd.html')
@bp.route('/course-stu',methods=['GET'])
@require_admin
def course_stu():
cid=request.args.get('id')
if cid is None or cid=='':
return redirect("/admin/course")
result_student=Student.query.join(Xk, Xk.stuid==Student.stuid).filter(Xk.code==cid).all()
return render_template('admin/course_user.html',result_student=result_student,result_teacher=None,courseid=cid)
|
gpl-2.0
| -1,314,900,180,352,519,200 | 34.810976 | 123 | 0.65344 | false | 3.409988 | false | false | false |
xiang12835/python_web
|
py2_web2py/web2py/gluon/packages/dal/pydal/representers/postgre.py
|
3
|
1368
|
from ..adapters.postgres import Postgre, PostgreNew
from .base import SQLRepresenter, JSONRepresenter
from . import representers, before_type, for_type
from ..helpers.serializers import serializers
@representers.register_for(Postgre)
class PostgreRepresenter(SQLRepresenter, JSONRepresenter):
def _make_geoextra(self, field_type):
srid = 4326
geotype, params = field_type[:-1].split('(')
params = params.split(',')
if len(params) >= 2:
schema, srid = params[:2]
return {'srid': srid}
@before_type('geometry')
def geometry_extras(self, field_type):
return self._make_geoextra(field_type)
@for_type('geometry', adapt=False)
def _geometry(self, value, srid):
return "ST_GeomFromText('%s',%s)" % (value, srid)
@before_type('geography')
def geography_extras(self, field_type):
return self._make_geoextra(field_type)
@for_type('geography', adapt=False)
def _geography(self, value, srid):
return "ST_GeogFromText('SRID=%s;%s')" % (srid, value)
@for_type('jsonb', encode=True)
def _jsonb(self, value):
return serializers.json(value)
@representers.register_for(PostgreNew)
class PostgreArraysRepresenter(PostgreRepresenter):
def _listify_elements(self, elements):
return "{" + ",".join(str(el) for el in elements) + "}"
|
apache-2.0
| 1,911,755,367,537,962,500 | 32.365854 | 63 | 0.658626 | false | 3.480916 | false | false | false |
hstau/manifold-cryo
|
fit_1D_open_manifold_3D.py
|
1
|
5015
|
import numpy as np
import get_fit_1D_open_manifold_3D_param
import solve_d_R_d_tau_p_3D
import a
from scipy.io import loadmat
import matplotlib.pyplot as plt
#import matplotlib.pyplot as plt
'''
function [a,b,tau] = fit_1D_open_manifold_3D(psi)
%
% fit_1D_open_manifold_3D
%
% fit the eigenvectors for a 1D open manifold to the model
% x_ij = a_j cos(j*pi*tau_i) + b_j.
%
% j goes from 1 to 3 (this is only for 3D systems).
%
% i goes from 1 to nS where nS is the number of data points to be fitted.
%
% For a fixed set of a_j and b_j, j=1:3, tau_i for i=1:nS are
% obtained by putting dR/d(tau_i) to zero.
%
% For a fixed set of tau_i, i=1:nS, a_j and b_j for j=1:3 are
% obtained by solving 3 sets of 2x2 linear equations.
%
% Fit parameters and initial set of {\tau} are specified in
%
% get_fit_1D_open_manifold_3D_param.m
%
% copyright (c) Russell Fung 2014
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
global p nDim a b x x_fit
'''
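# Sketch of the least-squares step described above: for fixed {tau_i} the
# residual R = sum_i sum_j (x_ij - a_j*cos(j*pi*tau_i) - b_j)^2 is quadratic in
# (a_j, b_j), so each j = 1..3 gives the 2x2 normal equations
#
#   [ sum_i cos^2(j*pi*tau_i)   sum_i cos(j*pi*tau_i) ] [a_j]   [ sum_i x_ij*cos(j*pi*tau_i) ]
#   [ sum_i cos(j*pi*tau_i)     nS                    ] [b_j] = [ sum_i x_ij                 ]
#
# which is the A11/A12/A21/A22, b1/b2 system assembled and solved in op() below.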
'''
def plot_fitted_curve(hFig):
global x x_fit
h = plt.figure(hFig)
hsp = plt.subplot(2,2,1)
plot3(x(:,1),x(:,2),x(:,3),'b.','lineWidth',1);
hold on
plot3(x_fit(:,1),x_fit(:,2),x_fit(:,3),'g.','lineWidth',1);
hold off
set(hsp,'lineWidth',2,'fontSize',15);
hsp = subplot(2,2,2);
plotRF(hsp,x(:,1),x(:,2),'','','','b.');
addplotRF(hsp,x_fit(:,1),x_fit(:,2),'g.');
hsp = subplot(2,2,3);
plotRF(hsp,x(:,1),x(:,3),'','','','b.');
addplotRF(hsp,x_fit(:,1),x_fit(:,3),'g.');
hsp = subplot(2,2,4);
plotRF(hsp,x(:,2),x(:,3),'','','','b.');
addplotRF(hsp,x_fit(:,2),x_fit(:,3),'g.');
drawnow
%end
'''
eps = 1e-4
#global maxIter,delta_a_max, delta_b_max,delta_tau_max,a_b_tau_result
def op(psi):
a.init()
#global p, nDim, a, b, x, x_fit
a.nDim = 3
#tau = get_fit_1D_open_manifold_3D_param
tau = get_fit_1D_open_manifold_3D_param.op(psi)
aux = np.zeros((tau.shape[0],5)) #added
nS = a.x.shape[0]
for iter in xrange(1,a.maxIter+1):
string ='iteration ' + str(iter)
print string
'''
#%%%%%%%%%%%%%%%%%%%%%
#% solve for a and b %
#%%%%%%%%%%%%%%%%%%%%%
'''
a_old = a.a
b_old = a.b
j_pi_tau = np.dot(tau,np.pi*np.array([[1,2,3]]))
cos_j_pi_tau = np.cos(j_pi_tau)
A11 = np.sum(cos_j_pi_tau**2, axis=0)
A12 = np.sum(cos_j_pi_tau, axis=0)
A21 = A12
A22 = nS
x_cos_j_pi_tau = a.x*cos_j_pi_tau
b1 = np.sum(x_cos_j_pi_tau, axis=0)
b2 = np.sum(a.x, axis=0)
coeff = np.zeros((2,3))
for qq in xrange(3):
A = np.array([[A11[qq],A12[qq]],[A21[qq], A22]])
b = np.array([b1[qq], b2[qq]])
coeff[:,qq] = np.linalg.solve(A,b)
a.a = coeff[0,:]
a.b = coeff[1,:]
'''
%%%%%%%%%%%%%%%%%%%%%%%%%
#% plot the fitted curve %
%%%%%%%%%%%%%%%%%%%%%%%%%
'''
j_pi_tau = np.dot(np.linspace(0,1,1000).reshape(-1,1),np.array([[1,2,3]]))*np.pi
cos_j_pi_tau = np.cos(j_pi_tau)
tmp = a.a*cos_j_pi_tau
a.x_fit = tmp + a.b
#%plot_fitted_curve(iter)
'''
%%%%%%%%%%%%%%%%%
#% solve for tau %
%%%%%%%%%%%%%%%%%
'''
tau_old = tau
for a.p in xrange(nS):
tau[a.p],beta = solve_d_R_d_tau_p_3D.op() #added
for kk in xrange(beta.shape[0]):
aux[a.p,kk] = beta[kk]
'''
if iter == 0:
data = loadmat('aux0.mat') # (this is for < v7.3
elif iter == 1:
data = loadmat('aux1.mat') # (this is for < v7.3
else:
data = loadmat('aux2.mat') # (this is for < v7.3
imaux = data['aux']
plt.subplot(2, 2, 1)
plt.imshow(aux, cmap=plt.get_cmap('gray'),aspect=0.1)
plt.title('aux')
plt.subplot(2, 2, 2)
plt.imshow(imaux, cmap=plt.get_cmap('gray'), aspect=0.1)
plt.title('imaux')
plt.show()
'''
'''
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#% calculate the changes in fitting parameters %
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
'''
delta_a = np.fabs(a.a-a_old)/(np.fabs(a.a)+eps)
delta_b = np.fabs(a.b-b_old)/(np.fabs(a.b)+eps)
delta_tau = np.fabs(tau-tau_old)
delta_a = max(delta_a)*100
delta_b = max(delta_b)*100
delta_tau = max(delta_tau)
print ' changes in fitting parameters: \n'
string = ' amplitudes: '+ str(delta_a) + '\n' + \
' offsets: ' + str(delta_b) + ' \n' +\
' values of tau: ' + str(delta_tau) + ' \n'
print string
if (delta_a<a.delta_a_max) and (delta_b < a.delta_b_max) and (delta_tau < a.delta_tau_max):
break
return (a.a,a.b,tau)
|
gpl-2.0
| 2,362,552,218,448,211,500 | 30.147436 | 99 | 0.465803 | false | 2.552163 | false | false | false |
tjmonsi/cmsc129-2016-repo
|
submissions/exercise4/pitargue/interpreter/syntax_analyzer.py
|
1
|
33315
|
from . import evaluate
class Parser():
def __init__(self, variables):
self.lexemes = []
self.lookahead = None
self.tokens = []
self.parse_tree = []
self.variables = variables
self.types = {
'TRUE_KEYWORD': 'boolean',
'FALSE_KEYWORD': 'boolean',
'INTEGER_LITERAL': 'int',
'FLOAT_LITERAL': 'float',
'STRING_LITERAL': 'string'
}
def nextLexeme(self):
if self.lexemes:
self.lookahead = self.lexemes.pop(0)
# print(self.lookahead[1])
else:
self.lookahead = ('eof', 'END OF FILE')
def assert_next(self, expected_value, error_message):
if self.lookahead[0] == expected_value:
self.nextLexeme()
return True
else:
print(error_message + ' before ' + self.lookahead[1])
return False
def assert_delimiter(self):
self.assert_next('SEMICOLON_KEYWORD', 'expected semicolon')
def check_next(self, expected_values):
if len(expected_values) == 1:
return self.lookahead[0] == expected_values[0]
for value in expected_values:
if self.lookahead[0] == value:
return True
return False
def parse(self, lexemes):
self.lexemes = lexemes
self.nextLexeme()
while not self.lookahead[0] == 'eof':
t = self.statement()
if isinstance(t, list):
self.parse_tree.extend(t)
else:
self.parse_tree.append(t)
return self.parse_tree
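    # Usage sketch (hypothetical token stream; in practice the lexical analyzer
    # supplies the (TOKEN_TYPE, lexeme) pairs):
    #
    #   p = Parser({})
    #   tree = p.parse([('VAR_KEYWORD', 'var'), ('IDENTIFIER', 'x'),
    #                   ('EQUAL_SIGN_KEYWORD', '='), ('INTEGER_LITERAL', '5'),
    #                   ('SEMICOLON_KEYWORD', ';')])
    #   # each entry of tree is an evaluate.* node built while parsing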
def codeblock(self):
stmts = []
self.assert_next('OPEN_CURLY_BRACE_KEYWORD', 'expected {')
while not self.check_next(['CLOSE_CURLY_BRACE_KEYWORD', 'eof']):
t = self.statement()
if isinstance(t, list):
stmts.extend(t)
else:
stmts.append(t)
self.assert_next('CLOSE_CURLY_BRACE_KEYWORD', 'expected }')
return stmts
def statement(self):
# STATEMENT := EXPRESSION | INPUT | OUTPUT | COMMENT | IFSTMT |
# SWITCHSTMT | LOOPSTMT | FUNCTIONDEC | RETURN |
# break | continue
if self.check_next(['INPUT_KEYWORD']):
return self.input()
elif self.check_next(['OUTPUT_KEYWORD']):
return self.output()
elif self.check_next(['VAR_KEYWORD']):
return self.vardec()
elif self.check_next(['SINGLE_LINE_COMMENT']):
self.nextLexeme()
elif self.check_next(['IF_KEYWORD']):
return self.ifstmt()
elif self.check_next(['SWITCH_KEYWORD']):
self.switch()
elif self.check_next(['WHILE_KEYWORD']):
return self.while_loop()
elif self.check_next(['DO_KEYWORD']):
return self.do_while_loop()
elif self.check_next(['FOR_KEYWORD']):
return self.for_loop()
elif self.check_next(['FOREACH_KEYWORD']):
self.foreach_loop()
elif self.check_next(['FUNCTION_KEYWORD']):
self.function()
elif self.check_next(['RETURN_KEYWORD']):
self.returnstmt()
elif self.check_next(['BREAK_KEYWORD']):
return self.breakstmt()
elif self.check_next(['CONTINUE_KEYWORD']):
return self.continuestmt()
elif self.check_next(['OPEN_KEYWORD']):
return self.openstmt()
elif self.check_next(['WRITE_KEYWORD']):
return self.writestmt()
elif self.check_next(['WRITELINE_KEYWORD']):
return self.writelinestmt()
elif self.check_next(['APPEND_KEYWORD']):
return self.appendstmt()
elif self.check_next(['IDENTIFIER']):
cur = self.lookahead
self.nextLexeme()
if self.check_next(['EQUAL_SIGN_KEYWORD']):
ass = self.assignment(cur[1], None)
self.assert_delimiter()
                return ass
elif self.check_next(['INCREMENT_KEYWORD']):
self.nextLexeme()
self.assert_delimiter()
return evaluate.Increment(self.variables, cur[1])
elif self.check_next(['DECREMENT_KEYWORD']):
self.nextLexeme()
self.assert_delimiter()
return evaluate.Decrement(self.variables, cur[1])
elif self.check_next(['OPEN_BRACKET_KEYWORD']):
self.nextLexeme()
pos = self.expression()
self.assert_next('CLOSE_BRACKET_KEYWORD', 'expected ]')
if self.check_next(['EQUAL_SIGN_KEYWORD']):
ass = self.assignment(cur[1], pos)
self.assert_delimiter()
                    return ass
return evaluate.Variable(self.variables, cur[1], pos)
else:
print('unknown statement at ' + cur[1])
if self.check_next(['SEMICOLON_KEYWORD']):
self.nextLexeme()
else:
print('unknown statement at ' + self.lookahead[1])
self.nextLexeme()
def input(self):
self.nextLexeme()
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
name = self.lookahead[1]
mess = None
self.assert_next('IDENTIFIER', 'expected identifier')
if self.check_next(['OPEN_BRACKET_KEYWORD']):
self.nextLexeme()
self.expression()
self.assert_next('CLOSE_BRACKET_KEYWORD', 'expected ]')
if self.check_next(['COMMA_KEYWORD']):
self.nextLexeme()
mess = self.lookahead[1]
self.assert_next('STRING_LITERAL', 'expected string literal')
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
self.assert_delimiter()
return evaluate.Input(self.variables, name, mess)
def output(self):
self.nextLexeme()
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
expr = self.expression()
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
self.assert_delimiter()
return evaluate.Output(expr)
def appendstmt(self):
self.nextLexeme()
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
name = self.lookahead[1]
self.assert_next('IDENTIFIER', 'expected identifier')
self.assert_next('COMMA_KEYWORD', 'expected ,')
expr = self.expression()
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
self.assert_delimiter()
return evaluate.Append(self.variables, name, expr)
def openstmt(self):
self.nextLexeme()
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
path = None
if self.check_next(['IDENTIFIER']):
path = evaluate.Variable(self.variables, self.lookahead[1], None)
self.nextLexeme()
if self.check_next(['OPEN_BRACKET_KEYWORD']):
self.nextLexeme()
self.expression()
self.assert_next('CLOSE_BRACKET_KEYWORD', 'expected ]')
elif self.check_next(['STRING_LITERAL']):
path = self.lookahead[1]
self.nextLexeme()
else:
print('expected variable identifier or string literal before ' + self.lookahead[1])
self.assert_next('COMMA_KEYWORD', 'expected ,')
mode = self.lookahead[1]
self.assert_next('STRING_LITERAL', 'expected string literal')
self.assert_next('COMMA_KEYWORD', 'expected ,')
name = self.lookahead[1]
self.assert_next('IDENTIFIER', 'expected identifier')
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
self.assert_delimiter()
return evaluate.Open(path, mode, name, self.variables)
def writestmt(self):
self.nextLexeme()
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
name = self.lookahead[1]
self.assert_next('IDENTIFIER', 'expected identifier')
self.assert_next('COMMA_KEYWORD', 'expected ,')
value = None
if self.check_next(['IDENTIFIER']):
source_iden = self.lookahead[1]
self.nextLexeme()
pos = None
if self.check_next(['OPEN_BRACKET_KEYWORD']):
self.nextLexeme()
pos = self.expression()
self.assert_next('CLOSE_BRACKET_KEYWORD', 'expected ]')
value = evaluate.Variable(self.variables, source_iden, pos)
elif self.check_next(['STRING_LITERAL']):
value = self.lookahead[1]
self.nextLexeme()
else:
print('expected variable identifier or string literal before ' + self.lookahead[1])
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
self.assert_delimiter()
return evaluate.Write(self.variables, name, value)
def writelinestmt(self):
self.nextLexeme()
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
name = self.lookahead[1]
self.assert_next('IDENTIFIER', 'expected identifier')
self.assert_next('COMMA_KEYWORD', 'expected ,')
value = None
if self.check_next(['IDENTIFIER']):
source_iden = self.lookahead[1]
self.nextLexeme()
pos = None
if self.check_next(['OPEN_BRACKET_KEYWORD']):
self.nextLexeme()
pos = self.expression()
self.assert_next('CLOSE_BRACKET_KEYWORD', 'expected ]')
value = evaluate.Variable(self.variables, source_iden, pos)
elif self.check_next(['STRING_LITERAL']):
value = self.lookahead[1]
self.nextLexeme()
else:
print('expected variable identifier or string literal before ' + self.lookahead[1])
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
self.assert_delimiter()
return evaluate.WriteLine(self.variables, name, value)
def assignment(self, var_name, pos):
self.assert_next('EQUAL_SIGN_KEYWORD', 'expected =')
if self.check_next(['OPEN_BRACKET_KEYWORD']):
self.nextLexeme()
            vals = []
while not self.check_next(['CLOSE_BRACKET_KEYWORD']):
expr = self.expression()
if expr:
vals.append(expr)
if not self.check_next(['CLOSE_BRACKET_KEYWORD', 'SEMICOLON_KEYWORD', 'eof']):
self.assert_next('COMMA_KEYWORD', 'expected comma')
if self.check_next(['SEMICOLON_KEYWORD', 'eof']):
print('expected ] before ' + self.lookahead[1])
break
else:
if not self.check_next(['CLOSE_BRACKET_KEYWORD', 'SEMICOLON_KEYWORD', 'eof']):
print('expected ] before ' + self.lookahead[1])
break
self.assert_next('CLOSE_BRACKET_KEYWORD', 'expected ]')
return evaluate.Assignment(self.variables, var_name, pos, ('array', vals))
else:
expr = self.expression()
return evaluate.Assignment(self.variables, var_name, pos, ('single', expr))
def vardec(self):
self.nextLexeme()
name = self.lookahead[1]
varde = []
if self.assert_next('IDENTIFIER', 'expected identifier'):
self.variables[name] = {
'type': 'undefined',
'value': None
}
varde.append(evaluate.VarDec(self.variables, name))
if self.check_next(['EQUAL_SIGN_KEYWORD']):
# self.nextLexeme()
# if self.check_next(['OPEN_BRACKET_KEYWORD']):
# self.nextLexeme()
# while not self.check_next(['CLOSE_BRACKET_KEYWORD']):
# self.expression()
# if not self.check_next(['CLOSE_BRACKET_KEYWORD']):
# self.assert_next('COMMA_KEYWORD', 'expected comma')
# self.assert_next('CLOSE_BRACKET_KEYWORD', 'expected ]')
# else:
# self.expression()
varde.append(self.assignment(name, None))
self.assert_delimiter()
return varde
def ifstmt(self):
self.nextLexeme()
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
cond = self.expression()
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
then = self.codeblock()
elsif_cond = None
elsif_block = None
if self.check_next(['ELSIF_KEYWORD']):
self.nextLexeme()
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
elsif_cond = self.expression()
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
elsif_block = self.codeblock()
else_block = None
if self.check_next(['ELSE_KEYWORD']):
self.nextLexeme()
else_block = self.codeblock()
return evaluate.IfThenElse(cond, then, elsif_cond, elsif_block, else_block)
def switch(self):
self.nextLexeme()
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
if self.variable():
self.nextLexeme()
else:
print('expected variable identifier before ' + self.lookahead[1])
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
self.assert_next('OPEN_CURLY_BRACE_KEYWORD', 'expected {')
while not self.check_next(['CLOSE_CURLY_BRACE_KEYWORD', 'eof']):
if self.check_next(['DEFAULT_KEYWORD']):
break
self.caseblock()
if self.check_next(['DEFAULT_KEYWORD']):
self.nextLexeme()
self.assert_next('COLON_KEYWORD', 'expected :')
self.codeblock()
self.assert_next('CLOSE_CURLY_BRACE_KEYWORD', 'expected }')
def caseblock(self):
self.assert_next('CASE_KEYWORD', 'expected case')
if self.literal():
self.nextLexeme()
else:
print('expected literal at ' + self.lookahead[1])
self.assert_next('COLON_KEYWORD', 'expected :')
# self.assert_next('INTEGER_LITERAL', 'expected literal')
self.codeblock()
def while_loop(self):
self.nextLexeme()
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
cond = self.expression()
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
loop = self.codeblock()
return evaluate.WhileLoop(cond, loop)
def do_while_loop(self):
self.nextLexeme()
loop = self.codeblock()
self.assert_next('WHILE_KEYWORD', 'expected while')
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
cond = self.expression()
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
self.assert_delimiter()
return evaluate.DoWhileLoop(loop, cond)
def for_loop(self):
self.nextLexeme()
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
if self.check_next(['CLOSE_PARENTHESIS_KEYWORD']):
self.nextLexeme()
else:
init = self.statement()
cond = self.expression()
self.assert_delimiter()
last = self.expression()
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
loop = self.codeblock()
return evaluate.ForLoop(init, cond, loop, last)
def foreach_loop(self):
self.nextLexeme()
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
self.assert_next('IDENTIFIER', 'expected identifier')
self.assert_next('IN_KEYWORD', 'expected in')
self.assert_next('IDENTIFIER', 'expected identifier')
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
self.codeblock()
def function(self):
self.nextLexeme()
self.assert_next('IDENTIFIER', 'expected function identifier')
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
while not self.check_next(['CLOSE_PARENTHESIS_KEYWORD']):
self.assert_next('IDENTIFIER', 'expected identifier')
if not self.check_next(['CLOSE_PARENTHESIS_KEYWORD']):
self.assert_next('COMMA_KEYWORD', 'expected comma')
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
self.codeblock()
def returnstmt(self):
self.nextLexeme()
self.expression()
self.assert_delimiter()
def breakstmt(self):
self.nextLexeme()
self.assert_delimiter()
return evaluate.Break()
def continuestmt(self):
self.nextLexeme()
self.assert_delimiter()
return evaluate.Continue()
def expression(self):
operators = []
operands = []
self.evaluate_expression(operators, operands)
return evaluate.Expression(operators, operands)
def evaluate_expression(self, operators, operands):
if self.check_next(['OPEN_PARENTHESIS_KEYWORD']):
self.nextLexeme()
operands.append(self.expression())
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
if self.evaluate_token(operators, operands):
self.evaluate_expression(operators, operands)
def evaluate_token(self, operators, operands):
if self.literal():
lit = self.lookahead
self.nextLexeme()
operands.append(evaluate.Literal(self.types[lit[0]], lit[1]))
return True
elif self.variable():
name = self.lookahead
pos = None
self.nextLexeme()
if self.check_next(['OPEN_BRACKET_KEYWORD']):
self.nextLexeme()
pos = self.expression()
self.assert_next('CLOSE_BRACKET_KEYWORD', 'expected ]')
if self.check_next(['EQUAL_SIGN_KEYWORD']):
                return self.assignment(name[1], pos)
elif self.check_next(['INCREMENT_KEYWORD']):
self.nextLexeme()
operands.append(evaluate.Increment(self.variables, name[1]))
return True
elif self.check_next(['DECREMENT_KEYWORD']):
self.nextLexeme()
operands.append(evaluate.Decrement(self.variables, name[1]))
return True
elif self.check_next(['OPEN_PARENTHESIS_KEYWORD']):
self.nextLexeme()
while not self.check_next(['CLOSE_PARENTHESIS_KEYWORD']):
self.expression()
if not self.check_next(['CLOSE_PARENTHESIS_KEYWORD', 'SEMICOLON_KEYWORD']):
self.assert_next('COMMA_KEYWORD', 'expected comma')
if self.check_next(['SEMICOLON_KEYWORD', 'eof']):
print('expected ) before ' + self.lookahead[1])
return False
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
operands.append(evaluate.Variable(self.variables, name[1], pos))
return True
# elif self.check_next(['MINUS_KEYWORD']):
# self.nextLexeme()
# expr = self.expression()
# operands.append(evaluate.Negation(expr))
# return True
elif self.check_next(['LEN_KEYWORD']):
self.nextLexeme()
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
expr = self.expression()
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
operands.append(evaluate.Len(expr))
return True
elif self.check_next(['RAND_KEYWORD']):
self.nextLexeme()
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
expr1 = self.expression()
self.assert_next('COMMA_KEYWORD', 'expected ,')
expr2 = self.expression()
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
operands.append(evaluate.Random(expr1, expr2))
return True
elif self.check_next(['READ_KEYWORD']):
self.nextLexeme()
            self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
name = self.lookahead[1]
self.assert_next('IDENTIFIER', 'expected variable identifier')
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
operands.append(evaluate.Read(self.variables, name))
return True
elif self.check_next(['READLINE_KEYWORD']):
self.nextLexeme()
            self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
name = self.lookahead[1]
self.assert_next('IDENTIFIER', 'expected variable identifier')
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
operands.append(evaluate.ReadLine(self.variables, name))
return True
elif self.check_next(['SQRT_KEYWORD']):
self.nextLexeme()
            self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
expr = self.expression()
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
operands.append(evaluate.Sqrt(expr))
return True
elif self.check_next(['NOT_KEYWORD']):
self.nextLexeme()
expr = self.expression()
operands.append(evaluate.Not(expr))
return True
elif self.check_next(['INT_KEYWORD']):
self.nextLexeme()
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
expr = self.expression()
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
operands.append(evaluate.IntegerCast(expr))
return True
elif self.check_next(['PLUS_KEYWORD', 'MINUS_KEYWORD', 'MULTIPLY_KEYWORD', 'DIVIDE_KEYWORD', 'MODULO_KEYWORD']):
self.append_math_operator(operators, operands)
return True
elif self.check_next(['GREATER_THAN_KEYWORD', 'LESS_THAN_KEYWORD', 'GREATER_THAN_OR_EQUALS_KEYWORD',
'LESS_THAN_OR_EQUALS_KEYWORD', 'AND_KEYWORD', 'OR_KEYWORD', 'EQUALS_KEYWORD',
'NOT_EQUALS_KEYWORD']):
# operators.append(self.lookahead)
# self.nextLexeme()
# operands.append(self.expression())
self.append_boolean_operator(operators, operands)
return True
else:
return False
def append_boolean_operator(self, operators, operands):
operator = self.lookahead
self.nextLexeme()
        while operators and operators[-1][0] in ['PLUS_KEYWORD', 'MINUS_KEYWORD', 'MULTIPLY_KEYWORD', 'DIVIDE_KEYWORD', 'MODULO_KEYWORD']:
op = operators.pop()
if op[0] == 'MINUS_KEYWORD':
if len(operands) % 2 != 0:
t1 = operands.pop()
operands.append(evaluate.Negation(t1))
else:
if len(operands) < 2:
raise evaluate.EvaluationError('Invalid expression at ' + operator[1])
else:
t2 = operands.pop()
t1 = operands.pop()
operands.append(evaluate.MathOperation(op, t1, t2))
operators.append(operator)
operands.append(self.expression())
def append_math_operator(self, operators, operands):
operator = self.lookahead
self.nextLexeme()
if operators:
            while operators and self.check_precedence(operators[-1], operator):
op = operators.pop()
if op[0] == 'MINUS_KEYWORD':
if len(operands) % 2 != 0:
t1 = operands.pop()
operands.append(evaluate.Negation(t1))
else:
if len(operands) < 2:
raise evaluate.EvaluationError('Invalid expression at ' + operator[1])
else:
t2 = operands.pop()
t1 = operands.pop()
operands.append(evaluate.MathOperation(op, t1, t2))
else:
if len(operands) < 2:
raise evaluate.EvaluationError('Invalid expression at ' + operator[1])
else:
t2 = operands.pop()
t1 = operands.pop()
operands.append(evaluate.MathOperation(op, t1, t2))
operators.append(operator)
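        # e.g. while parsing "2 * 3 + 1" the pending '*' is folded into a
        # MathOperation before '+' is pushed, so multiplication binds tighter
        # than addition when the resulting Expression is evaluated.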
def check_precedence(self, op1, op2):
# if op1[0] in ['GREATER_THAN_KEYWORD', 'LESS_THAN_KEYWORD', 'GREATER_THAN_OR_EQUALS_KEYWORD',
# 'LESS_THAN_OR_EQUALS_KEYWORD', 'AND_KEYWORD', 'OR_KEYWORD', 'EQUALS_KEYWORD',
# 'NOT_EQUALS_KEYWORD']:
# return True
        if op1[0] in ['MULTIPLY_KEYWORD', 'DIVIDE_KEYWORD', 'MODULO_KEYWORD'] and op2[0] in ['PLUS_KEYWORD', 'MINUS_KEYWORD']:
return True
else:
return False
# def expression(self):
# return self.operation()
#
# def operation(self):
# trm = self.term()
# if trm:
# oprtr = self.operator()
# if oprtr:
# self.nextLexeme()
# oprtn = self.operation()
# if oprtn:
# if oprtr in ['GREATER_THAN_KEYWORD',
# 'LESS_THAN_KEYWORD', 'GREATER_THAN_OR_EQUALS_KEYWORD',
# 'LESS_THAN_OR_EQUALS_KEYWORD', 'AND_KEYWORD',
# 'OR_KEYWORD', 'EQUALS_KEYWORD', 'NOT_EQUALS_KEYWORD']:
# return evaluate.BooleanExpression(oprtr, trm, oprtn)
# else:
# return evaluate.MathExpression(oprtr, oprtn, trm)
# else:
# return False
# else:
# return trm
# else:
# print('expected expression at ' + self.lookahead[1])
# return False
#
# def term(self):
# op = self.operand()
# if op:
# oprtr = self.operator()
# if oprtr:
# self.nextLexeme()
# trm = self.term()
# if trm:
# if oprtr in ['GREATER_THAN_KEYWORD',
# 'LESS_THAN_KEYWORD', 'GREATER_THAN_OR_EQUALS_KEYWORD',
# 'LESS_THAN_OR_EQUALS_KEYWORD', 'AND_KEYWORD',
# 'OR_KEYWORD', 'EQUALS_KEYWORD', 'NOT_EQUALS_KEYWORD']:
# return evaluate.BooleanExpression(oprtr, op, trm)
# else:
# return evaluate.MathExpression(oprtr, trm, op)
# else:
# return False
# else:
# return op
# else:
# return False
#
#
# def operand(self):
# if self.check_next(['OPEN_PARENTHESIS_KEYWORD']):
# self.nextLexeme()
# expr = self.expression()
# self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
# return expr
# else:
# if self.literal():
# lit = self.lookahead
# self.nextLexeme()
# return evaluate.Literal(self.types[lit[0]], lit[1])
# elif self.variable():
# name = self.lookahead
# pos = None
# self.nextLexeme()
# if self.check_next(['OPEN_BRACKET_KEYWORD']):
# self.nextLexeme()
# pos = self.expression()
# self.assert_next('CLOSE_BRACKET_KEYWORD', 'expected ]')
# if self.check_next(['EQUAL_SIGN_KEYWORD']):
# return self.assignment(name)
# elif self.check_next(['INCREMENT_KEYWORD']):
# self.nextLexeme()
# return evaluate.Increment(self.variables, name[1])
# elif self.check_next(['DECREMENT_KEYWORD']):
# self.nextLexeme()
# return evaluate.Decrement(self.variables, name[1])
# elif self.check_next(['OPEN_PARENTHESIS_KEYWORD']):
# self.nextLexeme()
# while not self.check_next(['CLOSE_PARENTHESIS_KEYWORD']):
# self.expression()
# if not self.check_next(['CLOSE_PARENTHESIS_KEYWORD', 'SEMICOLON_KEYWORD']):
# self.assert_next('COMMA_KEYWORD', 'expected comma')
# if self.check_next(['SEMICOLON_KEYWORD', 'eof']):
# print('expected ) before ' + self.lookahead[1])
# return False
# self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
# return evaluate.Variable(self.variables, name[1], pos)
# elif self.check_next(['MINUS_KEYWORD']):
# self.nextLexeme()
# expr = self.expression()
# return evaluate.Negation(expr)
# elif self.check_next(['LEN_KEYWORD']):
# self.nextLexeme()
# self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
# # if self.check_next(['STRING_LITERAL']):
# # self.nextLexeme()
# # self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
# # elif self.check_next(['INTEGER_LITERAL']):
# # self.nextLexeme()
# # self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
# # elif self.check_next(['FLOAT_LITERAL']):
# # self.nextLexeme()
# # self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
# # elif self.check_next(['IDENTIFIER']):
# # self.nextLexeme()
# # if self.check_next(['OPEN_BRACKET_KEYWORD']):
# # self.nextLexeme()
# # self.expression()
# # self.assert_next('CLOSE_BRACKET_KEYWORD', 'expected ]')
# expr = self.expression()
# self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
# return evaluate.Len(expr)
# elif self.check_next(['RAND_KEYWORD']):
# self.nextLexeme()
# self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
# expr1 = self.expression()
# self.assert_next('COMMA_KEYWORD', 'expected ,')
# expr2 = self.expression()
# self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
# return evaluate.Random(expr1, expr2)
# elif self.check_next(['READ_KEYWORD']):
# self.nextLexeme()
# self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected ()')
# name = self.lookahead[1]
# self.assert_next('IDENTIFIER', 'expected variable identifier')
# self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
# return evaluate.Read(self.variables, name)
# elif self.check_next(['READLINE_KEYWORD']):
# self.nextLexeme()
# self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected ()')
# name = self.lookahead[1]
# self.assert_next('IDENTIFIER', 'expected variable identifier')
# self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
# return evaluate.ReadLine(self.variables, name)
# elif self.check_next(['SQRT_KEYWORD']):
# self.nextLexeme()
# self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected ()')
# expr = self.expression()
# self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
# return evaluate.Sqrt(expr)
# elif self.check_next(['NOT_KEYWORD']):
# self.nextLexeme()
# expr = self.expression()
# return evaluate.Not(expr)
# else:
# return False
def operator(self):
if self.check_next(['PLUS_KEYWORD', 'MINUS_KEYWORD',
'MULTIPLY_KEYWORD', 'DIVIDE_KEYWORD',
'MODULO_KEYWORD', 'GREATER_THAN_KEYWORD',
'LESS_THAN_KEYWORD', 'GREATER_THAN_OR_EQUALS_KEYWORD',
'LESS_THAN_OR_EQUALS_KEYWORD', 'AND_KEYWORD',
'OR_KEYWORD', 'EQUALS_KEYWORD', 'NOT_EQUALS_KEYWORD'
]):
return self.lookahead[0]
else:
return False
def literal(self):
return self.check_next(['INTEGER_LITERAL', 'FLOAT_LITERAL', 'STRING_LITERAL', 'TRUE_KEYWORD', 'FALSE_KEYWORD'])
def variable(self):
return self.check_next(['IDENTIFIER'])
|
mit
| -5,841,835,166,006,237,000 | 42.777924 | 137 | 0.536125 | false | 4.182674 | false | false | false |
icyflame/batman
|
tests/flow_tests.py
|
1
|
7876
|
# -*- coding: utf-8 -*-
"""Tests for the flow module."""
#
# (C) Pywikibot team, 2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
from pywikibot.exceptions import NoPage
from pywikibot.flow import Board, Topic, Post
from pywikibot.tools import PY2
from tests.aspects import (
TestCase,
)
from tests.basepage_tests import (
BasePageMethodsTestBase,
BasePageLoadRevisionsCachingTestBase,
)
if not PY2:
unicode = str
class TestBoardBasePageMethods(BasePageMethodsTestBase):
"""Test Flow board pages using BasePage-defined methods."""
family = 'mediawiki'
code = 'mediawiki'
def setUp(self):
"""Set up unit test."""
self._page = Board(self.site, 'Talk:Sandbox')
super(TestBoardBasePageMethods, self).setUp()
def test_basepage_methods(self):
"""Test basic Page methods on a Flow board page."""
self._test_invoke()
self._test_return_datatypes()
self.assertFalse(self._page.isRedirectPage())
self.assertEqual(self._page.latest_revision.parent_id, 0)
def test_content_model(self):
"""Test Flow page content model."""
self.assertEqual(self._page.content_model, 'flow-board')
class TestTopicBasePageMethods(BasePageMethodsTestBase):
"""Test Flow topic pages using BasePage-defined methods."""
family = 'mediawiki'
code = 'mediawiki'
def setUp(self):
"""Set up unit test."""
self._page = Topic(self.site, 'Topic:Sh6wgo5tu3qui1w2')
super(TestTopicBasePageMethods, self).setUp()
def test_basepage_methods(self):
"""Test basic Page methods on a Flow topic page."""
self._test_invoke()
self._test_return_datatypes()
self.assertFalse(self._page.isRedirectPage())
self.assertEqual(self._page.latest_revision.parent_id, 0)
def test_content_model(self):
"""Test Flow topic page content model."""
self.assertEqual(self._page.content_model, 'flow-board')
class TestLoadRevisionsCaching(BasePageLoadRevisionsCachingTestBase):
"""Test site.loadrevisions() caching."""
family = 'mediawiki'
code = 'mediawiki'
def setUp(self):
"""Set up unit test."""
self._page = Board(self.site, 'Talk:Sandbox')
super(TestLoadRevisionsCaching, self).setUp()
def test_page_text(self):
"""Test site.loadrevisions() with Page.text."""
self.skipTest('See T107537')
self._test_page_text()
class TestFlowLoading(TestCase):
"""Test loading of Flow objects from the API."""
family = 'mediawiki'
code = 'mediawiki'
cached = True
def test_board_uuid(self):
"""Test retrieval of Flow board UUID."""
board = Board(self.site, 'Talk:Sandbox')
self.assertEqual(board.uuid, 'rl7iby6wgksbpfno')
def test_topic_uuid(self):
"""Test retrieval of Flow topic UUID."""
topic = Topic(self.site, 'Topic:Sh6wgo5tu3qui1w2')
self.assertEqual(topic.uuid, 'sh6wgo5tu3qui1w2')
def test_post_uuid(self):
"""Test retrieval of Flow post UUID.
This doesn't really "load" anything from the API. It just tests
the property to make sure the UUID passed to the constructor is
stored properly.
"""
topic = Topic(self.site, 'Topic:Sh6wgo5tu3qui1w2')
post = Post(topic, 'sh6wgoagna97q0ia')
self.assertEqual(post.uuid, 'sh6wgoagna97q0ia')
def test_post_contents(self):
"""Test retrieval of Flow post contents."""
# Load
topic = Topic(self.site, 'Topic:Sh6wgo5tu3qui1w2')
post = Post(topic, 'sh6wgoagna97q0ia')
# Wikitext
wikitext = post.get(format='wikitext')
self.assertIn('wikitext', post._content)
self.assertNotIn('html', post._content)
self.assertIsInstance(wikitext, unicode)
self.assertNotEqual(wikitext, '')
# HTML
html = post.get(format='html')
self.assertIn('html', post._content)
self.assertIn('wikitext', post._content)
self.assertIsInstance(html, unicode)
self.assertNotEqual(html, '')
# Caching (hit)
post._content['html'] = 'something'
html = post.get(format='html')
self.assertIsInstance(html, unicode)
self.assertEqual(html, 'something')
self.assertIn('html', post._content)
# Caching (reload)
post._content['html'] = 'something'
html = post.get(format='html', force=True)
self.assertIsInstance(html, unicode)
self.assertNotEqual(html, 'something')
self.assertIn('html', post._content)
def test_topiclist(self):
"""Test loading of topiclist."""
board = Board(self.site, 'Talk:Sandbox')
i = 0
for topic in board.topics(limit=7):
i += 1
if i == 10:
break
self.assertEqual(i, 10)
class TestFlowFactoryErrors(TestCase):
"""Test errors associated with class methods generating Flow objects."""
family = 'test'
code = 'test'
cached = True
def test_illegal_arguments(self):
"""Test illegal method arguments."""
board = Board(self.site, 'Talk:Pywikibot test')
real_topic = Topic(self.site, 'Topic:Slbktgav46omarsd')
fake_topic = Topic(self.site, 'Topic:Abcdefgh12345678')
# Topic.from_topiclist_data
self.assertRaises(TypeError, Topic.from_topiclist_data, self.site, '', {})
self.assertRaises(TypeError, Topic.from_topiclist_data, board, 521, {})
self.assertRaises(TypeError, Topic.from_topiclist_data, board,
'slbktgav46omarsd', [0, 1, 2])
self.assertRaises(NoPage, Topic.from_topiclist_data, board,
'abc', {'stuff': 'blah'})
# Post.fromJSON
self.assertRaises(TypeError, Post.fromJSON, board, 'abc', {})
self.assertRaises(TypeError, Post.fromJSON, real_topic, 1234, {})
self.assertRaises(TypeError, Post.fromJSON, real_topic, 'abc', [])
self.assertRaises(NoPage, Post.fromJSON, fake_topic, 'abc',
{'posts': [], 'revisions': []})
def test_invalid_data(self):
"""Test invalid "API" data."""
board = Board(self.site, 'Talk:Pywikibot test')
real_topic = Topic(self.site, 'Topic:Slbktgav46omarsd')
# Topic.from_topiclist_data
self.assertRaises(ValueError, Topic.from_topiclist_data,
board, 'slbktgav46omarsd', {'stuff': 'blah'})
self.assertRaises(ValueError, Topic.from_topiclist_data,
board, 'slbktgav46omarsd',
{'posts': [], 'revisions': []})
self.assertRaises(ValueError, Topic.from_topiclist_data, board,
'slbktgav46omarsd',
{'posts': {'slbktgav46omarsd': ['123']},
'revisions': {'456': []}})
self.assertRaises(AssertionError, Topic.from_topiclist_data, board,
'slbktgav46omarsd',
{'posts': {'slbktgav46omarsd': ['123']},
'revisions': {'123': {'content': 789}}})
# Post.fromJSON
self.assertRaises(ValueError, Post.fromJSON, real_topic, 'abc', {})
self.assertRaises(ValueError, Post.fromJSON, real_topic, 'abc',
{'stuff': 'blah'})
self.assertRaises(ValueError, Post.fromJSON, real_topic, 'abc',
{'posts': {'abc': ['123']},
'revisions': {'456': []}})
self.assertRaises(AssertionError, Post.fromJSON, real_topic, 'abc',
{'posts': {'abc': ['123']},
'revisions': {'123': {'content': 789}}})
|
mit
| 6,127,447,604,697,114,000 | 34.318386 | 82 | 0.599543 | false | 3.838207 | true | false | false |
jimmykiselak/lbrycrd
|
qa/rpc-tests/maxblocksinflight.py
|
1
|
3782
|
#!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import logging
'''
In this test we connect to one node over p2p, send it numerous inv's, and
compare the resulting number of getdata requests to a max allowed value. We
test for exceeding 128 blocks in flight, which was the limit a 0.9 client would
reach. [0.10 clients shouldn't request more than 16 from a single peer.]
'''
MAX_REQUESTS = 128
class TestManager(NodeConnCB):
# set up NodeConnCB callbacks, overriding base class
def on_getdata(self, conn, message):
self.log.debug("got getdata %s" % repr(message))
# Log the requests
for inv in message.inv:
if inv.hash not in self.blockReqCounts:
self.blockReqCounts[inv.hash] = 0
self.blockReqCounts[inv.hash] += 1
def on_close(self, conn):
if not self.disconnectOkay:
raise EarlyDisconnectError(0)
def __init__(self):
NodeConnCB.__init__(self)
self.log = logging.getLogger("BlockRelayTest")
def add_new_connection(self, connection):
self.connection = connection
self.blockReqCounts = {}
self.disconnectOkay = False
def run(self):
self.connection.rpc.generate(1) # Leave IBD
numBlocksToGenerate = [8, 16, 128, 1024]
for count in range(len(numBlocksToGenerate)):
current_invs = []
for i in range(numBlocksToGenerate[count]):
current_invs.append(CInv(2, random.randrange(0, 1 << 256)))
if len(current_invs) >= 50000:
self.connection.send_message(msg_inv(current_invs))
current_invs = []
if len(current_invs) > 0:
self.connection.send_message(msg_inv(current_invs))
# Wait and see how many blocks were requested
time.sleep(2)
total_requests = 0
with mininode_lock:
for key in self.blockReqCounts:
total_requests += self.blockReqCounts[key]
if self.blockReqCounts[key] > 1:
raise AssertionError("Error, test failed: block %064x requested more than once" % key)
if total_requests > MAX_REQUESTS:
raise AssertionError("Error, too many blocks (%d) requested" % total_requests)
print "Round %d: success (total requests: %d)" % (count, total_requests)
self.disconnectOkay = True
self.connection.disconnect_node()
class MaxBlocksInFlightTest(BitcoinTestFramework):
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("BITCOIND", "bitcoind"),
help="Binary to test max block requests behavior")
def setup_chain(self):
print "Initializing test directory "+self.options.tmpdir
initialize_chain_clean(self.options.tmpdir, 1)
def setup_network(self):
self.nodes = start_nodes(1, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1']],
binary=[self.options.testbinary])
def run_test(self):
test = TestManager()
test.add_new_connection(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test))
NetworkThread().start() # Start up network handling in another thread
test.run()
if __name__ == '__main__':
MaxBlocksInFlightTest().main()
|
mit
| 2,546,787,251,703,153,000 | 38.395833 | 110 | 0.618456 | false | 4.002116 | true | false | false |
botify-labs/mangrove
|
mangrove/pool.py
|
1
|
11546
|
from abc import ABCMeta
from multiprocessing import cpu_count
from concurrent.futures import ThreadPoolExecutor
from boto import ec2
from mangrove.declarative import ServiceDeclaration, ServicePoolDeclaration
from mangrove.mappings import ConnectionsMapping
from mangrove.utils import get_boto_module
from mangrove.exceptions import (
MissingMethodError,
DoesNotExistError,
NotConnectedError
)
class ServicePool(object):
"""Aws service connection pool wrapper
ServicePool class should be subclassed to provide
an amazon aws service connection pool. To do so,
creating a brand new class subclassing this one and
setting the services class attribute to an
existing boto module class should be enough.
* *Examples*: please take a look to mangrove.services
modules.
* *Nota*: To be as efficient as possible, every selected
regions connections will be made asynchronously using the
backported python3.2 concurrent.futures module.
:param regions: AWS regions to connect the service to as
a default every regions will be used.
:type regions: list of strings
:param default_region: region to be used as a default
:type default_region: string
:param aws_access_key_id: aws access key token (if not provided
AWS_ACCESS_KEY_ID will be fetched from
environment)
:type aws_access_key_id: string
:param aws_secret_access_key: aws secret access key (if not provided
AWS_SECRET_ACCESS_KEY will be fetched from
environment)
:type aws_secret_access_key: string
"""
__meta__ = ABCMeta
# Name of the boto python module to be used. Just in case
# you'd wanna use a fork instead.
_boto_module_name = 'boto'
# Boto aws service name to bind the regionalized
# pool to.
service = None
def __init__(self, connect=False, regions=None, default_region=None,
aws_access_key_id=None, aws_secret_access_key=None):
self._service_declaration = ServiceDeclaration(self.service)
self._service_declaration.regions = regions
self._service_declaration.default_region = default_region
self.module = self._service_declaration.module
self._executor = ThreadPoolExecutor(max_workers=cpu_count())
self._connections = ConnectionsMapping()
# _default_region private property setting should
# always be called after the _regions_names is set
self._regions_names = regions
self._default_region = default_region
if connect is True:
self.connect(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key
)
def connect(self, aws_access_key_id=None, aws_secret_access_key=None):
"""Starts connections to pool's services
:param aws_access_key_id: aws access key token (if not provided
AWS_ACCESS_KEY_ID will be fetched from
environment)
:type aws_access_key_id: string
:param aws_secret_access_key: aws secret access key (if not provided
AWS_SECRET_ACCESS_KEY will be fetched from
environment)
:type aws_secret_access_key: string
"""
        # For performance reasons, every region's connection is
        # made concurrently through the concurrent.futures library.
for region in self._service_declaration.regions:
self._connections[region] = self._executor.submit(
self._connect_module_to_region,
region,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key
)
if self._default_region is not None:
self._connections.default = self._service_declaration.default_region
def _connect_module_to_region(self, region, aws_access_key_id=None,
aws_secret_access_key=None):
"""Calls the connect_to_region method over the service's
module
:param region: AWS region to connect the service to.
:type region: list of strings
:param aws_access_key_id: aws access key token (if not provided
AWS_ACCESS_KEY_ID will be fetched from
environment)
:type aws_access_key_id: string
:param aws_secret_access_key: aws secret access key (if not provided
AWS_SECRET_ACCESS_KEY will be fetched from
environment)
:type aws_secret_access_key: string
"""
        return self.module.connect_to_region(
            region,
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key
        )
@property
def regions(self):
return self._connections
def region(self, region_name):
"""Access a pools specific region connections
:param region_name: region connection to be accessed
:type region_name: string
"""
if not region_name in self._connections:
raise NotConnectedError(
"No active connexion found for {} region, "
"please use .connect() method to proceed.".format(region_name)
)
return self._connections[region_name]
def add_region(self, region_name):
"""Connect the pool to a new region
:param region_name: Name of the region to connect to
:type region_name: string
"""
region_client = self._connect_module_to_region(region_name)
self._connections[region_name] = region_client
self._service_declaration.regions.append(region_name)
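# Minimal usage sketch (hypothetical subclass; the real ones live in
# mangrove.services): a concrete pool only has to name its boto service.
#
#   class Ec2Pool(ServicePool):
#       service = 'ec2'
#
#   pool = Ec2Pool(connect=True, regions=['eu-west-1', 'us-east-1'],
#                  default_region='eu-west-1')
#   eu = pool.region('eu-west-1')   # regionalized boto connection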
class ServiceMixinPool(object):
"""Multiple AWS services connection pool wrapper class
    ServiceMixinPool mixes ServicePool subclass instances
    into an independent pool. It can be pretty useful when you need
    to build your own custom pool exposing multiple services in
    multiple regions.
    For example, instead of instantiating different pools for each
    and every service you want to use, subclassing ServiceMixinPool
    allows you to create a pool exposing them transparently like
    so:
    .. code-block:: python
class MyPool(ServiceMixinPool):
services = {
'ec2': {
                'regions': '*', # Wildcard for "every region"
'default_region': 'eu-west-1'
},
'sqs': {
'regions': ['us-east-1', 'us-west-1', 'eu-west-1'],
'default_region': 'us-west-1',
},
}
pool = MyPool()
pool.ec2.eu_west_1.get_all_instances()
pool.s3.bucket('test')
...
    :param connect: Should the pool initialize its services' region
    connections on instantiation.
:type connect: bool
:param aws_access_key_id: aws access key token (if not provided
AWS_ACCESS_KEY_ID will be fetched from
environment)
:type aws_access_key_id: string
:param aws_secret_access_key: aws secret access key (if not provided
AWS_SECRET_ACCESS_KEY will be fetched from
environment)
:type aws_secret_access_key: string
"""
__meta__ = ABCMeta
# aws services to be added to the mixin pool. To add one, please
# respect the following pattern:
# 'service_name': {'regions': [], 'default_region'}
    # * regions parameter should be either a list of aws region names,
    # or the '*' wildcard (['*'])
    # * default_region parameter should be an aws region included in
    # the provided regions parameter
services = {}
def __init__(self, connect=False,
aws_access_key_id=None, aws_secret_access_key=None):
self._executor = ThreadPoolExecutor(max_workers=cpu_count())
self._services_declaration = ServicePoolDeclaration(self.services)
self._services_store = {}
self._load_services(connect)
def _load_services(self, connect=None, aws_access_key_id=None,
aws_secret_access_key=None):
"""Helper private method adding every services referenced services
to mixin pool
:param connect: Should the pool being connected to remote services
at startup.
:type connect: boolean
:param aws_access_key_id: aws access key token (if not provided
AWS_ACCESS_KEY_ID will be fetched from
environment)
:type aws_access_key_id: string
:param aws_secret_access_key: aws secret access key (if not provided
AWS_SECRET_ACCESS_KEY will be fetched from
environment)
:type aws_secret_access_key: string
"""
for service_name, localisation in self._services_declaration.iteritems():
self.add_service(
service_name,
connect=connect,
regions=localisation.regions,
default_region=localisation.default_region,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key
)
def connect(self):
"""Connects every services in the pool"""
for name, pool in self._services_store.iteritems():
pool.connect()
def add_service(self, service_name, connect=False,
regions=None, default_region=None,
aws_access_key_id=None, aws_secret_access_key=None):
"""Adds a service connection to the services pool
:param service_name: name of the AWS service to add
:type service_name: string
:param regions: AWS regions to connect the service to.
:type regions: list of strings
:param aws_access_key_id: aws access key token (if not provided
AWS_ACCESS_KEY_ID will be fetched from
environment)
:type aws_access_key_id: string
:param aws_secret_access_key: aws secret access key (if not provided
AWS_SECRET_ACCESS_KEY will be fetched from
environment)
:type aws_secret_access_key: string
"""
service_pool_kls = type(service_name.capitalize(), (ServicePool,), {})
service_pool_kls.service = service_name
service_pool_instance = service_pool_kls(
connect=False,
regions=regions,
default_region=default_region,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key
)
setattr(self, service_name, service_pool_instance)
if service_name not in self._services_store:
self._services_store[service_name] = service_pool_instance
if service_name not in self._services_declaration:
self._services_declaration[service_name].regions = regions or '*'
if default_region is not None:
self._services_declaration[service_name].default_region = default_region
return service_pool_instance
|
mit
| -903,598,347,621,880,300 | 38.272109 | 88 | 0.597696 | false | 4.531397 | false | false | false |
tehamalab/dgs
|
goals/models.py
|
1
|
39889
|
import json
from django.db import models
from django.db.models.signals import m2m_changed
from django.dispatch import receiver
from django.core.exceptions import ValidationError
from django.contrib.postgres.fields import HStoreField, ArrayField
from django.utils.translation import ugettext_lazy as _
from django.template.defaultfilters import slugify, truncatechars
from django.utils.functional import cached_property
from django.core.urlresolvers import reverse
from mptt.models import MPTTModel, TreeForeignKey
from mptt.signals import node_moved
from imagekit.models import ImageSpecField
from imagekit.processors import ResizeToFit
def area_type_topo_path(instance, filename):
return 'topojson/areatype/{0}/{1}'.format(instance.code, filename)
class AreaType(models.Model):
code = models.CharField(_('Code'), max_length=20, unique=True)
name = models.CharField(_('Name'), max_length=255)
description = models.TextField(_('Description'), blank=True)
topojson = models.FileField(_('TopoJSON'), blank=True, null=True,
upload_to=area_type_topo_path)
created = models.DateTimeField(_('Created'), auto_now_add=True)
last_modified = models.DateTimeField(_('Last modified'),
auto_now=True)
extras = HStoreField(_('Extras'), blank=True, null=True, default={})
class Meta:
verbose_name = _('Area Type')
verbose_name_plural = _('Area Types')
def __str__(self):
return self.name
class Area(MPTTModel):
parent = TreeForeignKey('self', null=True, blank=True,
related_name='children', db_index=True)
code = models.CharField(_('Area code'), max_length=20, unique=True)
name = models.CharField(_('Area name'), max_length=255)
type = models.ForeignKey('goals.AreaType',
verbose_name=_('Area type'),
related_name='areas')
description = models.TextField(_('Area description'), blank=True)
image = models.ImageField(_('Image'),
upload_to='goals/areas/images',
blank=True, null=True)
image_small = ImageSpecField(source='image',
processors=[ResizeToFit(100, 100)],
format='PNG',
options={'quality': 90})
image_medium = ImageSpecField(source='image',
processors=[ResizeToFit(250, 250)],
format='PNG',
options={'quality': 90})
image_large = ImageSpecField(source='image',
processors=[ResizeToFit(700)],
options={'quality': 80})
slug = models.SlugField(_('Slug'), blank=True)
created = models.DateTimeField(_('Created'), auto_now_add=True)
last_modified = models.DateTimeField(_('Last modified'),
auto_now=True)
extras = HStoreField(_('Extras'), blank=True, null=True, default={})
class Meta:
verbose_name = _('Area')
verbose_name_plural = _('Areas')
def __str__(self):
return self.name
def save(self, *args, **kwargs):
if not self.slug:
self.slug = self.get_slug()
if self.type:
self.extras['type_code'] = self.type.code
self.extras['type_name'] = self.type.name
super(Area, self).save(*args, **kwargs)
def get_slug(self):
if not self.slug:
slug = slugify(self.name[:50])
return slug
return self.slug
@cached_property
def api_url(self):
try:
return reverse('area-detail', args=[self.pk])
except:
# API isn't installed
# FIXME: Catch a specific exception
return ''
@cached_property
def type_code(self):
if self.type:
return self.extras.get('type_code', '') or self.type.code
return ""
@cached_property
def type_name(self):
if self.type:
return self.extras.get('type_name', '') or self.type.name
return ""
class SectorType(models.Model):
code = models.CharField(_('Code'), max_length=20, unique=True)
name = models.CharField(_('Name'), max_length=255)
description = models.TextField(_('Description'), blank=True)
created = models.DateTimeField(_('Created'), auto_now_add=True)
last_modified = models.DateTimeField(_('Last modified'),
auto_now=True)
extras = HStoreField(_('Extras'), blank=True, null=True, default={})
class Meta:
verbose_name = _('Sector Type')
verbose_name_plural = _('Sector Types')
def __str__(self):
return self.name
class Sector(MPTTModel):
themes = models.ManyToManyField('goals.Theme', verbose_name='Themes',
related_name='sectors')
parent = TreeForeignKey('self', null=True, blank=True,
related_name='children', db_index=True)
name = models.CharField(_('Sector name'), max_length=255)
code = models.CharField(_('Sector code'), max_length=20)
type = models.ForeignKey('goals.SectorType',
verbose_name=_('Sector type'),
                             related_name='sectors')
description = models.TextField(_('Sector description'), blank=True)
image = models.ImageField(_('Image'),
upload_to='goals/sectors/images',
blank=True, null=True)
image_small = ImageSpecField(source='image',
processors=[ResizeToFit(100, 100)],
format='PNG',
options={'quality': 90})
image_medium = ImageSpecField(source='image',
processors=[ResizeToFit(250, 250)],
format='PNG',
options={'quality': 90})
image_large = ImageSpecField(source='image',
processors=[ResizeToFit(700)],
options={'quality': 80})
slug = models.SlugField(_('Slug'), blank=True)
created = models.DateTimeField(_('Created'), auto_now_add=True)
last_modified = models.DateTimeField(_('Last modified'),
auto_now=True)
extras = HStoreField(_('Extras'), blank=True, null=True, default={})
class Meta:
verbose_name = _('Sector')
verbose_name_plural = _('Sectors')
def __str__(self):
return self.name
def save(self, *args, **kwargs):
if not self.slug:
self.slug = self.get_slug()
if self.type:
self.extras['type_code'] = self.type.code
self.extras['type_name'] = self.type.name
if self.parent:
self.extras['parent_name'] = self.parent.name
super(Sector, self).save(*args, **kwargs)
def get_slug(self):
if not self.slug:
slug = slugify(self.name[:50])
return slug
return self.slug
@cached_property
def image_url(self):
if self.image:
return self.image.url
@cached_property
def image_small_url(self):
if self.image_small:
return self.image_small.url
@cached_property
def image_medium_url(self):
if self.image_medium:
return self.image_medium.url
@cached_property
def image_large_url(self):
if self.image_large:
return self.image_large.url
@cached_property
def type_code(self):
return self.extras.get('type_code', '') or self.type.code
@cached_property
def type_name(self):
return self.extras.get('type_name', '') or self.type.name
@cached_property
def parent_name(self):
if self.parent:
return self.extras.get('parent_name', '') or self.parent.name
return ''
@cached_property
def ancestors_ids(self):
return json.loads(self.extras.get('ancestors_ids', '[]'))\
or [ancestor.id for ancestor in self.get_ancestors()]
@cached_property
def ancestors_codes(self):
return json.loads(self.extras.get('ancestors_codes', '[]'))\
or [ancestor.code for ancestor in self.get_ancestors()]
@cached_property
def ancestors_names(self):
return json.loads(self.extras.get('ancestors_names', '[]'))\
or [ancestor.name for ancestor in self.get_ancestors()]
@cached_property
def themes_codes(self):
return json.loads(self.extras.get('themes_codes', '[]'))
@cached_property
def themes_names(self):
return json.loads(self.extras.get('themes_names', '[]'))
@cached_property
def plans_ids(self):
return json.loads(self.extras.get('plans_ids', '[]'))
@cached_property
def plans_codes(self):
return json.loads(self.extras.get('plans_codes', '[]'))
@cached_property
def plans_names(self):
return json.loads(self.extras.get('plans_names', '[]'))
@cached_property
def api_url(self):
try:
return reverse('sector-detail', args=[self.pk])
except:
# API isn't installed
# FIXME: Catch a specific exception
return ''
class Plan(models.Model):
code = models.CharField(_('code'), max_length=10,
unique=True)
name = models.CharField(_('Name'), max_length=255)
caption = models.CharField(_('Caption'), max_length=255, blank=True)
description = models.TextField(_('Description'), blank=True)
image = models.ImageField(_('Image'),
upload_to='goals/goals/images',
blank=True, null=True)
image_small = ImageSpecField(source='image',
processors=[ResizeToFit(100, 100)],
format='PNG',
options={'quality': 90})
image_medium = ImageSpecField(source='image',
processors=[ResizeToFit(250, 250)],
format='PNG',
options={'quality': 90})
image_large = ImageSpecField(source='image',
processors=[ResizeToFit(700)],
options={'quality': 80})
slug = models.SlugField(_('Slug'), blank=True)
created = models.DateTimeField(_('Created'), auto_now_add=True)
last_modified = models.DateTimeField(_('Last modified'),
auto_now=True)
extras = HStoreField(_('Extras'), blank=True, null=True, default={})
class Meta:
verbose_name = _('Plan')
verbose_name_plural = _('Plans')
def __str__(self):
return self.name
def save(self, *args, **kwargs):
if not self.slug:
self.slug = self.get_slug()
super(Plan, self).save(*args, **kwargs)
def get_slug(self):
if not self.slug:
slug = slugify(self.name[:50])
return slug
return self.slug
@cached_property
def api_url(self):
try:
return reverse('plan-detail', args=[self.pk])
except:
# API isn't installed
# FIXME: Catch a specific exception
return ''
@cached_property
def image_url(self):
if self.image:
return self.image.url
@cached_property
def image_small_url(self):
if self.image_small:
return self.image_small.url
@cached_property
def image_medium_url(self):
if self.image_medium:
return self.image_medium.url
@cached_property
def image_large_url(self):
if self.image_large:
return self.image_large.url
class Theme(models.Model):
plan = models.ForeignKey('goals.Plan', verbose_name='Plans',
related_name='themes')
name = models.CharField(_('Theme name'), max_length=255)
code = models.CharField(_('Theme number'), max_length=10)
caption = models.CharField(_('Caption'), max_length=255, blank=True)
description = models.TextField(_('Theme description'), blank=True)
image = models.ImageField(_('Image'),
upload_to='goals/themes/images',
blank=True, null=True)
image_small = ImageSpecField(source='image',
processors=[ResizeToFit(100, 100)],
format='PNG',
options={'quality': 90})
image_medium = ImageSpecField(source='image',
processors=[ResizeToFit(250, 250)],
format='PNG',
options={'quality': 90})
image_large = ImageSpecField(source='image',
processors=[ResizeToFit(700)],
options={'quality': 80})
slug = models.SlugField(_('Slug'), blank=True)
created = models.DateTimeField(_('Created'), auto_now_add=True)
last_modified = models.DateTimeField(_('Last modified'),
auto_now=True)
extras = HStoreField(_('Extras'), blank=True, null=True, default={})
class Meta:
verbose_name = _('Theme')
verbose_name_plural = _('Themes')
def __str__(self):
return self.name
def save(self, *args, **kwargs):
self.extras['plan_name'] = self.plan.name
self.extras['plan_code'] = self.plan.code
if not self.slug:
self.slug = self.get_slug()
super(Theme, self).save(*args, **kwargs)
@cached_property
def image_url(self):
if self.image:
return self.image.url
@cached_property
def image_small_url(self):
if self.image_small:
return self.image_small.url
@cached_property
def image_medium_url(self):
if self.image_medium:
return self.image_medium.url
@cached_property
def image_large_url(self):
if self.image_large:
return self.image_large.url
@cached_property
def plan_name(self):
return self.extras.get('plan_name', '') or self.plan.name
@cached_property
def plan_code(self):
return self.extras.get('plan_code', '') or self.plan.code
@cached_property
def api_url(self):
try:
return reverse('theme-detail', args=[self.pk])
except:
# API isn't installed
# FIXME: Catch a specific exception
return ''
class Goal(models.Model):
plan = models.ForeignKey('goals.Plan', verbose_name='plan',
related_name='goals')
code = models.CharField(_('Goal number'), max_length=10)
name = models.CharField(_('Goal name'), max_length=255)
caption = models.CharField(_('Caption'), max_length=255, blank=True)
description = models.TextField(_('Goal description'), blank=True)
image = models.ImageField(_('Image'),
upload_to='goals/goals/images',
blank=True, null=True)
image_small = ImageSpecField(source='image',
processors=[ResizeToFit(100, 100)],
format='PNG',
options={'quality': 90})
image_medium = ImageSpecField(source='image',
processors=[ResizeToFit(250, 250)],
format='PNG',
options={'quality': 90})
image_large = ImageSpecField(source='image',
processors=[ResizeToFit(700)],
options={'quality': 80})
slug = models.SlugField(_('Slug'), blank=True)
created = models.DateTimeField(_('Created'), auto_now_add=True)
last_modified = models.DateTimeField(_('Last modified'),
auto_now=True)
extras = HStoreField(_('Extras'), blank=True, null=True, default={})
class Meta:
verbose_name = _('Goal')
verbose_name_plural = _('Goals')
unique_together = ['code', 'plan']
def __str__(self):
return self.name
def save(self, *args, **kwargs):
if not self.slug:
self.slug = self.get_slug()
self.extras['plan_name'] = self.plan.name
self.extras['plan_code'] = self.plan.code
super(Goal, self).save(*args, **kwargs)
def get_slug(self):
if not self.slug:
slug = slugify(self.name[:50])
return slug
return self.slug
@cached_property
def api_url(self):
try:
return reverse('goal-detail', args=[self.pk])
except:
# API isn't installed
# FIXME: Catch a specific exception
return ''
@cached_property
def image_url(self):
if self.image:
return self.image.url
@cached_property
def image_small_url(self):
if self.image_small:
return self.image_small.url
@cached_property
def image_medium_url(self):
if self.image_medium:
return self.image_medium.url
@cached_property
def image_large_url(self):
if self.image_large:
return self.image_large.url
@cached_property
def plan_name(self):
return self.extras.get('plan_name', '') or self.plan.name
@cached_property
def plan_code(self):
return self.extras.get('plan_code', '') or self.plan.code
class Target(models.Model):
goal = models.ForeignKey(Goal, verbose_name=_('Goal'),
related_name='targets')
code = models.CharField(_('Target number'), max_length=10)
name = models.CharField(_('Target'), max_length=255)
description = models.TextField(_('Target description'),
blank=True)
image = models.ImageField(_('Image'),
upload_to='goals/targets/images',
blank=True, null=True)
image_small = ImageSpecField(source='image',
processors=[ResizeToFit(100, 100)],
format='PNG',
options={'quality': 90})
image_medium = ImageSpecField(source='image',
processors=[ResizeToFit(250, 250)],
format='PNG',
options={'quality': 90})
image_large = ImageSpecField(source='image',
processors=[ResizeToFit(700)],
options={'quality': 80})
slug = models.SlugField(_('Slug'), blank=True)
created = models.DateTimeField(_('Created'), auto_now_add=True)
last_modified = models.DateTimeField(_('Last modified'),
auto_now=True)
extras = HStoreField(_('Extras'), blank=True, null=True, default=dict)
class Meta:
verbose_name = _('Target')
verbose_name_plural = _('Targets')
unique_together = ['code', 'goal']
def __str__(self):
return '%s %s : %s' % (self.plan_code, self.code, truncatechars(self.description, 50))
def save(self, *args, **kwargs):
if not self.slug:
self.slug = self.get_slug()
self.extras['goal_code'] = self.goal.code
self.extras['goal_name'] = self.goal.name
self.extras['plan_id'] = self.plan.id
self.extras['plan_code'] = self.plan.code
self.extras['plan_name'] = self.plan.name
super(Target, self).save(*args, **kwargs)
def get_slug(self):
if not self.slug:
slug = slugify(self.name[:50])
return slug
return self.slug
@cached_property
def api_url(self):
try:
return reverse('target-detail', args=[self.pk])
except Exception:
# API isn't installed
# FIXME: Catch a specific exception
return ''
@cached_property
def image_url(self):
if self.image:
return self.image.url
@cached_property
def image_small_url(self):
if self.image_small:
return self.image_small.url
@cached_property
def image_medium_url(self):
if self.image_medium:
return self.image_medium.url
@cached_property
def image_large_url(self):
if self.image_large:
return self.image_large.url
@cached_property
def goal_code(self):
return self.extras.get('goal_code', '') or self.goal.code
@cached_property
def goal_name(self):
return self.extras.get('goal_name', '') or self.goal.name
@cached_property
def plan(self):
return self.goal.plan
@cached_property
def plan_id(self):
return int(self.extras.get('plan_id', '0')) or self.goal.plan_id
@cached_property
def plan_code(self):
return self.extras.get('plan_code', '') or self.goal.plan_code
@cached_property
def plan_name(self):
return self.extras.get('plan_name', '') or self.goal.plan_name
class Indicator(models.Model):
theme = models.ForeignKey('goals.Theme', verbose_name=_('Theme'),
related_name='indicators', null=True, blank=True)
sector = models.ForeignKey('goals.Sector', verbose_name=_('Sector'),
related_name='indicators', null=True, blank=True)
target = models.ForeignKey(Target, verbose_name=_('Target'),
related_name='indicators', null=True, blank=True)
name = models.CharField(_('Indicator'), max_length=255)
code = models.CharField(_('Indicator number'), max_length=10)
description = models.TextField(_('Indicator description'),
blank=True)
image = models.ImageField(_('Image'),
upload_to='goals/indicators/images',
blank=True, null=True)
image_small = ImageSpecField(source='image',
processors=[ResizeToFit(100, 100)],
format='PNG',
options={'quality': 90})
image_medium = ImageSpecField(source='image',
processors=[ResizeToFit(250, 250)],
format='PNG',
options={'quality': 90})
image_large = ImageSpecField(source='image',
processors=[ResizeToFit(700)],
options={'quality': 80})
slug = models.SlugField(_('Slug'), blank=True)
created = models.DateTimeField(_('Created'), auto_now_add=True)
last_modified = models.DateTimeField(_('Last modified'),
auto_now=True)
extras = HStoreField(_('Extras'), blank=True, null=True, default=dict)
sectors_ids = ArrayField(
models.IntegerField(), null=True, blank=True, editable=False,
verbose_name=_('Sectors ids'), default=list)
plan_id = models.IntegerField(_('Plan ID'), null=True, blank=True,
editable=False)
class Meta:
verbose_name = _('Indicator')
verbose_name_plural = _('Indicators')
unique_together = ['code', 'target', 'sector', 'theme']
def __str__(self):
return '%s %s : %s' \
% (self.plan_code, self.code, self.name)
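# save() below denormalizes the codes/names (and a few ids) of the related
# theme, sector, target, goal and plan into ``extras`` so the cached
# properties further down can be answered without hitting those tables.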
def save(self, *args, **kwargs):
self.clean()
if not self.slug:
self.slug = self.get_slug()
if self.theme:
self.extras['theme_code'] = self.theme.code
self.extras['theme_name'] = self.theme.name
if self.sector:
self.sectors_ids = self.sector.ancestors_ids + [self.sector_id]
self.extras['sector_code'] = self.sector.code
self.extras['sector_name'] = self.sector.name
self.extras['sectors_codes'] = json.dumps(self.sector.ancestors_codes + [self.sector.code])
self.extras['sectors_names'] = json.dumps(self.sector.ancestors_names + [self.sector.name])
self.extras['sector_type_code'] = self.sector.type.code
self.extras['sector_type_name'] = self.sector.type.name
self.extras['root_sector_id'] = self.sector.get_root().id
self.extras['root_sector_code'] = self.sector.get_root().code
self.extras['root_sector_name'] = self.sector.get_root().name
if self.target:
self.extras['target_code'] = self.target.code
self.extras['target_name'] = self.target.name
if self.goal:
self.extras['goal_id'] = self.goal.id
self.extras['goal_code'] = self.goal.code
self.extras['goal_name'] = self.goal.name
if self.plan:
self.plan_id = self.plan.id
self.extras['plan_code'] = self.plan.code
self.extras['plan_name'] = self.plan.name
super(Indicator, self).save(*args, **kwargs)
def clean(self):
if self.theme and self.target:
if self.theme.plan_id != self.target.goal.plan_id:
raise ValidationError(
_('Theme and Target must belong to the same plan'))
def get_slug(self):
if not self.slug:
slug = slugify(self.name[:50])
return slug
return self.slug
@cached_property
def api_url(self):
try:
return reverse('indicator-detail', args=[self.pk])
except Exception:
# API isn't installed
# FIXME: Catch a specific exception
return ''
@cached_property
def image_url(self):
if self.image:
return self.image.url
@cached_property
def image_small_url(self):
if self.image_small:
return self.image_small.url
@cached_property
def image_medium_url(self):
if self.image_medium:
return self.image_medium.url
@cached_property
def image_large_url(self):
if self.image_large:
return self.image_large.url
@cached_property
def theme_code(self):
return self.extras.get('theme_code', '')
@cached_property
def theme_name(self):
return self.extras.get('theme_name', '')
@cached_property
def sectors_names(self):
if self.sector:
return json.loads(self.extras.get('sectors_names', '[]'))
return []
@cached_property
def sectors_codes(self):
if self.sector:
return json.loads(self.extras.get('sectors_codes', '[]'))
return []
@cached_property
def sector_type_code(self):
return self.extras.get('sector_type_code', '')
@cached_property
def sector_type_name(self):
return self.extras.get('sector_type_name', '')
@cached_property
def sector_code(self):
return self.extras.get('sector_code', '')
@cached_property
def sector_name(self):
return self.extras.get('sector_name', '')
@cached_property
def root_sector_id(self):
return int(self.extras.get('root_sector_id', '0')) or None
@cached_property
def root_sector_code(self):
return self.extras.get('root_sector_code', '')
@cached_property
def root_sector_name(self):
return self.extras.get('root_sector_name', '')
@cached_property
def target_code(self):
return self.extras.get('target_code', '')
@cached_property
def target_name(self):
return self.extras.get('target_name', '')
@cached_property
def goal(self):
if self.target:
return self.target.goal
return None
@cached_property
def goal_id(self):
return int(self.extras.get('goal_id', '0')) or None
@cached_property
def goal_code(self):
return self.extras.get('goal_code', '')
@cached_property
def goal_name(self):
return self.extras.get('goal_name', '')
@cached_property
def plan(self):
if self.target:
return self.target.goal.plan
elif self.theme:
return self.theme.plan
return None
@cached_property
def plan_code(self):
return self.extras.get('plan_code', '')
@cached_property
def plan_name(self):
return self.extras.get('plan_name', '')
def get_progress_count(self):
return Progress.objects.filter(component__indicators=self.id).count()
def get_progress_preview(self):
return Progress.objects.filter(component__indicators=self.id)\
.order_by('component__indicators', '-year')\
.distinct('component__indicators')
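# Note: get_progress_preview() relies on PostgreSQL ``DISTINCT ON`` semantics
# (order by the distinct field first, then '-year') to keep only the most
# recent row per ``component__indicators`` value.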
class Component(models.Model):
YES = 'YES'
NO = 'NO'
PARTIALLY = 'PARTIALLY'
UNKNOWN = 'UNKNOWN'
STATS_AVAILABLE_CHOICES = (
(YES, _('Yes')),
(NO, _('No')),
(PARTIALLY, _('Partially')),
(UNKNOWN, _('Unknown')),
)
indicators = models.ManyToManyField('goals.Indicator',
verbose_name=_('Indicators'),
related_name='components')
code = models.CharField(_('Component number'), max_length=10,
unique=True)
name = models.CharField(_('Component name'), max_length=255)
description = models.TextField(_('Component description'),
blank=True)
image = models.ImageField(_('Image'),
upload_to='goals/components/images',
blank=True, null=True)
image_small = ImageSpecField(source='image',
processors=[ResizeToFit(100, 100)],
format='PNG',
options={'quality': 90})
image_medium = ImageSpecField(source='image',
processors=[ResizeToFit(250, 250)],
format='PNG',
options={'quality': 90})
image_large = ImageSpecField(source='image',
processors=[ResizeToFit(700)],
options={'quality': 80})
target_value = models.FloatField(_('Target value'), blank=True,
null=True)
value_unit = models.CharField(_('Value unit'), blank=True,
max_length=50)
stats_available = models.CharField(
_('Statistics availble'), max_length=50, blank=True,
choices=STATS_AVAILABLE_CHOICES, default=UNKNOWN)
data_source = models.CharField(_('Data source'), max_length=255,
blank=True)
agency = models.CharField(_('Agency'), max_length=255, blank=True)
slug = models.SlugField(_('Slug'), blank=True)
created = models.DateTimeField(_('Created'), auto_now_add=True)
last_modified = models.DateTimeField(_('Last modified'),
auto_now=True)
extras = HStoreField(_('Extras'), blank=True, null=True, default=dict)
class Meta:
verbose_name = _('Component')
verbose_name_plural = _('Components')
def __str__(self):
return self.name
def save(self, *args, **kwargs):
if not self.slug:
self.slug = self.get_slug()
super(Component, self).save(*args, **kwargs)
def get_slug(self):
if not self.slug:
slug = slugify(self.name[:50])
return slug
return self.slug
@cached_property
def api_url(self):
try:
return reverse('component-detail', args=[self.pk])
except Exception:
# API isn't installed
# FIXME: Catch a specific exception
return ''
@cached_property
def image_url(self):
if self.image:
return self.image.url
@cached_property
def image_small_url(self):
if self.image_small:
return self.image_small.url
@cached_property
def image_medium_url(self):
if self.image_medium:
return self.image_medium.url
@cached_property
def image_large_url(self):
if self.image_large:
return self.image_large.url
@cached_property
def indicators_codes(self):
return json.loads(self.extras.get('indicators_codes', '[]')) \
or list(self.indicators.values_list('code', flat=True))
@cached_property
def indicators_names(self):
return json.loads(self.extras.get('indicators_names', '[]')) \
or list(self.indicators.values_list('name', flat=True))
@cached_property
def targets_ids(self):
return json.loads(self.extras.get('targets_ids', '[]'))
@cached_property
def targets_codes(self):
return json.loads(self.extras.get('targets_codes', '[]'))
@cached_property
def targets_names(self):
return json.loads(self.extras.get('targets_names', '[]'))
@cached_property
def goals_ids(self):
return json.loads(self.extras.get('goals_ids', '[]'))
@cached_property
def goals_codes(self):
return json.loads(self.extras.get('goals_codes', '[]'))
@cached_property
def goals_names(self):
return json.loads(self.extras.get('goals_names', '[]'))
@cached_property
def plans_ids(self):
return json.loads(self.extras.get('plans_ids', '[]'))
@cached_property
def plans_codes(self):
return json.loads(self.extras.get('plans_codes', '[]'))
@cached_property
def plans_names(self):
return json.loads(self.extras.get('plans_names', '[]'))
def get_progress_count(self):
return Progress.objects.filter(component=self.id).count()
class Progress(models.Model):
component = models.ForeignKey(Component,
verbose_name=_('Component'),
related_name='progress')
area = models.ForeignKey(Area, verbose_name=_('Area'),
related_name='progress')
groups = ArrayField(
models.CharField(max_length=50, blank=True), null=True,
blank=True, verbose_name=_('Groups'), default=list)
year = models.PositiveIntegerField(_('Year'))
fiscal_year = models.CharField(_('Fiscal year'), max_length=9,
blank=True)
value = models.FloatField(_('Value'))
remarks = models.TextField(_('Remarks'), blank=True)
created = models.DateTimeField(_('Created'), auto_now_add=True)
last_modified = models.DateTimeField(_('Last modified'),
auto_now=True)
extras = HStoreField(_('Extras'), blank=True, null=True, default=dict)
class Meta:
verbose_name = _('Progress')
verbose_name_plural = _('Progress')
def __str__(self):
return '%d:%d' % (self.year, self.value)
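# As in the other models, save() copies the relevant area/component attributes
# into ``extras`` so a Progress row can be rendered without joining Area or
# Component.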
def save(self, *args, **kwargs):
self.extras['area_code'] = self.area.code
self.extras['area_name'] = self.area.name
self.extras['area_type_id'] = self.area.type_id
self.extras['area_type_code'] = self.area.type_code
self.extras['area_type_name'] = self.area.type_name
self.extras['component_code'] = self.component.code
self.extras['component_name'] = self.component.name
self.extras['value_unit'] = self.component.value_unit
super(Progress, self).save(*args, **kwargs)
@cached_property
def api_url(self):
try:
return reverse('progress-detail', args=[self.pk])
except Exception:
# API isn't installed
# FIXME: Catch a specific exception
return ''
@cached_property
def component_code(self):
return self.extras.get('component_code', '')\
or self.component.code
@cached_property
def component_name(self):
return self.extras.get('component_name', '')\
or self.component.name
@cached_property
def area_code(self):
return self.extras.get('area_code', '') or self.area.code
@cached_property
def area_name(self):
return self.extras.get('area_name', '') or self.area.name
@cached_property
def area_type_id(self):
return int(self.extras.get('area_type_id', 0))\
or self.area.type_id
@cached_property
def area_type_code(self):
return self.extras.get('area_type_code', '')\
or self.area.type_code
@cached_property
def area_type_name(self):
return self.extras.get('area_type_name', '')\
or self.area.type_name
@cached_property
def value_unit(self):
return self.extras.get('value_unit', '')
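# The signal receivers below keep the denormalized codes/names stored in
# ``extras`` in sync when the m2m relations (Sector.themes,
# Component.indicators) change or when a Sector node is moved.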
@receiver(m2m_changed, sender=Sector.themes.through)
def sector_themes_changed(sender, instance, action, **kwargs):
if action == 'post_add':
themes = instance.themes.prefetch_related('plan')
instance.extras['themes_codes'] = json.dumps([t.code for t in themes])
instance.extras['themes_names'] = json.dumps([t.name for t in themes])
instance.extras['plans_ids'] = json.dumps(list(set([t.plan.id for t in themes])))
instance.extras['plans_codes'] = json.dumps(list(set([t.plan.code for t in themes])))
instance.extras['plans_names'] = json.dumps(list(set([t.plan.name for t in themes])))
Sector.objects.filter(id=instance.id).update(extras=instance.extras)
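# Illustrative only (hypothetical shell session; variable names assumed):
#   sector.themes.add(theme)      # fires m2m_changed with action='post_add'
#   sector.refresh_from_db()
#   json.loads(sector.extras['themes_codes'])   # -> codes of the related themes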
@receiver(m2m_changed, sender=Component.indicators.through)
def component_indicators_changed(sender, instance, action, **kwargs):
if action == 'post_add':
indctrs = instance.indicators\
.prefetch_related('target', 'target__goal', 'target__goal__plan')
instance.extras['indicators_codes'] = json.dumps([i.code for i in indctrs])
instance.extras['indicators_names'] = json.dumps([i.name for i in indctrs])
instance.extras['targets_ids'] = json.dumps(list(set([i.target.id for i in indctrs if i.target])))
instance.extras['targets_codes'] = json.dumps(list(set([i.target.code for i in indctrs if i.target])))
instance.extras['targets_names'] = json.dumps(list(set([i.target.name for i in indctrs if i.target])))
instance.extras['goals_ids'] = json.dumps(list(set([i.target.goal.id for i in indctrs if i.target])))
instance.extras['goals_codes'] = json.dumps(list(set([i.target.goal.code for i in indctrs if i.target])))
instance.extras['goals_names'] = json.dumps(list(set([i.target.goal.name for i in indctrs if i.target])))
instance.extras['plans_ids'] = json.dumps(list(set([i.plan.id for i in indctrs if i.plan])))
instance.extras['plans_codes'] = json.dumps(list(set([i.plan.code for i in indctrs if i.plan])))
instance.extras['plans_names'] = json.dumps(list(set([i.plan.name for i in indctrs if i.plan])))
Component.objects.filter(id=instance.id).update(extras=instance.extras)
@receiver(node_moved, sender=Sector)
def sector_node_moved(sender, instance, **kwargs):
instance.extras['ancestors_ids'] = json.dumps(
[ancestor.id for ancestor in instance.get_ancestors()])
instance.extras['ancestors_codes'] = json.dumps(
[ancestor.code for ancestor in instance.get_ancestors()])
instance.extras['ancestors_names'] = json.dumps(
[ancestor.name for ancestor in instance.get_ancestors()])
Sector.objects.filter(id=instance.id).update(extras=instance.extras)
|
unlicense
| -7,689,396,014,878,925,000 | 35.56187 | 113 | 0.561558 | false | 4.167694 | false | false | false |
atheendra/access_keys
|
keystone/tests/test_v3_federation.py
|
1
|
58835
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
import uuid
from keystone.auth import controllers as auth_controllers
from keystone.common import dependency
from keystone.common import serializer
from keystone import config
from keystone.contrib.federation import controllers as federation_controllers
from keystone.contrib.federation import utils as mapping_utils
from keystone import exception
from keystone.openstack.common import jsonutils
from keystone.openstack.common import log
from keystone.tests import mapping_fixtures
from keystone.tests import test_v3
CONF = config.CONF
LOG = log.getLogger(__name__)
def dummy_validator(*args, **kwargs):
pass
@dependency.requires('federation_api')
class FederationTests(test_v3.RestfulTestCase):
EXTENSION_NAME = 'federation'
EXTENSION_TO_ADD = 'federation_extension'
class FederatedIdentityProviderTests(FederationTests):
"""A test class for Identity Providers."""
idp_keys = ['description', 'enabled']
default_body = {'description': None, 'enabled': True}
def base_url(self, suffix=None):
if suffix is not None:
return '/OS-FEDERATION/identity_providers/' + str(suffix)
return '/OS-FEDERATION/identity_providers'
def _fetch_attribute_from_response(self, resp, parameter,
assert_is_not_none=True):
"""Fetch single attribute from TestResponse object."""
result = resp.result.get(parameter)
if assert_is_not_none:
self.assertIsNotNone(result)
return result
def _create_and_decapsulate_response(self, body=None):
"""Create IdP and fetch it's random id along with entity."""
default_resp = self._create_default_idp(body=body)
idp = self._fetch_attribute_from_response(default_resp,
'identity_provider')
self.assertIsNotNone(idp)
idp_id = idp.get('id')
return (idp_id, idp)
def _get_idp(self, idp_id):
"""Fetch IdP entity based on it's id."""
url = self.base_url(suffix=idp_id)
resp = self.get(url)
return resp
def _create_default_idp(self, body=None):
"""Create default IdP."""
url = self.base_url(suffix=uuid.uuid4().hex)
if body is None:
body = self._http_idp_input()
resp = self.put(url, body={'identity_provider': body},
expected_status=201)
return resp
def _http_idp_input(self, **kwargs):
"""Create default input for IdP data."""
body = None
if 'body' not in kwargs:
body = self.default_body.copy()
body['description'] = uuid.uuid4().hex
else:
body = kwargs['body']
return body
def _assign_protocol_to_idp(self, idp_id=None, proto=None, url=None,
mapping_id=None, validate=True, **kwargs):
if url is None:
url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s')
if idp_id is None:
idp_id, _ = self._create_and_decapsulate_response()
if proto is None:
proto = uuid.uuid4().hex
if mapping_id is None:
mapping_id = uuid.uuid4().hex
body = {'mapping_id': mapping_id}
url = url % {'idp_id': idp_id, 'protocol_id': proto}
resp = self.put(url, body={'protocol': body}, **kwargs)
if validate:
self.assertValidResponse(resp, 'protocol', dummy_validator,
keys_to_check=['id', 'mapping_id'],
ref={'id': proto,
'mapping_id': mapping_id})
return (resp, idp_id, proto)
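# For reference, the helper above issues roughly:
#   PUT /OS-FEDERATION/identity_providers/{idp_id}/protocols/{protocol_id}
#   body: {'protocol': {'mapping_id': <mapping_id>}}
# (URL template and body are the ones built in the code above.)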
def _get_protocol(self, idp_id, protocol_id):
url = "%s/protocols/%s" % (idp_id, protocol_id)
url = self.base_url(suffix=url)
r = self.get(url)
return r
def test_create_idp(self):
"""Creates the IdentityProvider entity."""
keys_to_check = self.idp_keys
body = self._http_idp_input()
resp = self._create_default_idp(body=body)
self.assertValidResponse(resp, 'identity_provider', dummy_validator,
keys_to_check=keys_to_check,
ref=body)
def test_list_idps(self, iterations=5):
"""Lists all available IdentityProviders.
This test collects the ids of the IdPs it creates and
intersects them with the list of all available IdPs.
The list of all IdPs can be a superset of the IdPs created in this test,
because other tests also create IdPs.
"""
def get_id(resp):
r = self._fetch_attribute_from_response(resp,
'identity_provider')
return r.get('id')
ids = []
for _ in range(iterations):
id = get_id(self._create_default_idp())
ids.append(id)
ids = set(ids)
keys_to_check = self.idp_keys
url = self.base_url()
resp = self.get(url)
self.assertValidListResponse(resp, 'identity_providers',
dummy_validator,
keys_to_check=keys_to_check)
entities = self._fetch_attribute_from_response(resp,
'identity_providers')
entities_ids = set([e['id'] for e in entities])
ids_intersection = entities_ids.intersection(ids)
self.assertEqual(ids_intersection, ids)
def test_check_idp_uniqueness(self):
"""Add same IdP twice.
Expect HTTP 409 code for the latter call.
"""
url = self.base_url(suffix=uuid.uuid4().hex)
body = self._http_idp_input()
self.put(url, body={'identity_provider': body},
expected_status=201)
self.put(url, body={'identity_provider': body},
expected_status=409)
def test_get_idp(self):
"""Create and later fetch IdP."""
body = self._http_idp_input()
default_resp = self._create_default_idp(body=body)
default_idp = self._fetch_attribute_from_response(default_resp,
'identity_provider')
idp_id = default_idp.get('id')
url = self.base_url(suffix=idp_id)
resp = self.get(url)
self.assertValidResponse(resp, 'identity_provider',
dummy_validator, keys_to_check=body.keys(),
ref=body)
def test_get_nonexisting_idp(self):
"""Fetch nonexisting IdP entity.
Expected HTTP 404 status code.
"""
idp_id = uuid.uuid4().hex
self.assertIsNotNone(idp_id)
url = self.base_url(suffix=idp_id)
self.get(url, expected_status=404)
def test_delete_existing_idp(self):
"""Create and later delete IdP.
Expect HTTP 404 for the GET IdP call.
"""
default_resp = self._create_default_idp()
default_idp = self._fetch_attribute_from_response(default_resp,
'identity_provider')
idp_id = default_idp.get('id')
self.assertIsNotNone(idp_id)
url = self.base_url(suffix=idp_id)
self.delete(url)
self.get(url, expected_status=404)
def test_delete_nonexisting_idp(self):
"""Delete nonexisting IdP.
Expect HTTP 404 for the GET IdP call.
"""
idp_id = uuid.uuid4().hex
url = self.base_url(suffix=idp_id)
self.delete(url, expected_status=404)
def test_update_idp_mutable_attributes(self):
"""Update IdP's mutable parameters."""
default_resp = self._create_default_idp()
default_idp = self._fetch_attribute_from_response(default_resp,
'identity_provider')
idp_id = default_idp.get('id')
url = self.base_url(suffix=idp_id)
self.assertIsNotNone(idp_id)
_enabled = not default_idp.get('enabled')
body = {'description': uuid.uuid4().hex, 'enabled': _enabled}
body = {'identity_provider': body}
resp = self.patch(url, body=body)
updated_idp = self._fetch_attribute_from_response(resp,
'identity_provider')
body = body['identity_provider']
for key in body.keys():
self.assertEqual(body[key], updated_idp.get(key))
resp = self.get(url)
updated_idp = self._fetch_attribute_from_response(resp,
'identity_provider')
for key in body.keys():
self.assertEqual(body[key], updated_idp.get(key))
def test_update_idp_immutable_attributes(self):
"""Update IdP's immutable parameters.
Expect HTTP 403 code.
"""
default_resp = self._create_default_idp()
default_idp = self._fetch_attribute_from_response(default_resp,
'identity_provider')
idp_id = default_idp.get('id')
self.assertIsNotNone(idp_id)
body = self._http_idp_input()
body['id'] = uuid.uuid4().hex
body['protocols'] = [uuid.uuid4().hex, uuid.uuid4().hex]
url = self.base_url(suffix=idp_id)
self.patch(url, body={'identity_provider': body}, expected_status=403)
def test_update_nonexistent_idp(self):
"""Update nonexistent IdP
Expect HTTP 404 code.
"""
idp_id = uuid.uuid4().hex
url = self.base_url(suffix=idp_id)
body = self._http_idp_input()
body['enabled'] = False
body = {'identity_provider': body}
self.patch(url, body=body, expected_status=404)
def test_assign_protocol_to_idp(self):
"""Assign a protocol to existing IdP."""
self._assign_protocol_to_idp(expected_status=201)
def test_protocol_composite_pk(self):
"""Test whether Keystone let's add two entities with identical
names, however attached to different IdPs.
1. Add IdP and assign it protocol with predefined name
2. Add another IdP and assign it a protocol with same name.
Expect HTTP 201 code
"""
url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s')
kwargs = {'expected_status': 201}
self._assign_protocol_to_idp(proto='saml2',
url=url, **kwargs)
self._assign_protocol_to_idp(proto='saml2',
url=url, **kwargs)
def test_protocol_idp_pk_uniqueness(self):
"""Test whether Keystone checks for unique idp/protocol values.
Add the same protocol twice; expect Keystone to reject the latter call and
return an HTTP 409 code.
"""
url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s')
kwargs = {'expected_status': 201}
resp, idp_id, proto = self._assign_protocol_to_idp(proto='saml2',
url=url, **kwargs)
kwargs = {'expected_status': 409}
resp, idp_id, proto = self._assign_protocol_to_idp(idp_id=idp_id,
proto='saml2',
validate=False,
url=url, **kwargs)
def test_assign_protocol_to_nonexistent_idp(self):
"""Assign protocol to IdP that doesn't exist.
Expect HTTP 404 code.
"""
idp_id = uuid.uuid4().hex
kwargs = {'expected_status': 404}
self._assign_protocol_to_idp(proto='saml2',
idp_id=idp_id,
validate=False,
**kwargs)
def test_get_protocol(self):
"""Create and later fetch protocol tied to IdP."""
resp, idp_id, proto = self._assign_protocol_to_idp(expected_status=201)
proto_id = self._fetch_attribute_from_response(resp, 'protocol')['id']
url = "%s/protocols/%s" % (idp_id, proto_id)
url = self.base_url(suffix=url)
resp = self.get(url)
reference = {'id': proto_id}
self.assertValidResponse(resp, 'protocol',
dummy_validator,
keys_to_check=reference.keys(),
ref=reference)
def test_list_protocols(self):
"""Create set of protocols and later list them.
Compare input and output id sets.
"""
resp, idp_id, proto = self._assign_protocol_to_idp(expected_status=201)
iterations = random.randint(0, 16)
protocol_ids = []
for _ in range(iterations):
resp, _, proto = self._assign_protocol_to_idp(idp_id=idp_id,
expected_status=201)
proto_id = self._fetch_attribute_from_response(resp, 'protocol')
proto_id = proto_id['id']
protocol_ids.append(proto_id)
url = "%s/protocols" % idp_id
url = self.base_url(suffix=url)
resp = self.get(url)
self.assertValidListResponse(resp, 'protocols',
dummy_validator,
keys_to_check=['id'])
entities = self._fetch_attribute_from_response(resp, 'protocols')
entities = set([entity['id'] for entity in entities])
protocols_intersection = entities.intersection(protocol_ids)
self.assertEqual(protocols_intersection, set(protocol_ids))
def test_update_protocols_attribute(self):
"""Update protocol's attribute."""
resp, idp_id, proto = self._assign_protocol_to_idp(expected_status=201)
new_mapping_id = uuid.uuid4().hex
url = "%s/protocols/%s" % (idp_id, proto)
url = self.base_url(suffix=url)
body = {'mapping_id': new_mapping_id}
resp = self.patch(url, body={'protocol': body})
self.assertValidResponse(resp, 'protocol', dummy_validator,
keys_to_check=['id', 'mapping_id'],
ref={'id': proto,
'mapping_id': new_mapping_id}
)
def test_delete_protocol(self):
"""Delete protocol.
Expect HTTP 404 code for the GET call after the protocol is deleted.
"""
url = self.base_url(suffix='/%(idp_id)s/'
'protocols/%(protocol_id)s')
resp, idp_id, proto = self._assign_protocol_to_idp(expected_status=201)
url = url % {'idp_id': idp_id,
'protocol_id': proto}
self.delete(url)
self.get(url, expected_status=404)
class MappingCRUDTests(FederationTests):
"""A class for testing CRUD operations for Mappings."""
MAPPING_URL = '/OS-FEDERATION/mappings/'
def assertValidMappingListResponse(self, resp, *args, **kwargs):
return self.assertValidListResponse(
resp,
'mappings',
self.assertValidMapping,
keys_to_check=[],
*args,
**kwargs)
def assertValidMappingResponse(self, resp, *args, **kwargs):
return self.assertValidResponse(
resp,
'mapping',
self.assertValidMapping,
keys_to_check=[],
*args,
**kwargs)
def assertValidMapping(self, entity, ref=None):
self.assertIsNotNone(entity.get('id'))
self.assertIsNotNone(entity.get('rules'))
if ref:
self.assertEqual(jsonutils.loads(entity['rules']), ref['rules'])
return entity
def _create_default_mapping_entry(self):
url = self.MAPPING_URL + uuid.uuid4().hex
resp = self.put(url,
body={'mapping': mapping_fixtures.MAPPING_LARGE},
expected_status=201)
return resp
def _get_id_from_response(self, resp):
r = resp.result.get('mapping')
return r.get('id')
def test_mapping_create(self):
resp = self._create_default_mapping_entry()
self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_LARGE)
def test_mapping_list(self):
url = self.MAPPING_URL
self._create_default_mapping_entry()
resp = self.get(url)
entities = resp.result.get('mappings')
self.assertIsNotNone(entities)
self.assertResponseStatus(resp, 200)
self.assertValidListLinks(resp.result.get('links'))
self.assertEqual(len(entities), 1)
def test_mapping_delete(self):
url = self.MAPPING_URL + '%(mapping_id)s'
resp = self._create_default_mapping_entry()
mapping_id = self._get_id_from_response(resp)
url = url % {'mapping_id': str(mapping_id)}
resp = self.delete(url)
self.assertResponseStatus(resp, 204)
self.get(url, expected_status=404)
def test_mapping_get(self):
url = self.MAPPING_URL + '%(mapping_id)s'
resp = self._create_default_mapping_entry()
mapping_id = self._get_id_from_response(resp)
url = url % {'mapping_id': mapping_id}
resp = self.get(url)
self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_LARGE)
def test_mapping_update(self):
url = self.MAPPING_URL + '%(mapping_id)s'
resp = self._create_default_mapping_entry()
mapping_id = self._get_id_from_response(resp)
url = url % {'mapping_id': mapping_id}
resp = self.patch(url,
body={'mapping': mapping_fixtures.MAPPING_SMALL})
self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_SMALL)
resp = self.get(url)
self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_SMALL)
def test_delete_mapping_dne(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.delete(url, expected_status=404)
def test_get_mapping_dne(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.get(url, expected_status=404)
def test_create_mapping_bad_requirements(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=400,
body={'mapping': mapping_fixtures.MAPPING_BAD_REQ})
def test_create_mapping_no_rules(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=400,
body={'mapping': mapping_fixtures.MAPPING_NO_RULES})
def test_create_mapping_no_remote_objects(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=400,
body={'mapping': mapping_fixtures.MAPPING_NO_REMOTE})
def test_create_mapping_bad_value(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=400,
body={'mapping': mapping_fixtures.MAPPING_BAD_VALUE})
def test_create_mapping_missing_local(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=400,
body={'mapping': mapping_fixtures.MAPPING_MISSING_LOCAL})
def test_create_mapping_missing_type(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=400,
body={'mapping': mapping_fixtures.MAPPING_MISSING_TYPE})
def test_create_mapping_wrong_type(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=400,
body={'mapping': mapping_fixtures.MAPPING_WRONG_TYPE})
def test_create_mapping_extra_remote_properties_not_any_of(self):
url = self.MAPPING_URL + uuid.uuid4().hex
mapping = mapping_fixtures.MAPPING_EXTRA_REMOTE_PROPS_NOT_ANY_OF
self.put(url, expected_status=400, body={'mapping': mapping})
def test_create_mapping_extra_remote_properties_any_one_of(self):
url = self.MAPPING_URL + uuid.uuid4().hex
mapping = mapping_fixtures.MAPPING_EXTRA_REMOTE_PROPS_ANY_ONE_OF
self.put(url, expected_status=400, body={'mapping': mapping})
def test_create_mapping_extra_remote_properties_just_type(self):
url = self.MAPPING_URL + uuid.uuid4().hex
mapping = mapping_fixtures.MAPPING_EXTRA_REMOTE_PROPS_JUST_TYPE
self.put(url, expected_status=400, body={'mapping': mapping})
def test_create_mapping_empty_map(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=400,
body={'mapping': {}})
def test_create_mapping_extra_rules_properties(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=400,
body={'mapping': mapping_fixtures.MAPPING_EXTRA_RULES_PROPS})
class MappingRuleEngineTests(FederationTests):
"""A class for testing the mapping rule engine."""
def test_rule_engine_any_one_of_and_direct_mapping(self):
"""Should return user's name and group id EMPLOYEE_GROUP_ID.
The ADMIN_ASSERTION should successfully have a match in MAPPING_LARGE.
This will test the case where `any_one_of` is valid, and there is
a direct mapping for the user's name.
"""
mapping = mapping_fixtures.MAPPING_LARGE
assertion = mapping_fixtures.ADMIN_ASSERTION
rp = mapping_utils.RuleProcessor(mapping['rules'])
values = rp.process(assertion)
fn = assertion.get('FirstName')
ln = assertion.get('LastName')
full_name = '%s %s' % (fn, ln)
group_ids = values.get('group_ids')
name = values.get('name')
self.assertIn(mapping_fixtures.EMPLOYEE_GROUP_ID, group_ids)
self.assertEqual(name, full_name)
def test_rule_engine_no_regex_match(self):
"""Should deny authorization, the email of the tester won't match.
This will not match since the email in the assertion will fail
the regex test. It is set to match any @example.com address.
But the incoming value is set to [email protected].
RuleProcessor should raise exception.Unauthorized exception.
"""
mapping = mapping_fixtures.MAPPING_LARGE
assertion = mapping_fixtures.BAD_TESTER_ASSERTION
rp = mapping_utils.RuleProcessor(mapping['rules'])
self.assertRaises(exception.Unauthorized,
rp.process, assertion)
def test_rule_engine_any_one_of_many_rules(self):
"""Should return group CONTRACTOR_GROUP_ID.
The CONTRACTOR_ASSERTION should successfully have a match in
MAPPING_SMALL. This will test the case where many rules
must be matched, including an `any_one_of`, and a direct
mapping.
"""
mapping = mapping_fixtures.MAPPING_SMALL
assertion = mapping_fixtures.CONTRACTOR_ASSERTION
rp = mapping_utils.RuleProcessor(mapping['rules'])
values = rp.process(assertion)
user_name = assertion.get('UserName')
group_ids = values.get('group_ids')
name = values.get('name')
self.assertEqual(user_name, name)
self.assertIn(mapping_fixtures.CONTRACTOR_GROUP_ID, group_ids)
def test_rule_engine_not_any_of_and_direct_mapping(self):
"""Should return user's name and email.
The CUSTOMER_ASSERTION should successfully have a match in
MAPPING_LARGE. This will test the case where a requirement
has `not_any_of`, and direct mapping to a username, no group.
"""
mapping = mapping_fixtures.MAPPING_LARGE
assertion = mapping_fixtures.CUSTOMER_ASSERTION
rp = mapping_utils.RuleProcessor(mapping['rules'])
values = rp.process(assertion)
user_name = assertion.get('UserName')
group_ids = values.get('group_ids')
name = values.get('name')
self.assertEqual(name, user_name)
self.assertEqual(group_ids, [])
def test_rule_engine_not_any_of_many_rules(self):
"""Should return group EMPLOYEE_GROUP_ID.
The EMPLOYEE_ASSERTION should successfully have a match in
MAPPING_SMALL. This will test the case where many remote
rules must be matched, including a `not_any_of`.
"""
mapping = mapping_fixtures.MAPPING_SMALL
assertion = mapping_fixtures.EMPLOYEE_ASSERTION
rp = mapping_utils.RuleProcessor(mapping['rules'])
values = rp.process(assertion)
user_name = assertion.get('UserName')
group_ids = values.get('group_ids')
name = values.get('name')
self.assertEqual(name, user_name)
self.assertIn(mapping_fixtures.EMPLOYEE_GROUP_ID, group_ids)
def _rule_engine_regex_match_and_many_groups(self, assertion):
"""Should return group DEVELOPER_GROUP_ID and TESTER_GROUP_ID.
A helper function injecting assertion passed as an argument.
Expect DEVELOPER_GROUP_ID and TESTER_GROUP_ID in the results.
"""
mapping = mapping_fixtures.MAPPING_LARGE
rp = mapping_utils.RuleProcessor(mapping['rules'])
values = rp.process(assertion)
user_name = assertion.get('UserName')
group_ids = values.get('group_ids')
name = values.get('name')
self.assertEqual(user_name, name)
self.assertIn(mapping_fixtures.DEVELOPER_GROUP_ID, group_ids)
self.assertIn(mapping_fixtures.TESTER_GROUP_ID, group_ids)
def test_rule_engine_regex_match_and_many_groups(self):
"""Should return group DEVELOPER_GROUP_ID and TESTER_GROUP_ID.
The TESTER_ASSERTION should successfully have a match in
MAPPING_LARGE. This will test a successful regex match
for an `any_one_of` evaluation type, and will have many
groups returned.
"""
self._rule_engine_regex_match_and_many_groups(
mapping_fixtures.TESTER_ASSERTION)
def test_rule_engine_discards_nonstring_objects(self):
"""Check whether RuleProcessor discards non string objects.
Despite the fact that assertion is malformed and contains
non string objects, RuleProcessor should correctly discard them and
successfully have a match in MAPPING_LARGE.
"""
self._rule_engine_regex_match_and_many_groups(
mapping_fixtures.MALFORMED_TESTER_ASSERTION)
def test_rule_engine_fails_after_discarding_nonstring(self):
"""Check whether RuleProcessor discards non string objects.
Expect RuleProcessor to discard non string object, which
is required for a correct rule match. Since no rules are
matched expect RuleProcessor to raise exception.Unauthorized
exception.
"""
mapping = mapping_fixtures.MAPPING_SMALL
rp = mapping_utils.RuleProcessor(mapping['rules'])
assertion = mapping_fixtures.CONTRACTOR_MALFORMED_ASSERTION
self.assertRaises(exception.Unauthorized,
rp.process, assertion)
class FederatedTokenTests(FederationTests):
IDP = 'ORG_IDP'
PROTOCOL = 'saml2'
AUTH_METHOD = 'saml2'
USER = 'user@ORGANIZATION'
ASSERTION_PREFIX = 'PREFIX_'
UNSCOPED_V3_SAML2_REQ = {
"identity": {
"methods": [AUTH_METHOD],
AUTH_METHOD: {
"identity_provider": IDP,
"protocol": PROTOCOL
}
}
}
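# Note: UNSCOPED_V3_SAML2_REQ is only the contents of the ``auth`` key; the
# tests pass it straight to Auth.authenticate_for_token(), whereas the bodies
# built by _scope_request() below wrap their payload in a top-level 'auth'.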
AUTH_URL = '/auth/tokens'
def load_fixtures(self, fixtures):
super(FederationTests, self).load_fixtures(fixtures)
self.load_federation_sample_data()
def idp_ref(self, id=None):
idp = {
'id': id or uuid.uuid4().hex,
'enabled': True,
'description': uuid.uuid4().hex
}
return idp
def proto_ref(self, mapping_id=None):
proto = {
'id': uuid.uuid4().hex,
'mapping_id': mapping_id or uuid.uuid4().hex
}
return proto
def mapping_ref(self, rules=None):
return {
'id': uuid.uuid4().hex,
'rules': rules or self.rules['rules']
}
def _assertSerializeToXML(self, json_body):
"""Serialize JSON body to XML.
Serialize JSON body to XML, then deserialize to JSON
again. Expect both JSON dictionaries to be equal.
"""
xml_body = serializer.to_xml(json_body)
json_deserialized = serializer.from_xml(xml_body)
self.assertDictEqual(json_deserialized, json_body)
def _scope_request(self, unscoped_token_id, scope, scope_id):
return {
'auth': {
'identity': {
'methods': [
self.AUTH_METHOD
],
self.AUTH_METHOD: {
'id': unscoped_token_id
}
},
'scope': {
scope: {
'id': scope_id
}
}
}
}
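# For illustration, _scope_request(token_id, 'project', project_id) builds:
#   {'auth': {'identity': {'methods': ['saml2'], 'saml2': {'id': token_id}},
#             'scope': {'project': {'id': project_id}}}}
# i.e. a standard v3 scoping request that re-authenticates with the unscoped
# token id as the saml2 method payload.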
def _project(self, project):
return (project['id'], project['name'])
def _roles(self, roles):
return set([(r['id'], r['name']) for r in roles])
def _check_projects_and_roles(self, token, roles, projects):
"""Check whether the projects and the roles match."""
token_roles = token.get('roles')
if token_roles is None:
raise AssertionError('Roles not found in the token')
token_roles = self._roles(token_roles)
roles_ref = self._roles(roles)
self.assertEqual(token_roles, roles_ref)
token_projects = token.get('project')
if token_projects is None:
raise AssertionError('Projects not found in the token')
token_projects = self._project(token_projects)
projects_ref = self._project(projects)
self.assertEqual(token_projects, projects_ref)
def _check_scoped_token_attributes(self, token):
def xor_project_domain(iterable):
return sum(('project' in iterable, 'domain' in iterable)) % 2
for obj in ('user', 'catalog', 'expires_at', 'issued_at',
'methods', 'roles'):
self.assertIn(obj, token)
# Check for either project or domain
if not xor_project_domain(token.keys()):
raise AssertionError("You must specify either "
                     "project or domain.")
def _issue_unscoped_token(self, assertion='EMPLOYEE_ASSERTION'):
api = federation_controllers.Auth()
context = {'environment': {}}
self._inject_assertion(context, assertion)
r = api.federated_authentication(context, self.IDP, self.PROTOCOL)
return r
def test_issue_unscoped_token(self):
r = self._issue_unscoped_token()
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
def test_issue_unscoped_token_serialize_to_xml(self):
"""Issue unscoped token and serialize to XML.
Make sure common.serializer doesn't complain about
the response structure and tag names.
"""
r = self._issue_unscoped_token()
token_resp = r.json_body
# Remove 'extras' if empty or None,
# as JSON and XML (de)serializers treat
# them differently, making dictionaries
# comparisons fail.
if not token_resp['token'].get('extras'):
token_resp['token'].pop('extras')
self._assertSerializeToXML(token_resp)
def test_issue_unscoped_token_no_groups(self):
self.assertRaises(exception.Unauthorized,
self._issue_unscoped_token,
assertion='BAD_TESTER_ASSERTION')
def test_issue_unscoped_token_malformed_environment(self):
"""Test whether non string objects are filtered out.
Put non string objects into the environment, inject
correct assertion and try to get an unscoped token.
Expect server not to fail on using split() method on
non string objects and return token id in the HTTP header.
"""
api = auth_controllers.Auth()
context = {
'environment': {
'malformed_object': object(),
'another_bad_idea': tuple(xrange(10)),
'yet_another_bad_param': dict(zip(uuid.uuid4().hex,
range(32)))
}
}
self._inject_assertion(context, 'EMPLOYEE_ASSERTION')
r = api.authenticate_for_token(context, self.UNSCOPED_V3_SAML2_REQ)
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
def test_scope_to_project_once(self):
r = self.post(self.AUTH_URL,
body=self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_EMPLOYEE)
token_resp = r.result['token']
project_id = token_resp['project']['id']
self.assertEqual(project_id, self.proj_employees['id'])
self._check_scoped_token_attributes(token_resp)
roles_ref = [self.role_employee]
projects_ref = self.proj_employees
self._check_projects_and_roles(token_resp, roles_ref, projects_ref)
def test_scope_to_bad_project(self):
"""Scope unscoped token with a project we don't have access to."""
self.post(self.AUTH_URL,
body=self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_CUSTOMER,
expected_status=401)
def test_scope_to_project_multiple_times(self):
"""Try to scope the unscoped token multiple times.
The new tokens should be scoped to:
* Customers' project
* Employees' project
"""
bodies = (self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_ADMIN,
self.TOKEN_SCOPE_PROJECT_CUSTOMER_FROM_ADMIN)
project_ids = (self.proj_employees['id'],
self.proj_customers['id'])
for body, project_id_ref in zip(bodies, project_ids):
r = self.post(self.AUTH_URL, body=body)
token_resp = r.result['token']
project_id = token_resp['project']['id']
self.assertEqual(project_id, project_id_ref)
self._check_scoped_token_attributes(token_resp)
def test_scope_token_from_nonexistent_unscoped_token(self):
"""Try to scope token from non-existent unscoped token."""
self.post(self.AUTH_URL,
body=self.TOKEN_SCOPE_PROJECT_FROM_NONEXISTENT_TOKEN,
expected_status=404)
def test_issue_token_from_rules_without_user(self):
api = auth_controllers.Auth()
context = {'environment': {}}
self._inject_assertion(context, 'BAD_TESTER_ASSERTION')
self.assertRaises(exception.Unauthorized,
api.authenticate_for_token,
context, self.UNSCOPED_V3_SAML2_REQ)
def test_issue_token_with_nonexistent_group(self):
"""Inject assertion that matches rule issuing bad group id.
Expect server to find out that some groups are missing in the
backend and raise exception.MappedGroupNotFound exception.
"""
self.assertRaises(exception.MappedGroupNotFound,
self._issue_unscoped_token,
assertion='CONTRACTOR_ASSERTION')
def test_scope_to_domain_once(self):
r = self.post(self.AUTH_URL,
body=self.TOKEN_SCOPE_DOMAIN_A_FROM_CUSTOMER)
token_resp = r.result['token']
domain_id = token_resp['domain']['id']
self.assertEqual(domain_id, self.domainA['id'])
self._check_scoped_token_attributes(token_resp)
def test_scope_to_domain_multiple_tokens(self):
"""Issue multiple tokens scoping to different domains.
The new tokens should be scoped to:
* domainA
* domainB
* domainC
"""
bodies = (self.TOKEN_SCOPE_DOMAIN_A_FROM_ADMIN,
self.TOKEN_SCOPE_DOMAIN_B_FROM_ADMIN,
self.TOKEN_SCOPE_DOMAIN_C_FROM_ADMIN)
domain_ids = (self.domainA['id'],
self.domainB['id'],
self.domainC['id'])
for body, domain_id_ref in zip(bodies, domain_ids):
r = self.post(self.AUTH_URL, body=body)
token_resp = r.result['token']
domain_id = token_resp['domain']['id']
self.assertEqual(domain_id, domain_id_ref)
self._check_scoped_token_attributes(token_resp)
def test_list_projects(self):
url = '/OS-FEDERATION/projects'
token = (self.tokens['CUSTOMER_ASSERTION'],
self.tokens['EMPLOYEE_ASSERTION'],
self.tokens['ADMIN_ASSERTION'])
projects_refs = (set([self.proj_customers['id']]),
set([self.proj_employees['id'],
self.project_all['id']]),
set([self.proj_employees['id'],
self.project_all['id'],
self.proj_customers['id']]))
for token, projects_ref in zip(token, projects_refs):
r = self.get(url, token=token)
projects_resp = r.result['projects']
projects = set(p['id'] for p in projects_resp)
self.assertEqual(projects, projects_ref)
def test_list_domains(self):
url = '/OS-FEDERATION/domains'
tokens = (self.tokens['CUSTOMER_ASSERTION'],
self.tokens['EMPLOYEE_ASSERTION'],
self.tokens['ADMIN_ASSERTION'])
domain_refs = (set([self.domainA['id']]),
set([self.domainA['id'],
self.domainB['id']]),
set([self.domainA['id'],
self.domainB['id'],
self.domainC['id']]))
for token, domains_ref in zip(tokens, domain_refs):
r = self.get(url, token=token)
domains_resp = r.result['domains']
domains = set(p['id'] for p in domains_resp)
self.assertEqual(domains, domains_ref)
def test_full_workflow(self):
"""Test 'standard' workflow for granting access tokens.
* Issue unscoped token
* List available projects based on groups
* Scope token to a one of available projects
"""
r = self._issue_unscoped_token()
employee_unscoped_token_id = r.headers.get('X-Subject-Token')
r = self.get('/OS-FEDERATION/projects',
token=employee_unscoped_token_id)
projects = r.result['projects']
random_project = random.randint(0, len(projects) - 1)
project = projects[random_project]
v3_scope_request = self._scope_request(employee_unscoped_token_id,
'project', project['id'])
r = self.post(self.AUTH_URL, body=v3_scope_request)
token_resp = r.result['token']
project_id = token_resp['project']['id']
self.assertEqual(project_id, project['id'])
self._check_scoped_token_attributes(token_resp)
def test_workflow_with_groups_deletion(self):
"""Test full workflow with groups deletion before token scoping.
The test scenario is as follows:
- Create group ``group``
- Create and assign roles to ``group`` and ``project_all``
- Patch mapping rules for existing IdP so it issues group id
- Issue unscoped token with ``group``'s id
- Delete group ``group``
- Scope token to ``project_all``
- Expect HTTP 500 response
"""
# create group and role
group = self.new_group_ref(
domain_id=self.domainA['id'])
self.identity_api.create_group(group['id'],
group)
role = self.new_role_ref()
self.assignment_api.create_role(role['id'],
role)
# assign role to group and project_admins
self.assignment_api.create_grant(role['id'],
group_id=group['id'],
project_id=self.project_all['id'])
rules = {
'rules': [
{
'local': [
{
'group': {
'id': group['id']
}
},
{
'user': {
'name': '{0}'
}
}
],
'remote': [
{
'type': 'UserName'
},
{
'type': 'LastName',
'any_one_of': [
'Account'
]
}
]
}
]
}
self.federation_api.update_mapping(self.mapping['id'], rules)
r = self._issue_unscoped_token(assertion='TESTER_ASSERTION')
token_id = r.headers.get('X-Subject-Token')
# delete group
self.identity_api.delete_group(group['id'])
# scope token to project_all, expect HTTP 500
scoped_token = self._scope_request(
token_id, 'project',
self.project_all['id'])
self.post(self.AUTH_URL,
body=scoped_token,
expected_status=500)
def test_assertion_prefix_parameter(self):
"""Test parameters filtering based on the prefix.
With ``assertion_prefix`` set to fixed, non defailt value,
issue an unscoped token from assertion EMPLOYEE_ASSERTION_PREFIXED.
Expect server to return unscoped token.
"""
self.config_fixture.config(group='federation',
assertion_prefix=self.ASSERTION_PREFIX)
r = self._issue_unscoped_token(assertion='EMPLOYEE_ASSERTION_PREFIXED')
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
def test_assertion_prefix_parameter_expect_fail(self):
"""Test parameters filtering based on the prefix.
With ``assertion_prefix`` default value set to empty string
issue an unscoped token from assertion EMPLOYEE_ASSERTION.
Next, configure ``assertion_prefix`` to value ``UserName``.
Try issuing unscoped token with EMPLOYEE_ASSERTION.
Expect server to raise exception.Unathorized exception.
"""
r = self._issue_unscoped_token()
self.assertIsNotNone(r.headers.get('X-Subject-Token'))
self.config_fixture.config(group='federation',
assertion_prefix='UserName')
self.assertRaises(exception.Unauthorized,
self._issue_unscoped_token)
def load_federation_sample_data(self):
"""Inject additional data."""
# Create and add domains
self.domainA = self.new_domain_ref()
self.assignment_api.create_domain(self.domainA['id'],
self.domainA)
self.domainB = self.new_domain_ref()
self.assignment_api.create_domain(self.domainB['id'],
self.domainB)
self.domainC = self.new_domain_ref()
self.assignment_api.create_domain(self.domainC['id'],
self.domainC)
# Create and add projects
self.proj_employees = self.new_project_ref(
domain_id=self.domainA['id'])
self.assignment_api.create_project(self.proj_employees['id'],
self.proj_employees)
self.proj_customers = self.new_project_ref(
domain_id=self.domainA['id'])
self.assignment_api.create_project(self.proj_customers['id'],
self.proj_customers)
self.project_all = self.new_project_ref(
domain_id=self.domainA['id'])
self.assignment_api.create_project(self.project_all['id'],
self.project_all)
# Create and add groups
self.group_employees = self.new_group_ref(
domain_id=self.domainA['id'])
self.identity_api.create_group(self.group_employees['id'],
self.group_employees)
self.group_customers = self.new_group_ref(
domain_id=self.domainA['id'])
self.identity_api.create_group(self.group_customers['id'],
self.group_customers)
self.group_admins = self.new_group_ref(
domain_id=self.domainA['id'])
self.identity_api.create_group(self.group_admins['id'],
self.group_admins)
# Create and add roles
self.role_employee = self.new_role_ref()
self.assignment_api.create_role(self.role_employee['id'],
self.role_employee)
self.role_customer = self.new_role_ref()
self.assignment_api.create_role(self.role_customer['id'],
self.role_customer)
self.role_admin = self.new_role_ref()
self.assignment_api.create_role(self.role_admin['id'],
self.role_admin)
# Employees can access
# * proj_employees
# * project_all
self.assignment_api.create_grant(self.role_employee['id'],
group_id=self.group_employees['id'],
project_id=self.proj_employees['id'])
self.assignment_api.create_grant(self.role_employee['id'],
group_id=self.group_employees['id'],
project_id=self.project_all['id'])
# Customers can access
# * proj_customers
self.assignment_api.create_grant(self.role_customer['id'],
group_id=self.group_customers['id'],
project_id=self.proj_customers['id'])
# Admins can access:
# * proj_customers
# * proj_employees
# * project_all
self.assignment_api.create_grant(self.role_admin['id'],
group_id=self.group_admins['id'],
project_id=self.proj_customers['id'])
self.assignment_api.create_grant(self.role_admin['id'],
group_id=self.group_admins['id'],
project_id=self.proj_employees['id'])
self.assignment_api.create_grant(self.role_admin['id'],
group_id=self.group_admins['id'],
project_id=self.project_all['id'])
self.assignment_api.create_grant(self.role_customer['id'],
group_id=self.group_customers['id'],
domain_id=self.domainA['id'])
# Customers can access:
# * domain A
self.assignment_api.create_grant(self.role_customer['id'],
group_id=self.group_customers['id'],
domain_id=self.domainA['id'])
# Employees can access:
# * domain A
# * domain B
self.assignment_api.create_grant(self.role_employee['id'],
group_id=self.group_employees['id'],
domain_id=self.domainA['id'])
self.assignment_api.create_grant(self.role_employee['id'],
group_id=self.group_employees['id'],
domain_id=self.domainB['id'])
# Admins can access:
# * domain A
# * domain B
# * domain C
self.assignment_api.create_grant(self.role_admin['id'],
group_id=self.group_admins['id'],
domain_id=self.domainA['id'])
self.assignment_api.create_grant(self.role_admin['id'],
group_id=self.group_admins['id'],
domain_id=self.domainB['id'])
self.assignment_api.create_grant(self.role_admin['id'],
group_id=self.group_admins['id'],
domain_id=self.domainC['id'])
self.rules = {
'rules': [
{
'local': [
{
'group': {
'id': self.group_employees['id']
}
},
{
'user': {
'name': '{0}'
}
}
],
'remote': [
{
'type': 'UserName'
},
{
'type': 'orgPersonType',
'any_one_of': [
'Employee'
]
}
]
},
{
'local': [
{
'group': {
'id': self.group_employees['id']
}
},
{
'user': {
'name': '{0}'
}
}
],
'remote': [
{
'type': self.ASSERTION_PREFIX + 'UserName'
},
{
'type': self.ASSERTION_PREFIX + 'orgPersonType',
'any_one_of': [
'SuperEmployee'
]
}
]
},
{
'local': [
{
'group': {
'id': self.group_customers['id']
}
},
{
'user': {
'name': '{0}'
}
}
],
'remote': [
{
'type': 'UserName'
},
{
'type': 'orgPersonType',
'any_one_of': [
'Customer'
]
}
]
},
{
'local': [
{
'group': {
'id': self.group_admins['id']
}
},
{
'group': {
'id': self.group_employees['id']
}
},
{
'group': {
'id': self.group_customers['id']
}
},
{
'user': {
'name': '{0}'
}
}
],
'remote': [
{
'type': 'UserName'
},
{
'type': 'orgPersonType',
'any_one_of': [
'Admin',
'Chief'
]
}
]
},
{
'local': [
{
'group': {
'id': uuid.uuid4().hex
}
},
{
'group': {
'id': self.group_customers['id']
}
},
{
'user': {
'name': '{0}'
}
}
],
'remote': [
{
'type': 'UserName',
},
{
'type': 'FirstName',
'any_one_of': [
'Jill'
]
},
{
'type': 'LastName',
'any_one_of': [
'Smith'
]
}
]
},
{
'local': [
{
'group': {
'id': 'this_group_no_longer_exists'
}
},
{
'user': {
'name': '{0}'
}
}
],
'remote': [
{
'type': 'UserName',
},
{
'type': 'Email',
'any_one_of': [
'[email protected]'
]
},
{
'type': 'orgPersonType',
'any_one_of': [
'Tester'
]
}
]
},
]
}
# Add IDP
self.idp = self.idp_ref(id=self.IDP)
self.federation_api.create_idp(self.idp['id'],
self.idp)
# Add a mapping
self.mapping = self.mapping_ref()
self.federation_api.create_mapping(self.mapping['id'],
self.mapping)
# Add protocols
self.proto_saml = self.proto_ref(mapping_id=self.mapping['id'])
self.proto_saml['id'] = self.PROTOCOL
self.federation_api.create_protocol(self.idp['id'],
self.proto_saml['id'],
self.proto_saml)
# Generate fake tokens
context = {'environment': {}}
self.tokens = {}
VARIANTS = ('EMPLOYEE_ASSERTION', 'CUSTOMER_ASSERTION',
'ADMIN_ASSERTION')
api = auth_controllers.Auth()
for variant in VARIANTS:
self._inject_assertion(context, variant)
r = api.authenticate_for_token(context, self.UNSCOPED_V3_SAML2_REQ)
self.tokens[variant] = r.headers.get('X-Subject-Token')
self.TOKEN_SCOPE_PROJECT_FROM_NONEXISTENT_TOKEN = self._scope_request(
uuid.uuid4().hex, 'project', self.proj_customers['id'])
self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_EMPLOYEE = self._scope_request(
self.tokens['EMPLOYEE_ASSERTION'], 'project',
self.proj_employees['id'])
self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_ADMIN = self._scope_request(
self.tokens['ADMIN_ASSERTION'], 'project',
self.proj_employees['id'])
self.TOKEN_SCOPE_PROJECT_CUSTOMER_FROM_ADMIN = self._scope_request(
self.tokens['ADMIN_ASSERTION'], 'project',
self.proj_customers['id'])
self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_CUSTOMER = self._scope_request(
self.tokens['CUSTOMER_ASSERTION'], 'project',
self.proj_employees['id'])
self.TOKEN_SCOPE_DOMAIN_A_FROM_CUSTOMER = self._scope_request(
self.tokens['CUSTOMER_ASSERTION'], 'domain', self.domainA['id'])
self.TOKEN_SCOPE_DOMAIN_B_FROM_CUSTOMER = self._scope_request(
self.tokens['CUSTOMER_ASSERTION'], 'domain', self.domainB['id'])
self.TOKEN_SCOPE_DOMAIN_B_FROM_CUSTOMER = self._scope_request(
self.tokens['CUSTOMER_ASSERTION'], 'domain',
self.domainB['id'])
self.TOKEN_SCOPE_DOMAIN_A_FROM_ADMIN = self._scope_request(
self.tokens['ADMIN_ASSERTION'], 'domain', self.domainA['id'])
self.TOKEN_SCOPE_DOMAIN_B_FROM_ADMIN = self._scope_request(
self.tokens['ADMIN_ASSERTION'], 'domain', self.domainB['id'])
self.TOKEN_SCOPE_DOMAIN_C_FROM_ADMIN = self._scope_request(
self.tokens['ADMIN_ASSERTION'], 'domain',
self.domainC['id'])
def _inject_assertion(self, context, variant):
assertion = getattr(mapping_fixtures, variant)
context['environment'].update(assertion)
context['query_string'] = []
|
apache-2.0
| -6,737,992,034,688,989,000 | 37.154994 | 79 | 0.516427 | false | 4.360086 | true | false | false |
Fokko/incubator-airflow
|
airflow/operators/cassandra_to_gcs.py
|
1
|
14378
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains an operator for copying
data from Cassandra to Google Cloud Storage in JSON format.
"""
import json
import warnings
from base64 import b64encode
from datetime import datetime
from decimal import Decimal
from tempfile import NamedTemporaryFile
from typing import Optional
from uuid import UUID
from cassandra.util import Date, OrderedMapSerializedKey, SortedSet, Time
from airflow.exceptions import AirflowException
from airflow.gcp.hooks.gcs import GoogleCloudStorageHook
from airflow.models import BaseOperator
from airflow.providers.apache.cassandra.hooks.cassandra import CassandraHook
from airflow.utils.decorators import apply_defaults
class CassandraToGoogleCloudStorageOperator(BaseOperator):
"""
    Copy data from Cassandra to Google Cloud Storage in JSON format
Note: Arrays of arrays are not supported.
:param cql: The CQL to execute on the Cassandra table.
:type cql: str
:param bucket: The bucket to upload to.
:type bucket: str
:param filename: The filename to use as the object name when uploading
to Google cloud storage. A {} should be specified in the filename
to allow the operator to inject file numbers in cases where the
file is split due to size.
:type filename: str
:param schema_filename: If set, the filename to use as the object name
when uploading a .json file containing the BigQuery schema fields
        for the table that was dumped from Cassandra.
:type schema_filename: str
:param approx_max_file_size_bytes: This operator supports the ability
to split large table dumps into multiple files (see notes in the
filename param docs above). This param allows developers to specify the
file size of the splits. Check https://cloud.google.com/storage/quotas
to see the maximum allowed file size for a single object.
:type approx_max_file_size_bytes: long
:param cassandra_conn_id: Reference to a specific Cassandra hook.
:type cassandra_conn_id: str
:param gzip: Option to compress file for upload
:type gzip: bool
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
:param google_cloud_storage_conn_id: (Deprecated) The connection ID used to connect to Google Cloud
Platform. This parameter has been deprecated. You should pass the gcp_conn_id parameter instead.
:type google_cloud_storage_conn_id: str
:param delegate_to: The account to impersonate, if any. For this to
work, the service account making the request must have domain-wide
delegation enabled.
:type delegate_to: str
"""
template_fields = ('cql', 'bucket', 'filename', 'schema_filename',)
template_ext = ('.cql',)
ui_color = '#a0e08c'
@apply_defaults
def __init__(self,
cql: str,
bucket: str,
filename: str,
schema_filename: Optional[str] = None,
approx_max_file_size_bytes: int = 1900000000,
gzip: bool = False,
cassandra_conn_id: str = 'cassandra_default',
gcp_conn_id: str = 'google_cloud_default',
google_cloud_storage_conn_id: Optional[str] = None,
delegate_to: Optional[str] = None,
*args,
**kwargs) -> None:
super().__init__(*args, **kwargs)
if google_cloud_storage_conn_id:
warnings.warn(
"The google_cloud_storage_conn_id parameter has been deprecated. You should pass "
"the gcp_conn_id parameter.", DeprecationWarning, stacklevel=3)
gcp_conn_id = google_cloud_storage_conn_id
self.cql = cql
self.bucket = bucket
self.filename = filename
self.schema_filename = schema_filename
self.approx_max_file_size_bytes = approx_max_file_size_bytes
self.cassandra_conn_id = cassandra_conn_id
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.gzip = gzip
self.hook = None
# Default Cassandra to BigQuery type mapping
CQL_TYPE_MAP = {
'BytesType': 'BYTES',
'DecimalType': 'FLOAT',
'UUIDType': 'BYTES',
'BooleanType': 'BOOL',
'ByteType': 'INTEGER',
'AsciiType': 'STRING',
'FloatType': 'FLOAT',
'DoubleType': 'FLOAT',
'LongType': 'INTEGER',
'Int32Type': 'INTEGER',
'IntegerType': 'INTEGER',
'InetAddressType': 'STRING',
'CounterColumnType': 'INTEGER',
'DateType': 'TIMESTAMP',
'SimpleDateType': 'DATE',
'TimestampType': 'TIMESTAMP',
'TimeUUIDType': 'BYTES',
'ShortType': 'INTEGER',
'TimeType': 'TIME',
'DurationType': 'INTEGER',
'UTF8Type': 'STRING',
'VarcharType': 'STRING',
}
def execute(self, context):
cursor = self._query_cassandra()
files_to_upload = self._write_local_data_files(cursor)
# If a schema is set, create a BQ schema JSON file.
if self.schema_filename:
files_to_upload.update(self._write_local_schema_file(cursor))
# Flush all files before uploading
for file_handle in files_to_upload.values():
file_handle.flush()
self._upload_to_gcs(files_to_upload)
# Close all temp file handles.
for file_handle in files_to_upload.values():
file_handle.close()
# Close all sessions and connection associated with this Cassandra cluster
self.hook.shutdown_cluster()
def _query_cassandra(self):
"""
        Queries Cassandra and returns a cursor to the results.
"""
self.hook = CassandraHook(cassandra_conn_id=self.cassandra_conn_id)
session = self.hook.get_conn()
cursor = session.execute(self.cql)
return cursor
def _write_local_data_files(self, cursor):
"""
Takes a cursor, and writes results to a local file.
:return: A dictionary where keys are filenames to be used as object
names in GCS, and values are file handles to local files that
contain the data for the GCS objects.
"""
file_no = 0
tmp_file_handle = NamedTemporaryFile(delete=True)
tmp_file_handles = {self.filename.format(file_no): tmp_file_handle}
for row in cursor:
row_dict = self.generate_data_dict(row._fields, row)
s = json.dumps(row_dict).encode('utf-8')
tmp_file_handle.write(s)
# Append newline to make dumps BigQuery compatible.
tmp_file_handle.write(b'\n')
if tmp_file_handle.tell() >= self.approx_max_file_size_bytes:
file_no += 1
tmp_file_handle = NamedTemporaryFile(delete=True)
tmp_file_handles[self.filename.format(file_no)] = tmp_file_handle
return tmp_file_handles
def _write_local_schema_file(self, cursor):
"""
Takes a cursor, and writes the BigQuery schema for the results to a
local file system.
:return: A dictionary where key is a filename to be used as an object
name in GCS, and values are file handles to local files that
contains the BigQuery schema fields in .json format.
"""
schema = []
tmp_schema_file_handle = NamedTemporaryFile(delete=True)
for name, type in zip(cursor.column_names, cursor.column_types):
schema.append(self.generate_schema_dict(name, type))
json_serialized_schema = json.dumps(schema).encode('utf-8')
tmp_schema_file_handle.write(json_serialized_schema)
return {self.schema_filename: tmp_schema_file_handle}
def _upload_to_gcs(self, files_to_upload):
hook = GoogleCloudStorageHook(
google_cloud_storage_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to)
        for obj, tmp_file_handle in files_to_upload.items():
            hook.upload(self.bucket, obj, tmp_file_handle.name, 'application/json', self.gzip)
@classmethod
def generate_data_dict(cls, names, values):
row_dict = {}
for name, value in zip(names, values):
row_dict.update({name: cls.convert_value(name, value)})
return row_dict
@classmethod
def convert_value(cls, name, value):
if not value:
return value
elif isinstance(value, (str, int, float, bool, dict)):
return value
elif isinstance(value, bytes):
return b64encode(value).decode('ascii')
elif isinstance(value, UUID):
return b64encode(value.bytes).decode('ascii')
elif isinstance(value, (datetime, Date)):
return str(value)
elif isinstance(value, Decimal):
return float(value)
elif isinstance(value, Time):
return str(value).split('.')[0]
elif isinstance(value, (list, SortedSet)):
return cls.convert_array_types(name, value)
elif hasattr(value, '_fields'):
return cls.convert_user_type(name, value)
elif isinstance(value, tuple):
return cls.convert_tuple_type(name, value)
elif isinstance(value, OrderedMapSerializedKey):
return cls.convert_map_type(name, value)
else:
raise AirflowException('unexpected value: ' + str(value))
@classmethod
def convert_array_types(cls, name, value):
return [cls.convert_value(name, nested_value) for nested_value in value]
@classmethod
def convert_user_type(cls, name, value):
"""
Converts a user type to RECORD that contains n fields, where n is the
number of attributes. Each element in the user type class will be converted to its
corresponding data type in BQ.
"""
names = value._fields
values = [cls.convert_value(name, getattr(value, name)) for name in names]
return cls.generate_data_dict(names, values)
@classmethod
def convert_tuple_type(cls, name, value):
"""
Converts a tuple to RECORD that contains n fields, each will be converted
to its corresponding data type in bq and will be named 'field_<index>', where
index is determined by the order of the tuple elements defined in cassandra.
"""
names = ['field_' + str(i) for i in range(len(value))]
values = [cls.convert_value(name, value) for name, value in zip(names, value)]
return cls.generate_data_dict(names, values)
@classmethod
def convert_map_type(cls, name, value):
"""
Converts a map to a repeated RECORD that contains two fields: 'key' and 'value',
each will be converted to its corresponding data type in BQ.
"""
converted_map = []
for k, v in zip(value.keys(), value.values()):
converted_map.append({
'key': cls.convert_value('key', k),
'value': cls.convert_value('value', v)
})
return converted_map
@classmethod
def generate_schema_dict(cls, name, type):
field_schema = dict()
field_schema.update({'name': name})
field_schema.update({'type': cls.get_bq_type(type)})
field_schema.update({'mode': cls.get_bq_mode(type)})
fields = cls.get_bq_fields(name, type)
if fields:
field_schema.update({'fields': fields})
return field_schema
@classmethod
def get_bq_fields(cls, name, type):
fields = []
if not cls.is_simple_type(type):
names, types = [], []
if cls.is_array_type(type) and cls.is_record_type(type.subtypes[0]):
names = type.subtypes[0].fieldnames
types = type.subtypes[0].subtypes
elif cls.is_record_type(type):
names = type.fieldnames
types = type.subtypes
if types and not names and type.cassname == 'TupleType':
names = ['field_' + str(i) for i in range(len(types))]
elif types and not names and type.cassname == 'MapType':
names = ['key', 'value']
for name, type in zip(names, types):
field = cls.generate_schema_dict(name, type)
fields.append(field)
return fields
@classmethod
def is_simple_type(cls, type):
return type.cassname in CassandraToGoogleCloudStorageOperator.CQL_TYPE_MAP
@classmethod
def is_array_type(cls, type):
return type.cassname in ['ListType', 'SetType']
@classmethod
def is_record_type(cls, type):
return type.cassname in ['UserType', 'TupleType', 'MapType']
@classmethod
def get_bq_type(cls, type):
if cls.is_simple_type(type):
return CassandraToGoogleCloudStorageOperator.CQL_TYPE_MAP[type.cassname]
elif cls.is_record_type(type):
return 'RECORD'
elif cls.is_array_type(type):
return cls.get_bq_type(type.subtypes[0])
else:
raise AirflowException('Not a supported type: ' + type.cassname)
@classmethod
def get_bq_mode(cls, type):
if cls.is_array_type(type) or type.cassname == 'MapType':
return 'REPEATED'
elif cls.is_record_type(type) or cls.is_simple_type(type):
return 'NULLABLE'
else:
raise AirflowException('Not a supported type: ' + type.cassname)
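# ---------------------------------------------------------------------------
# Hedged usage sketch (editorial addition, not part of the original operator):
# shows how this operator might be wired into a DAG. The DAG id, task id,
# bucket, CQL statement and file names below are hypothetical placeholders.
# ---------------------------------------------------------------------------
def _example_dag_sketch():
    """Return a minimal example DAG using CassandraToGoogleCloudStorageOperator."""
    from airflow import DAG
    # `datetime` is already imported at module level
    with DAG(dag_id='example_cassandra_to_gcs',
             start_date=datetime(2019, 1, 1),
             schedule_interval=None) as dag:
        CassandraToGoogleCloudStorageOperator(
            task_id='export_orders',
            cql='SELECT * FROM shop.orders',
            bucket='example-bucket',
            # '{}' lets the operator number the files if the dump is split
            filename='orders/export_{}.json',
            schema_filename='orders/schema.json',
        )
    return dag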
|
apache-2.0
| -4,027,919,065,744,805,000 | 38.284153 | 104 | 0.627973 | false | 4.092798 | false | false | false |
nshearer/etl
|
src/etl/EtlJoinProcessor.py
|
1
|
5333
|
'''
Created on Dec 28, 2012
@author: nshearer
'''
from abc import ABCMeta, abstractmethod
from EtlProcessor import EtlProcessor
class EtlJoinProcessor(EtlProcessor):
'''Join one set of records to another'''
def __init__(self):
super(EtlJoinProcessor, self).__init__()
self.__match_keys = dict() # Match Key -> (input_set, Record Key)
self.__lookup_inputs_processed = False
def list_inputs(self):
for p_input in self.list_lookup_inputs():
yield p_input
for p_input in self.list_subject_inputs():
yield p_input
# -- Override these -------------------------------------------------------
@abstractmethod
def list_lookup_inputs(self):
'''List inputs that contain the records to ref against
These record sets must be indexed
'''
@abstractmethod
def list_subject_inputs(self):
'''List inputs that contain the records to find refs for'''
@abstractmethod
def build_lookup_record_key(self, lookup_record):
'''Build a key to be used for matching subject records to'''
@abstractmethod
def build_lookup_key(self, record):
'''Build a key to use to find a lookup record'''
# -- Common join logic ----------------------------------------------------
def gen_output(self, name, inputs, record_set):
'''Generate named output data.
Dynamically calls 'gen_<name>_output' method
@param name: Name of the output to generate
@param inputs: Dictionary of connected input datasets
@param record_set: Container to populate with records
'''
if not self.__lookup_inputs_processed:
# Generate keys for lookup records
for data_port in self.list_lookup_inputs():
for input_set in inputs[data_port.name]:
for record in input_set.all_records():
# Build a Match key for this lookup record
match_key = self.build_lookup_record_key(record)
if match_key is None:
msg = "Did not build a match key for this record"
msg = record.create_msg(msg)
raise Exception(msg)
# Determine associated index
rec_index = record.index
if rec_index is None:
msg = "Record in lookup input has no index."
msg = record.create_msg(msg)
raise Exception(msg)
# Make sure match key is unique
if self.__match_keys.has_key(match_key):
handle = self._handle_duplicate_lookup_match_key
handle(match_key, record)
# Store
else:
store_rec = self._store_lookup_record
store_rec(match_key, input_set, rec_index)
self.__lookup_inputs_processed = True
# Call Parent to process subject records
super(EtlJoinProcessor, self).gen_output(name, inputs, record_set)
#def gen_invoices_output(self, inputs, output_set):
# for record_set in inputs['invoices']:
# for record in record_set.all_records():
# ref_record = self.lookup(record)
# if ref_record is not None:
# # Get values from subject
# values = record.values
#
# # Copy in values from lookup record
# for name in ['pidm', 'name', 'ssn']:
# values[name] = ref_record[name]
#
# # Output record
# output_set.add_record(values)
def lookup(self, record):
'''Find record in lookup sets for this record'''
# Build a Match key for this lookup record
match_key = self.build_lookup_key(record)
if match_key is None:
msg = "Did not build a match key for this record"
msg = record.create_msg(msg)
raise Exception(msg)
# Find match
if self.__match_keys.has_key(match_key):
input_set, lookup_index = self.__match_keys[match_key]
return input_set.get_record(lookup_index)
return None
def _handle_duplicate_lookup_match_key(self, match_key, record):
msg = "Duplicated match key '%s'" % (match_key)
msg = record.create_msg(msg)
raise Exception(msg)
def _store_lookup_record(self, match_key, lookup_set, index):
self.__match_keys[match_key] = (lookup_set, index)
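class ExampleInvoiceJoin(EtlJoinProcessor):
    '''Hedged illustration (editorial addition, not part of the original module).
    A minimal sketch of how the abstract hooks might be implemented. The port
    objects passed to the constructor and the 'pidm' field name are
    hypothetical placeholders for whatever the surrounding ETL graph defines.
    '''
    def __init__(self, student_port, invoice_port):
        super(ExampleInvoiceJoin, self).__init__()
        self.__student_port = student_port
        self.__invoice_port = invoice_port
    def list_lookup_inputs(self):
        # Records used to build the match index (must be indexed record sets)
        return [self.__student_port]
    def list_subject_inputs(self):
        # Records that will be joined against the lookup index
        return [self.__invoice_port]
    def build_lookup_record_key(self, lookup_record):
        # Key the lookup index by student id
        return lookup_record['pidm']
    def build_lookup_key(self, record):
        # Subject records carry the same id under the same field name here
        return record['pidm']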
|
gpl-2.0
| -5,025,042,689,156,766,000 | 36.381295 | 79 | 0.475342 | false | 5.007512 | false | false | false |
mattduan/proof
|
util/UniqueList.py
|
1
|
4847
|
"""
List with unique entries. UniqueList does not allow None values or duplicates.
"""
__version__= '$Revision: 11 $'[11:-2]
__author__ = "Duan Guoqiang ([email protected])"
class UniqueList(list):
def __init__(self, initlist=[]):
# call super class
list.__init__(self)
# add initlist
if initlist:
self.extend(initlist)
def __getslice__(self, i, j):
# return a UniqueList object
i = max(i, 0); j = max(j, 0)
return self.__class__(list.__getslice__(self, i, j))
def __setslice__(self, i, j, other):
i = max(i, 0); j = max(j, 0)
# remove duplicates
uniques = []
try:
for o in other:
if o not in self:
uniques.append(o)
except TypeError:
raise TypeError( "UniqueList.__setslice__() argument %s must be iterable" % (other) )
# call super class
list.__setslice__(self, i, j, uniques)
def __add(self, l, flag=None):
""" A convenient method for all add call.
"""
if type(l) == type([]) or \
isinstance(l, UniqueList):
if flag == "r":
new_list = UniqueList()
new_list.extend(l)
new_list.extend(self)
return new_list
elif flag == "i":
self.extend(l)
return self
else:
new_list = UniqueList()
new_list.extend(self)
new_list.extend(l)
return new_list
else:
raise TypeError( """can only concatenate list/List/UniqueList (not "%s")""" % \
type(l) )
def __add__(self, l):
return self.__add(l)
def __radd__(self, l):
return self.__add(l, "r")
def __iadd__(self, l):
return self.__add(l, "i")
def __mul__(self, n):
return self
__rmul__ = __mul__
__imul__ = __mul__
def append(self, item):
""" Append an Item to the list.
@param item the Item to append
"""
if item != None and item not in self:
list.append(self, item)
def insert(self, i, item):
""" Insert an item to the list.
@param i the index to insert
@param item the item to insert
"""
if item != None and item not in self:
list.insert(self, i, item)
def extend(self, l):
""" Extend another list into this list.
@param l the list to extend
"""
try:
for i in l:
self.append(i)
except TypeError, msg:
raise TypeError("UniqueList.extend() argument must be iterable")
def clear(self):
""" Remove all items in the list.
"""
list.__init__(self, [])
# only used for test
if __name__ == '__main__':
print
print "UniqueList Test"
print
print "testing constructor"
ul1 = UniqueList()
print "ul1 (UniqueList()) => %s" % (ul1)
ul2 = UniqueList('123')
print "ul2 (UniqueList('123')) => %s" % (ul2)
ul3 = UniqueList([1,1,2,3])
print "ul3 (UniqueList([1,1,2,3])) => %s" % (ul3)
print
print 'testing type'
print "ul1 type => %s" % (type(ul1))
print "ul1 is subclass list => %s" % (issubclass(ul1.__class__, list))
print "testing append"
ul1.append(2)
print "ul1.append(2) => %s" % (ul1)
ul1.append(2)
print "ul1.append(2) => %s" % (ul1)
ul2.append(2)
print "ul2.append(2) => %s" % (ul2)
ul3.append(2)
print "ul3.append(2) => %s" % (ul3)
print
print "testing insert"
ul1.insert(1, 1)
print "ul1.insert(1, 1) => %s" % (ul1)
ul1.insert(1, 1)
print "ul1.insert(1, 1) => %s" % (ul1)
ul3.insert(3, 3)
print "ul3.insert(3, 3) => %s" % (ul3)
print
print "testing extend"
ul1.extend('123')
print "ul1.extend('123') => %s" % (ul1)
ul1.extend([1,2,3])
print "ul1.extend([1,2,3]) => %s" % (ul1)
print
print "testing +"
print "ul1 = %s" % (ul1)
print "ul2 = %s" % (ul2)
print "ul3 = %s" % (ul3)
ul4 = ul1 + ul2 + ul3
print "ul1 + ul2 + ul3 => %s" % (ul4)
print "type(ul1 + ul2 + ul3) => %s" % (type(ul4))
print "ul1 + [2,4,5] => %s" % (ul1 + [2,4,5])
print "type(ul1 + [2,4,5]) => %s" % (type(ul1 + [2,4,5]))
print
print "testing slice"
print "ul1[2:5] => %s" % (ul1[2:5])
ul1[2:5] = [1,2,3]
print "ul1[2:5] = [1,2,3]"
print "ul1 => %s" % (ul1)
print "type(ul1) => %s" % (type(ul1))
print
print "testing mul"
print "ul1 * 3 => %s" % (ul1*3)
print
print "testing clear"
ul1.clear()
print "ul1.clear() => %s" % (ul1)
print
print "done."
print
|
bsd-3-clause
| -1,884,556,780,845,086,200 | 25.2 | 97 | 0.482773 | false | 3.212061 | true | false | false |
flopezag/fiware-backlog
|
kernel/DataBoard.py
|
1
|
5593
|
from kconfig import settings
from kconfig import enablersBook, helpdeskCompBook
from kconfig import trackersBook, workGroupBook
from kernel.DataFactory import DataFactory
__author__ = "Manuel Escriche <[email protected]>"
class Data:
@staticmethod
def getUrgentDeskUpcoming():
trackers = ','.join(trackersBook[tracker].keystone for tracker in trackersBook)
jql = "duedate >= 0d AND duedate <= 7d AND status != Closed AND project in ({})".format(trackers)
return DataFactory(settings.storeHome).getQueryData('urgent.upcoming', jql)
@staticmethod
def getUrgentDeskOverdue():
trackers = ','.join(trackersBook[tracker].keystone for tracker in trackersBook)
jql = "duedate < now() AND status != Closed AND project in ({})".format(trackers)
return DataFactory(settings.storeHome).getQueryData('urgent.upcoming', jql)
@staticmethod
def getHelpDesk():
return DataFactory(settings.storeHome).getTrackerData('HELP')
@staticmethod
def getHelpDeskTechChannel():
techChannel = helpdeskCompBook['Tech']
return DataFactory(settings.storeHome).getComponentData(techChannel.key)
@staticmethod
def getDesk(desk):
return DataFactory(settings.storeHome).getTrackerData(desk.tracker)
@staticmethod
def getFocusedDesk(desk):
jql = "project = {} AND (resolution = Unresolved OR resolutiondate <= 60d)".format(desk.tracker)
return DataFactory(settings.storeHome).getQueryData('{}.focused'.format(desk.tracker), jql)
@staticmethod
def getChannel(channel):
return DataFactory(settings.storeHome).getComponentData(channel.id)
@staticmethod
def getFocusedChannel(channel):
jql = "component = {} AND (resolution = Unresolved OR resolutiondate <= 60d)".format(channel.key)
return DataFactory(settings.storeHome).getQueryData('{}.focused'.format(channel.key), jql)
@staticmethod
def getEnabler(enablername):
cmp_id = enablersBook[enablername]
return DataFactory(settings.storeHome).getComponentData(cmp_id.key)
@staticmethod
def getAccountDeskRequests():
jql = "project = FLUA AND issuetype = UpgradeAccount"
return DataFactory(settings.storeHome).getQueryData('account.requests', jql)
@staticmethod
def getFocusedAccountDeskRequest():
jql = "project = FLUA AND issuetype = UpgradeAccount AND (resolution = Unresolved OR resolutiondate <= 60d)"
return DataFactory(settings.storeHome).getQueryData('account.focusedRequests', jql)
@staticmethod
def getAccountChannelRequests(channnel):
jql = "component = {} AND issuetype = UpgradeAccount".format(channnel.key)
return DataFactory(settings.storeHome).getQueryData('account.requests', jql)
@staticmethod
def getFocusedAccountChannelRequest(channel):
jql = "component = {} AND issuetype = UpgradeAccount AND (resolution = Unresolved OR resolutiondate <= 60d)".format(channel.key)
return DataFactory(settings.storeHome).getQueryData('account.focusedRequests', jql)
@staticmethod
def getAccountDeskProvisioning():
jql = "project = FLUA AND issuetype = AccountUpgradeByNode"
return DataFactory(settings.storeHome).getQueryData('account.provisioning', jql)
@staticmethod
def getFocusedAccountDeskProvisioning():
jql = "project = FLUA AND issuetype = AccountUpgradeByNode AND (resolution = Unresolved OR resolutiondate <= 60d)"
return DataFactory(settings.storeHome).getQueryData('account.focusedProvisioning', jql)
@staticmethod
def getEnablerHelpDesk(enablername):
enabler = enablersBook[enablername]
jql = "project = HELP AND issuetype in (Monitor, extRequest) AND HD-Enabler = '{}'".format(enablername)
return DataFactory(settings.storeHome).getQueryData('helpdesk.enabler-{}'.format(enabler.backlogKeyword), jql)
@staticmethod
def getChapterHelpDesk(chaptername):
jql = "project = HELP AND issuetype in (Monitor, extRequest) AND HD-Chapter = '{}'".format(chaptername)
return DataFactory(settings.storeHome).getQueryData('helpdesk.chapter-{}'.format(chaptername), jql)
@staticmethod
def getNodeHelpDesk(nodename):
jql = "project = HELP AND issuetype in (Monitor, extRequest) AND HD-Node = '{}'".format(nodename)
return DataFactory(settings.storeHome).getQueryData('helpdesk.node-{}'.format(nodename), jql)
@staticmethod
def getGlobalComponent(key):
return DataFactory(settings.storeHome).getComponentData(key)
@staticmethod
def getChannel(key):
return DataFactory(settings.storeHome).getComponentData(key)
@staticmethod
def getWorkGroups():
trackers = ','.join([workGroupBook[item].tracker for item in workGroupBook])
jql = 'project in ({})'.format(trackers)
return DataFactory(settings.storeHome).getQueryData('workgroups',jql)
@staticmethod
def getWorkGroup(key):
return DataFactory(settings.storeHome).getTrackerData(key)
@staticmethod
def getWorkGroupComponent(key):
return DataFactory(settings.storeHome).getComponentData(key)
@staticmethod
def getWorkGroupNoComponent(key):
return DataFactory(settings.storeHome).getTrackerNoComponentData(key)
@staticmethod
def getLab():
return DataFactory(settings.storeHome).getTrackerData('LAB')
@staticmethod
def getLabComponent(cmp):
return DataFactory(settings.storeHome).getComponentData(cmp.key)
if __name__ == "__main__":
pass
|
apache-2.0
| 5,845,164,835,955,047,000 | 39.824818 | 136 | 0.714107 | false | 3.963855 | false | false | false |
george-hopkins/frn-py
|
frn/common/protocol.py
|
1
|
2718
|
from twisted.protocols import basic
from frn.utils import parse_dict
class InvalidServerResponse(Exception):
pass
class InvalidClientRequest(Exception):
pass
class LineReceiver(basic.LineReceiver):
def decodedLineReceived(self, line):
"""Override this for when each line is received."""
raise NotImplementedError
def lineReceived(self, line):
"""Decode a received line."""
line = line.decode('iso-8859-1').encode('utf8')
self.decodedLineReceived(line)
def sendLine(self, line):
"""Send a line to the other end of the connection."""
line = str(line).decode('utf8').encode('iso-8859-1')
basic.LineReceiver.sendLine(self, line)
class CommandClient(LineReceiver):
def __init__(self):
self.commandQueue = []
def sendCommand(self, command, before, handler):
wasEmpty = not self.commandQueue
self.commandQueue.append((command, before, handler))
if wasEmpty:
self.__sendNextCommand()
def __sendNextCommand(self):
if self.commandQueue:
command, before, handler = self.commandQueue[0]
if before:
before()
if command:
self.sendLine(command)
else:
self.__finishCommand()
def __finishCommand(self):
if self.commandQueue:
self.commandQueue.pop(0)
self.__sendNextCommand()
def decodedLineReceived(self, line):
if self.commandQueue:
if self.commandQueue[0][2](line) is not True:
self.__finishCommand()
else:
            raise InvalidServerResponse('Unexpected line received.')
def finish(self):
self.sendCommand(None, self.transport.loseConnection, None)
class CommandServer(LineReceiver):
def __init__(self):
self.commandHandlers = {}
def registerCommand(self, name, handler, allowedArgs=False):
self.commandHandlers[name] = (handler, allowedArgs)
def deregisterCommands(self):
self.commandHandlers = {}
def decodedLineReceived(self, line):
parts = line.split(':', 1)
command = parts[0]
if len(parts) == 1:
args = {}
elif parts[1] and parts[1][0] == '<':
args = parse_dict(parts[1])
else:
args = {'_': parts[1]}
if command in self.commandHandlers:
handler, allowedArgs = self.commandHandlers[command]
if allowedArgs is False:
handler(args)
else:
handler({key: args[key] for key in allowedArgs})
else:
raise InvalidClientRequest('Unknown command "%s".' % command)
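class ExampleCommandServer(CommandServer):
    """Hedged sketch (editorial addition, not part of the original module).
    Shows how a concrete protocol might register command handlers. The 'LOGIN'
    command and its argument keys are hypothetical placeholders, not part of
    the real FRN protocol.
    """
    def connectionMade(self):
        # Accept LOGIN lines and expose only the argument keys we care about
        self.registerCommand('LOGIN', self.handle_login, ['user', 'password'])
    def handle_login(self, args):
        # Echo a hypothetical acknowledgement back to the client
        self.sendLine('LOGIN:OK:%s' % args['user'])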
|
lgpl-3.0
| -7,237,886,593,561,025,000 | 28.868132 | 73 | 0.598234 | false | 4.334928 | false | false | false |
eoconsulting/django-zoook
|
django_zoook/tag/views.py
|
1
|
4455
|
# -*- coding: utf-8 -*-
############################################################################################
#
# Zoook. OpenERP e-sale, e-commerce Open Source Management Solution
# Copyright (C) 2011 Zikzakmedia S.L. (<http://www.zikzakmedia.com>). All Rights Reserved
#
# Module Created: 03/05/2012
# Author: Mariano Ruiz <[email protected]>,
# Enterprise Objects Consulting (<http://www.eoconsulting.com.ar>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
############################################################################################
from django.shortcuts import render_to_response
from django.http import Http404
from django.template import RequestContext
from django.utils.translation import ugettext as _
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.utils.translation import get_language
from django.db.models import Q
from django_zoook.settings import *
from django_zoook.catalog.models import *
from django_zoook.tools.paginator import *
def keyword(request,tag):
"""All Products filtered by keyword"""
q = tag
values = []
if q:
kwargs_eq = {
'product_tmpl__metakeyword_'+get_language(): u'%s' % q,
}
kwargs_start = {
'product_tmpl__metakeyword_'+get_language()+'__istartswith': u'%s,' % q,
}
kwargs_md = {
'product_tmpl__metakeyword_'+get_language()+'__icontains': u',%s,' % q,
}
kwargs_end = {
'product_tmpl__metakeyword_'+get_language()+'__iendswith': u',%s' % q,
}
product_products = ProductProduct.objects.filter(
#Q(product_tmpl__status=True), Q(active=True),
Q(product_tmpl__visibility='all') | Q(product_tmpl__visibility='search') | Q(product_tmpl__visibility='catalog'),
Q(**kwargs_eq) | Q(**kwargs_start) | Q(**kwargs_md) | Q(**kwargs_end))
# Pagination options
set_paginator_options(request, 'price')
total = product_products.count()
paginator = Paginator(product_products, request.session['paginator'])
num_pages = get_num_pages(product_products, request.session['paginator'])
page = int(request.GET.get('page', '1'))
# If page request (9999) is out of range, deliver last page of results.
try:
product_products = paginator.page(page)
except (EmptyPage, InvalidPage):
product_products = paginator.page(paginator.num_pages)
# == template values ==
title = _(u"'%(tag)s' - Page %(page)s of %(total)s") % {'tag': q, 'page': product_products.number, 'total': num_pages}
metadescription = _(u"'%(tag)s' - Page %(page)s of %(total)s") % {'tag': q, 'page': product_products.number, 'total': num_pages}
category_values = {
'title': title,
'query': u'“%s”' % q,
'tag': q,
'metadescription': metadescription,
'product_products': product_products,
'paginator_option': request.session['paginator'],
'mode_option': request.session['mode'],
'order_option': request.session['order'],
'order_by_option': request.session['order_by'],
'paginator_items': PAGINATOR_ITEMS,
'catalog_orders': CATALOG_ORDERS,
'total': total,
'currency': DEFAULT_CURRENCY,
'compare_on': COMPARE_ON,
'update_price': UPDATE_PRICE,
'currency_position': CURRENCY_LABEL_POSITION,
}
return render_to_response("tag/tag.html", category_values, context_instance=RequestContext(request))
else:
        raise Http404(_('This query is not available because you navigated here via a bookmark or a search engine. Use the navigation menu'))
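def _example_urlpatterns():
    """Hedged wiring sketch (editorial addition, not part of the original module).
    One way the keyword view might be routed in a Django 1.3/1.4-era urls.py;
    the URL pattern and the 'tag_keyword' name are hypothetical placeholders.
    """
    from django.conf.urls.defaults import patterns, url
    return patterns(
        '',
        url(r'^tag/(?P<tag>[^/]+)/$', keyword, name='tag_keyword'),
    )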
|
agpl-3.0
| -6,514,768,064,720,154,000 | 42.213592 | 136 | 0.602112 | false | 3.967023 | false | false | false |
MathieuDuponchelle/gobject-introspection
|
giscanner/girparser.py
|
1
|
26691
|
# -*- Mode: Python -*-
# GObject-Introspection - a framework for introspecting GObject libraries
# Copyright (C) 2008 Johan Dahlin
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
import os
from xml.etree.cElementTree import parse
from . import ast
from .girwriter import COMPATIBLE_GIR_VERSION
from .collections import OrderedDict
CORE_NS = "http://www.gtk.org/introspection/core/1.0"
C_NS = "http://www.gtk.org/introspection/c/1.0"
GLIB_NS = "http://www.gtk.org/introspection/glib/1.0"
def _corens(tag):
return '{%s}%s' % (CORE_NS, tag)
def _glibns(tag):
return '{%s}%s' % (GLIB_NS, tag)
def _cns(tag):
return '{%s}%s' % (C_NS, tag)
class GIRParser(object):
def __init__(self, types_only=False):
self._types_only = types_only
self._namespace = None
self._filename_stack = []
# Public API
def parse(self, filename):
filename = os.path.abspath(filename)
self._filename_stack.append(filename)
tree = parse(filename)
self.parse_tree(tree)
self._filename_stack.pop()
def parse_tree(self, tree):
self._namespace = None
self._pkgconfig_packages = set()
self._includes = set()
self._c_includes = set()
self._c_prefix = None
self._parse_api(tree.getroot())
def get_namespace(self):
return self._namespace
# Private
def _find_first_child(self, node, name_or_names):
if isinstance(name_or_names, str):
for child in node.getchildren():
if child.tag == name_or_names:
return child
else:
for child in node.getchildren():
if child.tag in name_or_names:
return child
return None
def _find_children(self, node, name):
return [child for child in node.getchildren() if child.tag == name]
def _get_current_file(self):
if not self._filename_stack:
return None
cwd = os.getcwd() + os.sep
curfile = self._filename_stack[-1]
if curfile.startswith(cwd):
return curfile[len(cwd):]
return curfile
def _parse_api(self, root):
assert root.tag == _corens('repository')
version = root.attrib['version']
if version != COMPATIBLE_GIR_VERSION:
raise SystemExit("%s: Incompatible version %s (supported: %s)" %
(self._get_current_file(), version, COMPATIBLE_GIR_VERSION))
for node in root.getchildren():
if node.tag == _corens('include'):
self._parse_include(node)
elif node.tag == _corens('package'):
self._parse_pkgconfig_package(node)
elif node.tag == _cns('include'):
self._parse_c_include(node)
ns = root.find(_corens('namespace'))
assert ns is not None
identifier_prefixes = ns.attrib.get(_cns('identifier-prefixes'))
if identifier_prefixes:
identifier_prefixes = identifier_prefixes.split(',')
symbol_prefixes = ns.attrib.get(_cns('symbol-prefixes'))
if symbol_prefixes:
symbol_prefixes = symbol_prefixes.split(',')
self._namespace = ast.Namespace(ns.attrib['name'],
ns.attrib['version'],
identifier_prefixes=identifier_prefixes,
symbol_prefixes=symbol_prefixes)
if 'shared-library' in ns.attrib:
self._namespace.shared_libraries = ns.attrib['shared-library'].split(',')
self._namespace.includes = self._includes
self._namespace.c_includes = self._c_includes
self._namespace.exported_packages = self._pkgconfig_packages
parser_methods = {
_corens('alias'): self._parse_alias,
_corens('bitfield'): self._parse_enumeration_bitfield,
_corens('callback'): self._parse_callback,
_corens('class'): self._parse_object_interface,
_corens('enumeration'): self._parse_enumeration_bitfield,
_corens('interface'): self._parse_object_interface,
_corens('record'): self._parse_record,
_corens('union'): self._parse_union,
_corens('docsection'): self._parse_doc_section,
_glibns('boxed'): self._parse_boxed}
if not self._types_only:
parser_methods[_corens('constant')] = self._parse_constant
parser_methods[_corens('function')] = self._parse_function
for node in ns.getchildren():
method = parser_methods.get(node.tag)
if method is not None:
method(node)
def _parse_doc_section(self, node):
docsection = ast.DocSection(node.attrib["name"])
self._parse_generic_attribs(node, docsection)
self._namespace.append(docsection)
def _parse_include(self, node):
include = ast.Include(node.attrib['name'], node.attrib['version'])
self._includes.add(include)
def _parse_pkgconfig_package(self, node):
self._pkgconfig_packages.add(node.attrib['name'])
def _parse_c_include(self, node):
self._c_includes.add(node.attrib['name'])
def _parse_alias(self, node):
typeval = self._parse_type(node)
alias = ast.Alias(node.attrib['name'], typeval, node.attrib.get(_cns('type')))
self._parse_generic_attribs(node, alias)
self._namespace.append(alias)
def _parse_generic_attribs(self, node, obj):
assert isinstance(obj, ast.Annotated)
skip = node.attrib.get('skip')
if skip:
try:
obj.skip = int(skip) > 0
except ValueError:
obj.skip = False
introspectable = node.attrib.get('introspectable')
if introspectable:
try:
obj.introspectable = int(introspectable) > 0
except ValueError:
obj.introspectable = False
if self._types_only:
return
doc = node.find(_corens('doc'))
if doc is not None:
if doc.text:
obj.doc = doc.text
version = node.attrib.get('version')
if version:
obj.version = version
version_doc = node.find(_corens('doc-version'))
if version_doc is not None:
if version_doc.text:
obj.version_doc = version_doc.text
deprecated = node.attrib.get('deprecated-version')
if deprecated:
obj.deprecated = deprecated
deprecated_doc = node.find(_corens('doc-deprecated'))
if deprecated_doc is not None:
if deprecated_doc.text:
obj.deprecated_doc = deprecated_doc.text
stability = node.attrib.get('stability')
if stability:
obj.stability = stability
stability_doc = node.find(_corens('doc-stability'))
if stability_doc is not None:
if stability_doc.text:
obj.stability_doc = stability_doc.text
attributes = node.findall(_corens('attribute'))
if attributes:
attributes_ = OrderedDict()
for attribute in attributes:
name = attribute.attrib.get('name')
value = attribute.attrib.get('value')
attributes_[name] = value
obj.attributes = attributes_
def _parse_object_interface(self, node):
parent = node.attrib.get('parent')
if parent:
parent_type = self._namespace.type_from_name(parent)
else:
parent_type = None
ctor_kwargs = {'name': node.attrib['name'],
'parent_type': parent_type,
'gtype_name': node.attrib[_glibns('type-name')],
'get_type': node.attrib[_glibns('get-type')],
'c_symbol_prefix': node.attrib.get(_cns('symbol-prefix')),
'ctype': node.attrib.get(_cns('type'))}
if node.tag == _corens('interface'):
klass = ast.Interface
elif node.tag == _corens('class'):
klass = ast.Class
is_abstract = node.attrib.get('abstract')
is_abstract = is_abstract and is_abstract != '0'
ctor_kwargs['is_abstract'] = is_abstract
else:
raise AssertionError(node)
obj = klass(**ctor_kwargs)
self._parse_generic_attribs(node, obj)
type_struct = node.attrib.get(_glibns('type-struct'))
if type_struct:
obj.glib_type_struct = self._namespace.type_from_name(type_struct)
if klass == ast.Class:
is_fundamental = node.attrib.get(_glibns('fundamental'))
if is_fundamental and is_fundamental != '0':
obj.fundamental = True
for func_id in ['ref-func', 'unref-func',
'set-value-func', 'get-value-func']:
func_name = node.attrib.get(_glibns(func_id))
obj.__dict__[func_id.replace('-', '_')] = func_name
if self._types_only:
self._namespace.append(obj)
return
for iface in self._find_children(node, _corens('implements')):
obj.interfaces.append(self._namespace.type_from_name(iface.attrib['name']))
for iface in self._find_children(node, _corens('prerequisite')):
obj.prerequisites.append(self._namespace.type_from_name(iface.attrib['name']))
for func_node in self._find_children(node, _corens('function')):
func = self._parse_function_common(func_node, ast.Function, obj)
obj.static_methods.append(func)
for method in self._find_children(node, _corens('method')):
func = self._parse_function_common(method, ast.Function, obj)
func.is_method = True
obj.methods.append(func)
for method in self._find_children(node, _corens('virtual-method')):
func = self._parse_function_common(method, ast.VFunction, obj)
self._parse_generic_attribs(method, func)
func.is_method = True
func.invoker = method.get('invoker')
obj.virtual_methods.append(func)
for ctor in self._find_children(node, _corens('constructor')):
func = self._parse_function_common(ctor, ast.Function, obj)
func.is_constructor = True
obj.constructors.append(func)
obj.fields.extend(self._parse_fields(node, obj))
for prop in self._find_children(node, _corens('property')):
obj.properties.append(self._parse_property(prop, obj))
for signal in self._find_children(node, _glibns('signal')):
obj.signals.append(self._parse_function_common(signal, ast.Signal, obj))
self._namespace.append(obj)
def _parse_callback(self, node):
callback = self._parse_function_common(node, ast.Callback)
self._namespace.append(callback)
def _parse_function(self, node):
function = self._parse_function_common(node, ast.Function)
self._namespace.append(function)
def _parse_parameter(self, node):
typeval = self._parse_type(node)
param = ast.Parameter(node.attrib.get('name'),
typeval,
node.attrib.get('direction') or ast.PARAM_DIRECTION_IN,
node.attrib.get('transfer-ownership'),
node.attrib.get('nullable') == '1',
node.attrib.get('optional') == '1',
node.attrib.get('allow-none') == '1',
node.attrib.get('scope'),
node.attrib.get('caller-allocates') == '1')
self._parse_generic_attribs(node, param)
return param
def _parse_function_common(self, node, klass, parent=None):
name = node.attrib['name']
returnnode = node.find(_corens('return-value'))
if not returnnode:
raise ValueError('node %r has no return-value' % (name, ))
transfer = returnnode.attrib.get('transfer-ownership')
nullable = returnnode.attrib.get('nullable') == '1'
retval = ast.Return(self._parse_type(returnnode), nullable, transfer)
self._parse_generic_attribs(returnnode, retval)
parameters = []
throws = (node.attrib.get('throws') == '1')
if klass is ast.Callback:
func = klass(name, retval, parameters, throws,
node.attrib.get(_cns('type')))
elif klass is ast.Function:
identifier = node.attrib.get(_cns('identifier'))
func = klass(name, retval, parameters, throws, identifier)
elif klass is ast.VFunction:
func = klass(name, retval, parameters, throws)
elif klass is ast.Signal:
func = klass(name, retval, parameters,
when=node.attrib.get('when'),
no_recurse=node.attrib.get('no-recurse', '0') == '1',
detailed=node.attrib.get('detailed', '0') == '1',
action=node.attrib.get('action', '0') == '1',
no_hooks=node.attrib.get('no-hooks', '0') == '1')
else:
assert False
func.shadows = node.attrib.get('shadows', None)
func.shadowed_by = node.attrib.get('shadowed-by', None)
func.moved_to = node.attrib.get('moved-to', None)
func.parent = parent
parameters_node = node.find(_corens('parameters'))
if (parameters_node is not None):
paramnode = self._find_first_child(parameters_node, _corens('instance-parameter'))
if paramnode:
func.instance_parameter = self._parse_parameter(paramnode)
for paramnode in self._find_children(parameters_node, _corens('parameter')):
parameters.append(self._parse_parameter(paramnode))
for i, paramnode in enumerate(self._find_children(parameters_node,
_corens('parameter'))):
param = parameters[i]
self._parse_type_array_length(parameters, paramnode, param.type)
closure = paramnode.attrib.get('closure')
if closure:
idx = int(closure)
assert idx < len(parameters), "%d >= %d" % (idx, len(parameters))
param.closure_name = parameters[idx].argname
destroy = paramnode.attrib.get('destroy')
if destroy:
idx = int(destroy)
assert idx < len(parameters), "%d >= %d" % (idx, len(parameters))
param.destroy_name = parameters[idx].argname
self._parse_type_array_length(parameters, returnnode, retval.type)
self._parse_generic_attribs(node, func)
self._namespace.track(func)
return func
def _parse_fields(self, node, obj):
res = []
names = (_corens('field'), _corens('record'), _corens('union'), _corens('callback'))
for child in node.getchildren():
if child.tag in names:
fieldobj = self._parse_field(child, obj)
res.append(fieldobj)
return res
def _parse_compound(self, cls, node):
compound = cls(node.attrib.get('name'),
ctype=node.attrib.get(_cns('type')),
disguised=node.attrib.get('disguised') == '1',
gtype_name=node.attrib.get(_glibns('type-name')),
get_type=node.attrib.get(_glibns('get-type')),
c_symbol_prefix=node.attrib.get(_cns('symbol-prefix')))
if node.attrib.get('foreign') == '1':
compound.foreign = True
self._parse_generic_attribs(node, compound)
if not self._types_only:
compound.fields.extend(self._parse_fields(node, compound))
for method in self._find_children(node, _corens('method')):
func = self._parse_function_common(method, ast.Function, compound)
func.is_method = True
compound.methods.append(func)
for i, fieldnode in enumerate(self._find_children(node, _corens('field'))):
field = compound.fields[i]
self._parse_type_array_length(compound.fields, fieldnode, field.type)
for func in self._find_children(node, _corens('function')):
compound.static_methods.append(
self._parse_function_common(func, ast.Function, compound))
for ctor in self._find_children(node, _corens('constructor')):
func = self._parse_function_common(ctor, ast.Function, compound)
func.is_constructor = True
compound.constructors.append(func)
return compound
def _parse_record(self, node, anonymous=False):
struct = self._parse_compound(ast.Record, node)
is_gtype_struct_for = node.attrib.get(_glibns('is-gtype-struct-for'))
if is_gtype_struct_for is not None:
struct.is_gtype_struct_for = self._namespace.type_from_name(is_gtype_struct_for)
if not anonymous:
self._namespace.append(struct)
return struct
def _parse_union(self, node, anonymous=False):
union = self._parse_compound(ast.Union, node)
if not anonymous:
self._namespace.append(union)
return union
def _parse_type_simple(self, typenode):
# ast.Fields can contain inline callbacks
if typenode.tag == _corens('callback'):
typeval = self._namespace.type_from_name(typenode.attrib['name'])
typeval.ctype = typenode.attrib.get(_cns('type'))
return typeval
# ast.Arrays have their own toplevel XML
elif typenode.tag == _corens('array'):
array_type = typenode.attrib.get('name')
element_type = self._parse_type(typenode)
array_ctype = typenode.attrib.get(_cns('type'))
ret = ast.Array(array_type, element_type, ctype=array_ctype)
# zero-terminated defaults to true...
zero = typenode.attrib.get('zero-terminated')
if zero and zero == '0':
ret.zeroterminated = False
fixed_size = typenode.attrib.get('fixed-size')
if fixed_size:
ret.size = int(fixed_size)
return ret
elif typenode.tag == _corens('varargs'):
return ast.Varargs()
elif typenode.tag == _corens('type'):
name = typenode.attrib.get('name')
ctype = typenode.attrib.get(_cns('type'))
if name is None:
if ctype is None:
return ast.TypeUnknown()
return ast.Type(ctype=ctype)
elif name in ['GLib.List', 'GLib.SList']:
subchild = self._find_first_child(typenode,
map(_corens, ('callback', 'array',
'varargs', 'type')))
if subchild is not None:
element_type = self._parse_type(typenode)
else:
element_type = ast.TYPE_ANY
return ast.List(name, element_type, ctype=ctype)
elif name == 'GLib.HashTable':
subchildren = self._find_children(typenode, _corens('type'))
subchildren_types = map(self._parse_type_simple, subchildren)
while len(subchildren_types) < 2:
subchildren_types.append(ast.TYPE_ANY)
return ast.Map(subchildren_types[0], subchildren_types[1], ctype=ctype)
else:
return self._namespace.type_from_name(name, ctype)
else:
assert False, "Failed to parse inner type"
def _parse_type(self, node):
for name in map(_corens, ('callback', 'array', 'varargs', 'type')):
typenode = node.find(name)
if typenode is not None:
return self._parse_type_simple(typenode)
assert False, "Failed to parse toplevel type"
def _parse_type_array_length(self, siblings, node, typeval):
"""A hack necessary to handle the integer parameter/field indexes on
array types."""
typenode = node.find(_corens('array'))
if typenode is None:
return
lenidx = typenode.attrib.get('length')
if lenidx is not None:
idx = int(lenidx)
assert idx < len(siblings), "%r %d >= %d" % (parent, idx, len(siblings))
if isinstance(siblings[idx], ast.Field):
typeval.length_param_name = siblings[idx].name
else:
typeval.length_param_name = siblings[idx].argname
def _parse_boxed(self, node):
obj = ast.Boxed(node.attrib[_glibns('name')],
gtype_name=node.attrib[_glibns('type-name')],
get_type=node.attrib[_glibns('get-type')],
c_symbol_prefix=node.attrib.get(_cns('symbol-prefix')))
self._parse_generic_attribs(node, obj)
if self._types_only:
self._namespace.append(obj)
return
for method in self._find_children(node, _corens('method')):
func = self._parse_function_common(method, ast.Function, obj)
func.is_method = True
obj.methods.append(func)
for ctor in self._find_children(node, _corens('constructor')):
obj.constructors.append(
self._parse_function_common(ctor, ast.Function, obj))
for callback in self._find_children(node, _corens('callback')):
obj.fields.append(
self._parse_function_common(callback, ast.Callback, obj))
self._namespace.append(obj)
def _parse_field(self, node, parent):
type_node = None
anonymous_node = None
if node.tag in map(_corens, ('record', 'union')):
anonymous_elt = node
else:
anonymous_elt = self._find_first_child(node, _corens('callback'))
if anonymous_elt is not None:
if anonymous_elt.tag == _corens('callback'):
anonymous_node = self._parse_function_common(anonymous_elt, ast.Callback)
elif anonymous_elt.tag == _corens('record'):
anonymous_node = self._parse_record(anonymous_elt, anonymous=True)
elif anonymous_elt.tag == _corens('union'):
anonymous_node = self._parse_union(anonymous_elt, anonymous=True)
else:
assert False, anonymous_elt.tag
else:
assert node.tag == _corens('field'), node.tag
type_node = self._parse_type(node)
field = ast.Field(node.attrib.get('name'),
type_node,
node.attrib.get('readable') != '0',
node.attrib.get('writable') == '1',
node.attrib.get('bits'),
anonymous_node=anonymous_node)
field.private = node.attrib.get('private') == '1'
field.parent = parent
self._parse_generic_attribs(node, field)
return field
def _parse_property(self, node, parent):
prop = ast.Property(node.attrib['name'],
self._parse_type(node),
node.attrib.get('readable') != '0',
node.attrib.get('writable') == '1',
node.attrib.get('construct') == '1',
node.attrib.get('construct-only') == '1',
node.attrib.get('transfer-ownership'))
self._parse_generic_attribs(node, prop)
prop.parent = parent
return prop
def _parse_member(self, node):
member = ast.Member(node.attrib['name'],
node.attrib['value'],
node.attrib.get(_cns('identifier')),
node.attrib.get(_glibns('nick')))
self._parse_generic_attribs(node, member)
return member
def _parse_constant(self, node):
type_node = self._parse_type(node)
constant = ast.Constant(node.attrib['name'],
type_node,
node.attrib['value'],
node.attrib.get(_cns('type')))
self._parse_generic_attribs(node, constant)
self._namespace.append(constant)
def _parse_enumeration_bitfield(self, node):
name = node.attrib.get('name')
ctype = node.attrib.get(_cns('type'))
get_type = node.attrib.get(_glibns('get-type'))
type_name = node.attrib.get(_glibns('type-name'))
glib_error_domain = node.attrib.get(_glibns('error-domain'))
if node.tag == _corens('bitfield'):
klass = ast.Bitfield
else:
klass = ast.Enum
members = []
obj = klass(name, ctype,
members=members,
gtype_name=type_name,
get_type=get_type)
obj.error_domain = glib_error_domain
obj.ctype = ctype
self._parse_generic_attribs(node, obj)
if self._types_only:
self._namespace.append(obj)
return
for member_node in self._find_children(node, _corens('member')):
member = self._parse_member(member_node)
member.parent = obj
members.append(member)
for func_node in self._find_children(node, _corens('function')):
func = self._parse_function_common(func_node, ast.Function)
func.parent = obj
obj.static_methods.append(func)
self._namespace.append(obj)
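def _example_parse_sketch(path='/usr/share/gir-1.0/GLib-2.0.gir'):
    """Hedged usage sketch (editorial addition, not part of the original module).
    Parses a .gir file and returns the namespace name and version; the default
    path is a hypothetical placeholder for wherever GIR files are installed.
    """
    parser = GIRParser(types_only=True)
    parser.parse(path)
    namespace = parser.get_namespace()
    return namespace.name, namespace.version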
|
gpl-2.0
| -4,067,458,910,382,923,000 | 42.259319 | 94 | 0.558878 | false | 4.103151 | false | false | false |
philanthropy-u/edx-platform
|
lms/djangoapps/courseware/views/index.py
|
1
|
25792
|
"""
View for Courseware Index
"""
# pylint: disable=attribute-defined-outside-init
import logging
import urllib
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth.views import redirect_to_login
from django.urls import reverse
from django.http import Http404
from django.template.context_processors import csrf
from django.utils.decorators import method_decorator
from django.utils.functional import cached_property
from django.utils.translation import ugettext as _
from django.views.decorators.cache import cache_control
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.generic import View
from edx_django_utils.monitoring import set_custom_metrics_for_course_key
from opaque_keys.edx.keys import CourseKey
from web_fragments.fragment import Fragment
from edxmako.shortcuts import render_to_response, render_to_string
from lms.djangoapps.courseware.courses import allow_public_access
from lms.djangoapps.courseware.exceptions import CourseAccessRedirect
from lms.djangoapps.experiments.utils import get_experiment_user_metadata_context
from lms.djangoapps.gating.api import get_entrance_exam_score_ratio, get_entrance_exam_usage_key
from lms.djangoapps.grades.course_grade_factory import CourseGradeFactory
from openedx.core.djangoapps.crawlers.models import CrawlersConfig
from openedx.core.djangoapps.lang_pref import LANGUAGE_KEY
from openedx.core.djangoapps.user_api.preferences.api import get_user_preference
from openedx.core.djangoapps.util.user_messages import PageLevelMessages
from openedx.core.djangoapps.waffle_utils import WaffleSwitchNamespace
from openedx.core.djangolib.markup import HTML, Text
from openedx.features.course_experience import (
COURSE_OUTLINE_PAGE_FLAG, default_course_url_name, COURSE_ENABLE_UNENROLLED_ACCESS_FLAG
)
from openedx.features.course_experience.views.course_sock import CourseSockFragmentView
from openedx.features.enterprise_support.api import data_sharing_consent_required
from shoppingcart.models import CourseRegistrationCode
from student.views import is_course_blocked
from util.views import ensure_valid_course_key
from xmodule.modulestore.django import modulestore
from xmodule.course_module import COURSE_VISIBILITY_PUBLIC
from xmodule.x_module import PUBLIC_VIEW, STUDENT_VIEW
from .views import CourseTabView
from ..access import has_access
from ..courses import check_course_access, get_course_with_access, get_current_child, get_studio_url
from ..entrance_exams import (
course_has_entrance_exam,
get_entrance_exam_content,
user_can_skip_entrance_exam,
user_has_passed_entrance_exam
)
from ..masquerade import (
setup_masquerade,
check_content_start_date_for_masquerade_user
)
from ..model_data import FieldDataCache
from ..module_render import get_module_for_descriptor, toc_for_course
log = logging.getLogger("edx.courseware.views.index")
TEMPLATE_IMPORTS = {'urllib': urllib}
CONTENT_DEPTH = 2
class CoursewareIndex(View):
"""
View class for the Courseware page.
"""
@cached_property
def enable_unenrolled_access(self):
return COURSE_ENABLE_UNENROLLED_ACCESS_FLAG.is_enabled(self.course_key)
@method_decorator(ensure_csrf_cookie)
@method_decorator(cache_control(no_cache=True, no_store=True, must_revalidate=True))
@method_decorator(ensure_valid_course_key)
@method_decorator(data_sharing_consent_required)
def get(self, request, course_id, chapter=None, section=None, position=None):
"""
Displays courseware accordion and associated content. If course, chapter,
and section are all specified, renders the page, or returns an error if they
are invalid.
If section is not specified, displays the accordion opened to the right
chapter.
If neither chapter or section are specified, displays the user's most
recent chapter, or the first chapter if this is the user's first visit.
Arguments:
request: HTTP request
course_id (unicode): course id
chapter (unicode): chapter url_name
section (unicode): section url_name
position (unicode): position in module, eg of <sequential> module
"""
self.course_key = CourseKey.from_string(course_id)
if not (request.user.is_authenticated or self.enable_unenrolled_access):
return redirect_to_login(request.get_full_path())
self.original_chapter_url_name = chapter
self.original_section_url_name = section
self.chapter_url_name = chapter
self.section_url_name = section
self.position = position
self.chapter, self.section = None, None
self.course = None
self.url = request.path
try:
set_custom_metrics_for_course_key(self.course_key)
self._clean_position()
with modulestore().bulk_operations(self.course_key):
self.view = STUDENT_VIEW
# Do the enrollment check if enable_unenrolled_access is not enabled.
self.course = get_course_with_access(
request.user, 'load', self.course_key,
depth=CONTENT_DEPTH,
check_if_enrolled=not self.enable_unenrolled_access,
)
if self.enable_unenrolled_access:
# Check if the user is considered enrolled (i.e. is an enrolled learner or staff).
try:
check_course_access(
self.course, request.user, 'load', check_if_enrolled=True,
)
except CourseAccessRedirect as exception:
# If the user is not considered enrolled:
if self.course.course_visibility == COURSE_VISIBILITY_PUBLIC:
# If course visibility is public show the XBlock public_view.
self.view = PUBLIC_VIEW
else:
# Otherwise deny them access.
raise exception
else:
# If the user is considered enrolled show the default XBlock student_view.
pass
self.is_staff = has_access(request.user, 'staff', self.course)
self._setup_masquerade_for_effective_user()
return self.render(request)
except Exception as exception: # pylint: disable=broad-except
return CourseTabView.handle_exceptions(request, self.course, exception)
def _setup_masquerade_for_effective_user(self):
"""
Setup the masquerade information to allow the request to
be processed for the requested effective user.
"""
self.real_user = self.request.user
self.masquerade, self.effective_user = setup_masquerade(
self.request,
self.course_key,
self.is_staff,
reset_masquerade_data=True
)
# Set the user in the request to the effective user.
self.request.user = self.effective_user
def render(self, request):
"""
Render the index page.
"""
self._redirect_if_needed_to_pay_for_course()
self._prefetch_and_bind_course(request)
if self.course.has_children_at_depth(CONTENT_DEPTH):
self._reset_section_to_exam_if_required()
self.chapter = self._find_chapter()
self.section = self._find_section()
if self.chapter and self.section:
self._redirect_if_not_requested_section()
self._save_positions()
self._prefetch_and_bind_section()
check_content_start_date_for_masquerade_user(self.course_key, self.effective_user, request,
self.course.start, self.chapter.start, self.section.start)
if not request.user.is_authenticated:
qs = urllib.urlencode({
'course_id': self.course_key,
'enrollment_action': 'enroll',
'email_opt_in': False,
})
allow_anonymous = allow_public_access(self.course, [COURSE_VISIBILITY_PUBLIC])
if not allow_anonymous:
PageLevelMessages.register_warning_message(
request,
Text(_("You are not signed in. To see additional course content, {sign_in_link} or "
"{register_link}, and enroll in this course.")).format(
sign_in_link=HTML('<a href="{url}">{sign_in_label}</a>').format(
sign_in_label=_('sign in'),
url='{}?{}'.format(reverse('signin_user'), qs),
),
register_link=HTML('<a href="/{url}">{register_label}</a>').format(
register_label=_('register'),
url='{}?{}'.format(reverse('register_user'), qs),
),
)
)
return render_to_response('courseware/courseware.html', self._create_courseware_context(request))
def _redirect_if_not_requested_section(self):
"""
If the resulting section and chapter are different from what was initially
requested, redirect back to the index page, but with an updated URL that includes
the correct section and chapter values. We do this so that our analytics events
and error logs have the appropriate URLs.
"""
if (
self.chapter.url_name != self.original_chapter_url_name or
(self.original_section_url_name and self.section.url_name != self.original_section_url_name)
):
raise CourseAccessRedirect(
reverse(
'courseware_section',
kwargs={
'course_id': unicode(self.course_key),
'chapter': self.chapter.url_name,
'section': self.section.url_name,
},
)
)
def _clean_position(self):
"""
Verify that the given position is an integer. If it is not positive, set it to 1.
"""
if self.position is not None:
try:
self.position = max(int(self.position), 1)
except ValueError:
raise Http404(u"Position {} is not an integer!".format(self.position))
def _redirect_if_needed_to_pay_for_course(self):
"""
Redirect to dashboard if the course is blocked due to non-payment.
"""
redeemed_registration_codes = []
if self.request.user.is_authenticated:
self.real_user = User.objects.prefetch_related("groups").get(id=self.real_user.id)
redeemed_registration_codes = CourseRegistrationCode.objects.filter(
course_id=self.course_key,
registrationcoderedemption__redeemed_by=self.real_user
)
if is_course_blocked(self.request, redeemed_registration_codes, self.course_key):
            # Registration codes may be generated via the Bulk Purchase Scenario;
            # for invoice-generated registration codes we only need to check
            # whether their invoice is valid or not.
# TODO Update message to account for the fact that the user is not authenticated.
log.warning(
u'User %s cannot access the course %s because payment has not yet been received',
self.real_user,
unicode(self.course_key),
)
raise CourseAccessRedirect(reverse('dashboard'))
def _reset_section_to_exam_if_required(self):
"""
Check to see if an Entrance Exam is required for the user.
"""
if not user_can_skip_entrance_exam(self.effective_user, self.course):
exam_chapter = get_entrance_exam_content(self.effective_user, self.course)
if exam_chapter and exam_chapter.get_children():
exam_section = exam_chapter.get_children()[0]
if exam_section:
self.chapter_url_name = exam_chapter.url_name
self.section_url_name = exam_section.url_name
def _get_language_preference(self):
"""
Returns the preferred language for the actual user making the request.
"""
language_preference = settings.LANGUAGE_CODE
if self.request.user.is_authenticated:
language_preference = get_user_preference(self.real_user, LANGUAGE_KEY)
return language_preference
def _is_masquerading_as_student(self):
"""
Returns whether the current request is masquerading as a student.
"""
return self.masquerade and self.masquerade.role == 'student'
def _is_masquerading_as_specific_student(self):
"""
        Returns whether the current request is masquerading as a specific student.
"""
return self._is_masquerading_as_student() and self.masquerade.user_name
def _find_block(self, parent, url_name, block_type, min_depth=None):
"""
Finds the block in the parent with the specified url_name.
If not found, calls get_current_child on the parent.
"""
child = None
if url_name:
child = parent.get_child_by(lambda m: m.location.block_id == url_name)
if not child:
# User may be trying to access a child that isn't live yet
if not self._is_masquerading_as_student():
raise Http404('No {block_type} found with name {url_name}'.format(
block_type=block_type,
url_name=url_name,
))
elif min_depth and not child.has_children_at_depth(min_depth - 1):
child = None
if not child:
child = get_current_child(parent, min_depth=min_depth, requested_child=self.request.GET.get("child"))
return child
def _find_chapter(self):
"""
Finds the requested chapter.
"""
return self._find_block(self.course, self.chapter_url_name, 'chapter', CONTENT_DEPTH - 1)
def _find_section(self):
"""
Finds the requested section.
"""
if self.chapter:
return self._find_block(self.chapter, self.section_url_name, 'section')
def _prefetch_and_bind_course(self, request):
"""
        Prefetches all descendant data for the requested course and
        sets up the runtime, which binds the request user to the course.
"""
self.field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
self.course_key,
self.effective_user,
self.course,
depth=CONTENT_DEPTH,
read_only=CrawlersConfig.is_crawler(request),
)
self.course = get_module_for_descriptor(
self.effective_user,
self.request,
self.course,
self.field_data_cache,
self.course_key,
course=self.course,
)
def _prefetch_and_bind_section(self):
"""
Prefetches all descendant data for the requested section and
sets up the runtime, which binds the request user to the section.
"""
# Pre-fetch all descendant data
self.section = modulestore().get_item(self.section.location, depth=None, lazy=False)
self.field_data_cache.add_descriptor_descendents(self.section, depth=None)
# Bind section to user
self.section = get_module_for_descriptor(
self.effective_user,
self.request,
self.section,
self.field_data_cache,
self.course_key,
self.position,
course=self.course,
)
def _save_positions(self):
"""
Save where we are in the course and chapter.
"""
save_child_position(self.course, self.chapter_url_name)
save_child_position(self.chapter, self.section_url_name)
    # TODO: move this method into the philu app
def can_view_score(self):
"""
Check if user is allowed to view score
:return: Boolean
"""
from lms.djangoapps.philu_api.helpers import get_course_custom_settings
from courseware.access import get_user_role
course_custom_settings = get_course_custom_settings(self.course.course_id)
current_user_role = get_user_role(self.request.user, self.course.course_id)
return course_custom_settings.show_grades or current_user_role in ["staff", 'instructor']
def _create_courseware_context(self, request):
"""
Returns and creates the rendering context for the courseware.
Also returns the table of contents for the courseware.
"""
course_url_name = default_course_url_name(self.course.id)
course_url = reverse(course_url_name, kwargs={'course_id': unicode(self.course.id)})
show_grades = self.can_view_score()
courseware_context = {
'show_grades': show_grades,
'csrf': csrf(self.request)['csrf_token'],
'course': self.course,
'course_url': course_url,
'chapter': self.chapter,
'section': self.section,
'init': '',
'fragment': Fragment(),
'staff_access': self.is_staff,
'masquerade': self.masquerade,
'supports_preview_menu': True,
'studio_url': get_studio_url(self.course, 'course'),
'xqa_server': settings.FEATURES.get('XQA_SERVER', "http://your_xqa_server.com"),
'bookmarks_api_url': reverse('bookmarks'),
'language_preference': self._get_language_preference(),
'disable_optimizely': not WaffleSwitchNamespace('RET').is_enabled('enable_optimizely_in_courseware'),
'section_title': None,
'sequence_title': None,
'disable_accordion': COURSE_OUTLINE_PAGE_FLAG.is_enabled(self.course.id),
}
courseware_context.update(
get_experiment_user_metadata_context(
self.course,
self.effective_user,
)
)
table_of_contents = toc_for_course(
self.effective_user,
self.request,
self.course,
self.chapter_url_name,
self.section_url_name,
self.field_data_cache,
)
# TODO: Move this section out as we are changing built in edx code
default_chapter = ''
if self.chapter:
default_chapter = self.chapter.display_name
if self.section:
default_chapter = "%s-%s" % (default_chapter, self.section.display_name)
active_tab = self.request.GET.get('active_tab', default_chapter)
courseware_context['toc'] = table_of_contents
courseware_context['active_tab'] = active_tab
courseware_context['accordion'] = render_accordion(
self.request,
self.course,
table_of_contents['chapters'],
)
courseware_context['course_sock_fragment'] = CourseSockFragmentView().render_to_fragment(
request, course=self.course)
# entrance exam data
self._add_entrance_exam_to_context(courseware_context)
if self.section:
# chromeless data
if self.section.chrome:
chrome = [s.strip() for s in self.section.chrome.lower().split(",")]
if 'accordion' not in chrome:
courseware_context['disable_accordion'] = True
if 'tabs' not in chrome:
courseware_context['disable_tabs'] = True
# default tab
if self.section.default_tab:
courseware_context['default_tab'] = self.section.default_tab
# section data
courseware_context['section_title'] = self.section.display_name_with_default
section_context = self._create_section_context(
table_of_contents['previous_of_active_section'],
table_of_contents['next_of_active_section'],
)
courseware_context['fragment'] = self.section.render(self.view, section_context)
if self.section.position and self.section.has_children:
self._add_sequence_title_to_context(courseware_context)
return courseware_context
def _add_sequence_title_to_context(self, courseware_context):
"""
Adds sequence title to the given context.
If we're rendering a section with some display items, but position
exceeds the length of the displayable items, default the position
to the first element.
"""
display_items = self.section.get_display_items()
if not display_items:
return
if self.section.position > len(display_items):
self.section.position = 1
courseware_context['sequence_title'] = display_items[self.section.position - 1].display_name_with_default
def _add_entrance_exam_to_context(self, courseware_context):
"""
Adds entrance exam related information to the given context.
"""
if course_has_entrance_exam(self.course) and getattr(self.chapter, 'is_entrance_exam', False):
courseware_context['entrance_exam_passed'] = user_has_passed_entrance_exam(self.effective_user, self.course)
courseware_context['entrance_exam_current_score'] = get_entrance_exam_score_ratio(
CourseGradeFactory().read(self.effective_user, self.course),
get_entrance_exam_usage_key(self.course),
)
def _create_section_context(self, previous_of_active_section, next_of_active_section):
"""
Returns and creates the rendering context for the section.
"""
def _compute_section_url(section_info, requested_child):
"""
Returns the section URL for the given section_info with the given child parameter.
"""
return "{url}?child={requested_child}".format(
url=reverse(
'courseware_section',
args=[unicode(self.course_key), section_info['chapter_url_name'], section_info['url_name']],
),
requested_child=requested_child,
)
# NOTE (CCB): Pull the position from the URL for un-authenticated users. Otherwise, pull the saved
# state from the data store.
position = None if self.request.user.is_authenticated else self.position
section_context = {
'activate_block_id': self.request.GET.get('activate_block_id'),
'requested_child': self.request.GET.get("child"),
'progress_url': reverse('progress', kwargs={'course_id': unicode(self.course_key)}),
'user_authenticated': self.request.user.is_authenticated,
'position': position,
}
if previous_of_active_section:
section_context['prev_url'] = _compute_section_url(previous_of_active_section, 'last')
if next_of_active_section:
section_context['next_url'] = _compute_section_url(next_of_active_section, 'first')
# sections can hide data that masquerading staff should see when debugging issues with specific students
section_context['specific_masquerade'] = self._is_masquerading_as_specific_student()
return section_context
def render_accordion(request, course, table_of_contents):
"""
Returns the HTML that renders the navigation for the given course.
Expects the table_of_contents to have data on each chapter and section,
including which ones are active.
"""
context = dict(
[
('toc', table_of_contents),
('course_id', unicode(course.id)),
('csrf', csrf(request)['csrf_token']),
('due_date_display_format', course.due_date_display_format),
] + TEMPLATE_IMPORTS.items()
)
return render_to_string('courseware/accordion.html', context)
def save_child_position(seq_module, child_name):
"""
child_name: url_name of the child
"""
for position, child in enumerate(seq_module.get_display_items(), start=1):
if child.location.block_id == child_name:
# Only save if position changed
if position != seq_module.position:
seq_module.position = position
# Save this new position to the underlying KeyValueStore
seq_module.save()
def save_positions_recursively_up(user, request, field_data_cache, xmodule, course=None):
"""
Recurses up the course tree starting from a leaf
Saving the position property based on the previous node as it goes
"""
current_module = xmodule
while current_module:
parent_location = modulestore().get_parent_location(current_module.location)
parent = None
if parent_location:
parent_descriptor = modulestore().get_item(parent_location)
parent = get_module_for_descriptor(
user,
request,
parent_descriptor,
field_data_cache,
current_module.location.course_key,
course=course
)
if parent and hasattr(parent, 'position'):
save_child_position(parent, current_module.location.block_id)
current_module = parent
|
agpl-3.0
| 928,075,862,110,087,700 | 40.734628 | 120 | 0.61426 | false | 4.298667 | false | false | false |
ljwolf/spvcm
|
spvcm/both_levels/se_se/sample.py
|
1
|
1359
|
import numpy as np
import scipy.linalg as scla
from ...utils import splogdet
from pysal.spreg.utils import spdot
def logp_rho_prec(state, val):
"""
This computes the logp of the spatial parameter using the precision, rather than the covariance. This results in fewer matrix operations in the case of a SE formulation, but not in an SMA formulation.
"""
st = state
#must truncate in logp otherwise sampling gets unstable
if (val < st.Rho_min) or (val > st.Rho_max):
return np.array([-np.inf])
PsiRhoi = st.Psi_1i(val, st.W, sparse=True)
logdet = splogdet(PsiRhoi)
eta = st.Y - st.XBetas - st.DeltaAlphas
kernel = spdot(spdot(eta.T, PsiRhoi), eta) / st.Sigma2
return .5*logdet -.5 * kernel + st.Log_Rho0(val) #since precision, no negative on ld
def logp_lambda_prec(state, val):
"""
The logp for upper level spatial parameters in this case has the same form
as a multivariate normal distribution, sampled over the variance matrix,
rather than over Y.
"""
st = state
#must truncate
if (val < st.Lambda_min) or (val > st.Lambda_max):
return np.array([-np.inf])
PsiLambdai = st.Psi_2i(val, st.M)
logdet = splogdet(PsiLambdai)
kernel = spdot(spdot(st.Alphas.T, PsiLambdai), st.Alphas) / st.Tau2
return .5*logdet - .5*kernel + st.Log_Lambda0(val)
|
mit
| 5,092,001,588,908,868,000 | 31.357143 | 204 | 0.671082 | false | 3.095672 | false | false | false |
funkring/fdoo
|
addons-funkring/at_stock/__openerp__.py
|
1
|
1542
|
# -*- coding: utf-8 -*-
# -*- encoding: utf-8 -*-
#############################################################################
#
# Copyright (c) 2007 Martin Reisenhofer <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "oerp.at Stock",
"description":"""
oerp.at Stock Base Module
=========================
* additional functions
* access rights for invoice creation
""",
"version" : "1.1",
"author" : "funkring.net",
"website": "http://www.funkring.net",
"category" : "Warehouse",
"depends" : ["at_base", "stock", "delivery", "stock_account"],
"data" : ["security.xml",
"report/stock_picking_report.xml",
"view/picking_view.xml",
"wizard/check_avail_wizard.xml"],
"auto_install": False,
"installable": True
}
|
agpl-3.0
| -4,827,440,454,047,546,000 | 34.045455 | 78 | 0.57393 | false | 3.913706 | false | false | false |
sanal-cem/heritago
|
heritago/heritages/tests/tests_models.py
|
1
|
2021
|
from django.test import TestCase
from heritages.models import Tag, Heritage, Multimedia, UserProfile, User
import os
testfile = "testfile.txt"
testfile2 = "testfile2.txt"
class ModelsTest(TestCase):
@classmethod
def setUpClass(cls):
Tag.objects.create(name="TAG_ancient")
title = "Test Mosque"
Heritage.objects.create(title=title)
file = open(testfile, "w")
file.close()
file = open(testfile2, "w")
file.close()
Multimedia.objects.create(url="B", file=testfile, heritage=Heritage.objects.get(title="Test Mosque"))
Heritage.objects.create(title="Selimiye Mosque")
Multimedia.objects.create(url="A", file=testfile2, heritage=Heritage.objects.get(title="Selimiye Mosque"))
@classmethod
def tearDownClass(cls):
try:
os.remove(testfile)
except OSError:
pass
try:
os.remove(testfile2)
except OSError:
pass
def test_tag_get(self):
ancient_tag = Tag.objects.get(name="TAG_ancient")
self.assertEqual(ancient_tag.name, "TAG_ancient")
def test_heritage_get(self):
test_mosque = Heritage.objects.get(title="Test Mosque")
self.assertEqual(test_mosque.title, "Test Mosque")
def test_heritage_delete(self):
Heritage.objects.get(title="Test Mosque").delete()
with self.assertRaises(Heritage.DoesNotExist):
Heritage.objects.get(title="Test Mosque")
def test_multimedia_delete(self):
Multimedia.objects.get(url="A").delete()
with self.assertRaises(Multimedia.DoesNotExist):
Multimedia.objects.get(url="A")
def test_userprofile(self):
user = User.objects.create(username="testuser")
user_profile = UserProfile.objects.create(user=user, email="[email protected]", note="Test Note")
        self.assertEqual("testuser", str(user_profile),
                         "__unicode__ fails, replace with __str__ then you'll pass this test")
|
mit
| 3,023,365,666,309,742,000 | 33.844828 | 114 | 0.641267 | false | 3.564374 | true | false | false |
mjhouse/scripts
|
hex/env.py
|
1
|
2609
|
import re, shlex
import math
hextag = re.compile('{{(.*?)}}',flags=re.MULTILINE|re.DOTALL)
class HexExpression:
delimiters = (' ', '=','-','+','*','(',')')
quotes = ('\"','\'')
def __init__( self, tokens ):
self.operations = {
'=':self.assign,
'-':self.minus,
'+':self.plus,
'*':self.multi,
}
if isinstance(tokens,str):
self.tokens = [ tok for tok in self.tokenize( tokens ) if tok != '(' and tok !=')' ]
else:
self.tokens = [ tok for tok in tokens if tok != '(' and tok !=')' ]
self.value = self.tokens[0]
def execute( self ):
if self.value in self.operations:
self.tokens = HexExpression(self.tokens[1:]).execute()
#self.tokens = HexExpression(self.tokens[2:]).execute()
print(self.tokens)
#return self.operations[ self.value ]( self.left, self.right )
else:
return self.tokens
def assign( self, left, right ):
print ('assign: ' + str(left) + ' ' + str(right))
return {str(left):right}
def minus( self, left, right ):
print ('minus: ' + str(left) + ' ' + str(right))
return left - right
def plus( self, left, right ):
print ('plus: ' + str(left) + ' ' + str(right))
return left + right
def multi( self, left, right ):
print ('multi: ' + str(left) + ' ' + str(right))
return left*right
def tokenize( self, string ):
acc, word, inside = [], '', False
for char in string:
if char in self.delimiters and not inside:
if word.strip(): acc.append(word)
if char.strip(): acc.append(char)
word = ''
elif char in self.quotes:
inside = not inside
word += char
else:
word += char
if word.strip(): acc.append(word)
return [ self.evaluate(tok) for tok in acc ]
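    # Example (added, derived from tokenize/evaluate above): tokenize('(= x 3)')
    # yields ['(', '=', 'x', 3, ')']; __init__ then drops the parentheses, leaving
    # ['=', 'x', 3] as self.tokens.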
def evaluate( self, token ):
token = token.strip('. ')
if token.replace('.','',1).isdigit():
if '.' in token:
return float(token)
else:
return int(token)
elif token.lower() == 'true':
return True
elif token.lower() == 'false':
return False
else:
return token
def collapse( self, tokens ):
pass
if __name__=='__main__':
    exp = HexExpression('(= this (+ (+ 2 3) (- 4 3)))')
print(exp.tokens)
result = exp.execute()
print(result)
|
gpl-2.0
| -2,340,866,839,609,065,500 | 29.337209 | 96 | 0.48716 | false | 4.013846 | false | false | false |
thejevans/pointSourceAnalysis
|
convertH5_GFU.py
|
1
|
1276
|
#!/usr/bin/env python
'''
Template to convert from HDF5 files to NPY numpy array Files. This implementation uses
parseGFU.py to parse the data
'''
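# Example invocation (added; file names are illustrative):
#   python convertH5_GFU.py --infile data/run123.h5 --outdir ./npy/
# which, given the output naming below, writes ./npy/run123.npy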
# Imports
from __future__ import print_function
from optparse import OptionParser
import tables
import numpy as np
import parseGFU
# Command parsing
usage = '%prog [options] --infile <hdf5 file> --outdir <output directory>'
parser = OptionParser(usage = usage, description=__doc__)
parser.add_option('-i', '--infile', type = 'string', default = None,
help = 'HDF5 file to be parsed')
parser.add_option('-o', '--outdir', type = 'string', default = './',
help = 'NPY file output path')
(options, args) = parser.parse_args()
inFile = options.infile
outDir = options.outdir
# If no input file given, ask for one
if inFile == None:
inFile = raw_input('Select input HDF5 file: ')
# If output directory does not end with a /, add one
if outDir.rfind('/') != len(outDir)-1:
outDir = ''.join([outDir, '/'])
# Set output file name based on input file name
outFile = ''.join([outDir, inFile[inFile.rfind('/')+1:inFile.rfind('.')], '.npy'])
# Read in .h5 file
hdf = tables.openFile(inFile)
# Convert to numpy array
arr = parseGFU.convert(hdf)
# Write out .npy file
np.save(outFile, arr)
|
gpl-3.0
| -6,438,877,276,518,161,000 | 27.355556 | 86 | 0.670063 | false | 3.393617 | false | false | false |
flgiordano/netcash
|
+/google-cloud-sdk/lib/googlecloudsdk/core/resource/resource_projection_spec.py
|
1
|
10147
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A class that creates resource projection specification."""
import sys
from googlecloudsdk.third_party.py27 import py27_copy as copy
PROJECTION_ARG_DOC = ' projection: The parent ProjectionSpec.'
ALIGN_DEFAULT = 'left'
ALIGNMENTS = {'left': lambda s, w: s.ljust(w),
'center': lambda s, w: s.center(w),
'right': lambda s, w: s.rjust(w)}
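# For example, ALIGNMENTS['right']('ab', 4) yields '  ab' (pad to width 4).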
class ProjectionSpec(object):
"""Creates a resource projection specification.
A resource projection is an expression string that contains a list of resource
keys with optional attributes. A projector is a method that takes a projection
specification and a resource object as input and produces a new
JSON-serializable object containing only the values corresponding to the keys
in the projection specification.
Optional projection key attributes may transform the values in the output
JSON-serializable object. Cloud SDK projection attributes are used for output
formatting.
A default or empty projection expression still produces a projector that
converts a resource to a JSON-serializable object.
This class is used by the resource projection expression parser to create a
resource projection specification from a projection expression string.
Attributes:
aliases: The short key name alias dictionary.
_active: The transform active level. Incremented each time Defaults() is
called. Used to determine active transforms.
attributes: Projection attributes dict indexed by attribute name.
_columns: A list of (key,_Attribute) tuples used to project a resource to
a list of columns.
_compiler: The projection compiler method for nested projections.
_empty: An empty projection _Tree used by Projector().
_name: The projection name from the expression string.
_tree: The projection _Tree root, used by
resource_projector.Evaluate() to efficiently project each resource.
symbols: Default and caller-defined transform function dict indexed by
function name.
"""
DEFAULT = 0 # _Attribute default node flag.
INNER = 1 # _Attribute inner node flag.
PROJECT = 2 # _Attribute project node flag.
class _Column(object):
"""Column key and transform attribute for self._columns.
Attributes:
key: The column key.
attribute: The column key _Attribute.
"""
def __init__(self, key, attribute):
self.key = key
self.attribute = attribute
def __init__(self, defaults=None, symbols=None, compiler=None):
"""Initializes a projection.
Args:
defaults: resource_projection_spec.ProjectionSpec defaults.
symbols: Transform function symbol table dict indexed by function name.
compiler: The projection compiler method for nested projections.
"""
self.aliases = {}
self.attributes = {}
self._columns = []
self._compiler = compiler
self._empty = None
self._name = None
self._snake_headings = {}
self._snake_re = None
if defaults:
self._active = defaults.active
self._tree = copy.deepcopy(defaults.GetRoot())
self.Defaults()
if defaults.symbols:
self.symbols = copy.deepcopy(defaults.symbols)
if symbols:
self.symbols.update(symbols)
else:
self.symbols = symbols if symbols else {}
self.aliases.update(defaults.aliases)
else:
self._active = 0
self._tree = None
self.symbols = symbols
@property
def active(self):
"""Gets the transform active level."""
return self._active
@property
def compiler(self):
"""Returns the projection compiler method for nested projections."""
return self._compiler
def _Defaults(self, projection):
"""Defaults() helper -- converts a projection to a default projection.
Args:
projection: A node in the original projection _Tree.
"""
projection.attribute.flag = self.DEFAULT
for node in projection.tree.values():
self._Defaults(node)
def _Print(self, projection, out, level):
"""Print() helper -- prints projection node p and its children.
Args:
projection: A _Tree node in the original projection.
out: The output stream.
level: The nesting level counting from 1 at the root.
"""
for key in projection.tree:
out.write('{indent} {key} : {attribute}\n'.format(
indent=' ' * level,
key=key,
attribute=projection.tree[key].attribute))
self._Print(projection.tree[key], out, level + 1)
def AddAttribute(self, name, value):
"""Adds name=value to the attributes.
Args:
name: The attribute name.
value: The attribute value
"""
self.attributes[name] = value
def DelAttribute(self, name):
"""Deletes name from the attributes if it is in the attributes.
Args:
name: The attribute name.
"""
if name in self.attributes:
del self.attributes[name]
def AddAlias(self, name, key):
"""Adds name as an alias for key to the projection.
Args:
name: The short (no dots) alias name for key.
key: The parsed key to add.
"""
self.aliases[name] = key
def AddKey(self, key, attribute):
"""Adds key and attribute to the projection.
Args:
key: The parsed key to add.
attribute: Parsed _Attribute to add.
"""
self._columns.append(self._Column(key, attribute))
def SetName(self, name):
"""Sets the projection name.
The projection name is the rightmost of the names in the expression.
Args:
name: The projection name.
"""
if self._name:
# Reset the name-specific attributes.
self.attributes = {}
self._name = name
def GetRoot(self):
"""Returns the projection root node.
Returns:
The resource_projector_parser._Tree root node.
"""
return self._tree
def SetRoot(self, root):
"""Sets the projection root node.
Args:
root: The resource_projector_parser._Tree root node.
"""
self._tree = root
def GetEmpty(self):
"""Returns the projector resource_projector_parser._Tree empty node.
Returns:
The projector resource_projector_parser._Tree empty node.
"""
return self._empty
def SetEmpty(self, node):
"""Sets the projector resource_projector_parser._Tree empty node.
The empty node is used by to apply [] empty slice projections.
Args:
node: The projector resource_projector_parser._Tree empty node.
"""
self._empty = node
def Columns(self):
"""Returns the projection columns.
Returns:
The columns in the projection, None if the entire resource is projected.
"""
return self._columns
def ColumnCount(self):
"""Returns the number of columns in the projection.
Returns:
The number of columns in the projection, 0 if the entire resource is
projected.
"""
return len(self._columns)
def Defaults(self):
"""Converts the projection to a default projection.
A default projection provides defaults for attribute values and function
symbols. An explicit non-default projection value always overrides the
corresponding default value.
"""
if self._tree:
self._Defaults(self._tree)
self._columns = []
self._active += 1
def Aliases(self):
"""Returns the short key name alias dictionary.
This dictionary maps short (no dots) names to parsed keys.
Returns:
The short key name alias dictionary.
"""
return self.aliases
def Attributes(self):
"""Returns the projection _Attribute dictionary.
Returns:
The projection _Attribute dictionary.
"""
return self.attributes
def Alignments(self):
"""Returns the projection column justfication list.
Returns:
The ordered list of alignment functions, where each function is one of
ljust [default], center, or rjust.
"""
return [ALIGNMENTS[col.attribute.align] for col in self._columns]
def Labels(self):
"""Returns the ordered list of projection labels.
Returns:
The ordered list of projection label strings, None if all labels are
empty.
"""
labels = [col.attribute.label or '' for col in self._columns]
return labels if any(labels) else None
def Name(self):
"""Returns the projection name.
The projection name is the rightmost of the names in the expression.
Returns:
The projection name, None if none was specified.
"""
return self._name
def Order(self):
"""Returns the projection sort key order suitable for use by sorted().
Example:
projection = resource_projector.Compile('...')
order = projection.Order()
if order:
rows = sorted(rows, key=itemgetter(*order))
Returns:
The list of (sort-key-index, reverse), [] if projection is None
or if all sort order indices in the projection are None (unordered).
"""
ordering = []
for i, col in enumerate(self._columns):
if col.attribute.order or col.attribute.reverse:
ordering.append(
(col.attribute.order or sys.maxint, i, col.attribute.reverse))
return [(i, reverse) for _, i, reverse in sorted(ordering)]
def Print(self, out=sys.stdout):
"""Prints the projection with indented nesting.
Args:
out: The output stream, sys.stdout if None.
"""
if self._tree:
self._Print(self._tree, out, 1)
def Tree(self):
"""Returns the projection tree root.
Returns:
The projection tree root.
"""
return self._tree
|
bsd-3-clause
| 4,696,202,010,263,796,000 | 28.669591 | 80 | 0.673401 | false | 4.373707 | false | false | false |
cmr/automatafl
|
old_python_prototype/server.py
|
1
|
6501
|
"""
Automatafl HTTP game server.
There is a central store of all currently-running games, with the board and
game state. Each game is identified by a UUIDv4. When creating a game, the
client has an option to make the game public or private. Public games will be
listed publically. A player may join any game which they have a URL to, if
there are any empty player slots. Clients are expected to open a websocket
connection to receive notifications of moves and state changes for the games
they are participating in and/or watching.
Sessions store identifiers, also UUIDv4's, which are used to look up and
track the games a given client is participating in.
The server keeps a log of committed transactions (sets of moves) for a
not-yet-implemented replay feature.
"""
from contextlib import contextmanager
import os
import uuid
from flask.json import jsonify
from flask import Flask, request, session, abort
from flask_socketio import SocketIO, join_room, leave_room, send, emit
import model
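# Sketch of the intended client flow (added; derived from the handlers below,
# payloads shown are illustrative rather than a tested API):
#   POST /game                    {"name": "my game"}  -> {"status": "ok", "uuid": ...}
#   POST /game/<uuid>/join        {"name": "a player"} -> {"status": "ok"}
#   websocket "subscribe_to_game"  {"reqid": 1, "sessid": ..., "game_id": <uuid>}
#   websocket "submit_move"        {"reqid": 2, "game_id": <uuid>, "from": [y, x], "to": [y, x]}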
class FEPleb(model.Plebeian):
def __init__(self, id, name, uuid):
self.id = id
self.name = name
self.uuid = uuid
class FEGame(object):
def __init__(self, name):
self.name = name
self.private = False
self.max_plebid = 0
self.sess_plebs = {}
self.uuid_plebs = {}
self.uuid = uuid.uuid4()
self.committed_moves = []
self.setup = None
self.mgame = None
def add_pleb(self, name, sessid):
pleb = FEPleb(self.max_plebid, name, uuid.uuid4())
self.sess_plebs[sessid] = pleb
self.uuid_plebs[pleb.uuid] = pleb
self.max_plebid += 1
        return pleb
    def create_game_model(self):
        # Plebeians are tracked in self.sess_plebs (there is no self.plebs attribute).
        self.mgame = model.Game(plebs=list(self.sess_plebs.values()), setup=self.setup)
    def pleb_uuids(self):
        return [pleb.uuid for pleb in self.sess_plebs.values()]
def pleb_from_sess(self, sess):
return self.sess_plebs.get(sess)
def pleb_from_uuid(self, uuid):
return self.uuid_plebs.get(uuid)
def serialize(self):
return {
'board': None if self.mgame is None else self.mgame.Serialize(),
'name': self.name,
'players': len(self.sess_plebs),
}
# TODO: Setup configuration (choosing initial board etc)
def is_coord(thing):
# Coordinates are a [y, x] pair; JSON should deserialize them as a list.
return isinstance(thing, list) and len(thing) == 2 and isinstance(thing[0], int)
# Map from UUID to dict.
client_session_states = {}
# Map from UUID to FEGame
current_games = {}
app = Flask(__name__)
app.secret_key = os.urandom(32)
socketio = SocketIO(app)
def sess_uuid():
if "sess" not in session:
session["sess"] = uuid.uuid4()
return session["sess"]
def client_sess_state():
uid = sess_uuid()
if uid not in client_session_states:
d = {}
client_session_states[uid] = d
d["in_games"] = []
return client_session_states.get(uid)
@app.route("/game", methods=["GET"])
def list_games():
return jsonify([
feg.serialize()
for feg in current_games.values()
if not feg.private or feg.pleb_from_sess(sess_uuid())
])
@app.route("/game/<uuid:gameid>", methods=["GET"])
def get_game(gameid):
feg = current_games.get(gameid, None)
if feg is None:
abort(404)
return jsonify({"status": "ok", "game": feg.serialize()})
@app.route("/game/<uuid:gameid>/join", methods=["POST"])
def join_game(gameid):
feg = current_games.get(gameid, None)
if feg is None:
abort(404)
if hasattr(feg, "mgame"):
abort(403)
j = request.get_json()
if "name" not in j:
abort(400)
feg.add_pleb(j["name"], sess_uuid())
return jsonify({"status": "ok"})
@app.route("/game", methods=["POST"])
def make_game():
j = request.get_json()
if "name" not in j:
abort(400)
feg = FEGame(j["name"])
current_games[feg.uuid] = feg
return jsonify({"status": "ok", "uuid": feg.uuid})
@socketio.on("subscribe_to_game")
def subscribe_to_game(msg):
if "reqid" not in msg:
return {"status": "error", "reqid": 0, "error": "NO_REQID"}
elif "sessid" not in msg:
return {"status": "error", "reqid": msg["reqid"], "error": "NO_SESSID"}
elif "game_id" not in msg:
return {"status": "error", "reqid": msg["reqid"], "error": "NEED_GAME_ID"}
elif msg["game_id"] not in current_games:
return {"status": "error", "reqid": msg["reqid"], "error": "GAME_NOT_EXIST"}
elif msg["game_id"] not in client_sess_state()["in_games"]:
return {"status": "error", "reqid": msg["reqid"], "error": "NOT_IN_GAME"}
else:
join_room(msg["game_id"])
return {"status": "ok", "reqid": msg["reqid"]}
@socketio.on("unsubscribe_from_game")
def unsubscribe_from_game(msg):
if "reqid" not in msg:
return {"status": "error", "reqid": 0, "error": "NO_REQID"}
elif "game_id" not in msg:
return {"status": "error", "reqid": msg["reqid"], "error": "NEED_GAME_ID"}
else:
leave_room(msg["game_id"])
return {"status": "ok", "reqid": msg["reqid"]}
@socketio.on("submit_move")
def submit_move(msg):
s = client_sess_state()
if "reqid" not in msg:
return {"status": "error", "reqid": 0, "error": "NO_REQID"}
elif "game_id" not in msg:
return {"status": "error", "reqid": msg["reqid"], "error": "NEED_GAME_ID"}
elif msg["game_id"] not in s["in_games"]:
return {"status": "error", "reqid": msg["reqid"], "error": "NOT_IN_GAME"}
elif msg["game_id"] not in current_games:
return {"status": "error", "reqid": msg["reqid"], "error": "GAME_NOT_EXIST"}
elif not is_coord(msg.get("from")) or not is_coord(msg.get("to")):
return {"status": "error", "reqid": msg["reqid"], "error": "NEED_COORDS"}
else:
feg = current_games[msg["game_id"]]
plebid = feg.pleb_from_sess(sess_uuid())
iev = model.Move(plebid, msg["from"], msg["to"])
oev = feg.mgame.Handle(iev)
if len(feg.mgame.pending_moves) == len(feg.mgame.plebeians):
conflicts = feg.mgame.Resolve()
emit("resolved", {"status": "ok", "event": [c.serialize() for c in
conflicts]}, broadcast=True, room=feg.uuid)
return {"status": "ok", "reqid": msg["reqid"], "event": oev.serialize()}
@app.route("/")
def index():
return app.send_static_file("index.html")
if __name__ == "__main__":
socketio.run(app)
|
apache-2.0
| -5,343,330,481,925,989,000 | 31.505 | 84 | 0.610983 | false | 3.188328 | false | false | false |
ppy/angle
|
scripts/generate_android_bp.py
|
1
|
19660
|
# Copyright The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Generates an Android.bp file from the json output of a 'gn desc' command.
# Example usage:
# gn desc out/Android --format=json "*" > desc.json
# python scripts/generate_android_bp.py desc.json > Android.bp
import json
import sys
import re
import os
import argparse
root_targets = [
"//:libGLESv2",
"//:libGLESv1_CM",
"//:libEGL",
"//:libfeature_support",
]
sdk_version = '28'
stl = 'libc++_static'
abi_arm = 'arm'
abi_arm64 = 'arm64'
abi_x86 = 'x86'
abi_x64 = 'x86_64'
abi_targets = [abi_arm, abi_arm64, abi_x86, abi_x64]
def tabs(indent):
return ' ' * (indent * 4)
def has_child_values(value):
# Elements of the blueprint can be pruned if they are empty lists or dictionaries of empty
# lists
if isinstance(value, list):
return len(value) > 0
if isinstance(value, dict):
for (item, item_value) in value.items():
if has_child_values(item_value):
return True
return False
# This is a value leaf node
return True
def write_blueprint_key_value(output, name, value, indent=1):
if not has_child_values(value):
return
if isinstance(value, set) or isinstance(value, list):
value = list(sorted(set(value)))
if isinstance(value, list):
output.append(tabs(indent) + '%s: [' % name)
for item in value:
output.append(tabs(indent + 1) + '"%s",' % item)
output.append(tabs(indent) + '],')
return
if isinstance(value, dict):
if not value:
return
output.append(tabs(indent) + '%s: {' % name)
for (item, item_value) in value.items():
write_blueprint_key_value(output, item, item_value, indent + 1)
output.append(tabs(indent) + '},')
return
if isinstance(value, bool):
output.append(tabs(indent) + '%s: %s,' % (name, 'true' if value else 'false'))
return
output.append(tabs(indent) + '%s: "%s",' % (name, value))
def write_blueprint(output, target_type, values):
output.append('%s {' % target_type)
for (key, value) in values.items():
write_blueprint_key_value(output, key, value)
output.append('}')
def gn_target_to_blueprint_target(target, target_info):
if 'output_name' in target_info:
return target_info['output_name']
# Split the gn target name (in the form of //gn_file_path:target_name) into gn_file_path and
# target_name
target_regex = re.compile(r"^//([a-zA-Z0-9\-_/]*):([a-zA-Z0-9\-_.]+)$")
match = re.match(target_regex, target)
assert match is not None
gn_file_path = match.group(1)
target_name = match.group(2)
assert len(target_name) > 0
# Clean up the gn file path to be a valid blueprint target name.
gn_file_path = gn_file_path.replace("/", "_").replace(".", "_").replace("-", "_")
# Generate a blueprint target name by merging the gn path and target so each target is unique.
# Prepend the 'angle' prefix to all targets in the root path (empty gn_file_path).
# Skip this step if the target name already starts with 'angle' to avoid target names such as 'angle_angle_common'.
root_prefix = "angle"
if len(gn_file_path) == 0 and not target_name.startswith(root_prefix):
gn_file_path = root_prefix
# Avoid names such as _angle_common if the gn_file_path is empty.
if len(gn_file_path) > 0:
gn_file_path += "_"
return gn_file_path + target_name
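# Example of the mapping above (target names are illustrative): "//src/common:foo"
# with no output_name becomes "src_common_foo", while a root target "//:foo" is
# prefixed to "angle_foo" so blueprint target names stay unique.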
def remap_gn_path(path):
# TODO: pass the gn gen folder as an arg so it is future proof. b/150457277
remap_folders = [
('out/Android/gen/angle/', ''),
('out/Android/gen/', ''),
]
remapped_path = path
for (remap_source, remap_dest) in remap_folders:
remapped_path = remapped_path.replace(remap_source, remap_dest)
return remapped_path
def gn_path_to_blueprint_path(source):
# gn uses '//' to indicate the root directory, blueprint uses the .bp file's location
return remap_gn_path(re.sub(r'^//?', '', source))
def gn_paths_to_blueprint_paths(paths):
rebased_paths = []
for path in paths:
rebased_paths.append(gn_path_to_blueprint_path(path))
return rebased_paths
def gn_sources_to_blueprint_sources(sources):
    # Blueprints only list source files in the sources list. Headers are only referenced through
# include paths.
file_extension_allowlist = [
'.c',
'.cc',
'.cpp',
]
rebased_sources = []
for source in sources:
if os.path.splitext(source)[1] in file_extension_allowlist:
rebased_sources.append(gn_path_to_blueprint_path(source))
return rebased_sources
target_blockist = [
'//build/config:shared_library_deps',
'//third_party/vulkan-validation-layers/src:vulkan_clean_old_validation_layer_objects',
]
include_blocklist = [
'//out/Android/gen/third_party/vulkan-deps/glslang/src/include/',
]
def gn_deps_to_blueprint_deps(target_info, build_info):
static_libs = []
shared_libs = []
defaults = []
generated_headers = []
header_libs = []
if 'deps' not in target_info:
return static_libs, defaults
for dep in target_info['deps']:
if dep not in target_blockist:
dep_info = build_info[dep]
blueprint_dep_name = gn_target_to_blueprint_target(dep, dep_info)
# Depending on the dep type, blueprints reference it differently.
gn_dep_type = dep_info['type']
if gn_dep_type == 'static_library':
static_libs.append(blueprint_dep_name)
elif gn_dep_type == 'shared_library':
shared_libs.append(blueprint_dep_name)
elif gn_dep_type == 'source_set' or gn_dep_type == 'group':
defaults.append(blueprint_dep_name)
elif gn_dep_type == 'action':
generated_headers.append(blueprint_dep_name)
# Blueprints do not chain linking of static libraries.
(child_static_libs, _, _, child_generated_headers, _) = gn_deps_to_blueprint_deps(
dep_info, build_info)
# Each target needs to link all child static library dependencies.
static_libs += child_static_libs
            # Each blueprint target runs genrules in a different output directory unlike GN. If a
            # target depends on another's genrule, it won't find the outputs. Propagate generated
            # headers up the dependency stack.
generated_headers += child_generated_headers
return static_libs, shared_libs, defaults, generated_headers, header_libs
def gn_libs_to_blueprint_shared_libraries(target_info):
lib_blockist = [
'android_support',
'unwind',
]
result = []
if 'libs' in target_info:
for lib in target_info['libs']:
if lib not in lib_blockist:
android_lib = lib if '@' in lib else 'lib' + lib
result.append(android_lib)
return result
def gn_include_dirs_to_blueprint_include_dirs(target_info):
result = []
if 'include_dirs' in target_info:
for include_dir in target_info['include_dirs']:
if len(include_dir) > 0 and include_dir not in include_blocklist:
result.append(gn_path_to_blueprint_path(include_dir))
return result
def escape_quotes(string):
return string.replace("\"", "\\\"").replace("\'", "\\\'")
angle_cpu_bits_define = r'^ANGLE_IS_[0-9]+_BIT_CPU$'
def gn_cflags_to_blueprint_cflags(target_info):
result = []
# regexs of allowlisted cflags
cflag_allowlist = [
r'^-Wno-.*$', # forward cflags that disable warnings
r'-mpclmul' # forward "-mpclmul" (used by zlib)
]
for cflag_type in ['cflags', 'cflags_c', 'cflags_cc']:
if cflag_type in target_info:
for cflag in target_info[cflag_type]:
for allowlisted_cflag in cflag_allowlist:
if re.search(allowlisted_cflag, cflag):
result.append(cflag)
    # Chrome and Android use different versions of Clang which support different warning options.
# Ignore errors about unrecognized warning flags.
result.append('-Wno-unknown-warning-option')
if 'defines' in target_info:
for define in target_info['defines']:
# Don't emit ANGLE's CPU-bits define here, it will be part of the arch-specific
# information later
result.append('-D%s' % escape_quotes(define))
return result
blueprint_library_target_types = {
"static_library": "cc_library_static",
"shared_library": "cc_library_shared",
"source_set": "cc_defaults",
"group": "cc_defaults",
}
def merge_bps(bps_for_abis):
common_bp = {}
for abi in abi_targets:
for key in bps_for_abis[abi]:
if isinstance(bps_for_abis[abi][key], list):
# Find list values that are common to all ABIs
for value in bps_for_abis[abi][key]:
value_in_all_abis = True
for abi2 in abi_targets:
if key == 'defaults':
# arch-specific defaults are not supported
break
value_in_all_abis = value_in_all_abis and (key in bps_for_abis[abi2].keys(
)) and (value in bps_for_abis[abi2][key])
if value_in_all_abis:
if key in common_bp.keys():
common_bp[key].append(value)
else:
common_bp[key] = [value]
else:
if 'arch' not in common_bp.keys():
# Make sure there is an 'arch' entry to hold ABI-specific values
common_bp['arch'] = {}
for abi3 in abi_targets:
common_bp['arch'][abi3] = {}
if key in common_bp['arch'][abi].keys():
common_bp['arch'][abi][key].append(value)
else:
common_bp['arch'][abi][key] = [value]
else:
# Assume everything that's not a list is common to all ABIs
common_bp[key] = bps_for_abis[abi][key]
return common_bp
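# Example of the merge above (values are hypothetical): a cflag emitted by every ABI
# appears once at the top level of the blueprint, while an ABI-specific define ends
# up under arch: { <abi>: { cflags: [...] } }.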
def library_target_to_blueprint(target, build_info):
bps_for_abis = {}
blueprint_type = ""
for abi in abi_targets:
if target not in build_info[abi].keys():
bps_for_abis[abi] = {}
continue
target_info = build_info[abi][target]
blueprint_type = blueprint_library_target_types[target_info['type']]
bp = {'name': gn_target_to_blueprint_target(target, target_info)}
if 'sources' in target_info:
bp['srcs'] = gn_sources_to_blueprint_sources(target_info['sources'])
(bp['static_libs'], bp['shared_libs'], bp['defaults'], bp['generated_headers'],
bp['header_libs']) = gn_deps_to_blueprint_deps(target_info, build_info[abi])
bp['shared_libs'] += gn_libs_to_blueprint_shared_libraries(target_info)
bp['local_include_dirs'] = gn_include_dirs_to_blueprint_include_dirs(target_info)
bp['cflags'] = gn_cflags_to_blueprint_cflags(target_info)
bp['sdk_version'] = sdk_version
bp['stl'] = stl
if target in root_targets:
bp['vendor'] = True
bp['target'] = {'android': {'relative_install_path': 'egl'}}
bps_for_abis[abi] = bp
common_bp = merge_bps(bps_for_abis)
return blueprint_type, common_bp
def gn_action_args_to_blueprint_args(blueprint_inputs, blueprint_outputs, args):
# TODO: pass the gn gen folder as an arg so we know how to get from the gen path to the root
# path. b/150457277
remap_folders = [
# Specific special-cases first, since the other will strip the prefixes.
('gen/third_party/vulkan-deps/glslang/src/include/glslang/build_info.h',
'glslang/build_info.h'),
('third_party/vulkan-deps/glslang/src',
'external/angle/third_party/vulkan-deps/glslang/src'),
('../../', ''),
('gen/', ''),
]
result_args = []
for arg in args:
# Attempt to find if this arg is a path to one of the inputs. If it is, use the blueprint
# $(location <path>) argument instead so the path gets remapped properly to the location
# that the script is run from
remapped_path_arg = arg
for (remap_source, remap_dest) in remap_folders:
remapped_path_arg = remapped_path_arg.replace(remap_source, remap_dest)
if remapped_path_arg in blueprint_inputs or remapped_path_arg in blueprint_outputs:
result_args.append('$(location %s)' % remapped_path_arg)
elif os.path.basename(remapped_path_arg) in blueprint_outputs:
result_args.append('$(location %s)' % os.path.basename(remapped_path_arg))
else:
result_args.append(remapped_path_arg)
return result_args
blueprint_gen_types = {
"action": "cc_genrule",
}
inputs_blocklist = [
'//.git/HEAD',
]
outputs_remap = {
'build_info.h': 'glslang/build_info.h',
}
def is_input_in_tool_files(tool_files, input):
return input in tool_files
def action_target_to_blueprint(target, build_info):
target_info = build_info[target]
blueprint_type = blueprint_gen_types[target_info['type']]
bp = {'name': gn_target_to_blueprint_target(target, target_info)}
# Blueprints use only one 'srcs', merge all gn inputs into one list.
gn_inputs = []
if 'inputs' in target_info:
for input in target_info['inputs']:
if input not in inputs_blocklist:
gn_inputs.append(input)
if 'sources' in target_info:
gn_inputs += target_info['sources']
# Filter out the 'script' entry since Android.bp doesn't like the duplicate entries
if 'script' in target_info:
gn_inputs = [
input for input in gn_inputs
if not is_input_in_tool_files(target_info['script'], input)
]
bp_srcs = gn_paths_to_blueprint_paths(gn_inputs)
bp['srcs'] = bp_srcs
# genrules generate the output right into the 'root' directory. Strip any path before the
# file name.
bp_outputs = []
for gn_output in target_info['outputs']:
output = os.path.basename(gn_output)
if output in outputs_remap.keys():
output = outputs_remap[output]
bp_outputs.append(output)
bp['out'] = bp_outputs
bp['tool_files'] = [gn_path_to_blueprint_path(target_info['script'])]
# Generate the full command, $(location) refers to tool_files[0], the script
cmd = ['$(location)'] + gn_action_args_to_blueprint_args(bp_srcs, bp_outputs,
target_info['args'])
bp['cmd'] = ' '.join(cmd)
bp['sdk_version'] = sdk_version
return blueprint_type, bp
def gn_target_to_blueprint(target, build_info):
for abi in abi_targets:
gn_type = build_info[abi][target]['type']
if gn_type in blueprint_library_target_types:
return library_target_to_blueprint(target, build_info)
elif gn_type in blueprint_gen_types:
return action_target_to_blueprint(target, build_info[abi])
else:
# Target is not used by this ABI
continue
def get_gn_target_dependencies(output_dependencies, build_info, target):
if target not in output_dependencies:
output_dependencies.insert(0, target)
for dep in build_info[target]['deps']:
if dep in target_blockist:
# Blocklisted dep
continue
if dep not in build_info:
# No info for this dep, skip it
continue
# Recurse
get_gn_target_dependencies(output_dependencies, build_info, dep)
def main():
parser = argparse.ArgumentParser(
description='Generate Android blueprints from gn descriptions.')
for abi in abi_targets:
fixed_abi = abi
if abi == abi_x64:
fixed_abi = 'x64' # gn uses x64, rather than x86_64
parser.add_argument(
'gn_json_' + fixed_abi,
help=fixed_abi +
            ' gn desc in json format. Generated with \'gn desc <out_dir> --format=json "*"\'.')
args = vars(parser.parse_args())
build_info = {}
for abi in abi_targets:
fixed_abi = abi
if abi == abi_x64:
fixed_abi = 'x64' # gn uses x64, rather than x86_64
with open(args['gn_json_' + fixed_abi], 'r') as f:
build_info[abi] = json.load(f)
targets_to_write = []
for abi in abi_targets:
for root_target in root_targets:
get_gn_target_dependencies(targets_to_write, build_info[abi], root_target)
blueprint_targets = []
for target in targets_to_write:
blueprint_targets.append(gn_target_to_blueprint(target, build_info))
# Add APKs with all of the root libraries
blueprint_targets.append(('filegroup', {
'name': 'ANGLE_srcs',
'srcs': ['src/**/*.java',],
}))
blueprint_targets.append((
'java_defaults',
{
'name': 'ANGLE_java_defaults',
'sdk_version': 'system_current',
'min_sdk_version': sdk_version,
'compile_multilib': 'both',
'use_embedded_native_libs': True,
'jni_libs': [
# hack: assume abi_arm
gn_target_to_blueprint_target(target, build_info[abi_arm][target])
for target in root_targets
],
'aaptflags': [
# Don't compress *.json files
'-0 .json',
# Give com.android.angle.common Java files access to the R class
'--extra-packages com.android.angle.common',
],
'srcs': [':ANGLE_srcs'],
'plugins': ['java_api_finder',],
'privileged': True,
'owner': 'google',
}))
blueprint_targets.append((
'android_library',
{
'name': 'ANGLE_library',
'sdk_version': 'system_current',
'min_sdk_version': sdk_version,
'resource_dirs': ['src/android_system_settings/res',],
'asset_dirs': ['src/android_system_settings/assets',],
'aaptflags': [
# Don't compress *.json files
'-0 .json',
],
'manifest': 'src/android_system_settings/src/com/android/angle/AndroidManifest.xml',
'static_libs': ['androidx.preference_preference',],
}))
blueprint_targets.append(('android_app', {
'name': 'ANGLE',
'defaults': ['ANGLE_java_defaults'],
'static_libs': ['ANGLE_library'],
'manifest': 'src/android_system_settings/src/com/android/angle/AndroidManifest.xml',
}))
output = [
"""// GENERATED FILE - DO NOT EDIT.
// Generated by %s
//
// Copyright 2020 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
""" % sys.argv[0]
]
for (blueprint_type, blueprint_data) in blueprint_targets:
write_blueprint(output, blueprint_type, blueprint_data)
print('\n'.join(output))
if __name__ == '__main__':
sys.exit(main())
|
bsd-3-clause
| -6,153,206,636,641,283,000 | 33.013841 | 119 | 0.588708 | false | 3.685098 | false | false | false |
biothings/biothings_explorer
|
tests/test_apis/test_cordmolecularactivity.py
|
1
|
4943
|
import unittest
from biothings_explorer.registry import Registry
from biothings_explorer.user_query_dispatcher import SingleEdgeQueryDispatcher
from .utils import get_apis
reg = Registry()
class TestSingleHopQuery(unittest.TestCase):
def test_ma2protein(self):
"""Test gene-protein"""
seqd = SingleEdgeQueryDispatcher(
output_cls="Protein",
input_cls="MolecularActivity",
input_id="GO",
pred="related_to",
values="GO:0050626",
)
seqd.query()
self.assertTrue("PR:000015198" in seqd.G)
edges = seqd.G["GO:GO:0050626"]["PR:000015198"]
self.assertTrue("CORD Molecular Activity API" in get_apis(edges))
def test_ma2genomicentity(self):
"""Test gene-protein"""
seqd = SingleEdgeQueryDispatcher(
output_cls="GenomicEntity",
input_cls="MolecularActivity",
pred="related_to",
input_id="GO",
output_id="SO",
values="GO:0050626",
)
seqd.query()
self.assertTrue("SO:0000121" in seqd.G)
self.assertTrue("SO:0000167" in seqd.G)
def test_ma2chemicalsubstance(self):
"""Test gene-genomic entity"""
seqd = SingleEdgeQueryDispatcher(
output_cls="ChemicalSubstance",
input_cls="MolecularActivity",
input_id="GO",
output_id="GO",
values="GO:0050626",
)
seqd.query()
self.assertTrue("CHEBI:50526" in seqd.G)
edges = seqd.G["GO:GO:0050626"]["CHEBI:50526"]
self.assertTrue("CORD Molecular Activity API" in get_apis(edges))
def test_ma2gene(self):
"""Test gene-gene"""
seqd = SingleEdgeQueryDispatcher(
output_cls="Gene",
input_cls="MolecularActivity",
input_id="GO",
values="GO:0050626",
)
seqd.query()
self.assertTrue("CD55" in seqd.G)
self.assertTrue("AKT1" in seqd.G)
edges = seqd.G["GO:GO:0050626"]["CD55"]
self.assertTrue("CORD Molecular Activity API" in get_apis(edges))
def test_ma2anatomy(self):
"""Test gene-anatomy"""
seqd = SingleEdgeQueryDispatcher(
output_cls="AnatomicalEntity",
input_cls="MolecularActivity",
input_id="GO",
output_id="UBERON",
values="GO:0050626",
)
seqd.query()
self.assertTrue("UBERON:0000062" in seqd.G)
edges = seqd.G["GO:GO:0050626"]["UBERON:0000062"]
self.assertTrue("CORD Molecular Activity API" in get_apis(edges))
def test_ma2ma(self):
"""Test gene-molecular_activity"""
seqd = SingleEdgeQueryDispatcher(
output_cls="MolecularActivity",
input_cls="MolecularActivity",
input_id="GO",
output_id="MOP",
values="GO:0050626",
)
seqd.query()
self.assertTrue("MOP:0000797" in seqd.G)
edges = seqd.G["GO:GO:0050626"]["MOP:0000797"]
self.assertTrue("CORD Molecular Activity API" in get_apis(edges))
def test_ma2bp(self):
"""Test gene-biological_process"""
seqd = SingleEdgeQueryDispatcher(
output_cls="BiologicalProcess",
input_cls="MolecularActivity",
input_id="GO",
output_id="GO",
values="GO:0050626",
)
seqd.query()
self.assertTrue("GO:0006935" in seqd.G)
edges = seqd.G["GO:GO:0050626"]["GO:0006935"]
self.assertTrue("CORD Molecular Activity API" in get_apis(edges))
def test_ma2cc(self):
"""Test gene-cellular_component"""
seqd = SingleEdgeQueryDispatcher(
output_cls="CellularComponent",
input_cls="MolecularActivity",
input_id="GO",
output_id="GO",
values="GO:0050626",
)
seqd.query()
self.assertTrue("GO:0005790" in seqd.G)
edges = seqd.G["GO:GO:0050626"]["GO:0005790"]
self.assertTrue("CORD Molecular Activity API" in get_apis(edges))
def test_ma2cell(self):
"""Test gene-cell"""
seqd = SingleEdgeQueryDispatcher(
output_cls="Cell",
input_cls="MolecularActivity",
input_id="GO",
output_id="CL",
values="GO:0050626",
)
seqd.query()
self.assertTrue("CL:0000097" in seqd.G)
def test_ma2disease(self):
"""Test gene-disease"""
seqd = SingleEdgeQueryDispatcher(
output_cls="Disease",
input_cls="MolecularActivity",
input_id="GO",
output_id="DOID",
values="GO:0050626",
)
seqd.query()
self.assertTrue("DOID:1883" in seqd.G)
edges = seqd.G["GO:GO:0050626"]["DOID:1883"]
self.assertTrue("CORD Molecular Activity API" in get_apis(edges))
|
apache-2.0
| 8,533,805,272,853,411,000 | 32.856164 | 78 | 0.564839 | false | 3.57929 | true | false | false |
mithro/HDMI2USB-litex-firmware
|
targets/neso/base.py
|
1
|
5595
|
# Support for the Numato Neso Artix 7 100T Board
from migen import *
from migen.genlib.resetsync import AsyncResetSynchronizer
from litex.soc.integration.soc_core import mem_decoder
from litex.soc.integration.soc_sdram import *
from litex.soc.integration.builder import *
from litedram.modules import MT41K128M16
from litedram.phy import a7ddrphy
from litedram.core import ControllerSettings
from gateware import info
from gateware import spi_flash
from targets.utils import csr_map_update, period_ns
class _CRG(Module):
def __init__(self, platform):
self.clock_domains.cd_sys = ClockDomain()
self.clock_domains.cd_sys4x = ClockDomain(reset_less=True)
self.clock_domains.cd_sys4x_dqs = ClockDomain(reset_less=True)
self.clock_domains.cd_clk200 = ClockDomain()
self.clock_domains.cd_clk50 = ClockDomain()
clk100 = platform.request("clk100")
pll_locked = Signal()
pll_fb = Signal()
self.pll_sys = Signal()
pll_sys4x = Signal()
pll_sys4x_dqs = Signal()
pll_clk200 = Signal()
pll_clk50 = Signal()
self.specials += [
Instance("PLLE2_BASE",
p_STARTUP_WAIT="FALSE", o_LOCKED=pll_locked,
# VCO @ 1600 MHz
p_REF_JITTER1=0.01, p_CLKIN1_PERIOD=10.0,
p_CLKFBOUT_MULT=16, p_DIVCLK_DIVIDE=1,
i_CLKIN1=clk100, i_CLKFBIN=pll_fb, o_CLKFBOUT=pll_fb,
# 100 MHz
p_CLKOUT0_DIVIDE=16, p_CLKOUT0_PHASE=0.0,
o_CLKOUT0=self.pll_sys,
# 400 MHz
p_CLKOUT1_DIVIDE=4, p_CLKOUT1_PHASE=0.0,
o_CLKOUT1=pll_sys4x,
# 400 MHz dqs
p_CLKOUT2_DIVIDE=4, p_CLKOUT2_PHASE=90.0,
o_CLKOUT2=pll_sys4x_dqs,
# 200 MHz
p_CLKOUT3_DIVIDE=8, p_CLKOUT3_PHASE=0.0,
o_CLKOUT3=pll_clk200,
# 50MHz
p_CLKOUT4_DIVIDE=32, p_CLKOUT4_PHASE=0.0,
o_CLKOUT4=pll_clk50
),
Instance("BUFG", i_I=self.pll_sys, o_O=self.cd_sys.clk),
Instance("BUFG", i_I=pll_sys4x, o_O=self.cd_sys4x.clk),
Instance("BUFG", i_I=pll_sys4x_dqs, o_O=self.cd_sys4x_dqs.clk),
Instance("BUFG", i_I=pll_clk200, o_O=self.cd_clk200.clk),
Instance("BUFG", i_I=pll_clk50, o_O=self.cd_clk50.clk),
AsyncResetSynchronizer(self.cd_sys, ~pll_locked),
AsyncResetSynchronizer(self.cd_clk200, ~pll_locked),
AsyncResetSynchronizer(self.cd_clk50, ~pll_locked),
]
reset_counter = Signal(4, reset=15)
ic_reset = Signal(reset=1)
self.sync.clk200 += \
If(reset_counter != 0,
reset_counter.eq(reset_counter - 1)
).Else(
ic_reset.eq(0)
)
self.specials += Instance("IDELAYCTRL", i_REFCLK=ClockSignal("clk200"), i_RST=ic_reset)
class BaseSoC(SoCSDRAM):
csr_peripherals = (
"spiflash",
"ddrphy",
"info",
)
csr_map_update(SoCSDRAM.csr_map, csr_peripherals)
mem_map = {
"spiflash": 0x20000000, # (default shadow @0xa0000000)
}
mem_map.update(SoCSDRAM.mem_map)
def __init__(self, platform, spiflash="spiflash_1x", **kwargs):
if 'integrated_rom_size' not in kwargs:
kwargs['integrated_rom_size']=0x8000
if 'integrated_sram_size' not in kwargs:
kwargs['integrated_sram_size']=0x8000
clk_freq = int(100e6)
SoCSDRAM.__init__(self, platform, clk_freq, **kwargs)
self.submodules.crg = _CRG(platform)
self.crg.cd_sys.clk.attr.add("keep")
self.platform.add_period_constraint(self.crg.cd_sys.clk, period_ns(clk_freq))
# Basic peripherals
self.submodules.info = info.Info(platform, self.__class__.__name__)
# spi flash
spiflash_pads = platform.request(spiflash)
spiflash_pads.clk = Signal()
self.specials += Instance("STARTUPE2",
i_CLK=0, i_GSR=0, i_GTS=0, i_KEYCLEARB=0, i_PACK=0,
i_USRCCLKO=spiflash_pads.clk, i_USRCCLKTS=0, i_USRDONEO=1, i_USRDONETS=1)
spiflash_dummy = {
"spiflash_1x": 9,
"spiflash_4x": 11,
}
self.submodules.spiflash = spi_flash.SpiFlash(
spiflash_pads,
dummy=spiflash_dummy[spiflash],
div=2)
self.add_constant("SPIFLASH_PAGE_SIZE", 256)
self.add_constant("SPIFLASH_SECTOR_SIZE", 0x10000)
self.add_wb_slave(mem_decoder(self.mem_map["spiflash"]), self.spiflash.bus)
self.add_memory_region(
"spiflash", self.mem_map["spiflash"] | self.shadow_base, 16*1024*1024)
# sdram
sdram_module = MT41K128M16(self.clk_freq, "1:4")
self.submodules.ddrphy = a7ddrphy.A7DDRPHY(
platform.request("ddram"))
self.add_constant("READ_LEVELING_BITSLIP", 3)
self.add_constant("READ_LEVELING_DELAY", 14)
controller_settings = ControllerSettings(
with_bandwidth=True,
cmd_buffer_depth=8,
with_refresh=True)
self.register_sdram(self.ddrphy,
sdram_module.geom_settings,
sdram_module.timing_settings,
controller_settings=controller_settings)
SoC = BaseSoC
|
bsd-2-clause
| 329,493,176,064,576,200 | 35.809211 | 107 | 0.562645 | false | 3.008065 | false | false | false |
SergeyStaroletov/Patterns17
|
CourseWorkReports/Курсовой проект Киреков ПИ-42/Исходный код/Model/Classifiers/ImageClassifier.py
|
1
|
1143
|
import keras
import numpy as np
from keras.preprocessing import image
from .IClassifier import IClassifier
class ImageClassifier(IClassifier):
""" классификатор изображений """
def __init__(self):
self.__input_shape = None
self.__aliases = None
self.__model = None
def init_classifier(self, h5file_path, input_shape, aliases):
self.__input_shape = input_shape
self.__aliases = aliases
self.__model = keras.models.load_model(h5file_path)
def classify(self, img_path):
try:
img = image.load_img(img_path, target_size=self.__input_shape)
img = image.img_to_array(img)
x = np.expand_dims(img, axis=0) / 255
a = self.__model.predict(x)[0]
except:
            return {'ok': False, 'message': 'На вход не было подано изображение'}  # i.e. "the input was not an image"
res = []
for i in range(len(a)):
            # message template translates to "With probability {0}% this is {1}"
            res.append('С вероятностью {0}% это {1}'.format(a[i] * 100, self.__aliases[i]))
return {'ok': True, 'result': res}
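# Minimal usage sketch (illustrative only: the model file, the 150x150 input shape,
# the alias list and the image path are assumptions, not artifacts of this project):
#
#   clf = ImageClassifier()
#   clf.init_classifier('model.h5', input_shape=(150, 150), aliases=['cat', 'dog'])
#   print(clf.classify('sample.jpg'))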
|
mit
| -2,960,119,143,002,614,000 | 31.5625 | 91 | 0.568901 | false | 3.20597 | false | false | false |
gmr/mikkoo
|
mikkoo/statsd.py
|
1
|
5519
|
"""
Statsd Client that takes configuration first from the rejected configuration
file, falling back to environment variables, and finally default values.
Environment Variables:
- STATSD_HOST
- STATSD_PORT
- STATSD_PREFIX
"""
import logging
import os
import socket
from tornado import iostream
LOGGER = logging.getLogger(__name__)
class Client(object):
"""A simple statsd client that buffers counters to emit fewer UDP packets
than once per incr.
"""
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 8125
DEFAULT_PREFIX = 'mikkoo'
PAYLOAD_HOSTNAME = '{}.{}.{}.{}:{}|{}\n'
PAYLOAD_NO_HOSTNAME = '{}.{}.{}:{}|{}\n'
def __init__(self, name, settings, failure_callback):
"""
:param str name: The name of the worker for this client
:param dict settings: statsd Settings
"""
self._connected = False
self._failure_callback = failure_callback
self._hostname = socket.gethostname().split('.')[0]
self._name = name
self._settings_in = settings
self._settings = {}
self._address = (self._setting('host', self.DEFAULT_HOST),
int(self._setting('port', self.DEFAULT_PORT)))
self._prefix = self._setting('prefix', self.DEFAULT_PREFIX)
self._tcp_sock, self._udp_sock = None, None
if self._setting('tcp', False):
self._tcp_sock = self._tcp_socket()
else:
self._udp_sock = self._udp_socket()
def add_timing(self, key, value=0):
"""Add a timer value to statsd for the specified key
:param str key: The key to add the timing to
:param int or float value: The value of the timing in seconds
"""
return self._send(key, value * 1000, 'ms')
def incr(self, key, value=1):
"""Increment the counter value in statsd
:param str key: The key to increment
:param int value: The value to increment by, defaults to 1
"""
return self._send(key, value, 'c')
def set_gauge(self, key, value):
"""Set a gauge value in statsd
:param str key: The key to set the value for
:param int or float value: The value to set
"""
return self._send(key, value, 'g')
def stop(self):
"""Close the socket if connected via TCP."""
if self._tcp_sock:
self._tcp_sock.close()
def _build_payload(self, key, value, metric_type):
"""Return the """
if self._setting('include_hostname', True):
return self.PAYLOAD_HOSTNAME.format(
self._prefix, self._hostname, self._name, key, value,
metric_type)
return self.PAYLOAD_NO_HOSTNAME.format(
self._prefix, self._name, key, value, metric_type)
def _send(self, key, value, metric_type):
"""Send the specified value to the statsd daemon via UDP without a
direct socket connection.
:param str key: The key name to send
:param int or float value: The value for the key
"""
payload = self._build_payload(key, value, metric_type)
LOGGER.debug('Sending statsd payload: %r', payload)
try:
if self._tcp_sock:
return self._tcp_sock.write(payload.encode('utf-8'))
else:
self._udp_sock.sendto(payload.encode('utf-8'), self._address)
except (OSError, socket.error) as error: # pragma: nocover
if self._connected:
LOGGER.exception('Error sending statsd metric: %s', error)
self._connected = False
self._failure_callback()
def _setting(self, key, default):
"""Return the setting, checking config, then the appropriate
environment variable, falling back to the default, caching the
results.
:param str key: The key to get
:param any default: The default value if not set
:return: str
"""
if key not in self._settings:
value = self._settings_in.get(
key, os.environ.get('STATSD_{}'.format(key).upper(), default))
self._settings[key] = value
return self._settings[key]
def _tcp_on_closed(self):
"""Invoked when the socket is closed."""
LOGGER.warning('Disconnected from statsd, reconnecting')
self._connected = False
self._tcp_sock = self._tcp_socket()
def _tcp_on_connected(self):
"""Invoked when the IOStream is connected"""
LOGGER.debug('Connected to statsd at %s via TCP', self._address)
self._connected = True
def _tcp_socket(self):
"""Connect to statsd via TCP and return the IOStream handle.
:rtype: iostream.IOStream
"""
sock = iostream.IOStream(socket.socket(
socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP))
try:
sock.connect(self._address, self._tcp_on_connected)
except (OSError, socket.error) as error:
LOGGER.error('Failed to connect via TCP, triggering shutdown: %s',
error)
self._failure_callback()
else:
self._connected = True
sock.set_close_callback(self._tcp_on_closed)
return sock
@staticmethod
def _udp_socket():
"""Return the UDP socket handle
:rtype: socket.socket
"""
return socket.socket(
socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
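# Usage sketch (hypothetical worker name, settings and failure callback; kept as
# comments so importing the module stays side-effect free):
#
#   client = Client('worker-1', {'host': 'localhost', 'port': 8125}, lambda: None)
#   client.incr('messages.processed')           # -> mikkoo.<host>.worker-1.messages.processed:1|c
#   client.add_timing('handle.duration', 0.25)  # seconds, converted to ms by add_timing
#   client.set_gauge('queue.depth', 42)
#   client.stop()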
|
bsd-3-clause
| -7,954,871,945,239,064,000 | 31.464706 | 78 | 0.586157 | false | 4.17158 | false | false | false |
f3at/feat
|
src/feat/configure/uninstalled.py
|
1
|
1393
|
# F3AT - Flumotion Asynchronous Autonomous Agent Toolkit
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# See "LICENSE.GPL" in the source distribution for more information.
# Headers in this file shall remain intact.
# -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
import os
import feat
_basedir = os.path.abspath(os.path.join(feat.__path__[0], '..', '..'))
bindir = os.path.join(_basedir, 'bin')
logdir = os.path.join(_basedir, 'log')
rundir = os.path.join(_basedir, 'run')
lockdir = os.path.join(_basedir, 'run')
confdir = os.path.join(_basedir, 'conf')
gatewaydir = os.path.join(_basedir, 'gateway', 'static')
socketdir = os.path.join(_basedir, 'run')
|
gpl-2.0
| -509,342,342,318,453,400 | 38.8 | 73 | 0.730079 | false | 3.491228 | false | false | false |
bibarz/bibarz.github.io
|
dabble/ab/auth_algorithms.py
|
1
|
17145
|
# Import any required libraries or modules.
import numpy as np
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
import csv
import sys
class MetaParams:
n_lda_ensemble = 101
lda_ensemble_feature_fraction = 0.4
mode = 'lda_ensemble'
# The following is a hacky container for Statistics computed from the
# whole training set; we don't want to have to recompute them again at every call
# to build_template (it becomes slow for parameter searches with cross validation),
# so we preserve it here between calls. The proper place to
# do this would be in main.py, but we don't want to touch that.
Global = lambda: None
Global.ready = False
def pca_converter(data, feature_discriminabilities, explained_variance):
'''
PCA conversion of the data. The PCA is based on the complete dataset, but each feature
is normalized to a std dev proportional to the given discriminability.
:param data: n_samples x n_features matrix with all data to do PCA on
:param feature_discriminabilities: n_features length vector
:param explained_variance: ratio of explained variance (between 0 and 1) that will
determine how many components are kept
:return: function transforming data into pca components, and covariance matrix
of transformed data
'''
mu = np.mean(data, axis=0)
std = np.std(data, axis=0) / feature_discriminabilities
normalized_data = (data - mu) / std
u, s, vt = np.linalg.svd(normalized_data)
cut_idx = np.argmin(np.abs(np.cumsum(s * s) / np.sum(s * s) - explained_variance))
vt = vt[:cut_idx + 1]
return (lambda x, mu=mu, std=std, vt=vt: np.dot((x - mu) / std, vt.T)),\
np.diag(s[:cut_idx + 1] ** 2 / (len(data) - 1))
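# Sketch of how pca_converter is typically applied (synthetic data; not part of the
# original pipeline): fit on the pooled dataset, then project new samples.
#
#   data = np.random.randn(200, 30)      # hypothetical pooled feature matrix
#   weights = np.ones(data.shape[1])     # neutral per-feature discriminability
#   to_pca, pca_cov = pca_converter(data, weights, explained_variance=0.98)
#   projected = to_pca(data[:5])         # 5 samples mapped into PCA space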
def preprocess_data(data):
'''
Turn raw data into an array of hand-picked features useful for classification
:param data: n_samples x n_raw_features numpy array
:return: n_samples x n_processed_features array
'''
keypress_dt = data[:, 8::10] - data[:, 3::10] # duration of each keystroke
key_to_key_dt = data[:, 13::10] - data[:, 3:-10:10] # interval between keystrokes
x_down = data[:, 4::10].astype(np.float) / data[:, 1][:, None].astype(np.float) # x relative to screen width
y_down = data[:, 5::10].astype(np.float) / data[:, 0][:, None].astype(np.float) # y relative to screen height
x_up = data[:, 9::10].astype(np.float) / data[:, 1][:, None].astype(np.float) # x relative to screen width
y_up = data[:, 10::10].astype(np.float) / data[:, 0][:, None].astype(np.float) # y relative to screen height
size_down = data[:, 6::10]
size_up = data[:, 11::10]
pressure_down = data[:, 7::10]
pressure_up = data[:, 12::10]
assert np.all((x_down >= 0) & (x_down <= 1) & (y_down >= 0) & (y_down <= 1))
assert np.all((x_up >= 0) & (x_up <= 1) & (y_up >= 0) & (y_up <= 1))
touch_d = np.hypot(x_down - x_up, y_down - y_up)
collected_data = np.hstack((keypress_dt, key_to_key_dt,
np.diff(x_down, axis=1), np.diff(y_down, axis=1),
touch_d,
size_down, size_up, pressure_down, pressure_up,
))
return collected_data
def get_random_feature_selector(n_all_features, feature_fraction, seed):
'''
Return a selector of random features from a data array
:param n_all_features: total number of features
:param feature_fraction: desired fraction of selected features
:param seed: random seed for repeatable experiments
:return: a function taking in full data and returning only the random features from it
'''
n_features = int(np.round(feature_fraction * n_all_features))
rng = np.random.RandomState(seed)
p = rng.permutation(n_all_features)[:n_features]
return lambda x, p=p: x[..., p]
def simple_gaussian(user_pca):
# template will consist of mean and std dev of each feature in pca space
mean_pca = np.mean(user_pca, axis=0)
std_pca = np.std(user_pca, axis=0)
return mean_pca, std_pca
def scikit_classifier(user, training_dataset, generator=lambda:KNeighborsClassifier(5)):
'''
Train a given classifier on user vs others
:param generator: a function creating a scikit classifier with fit and predict functions
:return: the trained classifier
'''
all_users = training_dataset.keys()
others_raw = np.vstack([training_dataset[u] for u in all_users if u != user])
others_pca = Global.pca(preprocess_data(others_raw))
user_raw = training_dataset[user]
user_pca = Global.pca(preprocess_data(user_raw))
clf = generator()
clf.fit(np.vstack((user_pca, others_pca)),
np.hstack((np.zeros(len(user_pca)), np.ones(len(others_pca)))))
return clf
def lda(user_pca, all_pca_cov, n_all):
'''
Compute the Fisher discriminant vector and threshold to classify user vs others.
:param user_pca: n_samples x n_pca_features array of user instances
:param all_pca_cov: covariance matrix of the complete dataset; it is assumed that
the user data was part of the dataset, and that the mean of the whole dataset
is 0 for every feature
:param n_all: number of samples that formed the complete dataset
:return: Fisher discriminant vector, threshold
'''
n_user = len(user_pca)
assert n_user < n_all - 1 # make sure the complete dataset has more than just the current user
# We compute mean and variance for the user data directly, and infer the mean
# and variance of the rest of the dataset from the covariance of the complete set
# (and its mean, which is assumed zero)
user_mu = np.mean(user_pca, axis=0)
others_mu = - n_user * user_mu / (n_all - n_user)
user_sigma = np.cov(user_pca.T)
def sq_(x):
return x[:, None] * x[None, :]
others_sigma = ((n_all - 1) * all_pca_cov - (n_user - 1) * user_sigma\
- n_user * sq_(user_mu) - (n_all - n_user) * sq_(others_mu)) / (n_all - n_user - 1)
ld_vector = np.dot(np.linalg.inv(user_sigma + others_sigma), user_mu - others_mu) # order determines sign of criterion
ld_vector /= np.linalg.norm(ld_vector)
# find the threshold for equal false positives and false negatives
user_proj_mu = np.dot(user_mu, ld_vector)
others_proj_mu = np.dot(others_mu, ld_vector)
user_proj_std = np.sqrt(np.dot(ld_vector, np.dot(user_sigma, ld_vector)))
others_proj_std = np.sqrt(np.dot(ld_vector, np.dot(others_sigma, ld_vector)))
ld_threshold = (others_proj_std * user_proj_mu + user_proj_std * others_proj_mu) / (user_proj_std + others_proj_std)
return ld_vector, ld_threshold
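# How the returned discriminant is applied (mirrors authenticate() further below):
#
#   score = np.dot(x_pca, ld_vector) - ld_threshold   # x_pca: one sample in PCA space
#   accept = score > 0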
def compute_feature_discriminabilities(each_preprocessed):
'''
Return a vector of discriminability for each feature
:param each_preprocessed: list with one n_samples x n_features data matrix for each user
:return: vector of discriminabilities (sqrt of the square of the difference of means divided by
the sum of variances) for each feature
'''
n_users = len(each_preprocessed)
each_mu = np.array([np.mean(m, axis=0) for m in each_preprocessed]) # n_users x n_features
each_var = np.array([np.var(m, axis=0) for m in each_preprocessed]) # n_users x n_features
# compute discriminability for each feature and pair of users
    pairwise_discriminability = (each_mu[:, None, :] - each_mu[None, :, :]) ** 2 / (1e-6 + each_var[:, None, :] + each_var[None, :, :])
# compute discriminability of each feature as the average over pairs of users
return np.sqrt(np.sum(pairwise_discriminability, axis=(0, 1)) / (n_users * (n_users - 1)))
def _prepare_global(training_dataset):
'''
Processing of the complete dataset, to be reused for each user
- feature preprocessing
- pca converter
- selection of features and computation of covariances for ensemble lda
:param training_dataset: the complete dataset
:return: None. The Global container is initialized with all necessary data
'''
each_preprocessed = [preprocess_data(training_dataset[u]) for u in training_dataset]
Global.feature_discriminabilities = compute_feature_discriminabilities(each_preprocessed)
all_preprocessed = np.vstack(each_preprocessed)
Global.n_all = len(all_preprocessed)
Global.pca, Global.all_pca_cov = pca_converter(all_preprocessed, Global.feature_discriminabilities, explained_variance=0.98)
if MetaParams.mode == 'lda_ensemble':
Global.lda_ensemble = []
for i in range(MetaParams.n_lda_ensemble):
seed = np.random.randint(200000)
feature_selector = get_random_feature_selector(all_preprocessed.shape[1],
feature_fraction=MetaParams.lda_ensemble_feature_fraction, seed=seed)
selected_pca, selected_pca_cov = pca_converter(feature_selector(all_preprocessed),
feature_selector(Global.feature_discriminabilities),
explained_variance=0.99)
Global.lda_ensemble.append({'selector': feature_selector, 'pca': selected_pca, 'pca_cov': selected_pca_cov})
Global.ready = True
# Implement template building here. Feel free to write any helper classes or functions required.
# Return the generated template for that user.
def build_template(user, training_dataset):
if not Global.ready:
_prepare_global(training_dataset)
user_raw = training_dataset[user]
user_preprocessed = preprocess_data(user_raw)
template = {}
if MetaParams.mode in ['lda', 'simple', 'combined']:
user_pca = Global.pca(user_preprocessed)
template['mean_pca'], template['std_pca'] = simple_gaussian(user_pca)
template['ld_vector'], template['ld_threshold'] =\
lda(user_pca, all_pca_cov=Global.all_pca_cov, n_all=Global.n_all)
if MetaParams.mode == 'lda_ensemble':
lda_ensemble = []
for lda_item in Global.lda_ensemble:
user_selected_pca = lda_item['pca'](lda_item['selector'](user_preprocessed))
ld_vector, ld_threshold = lda(user_selected_pca, n_all=Global.n_all, all_pca_cov=lda_item['pca_cov'])
lda_ensemble.append({'ld_vector': ld_vector, 'ld_threshold': ld_threshold})
template['lda_ensemble'] = lda_ensemble
if MetaParams.mode in ['nonlinear', 'combined']:
template['clf_1'] = scikit_classifier(user, training_dataset, generator=lambda: KNeighborsClassifier(5))
template['clf_2'] = scikit_classifier(user, training_dataset, generator=lambda: svm.LinearSVC(C=0.05, class_weight='balanced'))
return template
# Implement authentication method here. Feel free to write any helper classes or functions required.
# Return the authentication score and the threshold above which you consider it to be the correct user.
def authenticate(instance, user, templates):
mode = MetaParams.mode
assert mode in ['lda', 'combined', 'lda_ensemble', 'nonlinear', 'simple'], ("Unrecognized mode: %s" % mode)
t = templates[user]
batch_mode = instance.ndim > 1
if not batch_mode:
instance = instance[None, :]
preprocessed_instance = preprocess_data(instance)
if mode in ['lda', 'combined']:
user_pca = Global.pca(preprocessed_instance)
user_lda_proj = np.dot(user_pca, t['ld_vector'])
lda_score, lda_thr = user_lda_proj - t['ld_threshold'], np.zeros(len(user_lda_proj))
if mode in ['nonlinear', 'combined']:
user_pca = Global.pca(preprocessed_instance)
clf_score_1, clf_thr_1 = (t['clf_1'].predict(user_pca) == 0).astype(np.float), 0.5 * np.ones(len(user_pca))
clf_score_2, clf_thr_2 = (t['clf_2'].predict(user_pca) == 0).astype(np.float), 0.5 * np.ones(len(user_pca))
if mode == 'simple':
user_pca = Global.pca(preprocessed_instance)
z = (user_pca - t['mean_pca']) / t['std_pca']
distance = np.mean(np.abs(z) ** 2, axis=1) ** 0.5
score, thr = distance, 1.2 * np.ones(len(distance))
if mode == 'lda_ensemble':
ensemble_scores = np.empty((len(preprocessed_instance), len(t['lda_ensemble'])))
for i, sub_t in enumerate(t['lda_ensemble']):
g_item = Global.lda_ensemble[i]
user_selected_pca = g_item['pca'](g_item['selector'](preprocessed_instance))
user_thinned_lda_proj = np.dot(user_selected_pca, sub_t['ld_vector'])
ensemble_scores[:, i] = user_thinned_lda_proj - sub_t['ld_threshold']
score = np.mean(ensemble_scores > 0, axis=1)
thr = 0.5 * np.ones(len(score))
if mode == 'lda':
score, thr = lda_score, lda_thr
elif mode == 'nonlinear':
score, thr = clf_score_1, clf_thr_1
elif mode == 'combined':
score = np.mean(np.vstack((lda_score > lda_thr, clf_score_1 > clf_thr_1, clf_score_2 > clf_thr_2)), axis=0)
thr = 0.5 * np.ones(len(score))
if not batch_mode:
assert score.shape == (1, )
assert thr.shape == (1, )
score, thr = score[0], thr[0]
return score, thr
def cross_validate(full_dataset, print_results=False):
'''
n-fold cross-validation of given dataset
:param full_dataset: dictionary of raw data for each user
:param print_results: if True, print progress messages and results
:return: (percentage of false rejects, percentage of false accepts)
'''
n_folds = 5 # for cross-validation
all_false_accept = 0
all_false_reject = 0
all_true_accept = 0
all_true_reject = 0
for i in range(n_folds):
# split full dataset into training and validation
training_dataset = dict()
validation_dataset = dict()
for u in full_dataset.keys():
n = len(full_dataset[u])
idx = np.round(float(n) / n_folds * np.arange(n_folds + 1)).astype(np.int)
n_validation = np.diff(idx)
rolled_set = np.roll(full_dataset[u], -idx[i], axis=0)
training_dataset[u] = rolled_set[n_validation[i]:, :]
validation_dataset[u] = rolled_set[:n_validation[i], :]
# reset global data
Global.ready = False
templates = {u: build_template(u, training_dataset) for u in training_dataset}
# For each user test authentication.
true_accept = 0
false_reject = 0
true_reject = 0
false_accept = 0
for u in training_dataset:
# Test false rejections.
(score, threshold) = authenticate(validation_dataset[u], u, templates)
true_accept += np.sum(score > threshold)
false_reject += np.sum(score <= threshold)
# Test false acceptance.
for u_attacker in validation_dataset:
if u == u_attacker:
continue
(score, threshold) = authenticate(validation_dataset[u_attacker], u, templates)
false_accept += np.sum(score > threshold)
true_reject += np.sum(score <= threshold)
if print_results:
print "fold %i: false reject rate: %.1f%%, false accept rate: %.1f%%" %\
(i, 100. * float(false_reject) / (false_reject + true_accept),
100. * float(false_accept) / (false_accept + true_reject))
all_false_accept += false_accept
all_false_reject += false_reject
all_true_accept += true_accept
all_true_reject += true_reject
false_reject_percent = 100. * float(all_false_reject) / (all_false_reject + all_true_accept)
false_accept_percent = 100. * float(all_false_accept) / (all_false_accept + all_true_reject)
if print_results:
print "Total: false reject rate: %.1f%%, false accept rate: %.1f%%" % (false_reject_percent, false_accept_percent)
return false_reject_percent, false_accept_percent
if __name__ == "__main__":
# Reading the data into the training dataset separated by user.
data_training_file = open('dataset_training.csv', 'rb')
csv_training_reader = csv.reader(data_training_file, delimiter=',', quotechar='"')
csv_training_reader.next()
full_dataset = dict()
for row in csv_training_reader:
if row[0] not in full_dataset:
full_dataset[row[0]] = np.array([]).reshape((0, len(row[1:])))
full_dataset[row[0]] = np.vstack([full_dataset[row[0]], np.array(row[1:]).astype(float)])
for feature_fraction in [0.4]:
for n_lda_ensemble in [51]:
n_trials = 10
tot_rej = 0
tot_acc = 0
for _ in range(n_trials):
MetaParams.feature_fraction = feature_fraction
MetaParams.n_lda_ensemble = n_lda_ensemble
rej, acc = cross_validate(full_dataset)
tot_rej += rej
tot_acc += acc
print "feature fraction=%.2f, ensemble size=%i, false_rej=%.2f%%, false_acc=%.2f%%" % (feature_fraction, n_lda_ensemble, tot_rej / n_trials, tot_acc / n_trials)
|
mit
| 2,837,573,524,067,170,300 | 45.972603 | 172 | 0.633829 | false | 3.518367 | false | false | false |
maferelo/saleor
|
saleor/site/migrations/0017_auto_20180803_0528.py
|
3
|
1357
|
# Generated by Django 2.0.3 on 2018-08-03 10:28
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("site", "0016_auto_20180719_0520")]
operations = [
migrations.CreateModel(
name="SiteSettingsTranslation",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("language_code", models.CharField(max_length=10)),
("header_text", models.CharField(blank=True, max_length=200)),
("description", models.CharField(blank=True, max_length=500)),
(
"site_settings",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="translations",
to="site.SiteSettings",
),
),
],
),
migrations.AlterUniqueTogether(
name="sitesettingstranslation",
unique_together={("language_code", "site_settings")},
),
]
|
bsd-3-clause
| -1,561,289,987,221,625,000 | 32.097561 | 78 | 0.46647 | false | 5.101504 | false | false | false |
Azure/azure-sdk-for-python
|
sdk/servicebus/azure-servicebus/tests/perf_tests/receive_message_batch.py
|
1
|
1324
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import asyncio
from ._test_base import _ReceiveTest
class ReceiveMessageBatchTest(_ReceiveTest):
def run_sync(self):
count = 0
while count < self.args.num_messages:
batch = self.receiver.receive_messages(
max_message_count=self.args.num_messages - count,
max_wait_time=self.args.max_wait_time or None)
if self.args.peeklock:
for msg in batch:
self.receiver.complete_message(msg)
count += len(batch)
async def run_async(self):
count = 0
while count < self.args.num_messages:
batch = await self.async_receiver.receive_messages(
max_message_count=self.args.num_messages - count,
max_wait_time=self.args.max_wait_time or None)
if self.args.peeklock:
await asyncio.gather(*[self.async_receiver.complete_message(m) for m in batch])
count += len(batch)
|
mit
| -8,110,725,861,099,683,000 | 41.709677 | 95 | 0.527946 | false | 4.597222 | false | false | false |
slogan621/tscharts
|
tscharts/views.py
|
1
|
9381
|
#(C) Copyright Syd Logan 2016-2021
#(C) Copyright Thousand Smiles Foundation 2016-2021
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.exceptions import APIException, NotFound
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.http import JsonResponse
from rest_framework.authtoken.models import Token
from django.http import HttpResponse, HttpResponseForbidden, HttpResponseBadRequest, HttpResponseServerError, HttpResponseNotFound
import json
from common.decorators import *
from pin.models import PIN
import traceback
import logging
LOG = logging.getLogger("tscharts")
class LoginView(APIView):
authentication_classes = ()
permission_classes = ()
@log_request
def post(self, request, format=None):
badRequest = False
forbidden = False
data = json.loads(request.body)
if not "username" in data:
badRequest = True
if not "password" in data and not "pin" in data:
badRequest = True
if not badRequest:
username = data['username']
if "password" in data:
password = data['password']
user = authenticate(username=username, password=password)
else:
LOG.error("traceback 0 {}".format(traceback.print_stack()))
user = User.objects.get(username=username)
if user:
LOG.error("traceback 1 {}".format(traceback.print_stack()))
pin = PIN.objects.get(user=user.id)
if pin:
LOG.error("traceback 2 {}".format(traceback.print_stack()))
if not pin.user == user:
LOG.error("traceback 3 {}".format(traceback.print_stack()))
user = None
elif not pin.pin == data["pin"]:
LOG.error("traceback 4 {}".format(traceback.print_stack()))
user = None
else:
LOG.error("traceback 5 {}".format(traceback.print_stack()))
user = None
if user:
LOG.error("traceback 6 {}".format(traceback.print_stack()))
if user.is_active:
# XXX hack
try:
if not user.backend:
user.backend = 'django.contrib.auth.backends.ModelBackend'
except:
user.backend = 'django.contrib.auth.backends.ModelBackend'
login(request, user)
else:
LOG.error("traceback 7 {}".format(traceback.print_stack()))
forbidden = True
else:
LOG.error("traceback 8 {}".format(traceback.print_stack()))
forbidden = True
if not forbidden and not badRequest:
token = Token.objects.get_or_create(user=user)[0]
return JsonResponse({"token": "{}".format(token.key),
"id": "{}".format(user.id)})
elif forbidden:
return HttpResponseForbidden()
else:
return HttpResponseBadRequest()
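# Illustrative request/response for LoginView (values are made up; URL routing is
# defined elsewhere in the project):
#
#   POST body: {"username": "someuser", "password": "secret"}
#              or {"username": "someuser", "pin": "1234"}
#   success:   {"token": "<DRF token key>", "id": "<user id>"}
#   failure:   403 for bad credentials or an inactive user, 400 for missing fields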
class LogoutView(APIView):
authentication_classes = ()
permission_classes = ()
@log_request
def post(self, request, format=None):
logout(request)
return HttpResponse()
class CreateUserView(APIView):
authentication_classes = ()
permission_classes = ()
@log_request
def post(self, request, format=None):
badRequest = False
duplicateUser = False
implError = False
data = json.loads(request.body)
if not "first" in data:
badRequest = True
if not "last" in data:
badRequest = True
if not "password" in data:
badRequest = True
if not "email" in data:
badRequest = True
if not "pin" in data:
badRequest = True
if not badRequest:
first = data['first']
last = data['last']
password = data['password']
email = data['email']
try:
user = User.objects.get(username=email)
except:
user = None
if user:
badRequest = True
duplicateUser = True
if not badRequest:
try:
user = User.objects.create_user(email, email, password)
user.is_active = True
user.first_name = first
user.last_name = last
user.save()
except:
user = None
if user:
kwargs = {}
kwargs["pin"] = data['pin']
kwargs["user"] = user
try:
pin = PIN(**kwargs)
if pin:
pin.save()
except:
pin = None
if not pin:
implMsg = "Unable to create PIN"
implError = True
else:
implMsg = "Unable to create user"
implError = True
if badRequest:
if duplicateUser:
                r = HttpResponse(status=status.HTTP_409_CONFLICT, reason="User ({0}) already exists".format(user.id))
return r
else:
return HttpResponseBadRequest()
elif implError:
return HttpResponseServerError(implMsg)
else:
return Response({'id': user.id})
class UpdatePINView(APIView):
'''
XXX in insecure deployments, like the web, the following lines should
be commented out so that only an authenticated user can modify a PIN
authentication_classes = ()
permission_classes = ()
'''
authentication_classes = ()
permission_classes = ()
@log_request
def put(self, request, format=None):
badRequest = False
notFound = False
implError = False
data = json.loads(request.body)
if not "username" in data:
badRequest = True
if not "pin" in data:
badRequest = True
if not badRequest:
pin = data['pin']
username = data['username']
try:
user = User.objects.get(username=username)
except:
user = None
if not user:
notFound = True
if not badRequest and not notFound:
try:
pinobj = PIN.objects.get_or_create(user=user)[0]
except:
pinobj = None
if pinobj:
pinobj.pin = pin
pinobj.save()
else:
implError = True
implMsg = "Unable to update PIN"
if badRequest:
return HttpResponseBadRequest()
elif implError:
return HttpResponseServerError(implMsg)
elif notFound:
return HttpResponseNotFound()
else:
return Response({})
class UpdatePasswordView(APIView):
'''
XXX in insecure deployments, like the web, the following lines should
be commented out so that only an authenticated user can modify a password
authentication_classes = ()
permission_classes = ()
'''
authentication_classes = ()
permission_classes = ()
@log_request
def put(self, request, format=None):
badRequest = False
notFound = False
implError = False
data = json.loads(request.body)
if not "username" in data:
badRequest = True
if not "password" in data:
badRequest = True
if not badRequest:
password = data['password']
username = data['username']
try:
user = User.objects.get(username=username)
except:
user = None
if not user:
notFound = True
if not badRequest and not notFound:
try:
user.set_password(password)
user.save()
except:
implError = True
implMsg = "Unable to update password"
if badRequest:
return HttpResponseBadRequest()
elif implError:
return HttpResponseServerError(implMsg)
elif notFound:
return HttpResponseNotFound()
else:
return Response({})
|
apache-2.0
| 1,100,830,941,482,948,100 | 30.166113 | 130 | 0.538109 | false | 4.981944 | false | false | false |
wanglei828/apollo
|
modules/tools/plot_planning/speed_dsteering_data.py
|
1
|
3396
|
#!/usr/bin/env python
###############################################################################
# Copyright 2019 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import sys
from record_reader import RecordItemReader
import matplotlib.pyplot as plt
from cyber_py.record import RecordReader
from modules.canbus.proto import chassis_pb2
class SpeedDsteeringData:
def __init__(self):
self.last_steering_percentage = None
self.last_speed_mps = None
self.last_timestamp_sec = None
self.speed_data = []
self.d_steering_data = []
def add(self, chassis):
steering_percentage = chassis.steering_percentage
speed_mps = chassis.speed_mps
timestamp_sec = chassis.header.timestamp_sec
if self.last_timestamp_sec is None:
self.last_steering_percentage = steering_percentage
self.last_speed_mps = speed_mps
self.last_timestamp_sec = timestamp_sec
return
if (timestamp_sec - self.last_timestamp_sec) > 0.02:
d_steering = (steering_percentage - self.last_steering_percentage) \
/ (timestamp_sec - self.last_timestamp_sec)
self.speed_data.append(speed_mps)
self.d_steering_data.append(d_steering)
self.last_steering_percentage = steering_percentage
self.last_speed_mps = speed_mps
self.last_timestamp_sec = timestamp_sec
def get_speed_dsteering(self):
return self.speed_data, self.d_steering_data
if __name__ == "__main__":
import sys
import matplotlib.pyplot as plt
from os import listdir
from os.path import isfile, join
folders = sys.argv[1:]
fig, ax = plt.subplots()
colors = ["g", "b", "r", "m", "y"]
markers = ["o", "o", "o", "o"]
for i in range(len(folders)):
folder = folders[i]
color = colors[i % len(colors)]
marker = markers[i % len(markers)]
fns = [f for f in listdir(folder) if isfile(join(folder, f))]
for fn in fns:
reader = RecordItemReader(folder+"/"+fn)
processor = SpeedDsteeringData()
last_pose_data = None
last_chassis_data = None
topics = ["/apollo/localization/pose", "/apollo/canbus/chassis"]
for data in reader.read(topics):
if "chassis" in data:
last_chassis_data = data["chassis"]
if last_chassis_data is not None:
processor.add(last_chassis_data)
#last_pose_data = None
#last_chassis_data = None
data_x, data_y = processor.get_speed_dsteering()
ax.scatter(data_x, data_y, c=color, marker=marker, alpha=0.2)
plt.show()
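# Illustrative invocation (paths are hypothetical): each argument is a directory of
# cyber_py record files; one scatter colour/marker combination is used per folder.
#
#   python speed_dsteering_data.py /data/records_run1 /data/records_run2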
|
apache-2.0
| -6,538,834,778,381,319,000 | 37.157303 | 80 | 0.592756 | false | 3.96729 | false | false | false |
PanDAWMS/panda-server
|
pandaserver/daemons/scripts/copyArchive.py
|
1
|
71078
|
import os
import re
import sys
import time
import fcntl
import shelve
import datetime
import traceback
import requests
from urllib3.exceptions import InsecureRequestWarning
import pandaserver.userinterface.Client as Client
from pandaserver.taskbuffer import EventServiceUtils
from pandacommon.pandalogger.PandaLogger import PandaLogger
from pandaserver.jobdispatcher.Watcher import Watcher
from pandaserver.brokerage.SiteMapper import SiteMapper
# from pandaserver.dataservice.MailUtils import MailUtils
from pandaserver.srvcore.CoreUtils import commands_get_status_output
from pandaserver.config import panda_config
# logger
_logger = PandaLogger().getLogger('copyArchive')
# main
def main(argv=tuple(), tbuf=None, **kwargs):
# password
requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
_logger.debug("===================== start =====================")
# memory checker
def _memoryCheck(str):
try:
proc_status = '/proc/%d/status' % os.getpid()
procfile = open(proc_status)
name = ""
vmSize = ""
vmRSS = ""
# extract Name,VmSize,VmRSS
for line in procfile:
if line.startswith("Name:"):
name = line.split()[-1]
continue
if line.startswith("VmSize:"):
vmSize = ""
for item in line.split()[1:]:
vmSize += item
continue
if line.startswith("VmRSS:"):
vmRSS = ""
for item in line.split()[1:]:
vmRSS += item
continue
procfile.close()
_logger.debug('MemCheck - %s Name=%s VSZ=%s RSS=%s : %s' % (os.getpid(),name,vmSize,vmRSS,str))
except Exception:
type, value, traceBack = sys.exc_info()
_logger.error("memoryCheck() : %s %s" % (type,value))
_logger.debug('MemCheck - %s unknown : %s' % (os.getpid(),str))
return
_memoryCheck("start")
# # kill old dq2 process
# try:
# # time limit
# timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=2)
# # get process list
# scriptName = sys.argv[0]
# out = commands_get_status_output(
# 'ps axo user,pid,lstart,args | grep dq2.clientapi | grep -v PYTHONPATH | grep -v grep')[-1]
# for line in out.split('\n'):
# if line == '':
# continue
# items = line.split()
# # owned process
# if items[0] not in ['sm','atlpan','pansrv','root']: # ['os.getlogin()']: doesn't work in cron
# continue
# # look for python
# if re.search('python',line) is None:
# continue
# # PID
# pid = items[1]
# # start time
# timeM = re.search('(\S+\s+\d+ \d+:\d+:\d+ \d+)',line)
# startTime = datetime.datetime(*time.strptime(timeM.group(1),'%b %d %H:%M:%S %Y')[:6])
# # kill old process
# if startTime < timeLimit:
# _logger.debug("old dq2 process : %s %s" % (pid,startTime))
# _logger.debug(line)
# commands_get_status_output('kill -9 %s' % pid)
# except Exception:
# type, value, traceBack = sys.exc_info()
# _logger.error("kill dq2 process : %s %s" % (type,value))
#
#
# # kill old process
# try:
# # time limit
# timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=7)
# # get process list
# scriptName = sys.argv[0]
# out = commands_get_status_output('ps axo user,pid,lstart,args | grep %s' % scriptName)[-1]
# for line in out.split('\n'):
# items = line.split()
# # owned process
# if items[0] not in ['sm','atlpan','pansrv','root']: # ['os.getlogin()']: doesn't work in cron
# continue
# # look for python
# if re.search('python',line) is None:
# continue
# # PID
# pid = items[1]
# # start time
# timeM = re.search('(\S+\s+\d+ \d+:\d+:\d+ \d+)',line)
# startTime = datetime.datetime(*time.strptime(timeM.group(1),'%b %d %H:%M:%S %Y')[:6])
# # kill old process
# if startTime < timeLimit:
# _logger.debug("old process : %s %s" % (pid,startTime))
# _logger.debug(line)
# commands_get_status_output('kill -9 %s' % pid)
# except Exception:
# type, value, traceBack = sys.exc_info()
# _logger.error("kill process : %s %s" % (type,value))
# instantiate TB
# if tbuf is None:
from pandaserver.taskbuffer.TaskBuffer import taskBuffer
taskBuffer.init(panda_config.dbhost,panda_config.dbpasswd,nDBConnection=1)
# else:
# taskBuffer = tbuf
# instantiate sitemapper
siteMapper = SiteMapper(taskBuffer)
# send email for access requests
_logger.debug("Site Access")
try:
# get contact
contactAddr = {}
siteContactAddr = {}
sql = "SELECT name,email FROM ATLAS_PANDAMETA.cloudconfig"
status,res = taskBuffer.querySQLS(sql,{})
for cloudName,cloudEmail in res:
contactAddr[cloudName] = cloudEmail
# get requests
sql = "SELECT pandaSite,status,dn FROM ATLAS_PANDAMETA.siteaccess WHERE status IN (:status1,:status2,:status3) "
sql += "ORDER BY pandaSite,status "
varMap = {}
varMap[':status1'] = 'requested'
varMap[':status2'] = 'tobeapproved'
varMap[':status3'] = 'toberejected'
status,res = taskBuffer.querySQLS(sql,varMap)
requestsInCloud = {}
# mailUtils = MailUtils()
# loop over all requests
for pandaSite,reqStatus,userName in res:
cloud = siteMapper.getSite(pandaSite).cloud
_logger.debug("request : '%s' site=%s status=%s cloud=%s" % (userName,pandaSite,reqStatus,cloud))
# send emails to user
if reqStatus in ['tobeapproved','toberejected']:
# set status
if reqStatus == 'tobeapproved':
newStatus = 'approved'
else:
newStatus = 'rejected'
# get mail address for user
userMailAddr = ''
sqlUM = "SELECT email FROM ATLAS_PANDAMETA.users WHERE name=:userName"
varMap = {}
varMap[':userName'] = userName
stUM,resUM = taskBuffer.querySQLS(sqlUM,varMap)
if resUM is None or len(resUM) == 0:
_logger.error("email address is unavailable for '%s'" % userName)
else:
userMailAddr = resUM[0][0]
# send
# if userMailAddr not in ['',None,'None','notsend']:
# _logger.debug("send update to %s" % userMailAddr)
# retMail = mailUtils.sendSiteAccessUpdate(userMailAddr,newStatus,pandaSite)
# _logger.debug(retMail)
# update database
sqlUp = "UPDATE ATLAS_PANDAMETA.siteaccess SET status=:newStatus "
sqlUp += "WHERE pandaSite=:pandaSite AND dn=:userName"
varMap = {}
varMap[':userName'] = userName
varMap[':newStatus'] = newStatus
varMap[':pandaSite'] = pandaSite
stUp,resUp = taskBuffer.querySQLS(sqlUp,varMap)
else:
# append cloud
requestsInCloud.setdefault(cloud, {})
# append site
requestsInCloud[cloud].setdefault(pandaSite, [])
# append user
requestsInCloud[cloud][pandaSite].append(userName)
# send requests to the cloud responsible
for cloud in requestsInCloud:
requestsMap = requestsInCloud[cloud]
_logger.debug("requests for approval : cloud=%s" % cloud)
# send
if cloud in contactAddr and contactAddr[cloud] not in ['',None,'None']:
# get site contact
for pandaSite in requestsMap:
userNames = requestsMap[pandaSite]
if pandaSite not in siteContactAddr:
varMap = {}
varMap[':siteid'] = pandaSite
sqlSite = "SELECT email FROM ATLAS_PANDAMETA.schedconfig WHERE siteid=:siteid AND rownum<=1"
status,res = taskBuffer.querySQLS(sqlSite,varMap)
siteContactAddr[pandaSite] = res[0][0]
# append
if siteContactAddr[pandaSite] not in ['',None,'None']:
contactAddr[cloud] += ',%s' % siteContactAddr[pandaSite]
else:
_logger.error("contact email address is unavailable for %s" % cloud)
except Exception:
type, value, traceBack = sys.exc_info()
_logger.error("Failed with %s %s" % (type,value))
_logger.debug("Site Access : done")
# finalize failed jobs
_logger.debug("AnalFinalizer session")
try:
# get min PandaID for failed jobs in Active table
sql = "SELECT MIN(PandaID),prodUserName,jobDefinitionID,jediTaskID,computingSite FROM ATLAS_PANDA.jobsActive4 "
sql += "WHERE prodSourceLabel=:prodSourceLabel AND jobStatus=:jobStatus "
sql += "GROUP BY prodUserName,jobDefinitionID,jediTaskID,computingSite "
varMap = {}
varMap[':jobStatus'] = 'failed'
varMap[':prodSourceLabel'] = 'user'
status,res = taskBuffer.querySQLS(sql,varMap)
if res is not None:
# loop over all user/jobdefID
for pandaID,prodUserName,jobDefinitionID,jediTaskID,computingSite in res:
# check
_logger.debug("check finalization for %s task=%s jobdefID=%s site=%s" % (prodUserName,jediTaskID,
jobDefinitionID,
computingSite))
sqlC = "SELECT COUNT(*) FROM ("
sqlC += "SELECT PandaID FROM ATLAS_PANDA.jobsActive4 "
sqlC += "WHERE prodSourceLabel=:prodSourceLabel AND prodUserName=:prodUserName "
sqlC += "AND jediTaskID=:jediTaskID "
sqlC += "AND computingSite=:computingSite "
sqlC += "AND jobDefinitionID=:jobDefinitionID "
sqlC += "AND NOT jobStatus IN (:jobStatus1,:jobStatus2) "
sqlC += "UNION "
sqlC += "SELECT PandaID FROM ATLAS_PANDA.jobsDefined4 "
sqlC += "WHERE prodSourceLabel=:prodSourceLabel AND prodUserName=:prodUserName "
sqlC += "AND jediTaskID=:jediTaskID "
sqlC += "AND computingSite=:computingSite "
sqlC += "AND jobDefinitionID=:jobDefinitionID "
sqlC += "AND NOT jobStatus IN (:jobStatus1,:jobStatus2) "
sqlC += ") "
varMap = {}
varMap[':jobStatus1'] = 'failed'
varMap[':jobStatus2'] = 'merging'
varMap[':prodSourceLabel'] = 'user'
varMap[':jediTaskID'] = jediTaskID
varMap[':computingSite'] = computingSite
varMap[':prodUserName'] = prodUserName
varMap[':jobDefinitionID'] = jobDefinitionID
statC,resC = taskBuffer.querySQLS(sqlC,varMap)
# finalize if there is no non-failed jobs
if resC is not None:
_logger.debug("n of non-failed jobs : %s" % resC[0][0])
if resC[0][0] == 0:
jobSpecs = taskBuffer.peekJobs([pandaID],fromDefined=False,fromArchived=False,fromWaiting=False)
jobSpec = jobSpecs[0]
if jobSpec is None:
_logger.debug("skip PandaID={0} not found in jobsActive".format(pandaID))
continue
_logger.debug("finalize %s %s" % (prodUserName,jobDefinitionID))
finalizedFlag = taskBuffer.finalizePendingJobs(prodUserName,jobDefinitionID)
_logger.debug("finalized with %s" % finalizedFlag)
if finalizedFlag and jobSpec.produceUnMerge():
# collect sub datasets
subDsNames = set()
subDsList = []
for tmpFileSpec in jobSpec.Files:
if tmpFileSpec.type in ['log','output'] and \
re.search('_sub\d+$',tmpFileSpec.destinationDBlock) is not None:
if tmpFileSpec.destinationDBlock in subDsNames:
continue
subDsNames.add(tmpFileSpec.destinationDBlock)
datasetSpec = taskBuffer.queryDatasetWithMap({'name':tmpFileSpec.destinationDBlock})
subDsList.append(datasetSpec)
_logger.debug("update unmerged datasets")
taskBuffer.updateUnmergedDatasets(jobSpec,subDsList)
else:
_logger.debug("n of non-failed jobs : None")
except Exception:
errType,errValue = sys.exc_info()[:2]
_logger.error("AnalFinalizer failed with %s %s" % (errType,errValue))
# finalize failed jobs
_logger.debug("check stuck mergeing jobs")
try:
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=2)
# get PandaIDs
varMap = {}
varMap[':prodSourceLabel'] = 'managed'
varMap[':jobStatus'] = 'merging'
varMap[':timeLimit'] = timeLimit
sql = "SELECT distinct jediTaskID FROM ATLAS_PANDA.jobsActive4 "
sql += "WHERE prodSourceLabel=:prodSourceLabel AND jobStatus=:jobStatus and modificationTime<:timeLimit "
tmp,res = taskBuffer.querySQLS(sql,varMap)
checkedDS = set()
for jediTaskID, in res:
varMap = {}
varMap[':jediTaskID'] = jediTaskID
varMap[':dsType'] = 'trn_log'
sql = "SELECT datasetID FROM ATLAS_PANDA.JEDI_Datasets WHERE jediTaskID=:jediTaskID AND type=:dsType AND nFilesUsed=nFilesTobeUsed "
tmpP,resD = taskBuffer.querySQLS(sql,varMap)
for datasetID, in resD:
varMap = {}
varMap[':jediTaskID'] = jediTaskID
varMap[':fileStatus'] = 'ready'
varMap[':datasetID'] = datasetID
sql = "SELECT PandaID FROM ATLAS_PANDA.JEDI_Dataset_Contents "
sql += "WHERE jediTaskID=:jediTaskID AND datasetid=:datasetID AND status=:fileStatus AND PandaID=OutPandaID AND rownum<=1 "
tmpP,resP = taskBuffer.querySQLS(sql,varMap)
if resP == []:
continue
PandaID = resP[0][0]
varMap = {}
varMap[':PandaID'] = PandaID
varMap[':fileType'] = 'log'
sql = "SELECT d.status FROM ATLAS_PANDA.filesTable4 f,ATLAS_PANDA.datasets d WHERE PandaID=:PandaID AND f.type=:fileType AND d.name=f.destinationDBlock "
tmpS,resS = taskBuffer.querySQLS(sql,varMap)
if resS is not None:
subStatus, = resS[0]
if subStatus in ['completed']:
jobSpecs = taskBuffer.peekJobs([PandaID],fromDefined=False,fromArchived=False,fromWaiting=False)
jobSpec = jobSpecs[0]
subDsNames = set()
subDsList = []
for tmpFileSpec in jobSpec.Files:
if tmpFileSpec.type in ['log','output'] and \
re.search('_sub\d+$',tmpFileSpec.destinationDBlock) is not None:
if tmpFileSpec.destinationDBlock in subDsNames:
continue
subDsNames.add(tmpFileSpec.destinationDBlock)
datasetSpec = taskBuffer.queryDatasetWithMap({'name':tmpFileSpec.destinationDBlock})
subDsList.append(datasetSpec)
_logger.debug("update unmerged datasets for jediTaskID={0} PandaID={1}".format(jediTaskID,PandaID))
taskBuffer.updateUnmergedDatasets(jobSpec,subDsList,updateCompleted=True)
except Exception:
errType,errValue = sys.exc_info()[:2]
_logger.error("check for stuck merging jobs failed with %s %s" % (errType,errValue))
# get sites to skip various timeout
varMap = {}
varMap[':status'] = 'paused'
sql = "SELECT siteid FROM ATLAS_PANDAMETA.schedconfig WHERE status=:status "
sitesToSkipTO = set()
status,res = taskBuffer.querySQLS(sql,varMap)
for siteid, in res:
sitesToSkipTO.add(siteid)
_logger.debug("PQs to skip timeout : {0}".format(','.join(sitesToSkipTO)))
sitesToDisableReassign = set()
# get sites to disable reassign
for siteName in siteMapper.siteSpecList:
siteSpec = siteMapper.siteSpecList[siteName]
if siteSpec.capability == 'ucore' and not siteSpec.is_unified:
continue
if siteSpec.disable_reassign():
sitesToDisableReassign.add(siteName)
_logger.debug("PQs to disable reassign : {0}".format(','.join(sitesToDisableReassign)))
_memoryCheck("watcher")
_logger.debug("Watcher session")
# get the list of workflows
sql = "SELECT DISTINCT workflow FROM ATLAS_PANDAMETA.schedconfig WHERE status='online' "
status, res = taskBuffer.querySQLS(sql, {})
workflow_timeout_map = {}
for workflow, in res + [('production',), ('analysis',)]:
timeout = taskBuffer.getConfigValue('watcher', 'HEARTBEAT_TIMEOUT_{0}'.format(workflow), 'pandaserver', 'atlas')
if timeout is not None:
workflow_timeout_map[workflow] = timeout
elif workflow in ['production', 'analysis']:
workflow_timeout_map[workflow] = 2
workflows = list(workflow_timeout_map)
_logger.debug("timeout : {0}".format(str(workflow_timeout_map)))
# check heartbeat for analysis jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=workflow_timeout_map['analysis'])
varMap = {}
varMap[':modificationTime'] = timeLimit
varMap[':prodSourceLabel1'] = 'panda'
varMap[':prodSourceLabel2'] = 'user'
varMap[':jobStatus1'] = 'running'
varMap[':jobStatus2'] = 'starting'
varMap[':jobStatus3'] = 'stagein'
varMap[':jobStatus4'] = 'stageout'
sql = "SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE (prodSourceLabel=:prodSourceLabel1 OR prodSourceLabel=:prodSourceLabel2) "
sql += "AND jobStatus IN (:jobStatus1,:jobStatus2,:jobStatus3,:jobStatus4) AND modificationTime<:modificationTime"
status,res = taskBuffer.querySQLS(sql,varMap)
if res is None:
_logger.debug("# of Anal Watcher : %s" % res)
else:
_logger.debug("# of Anal Watcher : %s" % len(res))
for (id,) in res:
_logger.debug("Anal Watcher %s" % id)
thr = Watcher(taskBuffer,id,single=True,sleepTime=60,sitemapper=siteMapper)
thr.start()
thr.join()
# check heartbeat for analysis jobs in transferring
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=workflow_timeout_map['analysis'])
varMap = {}
varMap[':modificationTime'] = timeLimit
varMap[':prodSourceLabel1'] = 'panda'
varMap[':prodSourceLabel2'] = 'user'
varMap[':jobStatus1'] = 'transferring'
sql = "SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE prodSourceLabel IN (:prodSourceLabel1,:prodSourceLabel2) "
sql += "AND jobStatus=:jobStatus1 AND modificationTime<:modificationTime"
status,res = taskBuffer.querySQLS(sql,varMap)
if res is None:
_logger.debug("# of Transferring Anal Watcher : %s" % res)
else:
_logger.debug("# of Transferring Anal Watcher : %s" % len(res))
for (id,) in res:
_logger.debug("Trans Anal Watcher %s" % id)
thr = Watcher(taskBuffer,id,single=True,sleepTime=60,sitemapper=siteMapper)
thr.start()
thr.join()
# check heartbeat for sent jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(minutes=30)
varMap = {}
varMap[':jobStatus'] = 'sent'
varMap[':modificationTime'] = timeLimit
status,res = taskBuffer.querySQLS("SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE jobStatus=:jobStatus AND modificationTime<:modificationTime",
varMap)
if res is None:
_logger.debug("# of Sent Watcher : %s" % res)
else:
_logger.debug("# of Sent Watcher : %s" % len(res))
for (id,) in res:
_logger.debug("Sent Watcher %s" % id)
thr = Watcher(taskBuffer,id,single=True,sleepTime=30,sitemapper=siteMapper)
thr.start()
thr.join()
# check heartbeat for 'holding' analysis/ddm jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=3)
# get XMLs
xmlIDs = set()
# xmlFiles = os.listdir(panda_config.logdir)
# for file in xmlFiles:
# match = re.search('^(\d+)_([^_]+)_.{36}$',file)
# if match is not None:
# id = match.group(1)
# xmlIDs.append(int(id))
job_output_report_list = taskBuffer.listJobOutputReport()
if job_output_report_list is not None:
for panda_id, job_status, attempt_nr, time_stamp in job_output_report_list:
xmlIDs.add(int(panda_id))
sql = "SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE jobStatus=:jobStatus AND (modificationTime<:modificationTime OR (endTime IS NOT NULL AND endTime<:endTime)) AND (prodSourceLabel=:prodSourceLabel1 OR prodSourceLabel=:prodSourceLabel2 OR prodSourceLabel=:prodSourceLabel3) AND stateChangeTime != modificationTime"
varMap = {}
varMap[':modificationTime'] = timeLimit
varMap[':endTime'] = timeLimit
varMap[':jobStatus'] = 'holding'
varMap[':prodSourceLabel1'] = 'panda'
varMap[':prodSourceLabel2'] = 'user'
varMap[':prodSourceLabel3'] = 'ddm'
status,res = taskBuffer.querySQLS(sql,varMap)
if res is None:
_logger.debug("# of Holding Anal/DDM Watcher : %s" % res)
else:
_logger.debug("# of Holding Anal/DDM Watcher : %s - XMLs : %s" % (len(res),len(xmlIDs)))
for (id,) in res:
_logger.debug("Holding Anal/DDM Watcher %s" % id)
if int(id) in xmlIDs:
_logger.debug(" found XML -> skip %s" % id)
continue
thr = Watcher(taskBuffer,id,single=True,sleepTime=180,sitemapper=siteMapper)
thr.start()
thr.join()
# check heartbeat for high prio production jobs
timeOutVal = 3
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=timeOutVal)
sql = "SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE jobStatus=:jobStatus AND currentPriority>:pLimit "
sql += "AND (modificationTime<:modificationTime OR (endTime IS NOT NULL AND endTime<:endTime))"
varMap = {}
varMap[':modificationTime'] = timeLimit
varMap[':endTime'] = timeLimit
varMap[':jobStatus'] = 'holding'
varMap[':pLimit'] = 800
status,res = taskBuffer.querySQLS(sql,varMap)
if res is None:
_logger.debug("# of High prio Holding Watcher : %s" % res)
else:
_logger.debug("# of High prio Holding Watcher : %s" % len(res))
for (id,) in res:
_logger.debug("High prio Holding Watcher %s" % id)
thr = Watcher(taskBuffer,id,single=True,sleepTime=60*timeOutVal,sitemapper=siteMapper)
thr.start()
thr.join()
# check heartbeat for production jobs
timeOutVal = 48
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=timeOutVal)
sql = "SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE jobStatus=:jobStatus AND (modificationTime<:modificationTime OR (endTime IS NOT NULL AND endTime<:endTime))"
varMap = {}
varMap[':modificationTime'] = timeLimit
varMap[':endTime'] = timeLimit
varMap[':jobStatus'] = 'holding'
status,res = taskBuffer.querySQLS(sql,varMap)
if res is None:
_logger.debug("# of Holding Watcher : %s" % res)
else:
_logger.debug("# of Holding Watcher : %s" % len(res))
for (id,) in res:
_logger.debug("Holding Watcher %s" % id)
thr = Watcher(taskBuffer,id,single=True,sleepTime=60*timeOutVal,sitemapper=siteMapper)
thr.start()
thr.join()
# check heartbeat for production jobs with internal stage-out
sql = "SELECT PandaID,jobStatus,jobSubStatus FROM ATLAS_PANDA.jobsActive4 j,ATLAS_PANDAMETA.schedconfig s "
sql += "WHERE j.computingSite=s.siteid AND jobStatus=:jobStatus1 AND jobSubStatus IS NOT NULL AND modificationTime<:modificationTime "
for workflow in workflows:
if workflow == 'analysis':
continue
varMap = {}
varMap[':modificationTime'] = timeLimit
varMap[':jobStatus1'] = 'transferring'
sqlX = sql
if workflow == 'production':
if len(workflows) > 2:
sqlX += "AND (s.workflow IS NULL OR s.workflow NOT IN ("
for ng_workflow in workflows:
if ng_workflow in ['production', 'analysis']:
continue
tmp_key = ':w_{0}'.format(ng_workflow)
varMap[tmp_key] = ng_workflow
sqlX += '{0},'.format(tmp_key)
sqlX = sqlX[:-1]
sqlX += ")) "
else:
tmp_key = ':w_{0}'.format(workflow)
sqlX += "AND s.workflow={0} ".format(tmp_key)
varMap[tmp_key] = workflow
timeOutVal = workflow_timeout_map[workflow]
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=timeOutVal)
varMap[':modificationTime'] = timeLimit
status,res = taskBuffer.querySQLS(sqlX, varMap)
if res is None:
_logger.debug("# of Internal Staging Watcher with workflow={0}: {1}".format(workflow, res))
else:
_logger.debug("# of Internal Staging Watcher with workflow={0}: {1}".format(workflow, len(res)))
for pandaID, jobStatus, jobSubStatus in res:
_logger.debug("Internal Staging Watcher %s %s:%s" % (pandaID, jobStatus, jobSubStatus))
thr = Watcher(taskBuffer,pandaID,single=True,sleepTime=60*timeOutVal,sitemapper=siteMapper)
thr.start()
thr.join()
# check heartbeat for production jobs
sql = "SELECT PandaID,jobStatus,j.computingSite FROM ATLAS_PANDA.jobsActive4 j, ATLAS_PANDAMETA.schedconfig s "
sql += "WHERE j.computingSite=s.siteid AND jobStatus IN (:jobStatus1,:jobStatus2,:jobStatus3,:jobStatus4) AND modificationTime<:modificationTime "
for workflow in workflows:
if workflow == 'analysis':
continue
varMap = {}
varMap[':jobStatus1'] = 'running'
varMap[':jobStatus2'] = 'starting'
varMap[':jobStatus3'] = 'stagein'
varMap[':jobStatus4'] = 'stageout'
sqlX = sql
if workflow == 'production':
if len(workflows) > 2:
sqlX += "AND (s.workflow IS NULL OR s.workflow NOT IN ("
for ng_workflow in workflows:
if ng_workflow in ['production', 'analysis']:
continue
tmp_key = ':w_{0}'.format(ng_workflow)
varMap[tmp_key] = ng_workflow
sqlX += '{0},'.format(tmp_key)
sqlX = sqlX[:-1]
sqlX += ")) "
else:
tmp_key = ':w_{0}'.format(workflow)
sqlX += "AND s.workflow={0} ".format(tmp_key)
varMap[tmp_key] = workflow
timeOutVal = workflow_timeout_map[workflow]
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=timeOutVal)
varMap[':modificationTime'] = timeLimit
status,res = taskBuffer.querySQLS(sqlX, varMap)
if res is None:
_logger.debug("# of General Watcher with workflow={0}: {1}".format(workflow, res))
else:
_logger.debug("# of General Watcher with workflow={0}: {1}".format(workflow, len(res)))
for pandaID,jobStatus,computingSite in res:
if computingSite in sitesToSkipTO:
_logger.debug("skip General Watcher for PandaID={0} at {1} since timeout is disabled for {2}".format(pandaID,computingSite,jobStatus))
continue
_logger.debug("General Watcher %s" % pandaID)
thr = Watcher(taskBuffer,pandaID,single=True,sleepTime=60*timeOutVal,sitemapper=siteMapper)
thr.start()
thr.join()
_memoryCheck("reassign")
# kill long-waiting jobs in defined table
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=7)
status,res = taskBuffer.querySQLS("SELECT PandaID,cloud,prodSourceLabel FROM ATLAS_PANDA.jobsDefined4 WHERE creationTime<:creationTime",
{':creationTime':timeLimit})
jobs=[]
dashFileMap = {}
if res is not None:
for pandaID,cloud,prodSourceLabel in res:
# collect PandaIDs
jobs.append(pandaID)
if len(jobs):
_logger.debug("killJobs for Defined (%s)" % str(jobs))
Client.killJobs(jobs,2)
# kill long-waiting jobs in active table
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=7)
varMap = {}
varMap[':jobStatus'] = 'activated'
varMap[':creationTime'] = timeLimit
status,res = taskBuffer.querySQLS("SELECT PandaID from ATLAS_PANDA.jobsActive4 WHERE jobStatus=:jobStatus AND creationTime<:creationTime",
varMap)
jobs=[]
if res is not None:
for (id,) in res:
jobs.append(id)
if len(jobs):
_logger.debug("killJobs for Active (%s)" % str(jobs))
Client.killJobs(jobs,2)
# kill long-waiting ddm jobs for dispatch
_logger.debug("kill PandaMovers")
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=12)
sql = "SELECT PandaID from ATLAS_PANDA.jobsActive4 WHERE prodSourceLabel=:prodSourceLabel AND transferType=:transferType AND creationTime<:creationTime"
varMap = {}
varMap[':creationTime'] = timeLimit
varMap[':prodSourceLabel'] = 'ddm'
varMap[':transferType'] = 'dis'
_logger.debug(sql+str(varMap))
status,res = taskBuffer.querySQLS(sql,varMap)
_logger.debug(res)
jobs=[]
if res is not None:
for (id,) in res:
jobs.append(id)
if len(jobs):
_logger.debug("kill DDM Jobs (%s)" % str(jobs))
Client.killJobs(jobs,2)
# reassign activated jobs in inactive sites
inactiveTimeLimitSite = 2
inactiveTimeLimitJob = 4
inactivePrioLimit = 800
timeLimitSite = datetime.datetime.utcnow() - datetime.timedelta(hours=inactiveTimeLimitSite)
timeLimitJob = datetime.datetime.utcnow() - datetime.timedelta(hours=inactiveTimeLimitJob)
# get PandaIDs
sql = 'SELECT distinct computingSite FROM ATLAS_PANDA.jobsActive4 '
sql += 'WHERE prodSourceLabel=:prodSourceLabel '
sql += 'AND ((modificationTime<:timeLimit AND jobStatus=:jobStatus1) '
sql += 'OR (stateChangeTime<:timeLimit AND jobStatus=:jobStatus2)) '
sql += 'AND lockedby=:lockedby AND currentPriority>=:prioLimit '
sql += 'AND NOT processingType IN (:pType1) AND relocationFlag<>:rFlag1 '
varMap = {}
varMap[':prodSourceLabel'] = 'managed'
varMap[':jobStatus1'] = 'activated'
varMap[':jobStatus2'] = 'starting'
varMap[':lockedby'] = 'jedi'
varMap[':timeLimit'] = timeLimitJob
varMap[':prioLimit'] = inactivePrioLimit
varMap[':pType1'] = 'pmerge'
varMap[':rFlag1'] = 2
stDS,resDS = taskBuffer.querySQLS(sql,varMap)
sqlSS = 'SELECT laststart FROM ATLAS_PANDAMETA.siteData '
sqlSS += 'WHERE site=:site AND flag=:flag AND hours=:hours AND laststart<:laststart '
sqlPI = 'SELECT PandaID,eventService,attemptNr FROM ATLAS_PANDA.jobsActive4 '
sqlPI += 'WHERE prodSourceLabel=:prodSourceLabel AND jobStatus IN (:jobStatus1,:jobStatus2) '
sqlPI += 'AND (modificationTime<:timeLimit OR stateChangeTime<:timeLimit) '
sqlPI += 'AND lockedby=:lockedby AND currentPriority>=:prioLimit '
sqlPI += 'AND computingSite=:site AND NOT processingType IN (:pType1) AND relocationFlag<>:rFlag1 '
for tmpSite, in resDS:
if tmpSite in sitesToDisableReassign:
_logger.debug('skip reassignJobs at inactive site %s since reassign is disabled' % (tmpSite))
continue
# check if the site is inactive
varMap = {}
varMap[':site'] = tmpSite
varMap[':flag'] = 'production'
varMap[':hours'] = 3
varMap[':laststart'] = timeLimitSite
stSS,resSS = taskBuffer.querySQLS(sqlSS,varMap)
if stSS is not None and len(resSS) > 0:
# get jobs
varMap = {}
varMap[':prodSourceLabel'] = 'managed'
varMap[':jobStatus1'] = 'activated'
varMap[':jobStatus2'] = 'starting'
varMap[':lockedby'] = 'jedi'
varMap[':timeLimit'] = timeLimitJob
varMap[':prioLimit'] = inactivePrioLimit
varMap[':site'] = tmpSite
varMap[':pType1'] = 'pmerge'
varMap[':rFlag1'] = 2
stPI,resPI = taskBuffer.querySQLS(sqlPI,varMap)
jediJobs = []
# reassign
_logger.debug('reassignJobs for JEDI at inactive site %s laststart=%s' % (tmpSite,resSS[0][0]))
if resPI is not None:
for pandaID, eventService, attemptNr in resPI:
if eventService in [EventServiceUtils.esMergeJobFlagNumber]:
_logger.debug('retrying es merge %s at inactive site %s' % (pandaID,tmpSite))
taskBuffer.retryJob(pandaID,{},getNewPandaID=True,attemptNr=attemptNr,
recoverableEsMerge=True)
jediJobs.append(pandaID)
if len(jediJobs) != 0:
nJob = 100
iJob = 0
while iJob < len(jediJobs):
_logger.debug('reassignJobs for JEDI at inactive site %s (%s)' % (tmpSite,jediJobs[iJob:iJob+nJob]))
Client.killJobs(jediJobs[iJob:iJob+nJob],51,keepUnmerged=True)
iJob += nJob
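# The 100-job slicing loop above recurs throughout this script; a helper like the
# following (illustrative only, not part of the original code) captures the idiom:
#
#   def in_chunks(panda_ids, size=100):
#       for i in range(0, len(panda_ids), size):
#           yield panda_ids[i:i + size]
#
#   for chunk in in_chunks(jediJobs):
#       Client.killJobs(chunk, 51, keepUnmerged=True)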
# reassign defined jobs in defined table
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=4)
# get PandaIDs
status,res = taskBuffer.lockJobsForReassign("ATLAS_PANDA.jobsDefined4",timeLimit,['defined'],['managed'],[],[],[],
True)
jobs = []
jediJobs = []
if res is not None:
for (id,lockedby) in res:
if lockedby == 'jedi':
jediJobs.append(id)
else:
jobs.append(id)
# reassign
_logger.debug('reassignJobs for defined jobs -> #%s' % len(jobs))
if len(jobs) > 0:
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug('reassignJobs for defined jobs (%s)' % jobs[iJob:iJob+nJob])
taskBuffer.reassignJobs(jobs[iJob:iJob+nJob],joinThr=True)
iJob += nJob
_logger.debug('reassignJobs for JEDI defined jobs -> #%s' % len(jediJobs))
if len(jediJobs) != 0:
nJob = 100
iJob = 0
while iJob < len(jediJobs):
_logger.debug('reassignJobs for JEDI defined jobs (%s)' % jediJobs[iJob:iJob+nJob])
Client.killJobs(jediJobs[iJob:iJob+nJob],51,keepUnmerged=True)
iJob += nJob
# reassign long-waiting jobs in defined table
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=12)
status,res = taskBuffer.lockJobsForReassign("ATLAS_PANDA.jobsDefined4",timeLimit,[],['managed'],[],[],[],
True)
jobs = []
jediJobs = []
if res is not None:
for (id,lockedby) in res:
if lockedby == 'jedi':
jediJobs.append(id)
else:
jobs.append(id)
# reassign
_logger.debug('reassignJobs for long in defined table -> #%s' % len(jobs))
if len(jobs) > 0:
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug('reassignJobs for long in defined table (%s)' % jobs[iJob:iJob+nJob])
taskBuffer.reassignJobs(jobs[iJob:iJob+nJob],joinThr=True)
iJob += nJob
_logger.debug('reassignJobs for long JEDI in defined table -> #%s' % len(jediJobs))
if len(jediJobs) != 0:
nJob = 100
iJob = 0
while iJob < len(jediJobs):
_logger.debug('reassignJobs for long JEDI in defined table (%s)' % jediJobs[iJob:iJob+nJob])
Client.killJobs(jediJobs[iJob:iJob+nJob],51,keepUnmerged=True)
iJob += nJob
# reassign too long-standing evgen/simul jobs with active state at T1
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=6)
for tmpCloud in siteMapper.getCloudList():
# ignore special clouds
if tmpCloud in ['CERN','OSG']:
continue
status,res = taskBuffer.lockJobsForReassign("ATLAS_PANDA.jobsActive4",timeLimit,['activated'],['managed'],
['evgen','simul'],[siteMapper.getCloud(tmpCloud)['tier1']],[],
True,onlyReassignable=True)
jobs = []
jediJobs = []
if res is not None:
for (id,lockedby) in res:
if lockedby == 'jedi':
jediJobs.append(id)
else:
jobs.append(id)
_logger.debug('reassignJobs for Active T1 evgensimul in %s -> #%s' % (tmpCloud,len(jobs)))
if len(jobs) != 0:
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug('reassignJobs for Active T1 evgensimul (%s)' % jobs[iJob:iJob+nJob])
taskBuffer.reassignJobs(jobs[iJob:iJob+nJob],joinThr=True)
iJob += nJob
_logger.debug('reassignJobs for Active T1 JEDI evgensimul in %s -> #%s' % (tmpCloud,len(jediJobs)))
if len(jediJobs) != 0:
nJob = 100
iJob = 0
while iJob < len(jediJobs):
_logger.debug('reassignJobs for Active T1 JEDI evgensimul (%s)' % jediJobs[iJob:iJob+nJob])
Client.killJobs(jediJobs[iJob:iJob+nJob],51,keepUnmerged=True)
iJob += nJob
# reassign too long-standing evgen/simul jobs with active state at T2
try:
_logger.debug('looking for stuck T2s to reassign evgensimul')
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=6)
varMap = {}
varMap[':jobStatus1'] = 'activated'
varMap[':jobStatus2'] = 'running'
varMap[':prodSourceLabel'] = 'managed'
varMap[':processingType1'] = 'evgen'
varMap[':processingType2'] = 'simul'
sql = "SELECT cloud,computingSite,jobStatus,COUNT(*) FROM ATLAS_PANDA.jobsActive4 "\
"WHERE jobStatus IN (:jobStatus1,:jobStatus2) AND prodSourceLabel=:prodSourceLabel "\
"AND processingType IN (:processingType1,:processingType2) GROUP BY cloud,computingSite,jobStatus "
status,res = taskBuffer.querySQLS(sql, varMap)
if res is not None:
# get ratio of activated/running
siteStatData = {}
for tmpCloud,tmpComputingSite,tmpJobStatus,tmpCount in res:
# skip T1
if tmpComputingSite == siteMapper.getCloud(tmpCloud)['tier1']:
continue
# skip if reassign is disabled
if tmpComputingSite in sitesToDisableReassign:
continue
# add cloud/site
tmpKey = (tmpCloud,tmpComputingSite)
if tmpKey not in siteStatData:
siteStatData[tmpKey] = {'activated':0,'running':0}
# add the number of jobs
if tmpJobStatus in siteStatData[tmpKey]:
siteStatData[tmpKey][tmpJobStatus] += tmpCount
# look for stuck site
stuckThr = 10
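# e.g. (illustration, not real data): activated=120 and running=5 gives a ratio of
# 24 > stuckThr, so the site is treated as stuck; running=0 with any backlog of
# activated jobs is treated the same way.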
stuckSites = []
for tmpKey in siteStatData:
tmpStatData = siteStatData[tmpKey]
if tmpStatData['running'] == 0 or \
float(tmpStatData['activated'])/float(tmpStatData['running']) > stuckThr:
tmpCloud,tmpComputingSite = tmpKey
_logger.debug(' %s:%s %s/%s > %s' % (tmpCloud,tmpComputingSite,tmpStatData['activated'],tmpStatData['running'],stuckThr))
# get stuck jobs
status,res = taskBuffer.lockJobsForReassign("ATLAS_PANDA.jobsActive4",timeLimit,['activated'],['managed'],
['evgen','simul'],[tmpComputingSite],[tmpCloud],True,
onlyReassignable=True)
jobs = []
jediJobs = []
if res is not None:
for (id,lockedby) in res:
if lockedby == 'jedi':
jediJobs.append(id)
else:
jobs.append(id)
_logger.debug('reassignJobs for Active T2 evgensimul %s:%s -> #%s' % (tmpCloud,tmpComputingSite,len(jobs)))
if len(jobs) > 0:
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug('reassignJobs for Active T2 evgensimul (%s)' % jobs[iJob:iJob+nJob])
taskBuffer.reassignJobs(jobs[iJob:iJob+nJob],joinThr=True)
iJob += nJob
_logger.debug('reassignJobs for Active T2 JEDI evgensimul %s:%s -> #%s' % (tmpCloud,tmpComputingSite,len(jediJobs)))
if len(jediJobs) > 0:
nJob = 100
iJob = 0
while iJob < len(jediJobs):
_logger.debug('reassignJobs for Active T2 JEDI evgensimul (%s)' % jediJobs[iJob:iJob+nJob])
Client.killJobs(jediJobs[iJob:iJob+nJob],51,keepUnmerged=True)
iJob += nJob
except Exception:
errType,errValue = sys.exc_info()[:2]
_logger.error("failed to reassign T2 evgensimul with %s:%s" % (errType,errValue))
# reassign too long activated jobs in active table
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=2)
status,res = taskBuffer.lockJobsForReassign("ATLAS_PANDA.jobsActive4",timeLimit,['activated'],['managed'],[],[],[],True,
onlyReassignable=True,getEventService=True)
jobs = []
jediJobs = []
if res is not None:
for pandaID, lockedby, eventService, attemptNr, computingSite in res:
if computingSite in sitesToDisableReassign:
_logger.debug('skip reassignJobs for long activated PandaID={0} since disabled at {1}'.format(pandaID,computingSite))
continue
if lockedby == 'jedi':
if eventService in [EventServiceUtils.esMergeJobFlagNumber]:
_logger.debug('retrying {0} in long activated'.format(pandaID))
taskBuffer.retryJob(pandaID,{},getNewPandaID=True,attemptNr=attemptNr,
recoverableEsMerge=True)
jediJobs.append(pandaID)
else:
jobs.append(pandaID)
_logger.debug('reassignJobs for long activated in active table -> #%s' % len(jobs))
if len(jobs) != 0:
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug('reassignJobs for long activated in active table (%s)' % jobs[iJob:iJob+nJob])
taskBuffer.reassignJobs(jobs[iJob:iJob+nJob],joinThr=True)
iJob += nJob
_logger.debug('reassignJobs for long activated JEDI in active table -> #%s' % len(jediJobs))
if len(jediJobs) != 0:
nJob = 100
iJob = 0
while iJob < len(jediJobs):
_logger.debug('reassignJobs for long activated JEDI in active table (%s)' % jediJobs[iJob:iJob+nJob])
Client.killJobs(jediJobs[iJob:iJob+nJob],51,keepUnmerged=True)
iJob += nJob
# reassign too long starting jobs in active table
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=48)
status,res = taskBuffer.lockJobsForReassign("ATLAS_PANDA.jobsActive4",timeLimit,['starting'],['managed'],[],[],[],True,
onlyReassignable=True,useStateChangeTime=True,getEventService=True)
jobs = []
jediJobs = []
if res is not None:
for pandaID, lockedby, eventService, attemptNr, computingSite in res:
if computingSite in sitesToDisableReassign:
_logger.debug('skip reassignJobs for long starting PandaID={0} since disabled at {1}'.format(pandaID,computingSite))
continue
if lockedby == 'jedi':
jediJobs.append(pandaID)
else:
jobs.append(pandaID)
_logger.debug('reassignJobs for long starting in active table -> #%s' % len(jobs))
if len(jobs) != 0:
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug('reassignJobs for long starting in active table (%s)' % jobs[iJob:iJob+nJob])
taskBuffer.reassignJobs(jobs[iJob:iJob+nJob],joinThr=True)
iJob += nJob
_logger.debug('reassignJobs for long starting JEDI in active table -> #%s' % len(jediJobs))
if len(jediJobs) != 0:
nJob = 100
iJob = 0
while iJob < len(jediJobs):
_logger.debug('reassignJobs for long starting JEDI in active table (%s)' % jediJobs[iJob:iJob+nJob])
Client.killJobs(jediJobs[iJob:iJob+nJob],51,keepUnmerged=True)
iJob += nJob
# kill too long-standing analysis jobs in active table
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=7)
varMap = {}
varMap[':prodSourceLabel1'] = 'test'
varMap[':prodSourceLabel2'] = 'panda'
varMap[':prodSourceLabel3'] = 'user'
varMap[':modificationTime'] = timeLimit
status,res = taskBuffer.querySQLS("SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE (prodSourceLabel=:prodSourceLabel1 OR prodSourceLabel=:prodSourceLabel2 OR prodSourceLabel=:prodSourceLabel3) AND modificationTime<:modificationTime ORDER BY PandaID",
varMap)
jobs = []
if res is not None:
for (id,) in res:
jobs.append(id)
# kill
if len(jobs):
Client.killJobs(jobs,2)
_logger.debug("killJobs for Anal Active (%s)" % str(jobs))
# kill too long pending jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=1)
varMap = {}
varMap[':jobStatus'] = 'pending'
varMap[':creationTime'] = timeLimit
status,res = taskBuffer.querySQLS("SELECT PandaID FROM ATLAS_PANDA.jobsWaiting4 WHERE jobStatus=:jobStatus AND creationTime<:creationTime",
varMap)
jobs = []
if res is not None:
for (id,) in res:
jobs.append(id)
# kill
if len(jobs):
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug("killJobs for Pending (%s)" % str(jobs[iJob:iJob+nJob]))
Client.killJobs(jobs[iJob:iJob+nJob],4)
iJob += nJob
# kick waiting ES merge jobs which were generated from fake co-jumbo
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(minutes=10)
varMap = {}
varMap[':jobStatus'] = 'waiting'
varMap[':creationTime'] = timeLimit
varMap[':esMerge'] = EventServiceUtils.esMergeJobFlagNumber
sql = "SELECT PandaID,computingSite FROM ATLAS_PANDA.jobsWaiting4 WHERE jobStatus=:jobStatus AND creationTime<:creationTime "
sql += "AND eventService=:esMerge ORDER BY jediTaskID "
status,res = taskBuffer.querySQLS(sql, varMap)
jobsMap = {}
if res is not None:
for id,site in res:
if site not in jobsMap:
jobsMap[site] = []
jobsMap[site].append(id)
# kick
if len(jobsMap):
for site in jobsMap:
jobs = jobsMap[site]
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug("kick waiting ES merge (%s)" % str(jobs[iJob:iJob+nJob]))
Client.reassignJobs(jobs[iJob:iJob+nJob])
iJob += nJob
# kill too long waiting jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=1)
varMap = {}
varMap[':jobStatus'] = 'waiting'
varMap[':creationTime'] = timeLimit
varMap[':coJumbo'] = EventServiceUtils.coJumboJobFlagNumber
sql = "SELECT PandaID FROM ATLAS_PANDA.jobsWaiting4 WHERE jobStatus=:jobStatus AND creationTime<:creationTime "
sql += "AND (eventService IS NULL OR eventService<>:coJumbo) "
status,res = taskBuffer.querySQLS(sql, varMap)
jobs = []
if res is not None:
for (id,) in res:
jobs.append(id)
# kill
if len(jobs):
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug("killJobs for Waiting (%s)" % str(jobs[iJob:iJob+nJob]))
Client.killJobs(jobs[iJob:iJob+nJob],4)
iJob += nJob
# kill too long running ES jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=24)
varMap = {}
varMap[':jobStatus1'] = 'running'
varMap[':jobStatus2'] = 'starting'
varMap[':timeLimit'] = timeLimit
varMap[':esJob'] = EventServiceUtils.esJobFlagNumber
varMap[':coJumbo'] = EventServiceUtils.coJumboJobFlagNumber
sql = "SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE jobStatus IN (:jobStatus1,:jobStatus2) AND stateChangeTime<:timeLimit "
sql += "AND eventService IN (:esJob,:coJumbo) AND currentPriority>=900 "
status,res = taskBuffer.querySQLS(sql, varMap)
jobs = []
if res is not None:
for (id,) in res:
jobs.append(id)
# kill
if len(jobs):
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug("killJobs for long running ES jobs (%s)" % str(jobs[iJob:iJob+nJob]))
Client.killJobs(jobs[iJob:iJob+nJob], 2, keepUnmerged=True, jobSubStatus='es_toolong')
iJob += nJob
# kill too long running ES merge jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=24)
varMap = {}
varMap[':jobStatus1'] = 'running'
varMap[':jobStatus2'] = 'starting'
varMap[':timeLimit'] = timeLimit
varMap[':esMergeJob'] = EventServiceUtils.esMergeJobFlagNumber
sql = "SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE jobStatus IN (:jobStatus1,:jobStatus2) AND stateChangeTime<:timeLimit "
sql += "AND eventService=:esMergeJob "
status,res = taskBuffer.querySQLS(sql, varMap)
jobs = []
if res is not None:
for (id,) in res:
jobs.append(id)
# kill
if len(jobs):
nJob = 100
iJob = 0
while iJob < len(jobs):
_logger.debug("killJobs for long running ES merge jobs (%s)" % str(jobs[iJob:iJob+nJob]))
Client.killJobs(jobs[iJob:iJob+nJob], 2)
iJob += nJob
# kill too long waiting jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=7)
sql = "SELECT PandaID FROM ATLAS_PANDA.jobsWaiting4 WHERE ((creationTime<:timeLimit AND (eventService IS NULL OR eventService<>:coJumbo)) "
sql += "OR modificationTime<:timeLimit) "
varMap = {}
varMap[':timeLimit'] = timeLimit
varMap[':coJumbo'] = EventServiceUtils.coJumboJobFlagNumber
status,res = taskBuffer.querySQLS(sql, varMap)
jobs = []
if res is not None:
for (id,) in res:
jobs.append(id)
# kill
if len(jobs):
Client.killJobs(jobs,4)
_logger.debug("killJobs in jobsWaiting (%s)" % str(jobs))
# rebrokerage
_logger.debug("Rebrokerage start")
# get timeout value
timeoutVal = taskBuffer.getConfigValue('rebroker','ANALY_TIMEOUT')
if timeoutVal is None:
timeoutVal = 12
_logger.debug("timeout value : {0}h".format(timeoutVal))
try:
normalTimeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=timeoutVal)
sortTimeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=3)
sql = "WITH p AS ("\
"SELECT MIN(PandaID) PandaID,jobDefinitionID,prodUserName,prodUserID,computingSite,jediTaskID,processingType "\
"FROM ATLAS_PANDA.jobsActive4 "\
"WHERE prodSourceLabel IN (:prodSourceLabel1,:prodSourceLabel2) "\
"AND jobStatus IN (:jobStatus1,:jobStatus2,:jobStatus3) "\
"AND jobsetID IS NOT NULL AND lockedBy=:lockedBy "\
"GROUP BY jobDefinitionID,prodUserName,prodUserID,computingSite,jediTaskID,processingType "\
") "\
"SELECT /*+ INDEX (s JOBS_STATUSLOG_PANDAID_IDX) */ "\
"p.jobDefinitionID,p.prodUserName,p.prodUserID,p.computingSite,s.modificationTime,p.jediTaskID,p.processingType " \
"FROM p, ATLAS_PANDA.jobs_statuslog s "\
"WHERE s.PandaID=p.PandaID AND s.jobStatus=:s_jobStatus AND s.modificationTime<:modificationTime "
varMap = {}
varMap[':prodSourceLabel1'] = 'user'
varMap[':prodSourceLabel2'] = 'panda'
varMap[':modificationTime'] = sortTimeLimit
varMap[':lockedBy'] = 'jedi'
varMap[':jobStatus1'] = 'activated'
varMap[':jobStatus2'] = 'dummy'
varMap[':jobStatus3'] = 'starting'
varMap[':s_jobStatus'] = 'activated'
# get jobs older than threshold
ret,res = taskBuffer.querySQLS(sql, varMap)
resList = []
keyList = set()
if res is not None:
for tmpItem in res:
jobDefinitionID,prodUserName,prodUserID,computingSite,maxTime,jediTaskID,processingType = tmpItem
tmpKey = (jediTaskID,jobDefinitionID)
keyList.add(tmpKey)
resList.append(tmpItem)
# get stalled assigned job
sqlA = "SELECT jobDefinitionID,prodUserName,prodUserID,computingSite,MAX(creationTime),jediTaskID,processingType "
sqlA += "FROM ATLAS_PANDA.jobsDefined4 "
sqlA += "WHERE prodSourceLabel IN (:prodSourceLabel1,:prodSourceLabel2) AND jobStatus IN (:jobStatus1,:jobStatus2) "
sqlA += "AND creationTime<:modificationTime AND lockedBy=:lockedBy "
sqlA += "GROUP BY jobDefinitionID,prodUserName,prodUserID,computingSite,jediTaskID,processingType "
varMap = {}
varMap[':prodSourceLabel1'] = 'user'
varMap[':prodSourceLabel2'] = 'panda'
varMap[':modificationTime'] = sortTimeLimit
varMap[':lockedBy'] = 'jedi'
varMap[':jobStatus1'] = 'assigned'
varMap[':jobStatus2'] = 'defined'
retA,resA = taskBuffer.querySQLS(sqlA, varMap)
if resA is not None:
for tmpItem in resA:
jobDefinitionID,prodUserName,prodUserID,computingSite,maxTime,jediTaskID,processingType = tmpItem
tmpKey = (jediTaskID,jobDefinitionID)
if tmpKey not in keyList:
keyList.add(tmpKey)
resList.append(tmpItem)
# sql to check recent activity
sql = "SELECT PandaID,stateChangeTime,jobStatus FROM %s "
sql += "WHERE prodUserName=:prodUserName AND jobDefinitionID=:jobDefinitionID "
sql += "AND computingSite=:computingSite AND jediTaskID=:jediTaskID "
sql += "AND jobStatus NOT IN (:jobStatus1,:jobStatus2,:jobStatus3) "
sql += "AND stateChangeTime>:modificationTime "
sql += "AND rownum <= 1"
# sql to get associated jobs with jediTaskID
sqlJJ = "SELECT PandaID FROM %s "
sqlJJ += "WHERE jediTaskID=:jediTaskID AND jobStatus IN (:jobS1,:jobS2,:jobS3,:jobS4,:jobS5) "
sqlJJ += "AND jobDefinitionID=:jobDefID AND computingSite=:computingSite "
if resList != []:
recentRuntimeLimit = datetime.datetime.utcnow() - datetime.timedelta(hours=3)
# loop over all user/jobID combinations
iComb = 0
nComb = len(resList)
_logger.debug("total combinations = %s" % nComb)
for jobDefinitionID,prodUserName,prodUserID,computingSite,maxModificationTime,jediTaskID,processingType in resList:
# check if jobs with the jobID have run recently
varMap = {}
varMap[':jediTaskID'] = jediTaskID
varMap[':computingSite'] = computingSite
varMap[':prodUserName'] = prodUserName
varMap[':jobDefinitionID'] = jobDefinitionID
varMap[':modificationTime'] = recentRuntimeLimit
varMap[':jobStatus1'] = 'closed'
varMap[':jobStatus2'] = 'failed'
varMap[':jobStatus3'] = 'starting'
_logger.debug(" rebro:%s/%s:ID=%s:%s jediTaskID=%s site=%s" % (iComb,nComb,jobDefinitionID,
prodUserName,jediTaskID,
computingSite))
iComb += 1
hasRecentJobs = False
# check site
if not siteMapper.checkSite(computingSite):
_logger.debug(" -> skip unknown site=%s" % computingSite)
continue
# check site status
tmpSiteStatus = siteMapper.getSite(computingSite).status
if tmpSiteStatus not in ['offline','test']:
# use normal time limit for normal site status
if maxModificationTime > normalTimeLimit:
_logger.debug(" -> skip wait for normal timelimit=%s<maxModTime=%s" % (normalTimeLimit,maxModificationTime))
continue
for tableName in ['ATLAS_PANDA.jobsActive4','ATLAS_PANDA.jobsArchived4']:
retU,resU = taskBuffer.querySQLS(sql % tableName, varMap)
if resU is None:
# database error
raise RuntimeError("failed to check modTime")
if resU != []:
# found recent jobs
hasRecentJobs = True
_logger.debug(" -> skip due to recent activity %s to %s at %s" % (resU[0][0],
resU[0][2],
resU[0][1]))
break
else:
_logger.debug(" -> immediate rebro due to site status=%s" % tmpSiteStatus)
if hasRecentJobs:
# skip since some jobs have run recently
continue
else:
if jediTaskID is None:
_logger.debug(" -> rebro for normal task : no action")
else:
_logger.debug(" -> rebro for JEDI task")
killJobs = []
varMap = {}
varMap[':jediTaskID'] = jediTaskID
varMap[':jobDefID'] = jobDefinitionID
varMap[':computingSite'] = computingSite
varMap[':jobS1'] = 'defined'
varMap[':jobS2'] = 'assigned'
varMap[':jobS3'] = 'activated'
varMap[':jobS4'] = 'dummy'
varMap[':jobS5'] = 'starting'
for tableName in ['ATLAS_PANDA.jobsDefined4','ATLAS_PANDA.jobsActive4']:
retJJ,resJJ = taskBuffer.querySQLS(sqlJJ % tableName, varMap)
for tmpPandaID, in resJJ:
killJobs.append(tmpPandaID)
# reverse sort to kill buildJob in the end
killJobs.sort()
killJobs.reverse()
# kill to reassign
taskBuffer.killJobs(killJobs,'JEDI','51',True)
except Exception as e:
_logger.error("rebrokerage failed with {0} : {1}".format(str(e), traceback.format_exc()))
# kill too long running jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=21)
status,res = taskBuffer.querySQLS("SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE creationTime<:creationTime",
{':creationTime':timeLimit})
jobs = []
if res is not None:
for (id,) in res:
jobs.append(id)
# kill
if len(jobs):
nJob = 100
iJob = 0
while iJob < len(jobs):
# set tobekill
_logger.debug('killJobs for Running (%s)' % jobs[iJob:iJob+nJob])
Client.killJobs(jobs[iJob:iJob+nJob],2)
# run watcher
for id in jobs[iJob:iJob+nJob]:
thr = Watcher(taskBuffer,id,single=True,sitemapper=siteMapper,sleepTime=60*24*21)
thr.start()
thr.join()
time.sleep(1)
iJob += nJob
time.sleep(10)
# kill too long waiting ddm jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=5)
varMap = {}
varMap[':prodSourceLabel'] = 'ddm'
varMap[':creationTime'] = timeLimit
status,res = taskBuffer.querySQLS("SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE prodSourceLabel=:prodSourceLabel AND creationTime<:creationTime",
varMap)
jobs = []
if res is not None:
for (id,) in res:
jobs.append(id)
# kill
if len(jobs):
Client.killJobs(jobs,2)
_logger.debug("killJobs for DDM (%s)" % str(jobs))
# kill too long throttled jobs
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=7)
varMap = {}
varMap[':jobStatus'] = 'throttled'
varMap[':creationTime'] = timeLimit
status,res = taskBuffer.querySQLS("SELECT PandaID FROM ATLAS_PANDA.jobsActive4 WHERE jobStatus=:jobStatus AND creationTime<:creationTime ",
varMap)
jobs = []
if res is not None:
for (id,) in res:
jobs.append(id)
# kill
if len(jobs):
Client.killJobs(jobs,2)
_logger.debug("killJobs for throttled (%s)" % str(jobs))
# check if merge job is valid
_logger.debug('kill invalid pmerge')
varMap = {}
varMap[':processingType'] = 'pmerge'
varMap[':timeLimit'] = datetime.datetime.utcnow() - datetime.timedelta(minutes=30)
sql = "SELECT PandaID,jediTaskID FROM ATLAS_PANDA.jobsDefined4 WHERE processingType=:processingType AND modificationTime<:timeLimit "
sql += "UNION "
sql += "SELECT PandaID,jediTaskID FROM ATLAS_PANDA.jobsActive4 WHERE processingType=:processingType AND modificationTime<:timeLimit "
status,res = taskBuffer.querySQLS(sql,varMap)
nPmerge = 0
badPmerge = 0
_logger.debug('check {0} pmerge'.format(len(res)))
for pandaID,jediTaskID in res:
nPmerge += 1
isValid,tmpMsg = taskBuffer.isValidMergeJob(pandaID,jediTaskID)
if isValid is False:
_logger.debug("kill pmerge {0} since {1} gone".format(pandaID,tmpMsg))
taskBuffer.killJobs([pandaID],'killed since pre-merge job {0} gone'.format(tmpMsg),
'52',True)
badPmerge += 1
_logger.debug('killed invalid pmerge {0}/{1}'.format(badPmerge,nPmerge))
# cleanup of jumbo jobs
_logger.debug('jumbo job cleanup')
res = taskBuffer.cleanupJumboJobs()
_logger.debug(res)
_memoryCheck("delete XML")
# delete old files in DA cache
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=7)
files = os.listdir(panda_config.cache_dir)
for file in files:
# skip special test file
if file == 'sources.72c48dc5-f055-43e5-a86e-4ae9f8ea3497.tar.gz':
continue
if file == 'sources.090f3f51-fc81-4e80-9749-a5e4b2bd58de.tar.gz':
continue
try:
# get timestamp
timestamp = datetime.datetime.fromtimestamp(os.stat('%s/%s' % (panda_config.cache_dir,file)).st_mtime)
# delete
if timestamp < timeLimit:
_logger.debug("delete %s " % file)
os.remove('%s/%s' % (panda_config.cache_dir,file))
except Exception:
pass
_memoryCheck("delete core")
# delete core
dirName = '%s/..' % panda_config.logdir
for file in os.listdir(dirName):
if file.startswith('core.'):
_logger.debug("delete %s " % file)
try:
os.remove('%s/%s' % (dirName,file))
except Exception:
pass
# update email DB
_memoryCheck("email")
_logger.debug("Update emails")
# lock file
_lockGetMail = open(panda_config.lockfile_getMail, 'w')
# lock email DB
fcntl.flock(_lockGetMail.fileno(), fcntl.LOCK_EX)
# open email DB
pDB = shelve.open(panda_config.emailDB)
# read
mailMap = {}
for name in pDB:
addr = pDB[name]
mailMap[name] = addr
# close DB
pDB.close()
# release file lock
fcntl.flock(_lockGetMail.fileno(), fcntl.LOCK_UN)
# set email address
for name in mailMap:
addr = mailMap[name]
# remove _
name = re.sub('_$','',name)
status,res = taskBuffer.querySQLS("SELECT email FROM ATLAS_PANDAMETA.users WHERE name=:name",{':name':name})
# failed or not found
if status == -1 or len(res) == 0:
_logger.error("%s not found in user DB" % name)
continue
# already set
if res[0][0] not in ['','None',None]:
continue
# update email
_logger.debug("set '%s' to %s" % (name,addr))
status,res = taskBuffer.querySQLS("UPDATE ATLAS_PANDAMETA.users SET email=:addr WHERE name=:name",{':addr':addr,':name':name})
# sandbox
_logger.debug("Touch sandbox")
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=1)
sqlC = "SELECT hostName,fileName,creationTime,userName FROM ATLAS_PANDAMETA.userCacheUsage "\
"WHERE creationTime>:timeLimit AND creationTime>modificationTime "\
"AND (fileName like 'sources%' OR fileName like 'jobO%') "
sqlU = "UPDATE ATLAS_PANDAMETA.userCacheUsage SET modificationTime=CURRENT_DATE "\
"WHERE userName=:userName AND fileName=:fileName "
status, res = taskBuffer.querySQLS(sqlC, {':timeLimit': timeLimit})
if res is None:
_logger.error("failed to get files")
elif len(res) > 0:
_logger.debug("{0} files to touch".format(len(res)))
for hostName, fileName, creationTime, userName in res:
base_url = 'https://{0}:{1}'.format(hostName, panda_config.pserverport)
_logger.debug("touch {0} on {1} created at {2}".format(fileName, hostName, creationTime))
s,o = Client.touchFile(base_url, fileName)
_logger.debug(o)
if o == 'True':
varMap = dict()
varMap[':userName'] = userName
varMap[':fileName'] = fileName
taskBuffer.querySQLS(sqlU, varMap)
_logger.debug("Check sandbox")
timeLimit = datetime.datetime.utcnow() - datetime.timedelta(days=1)
expireLimit = datetime.datetime.utcnow() - datetime.timedelta(days=30)
sqlD = "DELETE FROM ATLAS_PANDAMETA.userCacheUsage WHERE userName=:userName AND fileName=:fileName "
nRange = 100
for i in range(nRange):
_logger.debug("{0}/{1} {2} files to check".format(nRange, i, len(res)))
res = taskBuffer.getLockSandboxFiles(timeLimit, 1000)
if res is None:
_logger.error("failed to get files")
break
elif len(res) == 0:
break
for userName, hostName, fileName, creationTime, modificationTime in res:
url = 'https://{0}:{1}/cache/{2}'.format(hostName, panda_config.pserverport, fileName)
_logger.debug("checking {0} created at {1}".format(url, creationTime))
toDelete = False
try:
x = requests.head(url, verify=False)
_logger.debug("code {0}".format(x.status_code))
if x.status_code == 404:
_logger.debug("delete")
toDelete = True
except Exception as e:
_logger.debug("failed with {0}".format(str(e)))
if creationTime < expireLimit:
toDelete = True
_logger.debug("delete due to creationTime={0}".format(creationTime))
# update or delete
varMap = dict()
varMap[':userName'] = userName
varMap[':fileName'] = fileName
if toDelete:
taskBuffer.querySQLS(sqlD, varMap)
else:
_logger.debug("keep")
_memoryCheck("end")
_logger.debug("===================== end =====================")
# run
if __name__ == '__main__':
main(argv=sys.argv)
|
apache-2.0
| 4,297,887,075,583,032,000 | 45.885224 | 323 | 0.571991 | false | 3.775724 | false | false | false |
mvendra/mvtools
|
security/hash_check.py
|
1
|
1222
|
#!/usr/bin/env python3
import sys
import os
import sha256_wrapper
def puaq():
print("Usage: %s archive-to-check [hash-file]" % os.path.basename(__file__))
sys.exit(1)
def sha256sum_check(archive_file, hash_file):
hash_file_contents = ""
with open(hash_file, "r") as f:
hash_file_contents = f.read()
v, r = sha256_wrapper.hash_sha_256_app_file(archive_file)
if not v:
print("Failed generating hash for file %s" % archive_file)
sys.exit(1)
# and then compare
return hash_file_contents[0:64] == r
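# Expected hash-file layout (assumed to follow `sha256sum` output): the first 64
# characters are the hex digest, optionally followed by whitespace and a file
# name, e.g.
#   9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08  backup.tar.gz
# Typical invocation (file names assumed): ./hash_check.py backup.tar.gz backup.tar.gz.sha256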
if __name__ == "__main__":
if len(sys.argv) < 2:
puaq()
archive_file = sys.argv[1]
hash_file = ""
if len(sys.argv) > 2:
hash_file = sys.argv[2]
else:
hash_file = archive_file + ".sha256"
if not os.path.isfile(archive_file):
print("%s does not exist. Aborting." % archive_file)
sys.exit(1)
if not os.path.isfile(hash_file):
print("%s does not exist. Aborting." % hash_file)
sys.exit(1)
if sha256sum_check(archive_file, hash_file):
print("Correct match")
else:
print("Check failed!")
sys.exit(1)
|
mit
| 7,459,735,787,600,688,000 | 21.62963 | 80 | 0.576923 | false | 3.174026 | false | false | false |
Kokemomo/Kokemomo
|
kokemomo/plugins/engine/model/km_storage/impl/km_rdb_adapter.py
|
1
|
4103
|
from sqlalchemy import Column, Integer, String, DateTime, Boolean, Text, func
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker
from kokemomo.plugins.engine.model.km_storage.km_adapter import BaseAdapter
class BaseModel(object):
id = Column(Integer, primary_key=True)
created_at = Column(DateTime, default=func.now())
updated_at = Column(DateTime, default=func.now(), onupdate=func.now())
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
def __repr__(self):
return '<%s>' % self.__class__.__name__
def validate(self):
pass
def save(self, validate=True):
if validate:
self.validate()
try:
adapter.add(self)
adapter.commit()
except Exception:
adapter.rollback()
raise
def delete(self):
try:
adapter.delete(self)
adapter.commit()
except Exception:
adapter.rollback()
raise
@classmethod
def all(cls, order=None, commit=True):
res = adapter.session.query(cls).order_by(order).all()
if commit:
adapter.session.commit()
return res
@classmethod
def get(cls, id, commit=True):
res = adapter.session.query(cls).filter(cls.id == id).first()
if commit:
adapter.session.commit()
return res
@classmethod
def delete_by_id(cls, id):
try:
elem = cls.get(id)
adapter.delete(elem)
adapter.commit()
except Exception:
adapter.rollback()
raise
@classmethod
def delete_by_condition(cls, **kwargs):
target_list = cls.find(**kwargs)
try:
for target in target_list:
adapter.session.delete(target)
adapter.commit()
except Exception:
adapter.rollback()
raise
@classmethod
def find(cls, order=None, commit=True, **kwargs):
res = adapter.session.query(cls).order_by(order).filter_by(**kwargs).all()
if commit:
adapter.session.commit()
return res
class KMRDBAdapter(BaseAdapter):
def __init__(self, rdb_path, options):
self.rdb_path = rdb_path
self.options = options
self.Model = declarative_base(cls=BaseModel)
self.fields = [Column, String, Integer, Boolean, Text, DateTime]
for field in self.fields:
setattr(self, field.__name__, field)
@property
def metadata(self):
return self.Model.metadata
def init(self, rdb_path=None, options={}):
self.session = scoped_session(sessionmaker())
if rdb_path:
self.rdb_path = rdb_path
self.engine = create_engine(self.rdb_path, **options)
self.session.configure(bind=self.engine)
self.metadata.create_all(self.engine)
def drop_all(self):
self.metadata.drop_all(self.engine)
def add(self, *args, **kwargs):
self.session.add(*args, **kwargs)
def delete(self, *args, **kwargs):
self.session.delete(*args, **kwargs)
def commit(self):
self.session.commit()
def set(self, *args, **kwargs):
self.add(*args, **kwargs)
self.commit()
def rollback(self):
self.session.rollback()
def get(self, *args, **kwargs):
pass
def rollback():
adapter.session.rollback()
class Transaction(object):
@classmethod
def begin(cls):
return adapter.session.begin(subtransactions=True)
@classmethod
def add(cls, *args, **kwargs):
adapter.session.add(*args, **kwargs)
@classmethod
def delete(cls, *args, **kwargs):
adapter.session.delete(*args, **kwargs)
@classmethod
def commit(cls):
adapter.session.commit()
@classmethod
def rollback(self):
adapter.session.rollback()
from kokemomo.settings.common import DATA_BASE, DATA_BASE_OPTIONS
adapter = KMRDBAdapter(DATA_BASE, DATA_BASE_OPTIONS)
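# Minimal usage sketch (assumptions: an in-memory SQLite URL and a made-up model
# named KMNote; not part of the original module):
#
#   class KMNote(adapter.Model):
#       title = adapter.Column(adapter.String(128))
#
#   adapter.init('sqlite://', {})          # bind the engine and create tables
#   note = KMNote(title='hello')
#   note.save()
#   print(KMNote.find(title='hello'))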
|
mit
| 8,601,652,151,066,538,000 | 25.642857 | 82 | 0.60078 | false | 3.991245 | false | false | false |
prashanthpai/swift
|
swift/common/ring/builder.py
|
1
|
73566
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import errno
import itertools
import logging
import math
import random
import six.moves.cPickle as pickle
from copy import deepcopy
from contextlib import contextmanager
from array import array
from collections import defaultdict
import six
from six.moves import range
from time import time
from swift.common import exceptions
from swift.common.ring import RingData
from swift.common.ring.utils import tiers_for_dev, build_tier_tree, \
validate_and_normalize_address
# we can't store None's in the replica2part2dev array, so we high-jack
# the max value for magic to represent the part is not currently
# assigned to any device.
NONE_DEV = 2 ** 16 - 1
MAX_BALANCE = 999.99
MAX_BALANCE_GATHER_COUNT = 3
class RingValidationWarning(Warning):
pass
try:
# python 2.7+
from logging import NullHandler
except ImportError:
# python 2.6
class NullHandler(logging.Handler):
def emit(self, *a, **kw):
pass
class RingBuilder(object):
"""
Used to build swift.common.ring.RingData instances to be written to disk
and used with swift.common.ring.Ring instances. See bin/swift-ring-builder
for example usage.
The instance variable devs_changed indicates if the device information has
changed since the last balancing. This can be used by tools to know whether
a rebalance request is an isolated request or due to added, changed, or
removed devices.
:param part_power: number of partitions = 2**part_power.
:param replicas: number of replicas for each partition
:param min_part_hours: minimum number of hours between partition changes
"""
def __init__(self, part_power, replicas, min_part_hours):
if part_power > 32:
raise ValueError("part_power must be at most 32 (was %d)"
% (part_power,))
if replicas < 1:
raise ValueError("replicas must be at least 1 (was %.6f)"
% (replicas,))
if min_part_hours < 0:
raise ValueError("min_part_hours must be non-negative (was %d)"
% (min_part_hours,))
self.part_power = part_power
self.replicas = replicas
self.min_part_hours = min_part_hours
self.parts = 2 ** self.part_power
self.devs = []
self.devs_changed = False
self.version = 0
self.overload = 0.0
# _replica2part2dev maps from replica number to partition number to
# device id. So, for a three replica, 2**23 ring, it's an array of
# three 2**23 arrays of device ids (unsigned shorts). This can work a
# bit faster than the 2**23 array of triplet arrays of device ids in
# many circumstances. Making one big 2**23 * 3 array didn't seem to
# have any speed change; though you're welcome to try it again (it was
# a while ago, code-wise, when I last tried it).
self._replica2part2dev = None
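# Illustrative shape (assumed, tiny numbers for readability): with replicas=3
# and part_power=2 a populated map could look like
#   [array('H', [0, 3, 1, 2]),   # replica 0: device id for parts 0..3
#    array('H', [1, 2, 0, 3]),   # replica 1
#    array('H', [2, 0, 3, 1])]   # replica 2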
# _last_part_moves is an array of unsigned bytes representing
# the number of hours since a given partition was last moved.
# This is used to guarantee we don't move a partition twice
# within a given number of hours (24 is my usual test). Removing
# a device overrides this behavior as it's assumed that's only
# done because of device failure.
self._last_part_moves = None
# _last_part_moves_epoch indicates the time the offsets in
# _last_part_moves is based on.
self._last_part_moves_epoch = 0
self._last_part_gather_start = 0
self._dispersion_graph = {}
self.dispersion = 0.0
self._remove_devs = []
self._ring = None
self.logger = logging.getLogger("swift.ring.builder")
if not self.logger.handlers:
self.logger.disabled = True
# silence "no handler for X" error messages
self.logger.addHandler(NullHandler())
@contextmanager
def debug(self):
"""
Temporarily enables debug logging, useful in tests, e.g.
with rb.debug():
rb.rebalance()
"""
self.logger.disabled = False
try:
yield
finally:
self.logger.disabled = True
@property
def min_part_seconds_left(self):
"""Get the total seconds until a rebalance can be performed"""
elapsed_seconds = int(time() - self._last_part_moves_epoch)
return max((self.min_part_hours * 3600) - elapsed_seconds, 0)
def weight_of_one_part(self):
"""
Returns the weight of each partition as calculated from the
total weight of all the devices.
"""
try:
return self.parts * self.replicas / \
sum(d['weight'] for d in self._iter_devs())
except ZeroDivisionError:
raise exceptions.EmptyRingError('There are no devices in this '
'ring, or all devices have been '
'deleted')
@classmethod
def from_dict(cls, builder_data):
b = cls(1, 1, 1) # Dummy values
b.copy_from(builder_data)
return b
def copy_from(self, builder):
"""
Reinitializes this RingBuilder instance from data obtained from the
builder dict given. Code example::
b = RingBuilder(1, 1, 1) # Dummy values
b.copy_from(builder)
This is to restore a RingBuilder that has had its b.to_dict()
previously saved.
"""
if hasattr(builder, 'devs'):
self.part_power = builder.part_power
self.replicas = builder.replicas
self.min_part_hours = builder.min_part_hours
self.parts = builder.parts
self.devs = builder.devs
self.devs_changed = builder.devs_changed
self.overload = builder.overload
self.version = builder.version
self._replica2part2dev = builder._replica2part2dev
self._last_part_moves_epoch = builder._last_part_moves_epoch
self._last_part_moves = builder._last_part_moves
self._last_part_gather_start = builder._last_part_gather_start
self._remove_devs = builder._remove_devs
else:
self.part_power = builder['part_power']
self.replicas = builder['replicas']
self.min_part_hours = builder['min_part_hours']
self.parts = builder['parts']
self.devs = builder['devs']
self.devs_changed = builder['devs_changed']
self.overload = builder.get('overload', 0.0)
self.version = builder['version']
self._replica2part2dev = builder['_replica2part2dev']
self._last_part_moves_epoch = builder['_last_part_moves_epoch']
self._last_part_moves = builder['_last_part_moves']
self._last_part_gather_start = builder['_last_part_gather_start']
self._dispersion_graph = builder.get('_dispersion_graph', {})
self.dispersion = builder.get('dispersion')
self._remove_devs = builder['_remove_devs']
self._ring = None
# Old builders may not have a region defined for their devices, in
# which case we default it to 1.
for dev in self._iter_devs():
dev.setdefault("region", 1)
if not self._last_part_moves_epoch:
self._last_part_moves_epoch = 0
def __deepcopy__(self, memo):
return type(self).from_dict(deepcopy(self.to_dict(), memo))
def to_dict(self):
"""
Returns a dict that can be used later with copy_from to
restore a RingBuilder. swift-ring-builder uses this to
pickle.dump the dict to a file and later load that dict into
copy_from.
"""
return {'part_power': self.part_power,
'replicas': self.replicas,
'min_part_hours': self.min_part_hours,
'parts': self.parts,
'devs': self.devs,
'devs_changed': self.devs_changed,
'version': self.version,
'overload': self.overload,
'_replica2part2dev': self._replica2part2dev,
'_last_part_moves_epoch': self._last_part_moves_epoch,
'_last_part_moves': self._last_part_moves,
'_last_part_gather_start': self._last_part_gather_start,
'_dispersion_graph': self._dispersion_graph,
'dispersion': self.dispersion,
'_remove_devs': self._remove_devs}
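# Round-trip sketch (illustrative; roughly how .builder files are persisted,
# file name assumed):
#
#   with open('object.builder', 'wb') as f:
#       pickle.dump(rb.to_dict(), f, protocol=2)
#   with open('object.builder', 'rb') as f:
#       rb2 = RingBuilder.from_dict(pickle.load(f))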
def change_min_part_hours(self, min_part_hours):
"""
Changes the value used to decide if a given partition can be moved
again. This restriction is to give the overall system enough time to
settle a partition to its new location before moving it to yet another
location. While no data would be lost if a partition is moved several
times quickly, it could make that data unreachable for a short period
of time.
This should be set to at least the average full partition replication
time. Starting it at 24 hours and then lowering it to what the
replicator reports as the longest partition cycle is best.
:param min_part_hours: new value for min_part_hours
"""
self.min_part_hours = min_part_hours
def set_replicas(self, new_replica_count):
"""
Changes the number of replicas in this ring.
If the new replica count is sufficiently different that
self._replica2part2dev will change size, sets
self.devs_changed. This is so tools like
bin/swift-ring-builder can know to write out the new ring
rather than bailing out due to lack of balance change.
"""
old_slots_used = int(self.parts * self.replicas)
new_slots_used = int(self.parts * new_replica_count)
if old_slots_used != new_slots_used:
self.devs_changed = True
self.replicas = new_replica_count
def set_overload(self, overload):
self.overload = overload
def get_ring(self):
"""
Get the ring, or more specifically, the swift.common.ring.RingData.
This ring data is the minimum required for use of the ring. The ring
builder itself keeps additional data such as when partitions were last
moved.
"""
# We cache the self._ring value so multiple requests for it don't build
# it multiple times. Be sure to set self._ring = None whenever the ring
# will need to be rebuilt.
if not self._ring:
# Make devs list (with holes for deleted devices) and not including
# builder-specific extra attributes.
devs = [None] * len(self.devs)
for dev in self._iter_devs():
devs[dev['id']] = dict((k, v) for k, v in dev.items()
if k not in ('parts', 'parts_wanted'))
# Copy over the replica+partition->device assignments, the device
# information, and the part_shift value (the number of bits to
# shift an unsigned int >I right to obtain the partition for the
# int).
if not self._replica2part2dev:
self._ring = RingData([], devs, 32 - self.part_power)
else:
self._ring = \
RingData([array('H', p2d) for p2d in
self._replica2part2dev],
devs, 32 - self.part_power)
return self._ring
def add_dev(self, dev):
"""
Add a device to the ring. This device dict should have a minimum of the
following keys:
====== ===============================================================
id unique integer identifier amongst devices. Defaults to the next
id if the 'id' key is not provided in the dict
weight a float of the relative weight of this device as compared to
others; this indicates how many partitions the builder will try
to assign to this device
region integer indicating which region the device is in
zone integer indicating which zone the device is in; a given
partition will not be assigned to multiple devices within the
same (region, zone) pair if there is any alternative
ip the ip address of the device
port the tcp port of the device
device the device's name on disk (sdb1, for example)
meta general use 'extra' field; for example: the online date, the
hardware description
====== ===============================================================
.. note::
This will not rebalance the ring immediately as you may want to
make multiple changes for a single rebalance.
:param dev: device dict
:returns: id of device (not used in the tree anymore, but unknown
users may depend on it)
"""
if 'id' not in dev:
dev['id'] = 0
if self.devs:
try:
dev['id'] = self.devs.index(None)
except ValueError:
dev['id'] = len(self.devs)
if dev['id'] < len(self.devs) and self.devs[dev['id']] is not None:
raise exceptions.DuplicateDeviceError(
'Duplicate device id: %d' % dev['id'])
# Add holes to self.devs to ensure self.devs[dev['id']] will be the dev
while dev['id'] >= len(self.devs):
self.devs.append(None)
dev['weight'] = float(dev['weight'])
dev['parts'] = 0
self.devs[dev['id']] = dev
self.devs_changed = True
self.version += 1
return dev['id']
def set_dev_weight(self, dev_id, weight):
"""
Set the weight of a device. This should be called rather than just
altering the weight key in the device dict directly, as the builder
will need to rebuild some internal state to reflect the change.
.. note::
This will not rebalance the ring immediately as you may want to
make multiple changes for a single rebalance.
:param dev_id: device id
:param weight: new weight for device
"""
if any(dev_id == d['id'] for d in self._remove_devs):
raise ValueError("Can not set weight of dev_id %s because it "
"is marked for removal" % (dev_id,))
self.devs[dev_id]['weight'] = weight
self.devs_changed = True
self.version += 1
def remove_dev(self, dev_id):
"""
Remove a device from the ring.
.. note::
This will not rebalance the ring immediately as you may want to
make multiple changes for a single rebalance.
:param dev_id: device id
"""
dev = self.devs[dev_id]
dev['weight'] = 0
self._remove_devs.append(dev)
self.devs_changed = True
self.version += 1
def rebalance(self, seed=None):
"""
Rebalance the ring.
This is the main work function of the builder, as it will assign and
reassign partitions to devices in the ring based on weights, distinct
zones, recent reassignments, etc.
The process doesn't always perfectly assign partitions (that'd take a
lot more analysis and therefore a lot more time -- I had code that did
that before). Because of this, it keeps rebalancing until the device
skew (number of partitions a device wants compared to what it has) gets
below 1% or doesn't change by more than 1% (only happens with a ring
that can't be balanced no matter what).
:returns: (number_of_partitions_altered, resulting_balance,
number_of_removed_devices)
"""
# count up the devs, and cache some stuff
num_devices = 0
for dev in self._iter_devs():
dev['tiers'] = tiers_for_dev(dev)
if dev['weight'] > 0:
num_devices += 1
if num_devices < self.replicas:
raise exceptions.RingValidationError(
"Replica count of %(replicas)s requires more "
"than %(num_devices)s devices" % {
'replicas': self.replicas,
'num_devices': num_devices,
})
if seed is not None:
random.seed(seed)
self._ring = None
old_replica2part2dev = copy.deepcopy(self._replica2part2dev)
if self._last_part_moves is None:
self.logger.debug("New builder; performing initial balance")
self._last_part_moves = array('B', itertools.repeat(0, self.parts))
self._update_last_part_moves()
replica_plan = self._build_replica_plan()
self._set_parts_wanted(replica_plan)
assign_parts = defaultdict(list)
# gather parts from replica count adjustment
self._adjust_replica2part2dev_size(assign_parts)
# gather parts from failed devices
removed_devs = self._gather_parts_from_failed_devices(assign_parts)
# gather parts for dispersion (N.B. this only picks up parts that
# *must* disperse according to the replica plan)
self._gather_parts_for_dispersion(assign_parts, replica_plan)
        # we'll gather a few times, or until we achieve the plan
for gather_count in range(MAX_BALANCE_GATHER_COUNT):
self._gather_parts_for_balance(assign_parts, replica_plan)
if not assign_parts:
# most likely min part hours
finish_status = 'Unable to finish'
break
assign_parts_list = list(assign_parts.items())
# shuffle the parts to be reassigned, we have no preference on the
# order in which the replica plan is fulfilled.
random.shuffle(assign_parts_list)
# reset assign_parts map for next iteration
assign_parts = defaultdict(list)
num_part_replicas = sum(len(r) for p, r in assign_parts_list)
self.logger.debug("Gathered %d parts", num_part_replicas)
self._reassign_parts(assign_parts_list, replica_plan)
self.logger.debug("Assigned %d parts", num_part_replicas)
if not sum(d['parts_wanted'] < 0 for d in
self._iter_devs()):
finish_status = 'Finished'
break
else:
finish_status = 'Unable to finish'
self.logger.debug('%s rebalance plan after %s attempts' % (
finish_status, gather_count + 1))
self.devs_changed = False
self.version += 1
changed_parts = self._build_dispersion_graph(old_replica2part2dev)
# clean up the cache
for dev in self._iter_devs():
dev.pop('tiers', None)
return changed_parts, self.get_balance(), removed_devs
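    # Illustrative sketch (not part of the original module): a typical
    # build-and-rebalance flow with made-up device values; validate() is
    # called afterwards as a sanity check.
    #
    #     builder = RingBuilder(10, 3, 1)
    #     for i in range(4):
    #         builder.add_dev({'region': 1, 'zone': i, 'weight': 100.0,
    #                          'ip': '10.0.0.%d' % i, 'port': 6200,
    #                          'device': 'sdb1', 'meta': ''})
    #     parts_moved, balance, removed = builder.rebalance(seed=42)
    #     builder.validate(stats=True)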
def _build_dispersion_graph(self, old_replica2part2dev=None):
"""
Build a dict of all tiers in the cluster to a list of the number of
parts with a replica count at each index. The values of the dict will
be lists of length the maximum whole replica + 1 so that the
graph[tier][3] is the number of parts within the tier with 3 replicas
        and graph[tier][0] is the number of parts not assigned in this tier.
i.e.
{
<tier>: [
<number_of_parts_with_0_replicas>,
<number_of_parts_with_1_replicas>,
...
<number_of_parts_with_n_replicas>,
],
...
}
:param old_replica2part2dev: if called from rebalance, the
old_replica2part2dev can be used to count moved parts.
:returns: number of parts with different assignments than
old_replica2part2dev if provided
"""
# Since we're going to loop over every replica of every part we'll
# also count up changed_parts if old_replica2part2dev is passed in
old_replica2part2dev = old_replica2part2dev or []
# Compare the partition allocation before and after the rebalance
# Only changed device ids are taken into account; devices might be
# "touched" during the rebalance, but actually not really moved
changed_parts = 0
int_replicas = int(math.ceil(self.replicas))
max_allowed_replicas = self._build_max_replicas_by_tier()
parts_at_risk = 0
dispersion_graph = {}
# go over all the devices holding each replica part by part
for part_id, dev_ids in enumerate(
six.moves.zip(*self._replica2part2dev)):
# count the number of replicas of this part for each tier of each
# device, some devices may have overlapping tiers!
replicas_at_tier = defaultdict(int)
for rep_id, dev in enumerate(iter(
self.devs[dev_id] for dev_id in dev_ids)):
for tier in (dev.get('tiers') or tiers_for_dev(dev)):
replicas_at_tier[tier] += 1
# IndexErrors will be raised if the replicas are increased or
# decreased, and that actually means the partition has changed
try:
old_device = old_replica2part2dev[rep_id][part_id]
except IndexError:
changed_parts += 1
continue
if old_device != dev['id']:
changed_parts += 1
part_at_risk = False
# update running totals for each tiers' number of parts with a
# given replica count
for tier, replicas in replicas_at_tier.items():
if tier not in dispersion_graph:
dispersion_graph[tier] = [self.parts] + [0] * int_replicas
dispersion_graph[tier][0] -= 1
dispersion_graph[tier][replicas] += 1
if replicas > max_allowed_replicas[tier]:
part_at_risk = True
# this part may be at risk in multiple tiers, but we only count it
# as at_risk once
if part_at_risk:
parts_at_risk += 1
self._dispersion_graph = dispersion_graph
self.dispersion = 100.0 * parts_at_risk / self.parts
return changed_parts
def validate(self, stats=False):
"""
Validate the ring.
This is a safety function to try to catch any bugs in the building
process. It ensures partitions have been assigned to real devices,
aren't doubly assigned, etc. It can also optionally check the even
distribution of partitions across devices.
:param stats: if True, check distribution of partitions across devices
:returns: if stats is True, a tuple of (device_usage, worst_stat), else
(None, None). device_usage[dev_id] will equal the number of
partitions assigned to that device. worst_stat will equal the
number of partitions the worst device is skewed from the
number it should have.
:raises RingValidationError: problem was found with the ring.
"""
# "len" showed up in profiling, so it's just computed once.
dev_len = len(self.devs)
parts_on_devs = sum(d['parts'] for d in self._iter_devs())
if not self._replica2part2dev:
raise exceptions.RingValidationError(
'_replica2part2dev empty; did you forget to rebalance?')
parts_in_map = sum(len(p2d) for p2d in self._replica2part2dev)
if parts_on_devs != parts_in_map:
raise exceptions.RingValidationError(
'All partitions are not double accounted for: %d != %d' %
(parts_on_devs, parts_in_map))
if stats:
# dev_usage[dev_id] will equal the number of partitions assigned to
# that device.
dev_usage = array('I', (0 for _junk in range(dev_len)))
for part2dev in self._replica2part2dev:
for dev_id in part2dev:
dev_usage[dev_id] += 1
for dev in self._iter_devs():
if not isinstance(dev['port'], int):
raise exceptions.RingValidationError(
"Device %d has port %r, which is not an integer." %
(dev['id'], dev['port']))
int_replicas = int(math.ceil(self.replicas))
rep2part_len = map(len, self._replica2part2dev)
# check the assignments of each part's replicas
for part in range(self.parts):
devs_for_part = []
for replica, part_len in enumerate(rep2part_len):
if part_len <= part:
# last replica may be short on parts because of floating
# replica count
if replica + 1 < int_replicas:
raise exceptions.RingValidationError(
"The partition assignments of replica %r were "
"shorter than expected (%s < %s) - this should "
"only happen for the last replica" % (
replica,
len(self._replica2part2dev[replica]),
self.parts,
))
break
dev_id = self._replica2part2dev[replica][part]
if dev_id >= dev_len or not self.devs[dev_id]:
raise exceptions.RingValidationError(
"Partition %d, replica %d was not allocated "
"to a device." %
(part, replica))
devs_for_part.append(dev_id)
if len(devs_for_part) != len(set(devs_for_part)):
raise exceptions.RingValidationError(
"The partition %s has been assigned to "
"duplicate devices %r" % (
part, devs_for_part))
if stats:
weight_of_one_part = self.weight_of_one_part()
worst = 0
for dev in self._iter_devs():
if not dev['weight']:
if dev_usage[dev['id']]:
# If a device has no weight, but has partitions, then
# its overage is considered "infinity" and therefore
# always the worst possible. We show MAX_BALANCE for
# convenience.
worst = MAX_BALANCE
break
continue
skew = abs(100.0 * dev_usage[dev['id']] /
(dev['weight'] * weight_of_one_part) - 100.0)
if skew > worst:
worst = skew
return dev_usage, worst
return None, None
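    # Worked example of the stats skew above (numbers made up): if a device's
    # weight entitles it to 100 partition-replicas but it actually holds 103,
    # its skew is abs(100.0 * 103 / 100 - 100.0) == 3.0, and that becomes the
    # returned `worst` value unless another device is further off.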
def _build_balance_per_dev(self):
"""
Build a map of <device_id> => <balance> where <balance> is a float
        representing the percentage difference between the desired amount of
        partitions a given device wants and the amount it has.
N.B. this method only considers a device's weight and the parts
assigned, not the parts wanted according to the replica plan.
"""
weight_of_one_part = self.weight_of_one_part()
balance_per_dev = {}
for dev in self._iter_devs():
if not dev['weight']:
if dev['parts']:
# If a device has no weight, but has partitions, then its
# overage is considered "infinity" and therefore always the
# worst possible. We show MAX_BALANCE for convenience.
balance = MAX_BALANCE
else:
balance = 0
else:
balance = 100.0 * dev['parts'] / (
dev['weight'] * weight_of_one_part) - 100.0
balance_per_dev[dev['id']] = balance
return balance_per_dev
def get_balance(self):
"""
Get the balance of the ring. The balance value is the highest
        percentage off the desired amount of partitions a given device
wants. For instance, if the "worst" device wants (based on its
weight relative to the sum of all the devices' weights) 123
partitions and it has 124 partitions, the balance value would
        be about 0.81 (1 extra / 123 wanted * 100 for percentage).
:returns: balance of the ring
"""
balance_per_dev = self._build_balance_per_dev()
return max(abs(b) for b in balance_per_dev.values())
def get_required_overload(self, weighted=None, wanted=None):
"""
Returns the minimum overload value required to make the ring maximally
dispersed.
The required overload is the largest percentage change of any single
device from its weighted replicanth to its wanted replicanth (note:
        under-weighted devices have a negative percentage change) to achieve
dispersion - that is to say a single device that must be overloaded by
5% is worse than 5 devices in a single tier overloaded by 1%.
"""
weighted = weighted or self._build_weighted_replicas_by_tier()
wanted = wanted or self._build_wanted_replicas_by_tier()
max_overload = 0.0
for dev in self._iter_devs():
tier = (dev['region'], dev['zone'], dev['ip'], dev['id'])
if not dev['weight']:
if tier not in wanted or not wanted[tier]:
continue
raise exceptions.RingValidationError(
'Device %s has zero weight and '
'should not want any replicas' % (tier,))
required = (wanted[tier] - weighted[tier]) / weighted[tier]
self.logger.debug('%s wants %s and is weighted for %s so '
'therefore requires %s overload' % (
tier, wanted[tier], weighted[tier],
required))
if required > max_overload:
max_overload = required
return max_overload
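    # Worked example of the formula above (numbers made up): a device weighted
    # for 0.50 replicanths but wanted for 0.60 requires
    # (0.60 - 0.50) / 0.50 == 0.20, i.e. a 20% overload; the method returns
    # the largest such value across all devices.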
def pretend_min_part_hours_passed(self):
"""
Override min_part_hours by marking all partitions as having been moved
255 hours ago and last move epoch to 'the beginning of time'. This can
be used to force a full rebalance on the next call to rebalance.
"""
self._last_part_moves_epoch = 0
if not self._last_part_moves:
return
for part in range(self.parts):
self._last_part_moves[part] = 0xff
def get_part_devices(self, part):
"""
Get the devices that are responsible for the partition,
filtering out duplicates.
:param part: partition to get devices for
:returns: list of device dicts
"""
devices = []
for dev in self._devs_for_part(part):
if dev not in devices:
devices.append(dev)
return devices
def _iter_devs(self):
"""
        Returns an iterator over all the non-None devices in the ring. Note that
this means list(b._iter_devs())[some_id] may not equal b.devs[some_id];
you will have to check the 'id' key of each device to obtain its
dev_id.
"""
for dev in self.devs:
if dev is not None:
yield dev
def _build_tier2children(self):
"""
        Wrap the build_tier_tree helper so that zero-weight devices are excluded.
"""
return build_tier_tree(d for d in self._iter_devs() if d['weight'])
def _set_parts_wanted(self, replica_plan):
"""
Sets the parts_wanted key for each of the devices to the number of
partitions the device wants based on its relative weight. This key is
used to sort the devices according to "most wanted" during rebalancing
to best distribute partitions. A negative parts_wanted indicates the
device is "overweight" and wishes to give partitions away if possible.
:param replica_plan: a dict of dicts, as returned from
                             _build_replica_plan, that maps
                             each tier to its target replicanths.
"""
tier2children = self._build_tier2children()
parts_by_tier = defaultdict(int)
def place_parts(tier, parts):
parts_by_tier[tier] = parts
sub_tiers = sorted(tier2children[tier])
if not sub_tiers:
return
to_place = defaultdict(int)
for t in sub_tiers:
to_place[t] = int(math.floor(
replica_plan[t]['target'] * self.parts))
parts -= to_place[t]
# if there's some parts left over, just throw 'em about
sub_tier_gen = itertools.cycle(sorted(
sub_tiers, key=lambda t: replica_plan[t]['target']))
while parts:
t = next(sub_tier_gen)
to_place[t] += 1
parts -= 1
for t, p in to_place.items():
place_parts(t, p)
total_parts = int(self.replicas * self.parts)
place_parts((), total_parts)
# belts & suspenders/paranoia - at every level, the sum of
# parts_by_tier should be total_parts for the ring
tiers = ['cluster', 'regions', 'zones', 'servers', 'devices']
for i, tier_name in enumerate(tiers):
parts_at_tier = sum(parts_by_tier[t] for t in parts_by_tier
if len(t) == i)
if parts_at_tier != total_parts:
raise exceptions.RingValidationError(
'%s != %s at tier %s' % (
parts_at_tier, total_parts, tier_name))
for dev in self._iter_devs():
if not dev['weight']:
# With no weight, that means we wish to "drain" the device. So
# we set the parts_wanted to a really large negative number to
# indicate its strong desire to give up everything it has.
dev['parts_wanted'] = -self.parts * self.replicas
else:
tier = (dev['region'], dev['zone'], dev['ip'], dev['id'])
dev['parts_wanted'] = parts_by_tier[tier] - dev['parts']
def _update_last_part_moves(self):
"""
Updates how many hours ago each partition was moved based on the
current time. The builder won't move a partition that has been moved
more recently than min_part_hours.
"""
elapsed_hours = int(time() - self._last_part_moves_epoch) / 3600
if elapsed_hours <= 0:
return
for part in range(self.parts):
# The "min(self._last_part_moves[part] + elapsed_hours, 0xff)"
# which was here showed up in profiling, so it got inlined.
last_plus_elapsed = self._last_part_moves[part] + elapsed_hours
if last_plus_elapsed < 0xff:
self._last_part_moves[part] = last_plus_elapsed
else:
self._last_part_moves[part] = 0xff
self._last_part_moves_epoch = int(time())
def _gather_parts_from_failed_devices(self, assign_parts):
"""
Update the map of partition => [replicas] to be reassigned from
removed devices.
"""
# First we gather partitions from removed devices. Since removed
# devices usually indicate device failures, we have no choice but to
# reassign these partitions. However, we mark them as moved so later
# choices will skip other replicas of the same partition if possible.
if self._remove_devs:
dev_ids = [d['id'] for d in self._remove_devs if d['parts']]
if dev_ids:
for part, replica in self._each_part_replica():
dev_id = self._replica2part2dev[replica][part]
if dev_id in dev_ids:
self._replica2part2dev[replica][part] = NONE_DEV
self._last_part_moves[part] = 0
assign_parts[part].append(replica)
self.logger.debug(
"Gathered %d/%d from dev %d [dev removed]",
part, replica, dev_id)
removed_devs = 0
while self._remove_devs:
remove_dev_id = self._remove_devs.pop()['id']
self.logger.debug("Removing dev %d", remove_dev_id)
self.devs[remove_dev_id] = None
removed_devs += 1
return removed_devs
def _adjust_replica2part2dev_size(self, to_assign):
"""
Make sure that the lengths of the arrays in _replica2part2dev
are correct for the current value of self.replicas.
Example:
self.part_power = 8
self.replicas = 2.25
self._replica2part2dev will contain 3 arrays: the first 2 of
length 256 (2**8), and the last of length 64 (0.25 * 2**8).
Update the mapping of partition => [replicas] that need assignment.
"""
fractional_replicas, whole_replicas = math.modf(self.replicas)
whole_replicas = int(whole_replicas)
removed_parts = 0
new_parts = 0
desired_lengths = [self.parts] * whole_replicas
if fractional_replicas:
desired_lengths.append(int(self.parts * fractional_replicas))
if self._replica2part2dev is not None:
# If we crossed an integer threshold (say, 4.1 --> 4),
# we'll have a partial extra replica clinging on here. Clean
# up any such extra stuff.
for part2dev in self._replica2part2dev[len(desired_lengths):]:
for dev_id in part2dev:
dev_losing_part = self.devs[dev_id]
dev_losing_part['parts'] -= 1
removed_parts -= 1
self._replica2part2dev = \
self._replica2part2dev[:len(desired_lengths)]
else:
self._replica2part2dev = []
for replica, desired_length in enumerate(desired_lengths):
if replica < len(self._replica2part2dev):
part2dev = self._replica2part2dev[replica]
if len(part2dev) < desired_length:
# Not long enough: needs to be extended and the
# newly-added pieces assigned to devices.
for part in range(len(part2dev), desired_length):
to_assign[part].append(replica)
part2dev.append(NONE_DEV)
new_parts += 1
elif len(part2dev) > desired_length:
# Too long: truncate this mapping.
for part in range(desired_length, len(part2dev)):
dev_losing_part = self.devs[part2dev[part]]
dev_losing_part['parts'] -= 1
removed_parts -= 1
self._replica2part2dev[replica] = part2dev[:desired_length]
else:
# Mapping not present at all: make one up and assign
# all of it.
for part in range(desired_length):
to_assign[part].append(replica)
new_parts += 1
self._replica2part2dev.append(
array('H', itertools.repeat(NONE_DEV, desired_length)))
self.logger.debug(
"%d new parts and %d removed parts from replica-count change",
new_parts, removed_parts)
def _gather_parts_for_dispersion(self, assign_parts, replica_plan):
"""
Update the map of partition => [replicas] to be reassigned from
insufficiently-far-apart replicas.
"""
# Now we gather partitions that are "at risk" because they aren't
        # currently sufficiently spread out across the cluster.
for part in range(self.parts):
if self._last_part_moves[part] < self.min_part_hours:
continue
# First, add up the count of replicas at each tier for each
# partition.
replicas_at_tier = defaultdict(int)
for dev in self._devs_for_part(part):
for tier in dev['tiers']:
replicas_at_tier[tier] += 1
# Now, look for partitions not yet spread out enough.
undispersed_dev_replicas = []
for replica in self._replicas_for_part(part):
dev_id = self._replica2part2dev[replica][part]
if dev_id == NONE_DEV:
continue
dev = self.devs[dev_id]
if all(replicas_at_tier[tier] <=
replica_plan[tier]['max']
for tier in dev['tiers']):
continue
undispersed_dev_replicas.append((dev, replica))
if not undispersed_dev_replicas:
continue
undispersed_dev_replicas.sort(
key=lambda dr: dr[0]['parts_wanted'])
for dev, replica in undispersed_dev_replicas:
# the min part hour check is ignored if and only if a device
# has more than one replica of a part assigned to it - which
# would have only been possible on rings built with an older
# version of the code
if (self._last_part_moves[part] < self.min_part_hours and
not replicas_at_tier[dev['tiers'][-1]] > 1):
continue
dev['parts_wanted'] += 1
dev['parts'] -= 1
assign_parts[part].append(replica)
self.logger.debug(
"Gathered %d/%d from dev %d [dispersion]",
part, replica, dev['id'])
self._replica2part2dev[replica][part] = NONE_DEV
for tier in dev['tiers']:
replicas_at_tier[tier] -= 1
self._last_part_moves[part] = 0
def _gather_parts_for_balance_can_disperse(self, assign_parts, start,
replica_plan):
"""
Update the map of partition => [replicas] to be reassigned from
overweight drives where the replicas can be better dispersed to
another failure domain.
:param assign_parts: the map of partition => [replica] to update
:param start: offset into self.parts to begin search
:param replica_plan: replicanth targets for tiers
"""
# Last, we gather partitions from devices that are "overweight" because
# they have more partitions than their parts_wanted.
for offset in range(self.parts):
part = (start + offset) % self.parts
if self._last_part_moves[part] < self.min_part_hours:
continue
# For each part we'll look at the devices holding those parts and
# see if any are overweight, keeping track of replicas_at_tier as
# we go
overweight_dev_replica = []
replicas_at_tier = defaultdict(int)
for replica in self._replicas_for_part(part):
dev_id = self._replica2part2dev[replica][part]
if dev_id == NONE_DEV:
continue
dev = self.devs[dev_id]
for tier in dev['tiers']:
replicas_at_tier[tier] += 1
if dev['parts_wanted'] < 0:
overweight_dev_replica.append((dev, replica))
if not overweight_dev_replica:
continue
overweight_dev_replica.sort(
key=lambda dr: dr[0]['parts_wanted'])
for dev, replica in overweight_dev_replica:
if self._last_part_moves[part] < self.min_part_hours:
break
if any(replica_plan[tier]['min'] <=
replicas_at_tier[tier] <
replica_plan[tier]['max']
for tier in dev['tiers']):
continue
                # this is the most overweight device holding a replica
                # of this part that can shed it according to the plan
dev['parts_wanted'] += 1
dev['parts'] -= 1
assign_parts[part].append(replica)
self.logger.debug(
"Gathered %d/%d from dev %d [weight disperse]",
part, replica, dev['id'])
self._replica2part2dev[replica][part] = NONE_DEV
for tier in dev['tiers']:
replicas_at_tier[tier] -= 1
self._last_part_moves[part] = 0
def _gather_parts_for_balance(self, assign_parts, replica_plan):
"""
Gather parts that look like they should move for balance reasons.
        A simple gather of parts that look dispersible normally works out;
        we'll switch strategies if things don't seem to move.
"""
# pick a random starting point on the other side of the ring
quarter_turn = (self.parts // 4)
random_half = random.randint(0, self.parts / 2)
start = (self._last_part_gather_start + quarter_turn +
random_half) % self.parts
self.logger.debug('Gather start is %s '
'(Last start was %s)' % (
start, self._last_part_gather_start))
self._last_part_gather_start = start
self._gather_parts_for_balance_can_disperse(
assign_parts, start, replica_plan)
if not assign_parts:
self._gather_parts_for_balance_forced(assign_parts, start)
def _gather_parts_for_balance_forced(self, assign_parts, start, **kwargs):
"""
Update the map of partition => [replicas] to be reassigned from
overweight drives without restriction, parts gathered from this method
may be placed back onto devices that are no better (or worse) than the
device from which they are gathered.
This method allows devices to flop around enough to unlock replicas
that would have otherwise potentially been locked because of
dispersion - it should be used as a last resort.
:param assign_parts: the map of partition => [replica] to update
:param start: offset into self.parts to begin search
"""
for offset in range(self.parts):
part = (start + offset) % self.parts
if self._last_part_moves[part] < self.min_part_hours:
continue
overweight_dev_replica = []
for replica in self._replicas_for_part(part):
dev_id = self._replica2part2dev[replica][part]
if dev_id == NONE_DEV:
continue
dev = self.devs[dev_id]
if dev['parts_wanted'] < 0:
overweight_dev_replica.append((dev, replica))
if not overweight_dev_replica:
continue
overweight_dev_replica.sort(
key=lambda dr: dr[0]['parts_wanted'])
for dev, replica in overweight_dev_replica:
if self._last_part_moves[part] < self.min_part_hours:
break
                # this is the most overweight device holding a replica of
                # this part; we don't know where it's going to end up - but
                # we'll pick it up and hope for the best.
dev['parts_wanted'] += 1
dev['parts'] -= 1
assign_parts[part].append(replica)
self.logger.debug(
"Gathered %d/%d from dev %d [weight forced]",
part, replica, dev['id'])
self._replica2part2dev[replica][part] = NONE_DEV
self._last_part_moves[part] = 0
def _reassign_parts(self, reassign_parts, replica_plan):
"""
        For an existing ring data set, partitions are reassigned similarly to
the initial assignment.
The devices are ordered by how many partitions they still want and
kept in that order throughout the process.
The gathered partitions are iterated through, assigning them to
devices according to the "most wanted" while keeping the replicas as
"far apart" as possible.
Two different regions are considered the farthest-apart things,
followed by zones, then different ip within a zone; the
least-far-apart things are different devices with the same ip in the
same zone.
:param reassign_parts: An iterable of (part, replicas_to_replace)
pairs. replicas_to_replace is an iterable of the
replica (an int) to replace for that partition.
replicas_to_replace may be shared for multiple
partitions, so be sure you do not modify it.
"""
parts_available_in_tier = defaultdict(int)
for dev in self._iter_devs():
dev['sort_key'] = self._sort_key_for(dev)
# Note: this represents how many partitions may be assigned to a
# given tier (region/zone/server/disk). It does not take into
# account how many partitions a given tier wants to shed.
#
# If we did not do this, we could have a zone where, at some
# point during an assignment, number-of-parts-to-gain equals
# number-of-parts-to-shed. At that point, no further placement
# into that zone would occur since its parts_available_in_tier
# would be 0. This would happen any time a zone had any device
# with partitions to shed, which is any time a device is being
# removed, which is a pretty frequent operation.
wanted = max(dev['parts_wanted'], 0)
for tier in dev['tiers']:
parts_available_in_tier[tier] += wanted
available_devs = \
sorted((d for d in self._iter_devs() if d['weight']),
key=lambda x: x['sort_key'])
tier2devs = defaultdict(list)
tier2sort_key = defaultdict(tuple)
tier2dev_sort_key = defaultdict(list)
max_tier_depth = 0
for dev in available_devs:
for tier in dev['tiers']:
tier2devs[tier].append(dev) # <-- starts out sorted!
tier2dev_sort_key[tier].append(dev['sort_key'])
tier2sort_key[tier] = dev['sort_key']
if len(tier) > max_tier_depth:
max_tier_depth = len(tier)
tier2children_sets = build_tier_tree(available_devs)
tier2children = defaultdict(list)
tier2children_sort_key = {}
tiers_list = [()]
depth = 1
while depth <= max_tier_depth:
new_tiers_list = []
for tier in tiers_list:
child_tiers = list(tier2children_sets[tier])
child_tiers.sort(key=tier2sort_key.__getitem__)
tier2children[tier] = child_tiers
tier2children_sort_key[tier] = map(
tier2sort_key.__getitem__, child_tiers)
new_tiers_list.extend(child_tiers)
tiers_list = new_tiers_list
depth += 1
for part, replace_replicas in reassign_parts:
# always update part_moves for min_part_hours
self._last_part_moves[part] = 0
# count up where these replicas be
replicas_at_tier = defaultdict(int)
for dev in self._devs_for_part(part):
for tier in dev['tiers']:
replicas_at_tier[tier] += 1
for replica in replace_replicas:
# Find a new home for this replica
tier = ()
# This used to be a cute, recursive function, but it's been
# unrolled for performance.
depth = 1
while depth <= max_tier_depth:
# Choose the roomiest tier among those that don't
# already have their max replicas assigned according
# to the replica_plan.
candidates = [t for t in tier2children[tier] if
replicas_at_tier[t] <
replica_plan[t]['max']]
if not candidates:
raise Exception('no home for %s/%s %s' % (
part, replica, {t: (
replicas_at_tier[t],
replica_plan[t]['max'],
) for t in tier2children[tier]}))
tier = max(candidates, key=lambda t:
parts_available_in_tier[t])
depth += 1
dev = tier2devs[tier][-1]
dev['parts_wanted'] -= 1
dev['parts'] += 1
for tier in dev['tiers']:
parts_available_in_tier[tier] -= 1
replicas_at_tier[tier] += 1
self._replica2part2dev[replica][part] = dev['id']
self.logger.debug(
"Placed %d/%d onto dev %d", part, replica, dev['id'])
# Just to save memory and keep from accidental reuse.
for dev in self._iter_devs():
del dev['sort_key']
@staticmethod
def _sort_key_for(dev):
return (dev['parts_wanted'], random.randint(0, 0xFFFF), dev['id'])
def _build_max_replicas_by_tier(self, bound=math.ceil):
"""
Returns a defaultdict of (tier: replica_count) for all tiers in the
ring excluding zero weight devices.
There will always be a () entry as the root of the structure, whose
replica_count will equal the ring's replica_count.
Then there will be (region,) entries for each region, indicating the
maximum number of replicas the region might have for any given
partition.
Next there will be (region, zone) entries for each zone, indicating
the maximum number of replicas in a given region and zone. Anything
greater than 1 indicates a partition at slightly elevated risk, as if
        that zone were to fail, multiple replicas of that partition would be
unreachable.
Next there will be (region, zone, ip_port) entries for each node,
indicating the maximum number of replicas stored on a node in a given
region and zone. Anything greater than 1 indicates a partition at
        elevated risk, as if that ip_port were to fail, multiple replicas of
that partition would be unreachable.
Last there will be (region, zone, ip_port, device) entries for each
device, indicating the maximum number of replicas the device shares
with other devices on the same node for any given partition.
Anything greater than 1 indicates a partition at serious risk, as the
data on that partition will not be stored distinctly at the ring's
replica_count.
Example return dict for the common SAIO setup::
{(): 3.0,
(1,): 3.0,
(1, 1): 1.0,
(1, 1, '127.0.0.1:6010'): 1.0,
(1, 1, '127.0.0.1:6010', 0): 1.0,
(1, 2): 1.0,
(1, 2, '127.0.0.1:6020'): 1.0,
(1, 2, '127.0.0.1:6020', 1): 1.0,
(1, 3): 1.0,
(1, 3, '127.0.0.1:6030'): 1.0,
(1, 3, '127.0.0.1:6030', 2): 1.0,
(1, 4): 1.0,
(1, 4, '127.0.0.1:6040'): 1.0,
(1, 4, '127.0.0.1:6040', 3): 1.0}
"""
# Used by walk_tree to know what entries to create for each recursive
# call.
tier2children = self._build_tier2children()
def walk_tree(tier, replica_count):
if len(tier) == 4:
# special case for device, it's not recursive
replica_count = min(1, replica_count)
mr = {tier: replica_count}
if tier in tier2children:
subtiers = tier2children[tier]
for subtier in subtiers:
submax = bound(float(replica_count) / len(subtiers))
mr.update(walk_tree(subtier, submax))
return mr
mr = defaultdict(float)
mr.update(walk_tree((), self.replicas))
return mr
def _build_weighted_replicas_by_tier(self):
"""
Returns a dict mapping <tier> => replicanths for all tiers in
the ring based on their weights.
"""
weight_of_one_part = self.weight_of_one_part()
# assign each device some replicanths by weight (can't be > 1)
weighted_replicas_for_dev = {}
devices_with_room = []
for dev in self._iter_devs():
if not dev['weight']:
continue
weighted_replicas = (
dev['weight'] * weight_of_one_part / self.parts)
if weighted_replicas < 1:
devices_with_room.append(dev['id'])
else:
weighted_replicas = 1
weighted_replicas_for_dev[dev['id']] = weighted_replicas
while True:
remaining = self.replicas - sum(weighted_replicas_for_dev.values())
if remaining < 1e-10:
break
devices_with_room = [d for d in devices_with_room if
weighted_replicas_for_dev[d] < 1]
rel_weight = remaining / sum(
weighted_replicas_for_dev[d] for d in devices_with_room)
for d in devices_with_room:
weighted_replicas_for_dev[d] = min(
1, weighted_replicas_for_dev[d] * (rel_weight + 1))
weighted_replicas_by_tier = defaultdict(float)
for dev in self._iter_devs():
if not dev['weight']:
continue
assigned_replicanths = weighted_replicas_for_dev[dev['id']]
dev_tier = (dev['region'], dev['zone'], dev['ip'], dev['id'])
for i in range(len(dev_tier) + 1):
tier = dev_tier[:i]
weighted_replicas_by_tier[tier] += assigned_replicanths
# belts & suspenders/paranoia - at every level, the sum of
# weighted_replicas should be very close to the total number of
# replicas for the ring
tiers = ['cluster', 'regions', 'zones', 'servers', 'devices']
for i, tier_name in enumerate(tiers):
replicas_at_tier = sum(weighted_replicas_by_tier[t] for t in
weighted_replicas_by_tier if len(t) == i)
if abs(self.replicas - replicas_at_tier) > 1e-10:
raise exceptions.RingValidationError(
'%s != %s at tier %s' % (
replicas_at_tier, self.replicas, tier_name))
return weighted_replicas_by_tier
def _build_wanted_replicas_by_tier(self):
"""
Returns a defaultdict of (tier: replicanths) for all tiers in the ring
based on unique-as-possible (full dispersion) with respect to their
weights and device counts.
N.B. _build_max_replicas_by_tier calculates the upper bound on the
replicanths each tier may hold irrespective of the weights of the
tier; this method will calculate the minimum replicanth <=
max_replicas[tier] that will still solve dispersion. However, it is
not guaranteed to return a fully dispersed solution if failure domains
are over-weighted for their device count.
"""
weighted_replicas = self._build_weighted_replicas_by_tier()
dispersed_replicas = {
t: {
'min': math.floor(r),
'max': math.ceil(r),
} for (t, r) in
self._build_max_replicas_by_tier(bound=float).items()
}
# watch out for device limited tiers
num_devices = defaultdict(int)
for d in self._iter_devs():
if d['weight'] <= 0:
continue
for t in (d.get('tiers') or tiers_for_dev(d)):
num_devices[t] += 1
num_devices[()] += 1
tier2children = self._build_tier2children()
wanted_replicas = defaultdict(float)
def place_replicas(tier, replicanths):
if replicanths > num_devices[tier]:
raise exceptions.RingValidationError(
'More replicanths (%s) than devices (%s) '
'in tier (%s)' % (replicanths, num_devices[tier], tier))
wanted_replicas[tier] = replicanths
sub_tiers = sorted(tier2children[tier])
if not sub_tiers:
return
to_place = defaultdict(float)
remaining = replicanths
tiers_to_spread = sub_tiers
device_limited = False
while True:
rel_weight = remaining / sum(weighted_replicas[t]
for t in tiers_to_spread)
for t in tiers_to_spread:
replicas = to_place[t] + (
weighted_replicas[t] * rel_weight)
if replicas < dispersed_replicas[t]['min']:
replicas = dispersed_replicas[t]['min']
elif (replicas > dispersed_replicas[t]['max'] and
not device_limited):
replicas = dispersed_replicas[t]['max']
if replicas > num_devices[t]:
replicas = num_devices[t]
to_place[t] = replicas
remaining = replicanths - sum(to_place.values())
if remaining < -1e-10:
tiers_to_spread = [
t for t in sub_tiers
if to_place[t] > dispersed_replicas[t]['min']
]
elif remaining > 1e-10:
tiers_to_spread = [
t for t in sub_tiers
if (num_devices[t] > to_place[t] <
dispersed_replicas[t]['max'])
]
if not tiers_to_spread:
device_limited = True
tiers_to_spread = [
t for t in sub_tiers
if to_place[t] < num_devices[t]
]
else:
# remaining is "empty"
break
for t in sub_tiers:
self.logger.debug('Planning %s on %s',
to_place[t], t)
place_replicas(t, to_place[t])
# place all replicas in the cluster tier
place_replicas((), self.replicas)
# belts & suspenders/paranoia - at every level, the sum of
# wanted_replicas should be very close to the total number of
# replicas for the ring
tiers = ['cluster', 'regions', 'zones', 'servers', 'devices']
for i, tier_name in enumerate(tiers):
replicas_at_tier = sum(wanted_replicas[t] for t in
wanted_replicas if len(t) == i)
if abs(self.replicas - replicas_at_tier) > 1e-10:
raise exceptions.RingValidationError(
'%s != %s at tier %s' % (
replicas_at_tier, self.replicas, tier_name))
return wanted_replicas
def _build_target_replicas_by_tier(self):
"""
Build a map of <tier> => <target_replicas> accounting for device
weights, unique-as-possible dispersion and overload.
<tier> - a tuple, describing each tier in the ring topology
<target_replicas> - a float, the target replicanths at the tier
"""
weighted_replicas = self._build_weighted_replicas_by_tier()
wanted_replicas = self._build_wanted_replicas_by_tier()
max_overload = self.get_required_overload(weighted=weighted_replicas,
wanted=wanted_replicas)
if max_overload <= 0.0:
return wanted_replicas
else:
overload = min(self.overload, max_overload)
self.logger.debug("Using effective overload of %f", overload)
target_replicas = defaultdict(float)
for tier, weighted in weighted_replicas.items():
m = (wanted_replicas[tier] - weighted) / max_overload
target_replicas[tier] = m * overload + weighted
# belts & suspenders/paranoia - at every level, the sum of
# target_replicas should be very close to the total number
# of replicas for the ring
tiers = ['cluster', 'regions', 'zones', 'servers', 'devices']
for i, tier_name in enumerate(tiers):
replicas_at_tier = sum(target_replicas[t] for t in
target_replicas if len(t) == i)
if abs(self.replicas - replicas_at_tier) > 1e-10:
raise exceptions.RingValidationError(
'%s != %s at tier %s' % (
replicas_at_tier, self.replicas, tier_name))
return target_replicas
def _build_replica_plan(self):
"""
Wraps return value of _build_target_replicas_by_tier to include
pre-calculated min and max values for each tier.
:returns: a dict, mapping <tier> => <replica_plan>, where
<replica_plan> is itself a dict
<replica_plan> include at least the following keys:
min - the minimum number of replicas at the tier
target - the target replicanths at the tier
max - the maximum number of replicas at the tier
"""
# replica part-y planner!
target_replicas = self._build_target_replicas_by_tier()
replica_plan = defaultdict(
lambda: {'min': 0, 'target': 0, 'max': 0})
replica_plan.update({
t: {
'min': math.floor(r + 1e-10),
'target': r,
'max': math.ceil(r - 1e-10),
} for (t, r) in
target_replicas.items()
})
return replica_plan
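    # Illustrative sketch (not part of the original module) of the plan shape:
    # a tier with a target of 1.5 replicanths maps to
    #     {'min': 1, 'target': 1.5, 'max': 2}
    # while an exactly-integer target such as 2.0 gets min == max == 2 thanks
    # to the 1e-10 fudge factors above.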
def _devs_for_part(self, part):
"""
Returns a list of devices for a specified partition.
Deliberately includes duplicates.
"""
if self._replica2part2dev is None:
return []
devs = []
for part2dev in self._replica2part2dev:
if part >= len(part2dev):
continue
dev_id = part2dev[part]
if dev_id == NONE_DEV:
continue
devs.append(self.devs[dev_id])
return devs
def _replicas_for_part(self, part):
"""
Returns a list of replicas for a specified partition.
These can be used as indices into self._replica2part2dev
without worrying about IndexErrors.
"""
return [replica for replica, part2dev
in enumerate(self._replica2part2dev)
if part < len(part2dev)]
def _each_part_replica(self):
"""
Generator yielding every (partition, replica) pair in the ring.
"""
for replica, part2dev in enumerate(self._replica2part2dev):
for part in range(len(part2dev)):
yield (part, replica)
@classmethod
def load(cls, builder_file, open=open):
"""
        Obtain a RingBuilder instance from the provided builder file
:param builder_file: path to builder file to load
:return: RingBuilder instance
"""
try:
fp = open(builder_file, 'rb')
except IOError as e:
if e.errno == errno.ENOENT:
raise exceptions.FileNotFoundError(
'Ring Builder file does not exist: %s' % builder_file)
elif e.errno in [errno.EPERM, errno.EACCES]:
raise exceptions.PermissionError(
'Ring Builder file cannot be accessed: %s' % builder_file)
else:
raise
else:
with fp:
try:
builder = pickle.load(fp)
except Exception:
# raise error during unpickling as UnPicklingError
raise exceptions.UnPicklingError(
'Ring Builder file is invalid: %s' % builder_file)
if not hasattr(builder, 'devs'):
builder_dict = builder
builder = RingBuilder(1, 1, 1)
builder.copy_from(builder_dict)
for dev in builder.devs:
# really old rings didn't have meta keys
if dev and 'meta' not in dev:
dev['meta'] = ''
            # NOTE(akscram): Old ring builder files don't contain
            # replication parameters.
if dev:
if 'ip' in dev:
dev.setdefault('replication_ip', dev['ip'])
if 'port' in dev:
dev.setdefault('replication_port', dev['port'])
return builder
def save(self, builder_file):
"""Serialize this RingBuilder instance to disk.
:param builder_file: path to builder file to save
"""
with open(builder_file, 'wb') as f:
pickle.dump(self.to_dict(), f, protocol=2)
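    # Illustrative sketch (not part of the original module): save a builder to
    # disk and load it back; the path is made up.
    #
    #     builder.save('/tmp/object.builder')
    #     builder2 = RingBuilder.load('/tmp/object.builder')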
def search_devs(self, search_values):
"""Search devices by parameters.
:param search_values: a dictionary with search values to filter
devices, supported parameters are id,
region, zone, ip, port, replication_ip,
replication_port, device, weight, meta
:returns: list of device dicts
"""
matched_devs = []
for dev in self.devs:
if not dev:
continue
matched = True
for key in ('id', 'region', 'zone', 'ip', 'port', 'replication_ip',
'replication_port', 'device', 'weight', 'meta'):
if key in search_values:
value = search_values.get(key)
if value is not None:
if key == 'meta':
if value not in dev.get(key):
matched = False
elif key == 'ip' or key == 'replication_ip':
cdev = ''
try:
cdev = validate_and_normalize_address(
dev.get(key, ''))
except ValueError:
pass
if cdev != value:
matched = False
elif dev.get(key) != value:
matched = False
if matched:
matched_devs.append(dev)
return matched_devs
def increase_partition_power(self):
""" Increases ring partition power by one.
Devices will be assigned to partitions like this:
OLD: 0, 3, 7, 5, 2, 1, ...
NEW: 0, 0, 3, 3, 7, 7, 5, 5, 2, 2, 1, 1, ...
"""
new_replica2part2dev = []
for replica in self._replica2part2dev:
new_replica = array('H')
for device in replica:
new_replica.append(device)
new_replica.append(device) # append device a second time
new_replica2part2dev.append(new_replica)
self._replica2part2dev = new_replica2part2dev
for device in self._iter_devs():
device['parts'] *= 2
# We need to update the time when a partition has been moved the last
# time. Since this is an array of all partitions, we need to double it
        # too
new_last_part_moves = []
for partition in self._last_part_moves:
new_last_part_moves.append(partition)
new_last_part_moves.append(partition)
self._last_part_moves = new_last_part_moves
self.part_power += 1
self.parts *= 2
self.version += 1
|
apache-2.0
| -738,666,777,734,163,300 | 41.572917 | 79 | 0.551532 | false | 4.356111 | false | false | false |
oz123/radiopy
|
rpy.py
|
1
|
1118
|
from subprocess import Popen
import pty
import os
import sys
import pynotify
def parse_ICY(line):
aline = line.split('\r\n')[-1]
junk, info = aline.split('=', 1)
try:
info, junk = info.split(';', 1)
except ValueError:
pass
artist, title = info.split('-')
return artist.strip("'"), title.strip("'")
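# Illustrative sketch (not part of the original script): parse_ICY expects a
# chunk of mplayer output whose last line carries SHOUTcast-style metadata
# (format assumed, not taken from this repo). Note the leftover spaces around
# the returned values.
#
#     line = "ICY Info: StreamTitle='Some Artist - Some Title';StreamUrl='';"
#     artist, title = parse_ICY(line)  # -> ("Some Artist ", " Some Title")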
cmd = ['mplayer',
'-playlist', 'http://www.radioparadise.com/musiclinks/rp_128aac.m3u']
if sys.argv[1:]:
cmd = cmd[:1] + sys.argv[1:] + cmd[1:]
master, slave = pty.openpty()
proc = Popen(cmd, stdout=slave, stderr=slave)
stdout = os.fdopen(master)
ICYSTRING = ''
while True:
line = stdout.readline(1)
ICYSTRING = ICYSTRING + line
if 'ICY Info' in ICYSTRING:
for i in range(80):
ICYSTRING = ICYSTRING + stdout.readline(1)
a, t = parse_ICY(ICYSTRING)
ICYSTRING = ''
n = pynotify.Notification(a, t)
n.set_timeout(10000) # 10 sec
n.set_category("device")
pynotify.init("Timekpr notification")
n.show()
pynotify.uninit()
ICYSTRING = ''
sys.stdout.write(line)
|
gpl-3.0
| 1,780,432,038,875,164,700 | 23.844444 | 76 | 0.592129 | false | 3.122905 | false | false | false |
HartBlanc/Mastercard_Exchange_Rates
|
rate__retriever.py
|
1
|
7513
|
# 151 currencies, 22650 currency pairs, 364 days (period 1) 134 days (period 2) => 3,035,100(20,100/from_c) - 8,221,950 entries(8361.157)
print('importing packages')
import time
import sqlite3
import json
import requests
import datetime
import math
import pytz
from datetime import date
from multiprocessing.pool import Pool
print('connecting to db')
conn = sqlite3.connect('mastercard_1.sqlite')
cur = conn.cursor()
print('defining functions')
def day_calculator(date):
return (date - date_1).days + 1
def date_calculator(day):
return date_1+datetime.timedelta(day-1)
def date_stringer(date):
return date.strftime('%Y-%m-%d')
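# Quick sanity check of the helpers above (illustrative values): with date_1
# set to 2016-03-01, day_calculator(date(2016, 3, 3)) == 3 and
# date_calculator(3) == date(2016, 3, 3), since day 1 maps to date_1 itself.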
print('defining constants')
start_from_id = int(input('from_id initial value: '))
start_to_id = int(input('to_id initial value: '))
base_url = 'https://www.mastercard.us/settlement/currencyrate/fxDate={date_};transCurr={from_};crdhldBillCurr={to_};bankFee=0.00;transAmt=1/conversion-rate'
first_date=date(2016,2,29)
now = datetime.datetime.now(pytz.timezone('US/Eastern'))
if now.hour < 14:
today=now.date() - datetime.timedelta(days=1)
else:
today=now.date()
date_1=today - datetime.timedelta(days=364)
if date_1.weekday()==6:
date_1=date_1+datetime.timedelta(days=1)
if date_1.weekday()==5:
date_1=date_1+datetime.timedelta(days=2)
date_string = date_stringer(date_1)
print('first date in period', date_1, 'today:',today)
late_day=day_calculator(date(2016,10,14))
print('grabbing codes from db')
cur.execute('SELECT code FROM Currency_Codes')
code_tuples=cur.fetchall()
codes = [ x[0] for x in code_tuples ]
number_of_codes = len(codes)
print('initiating')
for code in codes[(start_from_id-1):]:
start_time_f = datetime.datetime.now()
to_id = start_to_id
from_c = code
cur.execute('SELECT id FROM Currency_Codes WHERE code=?', (from_c,))
from_id = cur.fetchone()[0]
while to_id <= number_of_codes:
start_time_t = datetime.datetime.now()
to_c = codes[to_id-1]
print(from_c,to_c)
        if from_c == to_c:
to_id +=1
continue
#### FIND START DATE - FIRST CHECKS LATE DAY, THEN FIRST DAY, THEN DOES BINARY SEARCH
lower_bound=1
upper_bound=late_day
day_i=late_day-1
while upper_bound != lower_bound:
date_i = date_calculator(day_i)
if day_i < late_day-4:
if date_i.weekday() == 6:
if lower_bound <= day_i-2 :
day_i=day_i-2
if date_i.weekday() == 5:
if lower_bound <= day_i-1:
day_i=day_i-1
date_i = date_calculator(day_i)
date_string_i=date_stringer(date_i)
url=base_url.format(date_=date_string_i,from_=from_c,to_=to_c)
print(date_string_i,'day number:', day_i,'day of the week:', date_i.weekday())
#Retries if requests doesn't return a json file (server errors)
print('requesting url')
while True:
try:
r = requests.get(url)
JSON=r.json()
except:
time.sleep(5)
continue
break
print('json retrieved')
if 'errorCode' in JSON['data']:
if JSON['data']['errorCode'] in ('104','114'):
print('data not available for this date')
lower_bound = day_i+1
if day_i==late_day-1:
day_i=late_day
break
else:
day_i=math.ceil((lower_bound+upper_bound)/2)
print('lower:',lower_bound,'upper:',upper_bound)
elif JSON['data']['errorCode'] in ('500','401','400'):
print('error code: ',JSON['data']['errorCode'])
print('Server having technical problems')
time.sleep(500)
continue
else:
print('error code: ',JSON['data']['errorCode'])
print('conversion rate too small')
break
else:
upper_bound = day_i
if day_i == late_day-1:
day_i=1
elif day_i == 1:
break
else:
day_i=math.floor((lower_bound+upper_bound)/2)
print('lower:',lower_bound,'upper:',upper_bound)
#### Extract rates for period up to today
day=day_i
date=date_calculator(day_i)
print('found start day')
start_=datetime.datetime.now()
while (today - date).days >=0:
if day < late_day-4:
if date.weekday() == 5:
day = day + 2
date = date_calculator(day)
date_string=date_stringer(date)
url=base_url.format(date_=date_string,from_=from_c,to_=to_c)
print(date)
#Retries if requests doesn't return a json file (server errors)
print('requesting url')
while True:
try:
r = requests.get(url)
JSON=r.json()
except:
time.sleep(5)
continue
break
print('json retrieved')
if 'errorCode' in JSON['data']:
if JSON['data']['errorCode'] in ('104','114'):
print('data not available for this date')
day = day + 1
date = date_calculator(day)
continue
elif JSON['data']['errorCode'] in ('500','401','400'):
print('error code: ',JSON['data']['errorCode'])
print('Server having technical problems')
time.sleep(500)
continue
else:
print('error code: ',JSON['data']['errorCode'])
print('conversion rate too small')
break
else:
rate = JSON['data']['conversionRate']
day = day_calculator(date)
print(rate)
date_id=(date_1-first_date).days+day
cur.execute('''INSERT OR REPLACE INTO Rates
(rate, from_id, to_id, date_id)
VALUES ( ?, ?, ?, ?)''',
(rate, from_id, to_id, date_id) )
day = day + 1
date = date_calculator(day)
end_ = datetime.datetime.now()
print(from_c,'Duration: {}'.format(end_ - start_))
to_id +=1
conn.commit()
end_time_t = datetime.datetime.now()
print(to_c,'Duration: {}'.format(end_time_t - start_time_t))
date_1=today - datetime.timedelta(days=364)
if date_1.weekday()==6:
date_1=date_1+datetime.timedelta(days=1)
if date_1.weekday()==5:
date_1=date_1+datetime.timedelta(days=2)
now = datetime.datetime.now(pytz.timezone('US/Eastern'))
if now.hour < 14:
today=now.date() - datetime.timedelta(days=1)
else:
today=now.date()
end_time_f = datetime.datetime.now()
print(from_c,'Duration: {}'.format(end_time_f - start_time_f))
print('done')
|
mit
| 8,326,743,759,790,757,000 | 35.64878 | 156 | 0.508585 | false | 3.86074 | false | false | false |
linebp/pandas
|
pandas/tests/reshape/test_reshape.py
|
1
|
43476
|
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import pytest
from pandas import DataFrame, Series
import pandas as pd
from numpy import nan
import numpy as np
from pandas.util.testing import assert_frame_equal
from pandas.core.reshape.reshape import (
melt, lreshape, get_dummies, wide_to_long)
import pandas.util.testing as tm
from pandas.compat import range, u
class TestMelt(object):
def setup_method(self, method):
self.df = tm.makeTimeDataFrame()[:10]
self.df['id1'] = (self.df['A'] > 0).astype(np.int64)
self.df['id2'] = (self.df['B'] > 0).astype(np.int64)
self.var_name = 'var'
self.value_name = 'val'
        self.df1 = pd.DataFrame([[1.067683, -1.110463, 0.20867],
                                 [-1.321405, 0.368915, -1.055342],
                                 [-0.807333, 0.08298, -0.873361]])
self.df1.columns = [list('ABC'), list('abc')]
self.df1.columns.names = ['CAP', 'low']
def test_top_level_method(self):
result = melt(self.df)
assert result.columns.tolist() == ['variable', 'value']
def test_method_signatures(self):
tm.assert_frame_equal(self.df.melt(),
melt(self.df))
tm.assert_frame_equal(self.df.melt(id_vars=['id1', 'id2'],
value_vars=['A', 'B']),
melt(self.df,
id_vars=['id1', 'id2'],
value_vars=['A', 'B']))
tm.assert_frame_equal(self.df.melt(var_name=self.var_name,
value_name=self.value_name),
melt(self.df,
var_name=self.var_name,
value_name=self.value_name))
tm.assert_frame_equal(self.df1.melt(col_level=0),
melt(self.df1, col_level=0))
def test_default_col_names(self):
result = self.df.melt()
assert result.columns.tolist() == ['variable', 'value']
result1 = self.df.melt(id_vars=['id1'])
assert result1.columns.tolist() == ['id1', 'variable', 'value']
result2 = self.df.melt(id_vars=['id1', 'id2'])
assert result2.columns.tolist() == ['id1', 'id2', 'variable', 'value']
def test_value_vars(self):
result3 = self.df.melt(id_vars=['id1', 'id2'], value_vars='A')
assert len(result3) == 10
result4 = self.df.melt(id_vars=['id1', 'id2'], value_vars=['A', 'B'])
expected4 = DataFrame({'id1': self.df['id1'].tolist() * 2,
'id2': self.df['id2'].tolist() * 2,
'variable': ['A'] * 10 + ['B'] * 10,
'value': (self.df['A'].tolist() +
self.df['B'].tolist())},
columns=['id1', 'id2', 'variable', 'value'])
tm.assert_frame_equal(result4, expected4)
def test_value_vars_types(self):
# GH 15348
expected = DataFrame({'id1': self.df['id1'].tolist() * 2,
'id2': self.df['id2'].tolist() * 2,
'variable': ['A'] * 10 + ['B'] * 10,
'value': (self.df['A'].tolist() +
self.df['B'].tolist())},
columns=['id1', 'id2', 'variable', 'value'])
for type_ in (tuple, list, np.array):
result = self.df.melt(id_vars=['id1', 'id2'],
value_vars=type_(('A', 'B')))
tm.assert_frame_equal(result, expected)
def test_vars_work_with_multiindex(self):
expected = DataFrame({
('A', 'a'): self.df1[('A', 'a')],
'CAP': ['B'] * len(self.df1),
'low': ['b'] * len(self.df1),
'value': self.df1[('B', 'b')],
}, columns=[('A', 'a'), 'CAP', 'low', 'value'])
result = self.df1.melt(id_vars=[('A', 'a')], value_vars=[('B', 'b')])
tm.assert_frame_equal(result, expected)
def test_tuple_vars_fail_with_multiindex(self):
# melt should fail with an informative error message if
# the columns have a MultiIndex and a tuple is passed
# for id_vars or value_vars.
tuple_a = ('A', 'a')
list_a = [tuple_a]
tuple_b = ('B', 'b')
list_b = [tuple_b]
for id_vars, value_vars in ((tuple_a, list_b), (list_a, tuple_b),
(tuple_a, tuple_b)):
with tm.assert_raises_regex(ValueError, r'MultiIndex'):
self.df1.melt(id_vars=id_vars, value_vars=value_vars)
def test_custom_var_name(self):
result5 = self.df.melt(var_name=self.var_name)
assert result5.columns.tolist() == ['var', 'value']
result6 = self.df.melt(id_vars=['id1'], var_name=self.var_name)
assert result6.columns.tolist() == ['id1', 'var', 'value']
result7 = self.df.melt(id_vars=['id1', 'id2'], var_name=self.var_name)
assert result7.columns.tolist() == ['id1', 'id2', 'var', 'value']
result8 = self.df.melt(id_vars=['id1', 'id2'], value_vars='A',
var_name=self.var_name)
assert result8.columns.tolist() == ['id1', 'id2', 'var', 'value']
result9 = self.df.melt(id_vars=['id1', 'id2'], value_vars=['A', 'B'],
var_name=self.var_name)
expected9 = DataFrame({'id1': self.df['id1'].tolist() * 2,
'id2': self.df['id2'].tolist() * 2,
self.var_name: ['A'] * 10 + ['B'] * 10,
'value': (self.df['A'].tolist() +
self.df['B'].tolist())},
columns=['id1', 'id2', self.var_name, 'value'])
tm.assert_frame_equal(result9, expected9)
def test_custom_value_name(self):
result10 = self.df.melt(value_name=self.value_name)
assert result10.columns.tolist() == ['variable', 'val']
result11 = self.df.melt(id_vars=['id1'], value_name=self.value_name)
assert result11.columns.tolist() == ['id1', 'variable', 'val']
result12 = self.df.melt(id_vars=['id1', 'id2'],
value_name=self.value_name)
assert result12.columns.tolist() == ['id1', 'id2', 'variable', 'val']
result13 = self.df.melt(id_vars=['id1', 'id2'], value_vars='A',
value_name=self.value_name)
assert result13.columns.tolist() == ['id1', 'id2', 'variable', 'val']
result14 = self.df.melt(id_vars=['id1', 'id2'], value_vars=['A', 'B'],
value_name=self.value_name)
expected14 = DataFrame({'id1': self.df['id1'].tolist() * 2,
'id2': self.df['id2'].tolist() * 2,
'variable': ['A'] * 10 + ['B'] * 10,
self.value_name: (self.df['A'].tolist() +
self.df['B'].tolist())},
columns=['id1', 'id2', 'variable',
self.value_name])
tm.assert_frame_equal(result14, expected14)
def test_custom_var_and_value_name(self):
result15 = self.df.melt(var_name=self.var_name,
value_name=self.value_name)
assert result15.columns.tolist() == ['var', 'val']
result16 = self.df.melt(id_vars=['id1'], var_name=self.var_name,
value_name=self.value_name)
assert result16.columns.tolist() == ['id1', 'var', 'val']
result17 = self.df.melt(id_vars=['id1', 'id2'],
var_name=self.var_name,
value_name=self.value_name)
assert result17.columns.tolist() == ['id1', 'id2', 'var', 'val']
result18 = self.df.melt(id_vars=['id1', 'id2'], value_vars='A',
var_name=self.var_name,
value_name=self.value_name)
assert result18.columns.tolist() == ['id1', 'id2', 'var', 'val']
result19 = self.df.melt(id_vars=['id1', 'id2'], value_vars=['A', 'B'],
var_name=self.var_name,
value_name=self.value_name)
expected19 = DataFrame({'id1': self.df['id1'].tolist() * 2,
'id2': self.df['id2'].tolist() * 2,
self.var_name: ['A'] * 10 + ['B'] * 10,
self.value_name: (self.df['A'].tolist() +
self.df['B'].tolist())},
columns=['id1', 'id2', self.var_name,
self.value_name])
tm.assert_frame_equal(result19, expected19)
df20 = self.df.copy()
df20.columns.name = 'foo'
result20 = df20.melt()
assert result20.columns.tolist() == ['foo', 'value']
def test_col_level(self):
res1 = self.df1.melt(col_level=0)
res2 = self.df1.melt(col_level='CAP')
assert res1.columns.tolist() == ['CAP', 'value']
assert res2.columns.tolist() == ['CAP', 'value']
def test_multiindex(self):
res = self.df1.melt()
assert res.columns.tolist() == ['CAP', 'low', 'value']
class TestGetDummies(object):
sparse = False
def setup_method(self, method):
self.df = DataFrame({'A': ['a', 'b', 'a'],
'B': ['b', 'b', 'c'],
'C': [1, 2, 3]})
def test_basic(self):
s_list = list('abc')
s_series = Series(s_list)
s_series_index = Series(s_list, list('ABC'))
expected = DataFrame({'a': {0: 1,
1: 0,
2: 0},
'b': {0: 0,
1: 1,
2: 0},
'c': {0: 0,
1: 0,
2: 1}}, dtype=np.uint8)
assert_frame_equal(get_dummies(s_list, sparse=self.sparse), expected)
assert_frame_equal(get_dummies(s_series, sparse=self.sparse), expected)
expected.index = list('ABC')
assert_frame_equal(
get_dummies(s_series_index, sparse=self.sparse), expected)
def test_basic_types(self):
# GH 10531
s_list = list('abc')
s_series = Series(s_list)
s_df = DataFrame({'a': [0, 1, 0, 1, 2],
'b': ['A', 'A', 'B', 'C', 'C'],
'c': [2, 3, 3, 3, 2]})
expected = DataFrame({'a': [1, 0, 0],
'b': [0, 1, 0],
'c': [0, 0, 1]},
dtype='uint8',
columns=list('abc'))
if not self.sparse:
compare = tm.assert_frame_equal
else:
expected = expected.to_sparse(fill_value=0, kind='integer')
compare = tm.assert_sp_frame_equal
result = get_dummies(s_list, sparse=self.sparse)
compare(result, expected)
result = get_dummies(s_series, sparse=self.sparse)
compare(result, expected)
result = get_dummies(s_df, sparse=self.sparse, columns=s_df.columns)
tm.assert_series_equal(result.get_dtype_counts(),
Series({'uint8': 8}))
result = get_dummies(s_df, sparse=self.sparse, columns=['a'])
expected = Series({'uint8': 3, 'int64': 1, 'object': 1}).sort_values()
tm.assert_series_equal(result.get_dtype_counts().sort_values(),
expected)
def test_just_na(self):
just_na_list = [np.nan]
just_na_series = Series(just_na_list)
just_na_series_index = Series(just_na_list, index=['A'])
res_list = get_dummies(just_na_list, sparse=self.sparse)
res_series = get_dummies(just_na_series, sparse=self.sparse)
res_series_index = get_dummies(just_na_series_index,
sparse=self.sparse)
assert res_list.empty
assert res_series.empty
assert res_series_index.empty
assert res_list.index.tolist() == [0]
assert res_series.index.tolist() == [0]
assert res_series_index.index.tolist() == ['A']
def test_include_na(self):
s = ['a', 'b', np.nan]
res = get_dummies(s, sparse=self.sparse)
exp = DataFrame({'a': {0: 1, 1: 0, 2: 0},
'b': {0: 0, 1: 1, 2: 0}}, dtype=np.uint8)
assert_frame_equal(res, exp)
# Sparse dataframes do not allow nan labelled columns, see #GH8822
res_na = get_dummies(s, dummy_na=True, sparse=self.sparse)
exp_na = DataFrame({nan: {0: 0, 1: 0, 2: 1},
'a': {0: 1, 1: 0, 2: 0},
'b': {0: 0, 1: 1, 2: 0}},
dtype=np.uint8)
exp_na = exp_na.reindex_axis(['a', 'b', nan], 1)
# hack (NaN handling in assert_index_equal)
exp_na.columns = res_na.columns
assert_frame_equal(res_na, exp_na)
res_just_na = get_dummies([nan], dummy_na=True, sparse=self.sparse)
exp_just_na = DataFrame(Series(1, index=[0]), columns=[nan],
dtype=np.uint8)
tm.assert_numpy_array_equal(res_just_na.values, exp_just_na.values)
    def test_unicode(self):
        # See GH 6885 - get_dummies chokes on unicode values
import unicodedata
e = 'e'
eacute = unicodedata.lookup('LATIN SMALL LETTER E WITH ACUTE')
s = [e, eacute, eacute]
res = get_dummies(s, prefix='letter', sparse=self.sparse)
exp = DataFrame({'letter_e': {0: 1,
1: 0,
2: 0},
u('letter_%s') % eacute: {0: 0,
1: 1,
2: 1}},
dtype=np.uint8)
assert_frame_equal(res, exp)
def test_dataframe_dummies_all_obj(self):
df = self.df[['A', 'B']]
result = get_dummies(df, sparse=self.sparse)
expected = DataFrame({'A_a': [1, 0, 1],
'A_b': [0, 1, 0],
'B_b': [1, 1, 0],
'B_c': [0, 0, 1]}, dtype=np.uint8)
assert_frame_equal(result, expected)
def test_dataframe_dummies_mix_default(self):
df = self.df
result = get_dummies(df, sparse=self.sparse)
expected = DataFrame({'C': [1, 2, 3],
'A_a': [1, 0, 1],
'A_b': [0, 1, 0],
'B_b': [1, 1, 0],
'B_c': [0, 0, 1]})
cols = ['A_a', 'A_b', 'B_b', 'B_c']
expected[cols] = expected[cols].astype(np.uint8)
expected = expected[['C', 'A_a', 'A_b', 'B_b', 'B_c']]
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_list(self):
prefixes = ['from_A', 'from_B']
df = DataFrame({'A': ['a', 'b', 'a'],
'B': ['b', 'b', 'c'],
'C': [1, 2, 3]})
result = get_dummies(df, prefix=prefixes, sparse=self.sparse)
expected = DataFrame({'C': [1, 2, 3],
'from_A_a': [1, 0, 1],
'from_A_b': [0, 1, 0],
'from_B_b': [1, 1, 0],
'from_B_c': [0, 0, 1]})
cols = expected.columns[1:]
expected[cols] = expected[cols].astype(np.uint8)
expected = expected[['C', 'from_A_a', 'from_A_b', 'from_B_b',
'from_B_c']]
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_str(self):
# not that you should do this...
df = self.df
result = get_dummies(df, prefix='bad', sparse=self.sparse)
expected = DataFrame([[1, 1, 0, 1, 0],
[2, 0, 1, 1, 0],
[3, 1, 0, 0, 1]],
columns=['C', 'bad_a', 'bad_b', 'bad_b', 'bad_c'],
dtype=np.uint8)
expected = expected.astype({"C": np.int64})
assert_frame_equal(result, expected)
def test_dataframe_dummies_subset(self):
df = self.df
result = get_dummies(df, prefix=['from_A'], columns=['A'],
sparse=self.sparse)
expected = DataFrame({'from_A_a': [1, 0, 1],
'from_A_b': [0, 1, 0],
'B': ['b', 'b', 'c'],
'C': [1, 2, 3]})
cols = ['from_A_a', 'from_A_b']
expected[cols] = expected[cols].astype(np.uint8)
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_sep(self):
df = self.df
result = get_dummies(df, prefix_sep='..', sparse=self.sparse)
expected = DataFrame({'C': [1, 2, 3],
'A..a': [1, 0, 1],
'A..b': [0, 1, 0],
'B..b': [1, 1, 0],
'B..c': [0, 0, 1]})
expected = expected[['C', 'A..a', 'A..b', 'B..b', 'B..c']]
cols = expected.columns[1:]
expected[cols] = expected[cols].astype(np.uint8)
assert_frame_equal(result, expected)
result = get_dummies(df, prefix_sep=['..', '__'], sparse=self.sparse)
expected = expected.rename(columns={'B..b': 'B__b', 'B..c': 'B__c'})
assert_frame_equal(result, expected)
result = get_dummies(df, prefix_sep={'A': '..',
'B': '__'}, sparse=self.sparse)
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_bad_length(self):
with pytest.raises(ValueError):
get_dummies(self.df, prefix=['too few'], sparse=self.sparse)
def test_dataframe_dummies_prefix_sep_bad_length(self):
with pytest.raises(ValueError):
get_dummies(self.df, prefix_sep=['bad'], sparse=self.sparse)
def test_dataframe_dummies_prefix_dict(self):
prefixes = {'A': 'from_A', 'B': 'from_B'}
df = DataFrame({'A': ['a', 'b', 'a'],
'B': ['b', 'b', 'c'],
'C': [1, 2, 3]})
result = get_dummies(df, prefix=prefixes, sparse=self.sparse)
expected = DataFrame({'from_A_a': [1, 0, 1],
'from_A_b': [0, 1, 0],
'from_B_b': [1, 1, 0],
'from_B_c': [0, 0, 1],
'C': [1, 2, 3]})
cols = ['from_A_a', 'from_A_b', 'from_B_b', 'from_B_c']
expected[cols] = expected[cols].astype(np.uint8)
assert_frame_equal(result, expected)
def test_dataframe_dummies_with_na(self):
df = self.df
df.loc[3, :] = [np.nan, np.nan, np.nan]
result = get_dummies(df, dummy_na=True, sparse=self.sparse)
expected = DataFrame({'C': [1, 2, 3, np.nan],
'A_a': [1, 0, 1, 0],
'A_b': [0, 1, 0, 0],
'A_nan': [0, 0, 0, 1],
'B_b': [1, 1, 0, 0],
'B_c': [0, 0, 1, 0],
'B_nan': [0, 0, 0, 1]})
cols = ['A_a', 'A_b', 'A_nan', 'B_b', 'B_c', 'B_nan']
expected[cols] = expected[cols].astype(np.uint8)
expected = expected[['C', 'A_a', 'A_b', 'A_nan',
'B_b', 'B_c', 'B_nan']]
assert_frame_equal(result, expected)
result = get_dummies(df, dummy_na=False, sparse=self.sparse)
expected = expected[['C', 'A_a', 'A_b', 'B_b', 'B_c']]
assert_frame_equal(result, expected)
def test_dataframe_dummies_with_categorical(self):
df = self.df
df['cat'] = pd.Categorical(['x', 'y', 'y'])
result = get_dummies(df, sparse=self.sparse)
expected = DataFrame({'C': [1, 2, 3],
'A_a': [1, 0, 1],
'A_b': [0, 1, 0],
'B_b': [1, 1, 0],
'B_c': [0, 0, 1],
'cat_x': [1, 0, 0],
'cat_y': [0, 1, 1]})
cols = ['A_a', 'A_b', 'B_b', 'B_c', 'cat_x', 'cat_y']
expected[cols] = expected[cols].astype(np.uint8)
expected = expected[['C', 'A_a', 'A_b', 'B_b', 'B_c',
'cat_x', 'cat_y']]
assert_frame_equal(result, expected)
def test_basic_drop_first(self):
# GH12402 Add a new parameter `drop_first` to avoid collinearity
# Basic case
s_list = list('abc')
s_series = Series(s_list)
s_series_index = Series(s_list, list('ABC'))
expected = DataFrame({'b': {0: 0,
1: 1,
2: 0},
'c': {0: 0,
1: 0,
2: 1}}, dtype=np.uint8)
result = get_dummies(s_list, sparse=self.sparse, drop_first=True)
assert_frame_equal(result, expected)
result = get_dummies(s_series, sparse=self.sparse, drop_first=True)
assert_frame_equal(result, expected)
expected.index = list('ABC')
result = get_dummies(s_series_index, sparse=self.sparse,
drop_first=True)
assert_frame_equal(result, expected)
def test_basic_drop_first_one_level(self):
        # Test the case where the categorical variable has only one level.
s_list = list('aaa')
s_series = Series(s_list)
s_series_index = Series(s_list, list('ABC'))
expected = DataFrame(index=np.arange(3))
result = get_dummies(s_list, sparse=self.sparse, drop_first=True)
assert_frame_equal(result, expected)
result = get_dummies(s_series, sparse=self.sparse, drop_first=True)
assert_frame_equal(result, expected)
expected = DataFrame(index=list('ABC'))
result = get_dummies(s_series_index, sparse=self.sparse,
drop_first=True)
assert_frame_equal(result, expected)
def test_basic_drop_first_NA(self):
        # Test NA handling together with drop_first
s_NA = ['a', 'b', np.nan]
res = get_dummies(s_NA, sparse=self.sparse, drop_first=True)
exp = DataFrame({'b': {0: 0,
1: 1,
2: 0}}, dtype=np.uint8)
assert_frame_equal(res, exp)
res_na = get_dummies(s_NA, dummy_na=True, sparse=self.sparse,
drop_first=True)
exp_na = DataFrame({'b': {0: 0,
1: 1,
2: 0},
nan: {0: 0,
1: 0,
2: 1}}, dtype=np.uint8).reindex_axis(
['b', nan], 1)
assert_frame_equal(res_na, exp_na)
res_just_na = get_dummies([nan], dummy_na=True, sparse=self.sparse,
drop_first=True)
exp_just_na = DataFrame(index=np.arange(1))
assert_frame_equal(res_just_na, exp_just_na)
def test_dataframe_dummies_drop_first(self):
df = self.df[['A', 'B']]
result = get_dummies(df, sparse=self.sparse, drop_first=True)
expected = DataFrame({'A_b': [0, 1, 0],
'B_c': [0, 0, 1]}, dtype=np.uint8)
assert_frame_equal(result, expected)
def test_dataframe_dummies_drop_first_with_categorical(self):
df = self.df
df['cat'] = pd.Categorical(['x', 'y', 'y'])
result = get_dummies(df, sparse=self.sparse, drop_first=True)
expected = DataFrame({'C': [1, 2, 3],
'A_b': [0, 1, 0],
'B_c': [0, 0, 1],
'cat_y': [0, 1, 1]})
cols = ['A_b', 'B_c', 'cat_y']
expected[cols] = expected[cols].astype(np.uint8)
expected = expected[['C', 'A_b', 'B_c', 'cat_y']]
assert_frame_equal(result, expected)
def test_dataframe_dummies_drop_first_with_na(self):
df = self.df
df.loc[3, :] = [np.nan, np.nan, np.nan]
result = get_dummies(df, dummy_na=True, sparse=self.sparse,
drop_first=True)
expected = DataFrame({'C': [1, 2, 3, np.nan],
'A_b': [0, 1, 0, 0],
'A_nan': [0, 0, 0, 1],
'B_c': [0, 0, 1, 0],
'B_nan': [0, 0, 0, 1]})
cols = ['A_b', 'A_nan', 'B_c', 'B_nan']
expected[cols] = expected[cols].astype(np.uint8)
expected = expected[['C', 'A_b', 'A_nan', 'B_c', 'B_nan']]
assert_frame_equal(result, expected)
result = get_dummies(df, dummy_na=False, sparse=self.sparse,
drop_first=True)
expected = expected[['C', 'A_b', 'B_c']]
assert_frame_equal(result, expected)
def test_int_int(self):
data = Series([1, 2, 1])
result = pd.get_dummies(data)
expected = DataFrame([[1, 0], [0, 1], [1, 0]], columns=[1, 2],
dtype=np.uint8)
tm.assert_frame_equal(result, expected)
data = Series(pd.Categorical(['a', 'b', 'a']))
result = pd.get_dummies(data)
expected = DataFrame([[1, 0], [0, 1], [1, 0]],
columns=pd.Categorical(['a', 'b']),
dtype=np.uint8)
tm.assert_frame_equal(result, expected)
def test_int_df(self):
data = DataFrame(
{'A': [1, 2, 1],
'B': pd.Categorical(['a', 'b', 'a']),
'C': [1, 2, 1],
'D': [1., 2., 1.]
}
)
columns = ['C', 'D', 'A_1', 'A_2', 'B_a', 'B_b']
expected = DataFrame([
[1, 1., 1, 0, 1, 0],
[2, 2., 0, 1, 0, 1],
[1, 1., 1, 0, 1, 0]
], columns=columns)
expected[columns[2:]] = expected[columns[2:]].astype(np.uint8)
result = pd.get_dummies(data, columns=['A', 'B'])
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_preserve_categorical_dtype(self):
# GH13854
for ordered in [False, True]:
cat = pd.Categorical(list("xy"), categories=list("xyz"),
ordered=ordered)
result = get_dummies(cat)
data = np.array([[1, 0, 0], [0, 1, 0]], dtype=np.uint8)
cols = pd.CategoricalIndex(cat.categories,
categories=cat.categories,
ordered=ordered)
expected = DataFrame(data, columns=cols)
tm.assert_frame_equal(result, expected)
class TestGetDummiesSparse(TestGetDummies):
sparse = True
class TestMakeAxisDummies(object):
def test_preserve_categorical_dtype(self):
# GH13854
for ordered in [False, True]:
cidx = pd.CategoricalIndex(list("xyz"), ordered=ordered)
midx = pd.MultiIndex(levels=[['a'], cidx],
labels=[[0, 0], [0, 1]])
df = DataFrame([[10, 11]], index=midx)
expected = DataFrame([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
index=midx, columns=cidx)
from pandas.core.reshape.reshape import make_axis_dummies
result = make_axis_dummies(df)
tm.assert_frame_equal(result, expected)
result = make_axis_dummies(df, transform=lambda x: x)
tm.assert_frame_equal(result, expected)
class TestLreshape(object):
def test_pairs(self):
data = {'birthdt': ['08jan2009', '20dec2008', '30dec2008', '21dec2008',
'11jan2009'],
'birthwt': [1766, 3301, 1454, 3139, 4133],
'id': [101, 102, 103, 104, 105],
'sex': ['Male', 'Female', 'Female', 'Female', 'Female'],
'visitdt1': ['11jan2009', '22dec2008', '04jan2009',
'29dec2008', '20jan2009'],
'visitdt2':
['21jan2009', nan, '22jan2009', '31dec2008', '03feb2009'],
'visitdt3': ['05feb2009', nan, nan, '02jan2009', '15feb2009'],
'wt1': [1823, 3338, 1549, 3298, 4306],
'wt2': [2011.0, nan, 1892.0, 3338.0, 4575.0],
'wt3': [2293.0, nan, nan, 3377.0, 4805.0]}
df = DataFrame(data)
spec = {'visitdt': ['visitdt%d' % i for i in range(1, 4)],
'wt': ['wt%d' % i for i in range(1, 4)]}
result = lreshape(df, spec)
exp_data = {'birthdt':
['08jan2009', '20dec2008', '30dec2008', '21dec2008',
'11jan2009', '08jan2009', '30dec2008', '21dec2008',
'11jan2009', '08jan2009', '21dec2008', '11jan2009'],
'birthwt': [1766, 3301, 1454, 3139, 4133, 1766, 1454, 3139,
4133, 1766, 3139, 4133],
'id': [101, 102, 103, 104, 105, 101, 103, 104, 105, 101,
104, 105],
'sex': ['Male', 'Female', 'Female', 'Female', 'Female',
'Male', 'Female', 'Female', 'Female', 'Male',
'Female', 'Female'],
'visitdt': ['11jan2009', '22dec2008', '04jan2009',
'29dec2008', '20jan2009', '21jan2009',
'22jan2009', '31dec2008', '03feb2009',
'05feb2009', '02jan2009', '15feb2009'],
'wt': [1823.0, 3338.0, 1549.0, 3298.0, 4306.0, 2011.0,
1892.0, 3338.0, 4575.0, 2293.0, 3377.0, 4805.0]}
exp = DataFrame(exp_data, columns=result.columns)
tm.assert_frame_equal(result, exp)
result = lreshape(df, spec, dropna=False)
exp_data = {'birthdt':
['08jan2009', '20dec2008', '30dec2008', '21dec2008',
'11jan2009', '08jan2009', '20dec2008', '30dec2008',
'21dec2008', '11jan2009', '08jan2009', '20dec2008',
'30dec2008', '21dec2008', '11jan2009'],
'birthwt': [1766, 3301, 1454, 3139, 4133, 1766, 3301, 1454,
3139, 4133, 1766, 3301, 1454, 3139, 4133],
'id': [101, 102, 103, 104, 105, 101, 102, 103, 104, 105,
101, 102, 103, 104, 105],
'sex': ['Male', 'Female', 'Female', 'Female', 'Female',
'Male', 'Female', 'Female', 'Female', 'Female',
'Male', 'Female', 'Female', 'Female', 'Female'],
'visitdt': ['11jan2009', '22dec2008', '04jan2009',
'29dec2008', '20jan2009', '21jan2009', nan,
'22jan2009', '31dec2008', '03feb2009',
'05feb2009', nan, nan, '02jan2009',
'15feb2009'],
'wt': [1823.0, 3338.0, 1549.0, 3298.0, 4306.0, 2011.0, nan,
1892.0, 3338.0, 4575.0, 2293.0, nan, nan, 3377.0,
4805.0]}
exp = DataFrame(exp_data, columns=result.columns)
tm.assert_frame_equal(result, exp)
spec = {'visitdt': ['visitdt%d' % i for i in range(1, 3)],
'wt': ['wt%d' % i for i in range(1, 4)]}
pytest.raises(ValueError, lreshape, df, spec)
class TestWideToLong(object):
def test_simple(self):
np.random.seed(123)
x = np.random.randn(3)
df = pd.DataFrame({"A1970": {0: "a",
1: "b",
2: "c"},
"A1980": {0: "d",
1: "e",
2: "f"},
"B1970": {0: 2.5,
1: 1.2,
2: .7},
"B1980": {0: 3.2,
1: 1.3,
2: .1},
"X": dict(zip(
range(3), x))})
df["id"] = df.index
exp_data = {"X": x.tolist() + x.tolist(),
"A": ['a', 'b', 'c', 'd', 'e', 'f'],
"B": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1],
"year": ['1970', '1970', '1970', '1980', '1980', '1980'],
"id": [0, 1, 2, 0, 1, 2]}
exp_frame = DataFrame(exp_data)
exp_frame = exp_frame.set_index(['id', 'year'])[["X", "A", "B"]]
long_frame = wide_to_long(df, ["A", "B"], i="id", j="year")
tm.assert_frame_equal(long_frame, exp_frame)
def test_stubs(self):
# GH9204
df = pd.DataFrame([[0, 1, 2, 3, 8], [4, 5, 6, 7, 9]])
df.columns = ['id', 'inc1', 'inc2', 'edu1', 'edu2']
stubs = ['inc', 'edu']
# TODO: unused?
df_long = pd.wide_to_long(df, stubs, i='id', j='age') # noqa
assert stubs == ['inc', 'edu']
def test_separating_character(self):
# GH14779
np.random.seed(123)
x = np.random.randn(3)
df = pd.DataFrame({"A.1970": {0: "a",
1: "b",
2: "c"},
"A.1980": {0: "d",
1: "e",
2: "f"},
"B.1970": {0: 2.5,
1: 1.2,
2: .7},
"B.1980": {0: 3.2,
1: 1.3,
2: .1},
"X": dict(zip(
range(3), x))})
df["id"] = df.index
exp_data = {"X": x.tolist() + x.tolist(),
"A": ['a', 'b', 'c', 'd', 'e', 'f'],
"B": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1],
"year": ['1970', '1970', '1970', '1980', '1980', '1980'],
"id": [0, 1, 2, 0, 1, 2]}
exp_frame = DataFrame(exp_data)
exp_frame = exp_frame.set_index(['id', 'year'])[["X", "A", "B"]]
long_frame = wide_to_long(df, ["A", "B"], i="id", j="year", sep=".")
tm.assert_frame_equal(long_frame, exp_frame)
def test_escapable_characters(self):
np.random.seed(123)
x = np.random.randn(3)
df = pd.DataFrame({"A(quarterly)1970": {0: "a",
1: "b",
2: "c"},
"A(quarterly)1980": {0: "d",
1: "e",
2: "f"},
"B(quarterly)1970": {0: 2.5,
1: 1.2,
2: .7},
"B(quarterly)1980": {0: 3.2,
1: 1.3,
2: .1},
"X": dict(zip(
range(3), x))})
df["id"] = df.index
exp_data = {"X": x.tolist() + x.tolist(),
"A(quarterly)": ['a', 'b', 'c', 'd', 'e', 'f'],
"B(quarterly)": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1],
"year": ['1970', '1970', '1970', '1980', '1980', '1980'],
"id": [0, 1, 2, 0, 1, 2]}
exp_frame = DataFrame(exp_data)
exp_frame = exp_frame.set_index(
['id', 'year'])[["X", "A(quarterly)", "B(quarterly)"]]
long_frame = wide_to_long(df, ["A(quarterly)", "B(quarterly)"],
i="id", j="year")
tm.assert_frame_equal(long_frame, exp_frame)
def test_unbalanced(self):
        # test that we can have a varying number of time variables
df = pd.DataFrame({'A2010': [1.0, 2.0],
'A2011': [3.0, 4.0],
'B2010': [5.0, 6.0],
'X': ['X1', 'X2']})
df['id'] = df.index
exp_data = {'X': ['X1', 'X1', 'X2', 'X2'],
'A': [1.0, 3.0, 2.0, 4.0],
'B': [5.0, np.nan, 6.0, np.nan],
'id': [0, 0, 1, 1],
'year': ['2010', '2011', '2010', '2011']}
exp_frame = pd.DataFrame(exp_data)
exp_frame = exp_frame.set_index(['id', 'year'])[["X", "A", "B"]]
long_frame = wide_to_long(df, ['A', 'B'], i='id', j='year')
tm.assert_frame_equal(long_frame, exp_frame)
def test_character_overlap(self):
# Test we handle overlapping characters in both id_vars and value_vars
df = pd.DataFrame({
'A11': ['a11', 'a22', 'a33'],
'A12': ['a21', 'a22', 'a23'],
'B11': ['b11', 'b12', 'b13'],
'B12': ['b21', 'b22', 'b23'],
'BB11': [1, 2, 3],
'BB12': [4, 5, 6],
'BBBX': [91, 92, 93],
'BBBZ': [91, 92, 93]
})
df['id'] = df.index
exp_frame = pd.DataFrame({
'BBBX': [91, 92, 93, 91, 92, 93],
'BBBZ': [91, 92, 93, 91, 92, 93],
'A': ['a11', 'a22', 'a33', 'a21', 'a22', 'a23'],
'B': ['b11', 'b12', 'b13', 'b21', 'b22', 'b23'],
'BB': [1, 2, 3, 4, 5, 6],
'id': [0, 1, 2, 0, 1, 2],
'year': ['11', '11', '11', '12', '12', '12']})
exp_frame = exp_frame.set_index(['id', 'year'])[
['BBBX', 'BBBZ', 'A', 'B', 'BB']]
long_frame = wide_to_long(df, ['A', 'B', 'BB'], i='id', j='year')
tm.assert_frame_equal(long_frame.sort_index(axis=1),
exp_frame.sort_index(axis=1))
def test_invalid_separator(self):
        # if an invalid separator is supplied an empty data frame is returned
sep = 'nope!'
df = pd.DataFrame({'A2010': [1.0, 2.0],
'A2011': [3.0, 4.0],
'B2010': [5.0, 6.0],
'X': ['X1', 'X2']})
df['id'] = df.index
exp_data = {'X': '',
'A2010': [],
'A2011': [],
'B2010': [],
'id': [],
'year': [],
'A': [],
'B': []}
exp_frame = pd.DataFrame(exp_data)
exp_frame = exp_frame.set_index(['id', 'year'])[[
'X', 'A2010', 'A2011', 'B2010', 'A', 'B']]
exp_frame.index.set_levels([[0, 1], []], inplace=True)
long_frame = wide_to_long(df, ['A', 'B'], i='id', j='year', sep=sep)
tm.assert_frame_equal(long_frame.sort_index(axis=1),
exp_frame.sort_index(axis=1))
def test_num_string_disambiguation(self):
# Test that we can disambiguate number value_vars from
# string value_vars
df = pd.DataFrame({
'A11': ['a11', 'a22', 'a33'],
'A12': ['a21', 'a22', 'a23'],
'B11': ['b11', 'b12', 'b13'],
'B12': ['b21', 'b22', 'b23'],
'BB11': [1, 2, 3],
'BB12': [4, 5, 6],
'Arating': [91, 92, 93],
'Arating_old': [91, 92, 93]
})
df['id'] = df.index
exp_frame = pd.DataFrame({
'Arating': [91, 92, 93, 91, 92, 93],
'Arating_old': [91, 92, 93, 91, 92, 93],
'A': ['a11', 'a22', 'a33', 'a21', 'a22', 'a23'],
'B': ['b11', 'b12', 'b13', 'b21', 'b22', 'b23'],
'BB': [1, 2, 3, 4, 5, 6],
'id': [0, 1, 2, 0, 1, 2],
'year': ['11', '11', '11', '12', '12', '12']})
exp_frame = exp_frame.set_index(['id', 'year'])[
['Arating', 'Arating_old', 'A', 'B', 'BB']]
long_frame = wide_to_long(df, ['A', 'B', 'BB'], i='id', j='year')
tm.assert_frame_equal(long_frame.sort_index(axis=1),
exp_frame.sort_index(axis=1))
def test_invalid_suffixtype(self):
        # If all stub names end with a string, but a numeric suffix is
        # assumed, an empty data frame is returned
df = pd.DataFrame({'Aone': [1.0, 2.0],
'Atwo': [3.0, 4.0],
'Bone': [5.0, 6.0],
'X': ['X1', 'X2']})
df['id'] = df.index
exp_data = {'X': '',
'Aone': [],
'Atwo': [],
'Bone': [],
'id': [],
'year': [],
'A': [],
'B': []}
exp_frame = pd.DataFrame(exp_data)
exp_frame = exp_frame.set_index(['id', 'year'])[[
'X', 'Aone', 'Atwo', 'Bone', 'A', 'B']]
exp_frame.index.set_levels([[0, 1], []], inplace=True)
long_frame = wide_to_long(df, ['A', 'B'], i='id', j='year')
tm.assert_frame_equal(long_frame.sort_index(axis=1),
exp_frame.sort_index(axis=1))
def test_multiple_id_columns(self):
# Taken from http://www.ats.ucla.edu/stat/stata/modules/reshapel.htm
df = pd.DataFrame({
'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3],
'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3],
'ht1': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],
'ht2': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9]
})
exp_frame = pd.DataFrame({
'ht': [2.8, 3.4, 2.9, 3.8, 2.2, 2.9, 2.0, 3.2, 1.8,
2.8, 1.9, 2.4, 2.2, 3.3, 2.3, 3.4, 2.1, 2.9],
'famid': [1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3],
'birth': [1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3],
'age': ['1', '2', '1', '2', '1', '2', '1', '2', '1',
'2', '1', '2', '1', '2', '1', '2', '1', '2']
})
exp_frame = exp_frame.set_index(['famid', 'birth', 'age'])[['ht']]
long_frame = wide_to_long(df, 'ht', i=['famid', 'birth'], j='age')
tm.assert_frame_equal(long_frame, exp_frame)
def test_non_unique_idvars(self):
# GH16382
        # Raise an error if non-unique id vars (i) are passed
df = pd.DataFrame({
'A_A1': [1, 2, 3, 4, 5],
'B_B1': [1, 2, 3, 4, 5],
'x': [1, 1, 1, 1, 1]
})
with pytest.raises(ValueError):
wide_to_long(df, ['A_A', 'B_B'], i='x', j='colname')
|
bsd-3-clause
| -60,484,100,107,960,040 | 42.959555 | 79 | 0.426281 | false | 3.403209 | true | false | false |
xclxxl414/rqalpha
|
rqalpha/mod/rqalpha_mod_alphaStar_utils/mod.py
|
1
|
2371
|
#coding=utf-8
"""
@author: evilXu
@file: mod.py
@time: 2018/2/28 16:59
@description:
"""
from rqalpha.interface import AbstractMod
from rqalpha.utils.logger import system_log,user_system_log
import pandas as pd
from rqalpha.api import *
class UtilsMod(AbstractMod):
def __init__(self):
self._inject_api()
def start_up(self, env, mod_config):
system_log.debug("UtilsMod.start_up,config:{0}",mod_config)
def tear_down(self, code, exception=None):
pass
# print(">>> AlphaHDataMode.tear_down")
def _inject_api(self):
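        # Expose equalWeight_order as a strategy API function; the
        # enforce_phase decorator restricts the execution phases from
        # which it may be called.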
from rqalpha import export_as_api
from rqalpha.execution_context import ExecutionContext
from rqalpha.const import EXECUTION_PHASE
@export_as_api
@ExecutionContext.enforce_phase(EXECUTION_PHASE.ON_INIT,
EXECUTION_PHASE.BEFORE_TRADING,
EXECUTION_PHASE.ON_BAR,
EXECUTION_PHASE.AFTER_TRADING,
EXECUTION_PHASE.SCHEDULED)
def equalWeight_order(tobe_holding_codes=[], context=None):
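            # Rebalance toward equal weights: sell every sellable position
            # not in tobe_holding_codes, then order each target code up to
            # min(available cash fraction, 1 / number of targets).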
user_system_log.info("equalWeight_order:{}",str(tobe_holding_codes))
if len(tobe_holding_codes) < 1:
for code, pos in context.portfolio.positions.items():
if pos.sellable > 0:
order_shares(code, -1 * pos.sellable)
return
# print("positions",context.portfolio.positions)
_target_percent = round(1.0 / len(tobe_holding_codes), 2)
_targets = set(tobe_holding_codes)
_tobe_sell = [pos for code, pos in context.portfolio.positions.items() if code not in _targets]
for pos in _tobe_sell:
if pos.sellable > 0:
order_shares(pos.order_book_id, -1 * pos.sellable)
for code in tobe_holding_codes:
_acount = context.portfolio.stock_account
_cash_percent = round(_acount.cash / _acount.total_value, 2)
_real_percent = min(_cash_percent, _target_percent)
# print(_acount.cash,_acount.total_value,_cash_percent,_real_percent)
if _real_percent > 0:
order_target_percent(code, _real_percent)
return
|
apache-2.0
| -1,318,417,233,380,429,800 | 39.896552 | 107 | 0.566428 | false | 3.745656 | false | false | false |
data-tsunami/museo-cachi
|
cachi/admin.py
|
1
|
1605
|
# -*- coding: utf-8 -*-
#======================================================================
# This file is part of "Museo-Cachi".
#
# Museo-Cachi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Museo-Cachi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Museo-Cachi. If not, see <http://www.gnu.org/licenses/>.
#======================================================================
from __future__ import unicode_literals
from cachi import models
from django.contrib import admin
admin.site.register(models.PiezaConjunto)
admin.site.register(models.Fragmento)
admin.site.register(models.FichaTecnica)
admin.site.register(models.Adjunto)
admin.site.register(models.TipoAdquisicion)
admin.site.register(models.TipoCondicionHallazgo)
admin.site.register(models.Naturaleza)
admin.site.register(models.Persona)
admin.site.register(models.Ubicacion)
admin.site.register(models.InformeCampo)
admin.site.register(models.UbicacionGeografica)
admin.site.register(models.Procedencia)
admin.site.register(models.SitioArqueologico)
admin.site.register(models.FichaRelevamientoSitio)
admin.site.register(models.Modificacion)
|
gpl-3.0
| 8,912,619,328,321,128,000 | 40.153846 | 73 | 0.71028 | false | 3.459052 | false | false | false |
sciapp/pyMolDyn
|
src/config/configuration.py
|
1
|
6186
|
import configobj
import validate
import os
import os.path
import inspect
CONFIG_DIRECTORY = '~/.pymoldyn/' # MUST be written with ~ to save a path in the config file that is relative to the user's home directory
CONFIG_FILE = os.path.expanduser('%s/config.cfg' % CONFIG_DIRECTORY)
CONFIG_SPEC_FILE = os.path.expanduser('%s/config.spec' % CONFIG_DIRECTORY)
# second string is the list type name
type_dict = {
int: ('integer', 'int'),
float: ('float', 'float'),
str: ('string', 'string'),
unicode: ('string', 'string'),
bool: ('boolean', 'bool'),
}
class ConfigNode(object):
def __init__(self):
pass
class Configuration(ConfigNode):
"""
Configuration Object that contains the application settings
"""
class Colors(ConfigNode):
def __init__(self):
self.surface_cavity = [0.2, 0.4, 1.]
self.domain = [0., 1., 0.5]
self.center_cavity = [0.9, 0.4, 0.2]
self.background = [0.0, 0.0, 0.0]
self.bounding_box = [1.0, 1.0, 1.0]
self.bonds = [0.8, 0.8, 0.8]
class OpenGL(ConfigNode):
def __init__(self):
# camera_position =
# offset = (0.0, 0.0, 0.0)
self.gl_window_size = [1200, 400]
self.atom_radius = 0.4
self.bond_radius = 0.1
pass
class Computation(ConfigNode):
def __init__(self):
self.std_cutoff_radius = 2.8
self.std_resolution = 64
self.max_cachefiles = 0
class Path(ConfigNode):
def __init__(self):
self.cache_dir = os.path.join(CONFIG_DIRECTORY, 'cache')
self.ffmpeg = '/usr/local/bin/ffmpeg'
self.result_dir = os.path.join(CONFIG_DIRECTORY, 'results')
def __init__(self):
# standard configuration
self.Colors = Configuration.Colors()
self.OpenGL = Configuration.OpenGL()
self.Computation = Configuration.Computation()
self.Path = Configuration.Path()
self.window_position = [-1, -1]
self.recent_files = ['']
self.max_files = 5
self._file = ConfigFile(self)
def add_recent_file(self, filename):
if len(self.recent_files) == 1 and not self.recent_files[0]:
self.recent_files[0] = filename
elif len(self.recent_files) == self.max_files:
self.recent_files.pop(-1)
self.recent_files.insert(0,filename)
else:
self.recent_files.insert(0, filename)
self.save()
def save(self):
"""
write configuration to file
"""
self._file.save()
def read(self):
"""
read configuration from file
"""
self._file = ConfigFile(self)
self._file.read()
class ConfigFile(object):
"""
ConfigFile that parses the settings to a configuration file using ConfigObj 4
"""
def __init__(self, cfg):
self.config = cfg
@staticmethod
def _create_needed_parent_directories(filename):
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
def generate_configspec(self):
"""
generates the type specification for the configuration data
"""
spec_file = configobj.ConfigObj(CONFIG_SPEC_FILE)
self.generate_spec_for_section(self.file, spec_file)
# TODO: better error handling
try:
self._create_needed_parent_directories(CONFIG_SPEC_FILE)
spec_file.write()
        except IOError as e:
            print "IOError in ConfigFile.generate_configspec:", e
def generate_spec_for_section(self, section, spec_section):
"""
recursive type specification for each subtree
"""
for scalar in section.scalars:
t = type(section[scalar])
type_string = type_dict[t][0] if t is not list else type_dict[type(section[scalar][0])][1] + '_list'
spec_section[scalar] = type_string
for sect in section.sections:
spec_section[sect] = {}
self.generate_spec_for_section(section[sect], spec_section[sect])
def save(self):
"""
recursively reads the object and saves it to the ConfigFile object and finally writes it into the file
"""
self.file = configobj.ConfigObj(CONFIG_FILE)
self.parse_node_to_section(self.config, self.file)
# TODO: better error handling
try:
self._create_needed_parent_directories(CONFIG_FILE)
self.file.write()
self.generate_configspec()
self.file.write()
        except IOError as e:
            print "IOError in ConfigFile.save:", e
def parse_node_to_section(self, node, section):
"""
parses a ConfigNode to file object
"""
for attr_str in dir(node):
attr = getattr(node, attr_str)
if isinstance(attr, ConfigNode):
section[type(attr).__name__] = {}
self.parse_node_to_section(attr, section[type(attr).__name__])
elif not inspect.ismethod(attr) and not attr_str.startswith('_'):
section[attr_str] = attr
else:
pass
# print attr_str, 'NOT PROCESSED'
def read(self):
"""
read a configuration from file
"""
if not os.path.isfile(CONFIG_SPEC_FILE) or not os.path.isfile(CONFIG_FILE):
self.save()
else:
validator = validate.Validator()
self.file = configobj.ConfigObj(CONFIG_FILE,
configspec=CONFIG_SPEC_FILE)
self.file.validate(validator)
self.parse_section_to_node(self.file, self.config)
def parse_section_to_node(self, section, node):
"""
parses a config section to config object
"""
for scalar in section.scalars:
setattr(node, scalar, section[scalar])
for sec in section.sections:
self.parse_section_to_node(section[sec], getattr(node, sec))
config = Configuration()
config.read()
|
mit
| 8,964,271,255,305,038,000 | 30.561224 | 140 | 0.57032 | false | 3.973025 | true | false | false |
rosix-ru/django-directapps-client
|
setup.py
|
1
|
2120
|
# -*- coding: utf-8 -*-
#
# Copyright 2016 Grigoriy Kramarenko <[email protected]>
#
# This file is part of DjangoDirectAppsClient.
#
# DjangoDirectAppsClient is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DjangoDirectAppsClient is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with DjangoDirectAppsClient. If not, see
# <http://www.gnu.org/licenses/>.
#
from setuptools import setup, find_packages
from directapps_client import __version__
setup(
name='django-directapps-client',
version=__version__,
description='Django Direct Apps Client.',
long_description=open('README.rst').read(),
author='Grigoriy Kramarenko',
author_email='[email protected]',
url='https://github.com/rosix-ru/django-directapps-client/',
license='GNU Affero General Public License v3 or later (AGPLv3+)',
platforms='any',
zip_safe=False,
packages=find_packages(),
    include_package_data=True,
install_requires=['django-directapps'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
|
agpl-3.0
| 4,842,296,416,597,150,000 | 38.259259 | 93 | 0.675943 | false | 3.992467 | false | false | false |
jpludens/quartrmastr
|
db/tables/equip_traits.py
|
1
|
5244
|
import sqlite3
from db import get_connection, get_from_datamaster, get_equip_keys
from db.tables import equips, traits
requirements = [equips, traits]
def build():
with get_connection() as con:
con.row_factory = sqlite3.Row
cur = con.cursor()
equip_ids_by_name = get_equip_keys(cur)
cur.execute("SELECT Text, Id FROM Traits")
trait_ids_by_text = {cur_row[0]: cur_row[1]
for cur_row in cur.fetchall()}
cur.execute("PRAGMA foreign_keys = ON")
cur.execute("DROP TABLE IF EXISTS EquipTraits")
cur.execute("CREATE TABLE EquipTraits("
"Id INTEGER PRIMARY KEY AUTOINCREMENT, "
"Equip INTEGER, "
"Trait INTEGER, "
"FOREIGN KEY(Equip) REFERENCES Equips(Id), "
"FOREIGN KEY(Trait) REFERENCES Traits(Id))")
for csv_row in get_from_datamaster('EquipTraits.csv'):
cur.execute("INSERT INTO EquipTraits ("
"Equip, Trait)"
"VALUES (\"{}\", \"{}\")".format(
equip_ids_by_name[csv_row.get('EquipName')],
trait_ids_by_text[csv_row.get('Text')]))
def read():
con = get_connection()
con.row_factory = sqlite3.Row
with con:
cur = con.cursor()
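        # Each query below joins EquipTraits with one trait-detail table
        # (elements, statuses, stats, skills, misc properties); the per-type
        # result lists are concatenated into a single list of dicts.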
cur.execute("SELECT "
"EquipTraits.Id AS Id, "
"Traits.Id AS trait, "
"TraitTypes.TraitTypeName AS traitType, "
"Traits.Text AS text, "
"Equip AS equip, "
"ElementName AS element "
"FROM EquipTraits "
"JOIN Traits "
"ON EquipTraits.Trait = Traits.Id "
"JOIN Elements "
"ON TraitElements.Element = Elements.Id "
"JOIN TraitTypes "
"ON Traits.TraitType = TraitTypes.Id "
"JOIN TraitElements "
"WHERE TraitElements.Trait = Traits.Id ")
element_traits = [dict(row) for row in cur.fetchall()]
cur.execute("SELECT "
"EquipTraits.Id AS id, "
"Traits.Id AS trait, "
"TraitTypes.TraitTypeName AS traitType, "
"Traits.Text AS text, "
"Equip AS equip, "
"StatusName AS status "
"FROM EquipTraits "
"JOIN Traits "
"ON EquipTraits.Trait = Traits.Id "
"JOIN Statuses "
"ON TraitStatuses.Status = Statuses.Id "
"JOIN TraitTypes "
"ON Traits.TraitType = TraitTypes.Id "
"JOIN TraitStatuses "
"WHERE TraitStatuses.Trait = Traits.Id ")
        status_traits = [dict(row) for row in cur.fetchall()]
cur.execute("SELECT "
"EquipTraits.Id AS id, "
"Traits.Id AS trait, "
"TraitTypes.TraitTypeName AS traitType, "
"Traits.Text AS text, "
"Equip AS equip, "
"StatName AS stat "
"FROM EquipTraits "
"JOIN Traits "
"ON EquipTraits.Trait = Traits.Id "
"JOIN Stats "
"ON TraitStats.Stat = Stats.Id "
"JOIN TraitTypes "
"ON Traits.TraitType = TraitTypes.Id "
"JOIN TraitStats "
"WHERE TraitStats.Trait = Traits.Id")
        stat_traits = [dict(row) for row in cur.fetchall()]
cur.execute("SELECT "
"EquipTraits.Id AS id, "
"Traits.Id AS trait, "
"TraitTypes.TraitTypeName AS traitType, "
"Traits.Text AS text, "
"Equip AS equip, "
"SkillName AS skillName "
"FROM EquipTraits "
"JOIN Traits "
"ON EquipTraits.Trait = Traits.Id "
"JOIN TraitTypes "
"ON Traits.TraitType = TraitTypes.Id "
"JOIN TraitSkills "
"WHERE TraitSkills.Trait = Traits.Id")
skill_traits = [dict(row) for row in cur.fetchall()]
cur.execute("SELECT "
"EquipTraits.Id AS id, "
"Traits.Id AS trait, "
"TraitTypes.TraitTypeName AS traitType, "
"Traits.Text AS text, "
"Equip AS equip, "
"Miscprop AS property "
"FROM EquipTraits "
"JOIN Traits "
"ON EquipTraits.Trait = Traits.Id "
"JOIN TraitTypes "
"ON Traits.TraitType = TraitTypes.Id "
"JOIN TraitMiscprops "
"WHERE TraitMiscprops.Trait = Traits.Id")
misc_traits = [dict(row) for row in cur.fetchall()]
        return element_traits + status_traits + stat_traits + skill_traits + misc_traits
|
mit
| 2,389,829,314,110,966,300 | 40.291339 | 88 | 0.466438 | false | 4.301887 | false | false | false |
aarontuor/antk
|
antk/scripts/dssmgrid.py
|
1
|
2443
|
from __future__ import print_function
import argparse
from antk.core import loader
import numpy
def return_parser():
parser = argparse.ArgumentParser(description="Command line utility for performing grid search on a matrix factorization model.")
parser.add_argument("datadir", type=str,
help="data directory for conducting search.")
parser.add_argument("configfile", type=str,
help="Config file for conducting search.")
parser.add_argument("logfile", type=str,
help="log file for conducting search.")
return parser
if __name__ == '__main__':
args = return_parser().parse_args()
data = loader.read_data_sets(args.datadir, folders=['item', 'user', 'dev', 'test', 'train'])
data.train.labels['ratings'] = loader.center(data.train.labels['ratings'])
data.dev.labels['ratings'] = loader.center(data.dev.labels['ratings'])
data.user.features['age'] = loader.center(data.user.features['age'])
data.item.features['year'] = loader.center(data.item.features['year'])
data.user.features['age'] = loader.max_norm(data.user.features['age'])
data.item.features['year'] = loader.max_norm(data.item.features['year'])
data.dev.features['time'] = loader.center(data.dev.features['time'])
data.dev.features['time'] = loader.max_norm(data.dev.features['time'])
data.train.features['time'] = loader.center(data.train.features['time'])
data.train.features['time'] = loader.max_norm(data.train.features['time'])
# x = dsmodel.dssm(data, args.configfile)
    mb = [500, 1000, 10000, 20000, 40000, 80000, 50, 100, 200]
arguments = [[data],
[args.configfile],
[0.00001],
[2, 5, 10, 20, 50, 100, 200, 500, 1000],
[0.0001, 0.001, 0.01, 0.1, 0.3, 1],
mb,
[0.0001, 0.0003, 0.001, 0.003, 0.01, 0.03, 0.1, 0.3],
[True],
map(lambda x: 10*numpy.ceil(numpy.log(data.train.num_examples / x)), mb),
[1000],
[500]]
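    # Each inner list holds the values to sweep for the matching entry in
    # argumentnames below; the maxbadcount values are derived from the
    # minibatch sizes as 10 * ceil(log(num_examples / mb)).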
argumentnames = ['data',
'config',
'initrange',
'kfactors',
'lamb',
'mb',
'learnrate',
'verbose',
'maxbadcount',
'epochs',
'random_seed']
# antsearch.gridsearch(args.logfile, '.', 'dsmodel', 'dssm', arguments, argumentnames)
|
mit
| -7,314,639,539,161,021,000 | 41.859649 | 132 | 0.57675 | false | 3.619259 | false | false | false |
kidscancode/gamedev
|
tutorials/examples/pathfinding/part2_test.py
|
1
|
4760
|
import pygame as pg
from collections import deque
from os import path
vec = pg.math.Vector2
TILESIZE = 48
GRIDWIDTH = 28
GRIDHEIGHT = 15
WIDTH = TILESIZE * GRIDWIDTH
HEIGHT = TILESIZE * GRIDHEIGHT
FPS = 30
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
CYAN = (0, 255, 255)
MAGENTA = (255, 0, 255)
YELLOW = (255, 255, 0)
DARKGRAY = (40, 40, 40)
LIGHTGRAY = (140, 140, 140)
pg.init()
screen = pg.display.set_mode((WIDTH, HEIGHT))
clock = pg.time.Clock()
class SquareGrid:
def __init__(self, width, height):
self.width = width
self.height = height
self.walls = []
self.connections = [vec(1, 0), vec(-1, 0), vec(0, 1), vec(0, -1)]
def in_bounds(self, node):
return 0 <= node.x < self.width and 0 <= node.y < self.height
def passable(self, node):
return node not in self.walls
def find_neighbors(self, node):
neighbors = [node + connection for connection in self.connections]
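        # Reverse the order on alternating tiles (an aesthetic trick so the
        # resulting arrows do not all bias one direction), then drop tiles
        # that are out of bounds or blocked by walls.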
if (node.x + node.y) % 2 == 0:
neighbors.reverse()
neighbors = filter(self.in_bounds, neighbors)
neighbors = filter(self.passable, neighbors)
return neighbors
def draw(self):
for wall in self.walls:
rect = pg.Rect(wall * TILESIZE, (TILESIZE, TILESIZE))
pg.draw.rect(screen, LIGHTGRAY, rect)
def vec2int(v):
return (int(v.x), int(v.y))
def flow_field(graph, start):
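    # Breadth-first search outward from `start`: for every reachable tile,
    # store the step vector pointing back toward the tile it was reached
    # from, producing a flow field that leads to the start from anywhere.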
frontier = deque()
frontier.append(start)
path = {}
path[vec2int(start)] = None
while len(frontier) > 0:
current = frontier.popleft()
for next in graph.find_neighbors(current):
if vec2int(next) not in path:
frontier.append(next)
path[vec2int(next)] = current - next
return path
def draw_grid():
for x in range(0, WIDTH, TILESIZE):
pg.draw.line(screen, LIGHTGRAY, (x, 0), (x, HEIGHT))
for y in range(0, HEIGHT, TILESIZE):
pg.draw.line(screen, LIGHTGRAY, (0, y), (WIDTH, y))
x = start.x * TILESIZE + TILESIZE / 2
y = start.y * TILESIZE + TILESIZE / 2
r = star_img.get_rect(center=(x, y))
screen.blit(star_img, r)
icon_dir = path.join(path.dirname(__file__), '../icons')
star_img = pg.image.load(path.join(icon_dir, 'star.png')).convert_alpha()
star_img.fill((0, 255, 0, 255), special_flags=pg.BLEND_RGBA_MULT)
star_img = pg.transform.scale(star_img, (50, 50))
arrows = {}
arrow_img = pg.image.load(path.join(icon_dir, 'arrowRight.png')).convert_alpha()
arrow_img = pg.transform.scale(arrow_img, (50, 50))
for dir in [(1, 0), (0, 1), (-1, 0), (0, -1)]:
arrows[dir] = pg.transform.rotate(arrow_img, vec(dir).angle_to(vec(1, 0)))
g = SquareGrid(GRIDWIDTH, GRIDHEIGHT)
walls = [(10, 7), (11, 7), (12, 7), (13, 7), (14, 7), (15, 7), (16, 7), (7, 7), (6, 7), (5, 7), (5, 5), (5, 6), (1, 6), (2, 6), (3, 6), (5, 10), (5, 11), (5, 12), (5, 9), (5, 8), (12, 8), (12, 9), (12, 10), (12, 11), (15, 14), (15, 13), (15, 12), (15, 11), (15, 10), (17, 7), (18, 7), (21, 7), (21, 6), (21, 5), (21, 4), (21, 3), (22, 5), (23, 5), (24, 5), (25, 5), (18, 10), (20, 10), (19, 10), (21, 10), (22, 10), (23, 10), (14, 4), (14, 5), (14, 6), (14, 0), (14, 1), (9, 2), (9, 1), (7, 3), (8, 3), (10, 3), (9, 3), (11, 3), (2, 5), (2, 4), (2, 3), (2, 2), (2, 0), (2, 1), (0, 11), (1, 11), (2, 11), (21, 2), (20, 11), (20, 12), (23, 13), (23, 14), (24, 10), (25, 10), (6, 12), (7, 12), (10, 12), (11, 12), (12, 12), (5, 3), (6, 3), (5, 4)]
for wall in walls:
g.walls.append(vec(wall))
start = vec(0, 0)
path = flow_field(g, start)
running = True
while running:
clock.tick(FPS)
for event in pg.event.get():
if event.type == pg.QUIT:
running = False
if event.type == pg.KEYDOWN:
if event.key == pg.K_ESCAPE:
running = False
if event.key == pg.K_m:
# dump the wall list for saving
print([(int(loc.x), int(loc.y)) for loc in g.walls])
if event.type == pg.MOUSEBUTTONDOWN:
mpos = vec(pg.mouse.get_pos()) // TILESIZE
if event.button == 1:
if mpos in g.walls:
g.walls.remove(mpos)
else:
g.walls.append(mpos)
if event.button == 3:
start = mpos
path = flow_field(g, start)
pg.display.set_caption("{:.2f}".format(clock.get_fps()))
screen.fill(DARKGRAY)
draw_grid()
g.draw()
for n, d in path.items():
if d:
x, y = n
x = x * TILESIZE + TILESIZE / 2
y = y * TILESIZE + TILESIZE / 2
img = arrows[vec2int(d)]
r = img.get_rect(center=(x, y))
screen.blit(img, r)
pg.display.flip()
|
mit
| -7,233,398,497,294,835,000 | 35.615385 | 744 | 0.532563 | false | 2.723112 | false | false | false |
nimbis/django-guardian
|
guardian/view_mixins.py
|
1
|
5462
|
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth.decorators import REDIRECT_FIELD_NAME
from django.http import HttpResponseForbidden
from django.http import HttpResponseRedirect
from django.utils.http import urlquote
from django.utils.decorators import method_decorator
from django.core.exceptions import ImproperlyConfigured
class LoginRequiredMixin(object):
"""
    A login required mixin for use with class-based views. This class is a light wrapper around the
`login_required` decorator and hence function parameters are just attributes defined on the class.
Due to parent class order traversal this mixin must be added as the left most
mixin of a view.
    The mixin has exactly the same flow as the `login_required` decorator:
If the user isn't logged in, redirect to settings.LOGIN_URL, passing the current
absolute path in the query string. Example: /accounts/login/?next=/polls/3/.
If the user is logged in, execute the view normally. The view code is free to
assume the user is logged in.
**Class Settings**
    `redirect_field_name` - defaults to "next"
`login_url` - the login url of your site
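
    **Example Usage** (view and URL names are illustrative)

        class MyProtectedView(LoginRequiredMixin, TemplateView):
            ...
            ### LoginRequiredMixin settings
            login_url = "/accounts/login/"
            redirect_field_name = "next"
            ...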
"""
redirect_field_name = REDIRECT_FIELD_NAME
login_url = None
@method_decorator(login_required(redirect_field_name=redirect_field_name, login_url=login_url))
def dispatch(self, request, *args, **kwargs):
return super(LoginRequiredMixin, self).dispatch(request, *args, **kwargs)
class PermissionRequiredMixin(object):
"""
A view mixin that verifies if the current logged in user has the specified permission
by wrapping the ``request.user.has_perm(..)`` method.
If a `get_object()` method is defined either manually or by including another mixin (for example
    ``SingleObjectMixin``) or ``self.object`` is defined, then the permission will be tested against
that specific instance.
.. NOTE: Testing of a permission against a specific object instance requires an authentication backend
        that supports object-level permissions. Please see ``django-guardian`` to add object level permissions to your project.
The mixin does the following:
If the user isn't logged in, redirect to settings.LOGIN_URL, passing the current
absolute path in the query string. Example: /accounts/login/?next=/polls/3/.
    If `raise_exception` is set to True then, rather than redirecting to the login page,
    a 403 (``HttpResponseForbidden``) response is returned.
    If the user is logged in and passes the permission check, then the view is executed
normally.
**Example Usage**
class FitterEditView(PermissionRequiredMixin, UpdateView):
...
### PermissionRequiredMixin settings
permission_required = 'fitters.change_fitter'
### other view settings
context_object_name="fitter"
queryset = Fitter.objects.all()
form_class = FitterForm
...
**Class Settings**
    `permission_required` - the permission to check, of the form "<app_label>.<permission codename>"
i.e. 'polls.can_vote' for a permission on a model in the polls application.
`login_url` - the login url of your site
`redirect_field_name - defaults to "next"
    `raise_exception` - defaults to False - return a 403 (``HttpResponseForbidden``) response if set to True
"""
### default class view settings
login_url = settings.LOGIN_URL
raise_exception = False
permission_required = None
    redirect_field_name = REDIRECT_FIELD_NAME
def dispatch(self, request, *args, **kwargs):
# call the parent dispatch first to pre-populate few things before we check for permissions
original_return_value = super(PermissionRequiredMixin, self).dispatch(request, *args, **kwargs)
# verify class settings
        if self.permission_required is None or len(self.permission_required.split('.')) != 2:
raise ImproperlyConfigured("'PermissionRequiredMixin' requires 'permission_required' attribute to be set to '<app_label>.<permission codename>' but is set to '%s' instead" % self.permission_required)
# verify permission on object instance if needed
has_permission = False
if hasattr(self, 'object') and self.object is not None:
has_permission = request.user.has_perm(self.permission_required, self.object)
elif hasattr(self, 'get_object') and callable(self.get_object):
has_permission = request.user.has_perm(self.permission_required, self.get_object())
else:
has_permission = request.user.has_perm(self.permission_required)
# user failed permission
if not has_permission:
if self.raise_exception:
return HttpResponseForbidden()
else:
path = urlquote(request.get_full_path())
tup = self.login_url, self.redirect_field_name, path
return HttpResponseRedirect("%s?%s=%s" % tup)
# user passed permission check so just return the result of calling .dispatch()
return original_return_value
|
bsd-2-clause
| 9,121,473,484,465,784,000 | 45.086207 | 211 | 0.643903 | false | 4.67637 | false | false | false |
martinogden/django-banner-rotator
|
banner_rotator/admin.py
|
1
|
3825
|
#-*- coding:utf-8 -*-
from __future__ import unicode_literals
import logging
from functools import update_wrapper
from django import forms, template
from django.contrib import admin
from django.contrib.admin.util import unquote
from django.db import models
from django.shortcuts import get_object_or_404, render_to_response
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy as _
from banner_rotator.models import Campaign, Place, Banner, Click
class PlaceAdmin(admin.ModelAdmin):
list_display = ('name', 'slug', 'size_str')
prepopulated_fields = {'slug': ('name',)}
class CampaignBannerInline(admin.StackedInline):
model = Banner
extra = 0
readonly_fields = ['views', 'clicks']
fields = ['is_active', 'places', 'name', 'url', 'file', 'weight', 'views', 'clicks']
formfield_overrides = {
models.ManyToManyField: {'widget': forms.CheckboxSelectMultiple},
}
class CampaignAdmin(admin.ModelAdmin):
list_display = ('name', 'created_at', 'updated_at')
fields = ('name',)
inlines = [CampaignBannerInline]
class BannerAdmin(admin.ModelAdmin):
list_display = ('name', 'campaign', 'weight', 'url', 'views', 'is_active')
list_filter = ('campaign', 'places', 'is_active')
date_hierarchy = 'created_at'
fieldsets = (
(_('Main'), {
'fields': ('campaign', 'places', 'name', 'url', 'url_target', 'file', 'alt'),
}),
(_('Show'), {
'fields': ('weight', 'views', 'max_views', 'clicks', 'max_clicks', 'start_at', 'finish_at', 'is_active'),
})
)
filter_horizontal = ('places',)
readonly_fields = ('views', 'clicks',)
object_log_clicks_template = None
def get_urls(self):
try:
# Django 1.4
from django.conf.urls import patterns, url
except ImportError:
from django.conf.urls.defaults import patterns, url
def wrap(view):
def wrapper(*args, **kwargs):
return self.admin_site.admin_view(view)(*args, **kwargs)
return update_wrapper(wrapper, view)
info = self.model._meta.app_label, self.model._meta.module_name
urlpatterns = patterns('',
url(r'^$', wrap(self.changelist_view), name='%s_%s_changelist' % info),
url(r'^add/$', wrap(self.add_view), name='%s_%s_add' % info),
url(r'^(.+)/history/$', wrap(self.history_view), name='%s_%s_history' % info),
url(r'^(.+)/delete/$', wrap(self.delete_view), name='%s_%s_delete' % info),
url(r'^(.+)/log/clicks/$', wrap(self.log_clicks_view), name='%s_%s_log_clicks' % info),
url(r'^(.+)/$', wrap(self.change_view), name='%s_%s_change' % info),
)
return urlpatterns
def log_clicks_view(self, request, object_id, extra_context=None):
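        # Admin view listing all recorded clicks for one banner, newest first,
        # rendered via the usual per-model / per-app admin template lookup.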
model = self.model
opts = model._meta
app_label = opts.app_label
obj = get_object_or_404(model, pk=unquote(object_id))
context = {
'title': _('Log clicks'),
'module_name': capfirst(opts.verbose_name_plural),
'object': obj,
'app_label': app_label,
'log_clicks': Click.objects.filter(banner=obj).order_by('-datetime')
}
context.update(extra_context or {})
context_instance = template.RequestContext(request, current_app=self.admin_site.name)
return render_to_response(self.object_log_clicks_template or [
"admin/%s/%s/object_log_clicks.html" % (app_label, opts.object_name.lower()),
"admin/%s/object_log_clicks.html" % app_label,
], context, context_instance=context_instance)
admin.site.register(Banner, BannerAdmin)
admin.site.register(Campaign, CampaignAdmin)
admin.site.register(Place, PlaceAdmin)
|
mit
| 6,486,784,982,969,740,000 | 35.778846 | 117 | 0.608889 | false | 3.601695 | false | false | false |
DailyActie/Surrogate-Model
|
01-codes/deap-master/examples/coev/coop_evol.py
|
1
|
6195
|
# This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
"""This example contains the evolving test from *Potter, M. and De Jong, K.,
2001, Cooperative Coevolution: An Architecture for Evolving Co-adapted
Subcomponents.* section 4.2.4. The number of species is evolved by adding and
removing species as stagnation occurs.
"""
import random
try:
import matplotlib.pyplot as plt
plt.figure()
except:
plt = False
import numpy
from deap import algorithms
from deap import tools
import coop_base
IND_SIZE = coop_base.IND_SIZE
SPECIES_SIZE = coop_base.SPECIES_SIZE
NUM_SPECIES = 1
TARGET_SIZE = 30
IMPROVMENT_TRESHOLD = 0.5
IMPROVMENT_LENGTH = 5
EXTINCTION_TRESHOLD = 5.0
noise = "*##*###*###*****##*##****#*##*###*#****##******##*#**#*#**######"
schematas = ("1##1###1###11111##1##1111#1##1###1#1111##111111##1#11#1#11######",
"1##1###1###11111##1##1000#0##0###0#0000##000000##0#00#0#00######",
"0##0###0###00000##0##0000#0##0###0#0000##001111##1#11#1#11######")
toolbox = coop_base.toolbox
toolbox.register("evaluateContribution", coop_base.matchSetContribution)
def main(extended=True, verbose=True):
target_set = []
species = []
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("avg", numpy.mean)
stats.register("std", numpy.std)
stats.register("min", numpy.min)
stats.register("max", numpy.max)
logbook = tools.Logbook()
logbook.header = "gen", "species", "evals", "std", "min", "avg", "max"
ngen = 300
g = 0
for i in range(len(schematas)):
size = int(TARGET_SIZE / len(schematas))
target_set.extend(toolbox.target_set(schematas[i], size))
species = [toolbox.species() for _ in range(NUM_SPECIES)]
species_index = list(range(NUM_SPECIES))
last_index_added = species_index[-1]
# Init with random a representative for each species
representatives = [random.choice(species[i]) for i in range(NUM_SPECIES)]
best_fitness_history = [None] * IMPROVMENT_LENGTH
if plt and extended:
contribs = [[]]
stag_gen = []
collab = []
while g < ngen:
# Initialize a container for the next generation representatives
next_repr = [None] * len(species)
for (i, s), j in zip(enumerate(species), species_index):
# Vary the species individuals
s = algorithms.varAnd(s, toolbox, 0.6, 1.0)
# Get the representatives excluding the current species
r = representatives[:i] + representatives[i + 1:]
for ind in s:
# Evaluate and set the individual fitness
ind.fitness.values = toolbox.evaluate([ind] + r, target_set)
record = stats.compile(s)
logbook.record(gen=g, species=j, evals=len(s), **record)
if verbose:
print(logbook.stream)
# Select the individuals
species[i] = toolbox.select(s, len(s)) # Tournament selection
next_repr[i] = toolbox.get_best(s)[0] # Best selection
if plt and extended:
# Book keeping of the collaborative fitness
collab.append(next_repr[i].fitness.values[0])
g += 1
representatives = next_repr
# Keep representatives fitness for stagnation detection
best_fitness_history.pop(0)
best_fitness_history.append(representatives[0].fitness.values[0])
try:
diff = best_fitness_history[-1] - best_fitness_history[0]
except TypeError:
diff = float("inf")
if plt and extended:
for (i, rep), j in zip(enumerate(representatives), species_index):
contribs[j].append((toolbox.evaluateContribution(representatives,
target_set, i)[0], g - 1))
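        # Stagnation handling: if the best collaborative fitness has not
        # improved enough over the last IMPROVMENT_LENGTH generations, remove
        # species whose contribution fell below the extinction threshold and
        # append a fresh, randomly initialised species.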
if diff < IMPROVMENT_TRESHOLD:
if len(species) > 1:
contributions = []
for i in range(len(species)):
contributions.append(toolbox.evaluateContribution(representatives, target_set, i)[0])
for i in reversed(range(len(species))):
if contributions[i] < EXTINCTION_TRESHOLD:
species.pop(i)
species_index.pop(i)
representatives.pop(i)
last_index_added += 1
best_fitness_history = [None] * IMPROVMENT_LENGTH
species.append(toolbox.species())
species_index.append(last_index_added)
representatives.append(random.choice(species[-1]))
if extended and plt:
stag_gen.append(g - 1)
contribs.append([])
if extended:
for r in representatives:
# print final representatives without noise
print("".join(str(x) for x, y in zip(r, noise) if y == "*"))
    if extended and plt:  # Plotting of the evolution
line1, = plt.plot(collab, "--", color="k")
for con in contribs:
try:
con, g = zip(*con)
line2, = plt.plot(g, con, "-", color="k")
except ValueError:
pass
axis = plt.axis("tight")
for s in stag_gen:
plt.plot([s, s], [0, axis[-1]], "--", color="k")
plt.legend((line1, line2), ("Collaboration", "Contribution"), loc="center right")
plt.xlabel("Generations")
plt.ylabel("Fitness")
plt.show()
if __name__ == "__main__":
main()
|
mit
| 5,767,746,708,003,492,000 | 33.226519 | 105 | 0.585149 | false | 3.62069 | false | false | false |
ragupta-git/ImcSdk
|
imcsdk/mometa/bios/BiosVfLegacyUSBSupport.py
|
1
|
3878
|
"""This module contains the general information for BiosVfLegacyUSBSupport ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class BiosVfLegacyUSBSupportConsts:
VP_LEGACY_USBSUPPORT_AUTO = "Auto"
VP_LEGACY_USBSUPPORT_DISABLED = "Disabled"
VP_LEGACY_USBSUPPORT_ENABLED = "Enabled"
_VP_LEGACY_USBSUPPORT_AUTO = "auto"
_VP_LEGACY_USBSUPPORT_DISABLED = "disabled"
_VP_LEGACY_USBSUPPORT_ENABLED = "enabled"
VP_LEGACY_USBSUPPORT_PLATFORM_DEFAULT = "platform-default"
class BiosVfLegacyUSBSupport(ManagedObject):
"""This is BiosVfLegacyUSBSupport class."""
consts = BiosVfLegacyUSBSupportConsts()
naming_props = set([])
mo_meta = {
"classic": MoMeta("BiosVfLegacyUSBSupport", "biosVfLegacyUSBSupport", "LegacyUSB-Support", VersionMeta.Version151f, "InputOutput", 0x1f, [], ["admin"], [u'biosPlatformDefaults', u'biosSettings'], [], ["Get", "Set"]),
"modular": MoMeta("BiosVfLegacyUSBSupport", "biosVfLegacyUSBSupport", "LegacyUSB-Support", VersionMeta.Version2013e, "InputOutput", 0x1f, [], ["admin"], [u'biosPlatformDefaults', u'biosSettings'], [], ["Get", "Set"])
}
prop_meta = {
"classic": {
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"vp_legacy_usb_support": MoPropertyMeta("vp_legacy_usb_support", "vpLegacyUSBSupport", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["Auto", "Disabled", "Enabled", "auto", "disabled", "enabled", "platform-default"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version151f, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
},
"modular": {
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"vp_legacy_usb_support": MoPropertyMeta("vp_legacy_usb_support", "vpLegacyUSBSupport", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["Disabled", "Enabled", "auto", "disabled", "enabled", "platform-default"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
},
}
prop_map = {
"classic": {
"dn": "dn",
"rn": "rn",
"status": "status",
"vpLegacyUSBSupport": "vp_legacy_usb_support",
"childAction": "child_action",
},
"modular": {
"dn": "dn",
"rn": "rn",
"status": "status",
"vpLegacyUSBSupport": "vp_legacy_usb_support",
"childAction": "child_action",
},
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.status = None
self.vp_legacy_usb_support = None
self.child_action = None
ManagedObject.__init__(self, "BiosVfLegacyUSBSupport", parent_mo_or_dn, **kwargs)
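# Illustrative usage sketch (added for exposition; not part of the generated
# ImcSdk module). It builds the managed object locally with one of the allowed
# values declared above; the parent DN string is an assumption and would come
# from the target server's BIOS settings object in real use.
if __name__ == "__main__":
    mo = BiosVfLegacyUSBSupport(
        parent_mo_or_dn="sys/rack-unit-1/bios/bios-settings",
        vp_legacy_usb_support=BiosVfLegacyUSBSupportConsts.VP_LEGACY_USBSUPPORT_ENABLED)
    print(mo.vp_legacy_usb_support)  # expected to show "Enabled"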
|
apache-2.0
| -8,197,067,225,577,513,000 | 49.363636 | 274 | 0.625064 | false | 3.465594 | false | false | false |
spino327/sdr_testbed
|
DistributedTestbed/receiver/RXGui.py
|
1
|
8188
|
'''
Copyright (c) 2010, Universidad Industrial de Santander, Colombia
University of Delaware
All rights reserved.
@author: Sergio Pino
@author: Henry Arguello
Website: http://www.eecis.udel.edu/
emails : [email protected] - [email protected]
Date : Nov, 2010
'''
from gnuradio.wxgui import fftsink2, scopesink2
from gnuradio import gr
from gnuradio import blks2
from grc_gnuradio import wxgui
from gnuradio.wxgui import forms
import wx
class RXGui(gr.hier_block2):
'''
    This class has several input ports: the first one is for the Antenna signal and the second is for
    the demodulator output.
    This class constructs the GUI for the application. Flow graph:
          2              2              2
      --->(throttle)--->(resampler)--->(fft)
                                   |  2
                                   --->(scope)
      2 fft   = 1 for raw antenna samples and 1 for matched filter
      2 scope = 1 for raw antenna samples and 1 for matched filter
'''
def __init__(self, app, gain, fc, samp_rate, inter, dec):
'''
in:
- app = object of type RXApp
        - gain = receiver gain
        - fc = center frequency in Hertz
        - samp_rate = sample rate in Hertz
- inter = interpolation factor
- dec = decimation factor
'''
gr.hier_block2.__init__(self, "RXGui",
gr.io_signature(2, 2, gr.sizeof_gr_complex),
gr.io_signature(0, 0, 0))
# instance variables
self.app = app
self.gui = wxgui.top_block_gui("BPSK RX")
self.nb = self.__createNoteBook()
# controls
self.gainTextBox = forms.text_box(
parent=self.gui.GetWin(),
value=gain,
callback=self.setGain,
label="gain",
converter=forms.float_converter(),
)
self.fcTextBox = forms.text_box(
parent=self.gui.GetWin(),
value=fc,
callback=self.setFc,
label="fc",
converter=forms.float_converter(),
)
self.startButton = wx.Button(self.gui.GetWin(), label="Record")
self.startButton.Bind(wx.EVT_BUTTON, self.startRecording)
# adding the visual components to the notebook
self.gui.Add(self.gainTextBox)
self.gui.Add(self.fcTextBox)
self.gui.Add(self.startButton)
#EO Controls
# for port 1 Antenna samples (COMPLEX)
self.throttleAn = gr.throttle(gr.sizeof_gr_complex, samp_rate)
# resampler Antenna
if inter == 1 and dec == 1:
self.resamplerAn = gr.multiply_const_vcc((1,))
print("i: resamplerAn not need")
else:
self.resamplerAn = blks2.rational_resampler_ccc(
interpolation=inter,
decimation=dec,
taps=None,
fractional_bw=None,
)
# self.cmp2arg1 = gr.complex_to_arg()
self.fftAn = fftsink2.fft_sink_c(
self.nb.GetPage(0).GetWin(),
baseband_freq=0,
y_per_div=5,
y_divs=10,
ref_level=-40,
sample_rate= inter*samp_rate/dec,
fft_size=512,
fft_rate=10,
average=True,
avg_alpha=0.1,
title="FFT Plot Antenna",
peak_hold=False,
)
self.scope_IQAn = scopesink2.scope_sink_c(
self.nb.GetPage(1).GetWin(),
title="Scope IQ Antenna",
sample_rate = inter*samp_rate/dec,
v_scale=0.001,
t_scale=0.001,
ac_couple=False,
xy_mode=False,
num_inputs=1,
)
# adding the visual components to the notebook
self.nb.GetPage(0).Add(self.fftAn.win)
self.nb.GetPage(1).Add(self.scope_IQAn.win)
# for port 2 Matched filter (COMPLEX)
self.throttleMF = gr.throttle(gr.sizeof_gr_complex, samp_rate)
# resampler MF
if inter == 1 and dec == 1:
self.resamplerMF = gr.multiply_const_vcc((1,))
print("i: resamplerMF not need")
else:
self.resamplerMF = blks2.rational_resampler_ccc(
interpolation=inter,
decimation=dec,
taps=None,
fractional_bw=None,
)
# self.cmp2arg1 = gr.complex_to_arg()
self.fftMF = fftsink2.fft_sink_c(
self.nb.GetPage(2).GetWin(),
baseband_freq=0,
y_per_div=5,
y_divs=10,
ref_level=-40,
sample_rate= inter*samp_rate/dec,
fft_size=512,
fft_rate=10,
average=True,
avg_alpha=0.1,
title="FFT Plot MF",
peak_hold=False,
)
self.scope_IQMF = scopesink2.scope_sink_c(
self.nb.GetPage(3).GetWin(),
title="Scope IQ MF",
sample_rate = inter*samp_rate/dec,
v_scale=0.0005,
t_scale=0.001,
ac_couple=False,
xy_mode=False,
num_inputs=1,
)
# adding the visual components to the notebook
self.nb.GetPage(2).Add(self.fftMF.win)
self.nb.GetPage(3).Add(self.scope_IQMF.win)
# end of MF
self.__makeConnections()
def __createNoteBook(self):
'''
creates the NoteBook
'''
n1 = wx.Notebook(self.gui.GetWin(), style=wx.NB_RIGHT)
n1.AddPage(wxgui.Panel(n1), "fft Ant")
n1.AddPage(wxgui.Panel(n1), "scopeIQ Ant")
n1.AddPage(wxgui.Panel(n1), "fft MF")
n1.AddPage(wxgui.Panel(n1), "scopeIQ MF", True)
self.gui.Add(n1)
return n1
def __makeConnections(self):
'''
uses the method connect(src, des)
'''
#Port 1
self.connect((self, 0), (self.throttleAn, 0))
self.connect((self.throttleAn, 0), (self.resamplerAn, 0))
self.connect((self.resamplerAn, 0), (self.fftAn, 0))
self.connect((self.resamplerAn, 0), (self.scope_IQAn, 0))
# self.connect((self.resamplerAn, 0), (self.cmp2arg1, 0))
# self.connect((self.cmp2arg1, 0), (self.fftAn, 0))
# self.connect((self.cmp2arg1, 0), (self.scope_IQAn, 0))
# null_sink = gr.null_sink(gr.sizeof_gr_complex*1)
# self.connect((self, 0), null_sink)
#Port 2
self.connect((self, 1), (self.throttleMF, 0))
self.connect((self.throttleMF, 0), (self.resamplerMF, 0))
self.connect((self.resamplerMF, 0), (self.fftMF, 0))
self.connect((self.resamplerMF, 0), (self.scope_IQMF, 0))
# self.connect((self.resamplerDem, 0), (self.cmp2arg2, 0))
# self.connect((self.cmp2arg2, 0), (self.fftDem, 0))
# self.connect((self.cmp2arg2, 0), (self.scope_IQDem, 0))
def Run(self):
'''
calls the Run method in the gui object
'''
self.gui.Run(True)
def setFc(self, fc):
self.fcTextBox.set_value(fc)
self.app.setFc(fc)
def setGain(self, gain):
self.gainTextBox.set_value(gain)
self.app.setGain(gain)
def startRecording(self, event):
self.app.startRecording()
#if __name__ == "__main__":
#
# tb = gr.top_block()
# signalRaw = gr.sig_source_c(1e4, gr.GR_SIN_WAVE, 350, 1)
# signalDem = gr.sig_source_c(1e4, gr.GR_TRI_WAVE, 200, 1)
# signalCL = gr.sig_source_c(1e4, gr.GR_SIN_WAVE, 350, 1)
# signalAGC = gr.sig_source_c(1e4, gr.GR_TRI_WAVE, 200, 1)
# temp = RXGui(None, 1, 0, 1e4, 1, 1)
# tb.connect(signalRaw, (temp, 0))
# tb.connect(signalAGC, (temp, 1))
# tb.connect(signalCL, (temp, 2))
# tb.connect(signalDem, (temp, 3))
# tb.start()
# temp.Run()
|
apache-2.0
| -2,311,429,111,140,030,000 | 32.153846 | 102 | 0.51319 | false | 3.487223 | false | false | false |
nfqsolutions/nfq-conductor
|
nfq/conductor/server.py
|
1
|
5092
|
# NFQ Conductor. A tool for centralizing and visualizing logs.
# Copyright (C) 2017 Guillem Borrell Nogueras
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
import logging
import os
from datetime import datetime
from functools import partial
import zmq
from tornado import web
from tornado.options import options
from zmq.eventloop import ioloop, zmqstream
# Global variables for cached content. Linters will say it is not used.
from nfq.conductor.config import root_path
from nfq.conductor.db import Process, Daemon
from nfq.conductor.web import ConfigHandler, DeleteHandler, RelaunchHandler
from nfq.conductor.web import DaemonsHandler, DaemonHandler, ResetHandler, \
IndexHandler, LastLogsHandler, ComponentHandler, RestActiveHandler, \
RestLastHandler, RestPageHandler, RestCoLastHandler
from nfq.conductor.ws import WSHandler
from nfq.conductor.db import engine, Base, LogEntry, session, clients
ioloop.install()
def process_log(messages):
global clients
for message in messages:
parsed = json.loads(message.decode())
entry = LogEntry(
source=parsed['source'],
when=datetime.strptime(parsed['when'], "%Y-%m-%dT%H:%M:%S.%f"),
message=parsed['message']
)
session.add(entry)
sub_message = parsed['message']
if sub_message.startswith('~~~~'):
sub_message = sub_message.strip('~')
sub_parsed = json.loads(sub_message)
process = Process(
process=sub_parsed['process'],
wrapped=sub_parsed['wrapped'],
when=datetime.strptime(parsed['when'], "%Y-%m-%dT%H:%M:%S.%f"),
host=sub_parsed['host'],
source=parsed['source'],
label=sub_parsed['label'],
command=sub_parsed['command'],
running=True
)
session.add(process)
logging.info('Added process {}'.format(sub_parsed['label']))
elif sub_message.startswith('^^^^'):
sub_message = sub_message.strip('^')
logging.info(sub_message)
sub_parsed = json.loads(sub_message)
daemon = Daemon(
ip=sub_parsed['ip'],
uuid=sub_parsed['uuid'],
when=datetime.strptime(parsed['when'], "%Y-%m-%dT%H:%M:%S.%f"),
port=sub_parsed['port'],
active=True
)
session.add(daemon)
logging.info('Added daemon {}'.format(sub_parsed['uuid']))
# Manage subscriptions
for client in clients:
if client.subscription and client.subscription.findall(parsed['message']):
client.client.write_message(parsed['message'])
def collector(address):
"""
Process that collects all logs and saves them to a database
"""
context = zmq.Context()
socket = context.socket(zmq.PULL)
socket.bind(address)
stream_pull = zmqstream.ZMQStream(socket)
stream_pull.on_recv(process_log)
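# Illustrative sketch (added for exposition; not part of nfq-conductor): a minimal
# producer for the collector above. It pushes one JSON log entry, with the same
# 'source'/'when'/'message' fields that process_log() parses, over a ZMQ PUSH
# socket. The address and field values are made up for the example.
def example_producer(address='tcp://127.0.0.1:5555'):
    context = zmq.Context()
    socket = context.socket(zmq.PUSH)
    socket.connect(address)
    entry = {
        'source': 'example-source',
        'when': datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f'),
        'message': 'plain log line',
    }
    socket.send(json.dumps(entry).encode())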
def make_app():
return web.Application([
(r'/', IndexHandler),
(r'/ws', WSHandler),
(r'/last/([0-9]+)', LastLogsHandler),
(r'/co/(.+)/([0-9]+)', ComponentHandler),
(r'/api/active_last/([0-9]+)', RestActiveHandler),
(r'/api/last/co/(.+)/([0-9]+)', RestCoLastHandler),
(r'/api/last/([0-9]+)', RestLastHandler),
(r'/api/page/([0-9]+)/count/([0-9]+)', RestPageHandler),
(r'/conductor', DaemonsHandler),
(r'/reset', ResetHandler),
(r'/config', ConfigHandler),
(r'/relaunch/(.+)', RelaunchHandler),
(r'/daemon/(.+)', DaemonHandler),
(r'/daemon_delete/(.+)', DeleteHandler),
(r'/(favicon.ico)', web.StaticFileHandler,
{'path': os.path.join(root_path, 'img', 'favicon.ico')}),
(r'/css/(.*)', web.StaticFileHandler,
{'path': os.path.join(root_path, 'css')}),
(r'/js/(.*)', web.StaticFileHandler,
{'path': os.path.join(root_path, 'js')})
], autoreload=False) # Remove
def run():
# Configure DB stuff
logging.info('Updating DB tables...')
Base.metadata.create_all(engine)
logging.info('Done')
app = make_app()
app.listen(options.port)
ioloop.IOLoop.instance().run_sync(
partial(collector, address=options.collector)
)
logging.info('Starting event loop...')
ioloop.IOLoop.current().start()
if __name__ == '__main__':
run()
|
agpl-3.0
| 8,734,836,519,722,331,000 | 33.639456 | 86 | 0.616064 | false | 3.898928 | false | false | false |
akhilaananthram/nupic.research
|
classification/LanguageSensor.py
|
1
|
9726
|
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy
from nupic.regions.PyRegion import PyRegion
class LanguageSensor(PyRegion):
"""
LanguageSensor (LS) is an extensible sensor for text data.
The LS obtains info from a file, csv or txt (not yet implemented).
An LS is essentially a shell containing two objects:
1. A DataSource object gets one record at a time. This record is returned
as a dict object. For example, a DataSource might return:
defaultdict(sample="Hello world!", labels=["Python"])
2. An encoder from nupic.fluent/encoders
The DataSource and LanguageEncoder are supplied after the node is created,
not in the node itself.
"""
def __init__(self,
verbosity=0,
numCategories=1):
"""
Create a node without an encoder or datasource.
TODO: use self._outputValues for logging(?)
"""
self.numCategories = numCategories
self.verbosity = verbosity
# These fields are set outside when building the region.
self.encoder = None
self.dataSource = None
self._outputValues = {}
self._iterNum = 0
@classmethod
def getSpec(cls):
"""Return base spec for this region. See base class method for more info."""
spec = {
"description":"Sensor that reads text data records and encodes them for "
"an HTM network.",
"singleNodeOnly":True,
"outputs":{
"dataOut":{
"description":"Encoded text",
"dataType":"Real32",
"count":0,
"regionLevel":True,
"isDefaultOutput":True,
},
"categoryOut":{
"description":"Index of the current word's category.",
"dataType":"Real32",
"count":0,
"regionLevel":True,
"isDefaultOutput":False,
},
"resetOut":{
"description":"Boolean reset output.",
"dataType":"Real32",
"count":1,
"regionLevel":True,
"isDefaultOutput":False,
},
"sequenceIdOut":{
"description":"Sequence ID",
"dataType":'UInt64',
"count":1,
"regionLevel":True,
"isDefaultOutput":False,
},
## commented out b/c dataType not cool w/ numpy
# "sourceOut":{
# "description":"Unencoded data from the source, input to the encoder",
# "dataType":str,
# "count":0,
# "regionLevel":True,
# "isDefaultOutput":False,
# },
## need these...??
# spatialTopDownOut=dict(
# description="""The top-down output signal, generated from
# feedback from SP""",
# dataType='Real32',
# count=0,
# regionLevel=True,
# isDefaultOutput=False),
# temporalTopDownOut=dict(
# description="""The top-down output signal, generated from
# feedback from TP through SP""",
# dataType='Real32',
# count=0,
# regionLevel=True,
# isDefaultOutput=False),
# classificationTopDownOut=dict(
# description="The top-down input signal, generated via feedback "
# "from classifier through TP through SP.",
# dataType='Real32',
# count=0,
# regionLevel=True,
# isDefaultOutput=False),
},
"inputs":{
"spatialTopDownIn":{
"description":"The top-down input signal, generated via feedback "
"from SP.",
"dataType":"Real32",
"count":0,
"required":False,
"regionLevel":True,
"isDefaultInput":False,
"requireSplitterMap":False,
},
"temporalTopDownIn":{
"description":"The top-down input signal, generated via feedback "
"from TP through SP.",
"dataType":"Real32",
"count":0,
"required":False,
"regionLevel":True,
"isDefaultInput":False,
"requireSplitterMap":False,
},
"classificationTopDownIn":{
"description":"The top-down input signal, generated via feedback "
"from classifier through TP through SP.",
"dataType":"int",
"count":0,
"required":False,
"regionLevel":True,
"isDefaultInput":False,
"requireSplitterMap":False,
},
},
"parameters":{
"verbosity":{
"description":"Verbosity level",
"dataType":"UInt32",
"accessMode":"ReadWrite",
"count":1,
"constraints":"",
},
"numCategories":{
"description":("Total number of categories to expect from the "
"FileRecordStream"),
"dataType":"UInt32",
"accessMode":"ReadWrite",
"count":1,
"constraints":""},
},
"commands":{},
}
return spec
def initialize(self, inputs, outputs):
"""Initialize the node after the network is fully linked."""
if self.encoder is None:
raise Exception("Unable to initialize LanguageSensor -- encoder has not been set")
if self.dataSource is None:
raise Exception("Unable to initialize LanguageSensor -- dataSource has not been set")
def populateCategoriesOut(self, categories, output):
"""
Populate the output array with the category indices.
Note: non-categories are represented with -1.
"""
if categories[0] is None:
# The record has no entry in category field.
output[:] = -1
else:
# Populate category output array by looping over the smaller of the
# output array (size specified by numCategories) and the record's number
# of categories.
[numpy.put(output, [i], cat)
for i, (_, cat) in enumerate(zip(output, categories))]
output[len(categories):] = -1
def compute(self, inputs, outputs):
"""
Get a record from the dataSource and encode it. The fields for inputs and
outputs are as defined in the spec above.
    Expects the text data to be under the header "token" from the dataSource.
TODO: validate we're handling resets correctly
"""
data = self.dataSource.getNextRecordDict()
# The private keys in data are standard of RecordStreamIface objects. Any
# add'l keys are column headers from the data source.
# Copy important data input fields over to outputs dict. We set "sourceOut"
# explicitly b/c PyRegion.getSpec() won't take an output field w/ type str.
outputs["resetOut"][0] = data["_reset"]
outputs["sequenceIdOut"][0] = data["_sequenceId"]
outputs["sourceOut"] = data["_token"]
self.populateCategoriesOut(data["_category"], outputs['categoryOut'])
print outputs['categoryOut']
# Encode the token, where the encoding is a dict as expected in
# nupic.fluent ClassificationModel.
# The data key must match the datafile column header
# NOTE: this logic differs from RecordSensor, where output is a (sparse)
# numpy array populated in place. So we leave the data output alone for now,
# and (maybe) populate it in fluent.ClassificationModel.
outputs["encodingOut"] = self.encoder.encodeIntoArray(data["_token"], output=None)
self._iterNum += 1
def getOutputValues(self, outputName):
"""Return the dictionary of output values. Note that these are normal Python
lists, rather than numpy arrays. This is to support lists with mixed scalars
and strings, as in the case of records with categorical variables
"""
return self._outputValues[outputName]
def getOutputElementCount(self, name):
"""Returns the width of dataOut."""
if name == "resetOut" or name == "sequenceIdOut":
print ("WARNING: getOutputElementCount should not have been called with "
"{}.".format(name))
return 1
elif name == "dataOut":
if self.encoder == None:
raise Exception("Network requested output element count for {} on a "
"LanguageSensor node, but the encoder has not been set."
.format(name))
return self.encoder.getWidth()
elif name == "categoryOut":
return self.numCategories
elif (name == "sourceOut" or
name == 'spatialTopDownOut' or
name == 'temporalTopDownOut'):
if self.encoder == None:
raise Exception("Network requested output element count for {} on a "
"LanguageSensor node, but the encoder has not been set."
.format(name))
return len(self.encoder.getDescription())
else:
raise Exception("Unknown output {}.".format(name))
|
gpl-3.0
| 5,535,361,808,632,589,000 | 33.489362 | 91 | 0.599321 | false | 4.443125 | false | false | false |
google-research/fixmatch
|
ict.py
|
1
|
5150
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interpolation Consistency Training for Semi-Supervised Learning.
Reimplementation of https://arxiv.org/abs/1903.03825
"""
import functools
import os
import tensorflow as tf
from absl import app
from absl import flags
from libml import models, utils
from libml.data import PAIR_DATASETS
from libml.utils import EasyDict
FLAGS = flags.FLAGS
class ICT(models.MultiModel):
def model(self, batch, lr, wd, ema, warmup_pos, consistency_weight, beta, **kwargs):
hwc = [self.dataset.height, self.dataset.width, self.dataset.colors]
xt_in = tf.placeholder(tf.float32, [batch] + hwc, 'xt') # For training
x_in = tf.placeholder(tf.float32, [None] + hwc, 'x')
y_in = tf.placeholder(tf.float32, [batch, 2] + hwc, 'y')
l_in = tf.placeholder(tf.int32, [batch], 'labels')
l = tf.one_hot(l_in, self.nclass)
wd *= lr
warmup = tf.clip_by_value(tf.to_float(self.step) / (warmup_pos * (FLAGS.train_kimg << 10)), 0, 1)
y = tf.reshape(tf.transpose(y_in, [1, 0, 2, 3, 4]), [-1] + hwc)
y_1, y_2 = tf.split(y, 2)
mix = tf.distributions.Beta(beta, beta).sample([tf.shape(xt_in)[0], 1, 1, 1])
mix = tf.maximum(mix, 1 - mix)
classifier = lambda x, **kw: self.classifier(x, **kw, **kwargs).logits
logits_x = classifier(xt_in, training=True)
post_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) # Take only first call to update batch norm.
ema = tf.train.ExponentialMovingAverage(decay=ema)
ema_op = ema.apply(utils.model_vars())
ema_getter = functools.partial(utils.getter_ema, ema)
logits_teacher = classifier(y_1, training=True, getter=ema_getter)
labels_teacher = tf.stop_gradient(tf.nn.softmax(logits_teacher))
labels_teacher = labels_teacher * mix[:, :, 0, 0] + labels_teacher[::-1] * (1 - mix[:, :, 0, 0])
logits_student = classifier(y_1 * mix + y_1[::-1] * (1 - mix), training=True)
loss_mt = tf.reduce_mean((labels_teacher - tf.nn.softmax(logits_student)) ** 2, -1)
loss_mt = tf.reduce_mean(loss_mt)
loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=l, logits=logits_x)
loss = tf.reduce_mean(loss)
tf.summary.scalar('losses/xe', loss)
tf.summary.scalar('losses/mt', loss_mt)
post_ops.append(ema_op)
post_ops.extend([tf.assign(v, v * (1 - wd)) for v in utils.model_vars('classify') if 'kernel' in v.name])
train_op = tf.train.AdamOptimizer(lr).minimize(loss + loss_mt * warmup * consistency_weight,
colocate_gradients_with_ops=True)
with tf.control_dependencies([train_op]):
train_op = tf.group(*post_ops)
return EasyDict(
xt=xt_in, x=x_in, y=y_in, label=l_in, train_op=train_op,
classify_raw=tf.nn.softmax(classifier(x_in, training=False)), # No EMA, for debugging.
classify_op=tf.nn.softmax(classifier(x_in, getter=ema_getter, training=False)))
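# Illustrative sketch (added for exposition; not part of the original file): the
# interpolation-consistency term that model() builds above, restated for a single
# pair of unlabeled inputs. All names below are invented for the illustration.
#
#     m ~ Beta(beta, beta);  lam = max(m, 1 - m)
#     x_mixed  = lam * x_a + (1 - lam) * x_b                  # student input
#     p_target = lam * softmax(teacher(x_a)) + (1 - lam) * softmax(teacher(x_b))
#     loss_mt  = mean((p_target - softmax(student(x_mixed))) ** 2)
#
# The warmup factor and consistency_weight then scale loss_mt before it is added
# to the supervised cross-entropy, as in the train_op above.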
def main(argv):
utils.setup_main()
del argv # Unused.
dataset = PAIR_DATASETS()[FLAGS.dataset]()
log_width = utils.ilog2(dataset.width)
model = ICT(
os.path.join(FLAGS.train_dir, dataset.name),
dataset,
lr=FLAGS.lr,
wd=FLAGS.wd,
arch=FLAGS.arch,
warmup_pos=FLAGS.warmup_pos,
batch=FLAGS.batch,
nclass=dataset.nclass,
ema=FLAGS.ema,
beta=FLAGS.beta,
consistency_weight=FLAGS.consistency_weight,
scales=FLAGS.scales or (log_width - 2),
filters=FLAGS.filters,
repeat=FLAGS.repeat)
model.train(FLAGS.train_kimg << 10, FLAGS.report_kimg << 10)
if __name__ == '__main__':
utils.setup_tf()
flags.DEFINE_float('consistency_weight', 50., 'Consistency weight.')
flags.DEFINE_float('warmup_pos', 0.4, 'Relative position at which constraint loss warmup ends.')
flags.DEFINE_float('wd', 0.02, 'Weight decay.')
flags.DEFINE_float('ema', 0.999, 'Exponential moving average of params.')
flags.DEFINE_float('beta', 0.5, 'Mixup beta.')
flags.DEFINE_integer('scales', 0, 'Number of 2x2 downscalings in the classifier.')
flags.DEFINE_integer('filters', 32, 'Filter size of convolutions.')
flags.DEFINE_integer('repeat', 4, 'Number of residual layers per stage.')
FLAGS.set_default('augment', 'd.d.d')
FLAGS.set_default('dataset', 'cifar10.3@250-5000')
FLAGS.set_default('batch', 64)
FLAGS.set_default('lr', 0.002)
FLAGS.set_default('train_kimg', 1 << 16)
app.run(main)
|
apache-2.0
| 1,690,494,292,168,825,600 | 40.869919 | 113 | 0.639223 | false | 3.245117 | false | false | false |
siemens/django-dingos
|
dingos/migrations/0007_auto__add_userdata__add_unique_userdata_user_data_kind.py
|
1
|
16414
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'UserData'
db.create_table(u'dingos_userdata', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('data_kind', self.gf('django.db.models.fields.SlugField')(max_length=32)),
('identifier', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['dingos.Identifier'], null=True)),
))
db.send_create_signal(u'dingos', ['UserData'])
# Adding unique constraint on 'UserData', fields ['user', 'data_kind']
db.create_unique(u'dingos_userdata', ['user_id', 'data_kind'])
def backwards(self, orm):
# Removing unique constraint on 'UserData', fields ['user', 'data_kind']
db.delete_unique(u'dingos_userdata', ['user_id', 'data_kind'])
# Deleting model 'UserData'
db.delete_table(u'dingos_userdata')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'dingos.blobstorage': {
'Meta': {'object_name': 'BlobStorage'},
'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sha256': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
},
u'dingos.datatypenamespace': {
'Meta': {'object_name': 'DataTypeNameSpace'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'uri': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'})
},
u'dingos.fact': {
'Meta': {'object_name': 'Fact'},
'fact_term': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dingos.FactTerm']"}),
'fact_values': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['dingos.FactValue']", 'null': 'True', 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value_iobject_id': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'value_of_set'", 'null': 'True', 'to': u"orm['dingos.Identifier']"}),
'value_iobject_ts': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
u'dingos.factdatatype': {
'Meta': {'unique_together': "(('name', 'namespace'),)", 'object_name': 'FactDataType'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'namespace': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fact_data_type_set'", 'to': u"orm['dingos.DataTypeNameSpace']"})
},
u'dingos.factterm': {
'Meta': {'unique_together': "(('term', 'attribute'),)", 'object_name': 'FactTerm'},
'attribute': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
u'dingos.factterm2type': {
'Meta': {'unique_together': "(('iobject_type', 'fact_term'),)", 'object_name': 'FactTerm2Type'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'fact_data_types': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'fact_term_thru'", 'symmetrical': 'False', 'to': u"orm['dingos.FactDataType']"}),
'fact_term': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_type_thru'", 'to': u"orm['dingos.FactTerm']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iobject_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fact_term_thru'", 'to': u"orm['dingos.InfoObjectType']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'})
},
u'dingos.factvalue': {
'Meta': {'unique_together': "(('value', 'fact_data_type', 'storage_location'),)", 'object_name': 'FactValue'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'fact_data_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fact_value_set'", 'to': u"orm['dingos.FactDataType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'storage_location': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {})
},
u'dingos.identifier': {
'Meta': {'unique_together': "(('uid', 'namespace'),)", 'object_name': 'Identifier'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latest': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'latest_of'", 'unique': 'True', 'null': 'True', 'to': u"orm['dingos.InfoObject']"}),
'namespace': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dingos.IdentifierNameSpace']"}),
'uid': ('django.db.models.fields.SlugField', [], {'max_length': '255'})
},
u'dingos.identifiernamespace': {
'Meta': {'object_name': 'IdentifierNameSpace'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'uri': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'})
},
u'dingos.infoobject': {
'Meta': {'ordering': "['-timestamp']", 'unique_together': "(('identifier', 'timestamp'),)", 'object_name': 'InfoObject'},
'create_timestamp': ('django.db.models.fields.DateTimeField', [], {}),
'facts': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['dingos.Fact']", 'through': u"orm['dingos.InfoObject2Fact']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_set'", 'to': u"orm['dingos.Identifier']"}),
'iobject_family': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_set'", 'to': u"orm['dingos.InfoObjectFamily']"}),
'iobject_family_revision': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['dingos.Revision']"}),
'iobject_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_set'", 'to': u"orm['dingos.InfoObjectType']"}),
'iobject_type_revision': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['dingos.Revision']"}),
'name': ('django.db.models.fields.CharField', [], {'default': "'Unnamed'", 'max_length': '255', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {}),
'uri': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
u'dingos.infoobject2fact': {
'Meta': {'ordering': "['node_id__name']", 'object_name': 'InfoObject2Fact'},
'attributed_fact': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attributes'", 'null': 'True', 'to': u"orm['dingos.InfoObject2Fact']"}),
'fact': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_thru'", 'to': u"orm['dingos.Fact']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iobject': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fact_thru'", 'to': u"orm['dingos.InfoObject']"}),
'node_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dingos.NodeID']"})
},
u'dingos.infoobjectfamily': {
'Meta': {'object_name': 'InfoObjectFamily'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '256'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'})
},
u'dingos.infoobjectnaming': {
'Meta': {'ordering': "['position']", 'object_name': 'InfoObjectNaming'},
'format_string': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iobject_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_type_set'", 'to': u"orm['dingos.InfoObjectType']"}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
},
u'dingos.infoobjecttype': {
'Meta': {'unique_together': "(('name', 'iobject_family', 'namespace'),)", 'object_name': 'InfoObjectType'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iobject_family': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_type_set'", 'to': u"orm['dingos.InfoObjectFamily']"}),
'name': ('django.db.models.fields.SlugField', [], {'max_length': '30'}),
'namespace': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_type_set'", 'blank': 'True', 'to': u"orm['dingos.DataTypeNameSpace']"})
},
u'dingos.marking2x': {
'Meta': {'object_name': 'Marking2X'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'marking': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'marked_item_thru'", 'to': u"orm['dingos.InfoObject']"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'dingos.nodeid': {
'Meta': {'object_name': 'NodeID'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'dingos.relation': {
'Meta': {'unique_together': "(('source_id', 'target_id', 'relation_type'),)", 'object_name': 'Relation'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'metadata_id': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': u"orm['dingos.Identifier']"}),
'relation_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dingos.Fact']"}),
'source_id': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'yields_via'", 'null': 'True', 'to': u"orm['dingos.Identifier']"}),
'target_id': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'yielded_by_via'", 'null': 'True', 'to': u"orm['dingos.Identifier']"})
},
u'dingos.revision': {
'Meta': {'object_name': 'Revision'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
},
u'dingos.userdata': {
'Meta': {'unique_together': "(('user', 'data_kind'),)", 'object_name': 'UserData'},
'data_kind': ('django.db.models.fields.SlugField', [], {'max_length': '32'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dingos.Identifier']", 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
}
}
complete_apps = ['dingos']
|
gpl-2.0
| -1,355,547,385,660,828,000 | 75.705607 | 187 | 0.557268 | false | 3.585408 | false | false | false |
openstack/oslo.context
|
doc/source/user/examples/usage.py
|
1
|
1601
|
#!/usr/bin/env python3
#
# Copyright (c) 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A representative usage example of Oslo Context
This example requires the following modules to be installed.
$ pip install oslo.context oslo.log
More information can be found at:
https://docs.openstack.org/oslo.context/latest/user/usage.html
"""
from oslo_config import cfg
from oslo_context import context
from oslo_log import log as logging
CONF = cfg.CONF
DOMAIN = "demo"
logging.register_options(CONF)
logging.setup(CONF, DOMAIN)
LOG = logging.getLogger(__name__)
LOG.info("Message without context")
# ids in Openstack are 32 characters long
# For readability a shorter id value is used
context.RequestContext(user='6ce90b4d',
tenant='d6134462',
project_domain='a6b9360e')
LOG.info("Message with context")
context = context.RequestContext(user='ace90b4d',
tenant='b6134462',
project_domain='c6b9360e')
LOG.info("Message with passed context", context=context)
|
apache-2.0
| -5,818,148,704,935,449,000 | 30.392157 | 75 | 0.713929 | false | 3.914425 | false | false | false |