repo_name (string, 6-61 chars) | path (string, 4-230 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | text (string, 1.01k-850k chars) | license (15 classes) | hash (int64, -9,220,477,234,079,998,000 to 9,219,060,020B) | line_mean (float64, 11.6-96.6) | line_max (int64, 32-939) | alpha_frac (float64, 0.26-0.9) | autogenerated (bool, 1 class) | ratio (float64, 1.62-6.1) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
angst7/far | far.py | 1 | 9209 | from network.server import FarProtocol, FarFactory
from network.messages import Tag
from models.mob import Player, NPC, Attack
from models.world import Room, Exit, Direction
from twisted.internet import reactor, task
from copy import deepcopy
from guppy import hpy
class Game(object):
def __init__(self):
self.name = "FAR v0.01"
self.players = []
self.npcs = []
self.mobs = []
self.rooms = []
self.commandtags = [Tag("[SAY]", self.saymsg), Tag("[QUIT]", self.quit),
Tag("[FIGHT]", self.startfight), Tag("[FLEE]", self.stopfight),
Tag("[IDENTIFY]", self.identify), Tag("[LOOK]", self.look),
Tag("[EXITS]", self.exits), Tag("[MOVETO]", self.moveto),
Tag("[GO]",self.go), Tag("[NPCS]", self.listnpcs),
Tag("[MOBS]", self.listmobs), Tag("[STATS]", self.stats)]
self.exit = False
def tick(self):
#for player in self.players:
#player.addmessage('Tick!')
for room in self.rooms:
for mob in room.mobs:
if mob.mobile:
mob.walk()
print "Tick"
#print hpy().heap()
def combat(self):
for player in self.players:
if player.fighting:
player.combat()
for mob in self.mobs:
if mob.fighting:
mob.combat()
def connection(self, connect):
        p = Player(connect)
p.addmessage('Welcome!')
p.goto(self.rooms[1])
self.players.append(p)
def disconnection(self, connect):
killplayer = None
for p in self.players:
if p.connection == connect:
killplayer = p
        if killplayer is None:
print "Could not find player"
else:
killplayer.room.player_left(killplayer)
self.players.remove(killplayer)
def saymsg(self, player, args):
player.addmessage('You said: %s' % '|'.join(args))
for p in self.players:
if p != player:
p.addmessage('%s: %s' % (player.name, ' '.join(args)))
def quit(self, player, args):
        player.addmessage('Bye!')
player.exit = True
def startfight(self, player, args):
if len(args) == 1:
player.target = self.findmob(args[0], player.room)
if player.target is None:
player.addmessage("I dont see them.")
else:
player.addmessage("You attack %s!" % player.target.name)
player.fighting = True
player.target.target = player
player.target.fighting = True
else:
player.addmessage("Who do you want to fight?")
def stopfight(self, player, args):
player.addmessage("You run away screaming.")
player.fighting = False
        if player.target is not None:
            player.target.fighting = False
            player.target.target = None
            player.target = None
def identify(self, player, args):
if len(args) == 1:
player.name = args[0]
player.identified = True
player.addmessage("Welcome, %s" % player.name)
def look(self, player, args):
if len(args) == 0:
player.look()
#player.addmessage("[%d] %s\r\n%s" % (player.room.number,
# player.room.short_description,
# player.room.long_description))
def stats(self, player, args):
if len(args) == 0:
player.stats()
def moveto(self, player, args):
if len(args) == 1:
newroom = self.findroom(args[0])
print "Player %s moving to [%s]%s" % (player.name, args[0], newroom.short_description)
player.goto(newroom)
def go(self, player, args):
if len(args) == 1:
direction = int(args[0])
dest = None
for e in player.room.exits:
if e.direction == direction:
dest = e.room
            if dest is None:
                player.addmessage("[BADEXIT]")
            else:
                self.moveto(player, [dest.number])
def listnpcs(self, player, args):
for n in self.npcs:
if n is not None:
player.addmessage("[%s] %s" % (n.number, n.name))
def listmobs(self, player, args):
for m in self.mobs:
player.addmessage("[%s] %s : [%s] %s" % (m.number, m.name, m.room.number, m.room.short_description))
def exits(self, player, args):
if len(args) == 0:
for e in player.room.exits:
player.addmessage("[%s] %s" % (Direction.STRINGS[e.direction], e.room.short_description))
def findroom(self, roomnum):
for r in self.rooms:
if r.number == int(roomnum):
return r
return self.rooms[0]
def findnpc(self, npcnum):
for n in self.npcs:
if n.number == int(npcnum):
return n
return self.npcs[0]
def findmob(self, npcnum, room):
for m in room.mobs:
if m.number == int(npcnum):
return m
return None
def parser(self, player, line):
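        # Incoming lines are pipe-delimited: the first field names a command tag
        # (e.g. "[GO]|3" or "[SAY]|hello"), the remaining fields become its args.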
parts = line.split('|')
success = False
for t in self.commandtags:
if t.name == parts[0]:
if len(parts) > 1:
p = parts[1:]
else:
p = []
t.action(player, p)
success = True
if (success == False):
print 'No such tag'
def loadstatic(self):
f = open('models/world1.txt','r')
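        # world1.txt stores rooms as 4-line records: room number, short
        # description, long description, then '|'-separated exit room numbers
        # (one slot per direction); lines starting with '#' are ignored.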
iterator = 0
exits = []
for line in f:
if line[0] != "#":
iterator += 1
if iterator == 5:
print "Adding room: [%d] %s" % (rnum, sdesc)
self.rooms.append(Room(rnum, sdesc, ldesc))
iterator = 1
if iterator == 1:
rnum = int(line)
if iterator == 2:
sdesc = line.rstrip()
if iterator == 3:
ldesc = line.rstrip()
if iterator == 4:
direction = 1
for e in line.split('|'):
if int(e) > 0:
exits.append([rnum, direction, int(e)])
direction += 1
for e in exits:
fromroom = self.findroom(e[0])
toroom = self.findroom(e[2])
fromroom.connect_room(e[1], toroom)
print "Adding exit from %s to %s" % (fromroom.number, toroom.number)
f = open('models/mobs1.txt','r')
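        # mobs1.txt stores NPCs as 12-line records: number, name, description,
        # level, hp, five attack lines of three '|'-separated ints, one unused
        # line, and a mobile flag (1/0); lines starting with '#' are ignored.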
iterator = 0
attacks = []
for line in f:
if line[0] != "#":
iterator += 1
if iterator == 13:
print "Adding NPC: [%d] %s" % (number, name)
self.npcs.append(NPC(number, name, desc, level, hp, attacks, mobile))
attacks = []
iterator = 1
if iterator == 1:
number = int(line)
if iterator == 2:
name = line.rstrip()
if iterator == 3:
desc = line.rstrip()
if iterator == 4:
level = int(line)
if iterator == 5:
hp = int(line)
if iterator in range(6, 11):
dice = line.split('|')
if len(dice) == 3:
attacks.append(Attack(int(dice[0]), int(dice[1]), int(dice[2])))
if iterator == 12:
if int(line) == 1:
mobile = True
else:
mobile = False
f = open('models/populate1.txt', 'r')
for line in f:
data = line.split('|')
if len(data) == 2:
npcnum = int(data[0])
roomnum = int(data[1])
newmob = deepcopy(self.findnpc(npcnum))
newmob.goto(self.findroom(roomnum))
self.mobs.append(newmob)
print "Placed [%d] %s in room %d" % (newmob.number, newmob.name, roomnum)
if __name__ == '__main__':
g = Game()
# Set up a few rooms and exits to connect them
# this should go into a static load file
g.rooms = [Room(0, 'Nowhere', 'This is nowhere, man.')] #,
#Room(1, 'The Square', 'This is the center of town.',
# [Exit(Direction.EAST, 2), Exit(Direction.WEST, 3)]),
#Room(2, 'Main Street', 'Walking along the main street', [Exit(Direction.WEST, 1)]),
#Room(3, 'Main Street', 'Walking along the main street', [Exit(Direction.EAST, 1)])]
g.loadstatic()
reactor.listenTCP(4000, FarFactory(g))
reactor.run()
| mit | -169,936,922,520,400,900 | 34.555985 | 112 | 0.468889 | false | 4.002173 | false | false | false |
IxLahiji/lahujChatBot | lahujChatBot.py | 1 | 5703 | import discord
import discord.utils
import asyncio
import os.path
import sys
import time
import re
import markovify
import random
import datetime
from chatBot.settings import JSONSettings
prog_path = os.path.dirname(os.path.abspath(__file__))
default_settings = {"Discord token": "",
"Source type (channel or user)": "",
"Source": [""],
"Target channel": "",
"Response frequency (%)": "25",
"Chat idle allowed (m)": "10",
"Sample size per source": "10000",
"Allow Mentions": "false"
}
#Load information
settings = JSONSettings(prog_path, default_settings)
#Create new discord client
client = discord.Client()
last_recieved = datetime.datetime.now()
def remove_emojii(text):
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F" #emoticons
u"\U0001F300-\U0001F5FF" #symbols & pictographs
u"\U0001F680-\U0001F6FF" #transport & map symbols
u"\U0001F1E0-\U0001F1FF" #flags (iOS)
u"\U00002702-\U000027B0" #dingbats
u"\U000024C2-\U0001F251" #enclosed characters
u"\U0001F681-\U0001F6C5" #additional transport
u"\U0001F30D-\U0001F567" #additional symbols
u"\U0001F600-\U0001F636" #additional emoticons
"]+", flags=re.UNICODE)
return emoji_pattern.sub(r'', text)
def remove_mentions(text):
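    # Discord user mentions arrive as "<@12345>" or "<@!12345>"; swap the bare
    # IDs for "-username" and defuse @everyone/@here so the bot cannot ping.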
convert_dict = {
'@\u200beveryone': '@-everyone',
'@\u200bhere': '@-here'
}
    # str.translate cannot map multi-character keys, so apply plain replacements
    for bad, safe in convert_dict.items():
        text = text.replace(bad, safe)
mentions = re.findall(r'<@!?([0-9]+)>', text)
for mention in mentions:
member = discord.utils.find(lambda m: str(m.id) == str(mention), find_channel(settings.get_setting('Target channel')).server.members)
if (member is not None):
text = text.replace(mention, "-" + member.name)
return text
async def auto_message_check():
global last_recieved
while True:
if ((datetime.datetime.now() - (last_recieved + datetime.timedelta(minutes=int(settings.get_setting('Chat idle allowed (m)'))))).days >= 0):
asyncio.ensure_future(send_response())
await asyncio.sleep(10)
def response_roll():
x = random.randint(0,100)
return (x <= int(settings.get_setting('Response frequency (%)')))
def safe_print(text):
print (remove_emojii(text))
def find_channel(target_channel_name):
channel = discord.utils.get(client.get_all_channels(), name=target_channel_name)
return channel
async def retrieve_source_text():
source = settings.get_setting('Source')
source_text = ""
if (settings.get_setting('Source type (channel or user)') == "channel"):
for channel in source:
target_channel = find_channel(channel)
async for message in client.logs_from(target_channel, limit=int(settings.get_setting('Sample size per source'))):
source_text += message.content + "\n"
return source_text
elif(settings.get_setting('Source type (channel or user)') == "user"):
for user in source:
pass
else:
print("Error: Invalid source type! Please choose either 'channel' or 'user' in settings file.")
time.sleep(3)
sys.exit()
async def generate_sentence ():
source_text = await retrieve_source_text()
text_model = markovify.NewlineText(source_text)
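    # markovify treats every line of the scraped history as one "sentence";
    # make_sentence() may return None, hence the retry loop below.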
new_sentence = None
while not new_sentence:
new_sentence = text_model.make_sentence()
if (settings.get_setting('Allow Mentions') != "true"):
new_sentence = remove_mentions(new_sentence)
return new_sentence
async def send_response():
global last_recieved
target_channel_name = settings.get_setting('Target channel')
last_recieved = datetime.datetime.now()
start_last_recieved = last_recieved
sentence = await generate_sentence()
if (start_last_recieved == last_recieved):
await client.send_message(find_channel(target_channel_name), sentence)
@client.event
async def on_message(message):
target_channel_name = settings.get_setting('Target channel')
if ((message.channel.name == target_channel_name) and (message.author.id != client.user.id)):
if (response_roll()):
asyncio.ensure_future(send_response())
@client.event
async def on_ready():
print('Logged in as: ' + client.user.name + '[' + client.user.id + '].')
print("Logging in to bot...")
#Run client (connect and login) ~ Blocking (must be last) ~ This is an unabstracted version of client.run() to give more control
try:
if (not settings.get_setting('Discord token')):
print ("Please enter a discord bot token in 'settings.JSON' before running")
time.sleep(3)
sys.exit()
else:
client.loop.run_until_complete(asyncio.gather(
client.start(settings.get_setting('Discord token')),
auto_message_check()
))
except KeyboardInterrupt:
#Set exit flag to allow wakeup() to close properly
exit_flag = True
client.loop.run_until_complete(client.logout())
pending = asyncio.Task.all_tasks()
gathered = asyncio.gather(*pending)
try:
gathered.cancel()
client.loop.run_until_complete(gathered)
gathered.exception()
except:
pass
finally:
client.loop.close()
| mit | -1,426,452,223,220,533,500 | 31.162791 | 148 | 0.595476 | false | 3.756917 | false | false | false |
PradeeshSuganthan/mnist-classifier | mlp.py | 1 | 4683 | #based off of neuralnetworksanddeeplearning.com
import numpy as np
import gzip
import struct
import random
epochs = 10 #number of training cycles
y_train = np.zeros((60000,10)) #initialize for one-hot encoding
alpha = 100 #learning rate
batchsize = 6
num_neurons = 2  # assumed layer count for backprop's loop (was an undefined bare name)
def main():
print "Test"
#read in data
images_train, images_test, labels_train, labels_test = readData()
#randomly initialize weights and biases
weights = .01*np.random.rand(784,10)
bias = .01*np.random.rand(10000,10)
#one-hot encode labels
y_train[np.arange(60000), labels_train] = 1
#group training data
training_data = zip(images_train, labels_train)
#train classifier
    weights_t, bias_t = sgd(training_data, weights, bias)
#test classifier
accuracy = testClassifier(images_test, labels_test, weights_t, bias_t)
print "Accuracy: " + str(accuracy) + "%"
def readData():
image_train_filename = 'MNIST_data/train-images-idx3-ubyte.gz'
label_train_filename = 'MNIST_data/train-labels-idx1-ubyte.gz'
image_test_filename = 'MNIST_data/t10k-images-idx3-ubyte.gz'
label_test_filename = 'MNIST_data/t10k-labels-idx1-ubyte.gz'
print "Opening files"
#uncompress files and read data
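    # MNIST IDX layout: a big-endian header (magic, count, rows, cols for images;
    # magic, count for labels) followed by the raw uint8 pixel/label bytes.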
with gzip.open(image_train_filename, 'r') as f:
magicnum, numimage, row, col = struct.unpack('>IIII', f.read(16))
images = np.fromstring(f.read(), dtype='uint8').reshape(numimage, row * col)
with gzip.open(label_train_filename, 'r') as f:
magicnum, numlabel = struct.unpack('>II', f.read(8))
labels = np.fromstring(f.read(), dtype='int8')
with gzip.open(image_test_filename, 'r') as f:
magicnum, numimage, row, col = struct.unpack('>IIII', f.read(16))
images_t = np.fromstring(f.read(), dtype='uint8').reshape(numimage, row * col)
with gzip.open(label_test_filename, 'r') as f:
magicnum, numlabel = struct.unpack('>II', f.read(8))
labels_t = np.fromstring(f.read(), dtype='int8')
return images, images_t, labels, labels_t
def forwardPass(weights, x, bias):
y_pred = []
#linear model
y_i = x.dot(weights) + bias
#activation function
for i in range(len(y_i)):
y_probs = softmax(y_i[i])
y_pred.append(y_probs)
return y_pred
# def softmax(y):
# y_s = np.exp(y-np.max(y))
# y_soft = y_s/y_s.sum()
# return y_soft
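# A true multi-class softmax, left here as a hedged reference sketch only; the
# softmax() defined near the bottom of this file is really a logistic sigmoid.
def stable_softmax(y):
    y_exp = np.exp(y - np.max(y))  # shift by the max for numerical stability
    return y_exp / y_exp.sum()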
def loss(y_pred, y_actual):
#cross entropy loss
#y_actual multiplied by log of y_pred
#error_sum = y_actual * np.log10(y_pred-y_actual)
#sum
#error = -np.sum(error_sum)
#Least squares error
error = np.sum((y_pred-y_actual)**2)
return error
def sgd(training_data, weights, biases):
#train using stochastic gradient descent
for i in range(0,epochs):
#randomly shuffle data
random.shuffle(training_data)
        #partition into consecutive batches of size batchsize
        batches = [training_data[k:k + batchsize] for k in range(0, len(training_data), batchsize)]
        #apply gradient descent for each batch
        for batch in batches:
            weights, biases = gradientUpdate(batch, weights, biases)
print "Epoch " + str(i) + " complete"
return weights, biases
def gradientUpdate(batch, weights, bias):
    nabla_b = [np.zeros(b.shape) for b in bias]
    nabla_w = [np.zeros(w.shape) for w in weights]
    #accumulate gradients over the mini-batch
    for x, y in batch:
        deltaW, deltaB = backprop(x, y, weights, bias)
        nabla_w = [nw + dw for nw, dw in zip(nabla_w, deltaW)]
        nabla_b = [nb + db for nb, db in zip(nabla_b, deltaB)]
    #update weights & biases with the averaged gradients
    w = [wt - (alpha / len(batch)) * nw for wt, nw in zip(weights, nabla_w)]
    b = [bs - (alpha / len(batch)) * nb for bs, nb in zip(bias, nabla_b)]
    return w, b
def backprop(x, y, weights, bias):
nabla_b = [np.zeros(b.shape) for b in bias]
nabla_w = [np.zeros(w.shape) for w in weights]
#feedforward
activation = x
activation_list = [x]
z_list = []
for w, b in zip(weights, bias):
z = np.dot(w, activation) + b
z_list.append(z)
activation = softmax(z)
activation_list.append(activation)
#backward pass
    delta = cost_derivative(activation_list[-1], y) * softmax_deriv(z_list[-1])
nabla_b[-1] = delta
nabla_w[-1] = np.dot(delta, activation_list[-2].T)
    for l in xrange(2, num_neurons):
        z = z_list[-l]
        sd = softmax_deriv(z)
        delta = np.dot(weights[-l + 1].T, delta) * sd
        nabla_b[-l] = delta
        nabla_w[-l] = np.dot(delta, activation_list[-l-1].T)
return (nabla_w, nabla_b)
def cost_derivative(output, y):
return (output - y)
def softmax(z):
return 1.0/(1.0 + np.exp(-z))
def softmax_deriv(z):
return softmax(z) * (1 - softmax(z))
def testClassifier(images, labels, weights, bias):
correct = 0
total = 0
prediction = []
print "Testing"
y_pred= forwardPass(weights, images, bias)
#predictions for test images
for i in range(len(y_pred)):
prediction.append(np.argmax(y_pred[i]))
#Check accuracy of guess
for i in range(0,len(y_pred)):
if prediction[i] == labels[i]:
correct += 1
total += 1
accuracy = (correct/ float(total))*100
return accuracy
if __name__ == '__main__':
main() | mit | 2,670,317,334,518,944,300 | 22.656566 | 86 | 0.681828 | false | 2.691379 | true | false | false |
defance/edx-platform | lms/djangoapps/courseware/tests/test_entrance_exam.py | 11 | 26647 | """
Tests use cases related to LMS Entrance Exam behavior, such as gated content access (TOC)
"""
from mock import patch, Mock
from django.core.urlresolvers import reverse
from django.test.client import RequestFactory
from nose.plugins.attrib import attr
from capa.tests.response_xml_factory import MultipleChoiceResponseXMLFactory
from courseware.model_data import FieldDataCache
from courseware.module_render import toc_for_course, get_module, handle_xblock_callback
from courseware.tests.factories import UserFactory, InstructorFactory, StaffFactory
from courseware.tests.helpers import (
LoginEnrollmentTestCase,
get_request_for_user
)
from courseware.entrance_exams import (
course_has_entrance_exam,
get_entrance_exam_content,
get_entrance_exam_score,
user_can_skip_entrance_exam,
user_has_passed_entrance_exam,
)
from student.models import CourseEnrollment
from student.tests.factories import CourseEnrollmentFactory, AnonymousUserFactory
from util.milestones_helpers import (
add_milestone,
add_course_milestone,
get_namespace_choices,
generate_milestone_namespace,
add_course_content_milestone,
get_milestone_relationship_types,
)
from milestones.tests.utils import MilestonesTestCaseMixin
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
@attr('shard_1')
@patch.dict('django.conf.settings.FEATURES', {'ENTRANCE_EXAMS': True, 'MILESTONES_APP': True})
class EntranceExamTestCases(LoginEnrollmentTestCase, ModuleStoreTestCase, MilestonesTestCaseMixin):
"""
Check that content is properly gated.
Creates a test course from scratch. The tests below are designed to execute
workflows regardless of the feature flag settings.
"""
@patch.dict('django.conf.settings.FEATURES', {'ENTRANCE_EXAMS': True, 'MILESTONES_APP': True})
def setUp(self):
"""
Test case scaffolding
"""
super(EntranceExamTestCases, self).setUp()
self.course = CourseFactory.create(
metadata={
'entrance_exam_enabled': True,
}
)
self.chapter = ItemFactory.create(
parent=self.course,
display_name='Overview'
)
ItemFactory.create(
parent=self.chapter,
display_name='Welcome'
)
ItemFactory.create(
parent=self.course,
category='chapter',
display_name="Week 1"
)
self.chapter_subsection = ItemFactory.create(
parent=self.chapter,
category='sequential',
display_name="Lesson 1"
)
chapter_vertical = ItemFactory.create(
parent=self.chapter_subsection,
category='vertical',
display_name='Lesson 1 Vertical - Unit 1'
)
ItemFactory.create(
parent=chapter_vertical,
category="problem",
display_name="Problem - Unit 1 Problem 1"
)
ItemFactory.create(
parent=chapter_vertical,
category="problem",
display_name="Problem - Unit 1 Problem 2"
)
ItemFactory.create(
category="instructor",
parent=self.course,
data="Instructor Tab",
display_name="Instructor"
)
self.entrance_exam = ItemFactory.create(
parent=self.course,
category="chapter",
display_name="Entrance Exam Section - Chapter 1",
is_entrance_exam=True,
in_entrance_exam=True
)
self.exam_1 = ItemFactory.create(
parent=self.entrance_exam,
category='sequential',
display_name="Exam Sequential - Subsection 1",
graded=True,
in_entrance_exam=True
)
subsection = ItemFactory.create(
parent=self.exam_1,
category='vertical',
display_name='Exam Vertical - Unit 1'
)
problem_xml = MultipleChoiceResponseXMLFactory().build_xml(
question_text='The correct answer is Choice 3',
choices=[False, False, True, False],
choice_names=['choice_0', 'choice_1', 'choice_2', 'choice_3']
)
self.problem_1 = ItemFactory.create(
parent=subsection,
category="problem",
display_name="Exam Problem - Problem 1",
data=problem_xml
)
self.problem_2 = ItemFactory.create(
parent=subsection,
category="problem",
display_name="Exam Problem - Problem 2"
)
add_entrance_exam_milestone(self.course, self.entrance_exam)
self.course.entrance_exam_enabled = True
self.course.entrance_exam_minimum_score_pct = 0.50
self.course.entrance_exam_id = unicode(self.entrance_exam.scope_ids.usage_id)
self.anonymous_user = AnonymousUserFactory()
self.request = get_request_for_user(UserFactory())
modulestore().update_item(self.course, self.request.user.id) # pylint: disable=no-member
self.client.login(username=self.request.user.username, password="test")
CourseEnrollment.enroll(self.request.user, self.course.id)
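        # The two fixtures below describe the table of contents toc_for_course
        # should return before (exam chapter only) and after (all chapters) the
        # entrance-exam milestone is fulfilled for self.request.user.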
self.expected_locked_toc = (
[
{
'active': True,
'sections': [
{
'url_name': u'Exam_Sequential_-_Subsection_1',
'display_name': u'Exam Sequential - Subsection 1',
'graded': True,
'format': '',
'due': None,
'active': True
}
],
'url_name': u'Entrance_Exam_Section_-_Chapter_1',
'display_name': u'Entrance Exam Section - Chapter 1',
'display_id': u'entrance-exam-section-chapter-1',
}
]
)
self.expected_unlocked_toc = (
[
{
'active': False,
'sections': [
{
'url_name': u'Welcome',
'display_name': u'Welcome',
'graded': False,
'format': '',
'due': None,
'active': False
},
{
'url_name': u'Lesson_1',
'display_name': u'Lesson 1',
'graded': False,
'format': '',
'due': None,
'active': False
}
],
'url_name': u'Overview',
'display_name': u'Overview',
'display_id': u'overview'
},
{
'active': False,
'sections': [],
'url_name': u'Week_1',
'display_name': u'Week 1',
'display_id': u'week-1'
},
{
'active': False,
'sections': [],
'url_name': u'Instructor',
'display_name': u'Instructor',
'display_id': u'instructor'
},
{
'active': True,
'sections': [
{
'url_name': u'Exam_Sequential_-_Subsection_1',
'display_name': u'Exam Sequential - Subsection 1',
'graded': True,
'format': '',
'due': None,
'active': True
}
],
'url_name': u'Entrance_Exam_Section_-_Chapter_1',
'display_name': u'Entrance Exam Section - Chapter 1',
'display_id': u'entrance-exam-section-chapter-1'
}
]
)
def test_view_redirect_if_entrance_exam_required(self):
"""
Unit Test: if entrance exam is required. Should return a redirect.
"""
url = reverse('courseware', kwargs={'course_id': unicode(self.course.id)})
expected_url = reverse('courseware_section',
kwargs={
'course_id': unicode(self.course.id),
'chapter': self.entrance_exam.location.name,
'section': self.exam_1.location.name
})
resp = self.client.get(url)
self.assertRedirects(resp, expected_url, status_code=302, target_status_code=200)
@patch.dict('django.conf.settings.FEATURES', {'ENTRANCE_EXAMS': False})
def test_entrance_exam_content_absence(self):
"""
Unit Test: If entrance exam is not enabled then page should be redirected with chapter contents.
"""
url = reverse('courseware', kwargs={'course_id': unicode(self.course.id)})
expected_url = reverse('courseware_section',
kwargs={
'course_id': unicode(self.course.id),
'chapter': self.chapter.location.name,
'section': self.chapter_subsection.location.name
})
resp = self.client.get(url)
self.assertRedirects(resp, expected_url, status_code=302, target_status_code=200)
resp = self.client.get(expected_url)
self.assertNotIn('Exam Problem - Problem 1', resp.content)
self.assertNotIn('Exam Problem - Problem 2', resp.content)
def test_entrance_exam_content_presence(self):
"""
Unit Test: If entrance exam is enabled then its content e.g. problems should be loaded and redirection will
occur with entrance exam contents.
"""
url = reverse('courseware', kwargs={'course_id': unicode(self.course.id)})
expected_url = reverse('courseware_section',
kwargs={
'course_id': unicode(self.course.id),
'chapter': self.entrance_exam.location.name,
'section': self.exam_1.location.name
})
resp = self.client.get(url)
self.assertRedirects(resp, expected_url, status_code=302, target_status_code=200)
resp = self.client.get(expected_url)
self.assertIn('Exam Problem - Problem 1', resp.content)
self.assertIn('Exam Problem - Problem 2', resp.content)
def test_get_entrance_exam_content(self):
"""
test get entrance exam content method
"""
exam_chapter = get_entrance_exam_content(self.request, self.course)
self.assertEqual(exam_chapter.url_name, self.entrance_exam.url_name)
self.assertFalse(user_has_passed_entrance_exam(self.request, self.course))
answer_entrance_exam_problem(self.course, self.request, self.problem_1)
answer_entrance_exam_problem(self.course, self.request, self.problem_2)
exam_chapter = get_entrance_exam_content(self.request, self.course)
self.assertEqual(exam_chapter, None)
self.assertTrue(user_has_passed_entrance_exam(self.request, self.course))
def test_entrance_exam_score(self):
"""
test entrance exam score. we will hit the method get_entrance_exam_score to verify exam score.
"""
with self.assertNumQueries(1):
exam_score = get_entrance_exam_score(self.request, self.course)
self.assertEqual(exam_score, 0)
answer_entrance_exam_problem(self.course, self.request, self.problem_1)
answer_entrance_exam_problem(self.course, self.request, self.problem_2)
with self.assertNumQueries(1):
exam_score = get_entrance_exam_score(self.request, self.course)
# 50 percent exam score should be achieved.
self.assertGreater(exam_score * 100, 50)
def test_entrance_exam_requirement_message(self):
"""
Unit Test: entrance exam requirement message should be present in response
"""
url = reverse(
'courseware_section',
kwargs={
'course_id': unicode(self.course.id),
'chapter': self.entrance_exam.location.name,
'section': self.exam_1.location.name
}
)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn('To access course materials, you must score', resp.content)
def test_entrance_exam_requirement_message_with_correct_percentage(self):
"""
Unit Test: entrance exam requirement message should be present in response
and percentage of required score should be rounded as expected
"""
minimum_score_pct = 29
self.course.entrance_exam_minimum_score_pct = float(minimum_score_pct) / 100
modulestore().update_item(self.course, self.request.user.id) # pylint: disable=no-member
url = reverse(
'courseware_section',
kwargs={
'course_id': unicode(self.course.id),
'chapter': self.entrance_exam.location.name,
'section': self.exam_1.location.name
}
)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn('To access course materials, you must score {required_score}% or higher'.format(
required_score=minimum_score_pct
), resp.content)
def test_entrance_exam_requirement_message_hidden(self):
"""
Unit Test: entrance exam message should not be present outside the context of entrance exam subsection.
"""
# Login as staff to avoid redirect to entrance exam
self.client.logout()
staff_user = StaffFactory(course_key=self.course.id)
self.client.login(username=staff_user.username, password='test')
CourseEnrollment.enroll(staff_user, self.course.id)
url = reverse(
'courseware_section',
kwargs={
'course_id': unicode(self.course.id),
'chapter': self.chapter.location.name,
'section': self.chapter_subsection.location.name
}
)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertNotIn('To access course materials, you must score', resp.content)
self.assertNotIn('You have passed the entrance exam.', resp.content)
def test_entrance_exam_passed_message_and_course_content(self):
"""
Unit Test: exam passing message and rest of the course section should be present
when user achieves the entrance exam milestone/pass the exam.
"""
url = reverse(
'courseware_section',
kwargs={
'course_id': unicode(self.course.id),
'chapter': self.entrance_exam.location.name,
'section': self.exam_1.location.name
}
)
answer_entrance_exam_problem(self.course, self.request, self.problem_1)
answer_entrance_exam_problem(self.course, self.request, self.problem_2)
resp = self.client.get(url)
self.assertNotIn('To access course materials, you must score', resp.content)
self.assertIn('You have passed the entrance exam.', resp.content)
self.assertIn('Lesson 1', resp.content)
def test_entrance_exam_gating(self):
"""
Unit Test: test_entrance_exam_gating
"""
# This user helps to cover a discovered bug in the milestone fulfillment logic
chaos_user = UserFactory()
locked_toc = self._return_table_of_contents()
for toc_section in self.expected_locked_toc:
self.assertIn(toc_section, locked_toc)
# Set up the chaos user
answer_entrance_exam_problem(self.course, self.request, self.problem_1, chaos_user)
answer_entrance_exam_problem(self.course, self.request, self.problem_1)
answer_entrance_exam_problem(self.course, self.request, self.problem_2)
unlocked_toc = self._return_table_of_contents()
for toc_section in self.expected_unlocked_toc:
self.assertIn(toc_section, unlocked_toc)
def test_skip_entrance_exam_gating(self):
"""
Tests gating is disabled if skip entrance exam is set for a user.
"""
# make sure toc is locked before allowing user to skip entrance exam
locked_toc = self._return_table_of_contents()
for toc_section in self.expected_locked_toc:
self.assertIn(toc_section, locked_toc)
# hit skip entrance exam api in instructor app
instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=instructor.username, password='test')
url = reverse('mark_student_can_skip_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.request.user.email,
})
self.assertEqual(response.status_code, 200)
unlocked_toc = self._return_table_of_contents()
for toc_section in self.expected_unlocked_toc:
self.assertIn(toc_section, unlocked_toc)
def test_entrance_exam_gating_for_staff(self):
"""
Tests gating is disabled if user is member of staff.
"""
# Login as member of staff
self.client.logout()
staff_user = StaffFactory(course_key=self.course.id)
staff_user.is_staff = True
self.client.login(username=staff_user.username, password='test')
# assert staff has access to all toc
self.request.user = staff_user
unlocked_toc = self._return_table_of_contents()
for toc_section in self.expected_unlocked_toc:
self.assertIn(toc_section, unlocked_toc)
@patch('courseware.entrance_exams.user_has_passed_entrance_exam', Mock(return_value=False))
def test_courseware_page_access_without_passing_entrance_exam(self):
"""
Test courseware access page without passing entrance exam
"""
url = reverse(
'courseware_chapter',
kwargs={'course_id': unicode(self.course.id), 'chapter': self.chapter.url_name}
)
response = self.client.get(url)
redirect_url = reverse('courseware', args=[unicode(self.course.id)])
self.assertRedirects(response, redirect_url, status_code=302, target_status_code=302)
response = self.client.get(redirect_url)
exam_url = response.get('Location')
self.assertRedirects(response, exam_url)
@patch('courseware.entrance_exams.user_has_passed_entrance_exam', Mock(return_value=False))
def test_courseinfo_page_access_without_passing_entrance_exam(self):
"""
Test courseware access page without passing entrance exam
"""
url = reverse('info', args=[unicode(self.course.id)])
response = self.client.get(url)
redirect_url = reverse('courseware', args=[unicode(self.course.id)])
self.assertRedirects(response, redirect_url, status_code=302, target_status_code=302)
response = self.client.get(redirect_url)
exam_url = response.get('Location')
self.assertRedirects(response, exam_url)
@patch('courseware.entrance_exams.user_has_passed_entrance_exam', Mock(return_value=True))
def test_courseware_page_access_after_passing_entrance_exam(self):
"""
Test courseware access page after passing entrance exam
"""
# Mocking get_required_content with empty list to assume user has passed entrance exam
self._assert_chapter_loaded(self.course, self.chapter)
@patch('util.milestones_helpers.get_required_content', Mock(return_value=['a value']))
def test_courseware_page_access_with_staff_user_without_passing_entrance_exam(self):
"""
Test courseware access page without passing entrance exam but with staff user
"""
self.logout()
staff_user = StaffFactory.create(course_key=self.course.id)
self.login(staff_user.email, 'test')
CourseEnrollmentFactory(user=staff_user, course_id=self.course.id)
self._assert_chapter_loaded(self.course, self.chapter)
def test_courseware_page_access_with_staff_user_after_passing_entrance_exam(self):
"""
Test courseware access page after passing entrance exam but with staff user
"""
self.logout()
staff_user = StaffFactory.create(course_key=self.course.id)
self.login(staff_user.email, 'test')
CourseEnrollmentFactory(user=staff_user, course_id=self.course.id)
self._assert_chapter_loaded(self.course, self.chapter)
@patch.dict("django.conf.settings.FEATURES", {'ENTRANCE_EXAMS': False})
def test_courseware_page_access_when_entrance_exams_disabled(self):
"""
Test courseware page access when ENTRANCE_EXAMS feature is disabled
"""
self._assert_chapter_loaded(self.course, self.chapter)
def test_can_skip_entrance_exam_with_anonymous_user(self):
"""
Test can_skip_entrance_exam method with anonymous user
"""
self.assertFalse(user_can_skip_entrance_exam(self.request, self.anonymous_user, self.course))
def test_has_passed_entrance_exam_with_anonymous_user(self):
"""
Test has_passed_entrance_exam method with anonymous user
"""
self.request.user = self.anonymous_user
self.assertFalse(user_has_passed_entrance_exam(self.request, self.course))
def test_course_has_entrance_exam_missing_exam_id(self):
course = CourseFactory.create(
metadata={
'entrance_exam_enabled': True,
}
)
self.assertFalse(course_has_entrance_exam(course))
def test_user_has_passed_entrance_exam_short_circuit_missing_exam(self):
course = CourseFactory.create(
)
self.assertTrue(user_has_passed_entrance_exam(self.request, course))
@patch.dict("django.conf.settings.FEATURES", {'ENABLE_MASQUERADE': False})
def test_entrance_exam_xblock_response(self):
"""
Tests entrance exam xblock has `entrance_exam_passed` key in json response.
"""
request_factory = RequestFactory()
data = {'input_{}_2_1'.format(unicode(self.problem_1.location.html_id())): 'choice_2'}
request = request_factory.post(
'problem_check',
data=data
)
request.user = self.user
response = handle_xblock_callback(
request,
unicode(self.course.id),
unicode(self.problem_1.location),
'xmodule_handler',
'problem_check',
)
self.assertEqual(response.status_code, 200)
self.assertIn('entrance_exam_passed', response.content)
def _assert_chapter_loaded(self, course, chapter):
"""
Asserts courseware chapter load successfully.
"""
url = reverse(
'courseware_chapter',
kwargs={'course_id': unicode(course.id), 'chapter': chapter.url_name}
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def _return_table_of_contents(self):
"""
Returns table of content for the entrance exam specific to this test
Returns the table of contents for course self.course, for chapter
self.entrance_exam, and for section self.exam1
"""
self.field_data_cache = FieldDataCache.cache_for_descriptor_descendents( # pylint: disable=attribute-defined-outside-init
self.course.id,
self.request.user,
self.entrance_exam
)
return toc_for_course(
self.request.user,
self.request,
self.course,
self.entrance_exam.url_name,
self.exam_1.url_name,
self.field_data_cache
)
def answer_entrance_exam_problem(course, request, problem, user=None):
"""
Takes a required milestone `problem` in a `course` and fulfills it.
Args:
course (Course): Course object, the course the required problem is in
request (Request): request Object
problem (xblock): xblock object, the problem to be fulfilled
user (User): User object in case it is different from request.user
"""
if not user:
user = request.user
grade_dict = {'value': 1, 'max_value': 1, 'user_id': user.id}
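    # Publishing a full-credit (1/1) grade per required problem; answering both
    # exam problems this way clears the 0.50 minimum score configured in setUp.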
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course.id,
user,
course,
depth=2
)
# pylint: disable=protected-access
module = get_module(
user,
request,
problem.scope_ids.usage_id,
field_data_cache,
)._xmodule
module.system.publish(problem, 'grade', grade_dict)
def add_entrance_exam_milestone(course, entrance_exam):
"""
Adds the milestone for given `entrance_exam` in `course`
Args:
course (Course): Course object in which the extrance_exam is located
entrance_exam (xblock): the entrance exam to be added as a milestone
"""
namespace_choices = get_namespace_choices()
milestone_relationship_types = get_milestone_relationship_types()
milestone_namespace = generate_milestone_namespace(
namespace_choices.get('ENTRANCE_EXAM'),
course.id
)
milestone = add_milestone(
{
'name': 'Test Milestone',
'namespace': milestone_namespace,
'description': 'Testing Courseware Entrance Exam Chapter',
}
)
add_course_milestone(
unicode(course.id),
milestone_relationship_types['REQUIRES'],
milestone
)
add_course_content_milestone(
unicode(course.id),
unicode(entrance_exam.location),
milestone_relationship_types['FULFILLS'],
milestone
)
| agpl-3.0 | -9,108,926,144,166,880,000 | 39.374242 | 130 | 0.587984 | false | 4.302067 | true | false | false |
scienceopen/madrigal-examples | test_globalisprint.py | 1 | 1869 | #!/usr/bin/env python3
"""
This is not necessarily an efficient way of doing things: we download ASCII via a Madrigal remote filter
and then convert to HDF5 locally, rather than downloading HDF5 directly, but it's an OK way to start.
Tests loading of globalisprint ascii file, resaving as HDF5 for fast data processing
first I clunkily used
globalIsprint.py --verbose --url=http://isr.sri.com/madrigal --parms=DNE,AZM,ELM,NE,UT1 --output=example.txt --startDate="01/01/1950" --endDate="10/31/2007" --inst=61 --kindat=0 --filter azm,90,270
then I ran the code below.
Finally, we demonstrate reading HDF5 into an array.
"""
from numpy import loadtxt #should consider perhaps genfromtxt to handle "missing" values
import h5py
from os.path import splitext,expanduser
from pandas import DataFrame
from time import time
def txt2h5(fn):
h5fn = splitext(expanduser(fn))[0] + '.h5'
print('saving to ' + h5fn)
gc=(1,2,4) # a priori based on the specific globalisprint command, and that numpy.loadtxt can't handle non-numeric values
# get column names
with open(fn,'r') as f:
head = f.readline().split()
# load data
tic = time()
arr = loadtxt(fn,skiprows=1,usecols=gc)
print('loading text data took {:.4f} seconds'.format(time()-tic))
with h5py.File(h5fn,'w',libver='latest') as f:
for i,c in enumerate(gc): #because we only read "good" columns
f[head[c]] = arr[:,i]
return h5fn
def readh5(h5fn):
tic = time()
with h5py.File(h5fn,'r',libver='latest') as f:
df = DataFrame(index=f['UT1'],
data={'AZM':f['AZM'],
'ELM':f['ELM']})
print('loading HDF5 data took {:.4f} seconds'.format(time()-tic))
return df
if __name__ == '__main__':
from sys import argv
h5fn = txt2h5(argv[1]) # ascii to hdf5
df = readh5(h5fn)
| gpl-3.0 | 6,723,598,362,351,025,000 | 34.264151 | 197 | 0.657571 | false | 3.173175 | false | false | false |
rlcode/reinforcement-learning-kr | 1-grid-world/3-monte-carlo/environment.py | 1 | 3707 | import time
import numpy as np
import tkinter as tk
from PIL import ImageTk, Image
np.random.seed(1)
PhotoImage = ImageTk.PhotoImage
UNIT = 100  # pixels per grid cell
HEIGHT = 5  # grid world height (rows)
WIDTH = 5  # grid world width (columns)
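# The canvas is WIDTH x HEIGHT cells of UNIT pixels each: the red rectangle is
# the agent, the two triangles yield -100 reward, and the circle yields +100.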
class Env(tk.Tk):
def __init__(self):
super(Env, self).__init__()
self.action_space = ['u', 'd', 'l', 'r']
self.n_actions = len(self.action_space)
self.title('monte carlo')
self.geometry('{0}x{1}'.format(HEIGHT * UNIT, HEIGHT * UNIT))
self.shapes = self.load_images()
self.canvas = self._build_canvas()
self.texts = []
def _build_canvas(self):
canvas = tk.Canvas(self, bg='white',
height=HEIGHT * UNIT,
width=WIDTH * UNIT)
        # draw the grid lines
for c in range(0, WIDTH * UNIT, UNIT): # 0~400 by 80
x0, y0, x1, y1 = c, 0, c, HEIGHT * UNIT
canvas.create_line(x0, y0, x1, y1)
for r in range(0, HEIGHT * UNIT, UNIT): # 0~400 by 80
x0, y0, x1, y1 = 0, r, HEIGHT * UNIT, r
canvas.create_line(x0, y0, x1, y1)
        # add images to the canvas
self.rectangle = canvas.create_image(50, 50, image=self.shapes[0])
self.triangle1 = canvas.create_image(250, 150, image=self.shapes[1])
self.triangle2 = canvas.create_image(150, 250, image=self.shapes[1])
self.circle = canvas.create_image(250, 250, image=self.shapes[2])
canvas.pack()
return canvas
def load_images(self):
rectangle = PhotoImage(
Image.open("../img/rectangle.png").resize((65, 65)))
triangle = PhotoImage(
Image.open("../img/triangle.png").resize((65, 65)))
circle = PhotoImage(
Image.open("../img/circle.png").resize((65, 65)))
return rectangle, triangle, circle
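    # Map canvas pixel coordinates (cell centres at 50, 150, ...) to the integer
    # grid indices [x, y] that serve as the agent's state.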
@staticmethod
def coords_to_state(coords):
x = int((coords[0] - 50) / 100)
y = int((coords[1] - 50) / 100)
return [x, y]
def reset(self):
self.update()
time.sleep(0.5)
x, y = self.canvas.coords(self.rectangle)
self.canvas.move(self.rectangle, UNIT / 2 - x, UNIT / 2 - y)
return self.coords_to_state(self.canvas.coords(self.rectangle))
def step(self, action):
state = self.canvas.coords(self.rectangle)
base_action = np.array([0, 0])
self.render()
        if action == 0:  # up
if state[1] > UNIT:
base_action[1] -= UNIT
        elif action == 1:  # down
if state[1] < (HEIGHT - 1) * UNIT:
base_action[1] += UNIT
        elif action == 2:  # left
if state[0] > UNIT:
base_action[0] -= UNIT
        elif action == 3:  # right
if state[0] < (WIDTH - 1) * UNIT:
base_action[0] += UNIT
        # move the agent
self.canvas.move(self.rectangle, base_action[0], base_action[1])
        # raise the agent (red rectangle) above other canvas items
self.canvas.tag_raise(self.rectangle)
next_state = self.canvas.coords(self.rectangle)
        # reward function
if next_state == self.canvas.coords(self.circle):
reward = 100
done = True
elif next_state in [self.canvas.coords(self.triangle1),
self.canvas.coords(self.triangle2)]:
reward = -100
done = True
else:
reward = 0
done = False
next_state = self.coords_to_state(next_state)
return next_state, reward, done
def render(self):
time.sleep(0.03)
self.update()
| mit | 2,465,659,495,563,917,000 | 31.297297 | 76 | 0.528591 | false | 3.085198 | false | false | false |
kwotsin/TensorFlow-ENet | predict_segmentation.py | 1 | 4193 | import tensorflow as tf
import os
import matplotlib.pyplot as plt
from enet import ENet, ENet_arg_scope
from preprocessing import preprocess
from scipy.misc import imsave
import numpy as np
slim = tf.contrib.slim
image_dir = './dataset/test/'
images_list = sorted([os.path.join(image_dir, file) for file in os.listdir(image_dir) if file.endswith('.png')])
checkpoint_dir = "./checkpoint_mfb"
checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
num_initial_blocks = 1
skip_connections = False
stage_two_repeat = 2
'''
#Labels to colours are obtained from here:
https://github.com/alexgkendall/SegNet-Tutorial/blob/c922cc4a4fcc7ce279dd998fb2d4a8703f34ebd7/Scripts/test_segmentation_camvid.py
However, the road_marking class is collapsed into the road class in the dataset provided.
Classes:
------------
Sky = [128,128,128]
Building = [128,0,0]
Pole = [192,192,128]
Road_marking = [255,69,0]
Road = [128,64,128]
Pavement = [60,40,222]
Tree = [128,128,0]
SignSymbol = [192,128,128]
Fence = [64,64,128]
Car = [64,0,128]
Pedestrian = [64,64,0]
Bicyclist = [0,128,192]
Unlabelled = [0,0,0]
'''
label_to_colours = {0: [128,128,128],
1: [128,0,0],
2: [192,192,128],
3: [128,64,128],
4: [60,40,222],
5: [128,128,0],
6: [192,128,128],
7: [64,64,128],
8: [64,0,128],
9: [64,64,0],
10: [0,128,192],
11: [0,0,0]}
#Create the photo directory
photo_dir = checkpoint_dir + "/test_images"
if not os.path.exists(photo_dir):
os.mkdir(photo_dir)
#Create a function to convert each pixel label to colour.
def grayscale_to_colour(image):
print 'Converting image...'
image = image.reshape((360, 480, 1))
image = np.repeat(image, 3, axis=-1)
for i in xrange(image.shape[0]):
for j in xrange(image.shape[1]):
label = int(image[i][j][0])
image[i][j] = np.array(label_to_colours[label])
return image
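# A vectorized alternative to the per-pixel loop above (sketch, assuming the same
# 360x480 label maps): build the palette once and index it directly, e.g.
#   palette = np.array([label_to_colours[i] for i in range(12)], dtype=np.uint8)
#   coloured = palette[image.astype(np.uint8).reshape(360, 480)]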
with tf.Graph().as_default() as graph:
images_tensor = tf.train.string_input_producer(images_list, shuffle=False)
reader = tf.WholeFileReader()
key, image_tensor = reader.read(images_tensor)
image = tf.image.decode_png(image_tensor, channels=3)
# image = tf.image.resize_image_with_crop_or_pad(image, 360, 480)
# image = tf.cast(image, tf.float32)
image = preprocess(image)
images = tf.train.batch([image], batch_size = 10, allow_smaller_final_batch=True)
#Create the model inference
with slim.arg_scope(ENet_arg_scope()):
logits, probabilities = ENet(images,
num_classes=12,
batch_size=10,
is_training=True,
reuse=None,
num_initial_blocks=num_initial_blocks,
stage_two_repeat=stage_two_repeat,
skip_connections=skip_connections)
variables_to_restore = slim.get_variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
def restore_fn(sess):
return saver.restore(sess, checkpoint)
predictions = tf.argmax(probabilities, -1)
predictions = tf.cast(predictions, tf.float32)
print 'HERE', predictions.get_shape()
sv = tf.train.Supervisor(logdir=None, init_fn=restore_fn)
with sv.managed_session() as sess:
for i in xrange(len(images_list) / 10 + 1):
segmentations = sess.run(predictions)
# print segmentations.shape
for j in xrange(segmentations.shape[0]):
                #Stop at image index 223 as the remaining frames are repeated
if i*10 + j == 223:
break
converted_image = grayscale_to_colour(segmentations[j])
print 'Saving image %s/%s' %(i*10 + j, len(images_list))
plt.axis('off')
plt.imshow(converted_image)
imsave(photo_dir + "/image_%s.png" %(i*10 + j), converted_image)
# plt.show() | mit | -3,046,598,620,001,116,000 | 33.95 | 129 | 0.577868 | false | 3.414495 | false | false | false |
daineseh/kodi-plugin.video.ted-talks-chinese | youtube_dl/extractor/ruutu.py | 29 | 4297 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urllib_parse_urlparse
from ..utils import (
determine_ext,
int_or_none,
xpath_attr,
xpath_text,
)
class RuutuIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?ruutu\.fi/video/(?P<id>\d+)'
_TESTS = [
{
'url': 'http://www.ruutu.fi/video/2058907',
'md5': 'ab2093f39be1ca8581963451b3c0234f',
'info_dict': {
'id': '2058907',
'ext': 'mp4',
'title': 'Oletko aina halunnut tietää mitä tapahtuu vain hetki ennen lähetystä? - Nyt se selvisi!',
'description': 'md5:cfc6ccf0e57a814360df464a91ff67d6',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 114,
'age_limit': 0,
},
},
{
'url': 'http://www.ruutu.fi/video/2057306',
'md5': '065a10ae4d5b8cfd9d0c3d332465e3d9',
'info_dict': {
'id': '2057306',
'ext': 'mp4',
'title': 'Superpesis: katso koko kausi Ruudussa',
'description': 'md5:da2736052fef3b2bd5e0005e63c25eac',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 40,
'age_limit': 0,
},
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
video_xml = self._download_xml(
'http://gatling.ruutu.fi/media-xml-cache?id=%s' % video_id, video_id)
formats = []
processed_urls = []
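        # Walk the <Clip> tree recursively: *Files nodes are containers, *File
        # nodes carry stream URLs (HLS .m3u8, HDS .f4m, plain HTTP or RTMP).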
def extract_formats(node):
for child in node:
if child.tag.endswith('Files'):
extract_formats(child)
elif child.tag.endswith('File'):
video_url = child.text
if (not video_url or video_url in processed_urls or
any(p in video_url for p in ('NOT_USED', 'NOT-USED'))):
return
processed_urls.append(video_url)
ext = determine_ext(video_url)
if ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
video_url, video_id, 'mp4', m3u8_id='hls', fatal=False))
elif ext == 'f4m':
formats.extend(self._extract_f4m_formats(
video_url, video_id, f4m_id='hds', fatal=False))
else:
proto = compat_urllib_parse_urlparse(video_url).scheme
if not child.tag.startswith('HTTP') and proto != 'rtmp':
continue
preference = -1 if proto == 'rtmp' else 1
label = child.get('label')
tbr = int_or_none(child.get('bitrate'))
format_id = '%s-%s' % (proto, label if label else tbr) if label or tbr else proto
if not self._is_valid_url(video_url, video_id, format_id):
continue
width, height = [int_or_none(x) for x in child.get('resolution', 'x').split('x')[:2]]
formats.append({
'format_id': format_id,
'url': video_url,
'width': width,
'height': height,
'tbr': tbr,
'preference': preference,
})
extract_formats(video_xml.find('./Clip'))
self._sort_formats(formats)
return {
'id': video_id,
'title': xpath_attr(video_xml, './/Behavior/Program', 'program_name', 'title', fatal=True),
'description': xpath_attr(video_xml, './/Behavior/Program', 'description', 'description'),
'thumbnail': xpath_attr(video_xml, './/Behavior/Startpicture', 'href', 'thumbnail'),
'duration': int_or_none(xpath_text(video_xml, './/Runtime', 'duration')),
'age_limit': int_or_none(xpath_text(video_xml, './/AgeLimit', 'age limit')),
'formats': formats,
}
| gpl-2.0 | -5,128,898,413,219,188,000 | 41.078431 | 115 | 0.466449 | false | 3.852783 | false | false | false |
bebound/lknovel | novel.py | 1 | 7902 | import threading
import re
from bs4 import BeautifulSoup
import requests
from global_variable import HAS_QT, HEADERS
if HAS_QT:
from global_variable import SENDER
class Novel():
"""
get novel information for creating epub file
Attributes:
volume_name: A string represent the volume name
volume_number: A string represent the volume number
volume_author: A string represent the author
volume_illustrator: A string represent the illustrator
volume_introduction: A string represent the introduction
volume_cover_url: A string represent the cover_url
chapter_links: A string represent the chapter links
        output_dir: A string represent the epub save path
cover_path: A string represent the cover path
book_name: A string represent the book name
chapter: A list represent the chapter
base_path: A string represent the epub temp path
"""
def __init__(self, url, single_thread):
self.url = url
self.single_thread = single_thread
self.chapters = []
self.volume_name = ''
self.volume_number = ''
self.author = ''
self.illustrator = ''
self.introduction = ''
self.cover_url = ''
self.chapters_links = []
self.base_path = ''
@staticmethod
def parse_page(url):
"""
parse page with BeautifulSoup
Args:
url: A string represent the url to be parsed
Return:
A BeatifulSoup element
"""
r = requests.get(url, headers=HEADERS)
r.encoding = 'utf-8'
return BeautifulSoup(r.text)
@staticmethod
def find_chapter_links(soup):
"""
extract chapter links from page
Args:
soup: A parsed page
Returns:
a list contains the book's chapter links
"""
temp_chapter_links = soup.select(
'body div.content div.container div.row-fluid div.span9 div.well div.row-fluid ul.lk-chapter-list li')
find_chapter_links = re.compile(r'<a href="(.*)">')
chapter_links = []
for i in temp_chapter_links:
chapter_links.append(find_chapter_links.search(str(i)).group(1))
return chapter_links
def find_volume_name_number(self, soup):
name_and_number = str(soup.select('h1.ft-24 strong'))[1:-1].replace('</strong>', '').split('\n')
self.volume_name = name_and_number[1].strip()
self.volume_number = name_and_number[2].strip()
self.print_info('Volume_name:' + self.volume_name + ',Volume_number:' + self.volume_number)
@property
def book_name(self):
return self.volume_name + ' ' + self.volume_number
def find_author_illustrator(self, soup):
temp_author_name = soup.select('table.lk-book-detail td')
find_author_name = re.compile(r'target="_blank">(.*)</a></td>')
find_illustrator_name = re.compile(r'<td>(.*)</td>')
self.author = find_author_name.search(str(temp_author_name[3])).group(1)
self.illustrator = find_illustrator_name.search(str(temp_author_name[5])).group(1)
self.print_info('Author:' + self.author + '\nillustrator:' + self.illustrator)
def find_introduction(self, soup):
temp_introduction = soup.select(
'html body div.content div.container div.row-fluid div.span9 div.well div.row-fluid div.span10 p')
find_introduction = re.compile(r'<p style="width:42em; text-indent: 2em;">(.*)</p>')
self.introduction = find_introduction.search(str(temp_introduction).replace('\n', '')).group(1)
def find_cover_url(self, soup):
temp_cover_url = soup.select(
'div.container div.row-fluid div.span9 div.well div.row-fluid div.span2 div.lk-book-cover a')
find_cover_url = re.compile(r'<img src="(.*)"/>')
self.cover_url = 'http://lknovel.lightnovel.cn' + find_cover_url.search(str(temp_cover_url)).group(1)
def extract_epub_info(self):
"""
        parse the volume page and fill in the volume's basic info
        (name, number, author, illustrator, introduction, cover url
        and chapter links) on this Novel instance
"""
soup = self.parse_page(self.url)
self.find_volume_name_number(soup)
self.find_author_illustrator(soup)
self.find_introduction(soup)
self.find_cover_url(soup)
self.chapters_links = self.find_chapter_links(soup)
@staticmethod
def get_new_chapter_name(soup):
"""
get the formal chapter name
Args:
soup: A parsed page
Returns:
A string contain the chapter name
"""
chapter_name = soup.select('h3.ft-20')[0].get_text()
new_chapter_name = chapter_name[:chapter_name.index('章') + 1] + ' ' + chapter_name[chapter_name.index('章') + 1:]
return new_chapter_name
@staticmethod
def print_info(info):
try:
print(info)
if HAS_QT:
SENDER.sigChangeStatus.emit(info)
except UnicodeDecodeError as e:
print('Ignored:', e)
@staticmethod
def get_content(soup):
"""
extract contents from each page
Args:
soup: parsed page
Return:
A list contain paragraphs of one chapter
"""
content = []
temp_chapter_content = soup.select('div.lk-view-line')
find_picture_url = re.compile(r'data-cover="(.*)" src="')
for line in temp_chapter_content:
if 'lk-view-img' not in str(line):
content.append(line.get_text().strip())
else:
picture_url = find_picture_url.search(str(line)).group(1)
content.append(picture_url)
return content
def add_chapter(self, chapter):
"""
add chapter
chapter structure:a tuple (chapter number,chapter name,content)
"""
self.chapters.append(chapter)
def extract_chapter(self, url, number):
"""
add each chapter's content to the Epub instance
Args:
url: A string represent the chapter url to be added
epub: A Epub instance
number: A int represent the chapter's number
"""
try:
soup = self.parse_page(url)
new_chapter_name = self.get_new_chapter_name(soup)
self.print_info(new_chapter_name)
content = self.get_content(soup)
self.add_chapter((number, new_chapter_name, content))
except Exception as e:
if HAS_QT:
                SENDER.sigWarningMessage.emit('Error', str(e) + '\nat:' + url)
SENDER.sigButton.emit()
print(self.url)
raise e
def get_chapter_content(self):
"""
start extract every chapter in epub
Args:
epub: The Epub instance to be created
"""
th = []
if not self.single_thread:
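            # one worker thread per chapter; each chapter tuple carries its index
            # because threads may finish (and append) out of order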
for i, link in enumerate(self.chapters_links):
t = threading.Thread(target=self.extract_chapter, args=(link, i))
t.start()
th.append(t)
for t in th:
t.join()
else:
for i, link in enumerate(self.chapters_links):
self.extract_chapter(link, i)
def get_novel_information(self):
"""get novel information"""
self.extract_epub_info()
self.get_chapter_content()
        self.print_info('novel information fetched')
def novel_information(self):
return {'chapter': self.chapters, 'volume_name': self.volume_name, 'volume_number': self.volume_number,
'book_name': self.book_name, 'author': self.author,
'illustrator': self.illustrator, 'introduction': self.introduction, 'cover_url': self.cover_url}
| mit | 4,027,273,509,634,045,000 | 32.389831 | 120 | 0.584264 | false | 3.885602 | false | false | false |
paurosello/frappe | frappe/tests/test_global_search.py | 7 | 7660 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import unittest
import frappe
from frappe.utils import global_search
from frappe.test_runner import make_test_objects
import frappe.utils
class TestGlobalSearch(unittest.TestCase):
def setUp(self):
global_search.setup_global_search_table()
self.assertTrue('__global_search' in frappe.db.get_tables())
doctype = "Event"
global_search.reset()
from frappe.custom.doctype.property_setter.property_setter import make_property_setter
make_property_setter(doctype, "subject", "in_global_search", 1, "Int")
make_property_setter(doctype, "event_type", "in_global_search", 1, "Int")
make_property_setter(doctype, "roles", "in_global_search", 1, "Int")
make_property_setter(doctype, "repeat_on", "in_global_search", 0, "Int")
def tearDown(self):
frappe.db.sql('delete from `tabProperty Setter` where doc_type="Event"')
frappe.clear_cache(doctype='Event')
frappe.db.sql('delete from `tabEvent`')
frappe.db.sql('delete from __global_search')
make_test_objects('Event')
frappe.db.commit()
def insert_test_events(self):
frappe.db.sql('delete from tabEvent')
phrases = ['"The Sixth Extinction II: Amor Fati" is the second episode of the seventh season of the American science fiction.',
'After Mulder awakens from his coma, he realizes his duty to prevent alien colonization. ',
'Carter explored themes of extraterrestrial involvement in ancient mass extinctions in this episode, the third in a trilogy.']
for text in phrases:
frappe.get_doc(dict(
doctype='Event',
subject=text,
repeat_on='Every Month',
starts_on=frappe.utils.now_datetime())).insert()
frappe.db.commit()
def test_search(self):
self.insert_test_events()
results = global_search.search('awakens')
self.assertTrue('After Mulder awakens from his coma, he realizes his duty to prevent alien colonization. ' in results[0].content)
results = global_search.search('extraterrestrial')
self.assertTrue('Carter explored themes of extraterrestrial involvement in ancient mass extinctions in this episode, the third in a trilogy.' in results[0].content)
def test_update_doc(self):
self.insert_test_events()
test_subject = 'testing global search'
event = frappe.get_doc('Event', frappe.get_all('Event')[0].name)
event.subject = test_subject
event.save()
frappe.db.commit()
results = global_search.search('testing global search')
self.assertTrue('testing global search' in results[0].content)
def test_update_fields(self):
self.insert_test_events()
results = global_search.search('Every Month')
self.assertEquals(len(results), 0)
doctype = "Event"
from frappe.custom.doctype.property_setter.property_setter import make_property_setter
make_property_setter(doctype, "repeat_on", "in_global_search", 1, "Int")
global_search.rebuild_for_doctype(doctype)
results = global_search.search('Every Month')
self.assertEquals(len(results), 3)
def test_delete_doc(self):
self.insert_test_events()
event_name = frappe.get_all('Event')[0].name
event = frappe.get_doc('Event', event_name)
test_subject = event.subject
results = global_search.search(test_subject)
self.assertEquals(len(results), 1)
frappe.delete_doc('Event', event_name)
results = global_search.search(test_subject)
self.assertEquals(len(results), 0)
def test_insert_child_table(self):
frappe.db.sql('delete from tabEvent')
phrases = ['Hydrus is a small constellation in the deep southern sky. ',
'It was first depicted on a celestial atlas by Johann Bayer in his 1603 Uranometria. ',
'The French explorer and astronomer Nicolas Louis de Lacaille charted the brighter stars and gave their Bayer designations in 1756. ',
'Its name means "male water snake", as opposed to Hydra, a much larger constellation that represents a female water snake. ',
'It remains below the horizon for most Northern Hemisphere observers.',
'The brightest star is the 2.8-magnitude Beta Hydri, also the closest reasonably bright star to the south celestial pole. ',
'Pulsating between magnitude 3.26 and 3.33, Gamma Hydri is a variable red giant some 60 times the diameter of our Sun. ',
'Lying near it is VW Hydri, one of the brightest dwarf novae in the heavens. ',
'Four star systems have been found to have exoplanets to date, most notably HD 10180, which could bear up to nine planetary companions.']
for text in phrases:
doc = frappe.get_doc({
'doctype':'Event',
'subject': text,
'starts_on': frappe.utils.now_datetime()
})
doc.insert()
frappe.db.commit()
def test_get_field_value(self):
cases = [
{
"case_type": "generic",
"data": '''
<style type="text/css"> p.p1 {margin: 0.0px 0.0px 0.0px 0.0px; font: 14.0px 'Open Sans';
-webkit-text-stroke: #000000} span.s1 {font-kerning: none} </style>
<script>
var options = {
foo: "bar"
}
</script>
<p class="p1"><span class="s1">Contrary to popular belief, Lorem Ipsum is not simply random text. It has
roots in a piece of classical Latin literature from 45 BC, making it over 2000 years old. Richard McClintock,
a Latin professor at Hampden-Sydney College in Virginia, looked up one of the more obscure Latin words, consectetur,
from a Lorem Ipsum passage, and going through the cites of the word in classical literature, discovered the undoubtable source.
Lorem Ipsum comes from sections 1.10.32 and 1.10.33 of "de Finibus Bonorum et Malorum" (The Extremes of Good and Evil) by Cicero,
written in 45 BC. This book is a treatise on the theory of ethics, very popular during the Renaissance. The first line of Lorem Ipsum,
"Lorem ipsum dolor sit amet..", comes from a line in section 1.10.32.</span></p>
''',
"result": ('Description : Contrary to popular belief, Lorem Ipsum is not simply random text. It has roots in a piece of classical '
'Latin literature from 45 BC, making it over 2000 years old. Richard McClintock, a Latin professor at Hampden-Sydney College in Virginia, '
'looked up one of the more obscure Latin words, consectetur, from a Lorem Ipsum passage, and going through the cites of the word '
'in classical literature, discovered the undoubtable source. Lorem Ipsum comes from sections 1.10.32 and 1.10.33 of "de Finibus Bonorum '
'et Malorum" (The Extremes of Good and Evil) by Cicero, written in 45 BC. This book is a treatise on the theory of ethics, very popular '
'during the Renaissance. The first line of Lorem Ipsum, "Lorem ipsum dolor sit amet..", comes from a line in section 1.10.32.')
},
{
"case_type": "with_style",
"data": '''
<style type="text/css"> p.p1 {margin: 0.0px 0.0px 0.0px 0.0px; font: 14.0px 'Open Sans';
-webkit-text-stroke: #000000} span.s1 {font-kerning: none} </style>Lorem Ipsum Dolor Sit Amet
''',
"result": "Description : Lorem Ipsum Dolor Sit Amet"
},
{
"case_type": "with_script",
"data": '''
<script>
var options = {
foo: "bar"
}
</script>
Lorem Ipsum Dolor Sit Amet
''',
"result": "Description : Lorem Ipsum Dolor Sit Amet"
}
]
for case in cases:
doc = frappe.get_doc({
'doctype':'Event',
'subject': 'Lorem Ipsum',
'starts_on': frappe.utils.now_datetime(),
'description': case["data"]
})
field_as_text = ''
for field in doc.meta.fields:
if field.fieldname == 'description':
field_as_text = global_search.get_formatted_value(doc.description, field)
self.assertEquals(case["result"], field_as_text)
| mit | 88,331,083,183,987,400 | 42.276836 | 166 | 0.707833 | false | 3.04815 | true | false | false |
CWSL/access-cm-tools | analyse/mom_plot_timeseries.py | 1 | 1854 | #!/usr/bin/env python
import sys
import os
import argparse
import netCDF4 as nc
import numpy as np
import pandas as pd
import re
import datetime
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from lib_util import time_dim_to_pandas_periods
"""
What this script does:
Plot a full timeseries fields from mom ocean_scalar.nc
Example:
"""
def main():
parser = argparse.ArgumentParser()
parser.add_argument('input_files', nargs='+',
help='The MOM ocean_scalar.nc input data files.')
parser.add_argument('--field', default='temp_global_ave',
help='The fields included in this plot.')
parser.add_argument('--output_dir', default='./',
help='Directory where plots will be written.')
args = parser.parse_args()
title = None
ylabel = None
ts = pd.Series()
# Go through input files one at a time building the timeseries as we go.
for file in args.input_files:
with nc.Dataset(file) as f:
time_var = f.variables['time']
data_var = f.variables[args.field]
title = data_var.long_name
ylabel = data_var.units
# Calculate the times/dates, these will be our indices.
periods = time_dim_to_pandas_periods(f.variables['time'])
data = f.variables[args.field][:]
assert(data.shape[1] == 1)
data = data.flatten()
new_ts = pd.Series(data, periods)
ts = ts.append(new_ts)
ts = ts.sort_index()
plot = ts.plot()
plt.xlabel('Time (years)')
plt.ylabel(ylabel)
plt.title(title)
fig = matplotlib.pyplot.gcf()
fig.set_size_inches(9,4.5)
plt.savefig(os.path.join(args.output_dir, '{}.png'.format(args.field)))
if __name__ == '__main__':
sys.exit(main())
| apache-2.0 | -1,058,263,979,802,645,000 | 24.39726 | 76 | 0.606257 | false | 3.693227 | false | false | false |
CGATOxford/Optic | scripts/gpipe/translate_forward2backward.py | 1 | 4075 | ##########################################################################
#
# MRC FGU Computational Genomics Group
#
# $Id$
#
# Copyright (C) 2009 Andreas Heger
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
##########################################################################
'''
gpipe/translate_forward2backward.py -
======================================================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
.. todo::
describe purpose of the script.
Usage
-----
Example::
python gpipe/translate_forward2backward.py --help
Type::
python gpipe/translate_forward2backward.py --help
for command line help.
Documentation
-------------
Code
----
'''
import os
import sys
import string
import re
import getopt
import tempfile
import time
import popen2
USAGE = """python %s [OPTIONS]
Version: $Id: gpipe/translate_forward2backward.py 18 2005-08-09 15:32:24Z andreas $
Wrapper for running gene predictions.
Options:
-h, --help print this message.
-v, --verbose= loglevel.
""" % sys.argv[0]
param_long_options = ["verbose=", "help",
"bracket-increment=", "query-border=",
"border-refinement=",
"exit-identical", "min-score=", "method=",
"recursive", "refinement", "probe", "incremental",
"exons=", "mask-probe", "format=",
"probe-options=", "version"]
param_short_options = "v:hi:b:em:procx:af:"
param_columns = (1, 2, 3, 4)
param_filename_contigs = "contig_sizes"
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if argv is None:
argv = sys.argv
try:
optlist, args = getopt.getopt(
sys.argv[1:], param_short_options, param_long_options)
except getopt.error, msg:
print USAGE, msg
sys.exit(2)
for o, a in optlist:
if o in ("-v", "--verbose"):
param_loglevel = int(a)
elif o in ("--version", ):
print "version="
sys.exit(0)
elif o in ("-h", "--help"):
print USAGE
sys.exit(0)
elif o in ("-b", "--query-border"):
param_query_border = int(a)
contig_sizes = {}
infile = open(param_filename_contigs, "r")
for line in infile:
if line[0] == "#":
continue
sbjct_token, size, offset = line[:-1].split("\t")
contig_sizes[sbjct_token] = int(size)
for line in sys.stdin:
if line[0] == "#":
continue
data = line[:-1].split("\t")
sbjct_token, sbjct_strand, sbjct_from, sbjct_to = (
data[param_columns[0]],
data[param_columns[1]],
data[param_columns[2]],
data[param_columns[3]])
sbjct_from, sbjct_to = int(sbjct_from), int(sbjct_to)
        # Hits on the reverse strand are flipped into forward-strand coordinates:
        # an interval [from, to] on "-" becomes [size - to, size - from].
        if sbjct_strand == "-" or sbjct_strand == "-1":
if contig_sizes.has_key(sbjct_token):
size = contig_sizes[sbjct_token]
sbjct_from, sbjct_to = size - sbjct_to, size - sbjct_from
data[param_columns[2]] = sbjct_from
data[param_columns[3]] = sbjct_to
print string.join(map(str, data), "\t")
if __name__ == "__main__":
sys.exit(main(sys.argv))
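
# Worked example of the strand flip performed in main() (illustrative numbers):
#
#   size = 1000
#   sbjct_from, sbjct_to = 100, 250   # interval reported on the "-" strand
#   sbjct_from, sbjct_to = size - sbjct_to, size - sbjct_from
#   # -> (750, 900) in forward-strand coordinates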
| mit | 8,499,062,192,981,369,000 | 24.955414 | 83 | 0.551656 | false | 3.615794 | false | false | false |
Impactstory/oadoi | put_repo_requests_in_db.py | 2 | 5955 | import csv
import os
import json
import gspread
import datetime
import re
import unicodecsv as csv
from app import db
from util import safe_commit
from emailer import send
from emailer import create_email
from endpoint import Endpoint
from repository import Repository
from repo_request import RepoRequest
def get_repo_request_rows():
from oauth2client.service_account import ServiceAccountCredentials
# this file inspired by https://www.twilio.com/blog/2017/02/an-easy-way-to-read-and-write-to-a-google-spreadsheet-in-python.html
# use creds to create a client to interact with the Google Drive API
scopes = ['https://spreadsheets.google.com/feeds']
json_creds = os.getenv("GOOGLE_SHEETS_CREDS_JSON")
creds_dict = json.loads(json_creds)
# hack to get around ugly new line escaping issues
# this works for me, but later found links to what might be cleaner solutions:
# use ast.literal_eval? https://github.com/googleapis/google-api-go-client/issues/185#issuecomment-422732250
# or maybe dumping like this might fix it? https://coreyward.svbtle.com/how-to-send-a-multiline-file-to-heroku-config
creds_dict["private_key"] = creds_dict["private_key"].replace("\\\\n", "\n")
# now continue
creds = ServiceAccountCredentials.from_json_keyfile_dict(creds_dict, scopes)
client = gspread.authorize(creds)
# Find a workbook by url
spreadsheet = client.open_by_url("https://docs.google.com/spreadsheets/d/1RcQuetbKVYRRf0GhGZQi38okY8gT1cPUs6l3RM94yQo/edit#gid=704459328")
sheet = spreadsheet.sheet1
# Extract and print all of the values
rows = sheet.get_all_values()
print(rows[0:1])
return rows
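
# Illustrative shape of the GOOGLE_SHEETS_CREDS_JSON value consumed above (a
# Google service-account key with values elided; only the fields the code relies
# on are meaningful here, the rest are examples):
#
#   {"type": "service_account",
#    "private_key": "-----BEGIN PRIVATE KEY-----\\n...\\n-----END PRIVATE KEY-----\\n",
#    "client_email": "[email protected]", ...}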
def save_repo_request_rows(rows):
with open('out.csv','wb') as f:
w = csv.DictWriter(f, fieldnames=RepoRequest.list_fieldnames(), encoding='utf-8-sig')
for row in rows[1:]: # skip header row
my_repo_request = RepoRequest()
my_repo_request.set_id_seed(row[0])
column_num = 0
for fieldname in RepoRequest.list_fieldnames():
if fieldname != "id":
setattr(my_repo_request, fieldname, row[column_num])
column_num += 1
w.writerow(my_repo_request.to_dict())
print u"adding repo request {}".format(my_repo_request)
db.session.merge(my_repo_request)
safe_commit(db)
def add_endpoint(my_request):
if not my_request.pmh_url:
return None
endpoint_with_this_id = Endpoint.query.filter(Endpoint.repo_request_id==my_request.id).first()
if endpoint_with_this_id:
print u"one already matches {}".format(my_request.id)
return None
raw_endpoint = my_request.pmh_url
clean_endpoint = raw_endpoint.strip()
clean_endpoint = clean_endpoint.strip("?")
clean_endpoint = re.sub(u"\?verb=.*$", "", clean_endpoint, re.IGNORECASE)
clean_endpoint = re.sub(u"^https?://api\.unpaywall\.org/repository/endpoint/test/", "", clean_endpoint, re.IGNORECASE)
print u"raw endpoint is {}, clean endpoint is {}".format(raw_endpoint, clean_endpoint)
matching_endpoint = Endpoint()
matching_endpoint.pmh_url = clean_endpoint
repo_matches = my_request.matching_repositories()
if repo_matches:
matching_repo = repo_matches[0]
print u"yay! for {} {} matches repository {}".format(
my_request.institution_name, my_request.repo_name, matching_repo)
else:
print u"no matching repository for {}: {}".format(
my_request.institution_name, my_request.repo_name)
matching_repo = Repository()
# overwrite stuff with request
matching_repo.institution_name = my_request.institution_name
matching_repo.repository_name = my_request.repo_name
matching_repo.home_page = my_request.repo_home_page
matching_endpoint.repo_unique_id = matching_repo.id
matching_endpoint.email = my_request.email
matching_endpoint.repo_request_id = my_request.id
matching_endpoint.ready_to_run = True
matching_endpoint.set_identify_and_initial_query()
db.session.merge(matching_endpoint)
db.session.merge(matching_repo)
print u"added {} {}".format(matching_endpoint, matching_repo)
print u"see at url http://unpaywall.org/sources/repository/{}".format(matching_endpoint.id)
safe_commit(db)
print "saved"
print "now sending email"
# get the endpoint again, so it gets with all the meta info etc
matching_endpoint = Endpoint.query.get(matching_endpoint.id)
matching_endpoint.contacted_text = "automated welcome email"
matching_endpoint.contacted = datetime.datetime.utcnow().isoformat()
safe_commit(db)
send_announcement_email(matching_endpoint)
print "email sent"
return matching_endpoint
def send_announcement_email(my_endpoint):
my_endpoint_id = my_endpoint.id
email_address = my_endpoint.email
repo_name = my_endpoint.repo.repository_name
institution_name = my_endpoint.repo.institution_name
print my_endpoint_id, email_address, repo_name, institution_name
# prep email
email = create_email(email_address,
"Update on your Unpaywall indexing request (ref: {} )".format(my_endpoint_id),
"repo_pulse",
{"data": {"endpoint_id": my_endpoint_id, "repo_name": repo_name, "institution_name": institution_name}},
[])
send(email, for_real=True)
if __name__ == "__main__":
rows = get_repo_request_rows()
save_repo_request_rows(rows)
my_requests = RepoRequest.query.all()
for my_request in my_requests:
if not my_request.is_duplicate:
add_endpoint(my_request)
# my_endpoints = Endpoint.query.filter(Endpoint.contacted_text=="automated welcome email")
# for my_endpoint in my_endpoints:
# print "would send an email to {}".format(my_endpoint)
# send_announcement_email(my_endpoint)
| mit | -9,057,802,710,195,520,000 | 36.689873 | 142 | 0.68094 | false | 3.591677 | false | false | false |
doda/imagy | imagy/smush/scratch.py | 1 | 1099 | import os, sys, tempfile
class Scratch (object):
    """Owns a named temporary file on disk and reopens it on demand."""
def __init__ (self):
tup = tempfile.mkstemp()
self._path = tup[1]
self._file = os.fdopen(tup[0])
self._file.close()
def __del__ (self):
pass
#if self._path != None:
# self.destruct()
def destruct (self):
self.close()
os.unlink(self._path)
self._path = None
self._file = None
def close (self):
if self._file.closed == False:
self._file.flush()
self._file.close()
def read (self):
if self._file.closed == True:
self._reopen()
self._file.seek(0)
return self._file.read()
def _reopen (self):
self._file = open(self._path, 'w+')
def getopened (self):
self.close()
self._reopen()
return self._file
opened = property(getopened, NotImplemented, NotImplemented, "opened file - read only")
def getfile (self):
return self._file
file = property(getfile, NotImplemented, NotImplemented, "file - read only")
| bsd-2-clause | 3,726,509,410,770,841,600 | 24.55814 | 91 | 0.532302 | false | 3.897163 | false | false | false |
Fenykepy/phiroom | src/api/portfolio/serializers.py | 1 | 1312 | from rest_framework import serializers
from librairy.models import Picture
from portfolio.models import Portfolio, PortfolioPicture
class PortfolioSerializer(serializers.ModelSerializer):
pub_date = serializers.DateTimeField(required=False, allow_null=True)
pictures = serializers.SerializerMethodField()
url = serializers.HyperlinkedIdentityField(
view_name='portfolio-detail',
lookup_field='slug'
)
class Meta:
model = Portfolio
fields = ('url', 'title', 'draft',
'author', 'pictures',
'pub_date', 'slug', 'order',
)
read_only_fields = ('slug', 'author')
def get_pictures(self, object):
        # DRF does not preserve the ordering of the many-to-many relation,
        # so build the ordered list of picture ids manually.
return object.get_pictures().values_list('picture', flat=True)
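
    # Illustrative output of this serializer (all field values are made up):
    #
    #   {"url": "https://example.org/api/portfolios/travels/", "title": "Travels",
    #    "draft": false, "author": 1, "pictures": [12, 7, 3],
    #    "pub_date": null, "slug": "travels", "order": 0}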
class PortfolioPictureSerializer(serializers.ModelSerializer):
portfolio = serializers.SlugRelatedField(
slug_field="slug",
queryset=Portfolio.objects.all()
)
class Meta:
model = PortfolioPicture
fields = ('portfolio', 'picture', 'order')
class PortfolioHeadSerializer(PortfolioSerializer):
class Meta:
model = Portfolio
fields = ('title', 'slug')
| agpl-3.0 | -2,746,211,875,505,529,300 | 26.914894 | 73 | 0.645579 | false | 4.301639 | false | false | false |
sahildua2305/eden | controllers/hms.py | 2 | 19534 | # -*- coding: utf-8 -*-
"""
HMS Hospital Status Assessment and Request Management System
"""
module = request.controller
resourcename = request.function
if not settings.has_module(module):
raise HTTP(404, body="Module disabled: %s" % module)
# -----------------------------------------------------------------------------
def s3_menu_postp():
# @todo: rewrite this for new framework
if len(request.args) > 0 and request.args[0].isdigit():
newreq = dict(from_record="hms_hospital.%s" % request.args[0],
from_fields="hospital_id$id")
#selreq = {"req.hospital_id":request.args[0]}
else:
newreq = dict()
selreq = {"req.hospital_id__ne":"NONE"}
menu_selected = []
hospital_id = s3mgr.get_session("hms", "hospital")
if hospital_id:
hospital = s3db.hms_hospital
query = (hospital.id == hospital_id)
record = db(query).select(hospital.id,
hospital.name,
limitby=(0, 1)).first()
if record:
name = record.name
menu_selected.append(["%s: %s" % (T("Hospital"), name), False,
URL(f="hospital",
args=[record.id])])
if menu_selected:
menu_selected = [T("Open recent"), True, None, menu_selected]
response.menu_options.append(menu_selected)
# -----------------------------------------------------------------------------
def index():
""" Module's Home Page """
return s3db.cms_index(module, alt_function="index_alt")
# -----------------------------------------------------------------------------
def index_alt():
"""
Module homepage for non-Admin users when no CMS content found
"""
# Just redirect to the Hospitals Map
redirect(URL(f="hospital", args=["map"]))
# -----------------------------------------------------------------------------
def ltc():
""" Filtered REST Controller """
s3.filter = (s3db.hms_hospital.facility_type == 31)
return hospital()
# -----------------------------------------------------------------------------
def marker_fn(record):
"""
Function to decide which Marker to use for Hospital Map
@ToDo: Legend
@ToDo: Move to Templates
@ToDo: Use Symbology
"""
stable = db.hms_status
status = db(stable.hospital_id == record.id).select(stable.facility_status,
limitby=(0, 1)
).first()
if record.facility_type == 31:
marker = "special_needs"
else:
marker = "hospital"
if status:
if status.facility_status == 1:
# Normal
marker = "%s_green" % marker
elif status.facility_status in (3, 4):
# Evacuating or Closed
marker = "%s_red" % marker
elif status.facility_status == 2:
# Compromised
marker = "%s_yellow" % marker
mtable = db.gis_marker
marker = db(mtable.name == marker).select(mtable.image,
mtable.height,
mtable.width,
cache=s3db.cache,
limitby=(0, 1)).first()
return marker
# -----------------------------------------------------------------------------
def hospital():
""" Main REST controller for hospital data """
table = s3db.hms_hospital
# Load Models to add tabs
if settings.has_module("inv"):
s3db.table("inv_inv_item")
elif settings.has_module("req"):
# (gets loaded by Inv if available)
s3db.table("req_req")
# Pre-processor
def prep(r):
# Location Filter
s3db.gis_location_filter(r)
if r.interactive:
if r.component:
if r.component.name == "inv_item" or \
r.component.name == "recv" or \
r.component.name == "send":
# Filter out items which are already in this inventory
s3db.inv_prep(r)
elif r.component.name == "human_resource":
# Filter out people which are already staff for this hospital
s3base.s3_filter_staff(r)
# Make it clear that this is for adding new staff, not assigning existing
s3.crud_strings.hrm_human_resource.label_create_button = T("Add New Staff Member")
# Cascade the organisation_id from the hospital to the staff
field = s3db.hrm_human_resource.organisation_id
field.default = r.record.organisation_id
field.writable = False
elif r.component.name == "req":
if r.method != "update" and r.method != "read":
# Hide fields which don't make sense in a Create form
# inc list_create (list_fields over-rides)
s3db.req_create_form_mods()
elif r.component.name == "status":
table = db.hms_status
table.facility_status.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Facility Status"),
T("Status of the facility.")))
table.facility_operations.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Facility Operations"),
T("Overall status of the facility operations.")))
table.clinical_status.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Clinical Status"),
T("Status of the clinical departments.")))
table.clinical_operations.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Clinical Operations"),
T("Overall status of the clinical operations.")))
table.ems_status.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Emergency Medical Services"),
T("Status of operations/availability of emergency medical services at this facility.")))
table.ems_reason.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("EMS Status Reasons"),
T("Report the contributing factors for the current EMS status.")))
table.or_status.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("OR Status"),
T("Status of the operating rooms of this facility.")))
table.or_reason.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("OR Status Reason"),
T("Report the contributing factors for the current OR status.")))
table.morgue_status.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Morgue Status"),
T("Status of morgue capacity.")))
table.morgue_units.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Morgue Units Available"),
T("Number of vacant/available units to which victims can be transported immediately.")))
table.security_status.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Security Status"),
T("Status of security procedures/access restrictions for the facility.")))
table.staffing.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Staffing Level"),
T("Current staffing level at the facility.")))
table.access_status.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Road Conditions"),
T("Describe the condition of the roads from/to the facility.")))
elif r.component.name == "bed_capacity":
table = db.hms_bed_capacity
table.bed_type.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Bed Type"),
T("Specify the bed type of this unit.")))
table.beds_baseline.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Baseline Number of Beds"),
T("Baseline number of beds of that type in this unit.")))
table.beds_available.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Available Beds"),
T("Number of available/vacant beds of that type in this unit at the time of reporting.")))
table.beds_add24.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Additional Beds / 24hrs"),
T("Number of additional beds of that type expected to become available in this unit within the next 24 hours.")))
elif r.component.name == "activity":
table = db.hms_activity
table.date.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Date & Time"),
T("Date and time this report relates to.")))
table.patients.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Patients"),
T("Number of in-patients at the time of reporting.")))
table.admissions24.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Admissions/24hrs"),
T("Number of newly admitted patients during the past 24 hours.")))
table.discharges24.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Discharges/24hrs"),
T("Number of discharged patients during the past 24 hours.")))
table.deaths24.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Deaths/24hrs"),
T("Number of deaths during the past 24 hours.")))
elif r.component.name == "contact":
table = db.hms_contact
table.title.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Title"),
T("The Role this person plays within this hospital.")))
elif r.component.name == "image":
table = s3db.doc_image
table.location_id.readable = table.location_id.writable = False
table.organisation_id.readable = table.organisation_id.writable = False
table.person_id.readable = table.person_id.writable = False
elif r.component.name == "ctc":
table = db.hms_ctc
table.ctc.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Cholera Treatment Center"),
T("Does this facility provide a cholera treatment center?")))
table.number_of_patients.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Current number of patients"),
T("How many patients with the disease are currently hospitalized at this facility?")))
table.cases_24.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("New cases in the past 24h"),
T("How many new cases have been admitted to this facility in the past 24h?")))
table.deaths_24.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Deaths in the past 24h"),
T("How many of the patients with the disease died in the past 24h at this facility?")))
table.icaths_available.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Infusion catheters available"),
T("Specify the number of available sets")))
table.icaths_needed_24.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Infusion catheters need per 24h"),
T("Specify the number of sets needed per 24h")))
table.infusions_available.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Infusions available"),
T("Specify the number of available units (litres) of Ringer-Lactate or equivalent solutions")))
table.infusions_needed_24.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Infusions needed per 24h"),
T("Specify the number of units (litres) of Ringer-Lactate or equivalent solutions needed per 24h")))
table.antibiotics_available.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Antibiotics available"),
T("Specify the number of available units (adult doses)")))
table.antibiotics_needed_24.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Antibiotics needed per 24h"),
T("Specify the number of units (adult doses) needed per 24h")))
table.problem_types.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Current problems, categories"),
T("Select all that apply")))
table.problem_details.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Current problems, details"),
T("Please specify any problems and obstacles with the proper handling of the disease, in detail (in numbers, where appropriate). You may also add suggestions the situation could be improved.")))
else:
table = r.table
if r.id:
table.obsolete.readable = table.obsolete.writable = True
elif r.method == "map":
# Tell the client to request per-feature markers
s3db.configure("hms_hospital", marker_fn=marker_fn)
s3.formats["have"] = r.url() # .have added by JS
# Add comments
table.gov_uuid.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Government UID"),
T("The Unique Identifier (UUID) as assigned to this facility by the government.")))
table.total_beds.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Total Beds"),
T("Total number of beds in this facility. Automatically updated from daily reports.")))
table.available_beds.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Available Beds"),
T("Number of vacant/available beds in this facility. Automatically updated from daily reports.")))
elif r.representation == "aadata":
pass
# Hide the Implied fields here too to make columns match
#db.rms_req.shelter_id.readable = False
#db.rms_req.organisation_id.readable = False
elif r.representation == "plain":
# Duplicates info in the other fields
r.table.location_id.readable = False
elif r.representation == "geojson":
# Load these models now as they'll be needed when we encode
mtable = s3db.gis_marker
stable = s3db.hms_status
s3db.configure("hms_hospital", marker_fn=marker_fn)
return True
s3.prep = prep
if "map" in request.args:
# S3Map has migrated
hide_filter = False
else:
# Not yet ready otherwise
hide_filter = True
output = s3_rest_controller(rheader=s3db.hms_hospital_rheader,
hide_filter=hide_filter,
)
return output
# -----------------------------------------------------------------------------
def incoming():
""" Incoming Shipments """
return inv_incoming()
# -----------------------------------------------------------------------------
def req_match():
""" Match Requests """
return s3db.req_match()
# END =========================================================================
| mit | -315,330,539,855,334,660 | 57.837349 | 268 | 0.412563 | false | 5.17457 | false | false | false |
kiki86151/CKIP | PyCKIP/PyCCP.py | 1 | 1481 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#########################################################################
# File Name: PyCCP.py
# Author: Carson Wang
# mail: [email protected]
# Created Time: 2017-03-04 21:48:51
#########################################################################
import urllib, urllib2, cookielib, re
def parseTree(string):
if not isinstance(string, unicode):
try:
string = string.decode('utf-8')
except:
            raise UnicodeError('Input encoding should be UTF8 or UNICODE')
string = string.encode('cp950')
URL = 'http://parser.iis.sinica.edu.tw/'
cj = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
opener.addheaders = [
('User-Agent', 'Mozilla/5.0 Gecko/20100101 Firefox/29.0'),
('referer', 'http://parser.iis.sinica.edu.tw/'),
('Host', 'parser.iis.sinica.edu.tw')
]
    # Fetch the form page through the cookie-aware opener so the session id
    # extracted below matches the stored cookies.
    raw = opener.open(URL).read()
fid = re.search('name="id" value="(\d+)"', raw).group(1)
postdata = dict()
postdata['myTag'] = string
postdata['id'] = fid
postdata = urllib.urlencode(postdata)
resURL = 'http://parser.iis.sinica.edu.tw/svr/webparser.asp'
res = opener.open(resURL, postdata).read()
res = res.decode('cp950')
res = re.findall('<nobr>#\d+:(.*?)</nobr>', res)
return res
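
# Illustrative usage (assumes the CKIP parsing service at parser.iis.sinica.edu.tw
# is reachable; the input sentence is just an example):
#
#   trees = parseTree(u'他每天騎腳踏車上班')
#   for tree in trees:
#       print tree.encode('utf-8')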
| bsd-3-clause | 8,865,408,649,147,205,000 | 30.510638 | 86 | 0.505739 | false | 3.768448 | false | false | false |
JordanReiter/django-messages | django_messages/views.py | 1 | 9916 | import re
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.utils.translation import ugettext as _
from django.utils import timezone
from django.core.urlresolvers import reverse
from django.conf import settings
from django_messages.models import Message
from django_messages.forms import ComposeForm, ComposeToForm
from django_messages.utils import format_quote, get_user_model, get_username_field
User = get_user_model()
if "notification" in settings.INSTALLED_APPS and getattr(settings, 'DJANGO_MESSAGES_NOTIFY', True):
from notification import models as notification
else:
notification = None
@login_required
def inbox(request, template_name='django_messages/inbox.html'):
"""
Displays a list of received messages for the current user.
Optional Arguments:
``template_name``: name of the template to use.
"""
message_list = Message.objects.inbox_for(request.user)
return render_to_response(template_name, {
'message_list': message_list,
}, context_instance=RequestContext(request))
@login_required
def outbox(request, template_name='django_messages/outbox.html'):
"""
Displays a list of sent messages by the current user.
Optional arguments:
``template_name``: name of the template to use.
"""
message_list = Message.objects.outbox_for(request.user)
return render_to_response(template_name, {
'message_list': message_list,
}, context_instance=RequestContext(request))
@login_required
def trash(request, template_name='django_messages/trash.html'):
"""
Displays a list of deleted messages.
Optional arguments:
``template_name``: name of the template to use
    Hint: A Cron-Job could periodically clean up old messages, which are deleted
by sender and recipient.
"""
message_list = Message.objects.trash_for(request.user)
return render_to_response(template_name, {
'message_list': message_list,
}, context_instance=RequestContext(request))
@login_required
def compose(request, recipient=None, form_class=ComposeForm,
recipient_form_class=ComposeToForm,
template_name='django_messages/compose.html', success_url=None,
recipient_filter=None, recipient_format=None):
"""
Displays and handles the ``form_class`` form to compose new messages.
Required Arguments: None
Optional Arguments:
``recipient``: username of a `django.contrib.auth` User, who should
receive the message, optionally multiple usernames
could be separated by a '+'
``form_class``: the form-class to use
``template_name``: the template to use
        ``success_url``: where to redirect after successful submission
"""
if recipient:
recipients = User.objects.filter(
**{
'%s__in' % get_username_field(): [
rr for rr in re.split(r'[+,\s]+', recipient) if rr
]
}
)
else:
recipients = None
if request.method == "POST":
sender = request.user
if recipients:
form = recipient_form_class(
request.POST,
recipients=recipients,
recipient_filter=recipient_filter,
recipient_format=recipient_format
)
else:
form = form_class(request.POST, recipient_filter=recipient_filter)
if form.is_valid():
form.save(sender=request.user)
messages.info(request, _(u"Message successfully sent."))
if success_url is None:
success_url = reverse('messages_inbox')
if 'next' in request.GET:
success_url = request.GET['next']
return HttpResponseRedirect(success_url)
else:
if recipient is not None:
form = recipient_form_class(recipients = recipients, recipient_format=recipient_format)
else:
form = form_class()
return render_to_response(template_name, {
'form': form,
}, context_instance=RequestContext(request))
@login_required
def reply(request, message_id, form_class=ComposeToForm,
template_name='django_messages/compose.html', success_url=None,
recipient_filter=None, recipient_format=None,
quote_helper=format_quote,
subject_template=_(u"Re: %(subject)s"),):
"""
Prepares the ``form_class`` form for writing a reply to a given message
(specified via ``message_id``). Uses the ``format_quote`` helper from
``messages.utils`` to pre-format the quote. To change the quote format
assign a different ``quote_helper`` kwarg in your url-conf.
"""
parent = get_object_or_404(Message, id=message_id)
if parent.sender != request.user and parent.recipient != request.user:
raise Http404
if request.method == "POST":
sender = request.user
form = form_class(request.POST, recipients=[parent.sender],
recipient_filter=recipient_filter, recipient_format=recipient_format
)
if form.is_valid():
form.save(sender=request.user, parent_msg=parent)
messages.info(request, _(u"Message successfully sent."))
if success_url is None:
success_url = reverse('messages_inbox')
return HttpResponseRedirect(success_url)
else:
form = form_class(recipients=[parent.sender], initial={
'body': quote_helper(parent.sender, parent.body),
'subject': subject_template % {'subject': parent.subject},
'recipient': [parent.sender,]
}, recipient_format=recipient_format)
return render_to_response(template_name, {
'form': form,
}, context_instance=RequestContext(request))
@login_required
def delete(request, message_id, success_url=None):
"""
Marks a message as deleted by sender or recipient. The message is not
really removed from the database, because two users must delete a message
    before it's safe to remove it completely.
A cron-job should prune the database and remove old messages which are
deleted by both users.
As a side effect, this makes it easy to implement a trash with undelete.
You can pass ?next=/foo/bar/ via the url to redirect the user to a different
page (e.g. `/foo/bar/`) than ``success_url`` after deletion of the message.
"""
user = request.user
now = timezone.now()
message = get_object_or_404(Message, id=message_id)
deleted = False
if success_url is None:
success_url = reverse('messages_inbox')
if 'next' in request.GET:
success_url = request.GET['next']
if message.sender == user:
message.sender_deleted_at = now
deleted = True
if message.recipient == user:
message.recipient_deleted_at = now
deleted = True
if deleted:
message.save()
messages.info(request, _(u"Message successfully deleted."))
if notification:
notification.send([user], "messages_deleted", {'message': message,})
return HttpResponseRedirect(success_url)
raise Http404
@login_required
def undelete(request, message_id, success_url=None):
"""
Recovers a message from trash. This is achieved by removing the
``(sender|recipient)_deleted_at`` from the model.
"""
user = request.user
message = get_object_or_404(Message, id=message_id)
undeleted = False
if success_url is None:
success_url = reverse('messages_inbox')
if 'next' in request.GET:
success_url = request.GET['next']
if message.sender == user:
message.sender_deleted_at = None
undeleted = True
if message.recipient == user:
message.recipient_deleted_at = None
undeleted = True
if undeleted:
message.save()
messages.info(request, _(u"Message successfully recovered."))
if notification:
notification.send([user], "messages_recovered", {'message': message,})
return HttpResponseRedirect(success_url)
raise Http404
@login_required
def view(request, message_id, form_class=ComposeToForm, quote_helper=format_quote,
subject_template=_(u"Re: %(subject)s"), recipient_format=None,
template_name='django_messages/view.html'):
"""
Shows a single message.``message_id`` argument is required.
The user is only allowed to see the message, if he is either
the sender or the recipient. If the user is not allowed a 404
is raised.
If the user is the recipient and the message is unread
``read_at`` is set to the current datetime.
If the user is the recipient a reply form will be added to the
    template context, otherwise 'reply_form' will be None.
"""
user = request.user
now = timezone.now()
message = get_object_or_404(Message, id=message_id)
if (message.sender != user) and (message.recipient != user):
raise Http404
if message.read_at is None and message.recipient == user:
message.read_at = now
message.save()
context = {'message': message, 'reply_form': None}
if message.recipient == user:
form = form_class(
recipients = [message.sender,],
initial={
'body': quote_helper(message.sender, message.body),
'subject': subject_template % {'subject': message.subject},
'recipient': [message.sender,]
},
recipient_format = recipient_format
)
context['reply_form'] = form
return render_to_response(template_name, context,
context_instance=RequestContext(request))
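
# Illustrative URLconf sketch (not part of this module). The view names must match
# the reverse() calls above; 'messages_inbox' is the only name referenced in this
# file, and the patterns themselves are assumptions:
#
#   urlpatterns = [
#       url(r'^inbox/$', inbox, name='messages_inbox'),
#       url(r'^compose/$', compose, name='messages_compose'),
#       url(r'^view/(?P<message_id>\d+)/$', view, name='messages_detail'),
#   ]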
| bsd-3-clause | -8,744,500,396,322,902,000 | 38.193676 | 99 | 0.649556 | false | 4.205259 | false | false | false |
GoogleCloudPlatform/cloudml-samples | census/keras/trainer/model.py | 1 | 6274 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements the Keras Sequential model."""
from builtins import range
import keras
from keras import backend as K
from keras import layers
from keras import models
from keras.backend import relu
import pandas as pd
import tensorflow as tf
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.saved_model.signature_def_utils_impl import \
predict_signature_def
# CSV columns in the input file.
CSV_COLUMNS = ('age', 'workclass', 'fnlwgt', 'education', 'education_num',
'marital_status', 'occupation', 'relationship', 'race', 'gender',
'capital_gain', 'capital_loss', 'hours_per_week',
'native_country', 'income_bracket')
CSV_COLUMN_DEFAULTS = [[0], [''], [0], [''], [0], [''], [''], [''], [''], [''],
[0], [0], [0], [''], ['']]
# Categorical columns with vocab size
# native_country and fnlwgt are ignored
CATEGORICAL_COLS = (('education', 16), ('marital_status', 7),
('relationship', 6), ('workclass', 9), ('occupation', 15),
('gender', [' Male', ' Female']), ('race', 5))
CONTINUOUS_COLS = ('age', 'education_num', 'capital_gain', 'capital_loss',
'hours_per_week')
LABELS = [' <=50K', ' >50K']
LABEL_COLUMN = 'income_bracket'
UNUSED_COLUMNS = set(CSV_COLUMNS) - set(
list(zip(*CATEGORICAL_COLS))[0] + CONTINUOUS_COLS + (LABEL_COLUMN,))
def model_fn(input_dim,
labels_dim,
hidden_units=[100, 70, 50, 20],
learning_rate=0.1):
"""Create a Keras Sequential model with layers.
Args:
input_dim: (int) Input dimensions for input layer.
labels_dim: (int) Label dimensions for input layer.
hidden_units: [int] the layer sizes of the DNN (input layer first)
learning_rate: (float) the learning rate for the optimizer.
Returns:
A Keras model.
"""
# "set_learning_phase" to False to avoid:
  # AbortionError(code=StatusCode.INVALID_ARGUMENT) during online prediction.
K.set_learning_phase(False)
model = models.Sequential()
for units in hidden_units:
model.add(
layers.Dense(units=units, input_dim=input_dim, activation=relu))
input_dim = units
# Add a dense final layer with sigmoid function.
model.add(layers.Dense(labels_dim, activation='sigmoid'))
compile_model(model, learning_rate)
return model
def compile_model(model, learning_rate):
model.compile(
loss='binary_crossentropy',
optimizer=keras.optimizers.Adam(lr=learning_rate),
metrics=['accuracy'])
return model
def to_savedmodel(model, export_path):
"""Convert the Keras HDF5 model into TensorFlow SavedModel."""
builder = saved_model_builder.SavedModelBuilder(export_path)
signature = predict_signature_def(
inputs={'input': model.inputs[0]}, outputs={'income': model.outputs[0]})
with K.get_session() as sess:
builder.add_meta_graph_and_variables(
sess=sess,
tags=[tag_constants.SERVING],
signature_def_map={
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature
})
builder.save()
def to_numeric_features(features, feature_cols=None):
"""Converts the pandas input features to numeric values.
Args:
    features: Input features in the data:
      age (continuous), workclass (categorical), fnlwgt (continuous),
      education (categorical), education_num (continuous),
      marital_status (categorical), occupation (categorical),
      relationship (categorical), race (categorical), gender (categorical),
      capital_gain (continuous), capital_loss (continuous),
      hours_per_week (continuous), native_country (categorical)
feature_cols: Column list of converted features to be returned. Optional,
may be used to ensure schema consistency over multiple executions.
Returns:
A pandas dataframe.
"""
for col in CATEGORICAL_COLS:
features = pd.concat(
[features, pd.get_dummies(features[col[0]], drop_first=True)],
axis=1)
features.drop(col[0], axis=1, inplace=True)
# Remove the unused columns from the dataframe.
for col in UNUSED_COLUMNS:
features.pop(col)
# Re-index dataframe (if categories list changed from the previous dataset)
if feature_cols is not None:
features = features.T.reindex(feature_cols).T.fillna(0)
return features
def generator_input(filenames, chunk_size, batch_size=64):
"""Produce features and labels needed by keras fit_generator."""
feature_cols = None
while True:
input_reader = pd.read_csv(
tf.gfile.Open(filenames[0]),
names=CSV_COLUMNS,
chunksize=chunk_size,
na_values=' ?')
for input_data in input_reader:
input_data = input_data.dropna()
label = pd.get_dummies(input_data.pop(LABEL_COLUMN))
input_data = to_numeric_features(input_data, feature_cols)
# Retains schema for next chunk processing.
if feature_cols is None:
feature_cols = input_data.columns
idx_len = input_data.shape[0]
for index in range(0, idx_len, batch_size):
yield (input_data.iloc[index:min(idx_len, index + batch_size)],
label.iloc[index:min(idx_len, index + batch_size)])
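
# Illustrative wiring of the helpers above (the CSV path and hyper-parameters are
# assumptions, not part of the original sample):
#
#   train_gen = generator_input(['census/adult.data.csv'], chunk_size=5000)
#   features, labels = next(train_gen)
#   model = model_fn(input_dim=features.shape[1], labels_dim=labels.shape[1])
#   model.fit_generator(generator=train_gen, steps_per_epoch=100, epochs=1)
#   to_savedmodel(model, export_path='export/1')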
| apache-2.0 | -3,561,300,868,377,775,000 | 34.851429 | 80 | 0.640421 | false | 3.960859 | false | false | false |
bkuczenski/lca-tools | antelope_v2_server/antelope/lc_pub.py | 1 | 5428 | """
A data structure describing the publication of an LCA data resource.
Each publication has two forms:
(1) the serialized form lives in the antelope directory and provides enough information to reconstitute the
This object is supposed to provide the basic information and functionality common to both v1 and v2 resources,
each of which is a subclass with specialized properties.
"""
import os
import json
from .authorization import allowed_interfaces, PrivacyDeclaration
class CatalogRequired(Exception):
pass
class LcPub(object):
"""
Abstract class that handles de/serialization and common features
"""
_type = None
@property
def name(self):
raise NotImplementedError
def serialize(self):
raise NotImplementedError
def write_to_file(self, path):
if os.path.exists(os.path.join(path, self.name)):
raise FileExistsError('Resource is already specified')
with open(os.path.join(path, self.name), 'w') as fp:
json.dump(self.serialize(), fp, indent=2, sort_keys=True)
class AntelopeV1Pub(LcPub):
"""
An Antelope V1 publication is a record of a ForegroundStudy and a list of supported LCIA methods. In order to
create it, we need to pass the things necessary to create the ForegroundStudy. but since that class doesn't exist
yet, neither does this.
Conceptually, we need:
- a CatalogRef for the study's top level fragment
     - an iterable of lcia methods, specified either by catalog ref or by uuid (given that lcia methods should be
       uniquely determined)
- an optional mapping between entity refs and indices for 'flows', 'flowproperties', 'processes', 'fragments'
: otherwise these are determined by the order encountered when traversing the top level fragment and children
"""
_type = 'Antelope_v1'
@property
def name(self):
return self._foreground
def __init__(self, foreground, fragment_ref, lcia_methods=None, mapping=None):
"""
:param foreground:
:param fragment_ref:
:param lcia_methods:
:param mapping:
"""
self._foreground = foreground
if not fragment_ref.resolved:
raise CatalogRequired('Fragment ref is not grounded!')
self._fragment = fragment_ref
self._lcia = lcia_methods or []
mapping = mapping or dict()
if not isinstance(mapping, dict):
raise TypeError('Mapping must be a dict')
self._mapping = mapping # ultimately this needs to be populated by traversing the fragment
self._reverse_mapping = dict()
self._populate_mapping()
self._reverse_map()
def _populate_mapping(self):
"""
Beginning at the top-level fragment, traverse the model and identify all local fragments (parent + child)
encountered during a traversal. From that, derive a list of stage names, flows, processes, and flow properties,
and ensure that all are present in the mapping.
:return:
"""
@staticmethod
def _enum(lst):
return {k: i for i, k in enumerate(lst)}
def _reverse_map(self):
self._reverse_mapping['lcia'] = self._enum(self._lcia)
for k in 'flow', 'flowproperty', 'fragment', 'process', 'stage':
self._reverse_mapping[k] = self._enum(self._mapping[k])
def serialize(self):
return {
'type': self._type,
'name': self.name,
'fragment': self._fragment.link,
'lcia': self._lcia,
'mapping': self._mapping
}
class AntelopeV2Pub(LcPub):
"""
An Antelope V2 publication is a catalog-supported publication of a complete LCA data resource, denoted by semantic
origin. It is instantiated essentially in the form of a CatalogQuery, which very little else to do, other than
a privacy specification.
"""
_type = 'Antelope_v2'
@property
def name(self):
return self._query.origin
@property
def query(self):
return self._query
def __init__(self, query, interfaces=allowed_interfaces, privacy=None):
"""
:param query: a grounded query
:param interfaces: interfaces to allow access
:param privacy: a privacy specification: either a blanket number or a dict.
if None, all information is public (though limited to the named interfaces)
if a number, all queries must be authorized with a privacy score lower than or equal to the number
if a dict, queries having the specified scope must authorize with a privacy score lower than or equal to the
corresponding value. The lowest privacy score is 0, so a negative number means authorization is not possible.
Only keys in the list of known scopes are retained
"""
self._query = query
if isinstance(interfaces, str):
interfaces = (interfaces,)
self._interfaces = tuple(k for k in interfaces if k in allowed_interfaces)
if isinstance(privacy, dict):
self._scopes = PrivacyDeclaration.from_dict(privacy)
else:
self._scopes = PrivacyDeclaration(privacy)
def serialize(self):
return {
'type': self._type,
'name': self.name,
'interfaces': self._interfaces,
'privacy': self._scopes.serialize()
}
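
    # Illustrative serialized form (origin, interfaces and privacy values are
    # examples only):
    #
    #   {"type": "Antelope_v2", "name": "local.uslci",
    #    "interfaces": ("basic", "index"),
    #    "privacy": {...}}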
| gpl-2.0 | 8,605,270,400,607,402,000 | 33.138365 | 120 | 0.649042 | false | 4.35634 | false | false | false |
PKRoma/poedit | deps/boost/tools/build/test/TestCmd.py | 7 | 20915 | """
TestCmd.py: a testing framework for commands and scripts.
The TestCmd module provides a framework for portable automated testing of
executable commands and scripts (in any language, not just Python), especially
commands and scripts that require file system interaction.
In addition to running tests and evaluating conditions, the TestCmd module
manages and cleans up one or more temporary workspace directories, and provides
methods for creating files and directories in those workspace directories from
in-line data (here-documents), allowing tests to be completely self-contained.
A TestCmd environment object is created via the usual invocation:
test = TestCmd()
The TestCmd module provides pass_test(), fail_test(), and no_result() unbound
methods that report test results for use with the Aegis change management
system. These methods terminate the test immediately, reporting PASSED, FAILED
or NO RESULT respectively and exiting with status 0 (success), 1 or 2
respectively. This allows for a distinction between an actual failed test and a
test that could not be properly evaluated because of an external condition (such
as a full file system or incorrect permissions).
"""
# Copyright 2000 Steven Knight
# This module is free software, and you may redistribute it and/or modify
# it under the same terms as Python itself, so long as this copyright message
# and disclaimer are retained in their original form.
#
# IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
# SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
# THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#
# THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
# AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
# Copyright 2002-2003 Vladimir Prus.
# Copyright 2002-2003 Dave Abrahams.
# Copyright 2006 Rene Rivera.
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
from string import join, split
__author__ = "Steven Knight <[email protected]>"
__revision__ = "TestCmd.py 0.D002 2001/08/31 14:56:12 software"
__version__ = "0.02"
from types import *
import os
import os.path
import re
import shutil
import stat
import subprocess
import sys
import tempfile
import traceback
tempfile.template = 'testcmd.'
_Cleanup = []
def _clean():
global _Cleanup
list = _Cleanup[:]
_Cleanup = []
list.reverse()
for test in list:
test.cleanup()
sys.exitfunc = _clean
def caller(tblist, skip):
string = ""
arr = []
for file, line, name, text in tblist:
if file[-10:] == "TestCmd.py":
break
arr = [(file, line, name, text)] + arr
atfrom = "at"
for file, line, name, text in arr[skip:]:
if name == "?":
name = ""
else:
name = " (" + name + ")"
string = string + ("%s line %d of %s%s\n" % (atfrom, line, file, name))
atfrom = "\tfrom"
return string
def fail_test(self=None, condition=True, function=None, skip=0):
"""Cause the test to fail.
By default, the fail_test() method reports that the test FAILED and exits
with a status of 1. If a condition argument is supplied, the test fails
only if the condition is true.
"""
if not condition:
return
if not function is None:
function()
of = ""
desc = ""
sep = " "
if not self is None:
if self.program:
of = " of " + join(self.program, " ")
sep = "\n\t"
if self.description:
desc = " [" + self.description + "]"
sep = "\n\t"
at = caller(traceback.extract_stack(), skip)
sys.stderr.write("FAILED test" + of + desc + sep + at + """
in directory: """ + os.getcwd() )
sys.exit(1)
def no_result(self=None, condition=True, function=None, skip=0):
"""Causes a test to exit with no valid result.
By default, the no_result() method reports NO RESULT for the test and
exits with a status of 2. If a condition argument is supplied, the test
fails only if the condition is true.
"""
if not condition:
return
if not function is None:
function()
of = ""
desc = ""
sep = " "
if not self is None:
if self.program:
of = " of " + self.program
sep = "\n\t"
if self.description:
desc = " [" + self.description + "]"
sep = "\n\t"
at = caller(traceback.extract_stack(), skip)
sys.stderr.write("NO RESULT for test" + of + desc + sep + at)
sys.exit(2)
def pass_test(self=None, condition=True, function=None):
"""Causes a test to pass.
By default, the pass_test() method reports PASSED for the test and exits
with a status of 0. If a condition argument is supplied, the test passes
only if the condition is true.
"""
if not condition:
return
if not function is None:
function()
sys.stderr.write("PASSED\n")
sys.exit(0)
class MatchError(object):
def __init__(self, message):
self.message = message
def __nonzero__(self):
return False
def __bool__(self):
return False
def match_exact(lines=None, matches=None):
"""
    Returns whether the given lists, or strings containing newline-separated
    lines, hold exactly the same data.
"""
if not type(lines) is ListType:
lines = split(lines, "\n")
if not type(matches) is ListType:
matches = split(matches, "\n")
    # Compare the common prefix line by line; length differences are reported
    # as missing or extra lines below.
    for i in range(min(len(lines), len(matches))):
        if lines[i] != matches[i]:
return MatchError("Mismatch at line %d\n- %s\n+ %s\n" %
(i+1, matches[i], lines[i]))
if len(lines) < len(matches):
return MatchError("Missing lines at line %d\n- %s" %
(len(lines), "\n- ".join(matches[len(lines):])))
if len(lines) > len(matches):
return MatchError("Extra lines at line %d\n+ %s" %
(len(matches), "\n+ ".join(lines[len(matches):])))
return 1
def match_re(lines=None, res=None):
"""
    Given lists, or strings containing newline-separated lines, this function
    matches the lines one by one, interpreting the lines in the res parameter
    as regular expressions.
"""
if not type(lines) is ListType:
lines = split(lines, "\n")
if not type(res) is ListType:
res = split(res, "\n")
for i in range(min(len(lines), len(res))):
if not re.compile("^" + res[i] + "$").search(lines[i]):
return MatchError("Mismatch at line %d\n- %s\n+ %s\n" %
(i+1, res[i], lines[i]))
if len(lines) < len(res):
return MatchError("Missing lines at line %d\n- %s" %
(len(lines), "\n- ".join(res[len(lines):])))
if len(lines) > len(res):
return MatchError("Extra lines at line %d\n+ %s" %
(len(res), "\n+ ".join(lines[len(res):])))
return 1
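# Illustrative usage of the match helpers (the strings below are assumed
# examples for documentation, not part of the original module):
#
#   match_exact("hello 42\n", "hello 42\n")   # -> 1 (exact match)
#   match_re("hello 42\n", r"hello \d+\n")    # -> 1 (every line matches its regex)
#   match_re("hello x\n", r"hello \d+\n")     # -> MatchError (falsy) with details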
class TestCmd:
def __init__(self, description=None, program=None, workdir=None,
subdir=None, verbose=False, match=None, inpath=None):
self._cwd = os.getcwd()
self.description_set(description)
self.program_set(program, inpath)
self.verbose_set(verbose)
if match is None:
self.match_func = match_re
else:
self.match_func = match
self._dirlist = []
self._preserve = {'pass_test': 0, 'fail_test': 0, 'no_result': 0}
env = os.environ.get('PRESERVE')
if env:
self._preserve['pass_test'] = env
self._preserve['fail_test'] = env
self._preserve['no_result'] = env
else:
env = os.environ.get('PRESERVE_PASS')
if env is not None:
self._preserve['pass_test'] = env
env = os.environ.get('PRESERVE_FAIL')
if env is not None:
self._preserve['fail_test'] = env
            env = os.environ.get('PRESERVE_NO_RESULT')
            if env is not None:
                self._preserve['no_result'] = env
self._stdout = []
self._stderr = []
self.status = None
self.condition = 'no_result'
self.workdir_set(workdir)
self.subdir(subdir)
def __del__(self):
self.cleanup()
def __repr__(self):
return "%x" % id(self)
def cleanup(self, condition=None):
"""
Removes any temporary working directories for the specified TestCmd
environment. If the environment variable PRESERVE was set when the
TestCmd environment was created, temporary working directories are not
removed. If any of the environment variables PRESERVE_PASS,
PRESERVE_FAIL or PRESERVE_NO_RESULT were set when the TestCmd
environment was created, then temporary working directories are not
removed if the test passed, failed or had no result, respectively.
Temporary working directories are also preserved for conditions
specified via the preserve method.
Typically, this method is not called directly, but is used when the
script exits to clean up temporary working directories as appropriate
for the exit status.
"""
if not self._dirlist:
return
if condition is None:
condition = self.condition
if self._preserve[condition]:
for dir in self._dirlist:
print("Preserved directory %s" % dir)
else:
list = self._dirlist[:]
list.reverse()
for dir in list:
self.writable(dir, 1)
shutil.rmtree(dir, ignore_errors=1)
self._dirlist = []
self.workdir = None
os.chdir(self._cwd)
try:
global _Cleanup
_Cleanup.remove(self)
except (AttributeError, ValueError):
pass
def description_set(self, description):
"""Set the description of the functionality being tested."""
self.description = description
def fail_test(self, condition=True, function=None, skip=0):
"""Cause the test to fail."""
if not condition:
return
self.condition = 'fail_test'
fail_test(self = self,
condition = condition,
function = function,
skip = skip)
def match(self, lines, matches):
"""Compare actual and expected file contents."""
return self.match_func(lines, matches)
def match_exact(self, lines, matches):
"""Compare actual and expected file content exactly."""
return match_exact(lines, matches)
def match_re(self, lines, res):
"""Compare file content with a regular expression."""
return match_re(lines, res)
def no_result(self, condition=True, function=None, skip=0):
"""Report that the test could not be run."""
if not condition:
return
self.condition = 'no_result'
no_result(self = self,
condition = condition,
function = function,
skip = skip)
def pass_test(self, condition=True, function=None):
"""Cause the test to pass."""
if not condition:
return
self.condition = 'pass_test'
pass_test(self, condition, function)
def preserve(self, *conditions):
"""
Arrange for the temporary working directories for the specified
TestCmd environment to be preserved for one or more conditions. If no
conditions are specified, arranges for the temporary working
directories to be preserved for all conditions.
"""
        if not conditions:
conditions = ('pass_test', 'fail_test', 'no_result')
for cond in conditions:
self._preserve[cond] = 1
def program_set(self, program, inpath):
"""Set the executable program or script to be tested."""
if not inpath and program and not os.path.isabs(program[0]):
program[0] = os.path.join(self._cwd, program[0])
self.program = program
def read(self, file, mode='rb'):
"""
Reads and returns the contents of the specified file name. The file
name may be a list, in which case the elements are concatenated with
the os.path.join() method. The file is assumed to be under the
temporary working directory unless it is an absolute path name. The I/O
mode for the file may be specified and must begin with an 'r'. The
default is 'rb' (binary read).
"""
if type(file) is ListType:
file = apply(os.path.join, tuple(file))
if not os.path.isabs(file):
file = os.path.join(self.workdir, file)
if mode[0] != 'r':
raise ValueError, "mode must begin with 'r'"
return open(file, mode).read()
def run(self, program=None, arguments=None, chdir=None, stdin=None,
universal_newlines=True):
"""
Runs a test of the program or script for the test environment.
Standard output and error output are saved for future retrieval via the
stdout() and stderr() methods.
        The 'universal_newlines' parameter controls how the child process's
        input/output streams are opened, as defined for the identically named
        parameter of the Python subprocess.Popen constructor.
"""
if chdir:
if not os.path.isabs(chdir):
chdir = os.path.join(self.workpath(chdir))
if self.verbose:
sys.stderr.write("chdir(" + chdir + ")\n")
else:
chdir = self.workdir
cmd = []
if program and program[0]:
if program[0] != self.program[0] and not os.path.isabs(program[0]):
program[0] = os.path.join(self._cwd, program[0])
cmd += program
else:
cmd += self.program
if arguments:
cmd += arguments.split(" ")
if self.verbose:
sys.stderr.write(join(cmd, " ") + "\n")
p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=chdir,
universal_newlines=universal_newlines)
if stdin:
if type(stdin) is ListType:
stdin = "".join(stdin)
out, err = p.communicate(stdin)
self._stdout.append(out)
self._stderr.append(err)
self.status = p.returncode
if self.verbose:
sys.stdout.write(self._stdout[-1])
sys.stderr.write(self._stderr[-1])
def stderr(self, run=None):
"""
Returns the error output from the specified run number. If there is
no specified run number, then returns the error output of the last run.
If the run number is less than zero, then returns the error output from
that many runs back from the current run.
"""
if not run:
run = len(self._stderr)
elif run < 0:
run = len(self._stderr) + run
run -= 1
if run < 0:
return ''
return self._stderr[run]
def stdout(self, run=None):
"""
Returns the standard output from the specified run number. If there
is no specified run number, then returns the standard output of the
last run. If the run number is less than zero, then returns the
standard output from that many runs back from the current run.
"""
if not run:
run = len(self._stdout)
elif run < 0:
run = len(self._stdout) + run
run -= 1
if run < 0:
return ''
return self._stdout[run]
def subdir(self, *subdirs):
"""
Create new subdirectories under the temporary working directory, one
for each argument. An argument may be a list, in which case the list
elements are concatenated using the os.path.join() method.
Subdirectories multiple levels deep must be created using a separate
argument for each level:
test.subdir('sub', ['sub', 'dir'], ['sub', 'dir', 'ectory'])
Returns the number of subdirectories actually created.
"""
count = 0
for sub in subdirs:
if sub is None:
continue
if type(sub) is ListType:
sub = apply(os.path.join, tuple(sub))
new = os.path.join(self.workdir, sub)
try:
os.mkdir(new)
except:
pass
else:
count += 1
return count
def unlink(self, file):
"""
Unlinks the specified file name. The file name may be a list, in
which case the elements are concatenated using the os.path.join()
method. The file is assumed to be under the temporary working directory
unless it is an absolute path name.
"""
if type(file) is ListType:
file = apply(os.path.join, tuple(file))
if not os.path.isabs(file):
file = os.path.join(self.workdir, file)
os.unlink(file)
def verbose_set(self, verbose):
"""Set the verbose level."""
self.verbose = verbose
def workdir_set(self, path):
"""
Creates a temporary working directory with the specified path name.
If the path is a null string (''), a unique directory name is created.
"""
if os.path.isabs(path):
self.workdir = path
else:
if path != None:
if path == '':
path = tempfile.mktemp()
if path != None:
os.mkdir(path)
self._dirlist.append(path)
global _Cleanup
try:
_Cleanup.index(self)
except ValueError:
_Cleanup.append(self)
# We would like to set self.workdir like this:
# self.workdir = path
# But symlinks in the path will report things differently from
# os.getcwd(), so chdir there and back to fetch the canonical
# path.
cwd = os.getcwd()
os.chdir(path)
self.workdir = os.getcwd()
os.chdir(cwd)
else:
self.workdir = None
def workpath(self, *args):
"""
Returns the absolute path name to a subdirectory or file within the
current temporary working directory. Concatenates the temporary working
directory name with the specified arguments using os.path.join().
"""
return apply(os.path.join, (self.workdir,) + tuple(args))
def writable(self, top, write):
"""
Make the specified directory tree writable (write == 1) or not
(write == None).
"""
def _walk_chmod(arg, dirname, names):
st = os.stat(dirname)
os.chmod(dirname, arg(st[stat.ST_MODE]))
for name in names:
fullname = os.path.join(dirname, name)
st = os.stat(fullname)
os.chmod(fullname, arg(st[stat.ST_MODE]))
_mode_writable = lambda mode: stat.S_IMODE(mode|0200)
_mode_non_writable = lambda mode: stat.S_IMODE(mode&~0200)
if write:
f = _mode_writable
else:
f = _mode_non_writable
try:
os.path.walk(top, _walk_chmod, f)
except:
pass # Ignore any problems changing modes.
def write(self, file, content, mode='wb'):
"""
Writes the specified content text (second argument) to the specified
file name (first argument). The file name may be a list, in which case
the elements are concatenated using the os.path.join() method. The file
is created under the temporary working directory. Any subdirectories in
the path must already exist. The I/O mode for the file may be specified
and must begin with a 'w'. The default is 'wb' (binary write).
"""
if type(file) is ListType:
file = apply(os.path.join, tuple(file))
if not os.path.isabs(file):
file = os.path.join(self.workdir, file)
if mode[0] != 'w':
raise ValueError, "mode must begin with 'w'"
open(file, mode).write(content)
| mit | -138,059,334,429,083,740 | 33.570248 | 80 | 0.589099 | false | 4.185511 | true | false | false |
uffejakobsen/libsigrokdecode | decoders/can/pd.py | 3 | 21034 | ##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2012-2013 Uwe Hermann <[email protected]>
## Copyright (C) 2019 Stephan Thiele <[email protected]>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
from common.srdhelper import bitpack_msb
import sigrokdecode as srd
class SamplerateError(Exception):
pass
def dlc2len(dlc):
return [0, 1, 2, 3, 4, 5, 6, 7, 8, 12, 16, 20, 24, 32, 48, 64][dlc]
class Decoder(srd.Decoder):
api_version = 3
id = 'can'
name = 'CAN'
longname = 'Controller Area Network'
desc = 'Field bus protocol for distributed realtime control.'
license = 'gplv2+'
inputs = ['logic']
outputs = ['can']
tags = ['Automotive']
channels = (
{'id': 'can_rx', 'name': 'CAN RX', 'desc': 'CAN bus line'},
)
options = (
{'id': 'nominal_bitrate', 'desc': 'Nominal bitrate (bits/s)', 'default': 1000000},
{'id': 'fast_bitrate', 'desc': 'Fast bitrate (bits/s)', 'default': 2000000},
{'id': 'sample_point', 'desc': 'Sample point (%)', 'default': 70.0},
)
annotations = (
('data', 'Payload data'),
('sof', 'Start of frame'),
('eof', 'End of frame'),
('id', 'Identifier'),
('ext-id', 'Extended identifier'),
('full-id', 'Full identifier'),
('ide', 'Identifier extension bit'),
('reserved-bit', 'Reserved bit 0 and 1'),
('rtr', 'Remote transmission request'),
('srr', 'Substitute remote request'),
('dlc', 'Data length count'),
('crc-sequence', 'CRC sequence'),
('crc-delimiter', 'CRC delimiter'),
('ack-slot', 'ACK slot'),
('ack-delimiter', 'ACK delimiter'),
('stuff-bit', 'Stuff bit'),
('warning', 'Warning'),
('bit', 'Bit'),
)
annotation_rows = (
('bits', 'Bits', (15, 17)),
('fields', 'Fields', tuple(range(15))),
('warnings', 'Warnings', (16,)),
)
def __init__(self):
self.reset()
def reset(self):
self.samplerate = None
self.reset_variables()
def start(self):
self.out_ann = self.register(srd.OUTPUT_ANN)
self.out_python = self.register(srd.OUTPUT_PYTHON)
def set_bit_rate(self, bitrate):
self.bit_width = float(self.samplerate) / float(bitrate)
self.sample_point = (self.bit_width / 100.0) * self.options['sample_point']
def set_nominal_bitrate(self):
self.set_bit_rate(self.options['nominal_bitrate'])
def set_fast_bitrate(self):
self.set_bit_rate(self.options['fast_bitrate'])
def metadata(self, key, value):
if key == srd.SRD_CONF_SAMPLERATE:
self.samplerate = value
self.bit_width = float(self.samplerate) / float(self.options['nominal_bitrate'])
self.sample_point = (self.bit_width / 100.0) * self.options['sample_point']
# Generic helper for CAN bit annotations.
def putg(self, ss, es, data):
left, right = int(self.sample_point), int(self.bit_width - self.sample_point)
self.put(ss - left, es + right, self.out_ann, data)
# Single-CAN-bit annotation using the current samplenum.
def putx(self, data):
self.putg(self.samplenum, self.samplenum, data)
# Single-CAN-bit annotation using the samplenum of CAN bit 12.
def put12(self, data):
self.putg(self.ss_bit12, self.ss_bit12, data)
# Single-CAN-bit annotation using the samplenum of CAN bit 32.
def put32(self, data):
self.putg(self.ss_bit32, self.ss_bit32, data)
# Multi-CAN-bit annotation from self.ss_block to current samplenum.
def putb(self, data):
self.putg(self.ss_block, self.samplenum, data)
def putpy(self, data):
self.put(self.ss_packet, self.es_packet, self.out_python, data)
def reset_variables(self):
self.state = 'IDLE'
self.sof = self.frame_type = self.dlc = None
self.rawbits = [] # All bits, including stuff bits
self.bits = [] # Only actual CAN frame bits (no stuff bits)
self.curbit = 0 # Current bit of CAN frame (bit 0 == SOF)
self.last_databit = 999 # Positive value that bitnum+x will never match
self.ss_block = None
self.ss_bit12 = None
self.ss_bit32 = None
self.ss_databytebits = []
self.frame_bytes = []
self.rtr_type = None
self.fd = False
self.rtr = None
# Poor man's clock synchronization. Use signal edges which change to
# dominant state in rather simple ways. This naive approach is neither
# aware of the SYNC phase's width nor the specific location of the edge,
# but improves the decoder's reliability when the input signal's bitrate
# does not exactly match the nominal rate.
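    # Illustrative numbers (assumed for documentation only): with
    # bit_width = 10 samples and sample_point = 7, a dominant edge seen at
    # sample 1000 during bit 5 places bit 8's sample point at
    # 1000 + 10 * (8 - 5) + 7 = 1037 (see get_sample_point() below).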
def dom_edge_seen(self, force = False):
self.dom_edge_snum = self.samplenum
self.dom_edge_bcount = self.curbit
# Determine the position of the next desired bit's sample point.
def get_sample_point(self, bitnum):
samplenum = self.dom_edge_snum
samplenum += self.bit_width * (bitnum - self.dom_edge_bcount)
samplenum += self.sample_point
return int(samplenum)
def is_stuff_bit(self):
# CAN uses NRZ encoding and bit stuffing.
# After 5 identical bits, a stuff bit of opposite value is added.
# But not in the CRC delimiter, ACK, and end of frame fields.
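        # Example (assumed bit values, for illustration only): after five
        # equal raw bits 0 0 0 0 0 the transmitter inserts a recessive stuff
        # bit, so the raw stream reads 0 0 0 0 0 1 while the logical frame
        # still contains only the five zeroes.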
if len(self.bits) > self.last_databit + 17:
return False
last_6_bits = self.rawbits[-6:]
if last_6_bits not in ([0, 0, 0, 0, 0, 1], [1, 1, 1, 1, 1, 0]):
return False
# Stuff bit. Keep it in self.rawbits, but drop it from self.bits.
self.bits.pop() # Drop last bit.
return True
def is_valid_crc(self, crc_bits):
return True # TODO
def decode_error_frame(self, bits):
pass # TODO
def decode_overload_frame(self, bits):
pass # TODO
# Both standard and extended frames end with CRC, CRC delimiter, ACK,
# ACK delimiter, and EOF fields. Handle them in a common function.
# Returns True if the frame ended (EOF), False otherwise.
def decode_frame_end(self, can_rx, bitnum):
# Remember start of CRC sequence (see below).
if bitnum == (self.last_databit + 1):
self.ss_block = self.samplenum
if self.fd:
if dlc2len(self.dlc) < 16:
self.crc_len = 27 # 17 + SBC + stuff bits
else:
self.crc_len = 32 # 21 + SBC + stuff bits
else:
self.crc_len = 15
# CRC sequence (15 bits, 17 bits or 21 bits)
elif bitnum == (self.last_databit + self.crc_len):
if self.fd:
if dlc2len(self.dlc) < 16:
crc_type = "CRC-17"
else:
crc_type = "CRC-21"
else:
crc_type = "CRC-15"
x = self.last_databit + 1
crc_bits = self.bits[x:x + self.crc_len + 1]
self.crc = bitpack_msb(crc_bits)
self.putb([11, ['%s sequence: 0x%04x' % (crc_type, self.crc),
'%s: 0x%04x' % (crc_type, self.crc), '%s' % crc_type]])
if not self.is_valid_crc(crc_bits):
self.putb([16, ['CRC is invalid']])
# CRC delimiter bit (recessive)
elif bitnum == (self.last_databit + self.crc_len + 1):
self.putx([12, ['CRC delimiter: %d' % can_rx,
'CRC d: %d' % can_rx, 'CRC d']])
if can_rx != 1:
self.putx([16, ['CRC delimiter must be a recessive bit']])
if self.fd:
self.set_nominal_bitrate()
# ACK slot bit (dominant: ACK, recessive: NACK)
elif bitnum == (self.last_databit + self.crc_len + 2):
ack = 'ACK' if can_rx == 0 else 'NACK'
self.putx([13, ['ACK slot: %s' % ack, 'ACK s: %s' % ack, 'ACK s']])
# ACK delimiter bit (recessive)
elif bitnum == (self.last_databit + self.crc_len + 3):
self.putx([14, ['ACK delimiter: %d' % can_rx,
'ACK d: %d' % can_rx, 'ACK d']])
if can_rx != 1:
self.putx([16, ['ACK delimiter must be a recessive bit']])
# Remember start of EOF (see below).
elif bitnum == (self.last_databit + self.crc_len + 4):
self.ss_block = self.samplenum
# End of frame (EOF), 7 recessive bits
elif bitnum == (self.last_databit + self.crc_len + 10):
self.putb([2, ['End of frame', 'EOF', 'E']])
if self.rawbits[-7:] != [1, 1, 1, 1, 1, 1, 1]:
self.putb([16, ['End of frame (EOF) must be 7 recessive bits']])
self.es_packet = self.samplenum
py_data = tuple([self.frame_type, self.fullid, self.rtr_type,
self.dlc, self.frame_bytes])
self.putpy(py_data)
self.reset_variables()
return True
return False
# Returns True if the frame ended (EOF), False otherwise.
def decode_standard_frame(self, can_rx, bitnum):
# Bit 14: FDF (Flexible data format)
# Has to be sent dominant when FD frame, has to be sent recessive
# when classic CAN frame.
if bitnum == 14:
self.fd = True if can_rx else False
if self.fd:
self.putx([7, ['Flexible data format: %d' % can_rx,
'FDF: %d' % can_rx, 'FDF']])
else:
self.putx([7, ['Reserved bit 0: %d' % can_rx,
'RB0: %d' % can_rx, 'RB0']])
if self.fd:
# Bit 12: Substitute remote request (SRR) bit
self.put12([8, ['Substitute remote request', 'SRR']])
self.dlc_start = 18
else:
# Bit 12: Remote transmission request (RTR) bit
# Data frame: dominant, remote frame: recessive
# Remote frames do not contain a data field.
rtr = 'remote' if self.bits[12] == 1 else 'data'
self.put12([8, ['Remote transmission request: %s frame' % rtr,
'RTR: %s frame' % rtr, 'RTR']])
self.rtr_type = rtr
self.dlc_start = 15
if bitnum == 15 and self.fd:
self.putx([7, ['Reserved: %d' % can_rx, 'R0: %d' % can_rx, 'R0']])
if bitnum == 16 and self.fd:
self.putx([7, ['Bit rate switch: %d' % can_rx, 'BRS: %d' % can_rx, 'BRS']])
if bitnum == 17 and self.fd:
self.putx([7, ['Error state indicator: %d' % can_rx, 'ESI: %d' % can_rx, 'ESI']])
# Remember start of DLC (see below).
elif bitnum == self.dlc_start:
self.ss_block = self.samplenum
# Bits 15-18: Data length code (DLC), in number of bytes (0-8).
elif bitnum == self.dlc_start + 3:
self.dlc = bitpack_msb(self.bits[self.dlc_start:self.dlc_start + 4])
self.putb([10, ['Data length code: %d' % self.dlc,
'DLC: %d' % self.dlc, 'DLC']])
self.last_databit = self.dlc_start + 3 + (dlc2len(self.dlc) * 8)
if self.dlc > 8 and not self.fd:
self.putb([16, ['Data length code (DLC) > 8 is not allowed']])
# Remember all databyte bits, except the very last one.
elif bitnum in range(self.dlc_start + 4, self.last_databit):
self.ss_databytebits.append(self.samplenum)
# Bits 19-X: Data field (0-8 bytes, depending on DLC)
# The bits within a data byte are transferred MSB-first.
elif bitnum == self.last_databit:
self.ss_databytebits.append(self.samplenum) # Last databyte bit.
for i in range(dlc2len(self.dlc)):
x = self.dlc_start + 4 + (8 * i)
b = bitpack_msb(self.bits[x:x + 8])
self.frame_bytes.append(b)
ss = self.ss_databytebits[i * 8]
es = self.ss_databytebits[((i + 1) * 8) - 1]
self.putg(ss, es, [0, ['Data byte %d: 0x%02x' % (i, b),
'DB %d: 0x%02x' % (i, b), 'DB']])
self.ss_databytebits = []
elif bitnum > self.last_databit:
return self.decode_frame_end(can_rx, bitnum)
return False
# Returns True if the frame ended (EOF), False otherwise.
def decode_extended_frame(self, can_rx, bitnum):
# Remember start of EID (see below).
if bitnum == 14:
self.ss_block = self.samplenum
self.fd = False
self.dlc_start = 35
# Bits 14-31: Extended identifier (EID[17..0])
elif bitnum == 31:
self.eid = bitpack_msb(self.bits[14:])
s = '%d (0x%x)' % (self.eid, self.eid)
self.putb([4, ['Extended Identifier: %s' % s,
'Extended ID: %s' % s, 'Extended ID', 'EID']])
self.fullid = self.ident << 18 | self.eid
s = '%d (0x%x)' % (self.fullid, self.fullid)
self.putb([5, ['Full Identifier: %s' % s, 'Full ID: %s' % s,
'Full ID', 'FID']])
# Bit 12: Substitute remote request (SRR) bit
self.put12([9, ['Substitute remote request: %d' % self.bits[12],
'SRR: %d' % self.bits[12], 'SRR']])
# Bit 32: Remote transmission request (RTR) bit
# Data frame: dominant, remote frame: recessive
# Remote frames do not contain a data field.
# Remember start of RTR (see below).
if bitnum == 32:
self.ss_bit32 = self.samplenum
self.rtr = can_rx
if not self.fd:
rtr = 'remote' if can_rx == 1 else 'data'
self.putx([8, ['Remote transmission request: %s frame' % rtr,
'RTR: %s frame' % rtr, 'RTR']])
self.rtr_type = rtr
# Bit 33: RB1 (reserved bit)
elif bitnum == 33:
self.fd = True if can_rx else False
if self.fd:
self.dlc_start = 37
self.putx([7, ['Flexible data format: %d' % can_rx,
'FDF: %d' % can_rx, 'FDF']])
self.put32([7, ['Reserved bit 1: %d' % self.rtr,
'RB1: %d' % self.rtr, 'RB1']])
else:
self.putx([7, ['Reserved bit 1: %d' % can_rx,
'RB1: %d' % can_rx, 'RB1']])
# Bit 34: RB0 (reserved bit)
elif bitnum == 34:
self.putx([7, ['Reserved bit 0: %d' % can_rx,
'RB0: %d' % can_rx, 'RB0']])
elif bitnum == 35 and self.fd:
self.putx([7, ['Bit rate switch: %d' % can_rx,
'BRS: %d' % can_rx, 'BRS']])
elif bitnum == 36 and self.fd:
self.putx([7, ['Error state indicator: %d' % can_rx,
'ESI: %d' % can_rx, 'ESI']])
# Remember start of DLC (see below).
elif bitnum == self.dlc_start:
self.ss_block = self.samplenum
# Bits 35-38: Data length code (DLC), in number of bytes (0-8).
elif bitnum == self.dlc_start + 3:
self.dlc = bitpack_msb(self.bits[self.dlc_start:self.dlc_start + 4])
self.putb([10, ['Data length code: %d' % self.dlc,
'DLC: %d' % self.dlc, 'DLC']])
self.last_databit = self.dlc_start + 3 + (dlc2len(self.dlc) * 8)
# Remember all databyte bits, except the very last one.
elif bitnum in range(self.dlc_start + 4, self.last_databit):
self.ss_databytebits.append(self.samplenum)
# Bits 39-X: Data field (0-8 bytes, depending on DLC)
# The bits within a data byte are transferred MSB-first.
elif bitnum == self.last_databit:
self.ss_databytebits.append(self.samplenum) # Last databyte bit.
for i in range(dlc2len(self.dlc)):
x = self.dlc_start + 4 + (8 * i)
b = bitpack_msb(self.bits[x:x + 8])
self.frame_bytes.append(b)
ss = self.ss_databytebits[i * 8]
es = self.ss_databytebits[((i + 1) * 8) - 1]
self.putg(ss, es, [0, ['Data byte %d: 0x%02x' % (i, b),
'DB %d: 0x%02x' % (i, b), 'DB']])
self.ss_databytebits = []
elif bitnum > self.last_databit:
return self.decode_frame_end(can_rx, bitnum)
return False
def handle_bit(self, can_rx):
self.rawbits.append(can_rx)
self.bits.append(can_rx)
# Get the index of the current CAN frame bit (without stuff bits).
bitnum = len(self.bits) - 1
if self.fd and can_rx:
if bitnum == 16 and self.frame_type == 'standard' \
or bitnum == 35 and self.frame_type == 'extended':
self.dom_edge_seen(force=True)
self.set_fast_bitrate()
# If this is a stuff bit, remove it from self.bits and ignore it.
if self.is_stuff_bit():
self.putx([15, [str(can_rx)]])
self.curbit += 1 # Increase self.curbit (bitnum is not affected).
return
else:
self.putx([17, [str(can_rx)]])
# Bit 0: Start of frame (SOF) bit
if bitnum == 0:
self.ss_packet = self.samplenum
self.putx([1, ['Start of frame', 'SOF', 'S']])
if can_rx != 0:
self.putx([16, ['Start of frame (SOF) must be a dominant bit']])
# Remember start of ID (see below).
elif bitnum == 1:
self.ss_block = self.samplenum
# Bits 1-11: Identifier (ID[10..0])
# The bits ID[10..4] must NOT be all recessive.
elif bitnum == 11:
# BEWARE! Don't clobber the decoder's .id field which is
# part of its boiler plate!
self.ident = bitpack_msb(self.bits[1:])
self.fullid = self.ident
            s = '%d (0x%x)' % (self.ident, self.ident)
self.putb([3, ['Identifier: %s' % s, 'ID: %s' % s, 'ID']])
if (self.ident & 0x7f0) == 0x7f0:
self.putb([16, ['Identifier bits 10..4 must not be all recessive']])
# RTR or SRR bit, depending on frame type (gets handled later).
elif bitnum == 12:
# self.putx([0, ['RTR/SRR: %d' % can_rx]]) # Debug only.
self.ss_bit12 = self.samplenum
# Bit 13: Identifier extension (IDE) bit
# Standard frame: dominant, extended frame: recessive
elif bitnum == 13:
ide = self.frame_type = 'standard' if can_rx == 0 else 'extended'
self.putx([6, ['Identifier extension bit: %s frame' % ide,
'IDE: %s frame' % ide, 'IDE']])
# Bits 14-X: Frame-type dependent, passed to the resp. handlers.
elif bitnum >= 14:
if self.frame_type == 'standard':
done = self.decode_standard_frame(can_rx, bitnum)
else:
done = self.decode_extended_frame(can_rx, bitnum)
# The handlers return True if a frame ended (EOF).
if done:
return
# After a frame there are 3 intermission bits (recessive).
# After these bits, the bus is considered free.
self.curbit += 1
def decode(self):
if not self.samplerate:
raise SamplerateError('Cannot decode without samplerate.')
while True:
# State machine.
if self.state == 'IDLE':
# Wait for a dominant state (logic 0) on the bus.
(can_rx,) = self.wait({0: 'l'})
self.sof = self.samplenum
self.dom_edge_seen(force = True)
self.state = 'GET BITS'
elif self.state == 'GET BITS':
# Wait until we're in the correct bit/sampling position.
pos = self.get_sample_point(self.curbit)
(can_rx,) = self.wait([{'skip': pos - self.samplenum}, {0: 'f'}])
if self.matched[1]:
self.dom_edge_seen()
if self.matched[0]:
self.handle_bit(can_rx)
| gpl-3.0 | -9,165,277,726,687,421,000 | 39.527938 | 93 | 0.530855 | false | 3.424617 | false | false | false |
thequbit/mc911feedwatcher | scraper/popagencies.py | 1 | 2136 | import sys
import _mysql as mysql
def get_mysql_credentials():
# read in credentials file
lines = tuple(open('mysqlcreds.txt', 'r'))
# return the tuple of the lines in the file
#
# host
# dbname
# username
# password
#
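    # Example mysqlcreds.txt contents (hypothetical values for illustration
    # only, not real credentials):
    #
    #   localhost
    #   feedwatcher
    #   dbuser
    #   s3cret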
return lines
def main(argv):
print "Starting application.";
# pull from the database a list of all of the incidents to date
print "Connecting to Database and pulling all incidents."
# get our db info from our local file
dbcreds = get_mysql_credentials()
    # decode response
host = dbcreds[0].rstrip()
dbname = dbcreds[1].rstrip()
username = dbcreds[2].rstrip()
password = dbcreds[3].rstrip()
# connect to our database
database = mysql.connect(host=host,user=username,passwd=password,db=dbname)
# generate query, and get the number of rows returned
query = 'SELECT DISTINCT itemid FROM incidents'
database.query(query)
dbresult=database.store_result()
#(count,),=dbresult.fetch_row()
# get all of the incident itemid's from the result
itemids = []
for row in dbresult.fetch_row(maxrows=0):
itemids.append(row[0])
print "\tRetrieved {0} items".format(len(itemids))
print "... Done."
print "Generating list of unique agencies ..."
agencies = []
    # iterate through and generate a list of only unique agencies
for itemid in itemids:
        # get short name of agency (first four letters of the incident id)
shortname = itemid[0:4]
# see if we have added it already
        if shortname not in agencies:
# need to add the new agency to the list of agencies
print "\tNew Agency Found! Shortname = {0}".format(shortname)
agencies.append(shortname)
print "... Done."
print "Pushing {0} agencies to database ...".format(len(agencies))
for agency in agencies:
query = 'INSERT INTO agencies (shortname,longname,description,websiteurl) VALUES("{0}","","","")'.format(agency)
database.query(query)
print "... Done."
if __name__ == '__main__': sys.exit(main(sys.argv))
| gpl-3.0 | -2,299,675,220,458,369,300 | 25.37037 | 114 | 0.645131 | false | 3.518946 | false | false | false |
krathjen/studiolibrary | src/studiolibrary/widgets/lineedit.py | 1 | 5144 | # Copyright 2020 by Kurt Rathjen. All Rights Reserved.
#
# This library is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. This library is distributed in the
# hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
import logging
from studiovendor.Qt import QtGui
from studiovendor.Qt import QtCore
from studiovendor.Qt import QtWidgets
import studioqt
import studiolibrary
logger = logging.getLogger(__name__)
class LineEdit(QtWidgets.QLineEdit):
def __init__(self, *args):
QtWidgets.QLineEdit.__init__(self, *args)
icon = studiolibrary.resource.icon("search.svg")
self._iconButton = QtWidgets.QPushButton(self)
self._iconButton.setObjectName("icon")
self._iconButton.clicked.connect(self._iconClicked)
self._iconButton.setIcon(icon)
self._iconButton.setStyleSheet("QPushButton{background-color: transparent;}")
icon = studiolibrary.resource.icon("times.svg")
self._clearButton = QtWidgets.QPushButton(self)
self._clearButton.setObjectName("clear")
self._clearButton.setCursor(QtCore.Qt.ArrowCursor)
self._clearButton.setIcon(icon)
self._clearButton.setToolTip("Clear all search text")
self._clearButton.clicked.connect(self._clearClicked)
self._clearButton.setStyleSheet("QPushButton{background-color: transparent;}")
self.textChanged.connect(self._textChanged)
color = studioqt.Color.fromString("rgb(250,250,250,115)")
self.setIconColor(color)
self.update()
def update(self):
self.updateIconColor()
self.updateClearButton()
def _textChanged(self, text):
"""
Triggered when the text changes.
:type text: str
:rtype: None
"""
self.updateClearButton()
def _clearClicked(self):
"""
Triggered when the user clicks the cross icon.
:rtype: None
"""
self.setText("")
self.setFocus()
def _iconClicked(self):
"""
Triggered when the user clicks on the icon.
:rtype: None
"""
if not self.hasFocus():
self.setFocus()
def updateClearButton(self):
"""
Update the clear button depending on the current text.
:rtype: None
"""
text = self.text()
if text:
self._clearButton.show()
else:
self._clearButton.hide()
def contextMenuEvent(self, event):
"""
Triggered when the user right clicks on the search widget.
:type event: QtCore.QEvent
:rtype: None
"""
self.showContextMenu()
def setIcon(self, icon):
"""
Set the icon for the search widget.
:type icon: QtWidgets.QIcon
:rtype: None
"""
self._iconButton.setIcon(icon)
def setIconColor(self, color):
"""
Set the icon color for the search widget icon.
:type color: QtGui.QColor
:rtype: None
"""
icon = self._iconButton.icon()
icon = studioqt.Icon(icon)
icon.setColor(color)
self._iconButton.setIcon(icon)
icon = self._clearButton.icon()
icon = studioqt.Icon(icon)
icon.setColor(color)
self._clearButton.setIcon(icon)
def updateIconColor(self):
"""
Update the icon colors to the current foregroundRole.
:rtype: None
"""
color = self.palette().color(self.foregroundRole())
color = studioqt.Color.fromColor(color)
self.setIconColor(color)
def settings(self):
"""
Return a dictionary of the current widget state.
:rtype: dict
"""
settings = {
"text": self.text(),
}
return settings
def setSettings(self, settings):
"""
Restore the widget state from a settings dictionary.
:type settings: dict
:rtype: None
"""
text = settings.get("text", "")
self.setText(text)
def resizeEvent(self, event):
"""
Reimplemented so the icon maintains the same height as the widget.
:type event: QtWidgets.QResizeEvent
:rtype: None
"""
QtWidgets.QLineEdit.resizeEvent(self, event)
height = self.height()
size = QtCore.QSize(16, 16)
self.setTextMargins(20, 0, 0, 0)
self._iconButton.setIconSize(size)
self._iconButton.setGeometry(0, 0, height, height)
x = self.width() - height
self._clearButton.setIconSize(size)
self._clearButton.setGeometry(x, 0, height, height)
| lgpl-3.0 | -1,201,288,177,002,947,600 | 27.10929 | 86 | 0.617807 | false | 4.26888 | false | false | false |
DarkSouL11/UIP | uiplib/gui/mainGui.py | 1 | 3273 | """Module that builds the Graphical User Interface."""
from uiplib.scheduler import scheduler
from uiplib.setWallpaper import change_background
from uiplib.utils.utils import update_settings, check_sites
from uiplib.gui.gallery import Gallery
from uiplib.gui import generalTab, settingsTab
from tkinter import *
from tkinter import messagebox
from tkinter.ttk import *
from PIL import Image, ImageTk
from queue import Queue
import os
class MainWindow:
"""The main window that houses the app."""
def __init__(self, settings):
"""Initialize the Main Window."""
# configuration
self.settings = settings
# base window
self.root = Tk()
self.root.resizable(width=False, height=False)
# set window title
self.root.title("UIP")
# self.root.wm_iconbitmap() sets icon bitmap
self.queue = Queue()
self.index = 0
self.images = []
self.update_images()
# create the UI
self.create_ui()
def create_ui(self):
"""Method to initialize UI."""
self.notebook = Notebook(self.root)
self.notebook.pack()
generalTab.create_general_tab(self)
settingsTab.create_settings_tab(self)
def show_progess(self, show):
"""Method to display download progress."""
if show:
self.progressBar = Progressbar(self.headerFrame,
orient=HORIZONTAL,
length='300',
variable=self.progress,
mode='determinate')
self.progressBar.pack(fill=BOTH, padx=5, pady=5)
else:
self.progressBar = None
def push(self, x):
"""Method to push onto UI Queue."""
        self.queue.put(x)
def run(self):
"""Method that runs the main event loop."""
self.update_ui()
# run the main event loop of UI
self.root.mainloop()
def update_ui(self):
"""Method that updates UI periodically."""
# update UI with data received
        while self.queue and not self.queue.empty():
            self.queue.get()  # consume queued items so the loop terminates
# update UI after every 200ms
self.root.after(200, self.update_ui)
def next_wallpaper(self):
"""Preview next wallpaper."""
self.index = (self.index + 1) % len(self.images)
self.gallery.set_image(self.images[self.index])
def prev_wallpaper(self):
"""Preview previous wallpaper."""
self.index -= 1
self.gallery.set_image(self.images[self.index])
def set_wallpaper(self):
"""Set the wallpaper which is being previewed."""
image = self.images[self.index]
change_background(image)
def download(self):
"""Method to start download."""
pass
def flush(self):
"""Method to flush all images."""
print("Flush Clicked!")
def update_images(self):
"""Method to get images from directory."""
directory = self.settings['pics-folder']
files = os.listdir(directory)
self.images = [os.path.join(directory, file) for file in files
if (file.endswith('.png') or file.endswith('.jpg'))]
| agpl-3.0 | 4,991,921,592,364,811,000 | 30.776699 | 75 | 0.583562 | false | 4.278431 | false | false | false |
alsimoes/panelaco | website/models.py | 2 | 1511 | # -*- coding: utf-8 -*-
from django.db import models
class Profissao(models.Model):
descricao = models.CharField(max_length=20)
class Meta:
ordering = ["descricao"]
verbose_name_plural = "profissoes"
def __unicode__(self):
return self.descricao
class Membro(models.Model):
nome_completo = models.CharField(max_length=50)
apelido = models.CharField(max_length=20)
apresentacao = models.TextField()
membro_desde = models.DateField()
profissao_membro = models.ForeignKey(Profissao)
class Meta:
ordering = ["membro_desde"]
def __unicode__(self):
return self.nome_completo
class Servico(models.Model):
titulo = models.CharField(max_length=50)
descricao = models.TextField()
def __unicode__(self):
return self.titulo
class Produto(models.Model):
titulo = models.CharField(max_length=50)
descricao = models.TextField()
def __unicode__(self):
return self.titulo
class Portifolio(models.Model):
titulo = models.CharField(max_length=50)
descricao = models.TextField()
def __unicode__(self):
return self.titulo
# class Dados_Contato(models.Model):
# nome_empresa = models.CharField(max_length=50)
# email = models.EmailField(max_length=75)
# telefone = models.CharField(max_length=15)
# def __unicode__(self):
# return self.nome_empresa | gpl-3.0 | 7,481,464,319,621,882,000 | 25.981481 | 52 | 0.613651 | false | 3.50116 | false | false | false |
PLyczkowski/Sticky-Keymap | 2.74/scripts/addons_contrib/ewoc_projects_tools/mesh_deathguppie.py | 4 | 21956 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
"""
This addon implements a subdivision scheme called deathguppie.
It is ideal for creating more detail locally when sculpting.
Documentation
First go to User Preferences->Addons and enable the DeathGuppie addon in the Mesh category.
Go to EditMode, select some elements and invoke the addon (button in the Mesh Tool panel).
The selected area will be subdivided according to the deathguppie algorithm.
Subdivision is destructive, so this is not a modifier but a mesh operation.
The area remains selected after the operation, allowing further subdivision.
The smooth tickbox chooses between smooth and non-smooth subdivision.
The Select inner only tickbox sets what is left selected after operation, only inner faces or everything.
BEWARE - deathguppie will only subdivide grids of quads!
If you wish to hotkey DeathGuppie:
In the Input section of User Preferences at the bottom of the 3D View > Mesh section click 'Add New' button.
In the Operator Identifier box put 'mesh.deathguppie'.
Assign a hotkey.
Save as Default (Optional).
"""
bl_info = {
"name": "DeathGuppie",
"author": "Gert De Roost",
"version": (0, 3, 0),
"blender": (2, 63, 0),
"location": "View3D > Tools",
"description": "Deathguppie subdivision operation",
"warning": "",
"wiki_url": "",
"tracker_url": "",
"category": "Mesh"}
import bpy
import bmesh
bpy.types.Scene.Smooth = bpy.props.BoolProperty(
name = "Smoothing",
description = "Subdivide smooth",
default = True)
bpy.types.Scene.Inner = bpy.props.BoolProperty(
name = "Select inner only",
description = "After operation only inner verts selected",
default = True)
class DeathGuppie(bpy.types.Operator):
bl_idname = "mesh.deathguppie"
bl_label = "DeathGuppie"
bl_description = "Deathguppie subdivision operation"
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(cls, context):
obj = context.active_object
return (obj and obj.type == 'MESH' and context.mode == 'EDIT_MESH')
def invoke(self, context, event):
self.do_deathguppie(context)
return {'FINISHED'}
def do_deathguppie(self, context):
scn = context.scene
selobj = context.active_object
bpy.ops.object.editmode_toggle()
bpy.ops.object.duplicate()
projobj = bpy.context.active_object
bpy.ops.object.editmode_toggle()
bpy.ops.mesh.subdivide(number_cuts=5, smoothness=1.0)
bpy.ops.object.editmode_toggle()
projobj.hide = 1
context.scene.objects.active = selobj
bpy.ops.object.editmode_toggle()
mesh = selobj.data
bm = bmesh.from_edit_mesh(mesh)
bmkeep = bm.copy()
facelist = []
for f1 in bm.faces:
if f1.select:
linked = []
for e in f1.edges:
for f2 in e.link_faces:
if f2 != f1:
if f2.select:
linked.append(f2.index)
break
facelist.insert(0, [])
facelist[0].append(f1)
facelist[0].append(linked)
transfer = {}
holdlist = []
for [f, linked] in facelist:
bpy.ops.mesh.select_all(action = 'DESELECT')
f.select = 1
transfer[f.calc_center_median()[:]] = [f.index, linked]
bpy.ops.mesh.split()
bpy.ops.object.editmode_toggle()
bpy.ops.object.editmode_toggle()
bm = bmesh.from_edit_mesh(mesh)
facelist = []
for f in bm.faces:
num = 0
for e in f.edges:
if len(e.link_faces) == 1:
num += 1
if num == 4:
if f.calc_center_median()[:] in transfer.keys():
f.select = 1
facelist.insert(0, [])
facelist[0].append(f)
facelist[0].append(transfer[f.calc_center_median()[:]])
def createinnerlists(f):
for l in f.loops:
self.cornerlist.append(l.vert)
self.vselset.add(l.vert)
v1 = l.vert
vnext = l.link_loop_next.vert
vprev = l.link_loop_prev.vert
vnextnext = l.link_loop_next.link_loop_next.vert
vprevprev = l.link_loop_prev.link_loop_prev.vert
tempco1 = v1.co + (vprev.co - v1.co) / 3
tempco2 = vnext.co + (vnextnext.co - vnext.co) / 3
vert = bm.verts.new(tempco1 + ((tempco2 - tempco1) / 3))
self.innerlist.append(vert)
self.smoothset.add(vert)
self.vselset = set([])
fselset = set([])
self.smoothset = set([])
for [f, [foldidx, linked]] in facelist:
fold = bmkeep.faces[foldidx]
linked2 = []
for idx in linked:
linked2.append(bmkeep.faces[idx])
self.cornerlist = []
self.innerlist = []
if len(linked) == 4:
createinnerlists(f)
for e in f.edges:
ne, vert1 = bmesh.utils.edge_split(e, e.verts[0], 0.66)
ne, vert2 = bmesh.utils.edge_split(ne, vert1, 0.5)
self.vselset.add(vert1)
self.vselset.add(vert2)
self.smoothset.add(vert1)
self.smoothset.add(vert2)
for idx in range(len(self.cornerlist)):
cv = self.cornerlist[idx]
for l in f.loops:
if l.vert == cv:
fs = bm.faces.new((cv, l.link_loop_next.vert, self.innerlist[idx], l.link_loop_prev.vert))
fselset.add(fs)
fs = bm.faces.new((l.link_loop_prev.vert, l.link_loop_prev.link_loop_prev.vert, self.innerlist[idx - 1], self.innerlist[idx]))
fselset.add(fs)
fs = bm.faces.new((self.innerlist[0], self.innerlist[1], self.innerlist[2], self.innerlist[3]))
fselset.add(fs)
bm.faces.remove(f)
elif len(linked) == 3:
fedges = fold.edges[:]
for e1 in fedges:
for f1 in e1.link_faces:
if len(e1.link_faces) == 1 or (f1 != fold and not(f1 in linked2)):
edge = f.edges[fedges.index(e1)]
createinnerlists(f)
for e in f.edges:
if e != edge:
ne, vert1 = bmesh.utils.edge_split(e, e.verts[0], 0.66)
ne, vert2 = bmesh.utils.edge_split(ne, vert1, 0.5)
self.vselset.add(vert1)
self.vselset.add(vert2)
self.smoothset.add(vert1)
self.smoothset.add(vert2)
for l in edge.link_loops:
if l.face == f:
if l.edge == edge:
v1 = l.vert
vnext = l.link_loop_next.vert
vprev = l.link_loop_prev.vert
vnextnext = l.link_loop_next.link_loop_next.vert
vprevprev = l.link_loop_prev.link_loop_prev.vert
for idx in range(4):
if self.cornerlist[idx] == v1:
co1 = self.innerlist[idx].co + ((self.innerlist[idx].co - self.innerlist[idx-1].co) / 2)
co2 = self.innerlist[idx-3].co + ((self.innerlist[idx-3].co - self.innerlist[idx-2].co) / 2)
sidev1 = bm.verts.new(co1)
sidev2 = bm.verts.new(co2)
fs = bm.faces.new((v1, vnext, sidev2, sidev1))
if not(scn.Inner):
fselset.add(fs)
fs = bm.faces.new((v1, sidev1, self.innerlist[idx], vprev))
if not(scn.Inner):
fselset.add(fs)
fs = bm.faces.new((sidev2, vnext, vnextnext, self.innerlist[idx-3]))
if not(scn.Inner):
fselset.add(fs)
fs = bm.faces.new((sidev1, sidev2, self.innerlist[idx-3], self.innerlist[idx]))
if not(scn.Inner):
fselset.add(fs)
fs = bm.faces.new((self.innerlist[idx], self.innerlist[idx-1], vprevprev, vprev))
fselset.add(fs)
self.cornerlist[self.cornerlist.index(v1)] = None
self.cornerlist[self.cornerlist.index(vnext)] = None
break
for idx in range(len(self.cornerlist)):
cv = self.cornerlist[idx]
if cv != None:
for l in f.loops:
if l.vert == cv:
fs = bm.faces.new((cv, l.link_loop_next.vert, self.innerlist[idx], l.link_loop_prev.vert))
fselset.add(fs)
fs = bm.faces.new((l.link_loop_prev.vert, l.link_loop_prev.link_loop_prev.vert, self.innerlist[idx - 1], self.innerlist[idx]))
fselset.add(fs)
fs = bm.faces.new((self.innerlist[0], self.innerlist[1], self.innerlist[2], self.innerlist[3]))
fselset.add(fs)
bm.faces.remove(f)
self.smoothset.add(sidev1)
self.smoothset.add(sidev2)
elif len(linked) == 2:
case = 'BRIDGE'
for vert in linked2[0].verts:
if vert in linked2[1].verts:
case = 'CORNER'
break
if case == 'CORNER':
fedges = fold.edges[:]
edges = []
for e1 in fedges:
for f1 in e1.link_faces:
if len(e1.link_faces) == 1 or (f1 != fold and not(f1 in linked2)):
edges.append(f.edges[fedges.index(e1)])
for l in edges[1].link_loops:
if l.face == f:
if l.edge == edges[1] and l.link_loop_next.edge == edges[0]:
edges.reverse()
break
createinnerlists(f)
for e in f.edges:
if not(e in edges):
ne, vert1 = bmesh.utils.edge_split(e, e.verts[0], 0.66)
ne, vert2 = bmesh.utils.edge_split(ne, vert1, 0.5)
self.vselset.add(vert1)
self.vselset.add(vert2)
self.smoothset.add(vert1)
self.smoothset.add(vert2)
for l in edges[0].link_loops:
if l.face == f:
if l.edge == edges[0]:
if l.link_loop_next.edge == edges[1]:
v1 = l.vert
vnext = l.link_loop_next.vert
vprev = l.link_loop_prev.vert
vnextnext = l.link_loop_next.link_loop_next.vert
vnnn = l.link_loop_next.link_loop_next.link_loop_next.vert
vprevprev = l.link_loop_prev.link_loop_prev.vert
vppp = l.link_loop_prev.link_loop_prev.link_loop_prev.vert
vpppp = l.link_loop_prev.link_loop_prev.link_loop_prev.link_loop_prev.vert
for idx in range(4):
if self.cornerlist[idx] == v1:
delta1 = (self.innerlist[idx].co - self.innerlist[idx-1].co) / 2
co1 = self.innerlist[idx].co + delta1
delta2 = (self.innerlist[idx-3].co - self.innerlist[idx].co) / 2
delta3 = (self.innerlist[idx-3].co - self.innerlist[idx-2].co) / 2
co2 = self.innerlist[idx-3].co + delta1 + delta2
sidev1 = bm.verts.new(co1)
sidev2 = bm.verts.new(co2)
sidev3 = bm.verts.new(self.innerlist[idx-2].co + ((self.innerlist[idx-2].co - self.innerlist[idx-1].co) / 2))
fs = bm.faces.new((v1, vnext, sidev2, sidev1))
if not(scn.Inner):
fselset.add(fs)
fs = bm.faces.new((sidev3, sidev2, vnext, vnextnext))
if not(scn.Inner):
fselset.add(fs)
fs = bm.faces.new((v1, sidev1, self.innerlist[idx], vprev))
if not(scn.Inner):
fselset.add(fs)
fs = bm.faces.new((self.innerlist[idx-2], sidev3, vnextnext, vnnn))
if not(scn.Inner):
fselset.add(fs)
fs = bm.faces.new((sidev1, sidev2, self.innerlist[idx-3], self.innerlist[idx]))
if not(scn.Inner):
fselset.add(fs)
fs = bm.faces.new((sidev2, sidev3, self.innerlist[idx-2], self.innerlist[idx-3]))
if not(scn.Inner):
fselset.add(fs)
fs = bm.faces.new((vprevprev, vprev, self.innerlist[idx], self.innerlist[idx-1]))
fselset.add(fs)
fs = bm.faces.new((vpppp, vppp, vprevprev, self.innerlist[idx-1]))
fselset.add(fs)
fs = bm.faces.new((vnnn, vpppp, self.innerlist[idx-1], self.innerlist[idx-2]))
fselset.add(fs)
break
break
fs = bm.faces.new((self.innerlist[0], self.innerlist[1], self.innerlist[2], self.innerlist[3]))
fselset.add(fs)
bm.faces.remove(f)
self.smoothset.add(sidev1)
self.smoothset.add(sidev2)
self.smoothset.add(sidev3)
else:
fedges = fold.edges[:]
edges = []
for e1 in fedges:
for f1 in e1.link_faces:
if len(e1.link_faces) == 1 or (f1 != fold and not(f1 in linked2)):
edges.append(f.edges[fedges.index(e1)])
createinnerlists(f)
for e in f.edges:
if not(e in edges):
ne, vert1 = bmesh.utils.edge_split(e, e.verts[0], 0.66)
ne, vert2 = bmesh.utils.edge_split(ne, vert1, 0.5)
self.vselset.add(vert1)
self.vselset.add(vert2)
self.smoothset.add(vert1)
self.smoothset.add(vert2)
for l in f.loops:
if l.edge == edges[0]:
v1 = l.vert
vnext = l.link_loop_next.vert
vprev = l.link_loop_prev.vert
vnextnext = l.link_loop_next.link_loop_next.vert
vnnn = l.link_loop_next.link_loop_next.link_loop_next.vert
vnnnn = l.link_loop_next.link_loop_next.link_loop_next.link_loop_next.vert
vprevprev = l.link_loop_prev.link_loop_prev.vert
vppp = l.link_loop_prev.link_loop_prev.link_loop_prev.vert
vpppp = l.link_loop_prev.link_loop_prev.link_loop_prev.link_loop_prev.vert
for idx in range(4):
if self.cornerlist[idx] == v1:
delta1 = (self.innerlist[idx].co - self.innerlist[idx-1].co) / 2
co1 = self.innerlist[idx].co + delta1
sidev1 = bm.verts.new(co1)
delta2 = (self.innerlist[idx-3].co - self.innerlist[idx-2].co) / 2
co2 = self.innerlist[idx-3].co + delta2
sidev2 = bm.verts.new(co2)
delta3 = (self.innerlist[idx-2].co - self.innerlist[idx-3].co) / 2
co3 = self.innerlist[idx-2].co + delta3
sidev3 = bm.verts.new(co3)
delta4 = (self.innerlist[idx-1].co - self.innerlist[idx].co) / 2
co4 = self.innerlist[idx-1].co + delta4
sidev4 = bm.verts.new(co4)
fs = bm.faces.new((v1, vnext, sidev2, sidev1))
if not(scn.Inner):
fselset.add(fs)
fs = bm.faces.new((v1, sidev1, self.innerlist[idx], vprev))
if not(scn.Inner):
fselset.add(fs)
fs = bm.faces.new((vnext, vnextnext, self.innerlist[idx-3], sidev2))
if not(scn.Inner):
fselset.add(fs)
fs = bm.faces.new((sidev1, sidev2, self.innerlist[idx-3], self.innerlist[idx]))
if not(scn.Inner):
fselset.add(fs)
fs = bm.faces.new((vppp, sidev4, sidev3, vnnnn))
if not(scn.Inner):
fselset.add(fs)
fs = bm.faces.new((vppp, vprevprev, self.innerlist[idx-1], sidev4))
if not(scn.Inner):
fselset.add(fs)
fs = bm.faces.new((sidev3, self.innerlist[idx-2], vnnn, vnnnn))
if not(scn.Inner):
fselset.add(fs)
fs = bm.faces.new((sidev3, sidev4, self.innerlist[idx-1], self.innerlist[idx-2]))
if not(scn.Inner):
fselset.add(fs)
fs = bm.faces.new((vprevprev, vprev, self.innerlist[idx], self.innerlist[idx-1]))
fselset.add(fs)
fs = bm.faces.new((vnextnext, vnnn, self.innerlist[idx-2], self.innerlist[idx-3]))
fselset.add(fs)
fs = bm.faces.new((self.innerlist[0], self.innerlist[1], self.innerlist[2], self.innerlist[3]))
fselset.add(fs)
bm.faces.remove(f)
self.smoothset.add(sidev1)
self.smoothset.add(sidev2)
self.smoothset.add(sidev3)
self.smoothset.add(sidev4)
elif len(linked) == 1:
fedges = fold.edges[:]
edges = []
for e1 in fedges:
for f1 in e1.link_faces:
if len(e1.link_faces) == 1 or (f1 != fold and not(f1 in linked2)):
edges.append(f.edges[fedges.index(e1)])
for l in f.loops:
if not(l.edge in edges):
edges = [l.link_loop_next.edge, l.link_loop_next.link_loop_next.edge, l.link_loop_next.link_loop_next.link_loop_next.edge]
createinnerlists(f)
for e in f.edges:
if not(e in edges):
ne, vert1 = bmesh.utils.edge_split(e, e.verts[0], 0.66)
ne, vert2 = bmesh.utils.edge_split(ne, vert1, 0.5)
self.vselset.add(vert1)
self.vselset.add(vert2)
self.smoothset.add(vert1)
self.smoothset.add(vert2)
for l in f.loops:
if l.edge == edges[0]:
v1 = l.vert
vnext = l.link_loop_next.vert
vprev = l.link_loop_prev.vert
vnextnext = l.link_loop_next.link_loop_next.vert
vnnn = l.link_loop_next.link_loop_next.link_loop_next.vert
vprevprev = l.link_loop_prev.link_loop_prev.vert
vppp = l.link_loop_prev.link_loop_prev.link_loop_prev.vert
vpppp = l.link_loop_prev.link_loop_prev.link_loop_prev.link_loop_prev.vert
for idx in range(4):
if self.cornerlist[idx] == v1:
delta1 = (self.innerlist[idx].co - self.innerlist[idx-1].co) / 2
co1 = self.innerlist[idx].co + delta1
delta2 = (self.innerlist[idx-3].co - self.innerlist[idx].co) / 2
delta3 = (self.innerlist[idx-3].co - self.innerlist[idx-2].co) / 2
co2 = self.innerlist[idx-3].co + delta1 + delta2
sidev1 = bm.verts.new(co1)
sidev2 = bm.verts.new(co2)
delta4 = (self.innerlist[idx-2].co - self.innerlist[idx-1].co) / 2
delta5 = (self.innerlist[idx-2].co - self.innerlist[idx-3].co) / 2
co3 = self.innerlist[idx-2].co + delta4 + delta5
sidev3 = bm.verts.new(co3)
delta6 = (self.innerlist[idx-1].co - self.innerlist[idx].co) / 2
co4 = self.innerlist[idx-1].co + delta6
sidev4 = bm.verts.new(co4)
fs = bm.faces.new((v1, vnext, sidev2, sidev1))
if not(scn.Inner):
fselset.add(fs)
fs = bm.faces.new((sidev3, sidev2, vnext, vnextnext))
if not(scn.Inner):
fselset.add(fs)
fs = bm.faces.new((v1, sidev1, self.innerlist[idx], vprev))
if not(scn.Inner):
fselset.add(fs)
fs = bm.faces.new((sidev1, sidev2, self.innerlist[idx-3], self.innerlist[idx]))
if not(scn.Inner):
fselset.add(fs)
fs = bm.faces.new((sidev2, sidev3, self.innerlist[idx-2], self.innerlist[idx-3]))
if not(scn.Inner):
fselset.add(fs)
fs = bm.faces.new((sidev4, sidev3, vnextnext, vppp))
if not(scn.Inner):
fselset.add(fs)
fs = bm.faces.new((self.innerlist[idx-2], self.innerlist[idx-1], sidev4, sidev3))
if not(scn.Inner):
fselset.add(fs)
fs = bm.faces.new((vprevprev, vppp, sidev4, self.innerlist[idx-1]))
if not(scn.Inner):
fselset.add(fs)
fs = bm.faces.new((vprev, vprevprev, self.innerlist[idx-1], self.innerlist[idx]))
fselset.add(fs)
fs = bm.faces.new((self.innerlist[0], self.innerlist[1], self.innerlist[2], self.innerlist[3]))
fselset.add(fs)
bm.faces.remove(f)
self.smoothset.add(sidev1)
self.smoothset.add(sidev2)
self.smoothset.add(sidev3)
self.smoothset.add(sidev4)
elif len(linked) == 0:
createinnerlists(f)
l = f.loops[0]
v1 = l.vert
vnext = l.link_loop_next.vert
vprev = l.link_loop_prev.vert
vnextnext = l.link_loop_next.link_loop_next.vert
for idx in range(4):
if self.cornerlist[idx] == v1:
sidev1 = bm.verts.new((self.cornerlist[idx].co + self.innerlist[idx].co) / 2)
sidev2 = bm.verts.new((self.cornerlist[idx-3].co + self.innerlist[idx-3].co) / 2)
sidev3 = bm.verts.new((self.cornerlist[idx-2].co + self.innerlist[idx-2].co) / 2)
sidev4 = bm.verts.new((self.cornerlist[idx-1].co + self.innerlist[idx-1].co) / 2)
fs = bm.faces.new((v1, vnext, sidev2, sidev1))
if not(scn.Inner):
fselset.add(fs)
fs = bm.faces.new((sidev3, sidev2, vnext, vnextnext))
if not(scn.Inner):
fselset.add(fs)
fs = bm.faces.new((sidev4, sidev3, vnextnext, vprev))
if not(scn.Inner):
fselset.add(fs)
fs = bm.faces.new((sidev1, sidev4, vprev, v1))
if not(scn.Inner):
fselset.add(fs)
fs = bm.faces.new((sidev1, sidev2, self.innerlist[idx-3], self.innerlist[idx]))
if not(scn.Inner):
fselset.add(fs)
fs = bm.faces.new((sidev2, sidev3, self.innerlist[idx-2], self.innerlist[idx-3]))
if not(scn.Inner):
fselset.add(fs)
fs = bm.faces.new((sidev3, sidev4, self.innerlist[idx-1], self.innerlist[idx-2]))
if not(scn.Inner):
fselset.add(fs)
fs = bm.faces.new((sidev4, sidev1, self.innerlist[idx], self.innerlist[idx-1]))
if not(scn.Inner):
fselset.add(fs)
fs = bm.faces.new((self.innerlist[0], self.innerlist[1], self.innerlist[2], self.innerlist[3]))
fselset.add(fs)
bm.faces.remove(f)
self.smoothset.add(sidev1)
self.smoothset.add(sidev2)
self.smoothset.add(sidev3)
self.smoothset.add(sidev4)
if scn.Smooth:
for v in self.smoothset:
v.co = projobj.closest_point_on_mesh(v.co)[0]
bpy.ops.mesh.select_all(action ='SELECT')
bm.normal_update()
bpy.ops.mesh.normals_make_consistent()
bpy.ops.mesh.select_all(action = 'DESELECT')
for f in fselset:
f.select = 1
for e in f.edges:
e.select = 1
for v in f.verts:
v.select = 1
for e in bm.edges:
if len(e.link_faces) == 1:
e.verts[0].select = 1
e.verts[1].select = 1
bpy.ops.mesh.remove_doubles()
for e in bm.edges:
if len(e.link_faces) == 1:
e.verts[0].select = 0
e.verts[1].select = 0
e.select = 0
mesh.update()
bm.free()
bmkeep.free()
bpy.ops.object.editmode_toggle()
bpy.ops.object.select_all(action = 'DESELECT')
context.scene.objects.active = projobj
projobj.hide = 0
bpy.ops.object.delete()
selobj.select = 1
context.scene.objects.active = selobj
bpy.ops.object.editmode_toggle()
def panel_func(self, context):
scn = bpy.context.scene
self.layout.label(text="DeathGuppie:")
self.layout.operator("mesh.deathguppie", text="Subdivide DG")
self.layout.prop(scn, "Smooth")
self.layout.prop(scn, "Inner")
def register():
bpy.utils.register_module(__name__)
bpy.types.VIEW3D_PT_tools_meshedit.append(panel_func)
def unregister():
bpy.utils.unregister_module(__name__)
bpy.types.VIEW3D_PT_tools_meshedit.remove(panel_func)
if __name__ == "__main__":
register()
| gpl-2.0 | 7,140,595,850,951,967,000 | 35.052545 | 134 | 0.619648 | false | 2.623805 | false | false | false |
lhagan/phoshare | Phoshare.py | 8 | 1125 | #!/usr/bin/env python
"""Reads iPhoto library info, and exports photos and movies."""
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import phoshare.phoshare_ui
import phoshare.phoshare_main
def main():
"""Main routine for Phoshare. Decides on UI vs. non-UI version."""
# Remove the funny -psn_xxx_xxx argument (from py2app)
if len(sys.argv) > 1 and sys.argv[1][:4] == '-psn':
del sys.argv[1]
if len(sys.argv) <= 1:
phoshare.phoshare_ui.main()
else:
phoshare.phoshare_main.main()
if __name__ == "__main__":
main()
| apache-2.0 | 3,060,881,644,531,512,300 | 31.142857 | 76 | 0.684444 | false | 3.461538 | false | false | false |
mlex121/fb-chat-messages | get_chat_messages.py | 1 | 3628 | #!/usr/bin/python
"""This is a quick script I wrote up to count how many messages each
of my friends posted in a big group chat.
This script depends on the Facebook Platform Python SDK found here:
https://github.com/pythonforfacebook/facebook-sdk
"""
from __future__ import division
import argparse
import facebook
import json
import time
from collections import defaultdict
from urlparse import urlsplit
def main():
"""Uses the Facebook Graph API to get chat messages for the given ID
and prints the number of messages each chat participant posted.
Will write the chat messages in JSON format to a file if specified.
"""
args = get_arguments()
data = []
graph = facebook.GraphAPI(args.token)
chat_id = args.chat_id
try:
comments = graph.get_object(chat_id + "/comments")
except facebook.GraphAPIError as e:
print e
else:
more_comments = True
while more_comments:
comments_data = comments.get('data', [])
data.extend(comments_data)
paging_next = comments.get('paging', {}).get('next')
if paging_next:
next_page_query = urlsplit(paging_next)[3]
# Prevents hammering the Graph API and getting
# locked out.
time.sleep(args.timeout)
comments = graph.get_object(
chat_id + "/comments?" + next_page_query)
else:
more_comments = False
if len(data) and args.output_file:
with open(args.output_file, 'w+') as f:
f.write(json.dumps(data))
print_results(data)
def get_arguments():
parser = argparse.ArgumentParser(
description='Grabs messages in a given Facebook chat and provides a '
'numerical break-down of the participants\' messages.')
parser.add_argument('token',
help='A Facebook access token. Can be retrieved from '
' the Graph API Explorer: '
'https://developers.facebook.com/tools/explorer')
parser.add_argument('chat_id',
help='The Facebook ID of the chat you want to analyze.'
' You can get these IDs from /me/inbox.')
parser.add_argument('-o', '--output_file',
help='Writes the chat messages in JSON format to the '
'specified file.')
parser.add_argument('-t', '--timeout',
default=1,
help='Provide a timeout (in seconds) between '
'successive Graph API calls to prevent being '
'locked out due to too many. Defaults to 1.')
return parser.parse_args()
def print_results(data):
"""Print the number of messages for each user in the chat.
Calculate how many messages each participant in the chat has sent,
along with what percentage of the chat's messages are theirs.
"""
mapping = defaultdict(lambda: {'count': 0, 'ratio': 0})
for comment in data:
# Sometimes there are chats which are missing a 'from' field in
# the messages.
try:
author = comment['from']['name']
except KeyError as e:
author = '<UNKNOWN_AUTHOR>'
mapping[author]['count'] += 1
for key, value in mapping.items():
value['ratio'] = value['count'] / len(data)
print "{}: {} messages ({:.2%} of the chat)".format(
key, value['count'], value['ratio'])
if __name__ == '__main__':
main()
| mit | -7,504,689,657,368,632,000 | 32.592593 | 79 | 0.577453 | false | 4.451534 | false | false | false |
adsarwate/mergetex | mergetex.py | 1 | 3284 | #!/usr/bin/python
# mergetex.py
#
# Script for merging tex files into a single monolithic file. This
# script should make it easy to generate an ArXiV-friendly single
# .tex file from a paper that is broken into subfiles using LaTeX's
# \input{} command.
#
# USAGE:
# python mergetex.py [input] [output]
# python mergetex.py mypaper.tex mypaperMerged.tex
#
# mergetex takes two arguments, the [input] file and the [output]
# file into which the merged files should go. It recursively
# searches [input] and adds any file given by uncommented \input{}
# commands.
#
#
#
# v0.1 by Anand Sarwate ([email protected])
import argparse
import string
import re
import sys
import os.path
def parseinclude(includefile,outfh):
try:
with open(includefile) as file:
print("Found " + includefile + ". Merging...\n")
except IOError as e:
print('Unable to open ' + includefile + ': does not exist or no read permissions')
fincl = open(includefile, 'r')
# parse file line by line
for line in fincl:
# strip out comments in the line, if any
dc = line.split('\\%') # look for escaped \%
if (len(dc) == 1): # then there is no \% to be escaped
first_comm = dc[0].find('%')
if (first_comm == -1):
decom = line
else:
decom = line[:(first_comm+1)] + '\n'
else: # we had to escape a \%
decom = "" # construct the uncommented part
dc = line.split('%')
for chunk in dc: # look in each chunk to see if there is a %
if (chunk[-1] == '\\'): # if % is escaped...
decom = decom + chunk + '%'
else:
if (chunk[-1] == '\n'):
decom = decom + chunk
else:
decom = decom + chunk + '%\n'
break
# search for the line containing an \input{} command
sec = re.match('\\\\input{(.*?)}', decom)
if sec:
# if the match is nonempty, then
fname = re.sub('\\\\input{', '', sec.group(0))
fname = re.sub('}', '', fname)
if (fname.find('.tex') == -1):
fname = fname + '.tex'
print('\tFound include for ' + fname + '\n')
parseinclude(fname,outfh)
# if no \input{}, print the line to the output file
else:
outfh.write(decom)
fincl.close()
# input argument parser
# args.texfile will contain the filename of the main .tex file
# args.output will contain the filename of the merged output file
inparser = argparse.ArgumentParser(description='Parses argument list')
inparser.add_argument('texfile', metavar='texfile', help='main .tex file')
inparser.add_argument('output', metavar='output', help='desired target output file')
args = inparser.parse_args()
# INPUT PARSING AND WARNING GENERATION
try:
with open(args.texfile) as file:
pass
except IOError as e:
print('Unable to open ' + args.texfile + ': does not exist or no read permissions')
fin = open(args.texfile, 'r')
fout = open(args.output, 'w')
parseinclude(args.texfile,fout)
| gpl-3.0 | -3,236,888,002,015,941,000 | 30.576923 | 90 | 0.561815 | false | 3.723356 | false | false | false |
jdmonaco/vmo-feedback-model | src/figures/remapping.py | 1 | 4689 | #encoding: utf-8
"""
remapping -- Remapping figure showing orthogonalization from initial phase reset
Created by Joe Monaco on 2010-10-12.
Copyright (c) 2009-2011 Johns Hopkins University. All rights reserved.
This software is provided AS IS under the terms of the Open Source MIT License.
See http://www.opensource.org/licenses/mit-license.php.
"""
# Library imports
import os
import numpy as np
import matplotlib as mpl
import matplotlib.pylab as plt
# Package imports
from ..core.analysis import BaseAnalysis
from ..vmo import VMOModel
from ..session import VMOSession
from ..compare import (correlation_matrix, correlation_diagonals,
population_spatial_correlation)
from ..tools.images import array_to_image
from ..tools.radians import circle_diff_vec
class RemappingFigure(BaseAnalysis):
"""
Run complete remapping experiment based on random initial reset
"""
label = "remapping"
def collect_data(self, N_samples=2, **kwargs):
"""Run basic VMOModel remapping experiment by randomly initializing
the phase code of a network of oscillators and place units.
Keyword arguments:
N_samples -- total number of simulations to run (N-1 remapped from 1st)
Additional keyword arguments are passed on to VMOModel.
"""
self.results['N_samples'] = N_samples
# Set up model parameters
pdict = dict( N_outputs=500,
N_theta=1000,
N_cues=1,
C_W=0.05,
gamma_local=0,
gamma_distal=0,
num_trials=N_samples,
refresh_fixed_points=False )
pdict.update(kwargs)
# Set up and run the path integration model
self.out('Running remapping simulations...')
model = VMOModel(**pdict)
model.advance_all()
sessions = VMOSession.get_session_list(model)
VMOSession.save_session_list(sessions,
os.path.join(self.datadir, 'samples'))
# Get unit ordering based on first environment
sortix = list(sessions[0].sortix)
sortix += list(set(range(sessions[0].num_units)) - set(sortix))
self.results['sortix'] = np.array(sortix)
# Save multi-session population responses and activity patterns
self.out('Computing and storing population responses...')
R = [SD.get_population_matrix(clusters=sortix) for SD in sessions]
np.save(os.path.join(self.datadir, 'R.npy'), np.asarray(R))
# Good-bye
self.out('All done!')
def create_plots(self, N_examples=4, examples=None):
"""Create figure(s) with basic data panels
"""
# Change to data directoary and start logging
os.chdir(self.datadir)
self.out.outfd = file('figure.log', 'w')
# Set up main figure for plotting
self.figure = {}
figsize = 9, 12
plt.rcParams['figure.figsize'] = figsize
self.figure['remapping'] = f = plt.figure(figsize=figsize)
f.suptitle(self.label.title())
# Load the data
R = np.load(os.path.join(self.datadir, 'R.npy'))
N = self.results['N_samples']
# Example active unit responses across environments
if examples is None:
active = set()
for j in xrange(N):
active = active.union(set((R[j].max(axis=1)>=1).nonzero()[0]))
active = list(active)
active.sort()
examples = np.random.permutation(len(active))[:N_examples]
examples = np.array(active)[examples]
self.out('Plotting example responses: %s'%repr(examples))
for i,ex in enumerate(examples):
self.out('Unit %d max response = %.2f Hz'%(ex, R[:,ex].max()))
for j in xrange(N):
ax = plt.subplot(2*N_examples, N, N*i+j+1)
ax.plot(R[j,ex], c='k', lw=1.5)
ax.set_xlim(0, 360)
ax.set_ylim(-0.1*R[:,ex].max(), 1.1*R[:,ex].max())
ax.set_axis_off()
# Population responses
for j in xrange(N):
self.out('Environment %d population max = %.2f Hz'%(j+1, R[j].max()))
ax = plt.subplot(2, N, j+1+N)
ax.imshow(R[j], aspect='auto', interpolation='nearest')
array_to_image(R[j], 'pop_env_%02d.png'%(j+1), cmap=mpl.cm.gray_r)
plt.draw()
plt.rcParams['figure.figsize'] = plt.rcParamsDefault['figure.figsize']
self.out.outfd.close()
| mit | -8,417,916,709,066,976,000 | 36.214286 | 81 | 0.579868 | false | 3.88806 | false | false | false |
vpelletier/neoppod | neo/master/pt.py | 1 | 13272 | #
# Copyright (C) 2006-2016 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from collections import defaultdict
import neo.lib.pt
from neo.lib.protocol import CellStates, ZERO_TID
class Cell(neo.lib.pt.Cell):
replicating = ZERO_TID
def setState(self, state):
readable = self.isReadable()
super(Cell, self).setState(state)
if readable and not self.isReadable():
try:
del self.backup_tid, self.replicating
except AttributeError:
pass
neo.lib.pt.Cell = Cell
class MappedNode(object):
def __init__(self, node):
self.node = node
self.assigned = set()
def __getattr__(self, attr):
return getattr(self.node, attr)
class PartitionTable(neo.lib.pt.PartitionTable):
"""This class manages a partition table for the primary master node"""
def setID(self, id):
assert isinstance(id, (int, long)) or id is None, id
self._id = id
def setNextID(self):
if self._id is None:
raise RuntimeError, 'I do not know the last Partition Table ID'
self._id += 1
return self._id
def make(self, node_list):
"""Make a new partition table from scratch."""
# start with the first PTID
self._id = 1
# First, filter the list of nodes.
node_list = [n for n in node_list if n.isRunning() \
and n.getUUID() is not None]
if len(node_list) == 0:
# Impossible.
raise RuntimeError, 'cannot make a partition table with an ' \
'empty storage node list'
# Take it into account that the number of storage nodes may be less
# than the number of replicas.
repeats = min(self.nr + 1, len(node_list))
index = 0
for offset in xrange(self.np):
row = []
for _ in xrange(repeats):
node = node_list[index]
row.append(Cell(node))
self.count_dict[node] = self.count_dict.get(node, 0) + 1
index += 1
if index == len(node_list):
index = 0
self.partition_list[offset] = row
self.num_filled_rows = self.np
def dropNodeList(self, node_list, simulate=False):
partition_list = []
change_list = []
feeding_list = []
for offset, row in enumerate(self.partition_list):
new_row = []
partition_list.append(new_row)
feeding = None
drop_readable = uptodate = False
for cell in row:
node = cell.getNode()
if node in node_list:
change_list.append((offset, node.getUUID(),
CellStates.DISCARDED))
if cell.isReadable():
drop_readable = True
else:
new_row.append(cell)
if cell.isFeeding():
feeding = cell
elif cell.isUpToDate():
uptodate = True
if feeding is not None:
if len(new_row) < len(row):
change_list.append((offset, feeding.getUUID(),
CellStates.UP_TO_DATE))
feeding_list.append(feeding)
elif drop_readable and not uptodate:
raise neo.lib.pt.PartitionTableException(
"Refuse to drop nodes that contain the only readable"
" copies of partition %u" % offset)
if not simulate:
self.partition_list = partition_list
for cell in feeding_list:
cell.setState(CellStates.UP_TO_DATE)
self.count_dict[cell.getNode()] += 1
for node in node_list:
self.count_dict.pop(node, None)
self.num_filled_rows = len(filter(None, self.partition_list))
return change_list
def load(self, ptid, row_list, nm):
"""
Load a partition table from a storage node during the recovery.
Return the new storage nodes registered
"""
# check offsets
for offset, _row in row_list:
if offset >= self.getPartitions():
raise IndexError, offset
# store the partition table
self.clear()
self._id = ptid
new_nodes = []
for offset, row in row_list:
for uuid, state in row:
node = nm.getByUUID(uuid)
if node is None:
node = nm.createStorage(uuid=uuid)
new_nodes.append(node.asTuple())
self.setCell(offset, node, state)
return new_nodes
def setUpToDate(self, node, offset):
"""Set a cell as up-to-date"""
uuid = node.getUUID()
# check the partition is assigned and known as outdated
for cell in self.getCellList(offset):
if cell.getUUID() == uuid:
if cell.isOutOfDate():
break
return
else:
raise neo.lib.pt.PartitionTableException('Non-assigned partition')
# update the partition table
cell_list = [self.setCell(offset, node, CellStates.UP_TO_DATE)]
# If the partition contains a feeding cell, drop it now.
for feeding_cell in self.getCellList(offset):
if feeding_cell.isFeeding():
cell_list.append(self.removeCell(offset,
feeding_cell.getNode()))
break
return cell_list
def addNodeList(self, node_list):
"""Add nodes"""
added_list = []
for node in node_list:
if node not in self.count_dict:
self.count_dict[node] = 0
added_list.append(node)
return added_list
def tweak(self, drop_list=()):
"""Optimize partition table
This is done by computing a minimal diff between current partition table
and what make() would do.
"""
assigned_dict = {x: {} for x in self.count_dict}
readable_list = [set() for x in xrange(self.np)]
for offset, row in enumerate(self.partition_list):
for cell in row:
if cell.isReadable():
readable_list[offset].add(cell)
assigned_dict[cell.getNode()][offset] = cell
pt = PartitionTable(self.np, self.nr)
drop_list = set(drop_list).intersection(assigned_dict)
node_set = {MappedNode(x) for x in assigned_dict
if x not in drop_list}
pt.make(node_set)
for offset, row in enumerate(pt.partition_list):
for cell in row:
if cell.isReadable():
cell.getNode().assigned.add(offset)
def map_nodes():
node_list = []
for node, assigned in assigned_dict.iteritems():
if node in drop_list:
yield node, frozenset()
continue
readable = {offset for offset, cell in assigned.iteritems()
if cell.isReadable()}
# the criterion on UUID is purely cosmetic
node_list.append((len(readable), len(assigned),
-node.getUUID(), readable, node))
node_list.sort(reverse=1)
for _, _, _, readable, node in node_list:
assigned = assigned_dict[node]
mapped = min(node_set, key=lambda m: (
len(m.assigned.symmetric_difference(assigned)),
len(m.assigned ^ readable)))
node_set.remove(mapped)
yield node, mapped.assigned
assert not node_set
changed_list = []
uptodate_set = set()
remove_dict = defaultdict(list)
for node, mapped in map_nodes():
uuid = node.getUUID()
assigned = assigned_dict[node]
for offset, cell in assigned.iteritems():
if offset in mapped:
if cell.isReadable():
uptodate_set.add(offset)
readable_list[offset].remove(cell)
if cell.isFeeding():
self.count_dict[node] += 1
state = CellStates.UP_TO_DATE
cell.setState(state)
changed_list.append((offset, uuid, state))
else:
if not cell.isFeeding():
self.count_dict[node] -= 1
remove_dict[offset].append(cell)
for offset in mapped.difference(assigned):
self.count_dict[node] += 1
state = CellStates.OUT_OF_DATE
self.partition_list[offset].append(Cell(node, state))
changed_list.append((offset, uuid, state))
count_dict = self.count_dict.copy()
for offset, cell_list in remove_dict.iteritems():
row = self.partition_list[offset]
feeding = None if offset in uptodate_set else min(
readable_list[offset], key=lambda x: count_dict[x.getNode()])
for cell in cell_list:
if cell is feeding:
count_dict[cell.getNode()] += 1
if cell.isFeeding():
continue
state = CellStates.FEEDING
cell.setState(state)
else:
state = CellStates.DISCARDED
row.remove(cell)
changed_list.append((offset, cell.getUUID(), state))
assert self.num_filled_rows == len(filter(None, self.partition_list))
return changed_list
def outdate(self, lost_node=None):
"""Outdate all non-working nodes
Do not outdate cells of 'lost_node' for partitions it was the last node
to serve. This allows a cluster restart.
"""
change_list = []
for offset, row in enumerate(self.partition_list):
lost = lost_node
cell_list = []
for cell in row:
if cell.isReadable():
if cell.getNode().isRunning():
lost = None
else :
cell_list.append(cell)
for cell in cell_list:
if cell.getNode() is not lost:
cell.setState(CellStates.OUT_OF_DATE)
change_list.append((offset, cell.getUUID(),
CellStates.OUT_OF_DATE))
return change_list
def iterNodeCell(self, node):
for offset, row in enumerate(self.partition_list):
for cell in row:
if cell.getNode() is node:
yield offset, cell
break
def getOperationalNodeSet(self):
"""
Return a set of all nodes which are part of at least one UP TO DATE
partition. An empty list is returned if these nodes aren't enough to
become operational.
"""
node_set = set()
for row in self.partition_list:
if not any(cell.isReadable() and cell.getNode().isPending()
for cell in row):
return () # not operational
node_set.update(cell.getNode() for cell in row if cell.isReadable())
return node_set
def clearReplicating(self):
for row in self.partition_list:
for cell in row:
try:
del cell.replicating
except AttributeError:
pass
def setBackupTidDict(self, backup_tid_dict):
for row in self.partition_list:
for cell in row:
if cell.isReadable():
cell.backup_tid = backup_tid_dict.get(cell.getUUID(),
ZERO_TID)
def getBackupTid(self, mean=max):
try:
return min(mean(x.backup_tid for x in row if x.isReadable())
for row in self.partition_list)
except ValueError:
return ZERO_TID
def getCheckTid(self, partition_list):
try:
return min(min(cell.backup_tid
for cell in self.partition_list[offset]
if cell.isReadable())
for offset in partition_list)
except ValueError:
return ZERO_TID
| gpl-2.0 | -6,899,602,291,962,877,000 | 37.469565 | 80 | 0.526673 | false | 4.479244 | false | false | false |
mlperf/training_results_v0.5 | v0.5.0/google/cloud_v3.8/gnmt-tpuv3-8/code/gnmt/model/t2t/tensor2tensor/models/research/vqa_recurrent_self_attention.py | 3 | 10599 | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Recurrent self attention models for VQA."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensor2tensor.layers import common_attention
from tensor2tensor.layers import common_layers
from tensor2tensor.layers import vqa_layers
from tensor2tensor.models.research import universal_transformer
from tensor2tensor.models.research import universal_transformer_util
from tensor2tensor.models.research import vqa_attention
from tensor2tensor.utils import registry
# from tensor2tensor.utils import restore_hook
import tensorflow as tf
from tensorflow.contrib.layers.python.layers import utils
@registry.register_model
class VqaRecurrentSelfAttention(vqa_attention.VqaAttentionBaseline):
"""Recurrent Self attention both on image and question."""
# @staticmethod
# def train_hooks():
# restore_resnet_hook = restore_hook.RestoreHook(
# # TODO(zichaoy): hard code the path given static function.
# checkpoint_path="/home/zichaoy/resnet_v1_152.ckpt",
# new_model_scope="vqa_recurrent_self_attention/body/",
# old_model_scope="resnet_v1_152/",
# )
# return [restore_resnet_hook]
def body(self, features):
hp = self.hparams
# pylint: disable=eval-used
if hp.image_input_type == "image":
image_feat = vqa_layers.image_embedding(
features["inputs"],
model_fn=eval(hp.image_model_fn),
trainable=hp.train_resnet,
is_training=hp.mode == tf.estimator.ModeKeys.TRAIN)
else:
image_feat = features["inputs"]
image_feat = common_layers.flatten4d3d(image_feat)
image_feat = common_layers.dense(image_feat, hp.hidden_size)
utils.collect_named_outputs("norms", "image_feat_after_proj",
tf.norm(image_feat, axis=-1))
question = common_layers.flatten4d3d(features["question"])
utils.collect_named_outputs("norms", "question_embedding",
tf.norm(question, axis=-1))
(encoder_input, encoder_self_attention_bias,
encoder_decoder_attention_bias) = prepare_image_question_encoder(
image_feat, question, hp)
encoder_input = tf.nn.dropout(
encoder_input, keep_prob=1.-hp.layer_prepostprocess_dropout)
encoder_output, _ = recurrent_transformer_decoder(
encoder_input, None, encoder_self_attention_bias, None,
hp, name="encoder")
utils.collect_named_outputs(
"norms", "encoder_output", tf.norm(encoder_output, axis=-1))
# scale query by sqrt(hidden_size)
query = tf.get_variable("query", [hp.hidden_size]) * hp.hidden_size **0.5
query = tf.expand_dims(tf.expand_dims(query, axis=0), axis=0)
batch_size = common_layers.shape_list(encoder_input)[0]
query = tf.tile(query, [batch_size, 1, 1])
query = tf.nn.dropout(
query, keep_prob=1.-hp.layer_prepostprocess_dropout)
decoder_output, _ = recurrent_transformer_decoder(
query, encoder_output, None, encoder_decoder_attention_bias,
hp, name="decoder")
utils.collect_named_outputs("norms", "decoder_output",
tf.norm(decoder_output, axis=-1))
norm_tensors = utils.convert_collection_to_dict("norms")
vqa_layers.summarize_tensors(norm_tensors, tag="norms/")
# Expand dimension 1 and 2
return tf.expand_dims(decoder_output, axis=1)
def prepare_image_question_encoder(image_feat, question, hparams):
"""Prepare encoder.
Args:
image_feat: a Tensor.
question: a Tensor.
hparams: run hyperparameters
Returns:
encoder_input: a Tensor, bottom of encoder stack
    encoder_self_attention_bias: a bias tensor for use in encoder self-attention
    encoder_decoder_attention_bias: a bias tensor for use in encoder-decoder
      attention
"""
encoder_input = tf.concat([image_feat, question], axis=1)
encoder_padding = common_attention.embedding_to_padding(encoder_input)
ignore_padding = common_attention.attention_bias_ignore_padding(
encoder_padding)
encoder_self_attention_bias = ignore_padding
encoder_decoder_attention_bias = ignore_padding
# Usual case - not a packed dataset.
if hparams.pos == "timing":
question = common_attention.add_timing_signal_1d(question)
elif hparams.pos == "emb":
question = common_attention.add_positional_embedding(
question, hparams.max_length, "inputs_positional_embedding",
None)
encoder_input = tf.concat([image_feat, question], axis=1)
return (encoder_input, encoder_self_attention_bias,
encoder_decoder_attention_bias)
def recurrent_transformer_decoder(
decoder_input,
encoder_output,
decoder_self_attention_bias,
encoder_decoder_attention_bias,
hparams,
name="decoder",
nonpadding=None,
save_weights_to=None,
make_image_summary=True):
"""Recurrent decoder function."""
x = decoder_input
attention_dropout_broadcast_dims = (
common_layers.comma_separated_string_to_integer_list(
getattr(hparams, "attention_dropout_broadcast_dims", "")))
with tf.variable_scope(name):
ffn_unit = functools.partial(
# use encoder ffn, since decoder ffn use left padding
universal_transformer_util.transformer_encoder_ffn_unit,
hparams=hparams,
nonpadding_mask=nonpadding)
attention_unit = functools.partial(
universal_transformer_util.transformer_decoder_attention_unit,
hparams=hparams,
encoder_output=encoder_output,
decoder_self_attention_bias=decoder_self_attention_bias,
encoder_decoder_attention_bias=encoder_decoder_attention_bias,
attention_dropout_broadcast_dims=attention_dropout_broadcast_dims,
save_weights_to=save_weights_to,
make_image_summary=make_image_summary)
x, extra_output = universal_transformer_util.universal_transformer_layer(
x, hparams, ffn_unit, attention_unit)
return common_layers.layer_preprocess(x, hparams), extra_output
@registry.register_hparams
def vqa_recurrent_self_attention_base():
"""VQA attention baseline hparams."""
hparams = universal_transformer.universal_transformer_base()
hparams.batch_size = 1024
hparams.use_fixed_batch_size = True
hparams.weight_decay = 0.
hparams.clip_grad_norm = 0.
# use default initializer
# hparams.initializer = "xavier"
hparams.learning_rate_schedule = (
"constant*linear_warmup*rsqrt_normalized_decay")
hparams.learning_rate_warmup_steps = 8000
hparams.learning_rate_constant = 7e-4
hparams.learning_rate_decay_rate = 0.5
hparams.learning_rate_decay_steps = 50000
# hparams.dropout = 0.5
hparams.summarize_grads = True
hparams.summarize_vars = True
# not used hparams
hparams.label_smoothing = 0.1
hparams.multiply_embedding_mode = "sqrt_depth"
# add new hparams
# use raw image as input
hparams.add_hparam("image_input_type", "feature")
hparams.add_hparam("image_model_fn", "resnet_v1_152")
hparams.add_hparam("resize_side", 512)
hparams.add_hparam("height", 448)
hparams.add_hparam("width", 448)
hparams.add_hparam("distort", True)
hparams.add_hparam("train_resnet", False)
# question hidden size
# hparams.hidden_size = 512
# hparams.filter_size = 1024
# hparams.num_hidden_layers = 4
# self attention parts
# hparams.norm_type = "layer"
# hparams.layer_preprocess_sequence = "n"
# hparams.layer_postprocess_sequence = "da"
# hparams.layer_prepostprocess_dropout = 0.1
# hparams.attention_dropout = 0.1
# hparams.relu_dropout = 0.1
# hparams.add_hparam("pos", "timing")
# hparams.add_hparam("num_encoder_layers", 0)
# hparams.add_hparam("num_decoder_layers", 0)
# hparams.add_hparam("num_heads", 8)
# hparams.add_hparam("attention_key_channels", 0)
# hparams.add_hparam("attention_value_channels", 0)
# hparams.add_hparam("self_attention_type", "dot_product")
# iterative part
hparams.transformer_ffn_type = "fc"
return hparams
@registry.register_hparams
def vqa_recurrent_self_attention_small():
hparams = vqa_recurrent_self_attention_base()
hparams.learning_rate_constant = 1e-3
hparams.hidden_size = 512
hparams.filter_size = 2048
hparams.num_heads = 8
hparams.layer_prepostprocess_dropout = 0.1
return hparams
@registry.register_hparams
def vqa_recurrent_self_attention_big():
hparams = vqa_recurrent_self_attention_base()
hparams.learning_rate_constant = 5e-4
hparams.hidden_size = 2048
hparams.filter_size = 8192
return hparams
@registry.register_hparams
def vqa_recurrent_self_attention_big_l4():
hparams = vqa_recurrent_self_attention_big()
hparams.num_rec_steps = 4
return hparams
@registry.register_hparams
def vqa_recurrent_self_attention_highway():
hparams = vqa_recurrent_self_attention_base()
hparams.recurrence_type = "highway"
return hparams
@registry.register_hparams
def vqa_recurrent_self_attention_gru():
hparams = vqa_recurrent_self_attention_base()
hparams.recurrence_type = "gru"
return hparams
@registry.register_hparams
def vqa_recurrent_self_attention_l8():
hparams = vqa_recurrent_self_attention_base()
hparams.num_rec_steps = 8
return hparams
@registry.register_hparams
def vqa_recurrent_self_attention_mix_before_ut():
hparams = vqa_recurrent_self_attention_base()
hparams.mix_with_transformer = "before_ut"
return hparams
@registry.register_hparams
def vqa_recurrent_self_attention_l4():
hparams = vqa_recurrent_self_attention_base()
hparams.num_rec_steps = 4
return hparams
@registry.register_hparams
def vqa_recurrent_self_attention_ls2():
hparams = vqa_recurrent_self_attention_base()
hparams.label_smoothing = 0.2
return hparams
@registry.register_hparams
def vqa_recurrent_self_attention_drop1():
hparams = vqa_recurrent_self_attention_base()
hparams.layer_prepostprocess_dropout = 0.1
return hparams
@registry.register_hparams
def vqa_recurrent_self_attention_drop3():
hparams = vqa_recurrent_self_attention_base()
hparams.relu_dropout = 0.3
hparams.attention_dropout = 0.3
return hparams
| apache-2.0 | 6,046,979,351,341,825,000 | 32.435331 | 80 | 0.714407 | false | 3.458075 | false | false | false |
shirishagaddi/django-simple-pagination | simple_pagination/templatetags/paginate.py | 1 | 12468 | """Django Endless Pagination template tags."""
import re
from django import template
from django.utils.encoding import iri_to_uri
from simple_pagination import settings
from django.core.paginator import (
EmptyPage,
Page,
PageNotAnInteger,
Paginator,
)
from simple_pagination import utils
from simple_pagination import models
# LazyPaginator (used by lazy_paginate below) is assumed to live in this
# package's paginators module; it is not provided by django.core.paginator.
from simple_pagination.paginators import LazyPaginator
PAGINATE_EXPRESSION = re.compile(r"""
^ # Beginning of line.
(((?P<first_page>\w+)\,)?(?P<per_page>\w+)\s+)? # First page, per page.
(?P<objects>[\.\w]+) # Objects / queryset.
(\s+starting\s+from\s+page\s+(?P<number>[\-]?\d+|\w+))? # Page start.
(\s+using\s+(?P<key>[\"\'\-\w]+))? # Querystring key.
(\s+with\s+(?P<override_path>[\"\'\/\w]+))? # Override path.
(\s+as\s+(?P<var_name>\w+))? # Context variable name.
$ # End of line.
""", re.VERBOSE)
SHOW_CURRENT_NUMBER_EXPRESSION = re.compile(r"""
^ # Beginning of line.
(starting\s+from\s+page\s+(?P<number>\w+))?\s* # Page start.
(using\s+(?P<key>[\"\'\-\w]+))?\s* # Querystring key.
(as\s+(?P<var_name>\w+))? # Context variable name.
$ # End of line.
""", re.VERBOSE)
register = template.Library()
@register.tag
def paginate(parser, token, paginator_class=None):
"""Paginate objects.
Usage:
.. code-block:: html+django
{% paginate entries %}
After this call, the *entries* variable in the template context is replaced
by only the entries of the current page.
You can also keep your *entries* original variable (usually a queryset)
and add to the context another name that refers to entries of the current
page, e.g.:
.. code-block:: html+django
{% paginate entries as page_entries %}
The *as* argument is also useful when a nested context variable is provided
as queryset. In this case, and only in this case, the resulting variable
name is mandatory, e.g.:
.. code-block:: html+django
{% paginate entries.all as entries %}
The number of paginated entries is taken from settings, but you can
override the default locally, e.g.:
.. code-block:: html+django
{% paginate 20 entries %}
Of course you can mix it all:
.. code-block:: html+django
{% paginate 20 entries as paginated_entries %}
By default, the first page is displayed the first time you load the page,
but you can change this, e.g.:
.. code-block:: html+django
{% paginate entries starting from page 3 %}
When changing the default page, it is also possible to reference the last
page (or the second last page, and so on) by using negative indexes, e.g:
.. code-block:: html+django
{% paginate entries starting from page -1 %}
This can be also achieved using a template variable that was passed to the
context, e.g.:
.. code-block:: html+django
{% paginate entries starting from page page_number %}
If the passed page number does not exist, the first page is displayed.
If you have multiple paginations in the same page, you can change the
querydict key for the single pagination, e.g.:
.. code-block:: html+django
{% paginate entries using article_page %}
In this case *article_page* is intended to be a context variable, but you
can hardcode the key using quotes, e.g.:
.. code-block:: html+django
{% paginate entries using 'articles_at_page' %}
Again, you can mix it all (the order of arguments is important):
.. code-block:: html+django
{% paginate 20 entries
starting from page 3 using page_key as paginated_entries %}
Additionally you can pass a path to be used for the pagination:
.. code-block:: html+django
{% paginate 20 entries
using page_key with pagination_url as paginated_entries %}
This way you can easily create views acting as API endpoints, and point
your Ajax calls to that API. In this case *pagination_url* is considered a
context variable, but it is also possible to hardcode the URL, e.g.:
.. code-block:: html+django
{% paginate 20 entries with "/mypage/" %}
If you want the first page to contain a different number of items than
subsequent pages, you can separate the two values with a comma, e.g. if
you want 3 items on the first page and 10 on other pages:
.. code-block:: html+django
{% paginate 3,10 entries %}
You must use this tag before calling the {% show_more %} one.
"""
# Validate arguments.
try:
tag_name, tag_args = token.contents.split(None, 1)
except ValueError:
msg = '%r tag requires arguments' % token.contents.split()[0]
raise template.TemplateSyntaxError(msg)
# Use a regexp to catch args.
match = PAGINATE_EXPRESSION.match(tag_args)
if match is None:
msg = 'Invalid arguments for %r tag' % tag_name
raise template.TemplateSyntaxError(msg)
# Retrieve objects.
kwargs = match.groupdict()
objects = kwargs.pop('objects')
# The variable name must be present if a nested context variable is passed.
if '.' in objects and kwargs['var_name'] is None:
msg = (
'%(tag)r tag requires a variable name `as` argumnent if the '
'queryset is provided as a nested context variable (%(objects)s). '
'You must either pass a direct queryset (e.g. taking advantage '
'of the `with` template tag) or provide a new variable name to '
'store the resulting queryset (e.g. `%(tag)s %(objects)s as '
'objects`).'
) % {'tag': tag_name, 'objects': objects}
raise template.TemplateSyntaxError(msg)
# Call the node.
return PaginateNode(paginator_class, objects, **kwargs)
@register.tag
def lazy_paginate(parser, token):
"""Lazy paginate objects.
Paginate objects without hitting the database with a *select count* query.
Use this the same way as *paginate* tag when you are not interested
in the total number of pages.
"""
return paginate(parser, token, paginator_class=LazyPaginator)
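# Template usage mirrors the eager {% paginate %} tag, e.g.:
#   {% lazy_paginate 20 entries as lazy_entries %}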
class PaginateNode(template.Node):
"""Add to context the objects of the current page.
Also add the Django paginator's *page* object.
"""
def __init__(
self, paginator_class, objects, first_page=None, per_page=None,
var_name=None, number=None, key=None, override_path=None):
self.paginator = paginator_class or Paginator
self.objects = template.Variable(objects)
# If *var_name* is not passed, then the queryset name will be used.
self.var_name = objects if var_name is None else var_name
# If *per_page* is not passed then the default value form settings
# will be used.
self.per_page_variable = None
if per_page is None:
self.per_page = settings.PER_PAGE
elif per_page.isdigit():
self.per_page = int(per_page)
else:
self.per_page_variable = template.Variable(per_page)
# Handle first page: if it is not passed then *per_page* is used.
self.first_page_variable = None
if first_page is None:
self.first_page = None
elif first_page.isdigit():
self.first_page = int(first_page)
else:
self.first_page_variable = template.Variable(first_page)
# Handle page number when it is not specified in querystring.
self.page_number_variable = None
if number is None:
self.page_number = 1
else:
try:
self.page_number = int(number)
except ValueError:
self.page_number_variable = template.Variable(number)
# Set the querystring key attribute.
self.querystring_key_variable = None
if key is None:
self.querystring_key = settings.PAGE_LABEL
elif key[0] in ('"', "'") and key[-1] == key[0]:
self.querystring_key = key[1:-1]
else:
self.querystring_key_variable = template.Variable(key)
# Handle *override_path*.
self.override_path_variable = None
if override_path is None:
self.override_path = None
elif (
override_path[0] in ('"', "'") and
override_path[-1] == override_path[0]):
self.override_path = override_path[1:-1]
else:
self.override_path_variable = template.Variable(override_path)
def render(self, context):
# Handle page number when it is not specified in querystring.
if self.page_number_variable is None:
default_number = self.page_number
else:
default_number = int(self.page_number_variable.resolve(context))
# Calculate the number of items to show on each page.
if self.per_page_variable is None:
per_page = self.per_page
else:
per_page = int(self.per_page_variable.resolve(context))
# Calculate the number of items to show in the first page.
if self.first_page_variable is None:
first_page = self.first_page or per_page
else:
first_page = int(self.first_page_variable.resolve(context))
# User can override the querystring key to use in the template.
# The default value is defined in the settings file.
if self.querystring_key_variable is None:
querystring_key = self.querystring_key
else:
querystring_key = self.querystring_key_variable.resolve(context)
# Retrieve the override path if used.
if self.override_path_variable is None:
override_path = self.override_path
else:
override_path = self.override_path_variable.resolve(context)
# Retrieve the queryset and create the paginator object.
objects = self.objects.resolve(context)
paginator = self.paginator(
objects, per_page)
# Normalize the default page number if a negative one is provided.
if default_number < 0:
default_number = utils.normalize_page_number(
default_number, paginator.page_range)
# The current request is used to get the requested page number.
page_number = utils.get_page_number_from_request(
context['request'], querystring_key, default=default_number)
# Get the page.
try:
page = paginator.page(page_number)
except EmptyPage:
page = paginator.page(1)
# Populate the context with required data.
data = {
'default_number': default_number,
'override_path': override_path,
'page': page,
'querystring_key': querystring_key,
}
context.update({'endless': data, self.var_name: page.object_list})
return ''
@register.tag
def show_pages(parser, token):
"""Show page links.
Usage:
.. code-block:: html+django
{% show_pages %}
It is just a shortcut for:
.. code-block:: html+django
{% get_pages %}
{{ pages }}
You can set ``ENDLESS_PAGINATION_PAGE_LIST_CALLABLE`` in your *settings.py*
to a callable, or to a dotted path representing a callable, used to
customize the pages that are displayed.
See the *__unicode__* method of ``endless_pagination.models.PageList`` for
a detailed explanation of how the callable can be used.
Must be called after ``{% paginate objects %}``.
"""
# Validate args.
if len(token.contents.split()) != 1:
msg = '%r tag takes no arguments' % token.contents.split()[0]
raise template.TemplateSyntaxError(msg)
# Call the node.
return ShowPagesNode()
class ShowPagesNode(template.Node):
"""Show the pagination."""
def render(self, context):
# This template tag could raise a PaginationError: you have to call
# *paginate* or *lazy_paginate* before including the getpages template.
data = utils.get_data_from_context(context)
print data
# Return the string representation of the sequence of pages.
pages = models.PageList(
context['request'],
data['page'],
data['querystring_key'],
default_number=data['default_number'],
override_path=data['override_path'],
)
return utils.text(pages)
| mit | -5,307,911,888,844,843,000 | 32.426273 | 79 | 0.624398 | false | 4.038873 | false | false | false |
malkavi/Flexget | dev_tools.py | 1 | 4640 | import fileinput
import io
import os
import shutil
import subprocess
import zipfile
import click
import requests
def _get_version():
with open('flexget/_version.py') as f:
g = globals()
l = {}
exec(f.read(), g, l) # pylint: disable=W0122
if not l['__version__']:
raise click.ClickException('Could not find __version__ from flexget/_version.py')
return l['__version__']
@click.group()
def cli():
pass
@cli.command()
def version():
"""Prints the version number of the source"""
click.echo(_get_version())
@cli.command()
@click.argument('bump_type', type=click.Choice(['dev', 'release']))
def bump_version(bump_type):
"""Bumps version to the next release, or development version."""
cur_ver = _get_version()
click.echo('current version: %s' % cur_ver)
ver_split = cur_ver.split('.')
if 'dev' in ver_split[-1]:
if bump_type == 'dev':
# If this is already a development version, increment the dev count by 1
ver_split[-1] = 'dev%d' % (int(ver_split[-1].strip('dev') or 0) + 1)
else:
# Just strip off dev tag for next release version
ver_split = ver_split[:-1]
else:
# Increment the revision number by one
if len(ver_split) == 2:
# We don't have a revision number, assume 0
ver_split.append('1')
else:
if 'b' in ver_split[2]:
# beta version
minor, beta = ver_split[-1].split('b')
ver_split[-1] = '%sb%s' % (minor, int(beta) + 1)
else:
ver_split[-1] = str(int(ver_split[-1]) + 1)
if bump_type == 'dev':
ver_split.append('dev')
new_version = '.'.join(ver_split)
for line in fileinput.FileInput('flexget/_version.py', inplace=1):
if line.startswith('__version__ ='):
line = "__version__ = '%s'\n" % new_version
print(line, end='')
click.echo('new version: %s' % new_version)
@cli.command()
def bundle_webui():
"""Bundle webui for release packaging"""
ui_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'flexget', 'ui')
def download_extract(url, dest_path):
print(dest_path)
r = requests.get(url)
z = zipfile.ZipFile(io.BytesIO(r.content))
z.extractall(dest_path)
# WebUI V1
click.echo('Bundle WebUI v1...')
try:
# Remove existing
app_path = os.path.join(ui_path, 'v1', 'app')
if os.path.exists(app_path):
shutil.rmtree(app_path)
# Just stashed the old webui zip on a random github release for easy hosting.
# It doesn't get updated anymore, we should probably stop bundling it with releases soon.
download_extract('https://github.com/Flexget/Flexget/releases/download/v3.0.6/webui_v1.zip', os.path.join(ui_path, 'v1'))
except IOError as e:
click.echo('Unable to download and extract WebUI v1 due to %e' % str(e))
raise click.Abort()
# WebUI V2
try:
click.echo('Bundle WebUI v2...')
# Remove existing
app_path = os.path.join(ui_path, 'v2', 'dist')
if os.path.exists(app_path):
shutil.rmtree(app_path)
release = requests.get('https://api.github.com/repos/Flexget/webui/releases/latest').json()
v2_package = None
for asset in release['assets']:
if asset['name'] == 'dist.zip':
v2_package = asset['browser_download_url']
break
if not v2_package:
click.echo('Unable to find dist.zip in assets')
raise click.Abort()
download_extract(v2_package, os.path.join(ui_path, 'v2'))
except (IOError, ValueError) as e:
click.echo('Unable to download and extract WebUI v2 due to %s' % str(e))
raise click.Abort()
@cli.command()
@click.argument('files', nargs=-1)
def autoformat(files):
"""Reformat code with black and isort"""
if not files:
project_root = os.path.dirname(os.path.realpath(__file__))
files = (project_root,)
venv_path = os.environ['VIRTUAL_ENV']
if not venv_path:
raise Exception('Virtualenv and activation required')
# black and isort config are in pyproject.toml
subprocess.call(('black',) + files)
subprocess.call(
(
'isort',
'--virtual-env',
venv_path,
'-rc',
'--skip',
'flexget/__init__.py',
'--skip',
'flexget/manager.py',
)
+ files
)
if __name__ == '__main__':
cli()
| mit | 3,200,527,783,122,051,000 | 30.351351 | 129 | 0.566379 | false | 3.561013 | false | false | false |
opencord/voltha | ofagent/protos/third_party/__init__.py | 1 | 1583 | #
# Copyright 2017 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This helps loading http_pb2 and annotations_pb2.
Without this, the Python importer will not be able to process the lines:
from google.api import http_pb2 or
from google.api import annotations_pb2
(Without importing these, the protobuf loader will not recognize http options
in the protobuf definitions.)
"""
from importlib import import_module
import os
import sys
class GoogleApiImporter(object):
def find_module(self, full_name, path=None):
if full_name == 'google.api':
self.path = [os.path.dirname(__file__)]
return self
def load_module(self, name):
if name in sys.modules:
return sys.modules[name]
full_name = 'ofagent.protos.third_party.' + name
import_module(full_name)
module = sys.modules[full_name]
sys.modules[name] = module
return module
sys.meta_path.append(GoogleApiImporter())
from google.api import http_pb2, annotations_pb2
_ = http_pb2, annotations_pb2
| apache-2.0 | -7,145,884,688,873,406,000 | 30.66 | 77 | 0.714466 | false | 3.908642 | false | false | false |
maemre/rasim | args.py | 1 | 2673 | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 10 17:14:37 2014
@author: Mehmet Emre
Parser for command-line arguments, params will use this for setting it's values.
"""
import argparse
parser = argparse.ArgumentParser(description="rasim - A radio network simulator")
parser.add_argument('--batch-run', action='store_true', help='run simulator in batch-mode, no graph windows will be produced')
parser.add_argument('--N-runs', action='store', default=10, help='number of runs per agent', type=int)
parser.add_argument('--t-total', action='store', default=6000, help='total simulation time (time slots) per run, default = 6000', type=int)
parser.add_argument('--individual-q', action='append_const', dest='agents', const='IndividualQ', help='run individual Q-learning agents')
parser.add_argument('--random-channel', action='append_const', dest='agents', const='RandomChannel', help='run randomly channel selecting agents')
parser.add_argument('--highest-snr', action='append_const', dest='agents', const='OptHighestSNR', help='run agents selecting constant')
parser.add_argument('--output-dir', action='store', default='data/', help='set output directory, it must be already created')
parser.add_argument('--n-agent', action='store', default=5, help='number of agents', type=int)
parser.add_argument('--n-stationary', action='store', help='number of stationary agents', type=int)
parser.add_argument('--n-channel', action='store', default=5, help='number of channels, default = 5', type=int)
parser.add_argument('--n-good-channel', action='store', default=2, help='number of good (type-1) channels among 5 channels', type=int)
parser.add_argument('--buffer-size', action='store', default=512, help='size of buffer, default: 1024 packets', type=int)
parser.add_argument('--buffer-levels', action='store', default=10, help='# of buffer levels in Q-learning, default: 10', type=int)
parser.add_argument('--packet-size', action='store', default=1024, help='size of a packet, default: 1024 bits', type=int)
parser.add_argument('--min-packet-rate', action='store', default=0, help='minimum packet rate per timeslot per agent, default = 0', type=int)
parser.add_argument('--max-packet-rate', action='store', default=6, help='maximum packet rate per timeslot per agent, default = 6', type=int)
parser.add_argument('--beta-idle', action='store', default=10, help='cost coefficient of staying idle for Q-learning, default = 10', type=float)
parser.add_argument('--verbose', action='store_true', help='increase verbosity, give statistics about each run')
parser.add_argument('-v', '--version', action='version', version='%(prog)s 0.1')
argv = parser.parse_args()
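# Illustrative flags for the simulator entry point that imports this module
# (the script name below is a placeholder):
#   python run_sim.py --batch-run --individual-q --random-channel --n-agent 10 --t-total 3000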
| apache-2.0 | 1,636,378,716,372,418,000 | 80 | 146 | 0.729892 | false | 3.61705 | false | false | false |
mattjmuw/iam-messaging | messagetools/aws.py | 1 | 2352 | # ========================================================================
# Copyright (c) 2015 The University of Washington
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
#
#
# IAM messaging tools - AWS interface
#
from sys import exit
from copy import deepcopy
import logging
import json
from .dao import AWS_DAO
class AWS(object):
def __init__(self, conf):
self._conf = conf
# SNS actions
def create_topic(self, name):
dao = AWS_DAO(self._conf)
response = dao.create_topic(name)
return response
def send_message(self, msg, context, cryptid, signid):
dao = AWS_DAO(self._conf)
response = dao.send_message(msg, context, cryptid, signid)
return response
# SQS actions
def get_queue(self):
dao = AWS_DAO(self._conf)
response = dao.get_queue()
return response
def get_all_queues(self):
dao = AWS_DAO(self._conf)
response = dao.get_all_queues()
return response
def create_queue(self, name):
dao = AWS_DAO(self._conf)
response = dao.create_queue(name)
return response
def recv_message(self):
dao = AWS_DAO(self._conf)
response = dao.recv_message()
return response
def recv_and_process(self, handler, max=1):
dao = AWS_DAO(self._conf)
response = dao.recv_and_process(handler, max)
return response
def purge_queue(self):
dao = AWS_DAO(self._conf)
response = dao.purge_queue()
return response
# multi-actions
def subscribe_queue(self, topic_name, queue_name):
dao = AWS_DAO(self._conf)
response = dao.subscribe_queue(topic_name, queue_name)
return response
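# Minimal usage sketch; the structure of `conf` is defined by AWS_DAO in
# messagetools.dao and is assumed here rather than shown in this module:
#   aws = AWS(conf)
#   aws.create_topic('iam-test-topic')
#   aws.recv_and_process(my_handler, max=10)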
| apache-2.0 | 1,520,452,569,509,629,400 | 26.034483 | 75 | 0.596088 | false | 3.986441 | false | false | false |
sonictk/MARI-Extension-Pack | Scripts/stkMariTools/Tools/Cache/clearHistory.py | 1 | 1841 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
""" Module clearHistory """
# noinspection PyUnresolvedReferences
import logging
import mari
import traceback
from PySide.QtGui import QMessageBox
from stkMariTools.lib.ui_utils import MariToolsMenuItem
def registerMenuItem():
"""
This method acts as a identifier method to be run automatically when
detected. It adds the menu item to the Mari menubar.
:return:
"""
ClearHistoryMenuItem()
class ClearHistoryMenuItem(MariToolsMenuItem):
"""
This class adds a Clear History action.
"""
logger = logging.getLogger(__name__)
def __init__(self):
"""
The constructor.
:return:
"""
super(ClearHistoryMenuItem, self).__init__()
mari.ClearHistoryMenuItem = self
self.actionIdentifier = 'Clear cached history'
self.actionCommand = 'mari.ClearHistoryMenuItem.clearHistory()'
self.actionPath = 'MainWindow/&Scripts/&Cache'
self.addMariToolsMenuItem()
def clearHistory(self):
"""
This method clears the Mari undo stack and cache.
:return:
"""
        try:
            mari.history.clear()
except RuntimeError:
self.logger.error('### Could not clear the project history!!!\n{0}'
.format(traceback.print_exc()))
# Display user prompt
mari.utils.message(text='Could not clear the project history!\n'
'Check if there is no project open, '
'or if the current project requires saving.',
title='Could not clear project history!',
icon=QMessageBox.Icon.Warning)
return
mari.ddi.garbageCollect()
mari.ddi.clearMemoryCache() | bsd-3-clause | -1,078,873,126,308,673,300 | 25.314286 | 79 | 0.593156 | false | 4.625628 | false | false | false |
OpenInkpot-archive/iplinux-xcb-proto | src/type.py | 2 | 2054 | #!/usr/bin/python
from xml.sax.saxutils import XMLFilterBase, XMLGenerator
from xml.sax.xmlreader import AttributesImpl
from xml.sax import make_parser
import sys
def AttributesUnion(base, **values):
baseitems = dict(base)
baseitems.update(values)
return AttributesImpl(baseitems)
class AnnotateType(XMLFilterBase):
scopes = []
map = dict([(name, [name]) for name in [
'BOOL', 'BYTE',
'CARD8', 'CARD16', 'CARD32',
'INT8', 'INT16', 'INT32',
'char', 'void',
'float', 'double',
'XID',
]])
def startScope(self, name):
self.scopes.insert(0, name)
def declareType(self, name):
assert ':' not in name
qname = self.scopes[0] + ':' + name
self.map.setdefault(name, []).insert(0, qname)
def getQualifiedType(self, name):
if ':' in name:
return name
names = self.map.get(name, [])
return names[0]
def endScope(self):
self.scopes.pop(0)
def startElement(self, name, attrs):
attnames = []
if name == 'xcb':
self.startScope(attrs['header'])
elif name in ['struct', 'union', 'xidtype', 'enum', 'event', 'eventcopy', 'error', 'errorcopy']:
self.declareType(attrs['name'])
attnames = ['name']
if name.endswith('copy'):
attnames.append('ref')
elif name == 'typedef':
self.declareType(attrs['newname'])
attnames = ['oldname', 'newname']
elif name == 'valueparam':
attnames = ['value-mask-type']
elif attrs.has_key('type'):
attnames = ['type']
newattrs = {}
for attname in attnames:
newattrs[attname] = self.getQualifiedType(attrs[attname])
if newattrs:
attrs = AttributesUnion(attrs, **newattrs)
XMLFilterBase.startElement(self, name, attrs)
def endElement(self, name):
XMLFilterBase.endElement(self, name)
if name == 'xcb':
self.endScope()
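# Rough illustration of the annotation performed above: a type declared inside
# <xcb header="xproto"> (e.g. <xidtype name="WINDOW"/>) is registered as
# "xproto:WINDOW", so later references such as <field type="WINDOW"/> come out
# as type="xproto:WINDOW", while built-in names like CARD32 stay unqualified.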
annotator = AnnotateType(make_parser())
annotator.setContentHandler(XMLGenerator())
if len(sys.argv) > 1:
annotator.parse(sys.argv[1])
else:
annotator.parse(sys.stdin)
for name,names in annotator.map.iteritems():
if len(names) != 1:
print "<!-- warning:", name, "has the following definitions:", names, "-->"
| mit | -6,028,549,231,484,408,000 | 26.756757 | 98 | 0.669426 | false | 2.955396 | false | false | false |
xiviwo/baiducloud | TaskDialog.py | 1 | 9943 | from gi.repository import Gtk
import json,os
from log import logger
import settings
import cloudapi
import utils
import re
from Spinner import SpinnerDialog
from VcodeDialog import VcodeDialog
import urllib.parse
class Singleton(type):
def __init__(cls, name, bases, dict):
super(Singleton, cls).__init__(name, bases, dict)
cls._instance = None
    def __call__(cls, *args, **kw):
        if cls._instance is None:
            cls._instance = super(Singleton, cls).__call__(*args, **kw)
        return cls._instance
def __new__(cls, name, bases, dct):
return type.__new__(cls, name, bases, dct)
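# Note: `__metaclass__ = Singleton` in a class body (as used by TaskDialog
# below) is only honoured by Python 2; under Python 3, which the
# `urllib.parse` import implies, a metaclass must be passed as a class
# keyword instead, e.g. `class TaskDialog(Gtk.Dialog, metaclass=Singleton)`.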
class TaskDialog(Gtk.Dialog):
__metaclass__ = Singleton
def __init__(self,parent,tokens,save_path):
Gtk.Dialog.__init__(self, "Download Task", parent, 0)
self.file_list = []
#self.downlink = []
self.tokens = tokens
self.bdstoken,sign1,sign3,timestamp = self.tokens
#self.file_list = nlist
#self.remove_list = file_list
self.current_selection = None
self.save_path = save_path
#self.draw_widget(file_list)
#def draw_widget(self,file_list):
self.set_default_size(800, 500)
self.set_border_width(10)
box = self.get_content_area()
        ## columns: num, filename, size, status, path, task_id
        #             0      1       2      3      4       5
self.liststore = Gtk.ListStore(int,str, str, str,str,str)
#self.liststore.connect("row-changed",self.row_changed)
self.spinn = SpinnerDialog(self)
self.spinn.show()
self.init_view(self.bdstoken)
#creating the treeview, making it use the filter as a model, and adding the columns
self.treeview = Gtk.TreeView(model=self.liststore)
for i, column_title in enumerate(["Num","File", "Size","Status", "Path"]):
renderer = Gtk.CellRendererText()
column = Gtk.TreeViewColumn(column_title, renderer,text=i)
self.treeview.append_column(column)
self.treeview.props.activate_on_single_click = False
self.treeview.connect("row-activated",self.on_row_double_click)
self.selection = self.treeview.get_selection()
self.selection.connect("changed", self.on_tree_selection_changed)
self.selection.set_mode(Gtk.SelectionMode.MULTIPLE)
self.buttons = list()
for act in ["Add Magnet or Ed2k Link File","Select All","Unselect All", "Remove Task"]:
button = Gtk.Button(act)
self.buttons.append(button)
funcname = "on_%s_button_clicked"%act.lower().replace(" ","_")
func = getattr(self, funcname)
button.connect("clicked", func)
self.scrollable_treelist = Gtk.ScrolledWindow()
self.scrollable_treelist.set_vexpand(True)
box.pack_start(self.scrollable_treelist, True, True, 0)
for i, button in enumerate(self.buttons):
#box.pack_start(self.buttons[i], False,False, 0)
self.add_action_widget(self.buttons[i],i+1)
self.scrollable_treelist.add(self.treeview)
self.infobar = Gtk.InfoBar()
self.infobar.set_message_type(Gtk.MessageType.ERROR)
#box.pack_end(self.infobar, False, False, 0)
#grid.attach_next_to(self.infobar,lbutton,Gtk.PositionType.BOTTOM,13,1)
box.add(self.infobar)
info_content = self.infobar.get_content_area()
self.info_label = Gtk.Label.new("Add magnet/ed2k file to add offline download task")
info_content.pack_start(self.info_label, False, False, 0)
self.infobar.hide()
box.show_all()
def on_tree_selection_changed(self,*arg):
self.current_selection = self.selection.get_selected_rows()
def populate_view(self,*arg):
listjson,error = arg
print(listjson)
if 'task_info' in list(listjson.keys()):
            task_list = listjson['task_info']
            # keep a reference so the add-task duplicate check can consult it later
            self.task_list = task_list
file_list = []
for i,row in enumerate(task_list):
if int(row['status']) == 0:
status = "Success"
else:
                    status = "Not Finished"
nrow = (i,row['task_name'],'0B',status,row['save_path'],row['task_id'])
file_list.append(nrow)
self.fill_liststore(file_list)
elif 'error_msg' in list(listjson.keys()):
info =listjson['error_msg']
logger.info(info)
self.info_label.set_text(info)
self.spinn.destroy()
def init_view(self,bdstoken):
utils.async_call(cloudapi.list_task, bdstoken,
callback=self.populate_view)
self.fill_liststore([])
def fill_liststore(self,file_list):
if file_list:
self.liststore.clear()
for i,filerow in enumerate(file_list):
self.liststore.append(list(filerow))
def on_select_all_button_clicked(self,*arg):
self.selection.select_all()
def on_unselect_all_button_clicked(self,*arg):
self.selection.unselect_all()
def on_remove_task_button_clicked(self,*arg):
def is_current_selection_null():
if not self.current_selection or not self.current_selection[1] :
dialog = Gtk.MessageDialog(self, 0, Gtk.MessageType.INFO,
Gtk.ButtonsType.OK, "Attention.......")
                dialog.format_secondary_text("No file is selected!")
dialog.run()
dialog.destroy()
return True
else:
return False
def after_delete_task(data,error):
self.info_label.set_text("Deletion is done")
self.init_view(self.bdstoken)
self.spinn.destroy()
if is_current_selection_null():
return
store,treepaths = self.current_selection
for tpath in treepaths:
task = ()
for i in store[tpath]:
task = task + (i,)
task_id = task[5]
self.spinn = SpinnerDialog(self)
self.spinn.show()
self.info_label.set_text("Deleting task %s "%task[1])
utils.async_call(cloudapi.delete_task, self.bdstoken,task_id ,
callback=after_delete_task)
#self.liststore.clear()
#self.fill_liststore(file_list)
def on_row_double_click(self,*arg):
pass
def after_cancel_task(self,*arg):
taskdata,error = arg
canceljson,task_id,task_name = taskdata
logger.debug("canceljson: %s "%canceljson)
info ="Task:%s,id:%s is cancelled."%(task_name,task_id)
logger.info(info)
self.info_label.set_text(info)
self.init_view(self.bdstoken)
self.spinn.destroy()
def after_query_task(self,*arg):
taskdata,error = arg
taskjson,task_id = taskdata
#self.init_view(self.bdstoken)
#taskjson = cloudapi.query_task(task_id)
logger.debug("taskjson: %s "%taskjson)
#if task_json:
file_size = int(taskjson['task_info'][task_id]['file_size'])
finished_size = int(taskjson['task_info'][task_id]['finished_size'])
task_name = taskjson['task_info'][task_id]['task_name']
logger.debug("file_size: %s "%file_size)
logger.debug("finished_size: %s "%finished_size)
if finished_size/file_size < 1 :
info = "%s : Finished rate is less than 0.6, canceling."%task_name
logger.info(info)
self.info_label.set_text(info)
utils.async_call(cloudapi.cancel_task, self.bdstoken,task_id,task_name,
callback=self.after_cancel_task)
else:
info = "Task:%s,id:%s is successfully created."%(task_name,task_id)
logger.info(info)
self.info_label.set_text(info)
#self.init_view(self.bdstoken)
self.spinn.destroy()
def after_add_task(self,*arg):
taskjson,error = arg
logger.debug("taskjson: %s "%taskjson)
if 'task_id' in taskjson.keys():
task_id = str(taskjson['task_id'])
utils.async_call(cloudapi.query_task, self.bdstoken,task_id,
callback=self.after_query_task)
else:
error = taskjson['error_msg']
logger.info(error)
self.info_label.set_text(error)
#self.init_view(self.bdstoken)
self.spinn.destroy()
#self.spinn.destroy()
def on_add_magnet_or_ed2k_link_file_button_clicked(self,*arg):
dialog = Gtk.FileChooserDialog("Please choose a file", self,
Gtk.FileChooserAction.OPEN,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_OPEN, Gtk.ResponseType.OK))
response = dialog.run()
if response == Gtk.ResponseType.OK:
#linkfile = dialog.get_file().read()
filename = dialog.get_filename()
print("Open clicked")
print("File selected: " + dialog.get_filename())
elif response == Gtk.ResponseType.CANCEL:
return
dialog.destroy()
link_list = open(filename).read()
task_list = []
invalid_list = []
for line in link_list.split("\n"):
line = line.strip()
if line and ( line.startswith("magnet:?xt=urn") or \
line.startswith("ed2k://") ):
task_list.append(line)
elif line:
invalid_list.append(line)
if invalid_list:
dialog = Gtk.MessageDialog(self, 0, Gtk.MessageType.QUESTION,
Gtk.ButtonsType.OK, "Attention")
            dialog.format_secondary_text(
                "Only magnet or ed2k protocol is supported! Invalid lines: %s" % str(invalid_list))
response = dialog.run()
dialog.destroy()
return
print(self.save_path)
maglist = [ i['source_url'] for i in self.task_list if "magnet:?xt=urn:" in i['source_url'] ]
logger.debug("maglist: %s "%str(maglist))
for i,l in enumerate(task_list):
mag = re.search('(&.*$)',l).group(1)
task_name = dict(urllib.parse.parse_qsl(mag))['dn']
txt = "%s out of %s | %s is running."%(str(i),len(task_list),str(task_name))
logger.info(txt)
self.info_label.set_text(txt)
maglink = re.search("(magnet[^&]*)",l).group(1)
logger.debug("maglink: %s "%maglink)
self.spinn = SpinnerDialog(self)
self.spinn.show()
if maglink not in maglist:
self.info_label.set_text("Adding task: %s "%task_name)
taskjson = cloudapi.add_task(self.bdstoken, l,self.save_path,self)
self.init_view(self.bdstoken)
self.spinn.destroy()
#taskjson = cloudapi.add_task(l,self.save_path)
logger.debug("taskjson: %s "%taskjson)
if 'task_id' in taskjson.keys():
self.spinn = SpinnerDialog(self)
self.spinn.show()
self.info_label.set_text("Querying task: %s "%task_name)
task_id = str(taskjson['task_id'])
utils.async_call(cloudapi.query_task, self.bdstoken,task_id,
callback=self.after_query_task)
self.spinn.destroy()
else:
error = taskjson['error_msg']
logger.info(error)
self.info_label.set_text(error)
#self.spinn.destroy()
else:
                info = "Task already exists, skipping."
logger.info(info)
self.info_label.set_text(info)
self.spinn.destroy()
| gpl-3.0 | 3,616,626,242,526,466,000 | 29.314024 | 97 | 0.672131 | false | 2.861295 | false | false | false |
AmritaLonkar/trunk | SU2_PY/SU2/io/state.py | 2 | 8884 | ## \file state.py
# \brief python package for state
# \author Trent Lukaczyk, Aerospace Design Laboratory (Stanford University) <http://su2.stanford.edu>.
# \version 2.0.6
#
# Stanford University Unstructured (SU2) Code
# Copyright (C) 2012 Aerospace Design Laboratory
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import os, sys, shutil, copy, time
from ..io import expand_part, get_adjointSuffix, add_suffix, \
get_specialCases
from ..util import bunch
from ..util import ordered_bunch
# ----------------------------------------------------------------------
# State Factory
# ----------------------------------------------------------------------
def State_Factory(state=None):
""" state = SU2.io.State()
Starts a state class, an extension of ordered_bunch().
Stores data generated while traversing SU2 tool chain
Parameters:
FUNCTIONS - ordered bunch of objective function values
GRADIENTS - ordered bunch of gradient value lists
VARIABLES - ordered bunch of variables
FILES - ordered bunch of file types
HISTORY - ordered bunch of history information
Parameters can be accessed by item or attribute
ie: state['FUNCTIONS'] or state.FUNCTIONS
Methods:
update() - updates self with another state
pullnlink() - returns files to pull and link
design_vector() - vectorizes design variables
find_files() - finds existing mesh and solutions
Example of a filled state:
FUNCTIONS:
LIFT: 0.2353065809
DRAG: 0.042149736
SIDEFORCE: 0.0
MOMENT_X: 0.0
MOMENT_Y: 0.0
MOMENT_Z: 0.046370243
FORCE_X: 0.0370065195
FORCE_Y: 0.2361700759
FORCE_Z: 0.0
EFFICIENCY: 5.5826347517
GRADIENTS:
DRAG: [0.133697, 0.41473, 0.698497, (...)
VARIABLES:
DV_VALUE_NEW: [0.002, 0.002, 0.002, (...)
FILES:
MESH: mesh.su2
DIRECT: solution_flow.dat
ADJOINT_DRAG: solution_adj_cd.dat
HISTORY:
DIRECT: {ITERATION=[1.0, 2.0, 3.0, (...)
ADJOINT_DRAG: {ITERATION=[1.0, 2.0, 3.0, (...)
"""
if not state is None:
        assert isinstance(state,State) , 'input must be a State instance'
return state
NewClass = State()
for key in ['FUNCTIONS','GRADIENTS','VARIABLES','FILES','HISTORY']:
NewClass[key] = ordered_bunch()
return NewClass
# ----------------------------------------------------------------------
# State Class
# ----------------------------------------------------------------------
class State(ordered_bunch):
""" state = SU2.io.state.State()
This is the State class that should be generated with the
Factory Function SU2.io.state.State_Factory()
Parameters:
none, should be loaded with State_Factory()
Methods:
update() - updates self with another state
pullnlink() - returns files to pull and link
design_vector() - vectorizes design variables
find_files() - finds existing mesh and solutions
"""
_timestamp = 0
def update(self,ztate):
""" Updates self given another state
"""
if not ztate: return
assert isinstance(ztate,State) , 'must update with another State-type'
for key in self.keys():
if isinstance(ztate[key],dict):
self[key].update( ztate[key] )
elif ztate[key]:
self[key] = ztate[key]
self.set_timestamp()
def __repr__(self):
return self.__str__()
def __str__(self):
output = 'STATE:'
for k1,v1 in self.iteritems():
output += '\n %s:' % k1
if isinstance(v1,dict):
for k2,v2 in v1.iteritems():
output += '\n %s: %s' % (k2,v2)
else:
output += '\n %s' % v1
return output
def pullnlink(self,config):
""" pull,link = SU2.io.State.pullnlink(config)
returns lists pull and link of files for folder
redirection, based on a given config
"""
pull = []; link = []
# choose files to pull and link
for key,value in self.FILES.iteritems():
# link big files
if key == 'MESH':
# mesh (merged and partitions)
if config.DECOMPOSED:
value = expand_part(value,config) # hack - twl
else:
value = [value]
link.extend(value)
elif key == 'DIRECT':
#if config.RESTART_SOL == 'YES':
# direct solution
link.append(value)
elif 'ADJOINT_' in key:
#if config.RESTART_SOL == 'YES':
# adjoint solution
link.append(value)
# copy all other files
else:
pull.append(value)
#: for each filename
return pull,link
def design_vector(self):
""" vectorizes State.VARIABLES
"""
vector = []
for value in self.VARIABLES.values():
if isinstance(value,dict):
for v in value.values():
vector.append(v)
elif not isinstance(value,list):
value = [value]
vector.extend(value)
return vector
def find_files(self,config):
""" SU2.io.State.find_files(config)
finds mesh and solution files for a given config.
updates state.FILES with filenames.
files already logged in state are not overridden.
will ignore solutions if config.RESTART_SOL == 'NO'.
"""
files = self.FILES
mesh_name = config.MESH_FILENAME
direct_name = config.SOLUTION_FLOW_FILENAME
adjoint_name = config.SOLUTION_ADJ_FILENAME
targetea_name = 'TargetEA.dat'
adj_map = get_adjointSuffix()
restart = config.RESTART_SOL == 'YES'
special_cases = get_specialCases(config)
def register_file(label,filename):
if not files.has_key(label):
if os.path.exists(filename):
files[label] = filename
print 'found: %s' % filename
else:
assert os.path.exists(files[label]) , 'state expected file: %s' % filename
#: register_file()
# mesh
register_file('MESH',mesh_name)
# direct solution
if restart:
register_file('DIRECT',direct_name)
# adjoint solutions
if restart:
for obj,suff in adj_map.iteritems():
ADJ_LABEL = 'ADJOINT_' + obj
adjoint_name_suffixed = add_suffix(adjoint_name,suff)
register_file(ADJ_LABEL,adjoint_name_suffixed)
# equivalent area
if 'EQUIV_AREA' in special_cases:
register_file('TARGET_EA',targetea_name)
return
def __setitem__(self,k,v):
if self._initialized:
self.set_timestamp()
super(State,self).__setitem__(k,v)
def set_timestamp(self):
self._timestamp = time.time()
def tic(self):
""" timestamp = State.tic()
returns the time that this state was last modified
"""
return self._timestamp
def toc(self,timestamp):
""" updated = State.toc(timestamp)
returns True if state was modified since last timestamp
"""
return self._timestamp > timestamp
#: def State
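# Minimal usage sketch (illustrative only; ``config`` stands for an SU2 config object
# and is an assumption here, not something defined in this module):
def _state_usage_sketch(config):
    """Typical round trip: build a state, locate files, split them for a run folder."""
    state = State_Factory()
    state.find_files(config)              # fills state.FILES from existing mesh/solution files
    pull, link = state.pullnlink(config)  # small files to copy vs. big files to symlink
    return pull, link, state.design_vector()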
| gpl-2.0 | 6,104,734,270,056,771,000 | 32.273408 | 103 | 0.512269 | false | 4.466566 | true | false | false |
komola/swift-robots | robots/middleware.py | 1 | 1570 | # Copyright 2013 komola GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Robots middleware denies access for search engines
"""
from webob import Request, Response
class RobotsMiddleware(object):
"""
Robots middleware denies access for search engines
If the path is /robots.txt, it will respond with Deny All.
"""
def __init__(self, app, *args, **kwargs):
self.app = app
    def GET(self, req):
        """Returns a 200 text/plain response disallowing all robots."""
return Response(request=req, body="User-agent: *\nDisallow: /", content_type="text/plain")
def __call__(self, env, start_response):
req = Request(env)
try:
if req.path == '/robots.txt':
return self.GET(req)(env, start_response)
except UnicodeError:
# definitely, this is not /robots.txt
pass
return self.app(env, start_response)
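# Quick self-test sketch (not part of the original module): exercise the middleware
# with webob's test request machinery; any WSGI app can sit underneath it.
def _demo():
    def dummy_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['hello']
    app = RobotsMiddleware(dummy_app)
    robots = Request.blank('/robots.txt').get_response(app)  # "User-agent: *\nDisallow: /"
    passthrough = Request.blank('/anything').get_response(app)  # handled by dummy_app
    return robots.body, passthrough.body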
def filter_factory(global_conf, **local_conf):
def robots_filter(app):
return RobotsMiddleware(app)
return robots_filter | apache-2.0 | 2,509,246,652,877,904,400 | 30.42 | 98 | 0.666879 | false | 3.934837 | false | false | false |
swenger/glitter | examples/opencltexture2.py | 1 | 3793 | #!/usr/bin/env/python
"""Minimal example of OpenGL/CL interaction using textures.
@author: Kai Ruhl
@since 2013-02"""
import sys
import numpy as np
import pyopencl as cl
from glitter import Texture2D
from glitter.raw import gl
cl_source = """
const sampler_t T_RAW_SAMPLER = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP_TO_EDGE | CLK_FILTER_NEAREST;
__kernel void run(uint wid, uint hei, __read_only image2d_t img0) {
}
"""
def get_gl_context(option="g"):
"""Returns an OpenGL context. Options: g(lut), q(t)"""
if "g" == option:
print "Creating GLUT context."
from glitter.contexts.glut import GlutWindow
gl_context = GlutWindow(shape=(1,1), hide=True)
elif "q" == option:
print "Creating QT context."
from PySide import QtGui
from glitter.contexts.qt import QtWidget
app = QtGui.QApplication.instance()
if app is None:
app = QtGui.QApplication(sys.argv)
gl_context = QtWidget(None)
else:
raise Exception("Unknown option: %s" % option)
return gl_context
def get_cl_context(gl_context):
"""Creates a CL context, with or without given GL context."""
if gl_context is not None: # ... with OpenGL interop?
with gl_context:
assert cl.have_gl(), "GL interoperability not enabled."
from pyopencl.tools import get_gl_sharing_context_properties
cl_platform = cl.get_platforms()[0]
cl_properties = [(cl.context_properties.PLATFORM, cl_platform)] + get_gl_sharing_context_properties()
cl_devices = [cl_platform.get_devices()[-1]] # Only one is allowed!
cl_context = cl.Context(properties=cl_properties, devices=cl_devices)
else: # ... or in stand-alone mode, CL context without GL?
cl_platform = cl.get_platforms()[0] # @UndefinedVariable
cl_properties = [(cl.context_properties.PLATFORM, cl_platform)]
cl_devices = [cl_platform.get_devices()[-1]] # Only one is allowed!
cl_context = cl.Context(properties=cl_properties, devices=cl_devices)
return cl_context
def test_clgl_texture_interop(gl_context, cl_context):
"""Tests that an OpenGL texture can be used in an OpenCL kernel."""
from scipy.misc import lena;
img = np.dstack([lena() / 256.] * 3).astype(np.float32); hei, wid = img.shape[:2]
gl_img = Texture2D(img, mipmap=True, context=gl_context)
cl_img = cl.GLTexture(cl_context, cl.mem_flags.READ_ONLY, gl.GL_TEXTURE_2D, 1, gl_img._id, 2)
cl_queue = cl.CommandQueue(cl_context)
cl_program = cl.Program(cl_context, cl_source).build()
if True: # usable in loop
cl_gl_data = [cl_img]
cl.enqueue_acquire_gl_objects(cl_queue, cl_gl_data)
cl_args = [np.uint32(wid), np.uint32(hei), cl_img]; assert 3 == len(cl_args)
cl_program.run(cl_queue, (wid, hei), None, *cl_args)
cl.enqueue_release_gl_objects(cl_queue, cl_gl_data)
cl_queue.flush()
cl_queue.finish()
if __name__ == "__main__":
gl_context = get_gl_context("q" if len(sys.argv) < 2 else sys.argv[1])
cl_context = get_cl_context(gl_context)
test_clgl_texture_interop(gl_context, cl_context);
w, h = 800, 600;
if False:
from glitter.framebuffers.framebuffer import Framebuffer
        gl_frame_buffer = Framebuffer(Texture2D(shape=(h, w, 3), context=gl_context), depth=Texture2D(shape=(h, w, 1), depth=True, context=gl_context), context=gl_context)
if False:
import glitter.utils.dtypes as gdtype
gl_int_mipmap_texture = Texture2D(shape=(h, w, 3), dtype=gdtype.uint8, mipmap=True, context=gl_context)
gl_int_mipmap_texture.min_filter = Texture2D.min_filters.LINEAR_MIPMAP_LINEAR
gl_data = gl_int_mipmap_texture.get_data(level=2)
print "Finished."
| mit | 214,331,486,804,581,470 | 40.681319 | 165 | 0.652254 | false | 3.17938 | false | false | false |
mgymrek/pybamview | pybamview/utils.py | 1 | 3400 | # -*- coding: utf-8 -*-
import os
import random
import sys
from subprocess import Popen, PIPE, STDOUT
def message(msg, msgtype='progress'):
"""Send a message to console.
Args:
msgtype (str): one of 'progress', 'warning', 'error', or 'debug'
"""
message = "[%(level)s]: %(text)s" % dict(level=msgtype.upper(), text=msg)
sys.stderr.write(message.strip() + "\n")
if msgtype == 'error':
sys.exit(1)
def random_ports(port, n):
"""Generate a list of n random ports near the given port.
The first 5 ports will be sequential, and the remaining n-5 will be
randomly selected in the range [port-2*n, port+2*n].
(copied from IPython notebookapp.py)
"""
for i in range(min(5, n)):
yield port + i
for i in range(n-5):
yield max(1, port + random.randint(-2*n, 2*n))
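# Sketch of how the generator above is typically consumed (the socket probing is an
# assumption for illustration, not part of pybamview's public API):
def FindOpenPort(port, n=50):
    """Return the first bindable port suggested by random_ports(), or None."""
    import socket
    for candidate in random_ports(port, n):
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.bind(("127.0.0.1", candidate))
            return candidate
        except socket.error:
            continue
        finally:
            s.close()
    return None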
def ParseTargets(targetfile):
""" Return list of targets, each is dict with region and name """
x = []
with open(targetfile, "r") as f:
for line in f:
items = line.strip().split("\t")
if len(items) != 4:
message("invalid target file. should have 4 columns", "error")
chrom, start, end, name = items
region = "%s:%s"%(chrom, start)
x.append({"name": name, "region": region, "coords": (chrom, int(start), int(end))})
return x
def WriteParamFile(paramfile, jspath, filetype, reference_track, samples, alignments_by_sample, fromindex, toindex):
"""
Generate paramfile for creating snapshots from the command line
"""
f = open(paramfile, "w")
f.write('var exports = module.exports = {};\n')
f.write('exports.reference_track = "%s";\n' % reference_track)
f.write('exports.samples = %s;\n' % str(samples))
f.write('exports.alignBySample = {\n')
for sample in alignments_by_sample:
f.write('"%s": "%s",\n' % (sample, alignments_by_sample[sample]))
f.write('};\n')
f.write('exports.fromindex = %s;\n' % fromindex)
f.write('exports.toindex = %s;\n' % toindex)
f.write('exports.jspath = "%s";\n' % jspath)
if filetype in ["html","svg"]:
f.write('exports.filetype = "%s";\n' % filetype)
elif filetype == "pdf":
f.write('exports.filetype = "svg";\n')
else:
f.write('exports.filetype = "none";\n')
f.close()
def RunCommand(cmd):
p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, \
stderr=STDOUT, close_fds=True)
ex = p.wait()
if ex != 0:
stdout, stderr = "", ""
if p.stdout is not None: stdout = p.stdout.read()
if p.stderr is not None: stderr = p.stderr.read()
message("ERROR: command '%s' failed.\n\nSTDOUT:%s\nSTDERR:%s"%(cmd, stdout, stderr))
return ex
def CheckProgram(program):
""" Check whether a program is installed """
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file): return True
return False
def CheckNodeJSPackage(package):
""" Check whether a node.js package is installed """
cmd = "node -e \"var d3=require('%s');\"" % package
x = RunCommand(cmd)
return x == 0
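# Example preflight check (assumed workflow, not part of the original module): the
# snapshot path shells out to node.js with the d3 package, so a caller might verify
# both before calling WriteParamFile/RunCommand.
def CheckSnapshotDependencies():
    return CheckProgram("node") and CheckNodeJSPackage("d3")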
| mit | -5,352,880,757,352,825,000 | 32.663366 | 116 | 0.59 | false | 3.393214 | false | false | false |
jtauber/sgf | test.py | 1 | 2727 | #!/usr/bin/env python
import glob
import sgf
try:
from StringIO import StringIO # pragma: no cover
except ImportError: # pragma: no cover
from io import StringIO # pragma: no cover
for filename in glob.glob("examples/*.sgf"):
with open(filename) as f:
sgf.parse(f.read())
example = "(;FF[4]GM[1]SZ[19];B[aa];W[bb];B[cc];W[dd];B[ad];W[bd])"
collection = sgf.parse(example)
for game in collection:
for node in game:
pass
out = StringIO()
collection[0].nodes[1].output(out)
assert out.getvalue() == ";B[aa]"
out.close()
out = StringIO()
collection.output(out)
assert out.getvalue() == example
out.close()
example2 = "(;FF[4]GM[1]SZ[19];B[aa];W[bb](;B[cc];W[dd];B[ad];W[bd])" \
"(;B[hh];W[hg]))"
collection = sgf.parse(example2)
out = StringIO()
collection.output(out)
assert out.getvalue() == example2
out.close()
example3 = "(;C[foo\\]\\\\])"
collection = sgf.parse(example3)
assert collection[0].nodes[0].properties["C"] == ["foo]\\"]
out = StringIO()
collection.output(out)
assert out.getvalue() == example3
out.close()
sgf.parse("foo(;)") # junk before first ( is supported
sgf.parse("( ;)") # whitespace after ( is allowed
sgf.parse("(;;)") # a node after an empty node is allowed
sgf.parse("(;(;))") # a gametree after an empty node is allowed
# errors
try:
sgf.parse("()") # games must have a node
assert False # pragma: no cover
except sgf.ParseException:
pass
try:
sgf.parse("(W[tt])") # a property has to be in a node
assert False # pragma: no cover
except sgf.ParseException:
pass
try:
sgf.parse("(;)W[tt]") # a property has to be in a game
assert False # pragma: no cover
except sgf.ParseException:
pass
try:
sgf.parse("(;1)") # property names can't start with numbers
assert False # pragma: no cover
except sgf.ParseException:
pass
try:
sgf.parse("(;A5[])") # property names can't have numbers at all
assert False # pragma: no cover
except sgf.ParseException:
pass
try:
sgf.parse("(;FOO[bar]5)") # bad character after a property value
assert False # pragma: no cover
except sgf.ParseException:
pass
try:
sgf.parse("(;") # finished mid-gametree
assert False # pragma: no cover
except sgf.ParseException:
pass
# new features for 0.5
with open("examples/ff4_ex.sgf") as f:
ff4_ex = sgf.parse(f.read())
assert len(ff4_ex) == 2
game1 = ff4_ex[0]
assert game1.root.properties["SZ"] == ["19"]
count = 0
for node in game1.rest:
count += 1
assert count == 13
collection = sgf.parse(example2)
count = 0
for node in collection[0].rest:
count += 1
assert count == 6
# test game.rest if only one node
assert sgf.parse("(;)")[0].rest is None
| mit | 2,341,016,438,754,120,000 | 20.139535 | 71 | 0.646498 | false | 2.948108 | false | false | false |
rpetit3-science/call_variants | call_variants/helpers/time_job.py | 1 | 1609 | #! /usr/bin/env python
"""
Add decorator to time pipeline steps.
See the following links for more info:
https://github.com/bunbun/ruffus/issues/15
https://github.com/daler/pipeline-example/blob/master/pipeline-2/helpers.py
"""
import sys
import time
class time_job(object):
"""
@time_job decorator.
Wraps a function and prints elapsed time to standard out, or any other
file-like object with a .write() method.
"""
def __init__(self, stream=sys.stdout, new_stream=False):
""" """
self.stream = stream
self.new_stream = new_stream
def __call__(self, func):
""" """
def inner(*args, **kwargs):
# Start the timer.
start = time.time()
# Run the decorated function.
ret = func(*args, **kwargs)
# Stop the timer.
end = time.time()
elapsed = end - start
name = func.__name__
runtime = "{0}\t{1:.4f}\n".format(name, elapsed)
if type(self.stream) == str:
if self.new_stream:
with open(self.stream, 'w') as log:
log.write(runtime)
else:
with open(self.stream, 'a') as log:
log.write(runtime)
else:
self.stream.write(runtime)
# Return the decorated function's return value.
return ret
inner.__name__ = func.__name__
if hasattr(func, "pipeline_task"):
inner.pipeline_task = func.pipeline_task
return inner
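if __name__ == "__main__":
    # Usage sketch (illustrative only): time a trivial function; each call appends a
    # "<function name>\t<seconds>" line to the given stream (stderr here).
    @time_job(sys.stderr)
    def _example_step():
        return sum(range(1000))
    _example_step()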
| mit | 5,159,627,858,735,345,000 | 27.22807 | 79 | 0.525171 | false | 4.11509 | false | false | false |
codeforgood13/ability | shakti/migrations/0003_auto__del_field_jobdescriptor_who_can__add_field_jobdescriptor_who_can.py | 1 | 7736 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'JobDescriptor.who_can'
db.delete_column(u'shakti_jobdescriptor', 'who_can')
# Adding field 'JobDescriptor.who_can_o'
db.add_column(u'shakti_jobdescriptor', 'who_can_o',
self.gf('django.db.models.fields.CharField')(default='N', max_length=5),
keep_default=False)
# Adding field 'JobDescriptor.who_can_b'
db.add_column(u'shakti_jobdescriptor', 'who_can_b',
self.gf('django.db.models.fields.CharField')(default='N', max_length=5),
keep_default=False)
# Adding field 'JobDescriptor.who_can_h'
db.add_column(u'shakti_jobdescriptor', 'who_can_h',
self.gf('django.db.models.fields.CharField')(default='N', max_length=5),
keep_default=False)
def backwards(self, orm):
# Adding field 'JobDescriptor.who_can'
db.add_column(u'shakti_jobdescriptor', 'who_can',
self.gf('django.db.models.fields.TextField')(default=-1, max_length=20),
keep_default=False)
# Deleting field 'JobDescriptor.who_can_o'
db.delete_column(u'shakti_jobdescriptor', 'who_can_o')
# Deleting field 'JobDescriptor.who_can_b'
db.delete_column(u'shakti_jobdescriptor', 'who_can_b')
# Deleting field 'JobDescriptor.who_can_h'
db.delete_column(u'shakti_jobdescriptor', 'who_can_h')
models = {
u'shakti.constraints': {
'Meta': {'object_name': 'Constraints'},
'assistance_descr': ('django.db.models.fields.CharField', [], {'default': "' May need help'", 'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'night_shift': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'relocatable': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'special_assistance': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'uid': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['shakti.PersonalInfo']"})
},
u'shakti.hearing': {
'Meta': {'object_name': 'Hearing'},
'hearing_aid': ('django.db.models.fields.CharField', [], {'default': "'N'", 'max_length': '5'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'uid': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['shakti.PersonalInfo']"})
},
u'shakti.jobdescriptor': {
'Meta': {'object_name': 'JobDescriptor'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'night_shift': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'post': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'qualification': ('django.db.models.fields.TextField', [], {}),
'skills_required': ('django.db.models.fields.TextField', [], {}),
'who_can_b': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'who_can_h': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'who_can_o': ('django.db.models.fields.CharField', [], {'max_length': '5'})
},
u'shakti.orthopedic': {
'Meta': {'object_name': 'Orthopedic'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lhand_amputee': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'lleg_amputee': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'orthopedic_aid': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'rhand_amputee': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'rleg_amputee': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'uid': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['shakti.PersonalInfo']"})
},
u'shakti.other': {
'Meta': {'object_name': 'Other'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'other_description': ('django.db.models.fields.TextField', [], {}),
'uid': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['shakti.PersonalInfo']"})
},
u'shakti.personalinfo': {
'Meta': {'object_name': 'PersonalInfo'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'dob': ('django.db.models.fields.DateField', [], {}),
'email': ('django.db.models.fields.EmailField', [], {'default': "'[email protected]'", 'max_length': '30'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.TextField', [], {}),
'maritial_status': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'mobile_num': ('django.db.models.fields.CharField', [], {'default': "'+9129089998'", 'max_length': '15'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
u'shakti.qualification': {
'Meta': {'object_name': 'Qualification'},
'eduIndex': ('django.db.models.fields.IntegerField', [], {'max_length': '10'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'other_desc': ('django.db.models.fields.TextField', [], {'default': 'None', 'blank': 'True'}),
'uid': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['shakti.PersonalInfo']"})
},
u'shakti.skills': {
'Meta': {'object_name': 'Skills'},
'computer_skills': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'projects': ('django.db.models.fields.TextField', [], {}),
'speciality': ('django.db.models.fields.TextField', [], {}),
'uid': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['shakti.PersonalInfo']"})
},
u'shakti.tracker': {
'Meta': {'object_name': 'Tracker'},
'details': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '10'}),
'doj': ('django.db.models.fields.DateField', [], {'default': 'None'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'placed': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'uid': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['shakti.PersonalInfo']"})
},
u'shakti.vision': {
'Meta': {'object_name': 'Vision'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'severity': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'uid': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['shakti.PersonalInfo']"})
}
}
complete_apps = ['shakti'] | mit | -2,028,688,624,644,257,300 | 56.738806 | 128 | 0.544209 | false | 3.505211 | false | false | false |
Charlotte-Morgan/inasafe | safe/gui/tools/multi_exposure_dialog.py | 1 | 29397 | # coding=utf-8
"""Multi Exposure Tool."""
import logging
from collections import OrderedDict
from qgis.PyQt.QtCore import Qt
from qgis.PyQt.QtWidgets import (
QDialog,
QComboBox,
QLabel,
QSizePolicy,
QTreeWidgetItem,
QListWidgetItem
)
from qgis.PyQt.QtGui import QIcon
from qgis.PyQt.QtXml import QDomDocument
from qgis.core import QgsProject, QgsApplication
from qgis.utils import iface as iface_object
from safe import messaging as m
from safe.common.exceptions import (
NoKeywordsFoundError,
KeywordNotFoundError,
MetadataReadError,
)
from safe.common.signals import send_error_message
from safe.definitions.constants import (
inasafe_keyword_version_key,
ANALYSIS_FAILED_BAD_INPUT,
PREPARE_SUCCESS,
ANALYSIS_FAILED_BAD_CODE,
entire_area_item_aggregation,
MULTI_EXPOSURE_ANALYSIS_FLAG,
)
from safe.definitions.exposure import exposure_all
from safe.definitions.font import bold_font
from safe.definitions.layer_purposes import (
layer_purpose_hazard,
layer_purpose_exposure,
layer_purpose_aggregation,
)
from safe.definitions.reports.components import (
standard_impact_report_metadata_html,
standard_multi_exposure_impact_report_metadata_html)
from safe.definitions.utilities import definition
from safe.gis.tools import full_layer_uri
from safe.gui.analysis_utilities import (
add_impact_layers_to_canvas,
add_layers_to_canvas_with_custom_orders,
)
from safe.gui.gui_utilities import layer_from_combo, add_ordered_combo_item
from safe.gui.widgets.message import (
enable_messaging,
send_static_message,
ready_message,
)
from safe.impact_function.impact_function_utilities import (
LAYER_ORIGIN_ROLE,
FROM_CANVAS,
FROM_ANALYSIS,
LAYER_PARENT_ANALYSIS_ROLE,
LAYER_PURPOSE_KEY_OR_ID_ROLE,
)
from safe.impact_function.multi_exposure_wrapper import (
MultiExposureImpactFunction)
from safe.messaging import styles
from safe.report.impact_report import ImpactReport
from safe.utilities.extent import Extent
from safe.utilities.gis import qgis_version, layer_icon
from safe.utilities.i18n import tr
from safe.utilities.keyword_io import KeywordIO
from safe.utilities.qgis_utilities import display_warning_message_bar
from safe.utilities.qt import disable_busy_cursor, enable_busy_cursor
from safe.utilities.resources import (
get_ui_class, resources_path,
)
from safe.utilities.settings import setting
from safe.utilities.utilities import (
is_keyword_version_supported,
basestring_to_message,
get_error_message,
)
LOGGER = logging.getLogger('InaSAFE')
FORM_CLASS = get_ui_class('multi_exposure_dialog_base.ui')
INFO_STYLE = styles.BLUE_LEVEL_4_STYLE
LOGO_ELEMENT = m.Brand()
class MultiExposureDialog(QDialog, FORM_CLASS):
"""Dialog for multi exposure tool."""
def __init__(self, parent=None, iface=iface_object):
"""Constructor for the multi exposure dialog.
:param parent: Parent widget of this dialog.
:type parent: QWidget
:param iface: An instance of QgisInterface
:type iface: QgisInterface
"""
QDialog.__init__(self, parent)
self.use_selected_only = setting(
'useSelectedFeaturesOnly', expected_type=bool)
self.parent = parent
self.iface = iface
self.setupUi(self)
icon = resources_path('img', 'icons', 'show-multi-exposure.svg')
self.setWindowIcon(QIcon(icon))
self.tab_widget.setCurrentIndex(0)
self.combos_exposures = OrderedDict()
self.keyword_io = KeywordIO()
self._create_exposure_combos()
self._multi_exposure_if = None
self._extent = Extent(iface)
self._extent.show_rubber_bands = setting(
'showRubberBands', False, bool)
enable_messaging(self.message_viewer, self)
self.btn_back.clicked.connect(self.back_clicked)
self.btn_next.clicked.connect(self.next_clicked)
self.btn_cancel.clicked.connect(self.reject)
self.btn_run.clicked.connect(self.accept)
self.validate_impact_function()
self.tab_widget.currentChanged.connect(self._tab_changed)
self.tree.itemSelectionChanged.connect(self._tree_selection_changed)
self.list_layers_in_map_report.itemSelectionChanged.connect(
self._list_selection_changed)
self.add_layer.clicked.connect(self._add_layer_clicked)
self.remove_layer.clicked.connect(self._remove_layer_clicked)
self.move_up.clicked.connect(self.move_layer_up)
self.move_down.clicked.connect(self.move_layer_down)
self.cbx_hazard.currentIndexChanged.connect(
self.validate_impact_function)
self.cbx_aggregation.currentIndexChanged.connect(
self.validate_impact_function)
# Keep track of the current panel
self._current_index = 0
self.tab_widget.setCurrentIndex(self._current_index)
def _tab_changed(self):
"""Triggered when the current tab is changed."""
current = self.tab_widget.currentWidget()
if current == self.analysisTab:
self.btn_back.setEnabled(False)
self.btn_next.setEnabled(True)
elif current == self.reportingTab:
if self._current_index == 0:
# Only if the user is coming from the first tab
self._populate_reporting_tab()
self.reporting_options_layout.setEnabled(
self._multi_exposure_if is not None)
self.btn_back.setEnabled(True)
self.btn_next.setEnabled(True)
else:
self.btn_back.setEnabled(True)
self.btn_next.setEnabled(False)
        self._current_index = self.tab_widget.currentIndex()
def back_clicked(self):
"""Back button clicked."""
self.tab_widget.setCurrentIndex(self.tab_widget.currentIndex() - 1)
def next_clicked(self):
"""Next button clicked."""
self.tab_widget.setCurrentIndex(self.tab_widget.currentIndex() + 1)
def ordered_expected_layers(self):
"""Get an ordered list of layers according to users input.
From top to bottom in the legend:
[
('FromCanvas', layer name, full layer URI, QML),
('FromAnalysis', layer purpose, layer group, None),
...
]
The full layer URI is coming from our helper.
:return: An ordered list of layers following a structure.
:rtype: list
"""
registry = QgsProject.instance()
layers = []
count = self.list_layers_in_map_report.count()
for i in range(count):
layer = self.list_layers_in_map_report.item(i)
origin = layer.data(LAYER_ORIGIN_ROLE)
if origin == FROM_ANALYSIS['key']:
key = layer.data(LAYER_PURPOSE_KEY_OR_ID_ROLE)
parent = layer.data(LAYER_PARENT_ANALYSIS_ROLE)
layers.append((
FROM_ANALYSIS['key'],
key,
parent,
None
))
else:
layer_id = layer.data(LAYER_PURPOSE_KEY_OR_ID_ROLE)
layer = registry.mapLayer(layer_id)
style_document = QDomDocument()
error = ''
layer.exportNamedStyle(style_document, error)
layers.append((
FROM_CANVAS['key'],
layer.name(),
full_layer_uri(layer),
style_document.toString()
))
return layers
def _add_layer_clicked(self):
"""Add layer clicked."""
layer = self.tree.selectedItems()[0]
origin = layer.data(0, LAYER_ORIGIN_ROLE)
if origin == FROM_ANALYSIS['key']:
parent = layer.data(0, LAYER_PARENT_ANALYSIS_ROLE)
key = layer.data(0, LAYER_PURPOSE_KEY_OR_ID_ROLE)
item = QListWidgetItem('%s - %s' % (layer.text(0), parent))
item.setData(LAYER_PARENT_ANALYSIS_ROLE, parent)
item.setData(LAYER_PURPOSE_KEY_OR_ID_ROLE, key)
else:
item = QListWidgetItem(layer.text(0))
layer_id = layer.data(0, LAYER_PURPOSE_KEY_OR_ID_ROLE)
item.setData(LAYER_PURPOSE_KEY_OR_ID_ROLE, layer_id)
item.setData(LAYER_ORIGIN_ROLE, origin)
self.list_layers_in_map_report.addItem(item)
self.tree.invisibleRootItem().removeChild(layer)
self.tree.clearSelection()
def _remove_layer_clicked(self):
"""Remove layer clicked."""
layer = self.list_layers_in_map_report.selectedItems()[0]
origin = layer.data(LAYER_ORIGIN_ROLE)
if origin == FROM_ANALYSIS['key']:
key = layer.data(LAYER_PURPOSE_KEY_OR_ID_ROLE)
parent = layer.data(LAYER_PARENT_ANALYSIS_ROLE)
parent_item = self.tree.findItems(
parent, Qt.MatchContains | Qt.MatchRecursive, 0)[0]
item = QTreeWidgetItem(parent_item, [definition(key)['name']])
item.setData(0, LAYER_PARENT_ANALYSIS_ROLE, parent)
else:
parent_item = self.tree.findItems(
FROM_CANVAS['name'],
Qt.MatchContains | Qt.MatchRecursive, 0)[0]
item = QTreeWidgetItem(parent_item, [layer.text()])
layer_id = layer.data(LAYER_PURPOSE_KEY_OR_ID_ROLE)
item.setData(0, LAYER_PURPOSE_KEY_OR_ID_ROLE, layer_id)
item.setData(0, LAYER_ORIGIN_ROLE, origin)
index = self.list_layers_in_map_report.indexFromItem(layer)
self.list_layers_in_map_report.takeItem(index.row())
self.list_layers_in_map_report.clearSelection()
def move_layer_up(self):
"""Move the layer up."""
layer = self.list_layers_in_map_report.selectedItems()[0]
index = self.list_layers_in_map_report.indexFromItem(layer).row()
item = self.list_layers_in_map_report.takeItem(index)
self.list_layers_in_map_report.insertItem(index - 1, item)
self.list_layers_in_map_report.item(index - 1).setSelected(True)
def move_layer_down(self):
"""Move the layer down."""
layer = self.list_layers_in_map_report.selectedItems()[0]
index = self.list_layers_in_map_report.indexFromItem(layer).row()
item = self.list_layers_in_map_report.takeItem(index)
self.list_layers_in_map_report.insertItem(index + 1, item)
self.list_layers_in_map_report.item(index + 1).setSelected(True)
def _list_selection_changed(self):
"""Selection has changed in the list."""
items = self.list_layers_in_map_report.selectedItems()
self.remove_layer.setEnabled(len(items) >= 1)
if len(items) == 1 and self.list_layers_in_map_report.count() >= 2:
index = self.list_layers_in_map_report.indexFromItem(items[0])
index = index.row()
if index == 0:
self.move_up.setEnabled(False)
self.move_down.setEnabled(True)
elif index == self.list_layers_in_map_report.count() - 1:
self.move_up.setEnabled(True)
self.move_down.setEnabled(False)
else:
self.move_up.setEnabled(True)
self.move_down.setEnabled(True)
else:
self.move_up.setEnabled(False)
self.move_down.setEnabled(False)
def _tree_selection_changed(self):
"""Selection has changed in the tree."""
self.add_layer.setEnabled(len(self.tree.selectedItems()) >= 1)
def _populate_reporting_tab(self):
"""Populate trees about layers."""
self.tree.clear()
self.add_layer.setEnabled(False)
self.remove_layer.setEnabled(False)
self.move_up.setEnabled(False)
self.move_down.setEnabled(False)
self.tree.setColumnCount(1)
self.tree.setRootIsDecorated(False)
self.tree.setHeaderHidden(True)
analysis_branch = QTreeWidgetItem(
self.tree.invisibleRootItem(), [FROM_ANALYSIS['name']])
analysis_branch.setFont(0, bold_font)
analysis_branch.setExpanded(True)
analysis_branch.setFlags(Qt.ItemIsEnabled)
if self._multi_exposure_if:
expected = self._multi_exposure_if.output_layers_expected()
for group, layers in list(expected.items()):
group_branch = QTreeWidgetItem(analysis_branch, [group])
group_branch.setFont(0, bold_font)
group_branch.setExpanded(True)
group_branch.setFlags(Qt.ItemIsEnabled)
for layer in layers:
layer = definition(layer)
if layer.get('allowed_geometries', None):
item = QTreeWidgetItem(
group_branch, [layer.get('name')])
item.setData(
0, LAYER_ORIGIN_ROLE, FROM_ANALYSIS['key'])
item.setData(0, LAYER_PARENT_ANALYSIS_ROLE, group)
item.setData(
0, LAYER_PURPOSE_KEY_OR_ID_ROLE, layer['key'])
item.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable)
canvas_branch = QTreeWidgetItem(
self.tree.invisibleRootItem(), [FROM_CANVAS['name']])
canvas_branch.setFont(0, bold_font)
canvas_branch.setExpanded(True)
canvas_branch.setFlags(Qt.ItemIsEnabled)
# List layers from the canvas
loaded_layers = list(QgsProject.instance().mapLayers().values())
canvas_layers = self.iface.mapCanvas().layers()
flag = setting('visibleLayersOnlyFlag', expected_type=bool)
for loaded_layer in loaded_layers:
if flag and loaded_layer not in canvas_layers:
continue
title = loaded_layer.name()
item = QTreeWidgetItem(canvas_branch, [title])
item.setData(0, LAYER_ORIGIN_ROLE, FROM_CANVAS['key'])
item.setData(0, LAYER_PURPOSE_KEY_OR_ID_ROLE, loaded_layer.id())
item.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable)
self.tree.resizeColumnToContents(0)
def _create_exposure_combos(self):
"""Create one combobox for each exposure and insert them in the UI."""
# Map registry may be invalid if QGIS is shutting down
project = QgsProject.instance()
canvas_layers = self.iface.mapCanvas().layers()
# MapLayers returns a QMap<QString id, QgsMapLayer layer>
layers = list(project.mapLayers().values())
# Sort by name for tests
layers.sort(key=lambda x: x.name())
show_only_visible_layers = setting(
'visibleLayersOnlyFlag', expected_type=bool)
# For issue #618
if len(layers) == 0:
# self.message_viewer.setHtml(getting_started_message())
return
for one_exposure in exposure_all:
label = QLabel(one_exposure['name'])
combo = QComboBox()
combo.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)
combo.addItem(tr('Do not use'), None)
self.form_layout.addRow(label, combo)
self.combos_exposures[one_exposure['key']] = combo
for layer in layers:
if (show_only_visible_layers and
(layer not in canvas_layers)):
continue
try:
layer_purpose = self.keyword_io.read_keywords(
layer, 'layer_purpose')
keyword_version = str(self.keyword_io.read_keywords(
layer, inasafe_keyword_version_key))
if not is_keyword_version_supported(keyword_version):
continue
except BaseException: # pylint: disable=W0702
# continue ignoring this layer
continue
# See if there is a title for this layer, if not,
# fallback to the layer's filename
# noinspection PyBroadException
try:
title = self.keyword_io.read_keywords(layer, 'title')
except (NoKeywordsFoundError,
KeywordNotFoundError, MetadataReadError):
# Skip if there are no keywords at all, or missing keyword
continue
except BaseException: # pylint: disable=W0702
pass
else:
# Lookup internationalised title if available
title = self.tr(title)
# Register title with layer
set_layer_from_title = setting(
'set_layer_from_title_flag', True, bool)
if title and set_layer_from_title:
if qgis_version() >= 21800:
layer.setName(title)
else:
# QGIS 2.14
layer.setLayerName(title)
source = layer.id()
icon = layer_icon(layer)
if layer_purpose == layer_purpose_hazard['key']:
add_ordered_combo_item(
self.cbx_hazard, title, source, icon=icon)
elif layer_purpose == layer_purpose_aggregation['key']:
if self.use_selected_only:
count_selected = layer.selectedFeatureCount()
if count_selected > 0:
add_ordered_combo_item(
self.cbx_aggregation,
title,
source,
count_selected,
icon=icon
)
else:
add_ordered_combo_item(
self.cbx_aggregation, title, source, None, icon)
else:
add_ordered_combo_item(
self.cbx_aggregation, title, source, None, icon)
elif layer_purpose == layer_purpose_exposure['key']:
# fetching the exposure
try:
exposure_type = self.keyword_io.read_keywords(
layer, layer_purpose_exposure['key'])
except BaseException: # pylint: disable=W0702
# continue ignoring this layer
continue
for key, combo in list(self.combos_exposures.items()):
if key == exposure_type:
add_ordered_combo_item(
combo, title, source, icon=icon)
self.cbx_aggregation.addItem(entire_area_item_aggregation, None)
for combo in list(self.combos_exposures.values()):
combo.currentIndexChanged.connect(self.validate_impact_function)
def progress_callback(self, current_value, maximum_value, message=None):
"""GUI based callback implementation for showing progress.
:param current_value: Current progress.
:type current_value: int
        :param maximum_value: Maximum range (point at which task is complete).
:type maximum_value: int
:param message: Optional message dictionary to containing content
we can display to the user. See safe.definitions.analysis_steps
for an example of the expected format
:type message: dict
"""
report = m.Message()
report.add(LOGO_ELEMENT)
report.add(m.Heading(
self.tr('Analysis status'), **INFO_STYLE))
if message is not None:
report.add(m.ImportantText(message['name']))
report.add(m.Paragraph(message['description']))
report.add(
self._multi_exposure_if
.current_impact_function.performance_log_message())
send_static_message(self, report)
self.progress_bar.setMaximum(maximum_value)
self.progress_bar.setValue(current_value)
QgsApplication.processEvents()
def validate_impact_function(self):
"""Check validity of the current impact function."""
# Always set it to False
self.btn_run.setEnabled(False)
for combo in list(self.combos_exposures.values()):
if combo.count() == 1:
combo.setEnabled(False)
hazard = layer_from_combo(self.cbx_hazard)
aggregation = layer_from_combo(self.cbx_aggregation)
exposures = []
for combo in list(self.combos_exposures.values()):
exposures.append(layer_from_combo(combo))
exposures = [layer for layer in exposures if layer]
multi_exposure_if = MultiExposureImpactFunction()
multi_exposure_if.hazard = hazard
multi_exposure_if.exposures = exposures
multi_exposure_if.debug = False
multi_exposure_if.callback = self.progress_callback
if aggregation:
multi_exposure_if.use_selected_features_only = (
self.use_selected_only)
multi_exposure_if.aggregation = aggregation
else:
multi_exposure_if.crs = (
self.iface.mapCanvas().mapSettings().destinationCrs())
if len(self.ordered_expected_layers()) != 0:
            multi_exposure_if.output_layers_ordered = (
self.ordered_expected_layers())
status, message = multi_exposure_if.prepare()
if status == PREPARE_SUCCESS:
self._multi_exposure_if = multi_exposure_if
self.btn_run.setEnabled(True)
send_static_message(self, ready_message())
self.list_layers_in_map_report.clear()
return
else:
disable_busy_cursor()
send_error_message(self, message)
self._multi_exposure_if = None
def accept(self):
"""Launch the multi exposure analysis."""
if not isinstance(
self._multi_exposure_if, MultiExposureImpactFunction):
# This should not happen as the "accept" button must be disabled if
# the impact function is not ready.
return ANALYSIS_FAILED_BAD_CODE, None
self.tab_widget.setCurrentIndex(2)
self.set_enabled_buttons(False)
enable_busy_cursor()
try:
code, message, exposure = self._multi_exposure_if.run()
message = basestring_to_message(message)
if code == ANALYSIS_FAILED_BAD_INPUT:
LOGGER.warning(tr(
'The impact function could not run because of the inputs.'
))
send_error_message(self, message)
LOGGER.warning(message.to_text())
disable_busy_cursor()
self.set_enabled_buttons(True)
return code, message
elif code == ANALYSIS_FAILED_BAD_CODE:
LOGGER.warning(tr(
'The impact function could not run because of a bug.'))
LOGGER.exception(message.to_text())
send_error_message(self, message)
disable_busy_cursor()
self.set_enabled_buttons(True)
return code, message
if setting('generate_report', True, bool):
LOGGER.info(
'Reports are going to be generated for the multiexposure.')
# Report for the multi exposure
report = [standard_multi_exposure_impact_report_metadata_html]
error_code, message = (self._multi_exposure_if.generate_report(
report))
message = basestring_to_message(message)
if error_code == ImpactReport.REPORT_GENERATION_FAILED:
LOGGER.warning(
'The impact report could not be generated.')
send_error_message(self, message)
LOGGER.exception(message.to_text())
disable_busy_cursor()
self.set_enabled_buttons(True)
return error_code, message
else:
LOGGER.warning(
'Reports are not generated because of your settings.')
display_warning_message_bar(
tr('Reports'),
tr('Reports are not going to be generated because of your '
'InaSAFE settings.'),
duration=10,
iface_object=self.iface
)
# We always create the multi exposure group because we need
# reports to be generated.
root = QgsProject.instance().layerTreeRoot()
if len(self.ordered_expected_layers()) == 0:
group_analysis = root.insertGroup(
0, self._multi_exposure_if.name)
group_analysis.setItemVisibilityChecked(True)
group_analysis.setCustomProperty(
MULTI_EXPOSURE_ANALYSIS_FLAG, True)
for layer in self._multi_exposure_if.outputs:
QgsProject.instance().addMapLayer(layer, False)
layer_node = group_analysis.addLayer(layer)
layer_node.setItemVisibilityChecked(False)
# set layer title if any
try:
title = layer.keywords['title']
if qgis_version() >= 21800:
layer.setName(title)
else:
layer.setLayerName(title)
except KeyError:
pass
for analysis in self._multi_exposure_if.impact_functions:
detailed_group = group_analysis.insertGroup(
0, analysis.name)
detailed_group.setItemVisibilityChecked(True)
add_impact_layers_to_canvas(analysis, group=detailed_group)
if self.iface:
self.iface.setActiveLayer(
self._multi_exposure_if.analysis_impacted)
else:
add_layers_to_canvas_with_custom_orders(
self.ordered_expected_layers(),
self._multi_exposure_if,
self.iface)
if setting('generate_report', True, bool):
LOGGER.info(
'Reports are going to be generated for each single '
'exposure.')
# Report for the single exposure with hazard
for analysis in self._multi_exposure_if.impact_functions:
# we only want to generate non pdf/qpt report
html_components = [standard_impact_report_metadata_html]
error_code, message = (
analysis.generate_report(html_components))
message = basestring_to_message(message)
if error_code == (
ImpactReport.REPORT_GENERATION_FAILED):
LOGGER.info(
'The impact report could not be generated.')
send_error_message(self, message)
LOGGER.info(message.to_text())
disable_busy_cursor()
self.set_enabled_buttons(True)
return error_code, message
else:
LOGGER.info(
'Reports are not generated because of your settings.')
display_warning_message_bar(
tr('Reports'),
tr('Reports are not going to be generated because of your '
'InaSAFE settings.'),
duration=10,
iface_object=self.iface
)
# If zoom to impact is enabled
if setting(
'setZoomToImpactFlag', expected_type=bool):
self.iface.zoomToActiveLayer()
# If hide exposure layers
if setting('setHideExposureFlag', expected_type=bool):
treeroot = QgsProject.instance().layerTreeRoot()
for combo in list(self.combos_exposures.values()):
layer = layer_from_combo(combo)
if layer is not None:
treelayer = treeroot.findLayer(layer.id())
if treelayer:
treelayer.setItemVisibilityChecked(False)
# Set last analysis extent
self._extent.set_last_analysis_extent(
self._multi_exposure_if.analysis_extent,
self._multi_exposure_if.crs)
self.done(QDialog.Accepted)
except Exception as e:
error_message = get_error_message(e)
send_error_message(self, error_message)
LOGGER.exception(e)
LOGGER.debug(error_message.to_text())
finally:
disable_busy_cursor()
self.set_enabled_buttons(True)
def reject(self):
"""Redefinition of the reject method."""
self._populate_reporting_tab()
super(MultiExposureDialog, self).reject()
def set_enabled_buttons(self, enabled):
self.btn_cancel.setEnabled(enabled)
self.btn_back.setEnabled(enabled)
self.btn_next.setEnabled(enabled)
self.btn_run.setEnabled(enabled)
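# Illustrative sketch (not part of InaSAFE): the reporting tab ultimately hands
# add_layers_to_canvas_with_custom_orders() a list shaped like the one below, mixing
# layers produced by the analysis with layers already on the canvas.  The values are
# invented placeholders.
_EXAMPLE_LAYER_ORDER = [
    (FROM_ANALYSIS['key'], 'exposure_summary', 'Population affected', None),
    (FROM_CANVAS['key'], 'OSM basemap', '/path/to/basemap.tif', '<qgis>...</qgis>'),
]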
| gpl-3.0 | -3,040,091,875,839,423,500 | 40.114685 | 79 | 0.579719 | false | 4.264761 | false | false | false |
andrewyoung1991/supriya | supriya/tools/pendingugentools/PulseDivider.py | 1 | 3759 | # -*- encoding: utf-8 -*-
from supriya.tools.ugentools.UGen import UGen
class PulseDivider(UGen):
r'''
::
>>> pulse_divider = ugentools.PulseDivider.ar(
... div=2,
... start=0,
... trigger=0,
... )
>>> pulse_divider
PulseDivider.ar()
'''
### CLASS VARIABLES ###
__documentation_section__ = None
__slots__ = ()
_ordered_input_names = (
'trigger',
'div',
'start',
)
_valid_calculation_rates = None
### INITIALIZER ###
def __init__(
self,
calculation_rate=None,
div=2,
start=0,
trigger=0,
):
UGen.__init__(
self,
calculation_rate=calculation_rate,
div=div,
start=start,
trigger=trigger,
)
### PUBLIC METHODS ###
@classmethod
def ar(
cls,
div=2,
start=0,
trigger=0,
):
r'''Constructs an audio-rate PulseDivider.
::
>>> pulse_divider = ugentools.PulseDivider.ar(
... div=2,
... start=0,
... trigger=0,
... )
>>> pulse_divider
PulseDivider.ar()
Returns ugen graph.
'''
from supriya.tools import synthdeftools
calculation_rate = synthdeftools.CalculationRate.AUDIO
ugen = cls._new_expanded(
calculation_rate=calculation_rate,
div=div,
start=start,
trigger=trigger,
)
return ugen
@classmethod
def kr(
cls,
div=2,
start=0,
trigger=0,
):
r'''Constructs a control-rate PulseDivider.
::
>>> pulse_divider = ugentools.PulseDivider.kr(
... div=2,
... start=0,
... trigger=0,
... )
>>> pulse_divider
PulseDivider.kr()
Returns ugen graph.
'''
from supriya.tools import synthdeftools
calculation_rate = synthdeftools.CalculationRate.CONTROL
ugen = cls._new_expanded(
calculation_rate=calculation_rate,
div=div,
start=start,
trigger=trigger,
)
return ugen
### PUBLIC PROPERTIES ###
@property
def div(self):
r'''Gets `div` input of PulseDivider.
::
>>> pulse_divider = ugentools.PulseDivider.ar(
... div=2,
... start=0,
... trigger=0,
... )
>>> pulse_divider.div
2.0
Returns ugen input.
'''
index = self._ordered_input_names.index('div')
return self._inputs[index]
@property
def start(self):
r'''Gets `start` input of PulseDivider.
::
>>> pulse_divider = ugentools.PulseDivider.ar(
... div=2,
... start=0,
... trigger=0,
... )
>>> pulse_divider.start
0.0
Returns ugen input.
'''
index = self._ordered_input_names.index('start')
return self._inputs[index]
@property
def trigger(self):
r'''Gets `trigger` input of PulseDivider.
::
>>> pulse_divider = ugentools.PulseDivider.ar(
... div=2,
... start=0,
... trigger=0,
... )
>>> pulse_divider.trigger
0.0
Returns ugen input.
'''
index = self._ordered_input_names.index('trigger')
return self._inputs[index] | mit | -767,428,144,535,581,200 | 20.860465 | 64 | 0.444533 | false | 4.08587 | false | false | false |
beschulz/kyototycoon | tests/test_cas.py | 2 | 2458 | # -*- coding: utf-8 -*-
from nose.tools import *
from pykt import KyotoTycoon, KTException
import time
def clear():
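    """Open the default database, clear all records and close it."""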
db = KyotoTycoon()
db = db.open()
db.clear()
db.close()
@raises(IOError)
def test_err_cas():
db = KyotoTycoon()
db.cas("A")
@with_setup(setup=clear)
def test_cas():
db = KyotoTycoon()
db = db.open()
db.set("A", "B")
ret = db.cas("A", oval="B", nval="C")
ok_(ret == True)
ret = db.get("A")
ok_(ret == "C")
db.close()
@raises(KTException)
@with_setup(setup=clear)
def test_cas_with_db():
db = KyotoTycoon("test")
db = db.open()
db.set("A", "B")
db.cas("A", oval="B", nval="C")
ok_(False)
@with_setup(setup=clear)
@raises(KTException)
def test_cas_fail():
db = KyotoTycoon()
db = db.open()
db.set("A", "B")
ret = db.cas("A", oval="C", nval="C")
@with_setup(setup=clear)
@raises(KTException)
def test_cas_few_param1():
db = KyotoTycoon()
db = db.open()
db.set("A", "B")
db.cas("A", nval="C")
@with_setup(setup=clear)
def test_cas_few_param2():
db = KyotoTycoon()
db = db.open()
db.set("A", "B")
ret = db.cas("A", oval="B")
ok_(ret == True)
ret = db.get("A")
ok_(ret == None)
db.close()
@with_setup(setup=clear)
def test_cas_utf8():
db = KyotoTycoon()
db = db.open()
db.set("あいうえお", "かきくけこ")
ret = db.cas("あいうえお", oval="かきくけこ", nval="さしすせそ")
ok_(ret == True)
ret = db.get("あいうえお")
ok_(ret == "さしすせそ")
db.close()
@with_setup(setup=clear)
def test_cas_loop():
db = KyotoTycoon()
db = db.open()
db.set("A", "0")
for i in xrange(100):
a = str(i)
b = str(i+1)
ret = db.cas("A", oval=a, nval=b)
ok_(ret == True)
ret = db.get("A")
ok_(ret == b)
db.close()
@with_setup(setup=clear)
def test_cas_expire():
db = KyotoTycoon()
db = db.open()
db.set("A", "B")
ret = db.cas("A", oval="B", nval="C", expire=2)
ok_(ret)
time.sleep(3)
ret = db.get("A")
ok_(ret == None)
db.close()
@with_setup(setup=clear)
def test_cas_expire_not_expire():
db = KyotoTycoon()
db = db.open()
db.set("A", "B")
ret = db.cas("A", oval="B", nval="C", expire=2)
ok_(ret)
time.sleep(2)
ret = db.get("A")
ok_(ret == "C")
time.sleep(2)
ret = db.get("A")
ok_(ret == None)
db.close()
| gpl-2.0 | 1,377,580,386,237,139,700 | 19.586207 | 53 | 0.523451 | false | 2.39039 | true | false | false |
schleichdi2/OPENNFR-6.0-CORE | bitbake/lib/bb/checksum.py | 3 | 4435 | # Local file checksum cache implementation
#
# Copyright (C) 2012 Intel Corporation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import glob
import operator
import os
import stat
import pickle
import bb.utils
import logging
from bb.cache import MultiProcessCache
logger = logging.getLogger("BitBake.Cache")
# mtime cache (non-persistent)
# based upon the assumption that files do not change during a bitbake run
class FileMtimeCache(object):
cache = {}
def cached_mtime(self, f):
if f not in self.cache:
self.cache[f] = os.stat(f)[stat.ST_MTIME]
return self.cache[f]
def cached_mtime_noerror(self, f):
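        """Like cached_mtime(), but return 0 instead of raising OSError."""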
if f not in self.cache:
try:
self.cache[f] = os.stat(f)[stat.ST_MTIME]
except OSError:
return 0
return self.cache[f]
def update_mtime(self, f):
self.cache[f] = os.stat(f)[stat.ST_MTIME]
return self.cache[f]
def clear(self):
self.cache.clear()
# Checksum + mtime cache (persistent)
class FileChecksumCache(MultiProcessCache):
cache_file_name = "local_file_checksum_cache.dat"
CACHE_VERSION = 1
def __init__(self):
self.mtime_cache = FileMtimeCache()
MultiProcessCache.__init__(self)
def get_checksum(self, f):
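        """Return the md5 checksum of f, recomputing it when the file's mtime has changed."""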
entry = self.cachedata[0].get(f)
cmtime = self.mtime_cache.cached_mtime(f)
if entry:
(mtime, hashval) = entry
if cmtime == mtime:
return hashval
else:
bb.debug(2, "file %s changed mtime, recompute checksum" % f)
hashval = bb.utils.md5_file(f)
self.cachedata_extras[0][f] = (cmtime, hashval)
return hashval
def merge_data(self, source, dest):
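        """Merge cache entries from source into dest, keeping the entry with the newer mtime on conflict."""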
for h in source[0]:
if h in dest:
(smtime, _) = source[0][h]
(dmtime, _) = dest[0][h]
if smtime > dmtime:
dest[0][h] = source[0][h]
else:
dest[0][h] = source[0][h]
def get_checksums(self, filelist, pn):
"""Get checksums for a list of files"""
def checksum_file(f):
try:
checksum = self.get_checksum(f)
except OSError as e:
bb.warn("Unable to get checksum for %s SRC_URI entry %s: %s" % (pn, os.path.basename(f), e))
return None
return checksum
def checksum_dir(pth):
# Handle directories recursively
dirchecksums = []
for root, dirs, files in os.walk(pth):
for name in files:
fullpth = os.path.join(root, name)
checksum = checksum_file(fullpth)
if checksum:
dirchecksums.append((fullpth, checksum))
return dirchecksums
checksums = []
for pth in filelist.split():
exist = pth.split(":")[1]
if exist == "False":
continue
pth = pth.split(":")[0]
if '*' in pth:
# Handle globs
for f in glob.glob(pth):
if os.path.isdir(f):
if not os.path.islink(f):
checksums.extend(checksum_dir(f))
else:
checksum = checksum_file(f)
if checksum:
checksums.append((f, checksum))
elif os.path.isdir(pth):
if not os.path.islink(pth):
checksums.extend(checksum_dir(pth))
else:
checksum = checksum_file(pth)
if checksum:
checksums.append((pth, checksum))
checksums.sort(key=operator.itemgetter(1))
return checksums
| gpl-2.0 | 7,265,004,168,654,512,000 | 32.097015 | 108 | 0.554904 | false | 4.102683 | false | false | false |
dwdm/splash | splash/tests/test_execute.py | 1 | 94230 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import re
from base64 import standard_b64decode
import unittest
from cStringIO import StringIO
import numbers
import time
from PIL import Image
import requests
import pytest
from splash.exceptions import ScriptError
lupa = pytest.importorskip("lupa")
from splash import __version__ as splash_version
from splash.har_builder import HarBuilder
from . import test_render
from .test_jsonpost import JsonPostRequestHandler
from .utils import NON_EXISTING_RESOLVABLE, SplashServer
from .mockserver import JsRender
from .. import defaults
class BaseLuaRenderTest(test_render.BaseRenderTest):
endpoint = 'execute'
def request_lua(self, code, query=None):
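        """Send a request to the execute endpoint with the given Lua source plus extra query arguments."""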
q = {"lua_source": code}
q.update(query or {})
return self.request(q)
def assertScriptError(self, resp, subtype, message=None):
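        """Assert that resp is a 400 ScriptError of the given subtype and return the error info."""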
err = self.assertJsonError(resp, 400, 'ScriptError')
self.assertEqual(err['info']['type'], subtype)
if message is not None:
self.assertRegexpMatches(err['info']['message'], message)
return err
def assertErrorLineNumber(self, resp, line_number):
self.assertEqual(resp.json()['info']['line_number'], line_number)
class MainFunctionTest(BaseLuaRenderTest):
def test_return_json(self):
resp = self.request_lua("""
function main(splash)
local obj = {key="value"}
return {
mystatus="ok",
number=5,
float=-0.5,
obj=obj,
bool=true,
bool2=false,
missing=nil
}
end
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.headers['content-type'], 'application/json')
self.assertEqual(resp.json(), {
"mystatus": "ok",
"number": 5,
"float": -0.5,
"obj": {"key": "value"},
"bool": True,
"bool2": False,
})
def test_unicode(self):
resp = self.request_lua(u"""
function main(splash) return {key="значение"} end
""".encode('utf8'))
self.assertStatusCode(resp, 200)
self.assertEqual(resp.headers['content-type'], 'application/json')
self.assertEqual(resp.json(), {"key": u"значение"})
def test_unicode_direct(self):
resp = self.request_lua(u"""
function main(splash)
return 'привет'
end
""".encode('utf8'))
self.assertStatusCode(resp, 200)
self.assertEqual(resp.text, u"привет")
self.assertEqual(resp.headers['content-type'], 'text/plain; charset=utf-8')
def test_number(self):
resp = self.request_lua("function main(splash) return 1 end")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.text, "1")
self.assertEqual(resp.headers['content-type'], 'text/plain; charset=utf-8')
def test_number_float(self):
resp = self.request_lua("function main(splash) return 1.5 end")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.text, "1.5")
self.assertEqual(resp.headers['content-type'], 'text/plain; charset=utf-8')
def test_bool(self):
resp = self.request_lua("function main(splash) return true end")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.text, "True")
self.assertEqual(resp.headers['content-type'], 'text/plain; charset=utf-8')
def test_empty(self):
resp = self.request_lua("function main(splash) end")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.text, "")
resp = self.request_lua("function main() end")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.text, "")
def test_no_main(self):
resp = self.request_lua("x=1")
self.assertScriptError(resp, ScriptError.MAIN_NOT_FOUND_ERROR,
message="function is not found")
def test_bad_main(self):
resp = self.request_lua("main=1")
self.assertScriptError(resp, ScriptError.BAD_MAIN_ERROR,
message="is not a function")
def test_ugly_main(self):
resp = self.request_lua("main={coroutine=123}")
self.assertScriptError(resp, ScriptError.BAD_MAIN_ERROR,
message="is not a function")
def test_nasty_main(self):
resp = self.request_lua("""
main = {coroutine=function()
return {
send=function() end,
next=function() end
}
end}
""")
self.assertScriptError(resp, ScriptError.BAD_MAIN_ERROR,
message="is not a function")
class ResultContentTypeTest(BaseLuaRenderTest):
def test_content_type(self):
resp = self.request_lua("""
function main(splash)
splash:set_result_content_type('text/plain')
return "hi!"
end
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.headers['content-type'], 'text/plain')
self.assertEqual(resp.text, 'hi!')
def test_content_type_ignored_for_tables(self):
resp = self.request_lua("""
function main(splash)
splash:set_result_content_type('text/plain')
return {hi="hi!"}
end
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.headers['content-type'], 'application/json')
self.assertEqual(resp.text, '{"hi": "hi!"}')
def test_bad_content_type(self):
resp = self.request_lua("""
function main(splash)
splash:set_result_content_type(55)
return "hi!"
end
""")
err = self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR,
message='argument must be a string')
self.assertEqual(err['info']['splash_method'], 'set_result_content_type')
resp = self.request_lua("""
function main(splash)
splash:set_result_content_type()
return "hi!"
end
""")
self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR)
def test_bad_content_type_func(self):
resp = self.request_lua("""
function main(splash)
splash:set_result_content_type(function () end)
return "hi!"
end
""")
err = self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR,
message='argument must be a string')
self.assertEqual(err['info']['splash_method'], 'set_result_content_type')
class ResultHeaderTest(BaseLuaRenderTest):
def test_result_header_set(self):
resp = self.request_lua("""
function main(splash)
splash:set_result_header("foo", "bar")
return "hi!"
end
""")
self.assertStatusCode(resp, 200)
self.assertIn("foo", resp.headers)
self.assertEqual(resp.headers.get("foo"), "bar")
def test_bad_result_header_set(self):
resp = self.request_lua("""
function main(splash)
splash:set_result_header({}, {})
return "hi!"
end
""")
err = self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR,
message='arguments must be strings')
self.assertEqual(err['info']['splash_method'], 'set_result_header')
self.assertErrorLineNumber(resp, 3)
def test_unicode_headers_raise_bad_request(self):
resp = self.request_lua("""
function main(splash)
splash:set_result_header("paweł", "kiść")
return "hi!"
end
""")
err = self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR,
message='must be ascii')
self.assertEqual(err['info']['splash_method'], 'set_result_header')
self.assertErrorLineNumber(resp, 3)
class ErrorsTest(BaseLuaRenderTest):
def test_syntax_error(self):
resp = self.request_lua("function main(splash) sdhgfsajhdgfjsahgd end")
# XXX: message='syntax error' is not checked because older Lua 5.2
# versions have problems with error messages.
self.assertScriptError(resp, ScriptError.LUA_INIT_ERROR)
def test_syntax_error_toplevel(self):
resp = self.request_lua("sdg; function main(splash) sdhgfsajhdgfjsahgd end")
self.assertScriptError(resp, ScriptError.LUA_INIT_ERROR)
# XXX: message='syntax error' is not checked because older Lua 5.2
# versions have problems with error messages.
def test_unicode_error(self):
resp = self.request_lua(u"function main(splash) 'привет' end".encode('utf8'))
self.assertScriptError(resp, ScriptError.LUA_INIT_ERROR,
message="unexpected symbol")
def test_user_error(self):
resp = self.request_lua(""" -- 1
function main(splash) -- 2
error("User Error Happened") -- 3 <-
end
""")
self.assertScriptError(resp, ScriptError.LUA_ERROR,
message="User Error Happened")
self.assertErrorLineNumber(resp, 3)
@pytest.mark.xfail(reason="not implemented, nice to have")
def test_user_error_table(self):
resp = self.request_lua(""" -- 1
function main(splash) -- 2
error({tp="user error", msg=123}) -- 3 <-
end
""")
err = self.assertScriptError(resp, ScriptError.LUA_ERROR)
self.assertEqual(err['info']['error'],
{'tp': 'user error', 'msg': 123})
self.assertErrorLineNumber(resp, 3)
def test_bad_splash_attribute(self):
resp = self.request_lua("""
function main(splash)
local x = splash.foo
return x == nil
end
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.text, "True")
def test_return_multiple(self):
resp = self.request_lua("function main(splash) return 'foo', 'bar' end")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), ["foo", "bar"])
def test_return_splash(self):
resp = self.request_lua("function main(splash) return splash end")
self.assertScriptError(resp, ScriptError.BAD_MAIN_ERROR)
def test_return_function(self):
resp = self.request_lua("function main(s) return function() end end")
self.assertScriptError(resp, ScriptError.BAD_MAIN_ERROR,
message="function objects are not allowed")
def test_return_coroutine(self):
resp = self.request_lua("""
function main(splash)
return coroutine.create(function() end)
end
""")
self.assertScriptError(resp, ScriptError.LUA_ERROR,
message="(a nil value)")
def test_return_coroutine_nosandbox(self):
with SplashServer(extra_args=['--disable-lua-sandbox']) as splash:
resp = requests.get(
url=splash.url("execute"),
params={
'lua_source': """
function main(splash)
return coroutine.create(function() end)
end
"""
},
)
self.assertScriptError(resp, ScriptError.BAD_MAIN_ERROR,
message="function objects are not allowed")
def test_return_started_coroutine(self):
resp = self.request_lua(""" -- 1
function main(splash) -- 2
local co = coroutine.create(function() -- 3 <-
coroutine.yield() -- 4
end)
coroutine.resume(co)
return co
end
""")
self.assertScriptError(resp, ScriptError.LUA_ERROR,
message="(a nil value)")
self.assertErrorLineNumber(resp, 3)
def test_return_started_coroutine_nosandbox(self):
with SplashServer(extra_args=['--disable-lua-sandbox']) as splash:
resp = requests.get(
url=splash.url("execute"),
params={
'lua_source': """ -- 1
function main(splash) -- 2
local co = coroutine.create(function() -- 3
coroutine.yield() -- 4
end) -- 5
coroutine.resume(co) -- 6
return co -- 7
end -- 8
"""
},
)
self.assertScriptError(resp, ScriptError.BAD_MAIN_ERROR,
message="thread objects are not allowed")
def test_error_line_number_attribute_access(self):
resp = self.request_lua(""" -- 1
function main(splash) -- 2
local x = 5 -- 3
splash.set_result_content_type("hello") -- 4
end -- 5
""")
self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR)
self.assertErrorLineNumber(resp, 4)
def test_error_line_number_bad_argument(self):
resp = self.request_lua("""
function main(splash)
local x = 5
splash:set_result_content_type(48)
end
""")
self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR)
self.assertErrorLineNumber(resp, 4)
def test_error_line_number_wrong_keyword_argument(self):
resp = self.request_lua(""" -- 1
function main(splash) -- 2
splash:set_result_content_type{content_type=48} -- 3 <--
end -- 4
""")
self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR)
self.assertErrorLineNumber(resp, 3)
def test_pcall_wrong_keyword_arguments(self):
resp = self.request_lua("""
function main(splash)
local x = function()
return splash:wait{timeout=0.7}
end
local ok, res = pcall(x)
return {ok=ok, res=res}
end
""")
self.assertStatusCode(resp, 200)
data = resp.json()
self.assertEqual(data["ok"], False)
class EnableDisableJSTest(BaseLuaRenderTest):
def test_disablejs(self):
resp = self.request_lua("""
function main(splash)
assert(splash.js_enabled==true)
splash.js_enabled = false
splash:go(splash.args.url)
local html = splash:html()
return html
end
""", {
'url': self.mockurl('jsrender'),
})
self.assertStatusCode(resp, 200)
self.assertIn(u'Before', resp.text)
def test_enablejs(self):
resp = self.request_lua("""
function main(splash)
splash.js_enabled = true
splash:go(splash.args.url)
local html = splash:html()
return html
end
""", {
'url': self.mockurl('jsrender'),
})
self.assertStatusCode(resp, 200)
self.assertNotIn(u'Before', resp.text)
def test_disablejs_after_splash_go(self):
resp = self.request_lua("""
function main(splash)
splash:go(splash.args.url)
splash.js_enabled = false
local html = splash:html()
return html
end
""", {
'url': self.mockurl('jsrender'),
})
self.assertStatusCode(resp, 200)
self.assertNotIn(u'Before', resp.text)
def test_multiple(self):
resp = self.request_lua("""
function main(splash)
splash:go(splash.args.url)
splash.js_enabled = false
local html_1 = splash:html()
splash:go(splash.args.url)
return {html_1=html_1, html_2=splash:html()}
end
""", {
'url': self.mockurl('jsrender')
})
self.assertStatusCode(resp, 200)
data = resp.json()
self.assertNotIn(u'Before', data['html_1'])
self.assertIn(u'Before', data['html_2'])
class ImageRenderTest(BaseLuaRenderTest):
def test_disable_images_attr(self):
resp = self.request_lua("""
function main(splash)
splash.images_enabled = false
splash:go(splash.args.url)
local res = splash:evaljs("document.getElementById('foo').clientHeight")
return {res=res}
end
""", {'url': self.mockurl("show-image")})
self.assertEqual(resp.json()['res'], 0)
def test_disable_images_method(self):
resp = self.request_lua("""
function main(splash)
splash:set_images_enabled(false)
splash:go(splash.args.url)
local res = splash:evaljs("document.getElementById('foo').clientHeight")
return {res=res}
end
""", {'url': self.mockurl("show-image")})
self.assertEqual(resp.json()['res'], 0)
def test_enable_images_attr(self):
resp = self.request_lua("""
function main(splash)
splash.images_enabled = false
splash.images_enabled = true
splash:go(splash.args.url)
local res = splash:evaljs("document.getElementById('foo').clientHeight")
return {res=res}
end
""", {'url': self.mockurl("show-image")})
self.assertEqual(resp.json()['res'], 50)
def test_enable_images_method(self):
resp = self.request_lua("""
function main(splash)
splash:set_images_enabled(false)
splash:set_images_enabled(true)
splash:go(splash.args.url)
local res = splash:evaljs("document.getElementById('foo').clientHeight")
return {res=res}
end
""", {'url': self.mockurl("show-image")})
self.assertEqual(resp.json()['res'], 50)
class EvaljsTest(BaseLuaRenderTest):
def _evaljs_request(self, js):
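        """Evaluate the JS snippet via splash:evaljs() and return the HTTP response."""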
return self.request_lua("""
function main(splash)
local res = splash:evaljs([[%s]])
return {res=res, tp=type(res)}
end
""" % js)
def assertEvaljsResult(self, js, result, type):
resp = self._evaljs_request(js)
self.assertStatusCode(resp, 200)
expected = {'tp': type}
if result is not None:
expected['res'] = result
self.assertEqual(resp.json(), expected)
def assertEvaljsError(self, js, subtype=ScriptError.JS_ERROR, message=None):
resp = self._evaljs_request(js)
err = self.assertScriptError(resp, subtype, message)
self.assertEqual(err['info']['splash_method'], 'evaljs')
return err
def test_numbers(self):
self.assertEvaljsResult("1.0", 1.0, "number")
self.assertEvaljsResult("1", 1, "number")
self.assertEvaljsResult("1+2", 3, "number")
def test_inf(self):
self.assertEvaljsResult("1/0", float('inf'), "number")
self.assertEvaljsResult("-1/0", float('-inf'), "number")
def test_string(self):
self.assertEvaljsResult("'foo'", u'foo', 'string')
def test_bool(self):
self.assertEvaljsResult("true", True, 'boolean')
def test_undefined(self):
self.assertEvaljsResult("undefined", None, 'nil')
def test_null(self):
# XXX: null is converted to an empty string by QT,
# we can't distinguish it from a "real" empty string.
self.assertEvaljsResult("null", "", 'string')
def test_unicode_string(self):
self.assertEvaljsResult("'привет'", u'привет', 'string')
def test_unicode_string_in_object(self):
self.assertEvaljsResult(
'var o={}; o["ключ"] = "значение"; o',
{u'ключ': u'значение'},
'table'
)
def test_nested_object(self):
self.assertEvaljsResult(
'var o={}; o["x"] = {}; o["x"]["y"] = 5; o["z"] = "foo"; o',
{"x": {"y": 5}, "z": "foo"},
'table'
)
def test_array(self):
self.assertEvaljsResult(
'x = [3, 2, 1, "foo", ["foo", [], "bar"], {}]; x',
[3, 2, 1, "foo", ["foo", [], "bar"], {}],
'table',
)
def test_self_referencing(self):
self.assertEvaljsResult(
'var o={}; o["x"] = "5"; o["y"] = o; o',
{"x": "5"}, # self reference is discarded
'table'
)
def test_function(self):
# XXX: functions are not returned by QT
self.assertEvaljsResult(
"x = function(){return 5}; x",
{},
"table"
)
def test_function_direct_unwrapped(self):
        # XXX: this is invalid syntax
self.assertEvaljsError("function(){return 5}", message='SyntaxError')
def test_function_direct(self):
# XXX: functions are returned as empty tables by QT
self.assertEvaljsResult("(function(){return 5})", {}, "table")
def test_object_with_function(self):
# XXX: complex objects like function values are unsupported
self.assertEvaljsError('{"x":2, "y": function(){}}')
def test_function_call(self):
self.assertEvaljsResult(
"function x(){return 5}; x();",
5,
"number"
)
def test_dateobj(self):
# XXX: Date objects are converted to ISO8061 strings.
# Does it make sense to do anything else with them?
# E.g. make them available to Lua as tables?
self.assertEvaljsResult(
'x = new Date("21 May 1958 10:12 UTC"); x',
"1958-05-21T10:12:00Z",
"string"
)
def test_regexp(self):
self.assertEvaljsResult(
'/my-regexp/i',
{
u'_jstype': u'RegExp',
'caseSensitive': False,
'pattern': u'my-regexp'
},
'table'
)
self.assertEvaljsResult(
'/my-regexp/',
{
u'_jstype': u'RegExp',
'caseSensitive': True,
'pattern': u'my-regexp'
},
'table'
)
def test_syntax_error(self):
err = self.assertEvaljsError("x--4")
self.assertEqual(err['info']['js_error_type'], 'SyntaxError')
def test_throw_string(self):
err = self.assertEvaljsError("(function(){throw 'ABC'})();")
self.assertEqual(err['info']['js_error_type'], '<custom JS error>')
self.assertEqual(err['info']['js_error_message'], 'ABC')
err = self.assertEvaljsError("throw 'ABC'")
self.assertEqual(err['info']['js_error_type'], '<custom JS error>')
self.assertEqual(err['info']['js_error_message'], 'ABC')
def test_throw_error(self):
err = self.assertEvaljsError("(function(){throw new Error('ABC')})();")
self.assertEqual(err['info']['js_error_type'], 'Error')
self.assertEqual(err['info']['js_error_message'], 'ABC')
class WaitForResumeTest(BaseLuaRenderTest):
def _wait_for_resume_request(self, js, timeout=1.0):
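        """Execute splash:wait_for_resume() with the given JS source and timeout and return the response."""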
return self.request_lua("""
function main(splash)
local result, error = splash:wait_for_resume([[%s]], %.1f)
local response = {}
if result ~= nil then
response["value"] = result["value"]
response["value_type"] = type(result["value"])
else
response["error"] = error
end
return response
end
""" % (js, timeout))
def test_return_undefined(self):
resp = self._wait_for_resume_request("""
function main(splash) {
splash.resume();
}
""")
self.assertStatusCode(resp, 200)
# A Lua table with a nil value is equivalent to not setting that
# key/value pair at all, so there is no "result" key in the response.
self.assertEqual(resp.json(), {"value_type": "nil"})
def test_return_null(self):
resp = self._wait_for_resume_request("""
function main(splash) {
splash.resume(null);
}
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"value": "", "value_type": "string"})
def test_return_string(self):
resp = self._wait_for_resume_request("""
function main(splash) {
splash.resume("ok");
}
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"value": "ok", "value_type": "string"})
def test_return_non_ascii_string(self):
resp = self._wait_for_resume_request("""
function main(splash) {
splash.resume("你好");
}
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"value": u"你好", "value_type": "string"})
def test_return_int(self):
resp = self._wait_for_resume_request("""
function main(splash) {
splash.resume(42);
}
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"value": 42, "value_type": "number"})
def test_return_float(self):
resp = self._wait_for_resume_request("""
function main(splash) {
splash.resume(1234.5);
}
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"value": 1234.5, "value_type": "number"})
def test_return_boolean(self):
resp = self._wait_for_resume_request("""
function main(splash) {
splash.resume(true);
}
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"value": True, "value_type": "boolean"})
def test_return_list(self):
resp = self._wait_for_resume_request("""
function main(splash) {
splash.resume([1,2,'red','blue']);
}
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {
"value": [1, 2, 'red', 'blue'],
"value_type": "table"}
)
def test_return_dict(self):
resp = self._wait_for_resume_request("""
function main(splash) {
splash.resume({'stomach':'empty','brain':'crazy'});
}
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {
"value": {'stomach': 'empty', 'brain': 'crazy'},
"value_type": "table"}
)
def test_return_additional_keys(self):
resp = self.request_lua("""
function main(splash)
local result, error = splash:wait_for_resume([[
function main(splash) {
splash.set("foo", "bar");
splash.resume("ok");
}
]])
return result
end""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {'foo': 'bar', 'value': 'ok'})
def test_delayed_return(self):
resp = self._wait_for_resume_request("""
function main(splash) {
setTimeout(function () {
splash.resume("ok");
}, 100);
}
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"value": "ok", "value_type": "string"})
def test_error_string(self):
resp = self._wait_for_resume_request("""
function main(splash) {
splash.error("not ok");
}
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"error": "JavaScript error: not ok"})
def test_timed_out(self):
resp = self._wait_for_resume_request("""
function main(splash) {
setTimeout(function () {
splash.resume("ok");
}, 2500);
}
""", timeout=0.1)
expected_error = 'JavaScript error: One shot callback timed out' \
' while waiting for resume() or error().'
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"error": expected_error})
def test_missing_main_function(self):
resp = self._wait_for_resume_request("""
function foo(splash) {
setTimeout(function () {
splash.resume("ok");
}, 500);
}
""")
self.assertScriptError(resp, ScriptError.LUA_ERROR,
message=r"no main\(\) function defined")
def test_js_syntax_error(self):
resp = self._wait_for_resume_request("""
function main(splash) {
)
setTimeout(function () {
splash.resume("ok");
}, 500);
}
""")
# XXX: why is it LUA_ERROR, not JS_ERROR? Should we change that?
self.assertScriptError(resp, ScriptError.LUA_ERROR,
message="SyntaxError")
def test_navigation_cancels_resume(self):
resp = self._wait_for_resume_request("""
function main(splash) {
location.href = '%s';
}
""" % self.mockurl('/'))
json = resp.json()
self.assertStatusCode(resp, 200)
self.assertIn('error', json)
self.assertIn('canceled', json['error'])
def test_cannot_resume_twice(self):
"""
We can't easily test that resuming twice throws an exception,
because that exception is thrown in Python code after Lua has already
resumed. The server log (if set to verbose) will show the stack trace,
but Lua will have no idea that it happened; indeed, that's the
_whole purpose_ of the one shot callback.
We can at least verify that if resume is called multiple times,
then the first value is returned and subsequent values are ignored.
"""
resp = self._wait_for_resume_request("""
function main(splash) {
splash.resume('ok');
setTimeout(function () {
splash.resume('not ok');
}, 500);
}
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"value": "ok", "value_type": "string"})
class RunjsTest(BaseLuaRenderTest):
def test_define_variable(self):
resp = self.request_lua("""
function main(splash)
assert(splash:runjs("x=5"))
return {x=splash:evaljs("x")}
end
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"x": 5})
def test_runjs_undefined(self):
resp = self.request_lua("""
function main(splash)
assert(splash:runjs("undefined"))
return {ok=true}
end
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"ok": True})
def test_define_function(self):
resp = self.request_lua("""
function main(splash)
assert(splash:runjs("egg = function(){return 'spam'};"))
local egg = splash:jsfunc("window.egg")
return {egg=egg()}
end
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"egg": "spam"})
def test_runjs_syntax_error(self):
resp = self.request_lua("""
function main(splash)
local res, err = splash:runjs("function()")
return {res=res, err=err}
end
""")
self.assertStatusCode(resp, 200)
err = resp.json()['err']
self.assertEqual(err['type'], ScriptError.JS_ERROR)
self.assertEqual(err['js_error_type'], 'SyntaxError')
self.assertEqual(err['splash_method'], 'runjs')
def test_runjs_exception(self):
resp = self.request_lua("""
function main(splash)
local res, err = splash:runjs("var x = y;")
return {res=res, err=err}
end
""")
self.assertStatusCode(resp, 200)
err = resp.json()['err']
self.assertEqual(err['type'], ScriptError.JS_ERROR)
self.assertEqual(err['js_error_type'], 'ReferenceError')
self.assertRegexpMatches(err['message'], "Can't find variable")
self.assertEqual(err['splash_method'], 'runjs')
class JsfuncTest(BaseLuaRenderTest):
def assertJsfuncResult(self, source, arguments, result):
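        """Wrap source in splash:jsfunc(), call it with the given Lua arguments and check the result."""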
resp = self.request_lua("""
function main(splash)
local func = splash:jsfunc([[%s]])
return func(%s)
end
""" % (source, arguments))
self.assertStatusCode(resp, 200)
if isinstance(result, (dict, list)):
self.assertEqual(resp.json(), result)
else:
self.assertEqual(resp.text, result)
def test_Math(self):
self.assertJsfuncResult("Math.pow", "5, 2", "25")
def test_helloworld(self):
self.assertJsfuncResult(
"function(s) {return 'Hello, ' + s;}",
"'world!'",
"Hello, world!"
)
def test_object_argument(self):
self.assertJsfuncResult(
"function(obj) {return obj.foo;}",
"{foo='bar'}",
"bar",
)
def test_object_result(self):
self.assertJsfuncResult(
"function(obj) {return obj.foo;}",
"{foo={x=5, y=10}}",
{"x": 5, "y": 10},
)
def test_object_result_pass(self):
resp = self.request_lua("""
function main(splash)
local func1 = splash:jsfunc("function(){return {foo:{x:5}}}")
local func2 = splash:jsfunc("function(obj){return obj.foo}")
local obj = func1()
return func2(obj)
end
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"x": 5})
def test_bool(self):
is5 = "function(num){return num==5}"
self.assertJsfuncResult(is5, "5", "True")
self.assertJsfuncResult(is5, "6", "False")
def test_undefined_result(self):
self.assertJsfuncResult("function(){}", "", "None")
def test_undefined_argument(self):
self.assertJsfuncResult("function(foo){return foo}", "", "None")
def test_throw_string(self):
resp = self.request_lua("""
function main(splash)
local func = splash:jsfunc("function(){throw 'ABC'}")
return func()
end
""")
err = self.assertScriptError(resp, ScriptError.JS_ERROR)
self.assertEqual(err['info']['js_error_message'], 'ABC')
self.assertEqual(err['info']['js_error_type'], '<custom JS error>')
def test_throw_pcall(self):
resp = self.request_lua("""
function main(splash)
local func = splash:jsfunc("function(){throw 'ABC'}")
local ok, res = pcall(func)
return {ok=ok, res=res}
end
""")
self.assertStatusCode(resp, 200)
data = resp.json()
self.assertEqual(data["ok"], False)
self.assertIn("error during JS function call: u'ABC'", data["res"])
def test_throw_error(self):
resp = self.request_lua("""
function main(splash)
local func = splash:jsfunc("function(){throw new Error('ABC')}")
return func()
end
""")
err = self.assertScriptError(resp, ScriptError.JS_ERROR)
self.assertEqual(err['info']['js_error_message'], 'ABC')
self.assertEqual(err['info']['js_error_type'], 'Error')
def test_throw_error_empty(self):
resp = self.request_lua("""
function main(splash)
local func = splash:jsfunc("function(){throw new Error()}")
return func()
end
""")
err = self.assertScriptError(resp, ScriptError.JS_ERROR)
self.assertEqual(err['info']['js_error_message'], '')
self.assertEqual(err['info']['js_error_type'], 'Error')
def test_throw_error_pcall(self):
resp = self.request_lua("""
function main(splash)
local func = splash:jsfunc("function(){throw new Error('ABC')}")
local ok, res = pcall(func)
return {ok=ok, res=res}
end
""")
self.assertStatusCode(resp, 200)
data = resp.json()
self.assertEqual(data["ok"], False)
self.assertIn("error during JS function call: u'Error: ABC'", data["res"])
def test_js_syntax_error(self):
resp = self.request_lua("""
function main(splash)
local func = splash:jsfunc("function(){")
return func()
end
""")
err = self.assertScriptError(resp, ScriptError.JS_ERROR)
self.assertEqual(err['info']['js_error_type'], 'SyntaxError')
def test_js_syntax_error_brace(self):
resp = self.request_lua("""
function main(splash)
local func = splash:jsfunc('); window.alert("hello")')
return func()
end
""")
err = self.assertScriptError(resp, ScriptError.JS_ERROR)
self.assertEqual(err['info']['js_error_type'], 'SyntaxError')
def test_array_result(self):
self.assertJsfuncResult(
"function(){return [1, 2, 'foo']}",
"",
[1, 2, "foo"]
)
def test_array_result_processed(self):
        # XXX: note that indexing starts from 1
resp = self.request_lua("""
function main(splash)
local func = splash:jsfunc("function(){return [1, 2, 'foo']}")
local arr = func()
local first = arr[1]
            return {arr=arr, first=first, tp=type(arr)}
end
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"arr": [1, 2, "foo"], "first": 1, "tp": "table"})
def test_array_argument(self):
        # XXX: note that indexing starts from 1
self.assertJsfuncResult(
"function(arr){return arr[1]}",
"{5, 6, 'foo'}",
"5",
)
    # this doesn't work because the table is passed as an object
@pytest.mark.xfail
def test_array_length(self):
self.assertJsfuncResult(
"function(arr){return arr.length}",
"{5, 6, 'foo'}",
"3",
)
def test_jsfunc_attributes(self):
resp = self.request_lua(""" -- 1
function main(splash) -- 2
local func = splash:jsfunc("function(){return 123}") -- 3
return func.source -- 4 <-
end
""")
err = self.assertScriptError(resp, ScriptError.LUA_ERROR,
message="attempt to index")
self.assertEqual(err['info']['line_number'], 4)
def test_private_jsfunc_not_available(self):
resp = self.request_lua("""
function main(splash)
return {ok = splash.private_jsfunc == nil}
end
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json()['ok'], True)
def test_private_jsfunc_attributes(self):
resp = self.request_lua(""" -- 1
function main(splash) -- 2
local func = splash:private_jsfunc("function(){return 123}") -- 3 <-
return func.source -- 4
end
""")
err = self.assertScriptError(resp, ScriptError.LUA_ERROR)
self.assertEqual(err['info']['line_number'], 3)
class WaitTest(BaseLuaRenderTest):
def wait(self, wait_args, request_args=None):
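        """Call splash:wait with the given Lua argument list and return the response."""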
code = """
function main(splash)
local ok, reason = splash:wait%s
return {ok=ok, reason=reason}
end
""" % wait_args
return self.request_lua(code, request_args)
def go_and_wait(self, wait_args, request_args):
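        """Call splash:go(splash.args.url), then splash:wait with the given arguments."""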
code = """
function main(splash)
assert(splash:go(splash.args.url))
local ok, reason = splash:wait%s
return {ok=ok, reason=reason}
end
""" % wait_args
return self.request_lua(code, request_args)
def test_timeout(self):
resp = self.wait("(0.01)", {"timeout": 0.1})
self.assertStatusCode(resp, 200)
resp = self.wait("(1)", {"timeout": 0.1})
err = self.assertJsonError(resp, 504, "GlobalTimeoutError")
self.assertEqual(err['info']['timeout'], 0.1)
def test_wait_success(self):
resp = self.wait("(0.01)")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"ok": True})
def test_wait_noredirect(self):
resp = self.wait("{time=0.01, cancel_on_redirect=true}")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"ok": True})
def test_wait_redirect_nocancel(self):
        # jsredirect-timer redirects after 0.1s
resp = self.go_and_wait(
"{time=0.2, cancel_on_redirect=false}",
{'url': self.mockurl("jsredirect-timer")}
)
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"ok": True})
def test_wait_redirect_cancel(self):
        # jsredirect-timer redirects after 0.1s
resp = self.go_and_wait(
"{time=0.2, cancel_on_redirect=true}",
{'url': self.mockurl("jsredirect-timer")}
)
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"reason": "redirect"}) # ok is nil
@unittest.skipIf(NON_EXISTING_RESOLVABLE, "non existing hosts are resolvable")
def test_wait_onerror(self):
resp = self.go_and_wait(
"{time=2., cancel_on_redirect=false, cancel_on_error=true}",
{'url': self.mockurl("jsredirect-non-existing")}
)
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"reason": "network3"}) # ok is nil
@unittest.skipIf(NON_EXISTING_RESOLVABLE, "non existing hosts are resolvable")
def test_wait_onerror_nocancel(self):
resp = self.go_and_wait(
"{time=2., cancel_on_redirect=false, cancel_on_error=false}",
{'url': self.mockurl("jsredirect-non-existing")}
)
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"ok": True})
@unittest.skipIf(NON_EXISTING_RESOLVABLE, "non existing hosts are resolvable")
def test_wait_onerror_nocancel_redirect(self):
resp = self.go_and_wait(
"{time=2., cancel_on_redirect=true, cancel_on_error=false}",
{'url': self.mockurl("jsredirect-non-existing")}
)
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"reason": "redirect"})
def test_wait_badarg(self):
resp = self.wait('{time="sdf"}')
self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR)
def test_wait_badarg2(self):
resp = self.wait('{time="sdf"}')
self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR)
def test_wait_good_string(self):
resp = self.wait('{time="0.01"}')
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"ok": True})
def test_wait_noargs(self):
resp = self.wait('()')
self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR)
def test_wait_time_missing(self):
resp = self.wait('{cancel_on_redirect=false}')
self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR)
def test_wait_unknown_args(self):
resp = self.wait('{ttime=0.5}')
self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR)
def test_wait_negative(self):
resp = self.wait('(-0.2)')
self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR)
class ArgsTest(BaseLuaRenderTest):
def args_request(self, query):
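        """Return a response whose JSON contains splash.args as seen by the script."""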
func = """
function main(splash)
return {args=splash.args}
end
"""
return self.request_lua(func, query)
def assertArgs(self, query):
resp = self.args_request(query)
self.assertStatusCode(resp, 200)
data = resp.json()["args"]
data.pop('lua_source')
data.pop('uid')
return data
def assertArgsPassed(self, query):
args = self.assertArgs(query)
self.assertEqual(args, query)
return args
def test_known_args(self):
self.assertArgsPassed({"wait": "1.0"})
self.assertArgsPassed({"timeout": "2.0"})
self.assertArgsPassed({"url": "foo"})
def test_unknown_args(self):
self.assertArgsPassed({"foo": "bar"})
def test_filters_validation(self):
# 'global' known arguments are still validated
resp = self.args_request({"filters": 'foo,bar'})
err = self.assertJsonError(resp, 400, "BadOption")
self.assertEqual(err['info']['argument'], 'filters')
class JsonPostUnicodeTest(BaseLuaRenderTest):
request_handler = JsonPostRequestHandler
def test_unicode(self):
resp = self.request_lua(u"""
function main(splash) return {key="значение"} end
""".encode('utf8'))
self.assertStatusCode(resp, 200)
self.assertEqual(resp.headers['content-type'], 'application/json')
self.assertEqual(resp.json(), {"key": u"значение"})
class JsonPostArgsTest(ArgsTest):
request_handler = JsonPostRequestHandler
def test_headers(self):
headers = {"user-agent": "Firefox", "content-type": "text/plain"}
self.assertArgsPassed({"headers": headers})
def test_headers_items(self):
headers = [["user-agent", "Firefox"], ["content-type", "text/plain"]]
self.assertArgsPassed({"headers": headers})
def test_access_headers(self):
func = """
function main(splash)
local ua = "Unknown"
if splash.args.headers then
ua = splash.args.headers['user-agent']
end
return {ua=ua, firefox=(ua=="Firefox")}
end
"""
resp = self.request_lua(func, {'headers': {"user-agent": "Firefox"}})
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"ua": "Firefox", "firefox": True})
resp = self.request_lua(func)
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"ua": "Unknown", "firefox": False})
def test_custom_object(self):
self.assertArgsPassed({"myobj": {"foo": "bar", "bar": ["egg", "spam", 1]}})
def test_post_numbers(self):
self.assertArgsPassed({"x": 5})
class GoTest(BaseLuaRenderTest):
def go_status(self, url):
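        """Run splash:go(url) and return its {ok, reason} result as a dict."""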
resp = self.request_lua("""
function main(splash)
local ok, reason = splash:go(splash.args.url)
return {ok=ok, reason=reason}
end
""", {"url": url})
self.assertStatusCode(resp, 200)
return resp.json()
def _geturl(self, code, empty=False):
if empty:
path = "getrequest?code=%s&empty=1" % code
else:
path = "getrequest?code=%s" % code
return self.mockurl(path)
def assertGoStatusCodeError(self, code):
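        """splash:go() to a page returning this status code must fail with reason "http<code>"."""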
for empty in [False, True]:
data = self.go_status(self._geturl(code, empty))
self.assertNotIn("ok", data)
self.assertEqual(data["reason"], "http%s" % code)
def assertGoNoError(self, code):
for empty in [False, True]:
data = self.go_status(self._geturl(code, empty))
self.assertTrue(data["ok"])
self.assertNotIn("reason", data)
def test_go_200(self):
self.assertGoNoError(200)
def test_go_400(self):
self.assertGoStatusCodeError(400)
def test_go_401(self):
self.assertGoStatusCodeError(401)
def test_go_403(self):
self.assertGoStatusCodeError(403)
def test_go_404(self):
self.assertGoStatusCodeError(404)
def test_go_500(self):
self.assertGoStatusCodeError(500)
def test_go_503(self):
self.assertGoStatusCodeError(503)
def test_nourl(self):
resp = self.request_lua("function main(splash) splash:go() end")
self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR)
def test_nourl_args(self):
resp = self.request_lua("function main(splash) splash:go(splash.args.url) end")
err = self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR,
message="required")
self.assertEqual(err['info']['argument'], 'url')
@unittest.skipIf(NON_EXISTING_RESOLVABLE, "non existing hosts are resolvable")
def test_go_error(self):
data = self.go_status("non-existing")
self.assertEqual(data.get('ok', False), False)
self.assertEqual(data["reason"], "network301")
def test_go_multiple(self):
resp = self.request_lua("""
function main(splash)
splash:go(splash.args.url_1)
local html_1 = splash:html()
splash:go(splash.args.url_2)
return {html_1=html_1, html_2=splash:html()}
end
""", {
'url_1': self.mockurl('getrequest?foo=1'),
'url_2': self.mockurl('getrequest?bar=2')
})
self.assertStatusCode(resp, 200)
data = resp.json()
self.assertIn("{'foo': ['1']}", data['html_1'])
self.assertIn("{'bar': ['2']}", data['html_2'])
def test_go_404_then_good(self):
resp = self.request_lua("""
function main(splash)
local ok1, err1 = splash:go(splash.args.url_1)
local html_1 = splash:html()
local ok2, err2 = splash:go(splash.args.url_2)
local html_2 = splash:html()
return {html_1=html_1, html_2=html_2, err1=err1, err2=err2, ok1=ok1, ok2=ok2}
end
""", {
'url_1': self.mockurl('--some-non-existing-resource--'),
'url_2': self.mockurl('bad-related'),
})
self.assertStatusCode(resp, 200)
data = resp.json()
self.assertEqual(data["err1"], "http404")
self.assertNotIn("err2", data)
self.assertNotIn("ok1", data)
self.assertEqual(data["ok2"], True)
self.assertIn("No Such Resource", data["html_1"])
self.assertIn("http://non-existing", data["html_2"])
@unittest.skipIf(NON_EXISTING_RESOLVABLE, "non existing hosts are resolvable")
def test_go_bad_then_good(self):
resp = self.request_lua("""
function main(splash)
splash:go("--non-existing-host")
local ok, err = splash:go(splash.args.url)
return {ok=ok, err=err}
end
""", {"url": self.mockurl("jsrender")})
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"ok": True})
def test_go_headers_cookie(self):
resp = self.request_lua("""
function main(splash)
assert(splash:go{splash.args.url, headers={
["Cookie"] = "foo=bar; egg=spam"
}})
return splash:html()
end
""", {"url": self.mockurl("get-cookie?key=egg")})
self.assertStatusCode(resp, 200)
self.assertIn("spam", resp.text)
def test_go_headers(self):
resp = self.request_lua("""
function main(splash)
assert(splash:go{splash.args.url, headers={
["Custom-Header"] = "Header Value",
}})
local res1 = splash:html()
-- second request is without any custom headers
assert(splash:go(splash.args.url))
local res2 = splash:html()
return {res1=res1, res2=res2}
end
""", {"url": self.mockurl("getrequest")})
self.assertStatusCode(resp, 200)
data = resp.json()
self.assertIn("'Header Value'", data["res1"])
self.assertNotIn("'Header Value'", data["res2"])
def test_set_custom_headers(self):
resp = self.request_lua("""
function main(splash)
splash:set_custom_headers({
["Header-1"] = "Value 1",
["Header-2"] = "Value 2",
})
assert(splash:go(splash.args.url))
local res1 = splash:html()
assert(splash:go{splash.args.url, headers={
["Header-3"] = "Value 3",
}})
local res2 = splash:html()
assert(splash:go(splash.args.url))
local res3 = splash:html()
return {res1=res1, res2=res2, res3=res3}
end
""", {"url": self.mockurl("getrequest")})
self.assertStatusCode(resp, 200)
data = resp.json()
self.assertIn("'Value 1'", data["res1"])
self.assertIn("'Value 2'", data["res1"])
self.assertNotIn("'Value 3'", data["res1"])
self.assertNotIn("'Value 1'", data["res2"])
self.assertNotIn("'Value 2'", data["res2"])
self.assertIn("'Value 3'", data["res2"])
self.assertIn("'Value 1'", data["res3"])
self.assertIn("'Value 2'", data["res3"])
self.assertNotIn("'Value 3'", data["res3"])
class ResourceTimeoutTest(BaseLuaRenderTest):
def test_resource_timeout_aborts_first(self):
resp = self.request_lua("""
function main(splash)
splash:on_request(function(req) req:set_timeout(0.1) end)
local ok, err = splash:go{splash.args.url}
return {err=err}
end
""", {"url": self.mockurl("slow.gif?n=4")})
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {'err': 'render_error'})
def test_resource_timeout_attribute(self):
# request should be cancelled
resp = self.request_lua("""
function main(splash)
splash.resource_timeout = 0.1
assert(splash:go(splash.args.url))
end
""", {"url": self.mockurl("slow.gif?n=4")})
self.assertScriptError(resp, ScriptError.LUA_ERROR,
message='render_error')
def test_resource_timeout_attribute_priority(self):
# set_timeout should take a priority
resp = self.request_lua("""
function main(splash)
splash.resource_timeout = 0.1
splash:on_request(function(req) req:set_timeout(10) end)
assert(splash:go(splash.args.url))
end
""", {"url": self.mockurl("slow.gif?n=4")})
self.assertStatusCode(resp, 200)
def test_resource_timeout_read(self):
resp = self.request_lua("""
function main(splash)
local default = splash.resource_timeout
splash.resource_timeout = 0.1
local updated = splash.resource_timeout
return {default=default, updated=updated}
end
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"default": 0, "updated": 0.1})
def test_resource_timeout_zero(self):
resp = self.request_lua("""
function main(splash)
splash.resource_timeout = 0
assert(splash:go(splash.args.url))
end
""", {"url": self.mockurl("slow.gif?n=1")})
self.assertStatusCode(resp, 200)
resp = self.request_lua("""
function main(splash)
splash.resource_timeout = nil
assert(splash:go(splash.args.url))
end
""", {"url": self.mockurl("slow.gif?n=1")})
self.assertStatusCode(resp, 200)
def test_resource_timeout_negative(self):
resp = self.request_lua("""
function main(splash)
splash.resource_timeout = -1
assert(splash:go(splash.args.url))
end
""", {"url": self.mockurl("slow.gif?n=1")})
err = self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR,
message='splash.resource_timeout')
self.assertEqual(err['info']['line_number'], 3)
class ResultStatusCodeTest(BaseLuaRenderTest):
def test_set_result_status_code(self):
for code in [200, 404, 500, 999]:
resp = self.request_lua("""
function main(splash)
splash:set_result_status_code(tonumber(splash.args.code))
return "hello"
end
""", {'code': code})
self.assertStatusCode(resp, code)
self.assertEqual(resp.text, 'hello')
def test_invalid_code(self):
for code in ["foo", "", {'x': 3}, 0, -200, 195, 1000]:
resp = self.request_lua("""
function main(splash)
splash:set_result_status_code(splash.args.code)
return "hello"
end
""", {'code': code})
err = self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR)
self.assertEqual(err['info']['splash_method'],
'set_result_status_code')
class SetUserAgentTest(BaseLuaRenderTest):
def test_set_user_agent(self):
resp = self.request_lua("""
function main(splash)
splash:go(splash.args.url)
local res1 = splash:html()
splash:set_user_agent("Foozilla")
splash:go(splash.args.url)
local res2 = splash:html()
splash:go(splash.args.url)
local res3 = splash:html()
return {res1=res1, res2=res2, res3=res3}
end
""", {"url": self.mockurl("getrequest")})
self.assertStatusCode(resp, 200)
data = resp.json()
self.assertIn("Mozilla", data["res1"])
self.assertNotIn("Mozilla", data["res2"])
self.assertNotIn("Mozilla", data["res3"])
self.assertNotIn("'user-agent': 'Foozilla'", data["res1"])
self.assertIn("'user-agent': 'Foozilla'", data["res2"])
self.assertIn("'user-agent': 'Foozilla'", data["res3"])
def test_error(self):
resp = self.request_lua("""
function main(splash) splash:set_user_agent(123) end
""")
err = self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR)
self.assertEqual(err['info']['splash_method'], 'set_user_agent')
class CookiesTest(BaseLuaRenderTest):
def test_cookies(self):
resp = self.request_lua("""
function main(splash)
local function cookies_after(url)
splash:go(url)
return splash:get_cookies()
end
local c0 = splash:get_cookies()
local c1 = cookies_after(splash.args.url_1)
local c2 = cookies_after(splash.args.url_2)
splash:clear_cookies()
local c3 = splash:get_cookies()
local c4 = cookies_after(splash.args.url_2)
local c5 = cookies_after(splash.args.url_1)
splash:delete_cookies("foo")
local c6 = splash:get_cookies()
splash:delete_cookies{url="http://example.com"}
local c7 = splash:get_cookies()
splash:delete_cookies{url="http://localhost"}
local c8 = splash:get_cookies()
splash:init_cookies(c2)
local c9 = splash:get_cookies()
return {c0=c0, c1=c1, c2=c2, c3=c3, c4=c4, c5=c5, c6=c6, c7=c7, c8=c8, c9=c9}
end
""", {
"url_1": self.mockurl("set-cookie?key=foo&value=bar"),
"url_2": self.mockurl("set-cookie?key=egg&value=spam"),
})
self.assertStatusCode(resp, 200)
data = resp.json()
cookie1 = {
'name': 'foo',
'value': 'bar',
'domain': 'localhost',
'path': '/',
'httpOnly': False,
'secure': False
}
cookie2 = {
'name': 'egg',
'value': 'spam',
'domain': 'localhost',
'path': '/',
'httpOnly': False,
'secure': False
}
self.assertEqual(data["c0"], [])
self.assertEqual(data["c1"], [cookie1])
self.assertEqual(data["c2"], [cookie1, cookie2])
self.assertEqual(data["c3"], [])
self.assertEqual(data["c4"], [cookie2])
self.assertEqual(data["c5"], [cookie2, cookie1])
self.assertEqual(data["c6"], [cookie2])
self.assertEqual(data["c7"], [cookie2])
self.assertEqual(data["c8"], [])
self.assertEqual(data["c9"], data["c2"])
def test_add_cookie(self):
resp = self.request_lua("""
function main(splash)
splash:add_cookie("baz", "egg")
splash:add_cookie{"spam", "egg", domain="example.com"}
splash:add_cookie{
name="foo",
value="bar",
path="/",
domain="localhost",
expires="2016-07-24T19:20:30+02:00",
secure=true,
httpOnly=true,
}
return splash:get_cookies()
end""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), [
{"name": "baz", "value": "egg", "path": "",
"domain": "", "httpOnly": False, "secure": False},
{"name": "spam", "value": "egg", "path": "",
"domain": "example.com", "httpOnly": False, "secure": False},
{"name": "foo", "value": "bar", "path": "/",
"domain": "localhost", "httpOnly": True, "secure": True,
"expires": "2016-07-24T19:20:30+02:00"},
])
def test_init_cookies(self):
resp = self.request_lua("""
function main(splash)
splash:init_cookies({
{name="baz", value="egg"},
{name="spam", value="egg", domain="example.com"},
{
name="foo",
value="bar",
path="/",
domain="localhost",
expires="2016-07-24T19:20:30+02:00",
secure=true,
httpOnly=true,
}
})
return splash:get_cookies()
end""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), [
{"name": "baz", "value": "egg", "path": "",
"domain": "", "httpOnly": False, "secure": False},
{"name": "spam", "value": "egg", "path": "",
"domain": "example.com", "httpOnly": False, "secure": False},
{"name": "foo", "value": "bar", "path": "/",
"domain": "localhost", "httpOnly": True, "secure": True,
"expires": "2016-07-24T19:20:30+02:00"},
])
class CurrentUrlTest(BaseLuaRenderTest):
def request_url(self, url, wait=0.0):
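        """go() to url, wait, and return a response whose JSON includes splash:url()."""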
return self.request_lua("""
function main(splash)
local ok, res = splash:go(splash.args.url)
splash:wait(splash.args.wait)
return {ok=ok, res=res, url=splash:url()}
end
""", {"url": url, "wait": wait})
def assertCurrentUrl(self, go_url, url=None, wait=0.0):
if url is None:
url = go_url
resp = self.request_url(go_url, wait)
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json()["url"], url)
def test_start(self):
resp = self.request_lua("function main(splash) return splash:url() end")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.text, "")
def test_blank(self):
self.assertCurrentUrl("about:blank")
def test_not_redirect(self):
self.assertCurrentUrl(self.mockurl("getrequest"))
def test_jsredirect(self):
self.assertCurrentUrl(self.mockurl("jsredirect"))
self.assertCurrentUrl(
self.mockurl("jsredirect"),
self.mockurl("jsredirect-target"),
wait=0.5,
)
class DisableScriptsTest(BaseLuaRenderTest):
def test_nolua(self):
with SplashServer(extra_args=['--disable-lua']) as splash:
# Check that Lua is disabled in UI
resp = requests.get(splash.url("/"))
self.assertStatusCode(resp, 200)
self.assertNotIn("<textarea", resp.text) # no code editor
script = "function main(splash) return 'foo' end"
# Check that /execute doesn't work
resp = requests.get(
url=splash.url("execute"),
params={'lua_source': script},
)
self.assertStatusCode(resp, 404)
class SandboxTest(BaseLuaRenderTest):
def assertTooMuchCPU(self, resp, subtype=ScriptError.LUA_ERROR):
return self.assertScriptError(resp, subtype,
message="script uses too much CPU")
def assertTooMuchMemory(self, resp, subtype=ScriptError.LUA_ERROR):
return self.assertScriptError(resp, subtype,
message="script uses too much memory")
def test_sandbox_string_function(self):
resp = self.request_lua("""
function main(self)
return string.rep("x", 10000)
end
""")
self.assertScriptError(resp, ScriptError.LUA_ERROR,
message="nil value")
self.assertErrorLineNumber(resp, 3)
def test_sandbox_string_method(self):
resp = self.request_lua("""
function main(self)
return ("x"):rep(10000)
end
""")
self.assertScriptError(resp, ScriptError.LUA_ERROR,
message="attempt to index constant")
self.assertErrorLineNumber(resp, 3)
# TODO: strings should use a sandboxed string module as a metatable
@pytest.mark.xfail
def test_non_sandboxed_string_method(self):
resp = self.request_lua("""
function main(self)
return ("X"):lower()
end
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.text, "x")
def test_infinite_loop(self):
resp = self.request_lua("""
function main(self)
local x = 0
while true do
x = x + 1
end
return x
end
""")
self.assertTooMuchCPU(resp)
def test_infinite_loop_toplevel(self):
resp = self.request_lua("""
x = 0
while true do
x = x + 1
end
function main(self)
return 5
end
""")
self.assertTooMuchCPU(resp, ScriptError.LUA_INIT_ERROR)
def test_infinite_loop_memory(self):
resp = self.request_lua("""
function main(self)
t = {}
while true do
t = { t }
end
return t
end
""")
# it can be either memory or CPU
self.assertScriptError(resp, ScriptError.LUA_ERROR,
message="too much")
def test_memory_attack(self):
resp = self.request_lua("""
function main(self)
local s = "aaaaaaaaaaaaaaaaaaaa"
while true do
s = s..s
end
return s
end
""")
self.assertTooMuchMemory(resp)
def test_memory_attack_toplevel(self):
resp = self.request_lua("""
s = "aaaaaaaaaaaaaaaaaaaa"
while true do
s = s..s
end
function main(self)
return s
end
""")
self.assertTooMuchMemory(resp, ScriptError.LUA_INIT_ERROR)
def test_billion_laughs(self):
resp = self.request_lua("""
s = "s"
s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s
s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s
s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s
s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s
s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s
s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s
s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s
s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s
s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s
s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s
s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s
s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s
s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s
function main() end
""")
self.assertTooMuchMemory(resp, ScriptError.LUA_INIT_ERROR)
def test_disable_sandbox(self):
# dofile function should always be sandboxed
is_sandbox = "function main(splash) return {s=(dofile==nil)} end"
resp = self.request_lua(is_sandbox)
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"s": True})
with SplashServer(extra_args=['--disable-lua-sandbox']) as splash:
resp = requests.get(
url=splash.url("execute"),
params={'lua_source': is_sandbox},
)
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"s": False})
class RequireTest(BaseLuaRenderTest):
def _set_title(self, title):
return """
splash:set_content([[
<html>
<head>
<title>%s</title>
</head>
</html>
]])
""" % title
def assertNoRequirePathsLeaked(self, resp):
self.assertNotIn("/lua", resp.text)
self.assertNotIn("init.lua", resp.text)
def test_splash_patching(self):
title = "TEST"
resp = self.request_lua("""
require "utils_patch"
function main(splash)
%(set_title)s
return splash:get_document_title()
end
""" % dict(set_title=self._set_title(title)))
self.assertStatusCode(resp, 200)
self.assertEqual(resp.text, title)
def test_splash_patching_no_require(self):
resp = self.request_lua("""
function main(splash)
%(set_title)s
return splash:get_document_title()
end
""" % dict(set_title=self._set_title("TEST")))
self.assertScriptError(resp, ScriptError.LUA_ERROR,
message="get_document_title")
self.assertNoRequirePathsLeaked(resp)
def test_require_unsafe(self):
resp = self.request_lua("""
local Splash = require("splash")
function main(splash) return "hello" end
""")
self.assertScriptError(resp, ScriptError.LUA_INIT_ERROR)
self.assertErrorLineNumber(resp, 2)
self.assertNoRequirePathsLeaked(resp)
def test_require_not_whitelisted(self):
resp = self.request_lua("""
local utils = require("utils")
local secret = require("secret")
function main(splash) return "hello" end
""")
self.assertScriptError(resp, ScriptError.LUA_INIT_ERROR)
self.assertErrorLineNumber(resp, 3)
self.assertNoRequirePathsLeaked(resp)
def test_require_non_existing(self):
resp = self.request_lua("""
local foobar = require("foobar")
function main(splash) return "hello" end
""")
self.assertScriptError(resp, ScriptError.LUA_INIT_ERROR)
self.assertNoRequirePathsLeaked(resp)
self.assertErrorLineNumber(resp, 2)
def test_require_non_existing_whitelisted(self):
resp = self.request_lua("""
local non_existing = require("non_existing")
function main(splash) return "hello" end
""")
self.assertScriptError(resp, ScriptError.LUA_INIT_ERROR)
self.assertNoRequirePathsLeaked(resp)
self.assertErrorLineNumber(resp, 2)
def test_module(self):
title = "TEST"
resp = self.request_lua("""
local utils = require "utils"
function main(splash)
%(set_title)s
return utils.get_document_title(splash)
end
""" % dict(set_title=self._set_title(title)))
self.assertStatusCode(resp, 200)
self.assertEqual(resp.text, title)
def test_module_require_unsafe_from_safe(self):
resp = self.request_lua("""
function main(splash)
return require("utils").hello
end
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.text, "world")
class HarTest(BaseLuaRenderTest):
def test_har_empty(self):
resp = self.request_lua("""
function main(splash)
return splash:har()
end
""")
self.assertStatusCode(resp, 200)
har = resp.json()["log"]
self.assertEqual(har["entries"], [])
def test_har_about_blank(self):
resp = self.request_lua("""
function main(splash)
splash:go("about:blank")
return splash:har()
end
""")
self.assertStatusCode(resp, 200)
har = resp.json()["log"]
self.assertEqual(har["entries"], [])
def test_har_reset(self):
resp = self.request_lua("""
function main(splash)
splash:go(splash.args.url)
splash:go(splash.args.url)
local har1 = splash:har()
splash:har_reset()
local har2 = splash:har()
splash:go(splash.args.url)
local har3 = splash:har()
return {har1, har2, har3}
end
""", {'url': self.mockurl("jsrender")})
self.assertStatusCode(resp, 200)
har1 = resp.json()["1"]
har2 = resp.json()["2"]
har3 = resp.json()["3"]
self.assertEqual(len(har1['log']['entries']), 2)
self.assertEqual(har2['log']['entries'], [])
self.assertEqual(len(har3['log']['entries']), 1)
def test_har_reset_argument(self):
resp = self.request_lua("""
function main(splash)
splash:go(splash.args.url)
local har1 = splash:har()
splash:go(splash.args.url)
local har2 = splash:har{reset=true}
local har3 = splash:har()
splash:go(splash.args.url)
local har4 = splash:har()
return {har1, har2, har3, har4}
end
""", {'url': self.mockurl("jsrender")})
self.assertStatusCode(resp, 200)
har1 = resp.json()["1"]
har2 = resp.json()["2"]
har3 = resp.json()["3"]
har4 = resp.json()["4"]
self.assertEqual(len(har1['log']['entries']), 1)
self.assertEqual(len(har2['log']['entries']), 2)
self.assertEqual(har3['log']['entries'], [])
self.assertEqual(len(har4['log']['entries']), 1)
def test_har_reset_inprogress(self):
resp = self.request_lua("""
function main(splash)
splash:go(splash.args.url)
splash:wait(0.5)
local har1 = splash:har{reset=true}
splash:wait(2.5)
local har2 = splash:har()
return {har1, har2}
end
""", {'url': self.mockurl("show-image?n=2.0&js=0.1")})
self.assertStatusCode(resp, 200)
data = resp.json()
har1, har2 = data["1"]["log"], data["2"]["log"]
self.assertEqual(len(har1['entries']), 2)
self.assertEqual(har1['entries'][0]['_splash_processing_state'],
HarBuilder.REQUEST_FINISHED)
self.assertEqual(har1['entries'][1]['_splash_processing_state'],
HarBuilder.REQUEST_HEADERS_RECEIVED)
class AutoloadTest(BaseLuaRenderTest):
def test_autoload(self):
resp = self.request_lua("""
function main(splash)
assert(splash:autoload("window.FOO = 'bar'"))
splash:go(splash.args.url)
local foo1 = splash:evaljs("FOO")
splash:evaljs("window.FOO = 'spam'")
local foo2 = splash:evaljs("FOO")
splash:go(splash.args.url)
local foo3 = splash:evaljs("FOO")
return {foo1=foo1, foo2=foo2, foo3=foo3}
end
""", {"url": self.mockurl("getrequest")})
self.assertStatusCode(resp, 200)
data = resp.json()
self.assertEqual(data, {"foo1": "bar", "foo2": "spam", "foo3": "bar"})
def test_autoload_remote(self):
resp = self.request_lua("""
function main(splash)
assert(splash:autoload(splash.args.eggspam_url))
assert(splash:go(splash.args.url))
local egg = splash:jsfunc("egg")
return egg()
end
""", {
"url": self.mockurl("getrequest"),
"eggspam_url": self.mockurl("eggspam.js"),
})
self.assertStatusCode(resp, 200)
self.assertEqual(resp.text, "spam")
def test_autoload_bad(self):
resp = self.request_lua("""
function main(splash)
local ok, reason = splash:autoload(splash.args.bad_url)
return {ok=ok, reason=reason}
end
""", {"bad_url": self.mockurl("--non-existing--")})
self.assertStatusCode(resp, 200)
self.assertNotIn("ok", resp.json())
self.assertIn("404", resp.json()["reason"])
def test_noargs(self):
resp = self.request_lua("""
function main(splash)
splash:autoload()
end
""")
self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR)
self.assertErrorLineNumber(resp, 3)
def test_autoload_reset(self):
resp = self.request_lua("""
function main(splash)
splash:autoload([[window.FOO = 'foo']])
splash:autoload([[window.BAR = 'bar']])
splash:go(splash.args.url)
local foo1 = splash:evaljs("window.FOO")
local bar1 = splash:evaljs("window.BAR")
splash:autoload_reset()
splash:go(splash.args.url)
local foo2 = splash:evaljs("window.FOO")
local bar2 = splash:evaljs("window.BAR")
return {foo1=foo1, bar1=bar1, foo2=foo2, bar2=bar2}
end
""", {"url": self.mockurl("getrequest")})
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"foo1": "foo", "bar1": "bar"})
class HttpGetTest(BaseLuaRenderTest):
def test_get(self):
resp = self.request_lua("""
function main(splash)
local reply = splash:http_get(splash.args.url)
splash:wait(0.1)
return reply.content.text
end
""", {"url": self.mockurl("jsrender")})
self.assertStatusCode(resp, 200)
self.assertEqual(JsRender.template, resp.text)
def test_bad_url(self):
resp = self.request_lua("""
function main(splash)
return splash:http_get(splash.args.url)
end
""", {"url": self.mockurl("--bad-url--")})
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json()["status"], 404)
def test_headers(self):
resp = self.request_lua("""
function main(splash)
return splash:http_get{
splash.args.url,
headers={
["Custom-Header"] = "Header Value",
}
}
end
""", {"url": self.mockurl("getrequest")})
self.assertStatusCode(resp, 200)
data = resp.json()
self.assertEqual(data["status"], 200)
self.assertIn("Header Value", data["content"]["text"])
def test_redirects_follow(self):
resp = self.request_lua("""
function main(splash)
return splash:http_get(splash.args.url)
end
""", {"url": self.mockurl("http-redirect?code=302")})
self.assertStatusCode(resp, 200)
data = resp.json()
self.assertEqual(data["status"], 200)
self.assertNotIn("redirect to", data["content"]["text"])
self.assertIn("GET request", data["content"]["text"])
def test_redirects_nofollow(self):
resp = self.request_lua("""
function main(splash)
return splash:http_get{url=splash.args.url, follow_redirects=false}
end
""", {"url": self.mockurl("http-redirect?code=302")})
self.assertStatusCode(resp, 200)
data = resp.json()
self.assertEqual(data["status"], 302)
self.assertEqual(data["redirectURL"], "/getrequest?http_code=302")
self.assertIn("302 redirect to", data["content"]["text"])
def test_noargs(self):
resp = self.request_lua("""
function main(splash)
splash:http_get()
end
""")
self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR)
class NavigationLockingTest(BaseLuaRenderTest):
def test_lock_navigation(self):
url = self.mockurl("jsredirect")
resp = self.request_lua("""
function main(splash)
splash:go(splash.args.url)
splash:lock_navigation()
splash:wait(0.3)
return splash:url()
end
""", {"url": url})
self.assertStatusCode(resp, 200)
self.assertEqual(resp.text, url)
def test_unlock_navigation(self):
resp = self.request_lua("""
function main(splash)
splash:go(splash.args.url)
splash:lock_navigation()
splash:unlock_navigation()
splash:wait(0.3)
return splash:url()
end
""", {"url": self.mockurl("jsredirect")})
self.assertStatusCode(resp, 200)
self.assertEqual(resp.text, self.mockurl("jsredirect-target"))
def test_go_navigation_locked(self):
resp = self.request_lua("""
function main(splash)
splash:lock_navigation()
local ok, reason = splash:go(splash.args.url)
return {ok=ok, reason=reason}
end
""", {"url": self.mockurl("jsredirect"), "timeout": 1.0})
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"reason": "navigation_locked"})
class SetContentTest(BaseLuaRenderTest):
def test_set_content(self):
resp = self.request_lua("""
function main(splash)
assert(splash:set_content("<html><head></head><body><h1>Hello</h1></body></html>"))
return {
html = splash:html(),
url = splash:url(),
}
end
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {
"html": "<html><head></head><body><h1>Hello</h1></body></html>",
"url": "about:blank",
})
def test_unicode(self):
resp = self.request_lua("""
function main(splash)
assert(splash:set_content("проверка"))
return splash:html()
end
""")
self.assertStatusCode(resp, 200)
self.assertIn(u'проверка', resp.text)
def test_related_resources(self):
script = """
function main(splash)
splash:set_content{
data = [[
<html><body>
<img width=50 height=50 src="/slow.gif?n=0.2">
</body></html>
]],
baseurl = splash.args.base,
}
return splash:png()
end
"""
resp = self.request_lua(script, {"base": self.mockurl("")})
self.assertStatusCode(resp, 200)
img = Image.open(StringIO(resp.content))
self.assertEqual((0,0,0,255), img.getpixel((10, 10)))
# the same, but with a bad base URL
resp = self.request_lua(script, {"base": ""})
self.assertStatusCode(resp, 200)
img = Image.open(StringIO(resp.content))
self.assertNotEqual((0,0,0,255), img.getpixel((10, 10)))
def test_url(self):
resp = self.request_lua("""
function main(splash)
splash:set_content{"hey", baseurl="http://example.com/foo"}
return splash:url()
end
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.text, "http://example.com/foo")
class GetPerfStatsTest(BaseLuaRenderTest):
def test_get_perf_stats(self):
func = """
function main(splash)
return splash:get_perf_stats()
end
"""
out = self.request_lua(func).json()
self.assertItemsEqual(out.keys(),
['walltime', 'cputime', 'maxrss'])
self.assertIsInstance(out['cputime'], numbers.Real)
self.assertIsInstance(out['walltime'], numbers.Real)
self.assertIsInstance(out['maxrss'], numbers.Integral)
self.assertLess(out['cputime'], 1000.)
self.assertLess(0., out['cputime'])
# Should be safe to assume that splash process consumes between 1Mb
# and 1Gb of RAM, right?
self.assertLess(1E6, out['maxrss'])
self.assertLess(out['maxrss'], 1E9)
# I wonder if we could break this test...
now = time.time()
self.assertLess(now - 120, out['walltime'])
self.assertLess(out['walltime'], now)
class WindowSizeTest(BaseLuaRenderTest):
"""This is a test for window & viewport size interaction in Lua scripts."""
GET_DIMS_AFTER_SCRIPT = """
function get_dims(splash)
return {
inner = splash:evaljs("window.innerWidth") .. "x" .. splash:evaljs("window.innerHeight"),
outer = splash:evaljs("window.outerWidth") .. "x" .. splash:evaljs("window.outerHeight"),
client = (splash:evaljs("document.documentElement.clientWidth") .. "x"
.. splash:evaljs("document.documentElement.clientHeight"))
}
end
function main(splash)
alter_state(splash)
return get_dims(splash)
end
function alter_state(splash)
%s
end
"""
def return_json_from_lua(self, script, **kwargs):
resp = self.request_lua(script, kwargs)
if resp.ok:
return resp.json()
else:
raise RuntimeError(resp.content)
def get_dims_after(self, lua_script, **kwargs):
return self.return_json_from_lua(
self.GET_DIMS_AFTER_SCRIPT % lua_script, **kwargs)
def assertSizeAfter(self, lua_script, etalon, **kwargs):
out = self.get_dims_after(lua_script, **kwargs)
self.assertEqual(out, etalon)
def test_get_viewport_size(self):
script = """
function main(splash)
local w, h = splash:get_viewport_size()
return {width=w, height=h}
end
"""
out = self.return_json_from_lua(script)
w, h = map(int, defaults.VIEWPORT_SIZE.split('x'))
self.assertEqual(out, {'width': w, 'height': h})
def test_default_dimensions(self):
self.assertSizeAfter("",
{'inner': defaults.VIEWPORT_SIZE,
'outer': defaults.VIEWPORT_SIZE,
'client': defaults.VIEWPORT_SIZE})
def test_set_sizes_as_table(self):
self.assertSizeAfter('splash:set_viewport_size{width=111, height=222}',
{'inner': '111x222',
'outer': defaults.VIEWPORT_SIZE,
'client': '111x222'})
self.assertSizeAfter('splash:set_viewport_size{height=333, width=444}',
{'inner': '444x333',
'outer': defaults.VIEWPORT_SIZE,
'client': '444x333'})
def test_viewport_size_roundtrips(self):
self.assertSizeAfter(
'splash:set_viewport_size(splash:get_viewport_size())',
{'inner': defaults.VIEWPORT_SIZE,
'outer': defaults.VIEWPORT_SIZE,
'client': defaults.VIEWPORT_SIZE})
def test_viewport_size(self):
self.assertSizeAfter('splash:set_viewport_size(2000, 2000)',
{'inner': '2000x2000',
'outer': defaults.VIEWPORT_SIZE,
'client': '2000x2000'})
def test_viewport_size_validation(self):
cases = [
('()', 'set_viewport_size.* takes exactly 3 arguments'),
('{}', 'set_viewport_size.* takes exactly 3 arguments'),
('(1)', 'set_viewport_size.* takes exactly 3 arguments'),
('{1}', 'set_viewport_size.* takes exactly 3 arguments'),
('(1, nil)', 'a number is required'),
('{1, nil}', 'set_viewport_size.* takes exactly 3 arguments'),
('(nil, 1)', 'a number is required'),
('{nil, 1}', 'a number is required'),
('{width=1}', 'set_viewport_size.* takes exactly 3 arguments'),
('{width=1, nil}', 'set_viewport_size.* takes exactly 3 arguments'),
('{nil, width=1}', 'set_viewport_size.* takes exactly 3 arguments'),
('{height=1}', 'set_viewport_size.* takes exactly 3 arguments'),
('{height=1, nil}', 'set_viewport_size.* takes exactly 3 arguments'),
('{nil, height=1}', 'set_viewport_size.* takes exactly 3 arguments'),
('{100, width=200}', 'set_viewport_size.* got multiple values.*width'),
# This thing works.
# ('{height=200, 100}', 'set_viewport_size.* got multiple values.*width'),
('{100, "a"}', 'a number is required'),
('{100, {}}', 'a number is required'),
('{100, -1}', 'Viewport is out of range'),
('{100, 0}', 'Viewport is out of range'),
('{100, 99999}', 'Viewport is out of range'),
('{1, -100}', 'Viewport is out of range'),
('{0, 100}', 'Viewport is out of range'),
('{99999, 100}', 'Viewport is out of range'),
]
def run_test(size_str):
self.get_dims_after('splash:set_viewport_size%s' % size_str)
for size_str, errmsg in cases:
self.assertRaisesRegexp(RuntimeError, errmsg, run_test, size_str)
def test_viewport_full(self):
w = int(defaults.VIEWPORT_SIZE.split('x')[0])
self.assertSizeAfter('splash:go(splash.args.url);'
'splash:wait(0.1);'
'splash:set_viewport_full();',
{'inner': '%dx2000' % w,
'outer': defaults.VIEWPORT_SIZE,
'client': '%dx2000' % w},
url=self.mockurl('tall'))
def test_set_viewport_full_returns_dimensions(self):
script = """
function main(splash)
assert(splash:go(splash.args.url))
assert(splash:wait(0.1))
local w, h = splash:set_viewport_full()
return {width=w, height=h}
end
"""
out = self.return_json_from_lua(script, url=self.mockurl('tall'))
w, h = map(int, defaults.VIEWPORT_SIZE.split('x'))
self.assertEqual(out, {'width': w, 'height': 2000})
def test_render_all_restores_viewport_size(self):
script = """
function main(splash)
assert(splash:go(splash.args.url))
assert(splash:wait(0.1))
local before = {splash:get_viewport_size()}
png = splash:png{render_all=true}
local after = {splash:get_viewport_size()}
return {before=before, after=after, png=png}
end
"""
out = self.return_json_from_lua(script, url=self.mockurl('tall'))
w, h = map(int, defaults.VIEWPORT_SIZE.split('x'))
self.assertEqual(out['before'], {'1': w, '2': h})
self.assertEqual(out['after'], {'1': w, '2': h})
# 2000px is hardcoded in that html
img = Image.open(StringIO(standard_b64decode(out['png'])))
self.assertEqual(img.size, (w, 2000))
def test_set_viewport_size_changes_contents_size_immediately(self):
# GH167
script = """
function main(splash)
splash:set_viewport_size(1024, 768)
assert(splash:set_content([[
<html>
<body style="min-width: 800px; margin: 0px"> </body>
</html>
]]))
result = {}
result.before = {splash:set_viewport_full()}
splash:set_viewport_size(640, 480)
result.after = {splash:set_viewport_full()}
return result
end
"""
out = self.return_json_from_lua(script)
self.assertEqual(out,
{'before': {'1': 1024, '2': 768},
'after': {'1': 800, '2': 480}})
@pytest.mark.xfail
def test_viewport_full_raises_error_if_fails_in_script(self):
# XXX: for local resources loadFinished event generally arrives after
# initialLayoutCompleted, so the error doesn't manifest itself.
self.assertRaisesRegexp(RuntimeError, "zyzzy",
self.get_dims_after,
"""
splash:go(splash.args.url)
splash:set_viewport_full()
""", url=self.mockurl('delay'))
class VersionTest(BaseLuaRenderTest):
def test_version(self):
resp = self.request_lua("""
function main(splash)
local version = splash:get_version()
return version.major .. '.' .. version.minor
end
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.text, splash_version)
| bsd-3-clause | 3,293,931,774,307,080,000 | 34.236615 | 97 | 0.541146 | false | 3.798563 | true | false | false |
abought/osf.io | website/addons/box/views.py | 16 | 2647 | """Views for the node settings page."""
# -*- coding: utf-8 -*-
import os
import httplib as http
from box.client import BoxClient, BoxClientException
from urllib3.exceptions import MaxRetryError
from framework.exceptions import HTTPError
from website.addons.box.model import Box
from website.addons.base import generic_views
from website.addons.box.serializer import BoxSerializer
SHORT_NAME = 'box'
FULL_NAME = 'Box'
box_account_list = generic_views.account_list(
SHORT_NAME,
BoxSerializer
)
box_import_auth = generic_views.import_auth(
SHORT_NAME,
BoxSerializer
)
def _get_folders(node_addon, folder_id):
node = node_addon.owner
if folder_id is None:
return [{
'id': '0',
'path': 'All Files',
'addon': 'box',
'kind': 'folder',
'name': '/ (Full Box)',
'urls': {
'folders': node.api_url_for('box_folder_list', folderId=0),
}
}]
try:
Box(node_addon.external_account).refresh_oauth_key()
client = BoxClient(node_addon.external_account.oauth_key)
except BoxClientException:
raise HTTPError(http.FORBIDDEN)
try:
metadata = client.get_folder(folder_id)
except BoxClientException:
raise HTTPError(http.NOT_FOUND)
except MaxRetryError:
raise HTTPError(http.BAD_REQUEST)
# Raise error if folder was deleted
if metadata.get('is_deleted'):
raise HTTPError(http.NOT_FOUND)
folder_path = '/'.join(
[
x['name']
for x in metadata['path_collection']['entries']
] + [metadata['name']]
)
return [
{
'addon': 'box',
'kind': 'folder',
'id': item['id'],
'name': item['name'],
'path': os.path.join(folder_path, item['name']),
'urls': {
'folders': node.api_url_for('box_folder_list', folderId=item['id']),
}
}
for item in metadata['item_collection']['entries']
if item['type'] == 'folder'
]
box_folder_list = generic_views.folder_list(
SHORT_NAME,
FULL_NAME,
_get_folders
)
box_get_config = generic_views.get_config(
SHORT_NAME,
BoxSerializer
)
def _set_folder(node_addon, folder, auth):
uid = folder['id']
node_addon.set_folder(uid, auth=auth)
node_addon.save()
box_set_config = generic_views.set_config(
SHORT_NAME,
FULL_NAME,
BoxSerializer,
_set_folder
)
box_deauthorize_node = generic_views.deauthorize_node(
SHORT_NAME
)
box_root_folder = generic_views.root_folder(
SHORT_NAME
)
| apache-2.0 | 3,163,338,642,290,676,700 | 23.284404 | 84 | 0.595769 | false | 3.626027 | false | false | false |
raspibo/Livello1 | var/www/cgi-bin/trimlistredis.py | 1 | 3590 | #!/usr/bin/env python3
# This file displays the Redis "lists" key
#
# First, check that the key is present in the form
# Needed for the HTML handling part in Python
import cgi
import cgitb
# Enable error reporting on the web/http server
cgitb.enable()
# My libraries: mjl (Json, Files), mhl (Html), flt (T w/ Redis)
import mjl, mhl, flt
import redis
# General parameters
TestoPagina="Taglia valori da chiave \"lists\" Redis"
DirBase="/var/www"
ConfigFile=DirBase+"/conf/config.json"
#ExecFile="/cgi-bin/<exefile>"
# Redis "key"
RedisKey = "*" # All keys
# Form name/s
FormName = "rkey"
# Open the Redis database using my library's helper
MyDB = flt.OpenDBFile(ConfigFile)
# Start web page - these are blocks of HTML provided by the library
print (mhl.MyHtml())
print (mhl.MyHtmlHead())
# Write the page Title/Text
print ("<h1>","<center>",TestoPagina,"</center>","</h1>")
#print ("<hr/>","<br/>")
# Optional help/annotation
#print ("Non ho rinominato i campi e non sono stato a riordinare le voci.<br/>")
form=cgi.FieldStorage()
if FormName not in form:
print ("<h2>ERRORE: Non e` stata passata la chiave Redis</h2>")
elif "VStart" not in form:
print ("<h3>Manca il valore: Start</h3>")
elif "VStop" not in form:
print ("<h3>Manca il valore: Stop</h3>")
else:
RedisKey = cgi.escape(form[FormName].value)
print ("<b>Prima:</b>")
print ("<table>") # 2 colonne
# La prima voce non e` modificabile ed e` la chiave Redis (solo visualizzazione)
print ("<tr>")
print ("<td>")
print ("Key: ")
print ("</td>")
print ("<td>")
print (mhl.MyTextForm(FormName,RedisKey,"40","required","readonly"))
print ("</td>")
print ("</tr>")
print ("<tr>")
print ("<td>")
print ("Primo valore:")
print ("</td>")
print ("<td>")
print (str(MyDB.lindex(RedisKey,"0")))
print ("</td>")
print ("</tr>")
print ("<br/>") # Aggiungo uno spazio (una riga)
print ("<tr>")
print ("<td>")
print ("Valori:")
print ("</td>")
print ("<td>")
print (str(MyDB.llen(RedisKey)))
print ("</td>")
print ("</tr>")
print ("<br/>") # Aggiungo uno spazio (una riga)
print ("<tr>")
print ("<td>")
print ("Ultimo valore:")
print ("</td>")
print ("<td>")
print (str(MyDB.lindex(RedisKey,"-1")))
print ("</td>")
print ("</tr>")
print ("</table>")
RedisKeyStart = cgi.escape(form["VStart"].value)
RedisKeyStop = cgi.escape(form["VStop"].value)
print ("</br></br> <b>Command</b>: ltrim {0:s} {1:s} {2:s} </br></br></br>".format(RedisKey,RedisKeyStart,RedisKeyStop))
if MyDB.ltrim(RedisKey,RedisKeyStart,RedisKeyStop):
print ("<b>Dopo:</b>")
print ("<table>") # 2 colonne
# La prima voce non e` modificabile ed e` la chiave Redis (solo visualizzazione)
print ("<tr>")
print ("<td>")
print ("Key: ")
print ("</td>")
print ("<td>")
print (mhl.MyTextForm(FormName,RedisKey,"40","required","readonly"))
print ("</td>")
print ("</tr>")
print ("<tr>")
print ("<td>")
print ("Primo valore:")
print ("</td>")
print ("<td>")
print (str(MyDB.lindex(RedisKey,"0")))
print ("</td>")
print ("</tr>")
print ("<br/>") # Aggiungo uno spazio (una riga)
print ("<tr>")
print ("<td>")
print ("Valori:")
print ("</td>")
print ("<td>")
print (str(MyDB.llen(RedisKey)))
print ("</td>")
print ("</tr>")
print ("<br/>") # Aggiungo uno spazio (una riga)
print ("<tr>")
print ("<td>")
print ("Ultimo valore:")
print ("</td>")
print ("<td>")
print (str(MyDB.lindex(RedisKey,"-1")))
print ("</td>")
print ("</tr>")
print ("</table>")
# End web page
print (mhl.MyHtmlBottom())
| mit | -8,274,615,101,611,687,000 | 22.774834 | 121 | 0.612256 | false | 2.558803 | false | true | false |
hazelcast/hazelcast-python-client | hazelcast/protocol/codec/custom/sql_query_id_codec.py | 1 | 2446 | from hazelcast.protocol.builtin import FixSizedTypesCodec, CodecUtil
from hazelcast.serialization.bits import *
from hazelcast.protocol.client_message import END_FRAME_BUF, END_FINAL_FRAME_BUF, SIZE_OF_FRAME_LENGTH_AND_FLAGS, create_initial_buffer_custom
from hazelcast.sql import _SqlQueryId
_MEMBER_ID_HIGH_ENCODE_OFFSET = 2 * SIZE_OF_FRAME_LENGTH_AND_FLAGS
_MEMBER_ID_HIGH_DECODE_OFFSET = 0
_MEMBER_ID_LOW_ENCODE_OFFSET = _MEMBER_ID_HIGH_ENCODE_OFFSET + LONG_SIZE_IN_BYTES
_MEMBER_ID_LOW_DECODE_OFFSET = _MEMBER_ID_HIGH_DECODE_OFFSET + LONG_SIZE_IN_BYTES
_LOCAL_ID_HIGH_ENCODE_OFFSET = _MEMBER_ID_LOW_ENCODE_OFFSET + LONG_SIZE_IN_BYTES
_LOCAL_ID_HIGH_DECODE_OFFSET = _MEMBER_ID_LOW_DECODE_OFFSET + LONG_SIZE_IN_BYTES
_LOCAL_ID_LOW_ENCODE_OFFSET = _LOCAL_ID_HIGH_ENCODE_OFFSET + LONG_SIZE_IN_BYTES
_LOCAL_ID_LOW_DECODE_OFFSET = _LOCAL_ID_HIGH_DECODE_OFFSET + LONG_SIZE_IN_BYTES
_INITIAL_FRAME_SIZE = _LOCAL_ID_LOW_ENCODE_OFFSET + LONG_SIZE_IN_BYTES - 2 * SIZE_OF_FRAME_LENGTH_AND_FLAGS
class SqlQueryIdCodec(object):
@staticmethod
def encode(buf, sql_query_id, is_final=False):
initial_frame_buf = create_initial_buffer_custom(_INITIAL_FRAME_SIZE)
FixSizedTypesCodec.encode_long(initial_frame_buf, _MEMBER_ID_HIGH_ENCODE_OFFSET, sql_query_id.member_id_high)
FixSizedTypesCodec.encode_long(initial_frame_buf, _MEMBER_ID_LOW_ENCODE_OFFSET, sql_query_id.member_id_low)
FixSizedTypesCodec.encode_long(initial_frame_buf, _LOCAL_ID_HIGH_ENCODE_OFFSET, sql_query_id.local_id_high)
FixSizedTypesCodec.encode_long(initial_frame_buf, _LOCAL_ID_LOW_ENCODE_OFFSET, sql_query_id.local_id_low)
buf.extend(initial_frame_buf)
if is_final:
buf.extend(END_FINAL_FRAME_BUF)
else:
buf.extend(END_FRAME_BUF)
@staticmethod
def decode(msg):
msg.next_frame()
initial_frame = msg.next_frame()
member_id_high = FixSizedTypesCodec.decode_long(initial_frame.buf, _MEMBER_ID_HIGH_DECODE_OFFSET)
member_id_low = FixSizedTypesCodec.decode_long(initial_frame.buf, _MEMBER_ID_LOW_DECODE_OFFSET)
local_id_high = FixSizedTypesCodec.decode_long(initial_frame.buf, _LOCAL_ID_HIGH_DECODE_OFFSET)
local_id_low = FixSizedTypesCodec.decode_long(initial_frame.buf, _LOCAL_ID_LOW_DECODE_OFFSET)
CodecUtil.fast_forward_to_end_frame(msg)
return _SqlQueryId(member_id_high, member_id_low, local_id_high, local_id_low)
| apache-2.0 | 2,555,215,645,692,374,500 | 60.15 | 142 | 0.729763 | false | 2.915375 | false | false | false |
python-xmp-toolkit/python-xmp-toolkit | libxmp/files.py | 1 | 7164 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2008-2009, European Space Agency & European Southern
# Observatory (ESA/ESO)
# Copyright (c) 2008-2009, CRS4 - Centre for Advanced Studies, Research and
# Development in Sardinia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of the European Space Agency, European Southern
# Observatory, CRS4 nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY ESA/ESO AND CRS4 ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL ESA/ESO BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
"""
The Files module provides support for locating the XMP in a file, adding XMP to
a file, or updating the XMP in a file. It returns the entire XMP packet; the
core package can then be used to manipulate the individual XMP properties.
:class:`XMPFiles` contains a number of "smart" file handlers that know how to
efficiently access the XMP in specific file formats. It also includes a
fallback packet scanner that can be used for unknown file formats.
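
As a quick orientation, here is a minimal usage sketch; the file name
'photo.jpg' is only an illustrative assumption::

    from libxmp.files import XMPFiles

    xmpfile = XMPFiles(file_path='photo.jpg')
    xmp = xmpfile.get_xmp()        # libxmp.core.XMPMeta instance, or None
    # ... read or modify the XMP packet here ...
    if xmp is not None and xmpfile.can_put_xmp(xmp):
        xmpfile.put_xmp(xmp)       # stage the packet for writing
    xmpfile.close_file()           # XMP is only written back on close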
"""
import os
import sys
from . import XMPError, XMPMeta
from .consts import options_mask
from .consts import XMP_CLOSE_NOOPTION
from .consts import XMP_OPEN_OPTIONS
from .consts import XMP_OPEN_NOOPTION
from . import exempi as _cexempi
__all__ = ['XMPFiles']
class XMPFiles(object):
"""API for access to the "main" metadata in a file.
XMPFiles provides the API for the Exempi's File Handler component. This
provides convenient access to the main, or document level, XMP for a file.
The general model is to open a file, read and write the metadata, then
close the file. While open, portions of the file might be maintained in RAM
data structures. Memory usage can vary considerably depending on file
format and access options. The file may be opened for read-only or
read-write access, with typical exclusion for both modes.
Errors result in raising of an :exc:`libxmp.XMPError` exception.
:keyword file_path: Path to file to open.
.. todo::
Documentation
"""
def __init__(self, **kwargs ):
self._file_path = None
self.xmpfileptr = _cexempi.files_new()
if 'file_path' in kwargs:
file_path = kwargs['file_path']
del kwargs['file_path']
self.open_file( file_path, **kwargs )
def __repr__(self):
if self._file_path is None:
return "XMPFiles()"
msg = "XMPFiles(file_path='{0}')"
if sys.hexversion < 0x03000000 and isinstance(self._file_path,
unicode):
# Unicode filenames can cause trouble in python2 because __repr__
# must return byte strings, not unicode. Get around this by
# turning the unicode filename into escaped ASCII. This means that
# in this case, the result cannot be used to recreate the object
# with the same value.
msg = msg.format(repr(self._file_path))
else:
# Python3 does not suffer from this problem.
msg = msg.format(self._file_path)
return msg
def __del__(self):
"""
Free up the memory associated with the XMP file instance.
"""
_cexempi.files_free( self.xmpfileptr )
def open_file(self, file_path, **kwargs ):
"""
Open a given file and read XMP from file. File must be closed again with
:func:`close_file`
:param str file_path: Path to file to open.
:raises XMPError: in case of errors.
.. todo::
Change signature into using kwargs to set option flag
"""
if kwargs:
open_flags = options_mask( XMP_OPEN_OPTIONS, **kwargs )
else:
open_flags = XMP_OPEN_NOOPTION
if self._file_path != None:
raise XMPError('A file is already open - close it first.')
_cexempi.files_open( self.xmpfileptr, file_path, open_flags )
self._file_path = file_path
def close_file( self, close_flags=XMP_CLOSE_NOOPTION):
"""
Close file after use. XMP will not be written to file until
this method has been called.
:param close_flags: One of the close flags
:raises XMPError: in case of errors.
.. todo::
Change signature into using kwargs to set option flag
"""
_cexempi.files_close( self.xmpfileptr, close_flags )
self._file_path = None
def get_xmp( self ):
"""
Get XMP from file.
:return: A new :class:`libxmp.core.XMPMeta` instance.
:raises XMPError: in case of errors.
"""
xmpptr = _cexempi.files_get_new_xmp(self.xmpfileptr)
if xmpptr:
return XMPMeta( _xmp_internal_ref = xmpptr )
else:
return None
def put_xmp(self, xmp_obj):
"""
Write XMPMeta object to file. See also :func:`can_put_xmp`.
:param xmp_obj: An :class:`libxmp.core.XMPMeta` object
"""
xmpptr = xmp_obj.xmpptr
if not self.can_put_xmp(xmp_obj):
msg = 'Cannot write XMP packet into {filename}'
msg = msg.format(filename=os.path.basename(self._file_path))
raise XMPError(msg)
_cexempi.files_put_xmp(self.xmpfileptr, xmpptr)
def can_put_xmp( self, xmp_obj ):
"""Determine if XMP can be written into the file.
Determines if a given :class:`libxmp.core.XMPMeta` object can be
written into the file.
:param xmp_obj: An :class:`libxmp.core.XMPMeta` object
:return: true if :class:`libxmp.core.XMPMeta` object writeable to file.
:rtype: bool
"""
if not isinstance( xmp_obj, XMPMeta ):
raise XMPError('Not a XMPMeta object')
xmpptr = xmp_obj.xmpptr
if xmpptr != None:
return _cexempi.files_can_put_xmp(self.xmpfileptr, xmpptr)
else:
return False
| bsd-3-clause | 8,964,087,153,198,126,000 | 36.705263 | 80 | 0.649079 | false | 3.882927 | false | false | false |
fdouetteau/PyBabe | pybabe/charset.py | 1 | 3016 |
import codecs
import csv
import cStringIO
import datetime
## From samples in http://docs.python.org/library/csv.html
class UTF8Recoder(object):
"""
Iterator that reads an encoded stream and reencodes the input to UTF-8
Made mandatory by the csv module operating only on 'str'
"""
def __init__(self, f, encoding):
self.reader = codecs.getreader(encoding)(f)
def __iter__(self):
return self
def next(self):
return self.reader.next().encode("utf-8")
class UTF8RecoderWithCleanup(UTF8Recoder):
"Rencode a stream in utf-8, with 'charset' clenaup algorithm in the middle"
def __init__(self, f, encoding):
super(UTF8RecoderWithCleanup, self).__init__(f, encoding)
from encoding_cleaner import get_map_table
(regex, m) = get_map_table(encoding, 'latin1')
self.regex = regex
self.m = m
def next(self):
u = self.reader.next()
tu = self.regex.sub(lambda g: self.m[g.group(0)], u)
return tu.encode('utf-8')
class PrefixReader(object):
def __init__(self, prefix, stream, linefilter):
self.prefix = prefix
self.stream = stream
self.linefilter = linefilter
def __iter__(self):
linefilter = self.linefilter
if linefilter:
if linefilter(self.prefix):
yield self.prefix
for k in self.stream:
if linefilter(k):
yield k
else:
yield self.prefix
for k in self.stream:
yield k
def write_value(s):
if isinstance(s, unicode):
return s.encode('utf-8')
elif isinstance(s, datetime.datetime):
# Remove timezone
return s.strftime('%Y-%m-%d %H:%M:%S')
else:
return s
class UnicodeCSVWriter:
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
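
A minimal usage sketch (the file name and row values below are illustrative
assumptions only)::

    with open('out.csv', 'wb') as f:
        writer = UnicodeCSVWriter(f, encoding='utf-8')
        writer.writerow([u'id', u'name'])
        writer.writerows([[1, u'caf\xe9'], [2, u'na\xefve']])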
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
# Redirect output to a queue
self.queue = cStringIO.StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
if encoding == 'utf_16_le':
self.stream.write(codecs.BOM_UTF16_LE)
elif encoding == 'utf_16_be':
self.stream.write(codecs.BOM_UTF16_BE)
elif encoding == 'utf_16':
self.stream.write(codecs.BOM_UTF16)
self.encoder = codecs.getincrementalencoder(encoding)()
def writerow(self, row):
self.writer.writerow(map(write_value, row))
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
def writerows(self, rows):
for row in rows:
self.writerow(row)
| bsd-3-clause | 1,657,490,347,166,072,600 | 28.281553 | 79 | 0.590849 | false | 3.827411 | false | false | false |
ResearchSoftwareInstitute/MyHPOM | theme/tests/functional.py | 1 | 5026 | import time
from django.core.urlresolvers import reverse
from selenium import webdriver
from selenium.webdriver.common.by import By
from theme.tests.multiplatform import SeleniumTestsParentClass, create_driver
class DesktopTests(SeleniumTestsParentClass.MultiPlatformTests):
def setUp(self, driver=None):
super(DesktopTests, self).setUp()
self.driver = driver
if not driver:
self.driver = create_driver('desktop')
self.driver.get(self.live_server_url)
def _logout_helper(self):
self.driver.get(self.live_server_url)
self.wait_for_visible(By.CSS_SELECTOR, '#profile-menu .dropdown-toggle').click()
self.wait_for_visible(By.CSS_SELECTOR, '#signout-menu').click()
def test_login_email(self):
super(DesktopTests, self).test_login_email()
# home page: returned after successful login with profile info in dropdown
profile_dropdown_selector = 'li[id="profile-menu"] a[class="dropdown-toggle"]'
self.wait_for_visible(By.CSS_SELECTOR, profile_dropdown_selector).click()
email = self.wait_for_visible(By.CSS_SELECTOR, '#profile-menu-email').text
self.assertEquals(self.user.email, email)
full_name = self.wait_for_visible(By.CSS_SELECTOR, '#profile-menu-fullname').text
self.assertTrue(self.user.first_name in full_name)
self.assertTrue(self.user.last_name in full_name)
def test_show_login_link_desktop(self):
self.driver.get(self.live_server_url)
self.wait_for_visible(By.CSS_SELECTOR, '#signin-menu')
def test_folder_drag(self):
self._login_helper(self.user.email, self.user_password)
self._create_resource_helper()
self.wait_for_visible(By.CSS_SELECTOR, '#edit-metadata').click()
# Find the files area and click button to create new folder
self.wait_for_visible(By.CSS_SELECTOR, '.fb-file-name').location_once_scrolled_into_view
time.sleep(1.5)
self.wait_for_visible(By.CSS_SELECTOR, '.fb-file-name').click()
self.wait_for_visible(By.CSS_SELECTOR, '#fb-create-folder').click()
# Fill in new folder modal
self.wait_for_visible(By.CSS_SELECTOR, '#txtFolderName').send_keys('Button Folder')
self.wait_for_visible(By.CSS_SELECTOR, '#btn-create-folder').click()
# TODO: try context click for creating a new folder
# drag and drop into new folder
folder_drag_dest = self.wait_for_visible(By.CSS_SELECTOR, '.fb-folder')
file_to_drag = self.wait_for_visible(By.CSS_SELECTOR, '.fb-file')
action_chain = webdriver.ActionChains(self.driver)
action_chain.drag_and_drop(file_to_drag, folder_drag_dest).perform()
time.sleep(1.5)
# Enter new folder and verify contents
self.wait_for_visible(By.CSS_SELECTOR, '#fb-files-container').click()
# Create a mouse down (not click) event on the folder in order to select
# prior to sending the double click.
time.sleep(2.5)
self.driver.find_element_by_css_selector('#hs-file-browser li.fb-folder').click()
self.driver.execute_script('$("#hs-file-browser li.fb-folder").dblclick()')
active_folder_in_crumbs = '//li[@class="active"]/span[contains(text(),"Button Folder")]'
self.wait_for_visible(By.XPATH, active_folder_in_crumbs)
self.assertEqual(self.driver.find_element_by_class_name('fb-file-name').text, 'file.png')
class MobileTests(SeleniumTestsParentClass.MultiPlatformTests):
def setUp(self, driver=None):
super(MobileTests, self).setUp()
self.driver = driver
if not driver:
self.driver = create_driver('mobile')
self.driver.get(self.live_server_url)
def _open_nav_menu_helper(self):
if self.wait_for_visible(By.CSS_SELECTOR, 'ul.navbar-nav', except_fail=False):
return
self.wait_for_visible(By.CSS_SELECTOR, 'button.navbar-toggle').click()
self.wait_for_visible(By.CSS_SELECTOR, 'ul.navbar-nav')
def _login_helper(self, login_name, user_password):
self._open_nav_menu_helper()
super(MobileTests, self)._login_helper(login_name, user_password)
self._open_nav_menu_helper()
def _logout_helper(self):
self._open_nav_menu_helper()
self.wait_for_visible(By.CSS_SELECTOR, 'a[href="{}"]'.format(reverse('logout')))
def test_register_account(self):
self.driver.get(self.live_server_url)
self._open_nav_menu_helper()
super(MobileTests, self).test_register_account()
def test_show_login_link_mobile(self):
self.driver.get(self.live_server_url)
desktop_login = self.driver.find_element_by_css_selector('#signin-menu')
mobile_login = self.driver.find_element_by_css_selector('li.visible-xs a')
self.assertFalse(desktop_login.is_displayed())
self.assertFalse(mobile_login.is_displayed())
self._open_nav_menu_helper()
self.assertTrue(mobile_login.is_displayed())
| bsd-3-clause | -6,951,728,978,178,363,000 | 43.087719 | 97 | 0.667927 | false | 3.524544 | true | false | false |
sparkslabs/kamaelia_ | Sketches/MPS/BugReports/FixTests/Kamaelia/Examples/OpenGL/MiniExamples/SimpleCube.py | 6 | 1357 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
from Kamaelia.Util.Graphline import Graphline
from Kamaelia.UI.OpenGL.SimpleCube import SimpleCube
Graphline(
CUBEC = SimpleCube(position=(0, 0,-12), rotation=(225,45,135), size=(1,1,1)).activate(),
CUBER = SimpleCube(position=(4,0,-22), size=(2,2,2)).activate(),
CUBEB = SimpleCube(position=(0,-4,-18), rotation=(0,180,20), size=(1,3,2)).activate(),
linkages = {}
).run()
# Licensed to the BBC under a Contributor Agreement: THF
| apache-2.0 | -6,773,426,909,429,491,000 | 41.40625 | 92 | 0.680914 | false | 3.609043 | false | false | false |
jk977/twitch-plays | bot/interfaces/emuinput.py | 1 | 4275 | import config
import os
import re
from . import *
from interfaces.validator import Validator
class EmuInput(Validator):
'''
Base class for emulator inputs. Children classes only need to implement functions
_validate_content and _validate_count to return true when the respective fields are valid,
and may optionally define a delimiter other than '*' and a destination path other than
project_root/game.
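
For illustration only, a subclass might look like the following sketch
(the class name and the validation rules are assumptions, not part of the
project)::

    class ButtonInput(EmuInput):
        @staticmethod
        def _validate_content(content):
            return content in ('a', 'b', 'start', 'select')

        @staticmethod
        def _validate_count(count):
            return 0 < count <= 10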
'''
delimiter = '*'
path = config.data_dir
def __init__(self, content, count=1):
content = str(content)
count = int(count)
if not type(self)._validate_count(count):
raise ValueError('Invalid count "{}".'.format(count))
elif not type(self)._validate_content(content):
raise ValueError('Invalid content "{}".'.format(content))
self._content = content
self._count = count
def __eq__(self, other):
return (isinstance(other, type(self)) and
self.count == other.count and
self.content == other.content)
def __hash__(self):
return hash((self.content, self.count))
def __str__(self):
return self.content + str(self.count)
@abstractstaticmethod
def _validate_content(content):
pass
@abstractstaticmethod
def _validate_count(count):
pass
@classmethod
def _parse_content(cls, message):
'''
Retrieves content portion of input.
:param cls: Current class.
:param message: Message to parse.
'''
message = message.lower()
if cls.delimiter in message:
result = message.split(cls.delimiter)[0]
else:
result = re.sub('\\d+$', '', message)
if not cls._validate_content(result):
raise ValueError('Invalid content "{}".'.format(result))
return result
@classmethod
def _parse_count(cls, message):
'''
Retrieves count portion of input.
:param cls: Current class.
:param message: Message to parse.
:returns: int
'''
if cls.delimiter in message:
result = message.split(cls.delimiter)[1]
else:
match = re.search('\\d+$', message)
result = match.group(0) if match else 1
result = int(result)
if not cls._validate_count(result):
raise ValueError('Invalid count "{}".'.format(result))
return int(result)
@property
def content(self):
return self._content
@property
def count(self):
return self._count
@property
def destination(self):
cls = type(self)
if not getattr(cls, '_filename', None):  # raise the intended error even when a subclass defines no _filename
raise NotImplementedError('Class does not define a destination file in {}._filename.'.format(cls.__name__))
return os.path.join(type(self)._location, cls._filename)
@classmethod
def condense(cls, inputs):
'''
Condenses list of inputs into equivalent list with identical consecutive inputs
merged into one, then returns condensed list.
:param inputs: List of inputs to condense.
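
For example, assuming a hypothetical ``Button`` subclass, condensing
``[Button('a', 2), Button('a', 3), Button('b', 1)]`` yields
``[Button('a', 5), Button('b', 1)]`` (the subclass name is an assumption).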
'''
inputs = list(inputs) # in case of immutable tuple
changed = True
while changed:
changed = False
for i in range(1, len(inputs)):
in1 = inputs[i - 1]
in2 = inputs[i]
if in1.content == in2.content:
count = in1.count + in2.count
button = cls(in1.content, count)
inputs[i - 1] = None
inputs[i] = button
changed = True
inputs = [i for i in inputs if i]
return inputs
def serialize(self):
'''
Serializes input to send to NES.
'''
return self.delimiter.join((str(x) for x in [self.content, self.count]))
@classmethod
def deserialize(cls, serialized):
'''
Deserializes serialized input.
:param cls: Current class.
:param serialized: The serialized input.
:returns: EmuInput object
'''
content = cls._parse_content(serialized)
count = cls._parse_count(serialized)
return cls(content, count) | gpl-3.0 | 2,922,635,052,704,346,000 | 27.506667 | 119 | 0.573099 | false | 4.543039 | false | false | false |
enoodle/virt-deploy | setup.py | 2 | 1268 | #
# Copyright 2015 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from setuptools import find_packages
from setuptools import setup
setup(
name='virt-deploy',
description='Standardized deployment of virtual machines',
author='Federico Simoncelli',
author_email='[email protected]',
url='https://github.com/simon3z/virt-deploy',
version='0.1.7',
packages=find_packages(),
entry_points={
'console_scripts': [
'virt-deploy = virtdeploy.cli:main',
]
},
)
| gpl-2.0 | -5,959,320,822,127,086,000 | 32.368421 | 78 | 0.722397 | false | 4 | false | false | false |
escudocloud/django_proxy_swift | proxy_server/enc_swift/simpleKeystoneClient.py | 1 | 2290 | #!/usr/bin/env python
from keystoneclient.exceptions import NotFound, Conflict
from keystoneauth1.identity import v3
from keystoneclient.auth import identity
from keystoneclient import session
from keystoneclient.v3 import client
from myLogger import *
class SimpleKeystoneClient:
"""
Add enc functions to creation request
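
A minimal usage sketch (credentials, names and the auth URL below are
illustrative assumptions only)::

    client = SimpleKeystoneClient('admin', 'secret', 'admin',
                                  'http://controller:5000/v3')
    tenant = client.create_tenant('demo')
    user = client.create_user('alice', 'alice_pass', 'demo')
    role = client.create_role('member')
    client.add_user_role(user, role, tenant)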
"""
def __init__(self, admin_user, admin_pass, admin_tenant, auth_url):
auth = v3.Password(auth_url=auth_url, username=admin_user,
password=admin_pass, project_name=admin_tenant,
user_domain_id="default", project_domain_id="default")
sess = session.Session(auth=auth)
self.ks_client = client.Client(session=sess)
def create_tenant(self, name, **kwargs):
try:
project = self.ks_client.projects.find(name=name)
logger.info('Project %s exists [id: %s].' % (name, project.id))
except NotFound:
project = self.ks_client.projects.create(name=name, domain="default",**kwargs)
logger.info('Project %s created [id: %s].' % (name, project.id))
return project
def create_user(self, name, password, tenant_name, **kwargs):
try:
user = self.ks_client.users.find(name=name)
logger.info('User %s exists (password unchanged).' % name)
except NotFound:
tenant = self.create_tenant(tenant_name)
user = self.ks_client.users.create(name=name, password=password,
tenant_id=tenant.id, **kwargs)
logger.info('User %s created.' % name)
return user
def create_role(self, role_name, **kwargs):
try:
role = self.ks_client.roles.find(name=role_name)
logger.info('Role %s exists.' % role_name)
except NotFound:
role = self.ks_client.roles.create(role_name, **kwargs)
logger.info('Role %s created.' % role_name)
return role
def add_user_role(self, user, role, tenant, **kwargs):
try:
self.ks_client.roles.grant(user=user, role =role, project =tenant,**kwargs)
logger.info('Role given to user.')
except Conflict:
logger.info('User already has the requested role.')
| apache-2.0 | -7,565,121,562,511,033,000 | 39.892857 | 90 | 0.608734 | false | 3.927959 | false | false | false |
2franix/homewatcher | homewatcher/configuration.py | 1 | 63634 | #!/usr/bin/python3
# Copyright (C) 2014-2017 Cyrille Defranoux
#
# This file is part of Homewatcher.
#
# Homewatcher is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Homewatcher is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Homewatcher. If not, see <http://www.gnu.org/licenses/>.
#
# For any question, feature requests or bug reports, feel free to contact me at:
# knx at aminate dot net
from homewatcher import ensurepyknx
from pyknx import logger
import xml.dom.minidom
import os.path
import itertools
import re
from functools import cmp_to_key
class Property(object):
"""
Represents a property of an object which is part of the configuration.
A Property is an atomic piece of data that composes the object.
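
For illustration, a property definition might look like the following sketch
(the property name and type are assumptions, not taken from the actual
homewatcher configuration)::

    nameProperty = Property('name', str, Property.XMLEntityTypes.ATTRIBUTE,
                            isUnique=True)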
"""
class XMLEntityTypes(object):
ATTRIBUTE = 1 << 0
CHILD_ELEMENT = 1 << 1
INNER_TEXT = 1 << 2
def __init__(self, name, type, xmlEntityType, namesInXML=None, groupNameInXML = None, isCollection=False, isUnique=False, values=None, getter=None):
# Use property's name as name in XML by default.
if namesInXML == None: namesInXML = name
# Names in XML must always be iterable.
if isinstance(namesInXML, str): namesInXML = (namesInXML,)
self.name = name
self.type = type
self.namesInXML = namesInXML # Names of the attribute or child element in xml when applicable, None if this property does not come from a single attribute.
self.groupNameInXML = groupNameInXML
self.xmlEntityType = xmlEntityType
self.isCollection = isCollection # Whether this property is a collection of values.
self.isUnique = isUnique
self.values = values # Collection of possible values. May be a callable (configuration object and property's owner object are passed as arguments). If None, no restriction on values.
self.getter = getter # Optional method to call to retrieve property value. If set to None, the owner object's field named the same as this property is used.
def isOfPrimitiveType(self):
return self.type in (str, int, float, bool)
def isOfClassType(self):
return not self.isOfPrimitiveType()
def isDefinedOn(self, object):
return self.name in vars(object) and vars(object)[self.name] != None
def checkValue(self, configuration, object, value, collectedValues):
if self.isCollection:
if not isinstance(value, list):
raise Configuration.IntegrityException('A list is expected.')
values = value
else:
values = [value]
acceptableValues = self.getAcceptablesValues(configuration, object)
if self.type == str:
acceptableTypes = (str,)
elif self.type == float:
# Accept int too!
acceptableTypes = (self.type, int)
else:
acceptableTypes = (self.type,)
for v in values:
if v == None: continue
if not isinstance(v, acceptableTypes):
raise Configuration.IntegrityException('A value of type {0} was expected, \"{1}\" of type {2} found.'.format(acceptableTypes, v, type(v)))
if acceptableValues != None and not v in acceptableValues:
raise Configuration.IntegrityException('A value in {0} is expected, {1} found.'.format(acceptableValues, v))
# Is this value unique?
if self.isUnique and self.name in collectedValues and v in collectedValues[self.name]:
raise Configuration.IntegrityException('Value {0} is already assigned to another object.'.format(v))
# Collect this value.
if not self.name in collectedValues:
collectedValues[self.name] = []
collectedValues[self.name].append(v)
def getAcceptablesValues(self, configuration, object):
if self.values == None: return None
if callable(self.values):
return self.values(configuration, object)
else:
return self.values
def getValueFor(self, object, config):
if not self.isDefinedOn(object): return None
if self.getter == None:
return vars(object)[self.name]
else:
return self.getter(object, config)
def checkObjectIntegrity(self, configuration, object, collectedValues):
if not self.isDefinedOn(object): return
value = self.getValueFor(object, configuration)
try:
self.checkValue(configuration, object, value, collectedValues)
except Configuration.IntegrityException as e:
raise Configuration.IntegrityException('Property {0} is invalid: {1}'.format(self, e), problematicObject=object)
if self.isOfClassType():
if hasattr(self.type, 'PROPERTY_DEFINITIONS'):
self.type.PROPERTY_DEFINITIONS.checkIntegrity(configuration, value)
def clone(self, source, destination):
if self.name in vars(source):
if vars(source)[self.name] == None:
vars(destination)[self.name] = None
return
copyProperty = lambda p: p if self.isOfPrimitiveType() else p.copy()
if self.isCollection:
vars(destination)[self.name] = []
for prop in vars(source)[self.name]:
vars(destination)[self.name].append(copyProperty(prop))
else:
vars(destination)[self.name] = copyProperty(vars(source)[self.name])
def fromXML(self, xmlElement):
# Scan sources for this property.
sources = []
for nameInXML in self.namesInXML:
if self.xmlEntityType & Property.XMLEntityTypes.ATTRIBUTE != 0:
attributeValue = Configuration.getXmlAttribute(xmlElement, nameInXML, None)
if attributeValue != None:
sources.append(attributeValue)
if self.xmlEntityType & Property.XMLEntityTypes.CHILD_ELEMENT != 0:
sources += Configuration.getElementsInConfig(xmlElement, nameInXML, self.groupNameInXML)
if self.xmlEntityType & Property.XMLEntityTypes.INNER_TEXT != 0:
sources.append(Configuration.getTextInElement(xmlElement, mustFind=False))
values = []
for source in sources:
if source == None: continue
if self.isOfPrimitiveType():
# Property type is a primitive type, let's get a string from
# source.
if not isinstance(source, str):
# Source is assumed to be an xml element.
sourceStr = Configuration.getTextInElement(source, mustFind = True)
else:
sourceStr = source
if self.type == str:
values.append(sourceStr)
elif self.type == int:
values.append(int(sourceStr))
elif self.type == float:
values.append(float(sourceStr))
elif self.type == bool:
if sourceStr.lower() == 'true':
values.append(True)
elif sourceStr.lower() == 'false':
values.append(False)
else:
raise Configuration.IntegrityException('Property {0}={1} is not a boolean constant. Expecting {{true, false}}, case insensitive.'.format(self, sourceStr), xmlContext=xmlElement.toxml())
else:
# Type corresponds to a class.
if isinstance(source, str):
values.append(self.type.fromString(source))
else:
# Call the static method "fromXML" if present. Otherwise,
# run the predefined behaviour.
if hasattr(self.type, 'fromXML') and callable(self.type.fromXML):
newPropertyValue = self.type.fromXML(source)
else:
# Create a default instance.
try:
newPropertyValue = self.type()
if hasattr(self.type, 'PROPERTY_DEFINITIONS'):
self.type.PROPERTY_DEFINITIONS.readObjectFromXML(newPropertyValue, source)
except:
# logger.reportException('Type {type} has neither static fromXML(xmlElement) nor __init__() method. At least one is required to parse it properly.'.format(type=self.type))
raise
# Assign attributes from XML.
if hasattr(newPropertyValue, 'attributes'):
for k, v in source.attributes.items():
newPropertyValue.attributes[k] = v
values.append(newPropertyValue)
if not values: return None
if self.isCollection:
return values
else:
if len(values) > 1:
raise Configuration.IntegrityException('Property {0} is not a collection, it must have a single value.'.format(self), xmlContext=xmlElement.toxml())
return values[0]
def toXml(self, config, propertyOwner, xmlDoc, xmlElement):
# Create group if necessary.
if self.groupNameInXML != None:
group = next(Configuration.getElementsInConfig(xmlElement, self.groupNameInXML, None), None)
if not group:
group = xmlDoc.createElement(self.groupNameInXML)
xmlElement.appendChild(group)
xmlElement = group
value = self.getValueFor(propertyOwner, config)
# Make sure the remainder of this method works on a collection of values.
values = value if isinstance(value, list) else [value]
for value in values:
if hasattr(value, 'toXml') and callable(value.toXml):
# Use the instance toXml() method.
value.toXml(config, self, propertyOwner, xmlDoc, xmlElement)
else:
# Format property using its inner properties.
logger.reportDebug('toXml for {0} on {1}'.format(self, propertyOwner))
if self.xmlEntityType & Property.XMLEntityTypes.ATTRIBUTE != 0:
valueStr = str(value)
xmlElement.setAttribute(self.namesInXML[0], valueStr)
elif self.xmlEntityType & Property.XMLEntityTypes.CHILD_ELEMENT != 0:
childNode = xmlDoc.createElement(self.namesInXML[0])
if self.isOfPrimitiveType():
textNode = xmlDoc.createTextNode(str(value))
childNode.appendChild(textNode)
else:
childNode = xmlDoc.createElement(self.namesInXML[0])
type(value).PROPERTY_DEFINITIONS.toXml(config, value, xmlDoc, childNode)
xmlElement.appendChild(childNode)
elif self.xmlEntityType & Property.XMLEntityTypes.INNER_TEXT != 0:
textNode = xmlDoc.createTextNode(str(value))
xmlElement.appendChild(textNode)
def __repr__(self):
s = self.name
attributeOrChild = ''
if self.xmlEntityType & Property.XMLEntityTypes.ATTRIBUTE != 0:
attributeOrChild = 'attribute'
if self.xmlEntityType & Property.XMLEntityTypes.CHILD_ELEMENT != 0:
if attributeOrChild: attributeOrChild += ' or '
attributeOrChild += 'element'
if self.xmlEntityType & Property.XMLEntityTypes.INNER_TEXT != 0:
if attributeOrChild: attributeOrChild += ' or '
attributeOrChild += 'inner text'
if len(self.namesInXML) > 1:
plural = 's'
namesInXML = self.namesInXML
else:
plural = ''
namesInXML = self.namesInXML[0]
s += ' (cf. the "{namesInXML}" {attributeOrChild}{plural} in XML)'.format(attributeOrChild=attributeOrChild, namesInXML=namesInXML, plural=plural)
return s
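# Illustrative sketch (not part of the original code): declaring and reading a
# hypothetical integer property. The property and attribute names are assumptions.
#   countProperty = Property('count', type=int,
#                            xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE)
#   # Given an element such as <item count="3"/>:
#   #   countProperty.fromXML(itemElement) would return the integer 3.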
class PropertyGroup(object):
""" Group properties that must be considered simultaneously when determining whether they are mandatory or not.
If the group is mandatory, the configuration is full of integrity as long as at least one of the group's properties is defined. """
class GroupUseContext(object):
def __init__(self, configuration, object):
self.configuration = configuration
self.object = object
def __init__(self, properties, isMandatory):
self.properties = properties
self.isMandatoryCallable = isMandatory if callable(isMandatory) else lambda context: isMandatory
def isMandatory(self, object):
return self.isMandatoryCallable(object)
def checkObjectIntegrity(self, configuration, object, collectedValues):
isDefined = False
for prop in self.properties:
prop.checkObjectIntegrity(configuration, object, collectedValues)
isDefined |= prop.isDefinedOn(object)
if self.isMandatory(PropertyGroup.GroupUseContext(configuration, object)) and not isDefined:
if len(self.properties) == 1:
raise Configuration.IntegrityException('"{0}" should define the property {1}.'.format(object, self.properties[0]), problematicObject=object)
else:
raise Configuration.IntegrityException('"{0}" should define at least one of the properties {1}.'.format(object, self.properties), problematicObject=object)
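# Sketch of the group semantics (illustrative; 'hostProp'/'portProp' mirror the
# properties declared on LinknxService further below):
#   group = PropertyGroup([hostProp, portProp], isMandatory=True)
#   # checkObjectIntegrity raises only if neither 'host' nor 'port' is defined on
#   # the object; defining either one satisfies the mandatory group.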
class PropertyCollection(object):
""" Collection of properties stored in groups with an associated mandatory status. """
def __init__(self):
self.propertyGroups = []
self.ignoreCheckIntegrityCallable = lambda object: False
def addProperty(self, propertyName, isMandatory, type, xmlEntityType, namesInXML=None, groupNameInXML = None, isCollection=False, isUnique=False, values=None, getter=None):
self.propertyGroups.append(PropertyGroup([Property(name=propertyName, type=type, xmlEntityType = xmlEntityType, namesInXML=namesInXML, groupNameInXML=groupNameInXML, isCollection=isCollection, isUnique=isUnique, values=values, getter=getter)], isMandatory))
def addPropertyGroup(self, properties, isGroupMandatory = True):
group = PropertyGroup(properties[:], isGroupMandatory)
self.propertyGroups.append(group)
def cloneProperties(self, source, destination):
for propDef in self.properties:
propDef.clone(source, destination)
@property
def properties(self):
return itertools.chain(*[group.properties for group in self.propertyGroups])
def getProperty(self, propertyName):
for group in self.propertyGroups:
prop = [p for p in group.properties if p.name == propertyName]
if prop:
return prop[0]
raise Exception('No property {0} found in group {1}.'.format(propertyName, self))
def readObjectFromXML(self, object, xmlElement):
object.xmlSource = xmlElement.toxml()
for prop in self.properties:
if prop.namesInXML != None:
value = prop.fromXML(xmlElement)
if value is None:
if prop.isDefinedOn(object):
# We are better off keeping the current value than
# overriding it with the never explicitly-defined (hence rather meaningless) None value.
continue
else:
# Assigning the None value guarantees that all properties are always defined on the
# destination object even if the XML configuration is not complete.
vars(object)[prop.name] = value
else:
if prop.isCollection and prop.isDefinedOn(object):
# Do not override current items!
vars(object)[prop.name].extend(value)
else:
# First definition of collection or assignment of a simple field.
vars(object)[prop.name] = value
def checkIntegrity(self, configuration, obj, collectedValues=None):
"""
Checks the integrity of an object with respect to this collection of properties.
configuration: Configuration object that contains the object to check.
obj: Object to check.
collectedValues: Properties' values. It is a dictionary that indexes lists of values with property names as keys.
"""
if collectedValues == None: collectedValues = {}
objects = obj if isinstance(obj, list) else [obj]
for o in objects:
if self.ignoreCheckIntegrityCallable(o): continue
for group in self.propertyGroups:
group.checkObjectIntegrity(configuration, o, collectedValues)
def toXml(self, config, propertyOwner, xmlDoc, xmlElement):
for prop in self.properties:
logger.reportDebug('toXml {0} on {1}'.format(prop, propertyOwner))
if prop.isDefinedOn(propertyOwner):
prop.toXml(config, propertyOwner, xmlDoc, xmlElement)
else:
logger.reportDebug('not defined')
# def generateDocumentation(self, classs, collector):
# # Check for reentrance.
# if collector.containsDocumentationForClass(classs): return
#
# # f.write('#{0}\n'.format(classs.__name__))
# for propertyGroup in self.propertyGroups:
# for header, entityType in [('Attributes', Property.XMLEntityTypes.ATTRIBUTE), ('Text', Property.XMLEntityTypes.INNER_TEXT), ('Children', Property.XMLEntityTypes.CHILD_ELEMENT)]:
# for property in propertyGroup.properties:
# if entityType & property.xmlEntityType == 0: continue
# collector.addDocumentationFor(class, '## {0}'.format(header))
# if property.isOfClassType():
# collector.addDocumentationFor(classs, '- [{0}](#{1}): {2}'.format(property.namesInXML[0], property.type, property.documentation.summary))
# else:
# collector.addDocumentationFor(classs, '- {0} ({1}): {2}'.format(property.namesInXML[0], property.type, property.documentation.summary))
# if property.documentation != None:
# collector.addDocumentationForClass(classs, property.documentation.summary + '\n')
# if property.isOfClassType():
# typeContent = '[{propType}](#{propType})'.format(propType=property.type.__name__)
# if hasattr(property.type, 'PROPERTY_DEFINITIONS'):
# property.type.PROPERTY_DEFINITIONS.generateDocumentation(property.type, collector)
# else:
# typeContent = property.type.__name__
# if len(property.namesInXML) > 1: raise Exception('The documentation generator assumes that there is only a single XML tag name associated to each property.')
# collector.addDocumentationForClass(classs, 'Xml tag name: {0}'.format('`<{0}/>`'.format(property.namesInXML[0])))
# collector.addDocumentationForClass(classs, 'type: {0}'.format(typeContent))
# if property.values != None and not callable(property.values):
# collector.addDocumentationForClass(classs, 'Accepted Values: {0}'.format(list(property.values)))
class ParameterizableString(object):
"""
Represents a string in the XML configuration that can be parameterized with <context> children.
Refer to the 'context handler' concept to understand how parameterization can take place with those children.
This class is quite useless but is required to have an object that holds the automatically-created xmlSource property.
"""
pass
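# Illustrative example (hedged; element name and placeholder are assumptions):
#   <subject>Sensor {name} triggered an alert</subject>
# Placeholders such as "{name}" are later substituted by Configuration.resolveObject()
# from the owning object's attributes; the <context> children mentioned above are
# interpreted by the context handlers, not by this class.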
class PyknxService(object):
"""Represents the configuration for the communication with the hosting Pyknx daemon.
The Pyknx daemon is the underlying process for Homewatcher that handles the communication with the Linknx daemon.
"""
PROPERTY_DEFINITIONS = PropertyCollection()
PROPERTY_DEFINITIONS.addProperty('host', isMandatory=False, type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE|Property.XMLEntityTypes.CHILD_ELEMENT)
PROPERTY_DEFINITIONS.addProperty('port', isMandatory=False, type=int, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE|Property.XMLEntityTypes.CHILD_ELEMENT)
def __init__(self):
self.host = '127.0.0.1'
self.port = 1029
def __repr__(self):
return 'PyknxService(host={host}, port={port})'.format(**vars(self))
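# Hedged configuration sketch: with the 'daemon' child element declared on
# ServicesRepository below, this class can be fed either form:
#   <services><daemon host="127.0.0.1" port="1029"/></services>
#   <services><daemon><host>127.0.0.1</host><port>1029</port></daemon></services>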
# class SMTPService(object):
# PROPERTY_DEFINITIONS = PropertyCollection()
# PROPERTY_DEFINITIONS.addProperty('host', isMandatory=False, type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE|Property.XMLEntityTypes.CHILD_ELEMENT)
# PROPERTY_DEFINITIONS.addProperty('port', isMandatory=False, type=int, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE|Property.XMLEntityTypes.CHILD_ELEMENT)
# PROPERTY_DEFINITIONS.addProperty('fromAddress', isMandatory=False, type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE|Property.XMLEntityTypes.CHILD_ELEMENT)
#
# def __init__(self):
# self.host = 'localhost'
# self.port = 25
#
# def __repr__(self):
# return 'SMTPService(host={host}, port={port})'.format(**vars(self))
class LinknxService(object):
PROPERTY_DEFINITIONS = PropertyCollection()
hostProp = Property('host', type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE|Property.XMLEntityTypes.CHILD_ELEMENT)
portProp = Property('port', type=int, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE|Property.XMLEntityTypes.CHILD_ELEMENT)
PROPERTY_DEFINITIONS.addPropertyGroup((hostProp, portProp))
PROPERTY_DEFINITIONS.addProperty('ignoreEmail', isMandatory=False, type=bool, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE)
def __init__(self):
self.host = '127.0.0.1'
self.port = 1028
self.ignoreEmail = False
@property
def address(self):
return (self.host, self.port)
def __repr__(self):
return 'LinknxService(host={host},port={port})'.format(**vars(self))
class ServicesRepository(object):
PROPERTY_DEFINITIONS = PropertyCollection()
PROPERTY_DEFINITIONS.addProperty('linknx', isMandatory=False, type=LinknxService, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT)
PROPERTY_DEFINITIONS.addProperty('daemon', isMandatory=False, type=PyknxService, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT)
# PROPERTY_DEFINITIONS.addProperty('smtp', isMandatory=False, type=SMTPService, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT)
def __init__(self):
self.linknx = LinknxService()
self.daemon = PyknxService()
class ModeDependentValue(object):
class Value(object):
PROPERTY_DEFINITIONS = PropertyCollection()
PROPERTY_DEFINITIONS.addProperty('value', isMandatory=True, type=float, xmlEntityType=Property.XMLEntityTypes.INNER_TEXT)
PROPERTY_DEFINITIONS.addProperty('modeName', isMandatory=False, type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE, namesInXML='mode', isUnique=True)
def __init__(self, value, modeName):
if type(value) not in [int, float]:
raise ValueError('int or float expected, {0} found'.format(type(value)))
self.value = value
self.modeName = modeName
def copy(self):
v = ModeDependentValue.Value(0.0, None)
self.PROPERTY_DEFINITIONS.cloneProperties(self, v)
return v
@staticmethod
def fromString(string):
return ModeDependentValue.Value(float(string), None)
@staticmethod
def fromXML(xmlElement):
val = ModeDependentValue.Value(0, None)
ModeDependentValue.Value.PROPERTY_DEFINITIONS.readObjectFromXML(val, xmlElement)
return val
def __repr__(self):
return 'Value({value},{modeName})'.format(**vars(self))
PROPERTY_DEFINITIONS = PropertyCollection()
PROPERTY_DEFINITIONS.addProperty('values', isMandatory=True, type=Value, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE|Property.XMLEntityTypes.CHILD_ELEMENT, namesInXML='value', isCollection=True)
def __init__(self, defaultValue=None):
self.values = []
if defaultValue != None:
self.values.append(ModeDependentValue.Value(value=defaultValue, modeName=None))
def copy(self):
v = ModeDependentValue()
self.PROPERTY_DEFINITIONS.cloneProperties(self, v)
return v
@staticmethod
def fromString(string):
# This is assumed to be the default value (i.e. the one used for modes
# that do not have a specific value).
return ModeDependentValue(float(string))
@staticmethod
def fromXML(xmlElement):
value=ModeDependentValue()
ModeDependentValue.PROPERTY_DEFINITIONS.readObjectFromXML(value, xmlElement)
return value
def toXml(self, config, property, propertyOwner, xmlDoc, xmlElement):
# Opt for an xml attribute if possible as it makes XML simpler.
if len(self.values) == 1 and self.values[0].modeName == None:
xmlElement.setAttribute(property.namesInXML[0], str(self.values[0].value))
else:
container = xmlDoc.createElement(property.namesInXML[0])
xmlElement.appendChild(container)
for value in self.values:
valueChild = xmlDoc.createElement('value')
container.appendChild(valueChild)
type(value).PROPERTY_DEFINITIONS.toXml(config, value, xmlDoc, valueChild)
def hasDefaultValue(self):
return None in self.getDefinedModes()
def getDefinedModes(self):
return {value.modeName for value in self.values}
def getForMode(self, modeName):
if not isinstance(modeName, str) and modeName != None:
raise Exception('A mode name or None is expected.')
for value in self.values:
if value.modeName == modeName:
return value.value
if modeName == None: raise Exception('Default value not found.')
# Fall back to the default value.
return self.getForMode(None)
def setForMode(self, mode, value):
# Replace the value defined for this mode if any, otherwise add a new one.
self.values = [v for v in self.values if v.modeName != mode]
self.values.append(ModeDependentValue.Value(value, mode))
def inherit(self, other):
""" Inherits values from another instance for modes that have no specific value in this instance. """
logger.reportDebug('{0} inherits from {1}'.format(self, other))
definedModes = self.getDefinedModes()
for value in other.values:
# Do not overwrite the value in this instance!
if value.modeName in definedModes: continue
self.values.append(value.copy())
logger.reportDebug('That gives {0}'.format(self))
def __repr__(self):
return 'ModeDependentValue({values})'.format(**vars(self))
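# Illustrative XML forms for a mode-dependent property such as 'activationDelay'
# (the mode name "away" is an assumption):
#   activationDelay="10"                          a single, default value
#   <activationDelay>
#       <value>10</value>
#       <value mode="away">30</value>
#   </activationDelay>                            per-mode values plus a default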
class ActivationCriterion(object):
""" Describes the rule that determine whether a sensor that is involved in a mode can be activated or if its activation should be deferred. """
class Type(object):
SENSOR = 'sensor'
AND = 'and'
OR = 'or'
@staticmethod
def getAll():
return (ActivationCriterion.Type.SENSOR, ActivationCriterion.Type.AND, ActivationCriterion.Type.OR)
def __init__(self):
self._attributes = {}
@property
def attributes(self):
return self._attributes
def copy(self):
clone = ActivationCriterion()
clone._attributes = self._attributes.copy()
ActivationCriterion.PROPERTY_DEFINITIONS.cloneProperties(self, clone)
return clone
def inherit(self, other):
# Inheritance does not apply for this object.
pass
@staticmethod
def makeSensorCriterion(sensorName, whenTriggered = False):
crit = ActivationCriterion()
crit.type = ActivationCriterion.Type.SENSOR
crit.sensorName = sensorName
crit.whenTriggered = whenTriggered
return crit
@staticmethod
def makeAndCriterion():
return ActivationCriterion.makeBooleanCriterion(ActivationCriterion.Type.AND)
@staticmethod
def makeOrCriterion():
return ActivationCriterion.makeBooleanCriterion(ActivationCriterion.Type.OR)
@staticmethod
def makeBooleanCriterion(type):
if not type in [ActivationCriterion.Type.AND, ActivationCriterion.Type.OR]: raise Exception('Invalid boolean criterion type: {0}'.format(type))
crit = ActivationCriterion()
crit.type = type
crit.children = []
return crit
# @staticmethod
# def fromXML(xmlElement):
# type = Configuration.getXmlAttribute(xmlElement, 'type', None, mustBeDefined=True)
# criterion = ActivationCriterion(type)
#
# ActivationCriterion.PROPERTY_DEFINITIONS.readObjectFromXML(criterion, xmlElement)
#
# return criterion
# Define properties outside class because of a reference to the class itself.
ActivationCriterion.PROPERTY_DEFINITIONS = PropertyCollection()
ActivationCriterion.PROPERTY_DEFINITIONS.addProperty('type', isMandatory=True, type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE, values=ActivationCriterion.Type.getAll())
isOfSensorType=lambda context: context.object.type==ActivationCriterion.Type.SENSOR
getSensorNames = lambda configuration, owner: [s.name for s in configuration.sensors]
ActivationCriterion.PROPERTY_DEFINITIONS.addProperty('sensorName', isMandatory=isOfSensorType, type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE, namesInXML='sensor', values=getSensorNames)
ActivationCriterion.PROPERTY_DEFINITIONS.addProperty('whenTriggered', isMandatory=isOfSensorType, type=bool, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE)
ActivationCriterion.PROPERTY_DEFINITIONS.addProperty('children', isMandatory=lambda context: context.object.type in (ActivationCriterion.Type.AND, ActivationCriterion.Type.OR), type=ActivationCriterion, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT, namesInXML='activationCriterion', isCollection=True)
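# Sketch (sensor names are assumptions): an "or" criterion over two sensors, built
# with the factory methods above:
#   crit = ActivationCriterion.makeBooleanCriterion(ActivationCriterion.Type.OR)
#   crit.children.append(ActivationCriterion.makeSensorCriterion('entranceDoor'))
#   crit.children.append(ActivationCriterion.makeSensorCriterion('garageDoor'))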
class Sensor(object):
class Type(object):
ROOT = 'root'
BOOLEAN = 'boolean'
FLOAT = 'float'
@staticmethod
def getAll():
return [Sensor.Type.ROOT, Sensor.Type.BOOLEAN, Sensor.Type.FLOAT]
@staticmethod
def getBasicTypes():
all = Sensor.Type.getAll()
all.remove(Sensor.Type.ROOT)
return all
PROPERTY_DEFINITIONS = PropertyCollection()
PROPERTY_DEFINITIONS.ignoreCheckIntegrityCallable = lambda sensor: sensor.isClass
# Generic mandatory properties of various types.
PROPERTY_DEFINITIONS.addProperty('name', isMandatory=True, type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE, isUnique=True)
getClassNamesExceptRoot = lambda configuration, owner: [c.name for c in configuration.classes if (not c.isRootType() or owner.name in Sensor.Type.getAll()) and c != owner and not configuration.doesSensorInherit(c, owner)]
PROPERTY_DEFINITIONS.addProperty('type', isMandatory=lambda context: not context.object.isRootType(), type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE, values=getClassNamesExceptRoot)
PROPERTY_DEFINITIONS.addProperty('isClass', isMandatory=True, type=bool, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE)
isNotClass = lambda context: not context.object.isClass
PROPERTY_DEFINITIONS.addProperty('alertName', isMandatory=isNotClass, type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE, namesInXML='alert')
PROPERTY_DEFINITIONS.addProperty('enabledObjectId', isMandatory=isNotClass, type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE, isUnique=True)
PROPERTY_DEFINITIONS.addProperty('watchedObjectId', isMandatory=isNotClass, type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE)
for propName in ['activationDelay', 'prealertDuration', 'alertDuration']:
PROPERTY_DEFINITIONS.addProperty(propName, isMandatory=isNotClass, type=ModeDependentValue, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE | Property.XMLEntityTypes.CHILD_ELEMENT)
PROPERTY_DEFINITIONS.addProperty('activationCriterion', isMandatory=isNotClass, type=ActivationCriterion, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT)
# Mandatory properties for booleans.
isBoolean = lambda context: not context.object.isClass and context.configuration.doesSensorInherit(context.object, Sensor.Type.BOOLEAN)
for propName in ['triggerValue']:
PROPERTY_DEFINITIONS.addProperty(propName, isMandatory=isBoolean, type=bool, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE)
# Mandatory properties for float sensors.
isFloat = lambda context: not context.object.isClass and context.configuration.doesSensorInherit(context.object, Sensor.Type.FLOAT)
PROPERTY_DEFINITIONS.addPropertyGroup([Property(name, type=float, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE) for name in ['lowerBound', 'upperBound']], isFloat)
PROPERTY_DEFINITIONS.addProperty('hysteresis', isMandatory=isFloat, type=float, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE)
# Optional properties.
PROPERTY_DEFINITIONS.addProperty('description', isMandatory=False, type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE|Property.XMLEntityTypes.CHILD_ELEMENT)
PROPERTY_DEFINITIONS.addProperty('persistenceObjectId', isMandatory=False, type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE, isUnique=True)
def __init__(self, type, name, isBuiltIn):
self.type = type # Sensor type from Sensor.Type or base class name if class.
self.name = name # Sensor's name or class's name.
self.isClass = False
self.isBuiltIn = isBuiltIn
self._attributes = {}
@staticmethod
def makeNew(type, name, desc, isClass, isBuiltIn, alertName=None, enabledObjectId=None, watchedObjectId=None, persistenceObjectId=None):
s = Sensor(type, name, isBuiltIn)
s.description = desc
s.isClass = isClass
s.alertName = alertName
s.enabledObjectId = enabledObjectId
s.watchedObjectId = watchedObjectId
s.persistenceObjectId = persistenceObjectId
return s
def isRootType(self):
return self.name == Sensor.Type.ROOT
def addAttribute(self, attributeName, attributeValue):
self._attributes[attributeName] = attributeValue
@staticmethod
def fromXML(xmlElement):
s = Sensor(None, None, isBuiltIn=False)
Sensor.PROPERTY_DEFINITIONS.readObjectFromXML(s, xmlElement)
return s
@property
def attributes(self):
return self._attributes
def __repr__(self):
return '{classOrSensor} {name}'.format(classOrSensor='Class' if self.isClass else 'Sensor', name=self.name)
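# Hedged example of a sensor definition assembled from the properties above (all
# ids, names and values are illustrative, not taken from a real configuration):
#   <sensor name="entranceDoor" type="boolean" isClass="false" alert="intrusion"
#           enabledObjectId="entrance_enabled" watchedObjectId="entrance_state"
#           triggerValue="true" activationDelay="10" prealertDuration="30"
#           alertDuration="60"/>
# An <activationCriterion> child element can be added as well, as declared above.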
class Action(object):
def __init__(self, type, eventName):
pass
@property
def type(self):
return self.linknxActionXml.getAttribute('type')
@staticmethod
def fromXML(xmlElement):
e=Action(None, None)
Action.PROPERTY_DEFINITIONS.readObjectFromXML(e, xmlElement)
# Store the input XML to be able to send it to linknx when executing the
# action.
e.linknxActionXml = xmlElement
return e
def toXml(self, config, property, propertyOwner, xmlDoc, xmlElement):
linknxActionClone = xmlDoc.importNode(self.linknxActionXml, True)
xmlElement.appendChild(linknxActionClone)
def __repr__(self):
return 'Action of type={type}'.format(type=self.type)
Action.PROPERTY_DEFINITIONS = PropertyCollection()
Action.PROPERTY_DEFINITIONS.addProperty('type', isMandatory=True, type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE)
# Subject properties: one for the static, Linknx-defined "subject" attribute,
# one for a Homewatcher-specific, dynamic "subject" element.
staticSubjectProp = Property('staticSubject', type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE, namesInXML=('subject',))
parameterizableSubjectProp = Property('parameterizableSubject', type=ParameterizableString, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT, namesInXML=('subject',))
Action.PROPERTY_DEFINITIONS.addPropertyGroup((staticSubjectProp, parameterizableSubjectProp), isGroupMandatory=lambda context: context.object.type == 'send-email')
# Body properties: one for the static, Linknx-defined inner text of the <action>
# element, one for a Homewatcher-specific, dynamic "body" element.
staticBodyProp = Property('staticBody', type=str, xmlEntityType=Property.XMLEntityTypes.INNER_TEXT)
parameterizableBodyProp = Property('parameterizableBody', type=ParameterizableString, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT, namesInXML=('body',))
Action.PROPERTY_DEFINITIONS.addPropertyGroup((staticBodyProp, parameterizableBodyProp), isGroupMandatory=lambda context: context.object.type == 'send-email')
staticValueProp = Property('staticValue', type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE, namesInXML=('value',))
parameterizableValueProp = Property('parameterizableValue', type=ParameterizableString, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT, namesInXML=('value',))
Action.PROPERTY_DEFINITIONS.addPropertyGroup((staticValueProp, parameterizableValueProp), isGroupMandatory=lambda context: context.object.type == 'send-sms')
# All actions are handled by linknx except send-email that has to be reworked by
# Homewatcher to customize email text.
# for propName in ('objectId', 'value'):
# Action.PROPERTY_DEFINITIONS.addProperty(propName, isMandatory=lambda context: context.object.type==Action.Type.CHANGE_OBJECT, type=str, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT|Property.XMLEntityTypes.ATTRIBUTE)
class Event(object):
def __repr__(self):
return 'Event "{type}"'.format(**vars(self))
Event.PROPERTY_DEFINITIONS = PropertyCollection()
Event.PROPERTY_DEFINITIONS.addProperty('type', isMandatory=True, type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE, values=lambda configuration, owner:type(owner).Type.getAll(), isUnique=True)
Event.PROPERTY_DEFINITIONS.addProperty('actions', isMandatory=True, type=Action, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT, namesInXML='action', isCollection=True)
class ModeEvent(Event):
class Type:
ENTERED = 'entered'
LEFT = 'left'
@staticmethod
def getAll():
return [ModeEvent.Type.ENTERED, ModeEvent.Type.LEFT]
class Mode(object):
PROPERTY_DEFINITIONS = PropertyCollection()
PROPERTY_DEFINITIONS.addProperty('name', isMandatory=True, type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE, isUnique=True)
PROPERTY_DEFINITIONS.addProperty('value', isMandatory=True, type=int, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE, isUnique=True)
PROPERTY_DEFINITIONS.addProperty('sensorNames', isMandatory=False, type=str, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT, namesInXML='sensor', isCollection=True, values=lambda configuration, object: [s.name for s in configuration.sensors])
PROPERTY_DEFINITIONS.addProperty('events', isMandatory=False, type=ModeEvent, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT, namesInXML='event', isCollection=True)
def __init__(self, name, value):
self.name = name # Unique identifier for the mode.
self.value = value
self.sensorNames = []
self.events = []
@staticmethod
def fromXML(xmlElement):
m = Mode(None, None)
Mode.PROPERTY_DEFINITIONS.readObjectFromXML(m, xmlElement)
return m
def __repr__(self):
return '{name} [value={value}]'.format(**vars(self))
class ModesRepository:
PROPERTY_DEFINITIONS = PropertyCollection()
PROPERTY_DEFINITIONS.addProperty('objectId', isMandatory=True, type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE|Property.XMLEntityTypes.CHILD_ELEMENT)
# Temporarily removed in version 1. Mode-independent events imply additional
# testing that is beyond the scope of the initial version.
# PROPERTY_DEFINITIONS.addProperty('events', isMandatory=False, type=ModeEvent, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT, namesInXML="event", isCollection=True)
PROPERTY_DEFINITIONS.addProperty('modes', isMandatory=False, type=Mode, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT, namesInXML="mode", isCollection=True)
PROPERTY_DEFINITIONS.addProperty('events', isMandatory=False, type=ModeEvent, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT, namesInXML='event', isCollection=True)
def __init__(self):
self.events = []
self.modes = []
def __iter__(self):
if ModesRepository.PROPERTY_DEFINITIONS.getProperty('modes').isDefinedOn(self):
return self.modes.__iter__()
else:
return [].__iter__()
def __len__(self):
return len(self.modes)
def __getitem__(self, index):
return self.modes[index]
def __repr__(self):
return 'ModesRepository({0})'.format(self.modes)
class AlertEvent(Event):
class Type:
PREALERT_STARTED = 'prealert started'
ALERT_ACTIVATED = 'activated'
ALERT_DEACTIVATED = 'deactivated'
ALERT_PAUSED = 'paused'
ALERT_RESUMED = 'resumed'
ALERT_STOPPED = 'stopped'
ALERT_ABORTED = 'aborted'
ALERT_RESET = 'reset'
SENSOR_JOINED = 'sensor joined'
SENSOR_LEFT = 'sensor left'
@staticmethod
def getAll():
return [AlertEvent.Type.PREALERT_STARTED, AlertEvent.Type.ALERT_ACTIVATED, AlertEvent.Type.ALERT_DEACTIVATED, AlertEvent.Type.ALERT_PAUSED, AlertEvent.Type.ALERT_RESUMED, AlertEvent.Type.ALERT_STOPPED, AlertEvent.Type.ALERT_ABORTED, AlertEvent.Type.ALERT_RESET, AlertEvent.Type.SENSOR_JOINED, AlertEvent.Type.SENSOR_LEFT]
class Alert(object):
PROPERTY_DEFINITIONS = PropertyCollection()
PROPERTY_DEFINITIONS.addProperty('name', isMandatory=True, type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE, isUnique=True)
PROPERTY_DEFINITIONS.addProperty('persistenceObjectId', isMandatory=False, type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE, isUnique=True)
PROPERTY_DEFINITIONS.addProperty('inhibitionObjectId', isMandatory=False, type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE)
PROPERTY_DEFINITIONS.addProperty('events', isMandatory=False, type=AlertEvent, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT, namesInXML='event', isCollection=True)
def __init__(self):
self.events = []
@staticmethod
def makeNew(id, persistenceObjectId, inhibitionObjectId):
alert = Alert()
alert.name = id
alert.persistenceObjectId = persistenceObjectId
alert.inhibitionObjectId = inhibitionObjectId
return alert
def __repr__(self):
return 'Alert {name}'.format(**vars(self))
class AlertsRepository(object):
PROPERTY_DEFINITIONS = PropertyCollection()
PROPERTY_DEFINITIONS.addProperty('alerts', isMandatory=True, type=Alert, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT, namesInXML='alert', isCollection=True)
PROPERTY_DEFINITIONS.addProperty('events', isMandatory=False, type=AlertEvent, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT, namesInXML='event', isCollection=True)
def __init__(self):
self.alerts = []
self.events = []
def __iter__(self):
if AlertsRepository.PROPERTY_DEFINITIONS.getProperty('alerts').isDefinedOn(self):
return self.alerts.__iter__()
else:
return [].__iter__()
def __len__(self):
return len(self.alerts)
def __getitem__(self, index):
return self.alerts[index]
def __repr__(self):
return 'AlertsRepository({0})'.format(self.alerts)
class Configuration(object):
class IntegrityException(Exception):
def __init__(self, message, cause = None, problematicObject=None, xmlContext=None):
Exception.__init__(self, message)
self.cause = cause
self._problematicObject = None
self.xmlContext = None
self.problematicObject = problematicObject
@property
def problematicObject(self):
return self._problematicObject
@problematicObject.setter
def problematicObject(self, obj):
self._problematicObject = obj
if self.xmlContext == None and hasattr(self._problematicObject, 'xmlSource'):
self.xmlContext = self._problematicObject.xmlSource
def __str__(self):
s = Exception.__str__(self)
if self.problematicObject != None:
s += '\nProblematic object: {0} of type {1}'.format(self.problematicObject, type(self.problematicObject))
if self.xmlContext != None:
s += '\nXML context: {0}'.format(self.xmlContext)
if self.cause != None:
s += '\nCaused by {0}'.format(self.cause)
return s
PROPERTY_DEFINITIONS = PropertyCollection()
PROPERTY_DEFINITIONS.addProperty('modesRepository', isMandatory=True, type=ModesRepository, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT, namesInXML='modes')
PROPERTY_DEFINITIONS.addProperty('alerts', isMandatory=True, type=AlertsRepository, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT)
PROPERTY_DEFINITIONS.addProperty('sensorsAndClasses', isMandatory=True, type=Sensor, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT, namesInXML=('sensor',), groupNameInXML='sensors', isCollection=True, getter=lambda config, configAgain: config.sensorsAndClassesWithoutBuiltIns)
PROPERTY_DEFINITIONS.addProperty('servicesRepository', isMandatory=False, type=ServicesRepository, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT, namesInXML='services')
def __init__(self):
# Default services repository.
self.servicesRepository = ServicesRepository()
# Add built-in sensor classes.
rootClass = Sensor(None, Sensor.Type.ROOT, True)
rootClass.isClass = True
rootClass.activationDelay = ModeDependentValue(0)
rootClass.activationCriterion = ActivationCriterion.makeSensorCriterion('{name}', False) # {name} is resolved for each sensor so that this criterion is true if the sensor is not triggered.
rootClass.prealertDuration = ModeDependentValue(0)
rootClass.alertDuration = ModeDependentValue(0)
booleanClass = Sensor(Sensor.Type.ROOT, Sensor.Type.BOOLEAN, True)
booleanClass.isClass = True
booleanClass.triggerValue = True
floatClass = Sensor(Sensor.Type.ROOT, Sensor.Type.FLOAT, True)
floatClass.isClass = True
self.sensorsAndClasses = [rootClass, booleanClass, floatClass]
@staticmethod
def parseFile(filename):
# xsdFilename = os.path.join(os.path.dirname(__file__), 'config.xsd')
# schema = etree.XMLSchema(file=xsdFilename)
# parser = etree.XMLParser(schema=schema)
# try:
# tree = etree.parse(source=filename, parser=parser)
# except:
# logger.reportError('{0} parse errors.'.format(len(parser.error_log)))
# errIx = 0
# for err in parser.error_log:
# errIx += 1
# logger.reportError('#{ix}@{line}:{col} {message}'.format(ix=errIx, line=err.line, col=err.column, message=err.message))
# raise
doc = xml.dom.minidom.parse(filename)
return Configuration.parse(doc)
@staticmethod
def parseString(string):
doc = xml.dom.minidom.parseString(string)
return Configuration.parse(doc)
@staticmethod
def parse(xmlDocument):
config = xmlDocument.getElementsByTagName('config')[0]
configuration = Configuration()
context = None
try:
Configuration.PROPERTY_DEFINITIONS.readObjectFromXML(configuration, config)
# # Sensors (classes and concrete ones).
# context = 'sensors block'
# classesIt = Configuration.getElementsInConfig(config, 'class', 'sensors')
# sensorsIt = Configuration.getElementsInConfig(config, 'sensor', 'sensors')
# for xmlElement in itertools.chain(classesIt, sensorsIt):
# context = xmlElement.toxml()
# # Consider 'name' and 'type' as optional for now. Integrity checks on the
# # built configuration will take care of them later (which is
# # better than checking only the XML way to define
# # configuration).
# sensor = Sensor(Configuration.getXmlAttribute(xmlElement, 'type', None, mustBeDefined=False), Configuration.getXmlAttribute(xmlElement, 'name', None, mustBeDefined=False))
# sensor.isClass = xmlElement.tagName.lower() == 'class'
#
# # Automatically read properties that come from attributes or
# # child elements.
# Sensor.PROPERTY_DEFINITIONS.readObjectFromXML(sensor, xmlElement)
#
# # Xml attributes can be used as parameters for parameterized
# # values in the config (this is advanced usage).
# for k, v in xmlElement.attributes.items():
# sensor.addAttribute(k, v)
#
# configuration.addSensor(sensor)
#
# # Modes.
# context = 'modes block'
# for modesElement in Configuration.getElementsInConfig(config, 'modes', None):
# context = modesElement.toxml()
# ModesRepository.PROPERTY_DEFINITIONS.readObjectFromXML(configuration.modes, modesElement)
#
# # Alerts.
# context = 'alerts block'
# for alertElement in Configuration.getElementsInConfig(config, 'alert', 'alerts'):
# context = alertElement.toxml()
# alert = Alert(None, None)
# Alert.PROPERTY_DEFINITIONS.readObjectFromXML(alert, alertElement)
# configuration.addAlert(alert)
except Configuration.IntegrityException as e:
if e.xmlContext == None:
e.xmlContext = context
raise e
except ValueError as e:
raise Configuration.IntegrityException('An exception occurred while parsing {0}'.format(context), e)
return configuration
def toXml(self):
# Creates a new empty DOM.
doc = xml.dom.minidom.Document()
config = doc.createElement('config')
doc.appendChild(config)
Configuration.PROPERTY_DEFINITIONS.toXml(self, self, doc, config)
return doc
@staticmethod
def parseProperty(object, xmlElement, propertyDefinition):
# Parse the given property from the XML element and assign it on the object.
value = propertyDefinition.fromXML(xmlElement)
if value is not None:
vars(object)[propertyDefinition.name] = value
@staticmethod
def getXmlAttribute(xmlElement, attributeName, defaultValue=None, mustBeDefined=False):
"""
Returns the value of the given element's attribute or None if element does not have such attribute.
Unlike the getAttribute method on Element, this method does not return an empty string but None whenever attribute does not exist.
"""
if(xmlElement.hasAttribute(attributeName)):
return xmlElement.getAttribute(attributeName)
else:
if mustBeDefined:
raise Configuration.IntegrityException('Element {0} misses attribute {1}'.format(xmlElement.tagName, attributeName), xmlContext=xmlElement.toxml() )
else:
return defaultValue
@staticmethod
def getElementsInConfig(config, sectionName, groupName):
if not groupName is None:
for sections in config.childNodes:
if sections.nodeType != sections.ELEMENT_NODE or sections.tagName != groupName: continue
for section in sections.childNodes:
if section.nodeType != section.ELEMENT_NODE or section.tagName != sectionName: continue
yield section
else:
for section in config.childNodes:
if section.nodeType != section.ELEMENT_NODE or section.tagName != sectionName: continue
yield section
@staticmethod
def getTextInElement(elt, mustFind = True):
text = None
for node in elt.childNodes:
if node.nodeType == node.TEXT_NODE:
if not text:
text = ''
text += node.data
if mustFind and not text:
raise Exception('Missing text in element {0}'.format(elt.nodeName))
return text
def getClassesInheritedBySensor(self, sensor, includesBuiltIns=False):
s = sensor if type(sensor) == Sensor else self._getSensorOrClassByName(sensor)
if s.isRootType():
return []
else:
inheritedClasses = self.getClassesInheritedBySensor(s.type, includesBuiltIns)
baseClass = self.getClassByName(s.type)
if baseClass.isBuiltIn and not includesBuiltIns:
return inheritedClasses
else:
return [baseClass] + inheritedClasses
def doesSensorInherit(self, sensor, classs):
if isinstance(sensor, Sensor):
s = sensor
else:
s = self._getSensorOrClassByName(sensor)
if s == None:
return False
if isinstance(classs, Sensor):
className = classs.name
else:
className = classs
if s.isRootType():
return False
elif s.type == className:
return True
else:
return self.doesSensorInherit(s.type, className)
def checkIntegrity(self):
"""
Checks that the configuration described by this object is valid.
An exception is raised if a problem is detected. Otherwise, it is safe to assume that the configuration is well defined.
"""
Configuration.PROPERTY_DEFINITIONS.checkIntegrity(self, self)
@property
def sensors(self):
if not self.sensorsAndClasses: return []
return [s for s in self.sensorsAndClasses if not s.isClass]
@property
def classes(self):
if not self.sensorsAndClasses: return []
return [s for s in self.sensorsAndClasses if s.isClass]
@property
def sensorsAndClassesWithoutBuiltIns(self):
return [s for s in self.sensorsAndClasses if not s.isBuiltIn]
def getBuiltInRootClass(self, sensorOrClass):
if isinstance(sensorOrClass, str):
sensorOrClass = self._getSensorOrClassByName(sensorOrClass)
# May happen if None has been passed or if no sensor by the given name
# could be found (can happen on a misconfigured instance of
# homewatcher). This should not crash.
if sensorOrClass == None: return None
if not sensorOrClass.isBuiltIn:
return self.getBuiltInRootClass(self.getClassByName(sensorOrClass.type))
else:
return sensorOrClass
def getModeByName(self, modeName):
modes = [m for m in self.modesRepository.modes if m.name == modeName]
if modes:
return modes[0]
else:
raise Exception('No mode {0}.'.format(modeName))
def resolve(self, checkIntegrityWhenDone=True):
resolvedSensors = []
for sensor in self.sensorsAndClasses:
if sensor.isClass:
resolvedSensors.append(sensor)
else:
resolvedSensors.append(self._getResolvedSensor(sensor))
self.sensorsAndClasses = resolvedSensors
# Force integrity checks immediately, as this guarantees that resolution
# did not lead to weird results.
if checkIntegrityWhenDone: self.checkIntegrity()
def _getResolvedSensor(self, sensor):
if sensor.isClass: raise Exception('Sensor classes cannot be resolved.')
resolvedCopy = Sensor(sensor.type, sensor.name, sensor.isBuiltIn)
currentClass = sensor
resolvedCopyVars = vars(resolvedCopy)
# Recursively assign members from the whole ancestor branch.
primitiveTypes = (type(None), str, int, float, bool)
customTypes = (ModeDependentValue, ActivationCriterion)
while currentClass != None:
for k, v in vars(currentClass).items():
if k == '_attributes':
newAttributes = v.copy()
newAttributes.update(resolvedCopy._attributes)
resolvedCopy._attributes = newAttributes
continue
doesMemberExist = not(currentClass == sensor or not k in resolvedCopyVars or resolvedCopyVars[k] is None)
if isinstance(v, primitiveTypes):
if not doesMemberExist:
resolvedCopyVars[k] = v
elif isinstance(v, customTypes):
if not doesMemberExist:
resolvedCopyVars[k] = v.copy()
else:
resolvedCopyVars[k].inherit(v)
else:
raise Exception('Unsupported member {0}={1}, type={2}'.format(k, v, type(v)))
if not currentClass.isRootType():
currentClass = self.getClassByName(currentClass.type)
else:
currentClass = None
# # Replace the base class by the first class that still exists in the
# # resolved configuration: this is the first builtin class. In case
# # something goes wrong when searching for this builtin class, simply
# # reuse the base class of the original sensor. This will not work
# # properly but configuration's integrity checks will be more accurate.
# builtinRootClass = self.getBuiltInRootClass(sensor.type)
# resolvedCopy.type = sensor.type if builtinRootClass is None else builtinRootClass.name
# Resolve parameterized string fields.
self.resolveObject(resolvedCopy, {})
return resolvedCopy
def getClassByName(self, name):
c = self._getSensorOrClassByName(name)
if c == None or not c.isClass: return None
return c
def getSensorByName(self, name):
s = self._getSensorOrClassByName(name)
if s is None or s.isClass: return None
return s
def _getSensorOrClassByName(self, name):
# Make sure we do not compare None to any sensor's name. If None is
# passed, this query must return None even if the configuration is
# badly defined.
if name == None: return None
byNames = [o for o in self.sensorsAndClasses if o.name == name]
if len(byNames) == 0:
return None
elif len(byNames) > 1:
raise Configuration.IntegrityException('Those sensors are homonymous: {0}'.format(byNames))
else:
return byNames[0]
@staticmethod
def resolveObject(obj, attributes):
if obj is None: return obj
# Logic: some object's members may be parameterized with attributes
# stored in an 'attributes' dictionary. Attributes may themselves be
# parameterized with other attributes.
# First, resolve attributes, taking care of the priority order if
# required. Then, resolve members. Last, resolve members that are
# objects by passing them the dictionary of attributes as a base source
# for attributes.
# Notice that the attributes passed to this method are assumed to be
# already resolved.
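# Illustrative example: with attributes {'floor': '1', 'name': 'door{floor}'},
# 'name' depends on 'floor', so 'floor' is resolved first; a member such as
# description='Sensor {name}' then ends up as 'Sensor door1'.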
# Define comparator method.
def parameterSort(a, b):
paramsInA = regex.findall(obj.attributes[a])
paramsInB = regex.findall(obj.attributes[b])
if b in paramsInA:
if a in paramsInB:
raise Exception('{a} and {b} are mutually dependent.'.format(a=a, b=b))
# b must be resolved after a.
return 1
elif a in paramsInB:
# a must be resolved after b.
return -1
else:
# a and b are independent.
return 0
# Combine object's attributes with the passed ones. Object's attributes
# take precedence in case of name conflicts.
combinedAttributes = attributes.copy()
if hasattr(obj, 'attributes'):
combinedAttributes.update(obj.attributes)
# Resolve object's attributes that need to.
regex = re.compile('{([a-zA-Z]\w*)}')
if hasattr(obj, 'attributes'):
parameterizedAttributeNames = []
for k, v in obj.attributes.items():
if isinstance(v, str) and regex.search(v):
# Store attribute name, not its value! The comparator will
# evaluate the attribute when needed.
parameterizedAttributeNames.append(k)
# Sort attributes by order of resolution.
parameterizedAttributeNames = sorted(parameterizedAttributeNames, key=cmp_to_key(parameterSort))
# Resolve them.
for attributeName in parameterizedAttributeNames:
attrValue = obj.attributes[attributeName]
attrValue = attrValue.format(**combinedAttributes)
obj.attributes[attributeName] = attrValue
combinedAttributes[attributeName] = attrValue
# Resolve string members and internal objects.
isString = lambda o: isinstance(o, str)
isObject = lambda o: not isinstance(o, (type(None), int, float, bool))
resolve = lambda v: v.format(**combinedAttributes) if isString(v) else Configuration.resolveObject(v, combinedAttributes) if isObject(v) else v
if isinstance(obj, (list, tuple)):
for i in range(len(obj)):
obj[i] = resolve(obj[i])
elif isinstance(obj, dict):
for k, v in obj.items():
obj[k] = resolve(v)
else:
objVars = vars(obj)
for k, v in objVars.items():
if k == 'xmlSource': continue
objVars[k] = resolve(v)
return obj
def addAlert(self, alert):
self.alerts.append(alert)
def getAlertByName(self, name):
if name == None: return None
for a in self.alerts:
if a.name == name:
return a
raise KeyError(name)
@staticmethod
def replaceParametersInString(inputString, parameters):
""" Replaces parameters identified by their name enclosed in curly brackets by their value specified in the passed dictionary. """
outputString = inputString
for parameterName, parameterValue in parameters.items():
outputString = outputString.replace('{{{0}}}'.format(parameterName), parameterValue)
return outputString
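# e.g. (illustrative) replaceParametersInString('Alert {alert} raised', {'alert': 'intrusion'})
# returns 'Alert intrusion raised'.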
| gpl-3.0 | -8,028,309,703,692,791,000 | 46.594615 | 333 | 0.66136 | false | 4.20526 | true | false | false |
DoddyPhysics/AxionNet | RadiationBackground/gstar.py | 1 | 1045 | """
gstarS and gstarR fits from Wantz and Shellard, 0910.1066, Appendix A
"""
import numpy as np
a0S=1.36
a1S=np.asarray([0.498,0.327,0.579,0.140,0.109])
a2S=np.asarray([-8.74,-2.89,-1.79,-0.102,3.82])
a3S=np.asarray([0.693,1.01,0.155,0.963,0.907])
def gS(T):
"""
The input temperature is measured in eV
gstarS as a function of T from fits
"""
T=T/1.e9
t=np.log(T)
f=a0S
for i in range(0,5):
f=f+a1S[i]*(1.+np.tanh((t-a2S[i])/a3S[i]))
return np.exp(f)
a0R=1.21
a1R=np.asarray([0.572,0.330,0.579,0.138,0.108])
a2R=np.asarray([-8.77,-2.95,-1.80,-0.162,3.76])
a3R=np.asarray([0.682,1.01,0.165,0.934,0.869])
def gR(T):
"""
The input temperature is measured in eV
gstarR as a function of T from fits
"""
T=T/1.e9
t=np.log(T)
f=a0R
for i in range(0,5):
f=f+a1R[i]*(1.+np.tanh((t-a2R[i])/a3R[i]))
return np.exp(f)
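# Example (illustrative): effective degrees of freedom near T = 1 MeV (temperatures in eV)
# print(gS(1.e6), gR(1.e6))   # both are of order 10 around e+/- annihilation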
#import matplotlib.pyplot as plt
#T=np.logspace(-6,3,100)
#plt.plot(T,gS(T),linewidth=2.0)
#plt.plot(T,gR(T),'-r',linewidth=2.0)
#plt.ylim([1.,200.])
#plt.xscale('log')
#plt.yscale('log')
#plt.show()
| mit | 1,661,348,891,281,186,300 | 17.660714 | 69 | 0.626794 | false | 1.856128 | false | false | false |
kantale/molgenis-impute | molgenis-impute.py | 1 | 4150 |
"""
molgenis-impute v.0.7.0
Alexandros Kanterakis, [email protected]
Please read documentation in README.md
"""
import argparse
from imputation import Imputation
if __name__ == '__main__':
description = """
MOLGENIS-compute imputation version 0.7.0
"""
parser = argparse.ArgumentParser(description=description, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--tools_dir', help='Installation directory for imputation tools. Default: <currrent working dir>/tools')
parser.add_argument('--reference_dir', help='Installation directory for the imputation reference panels. Default: <currrent working dir>/resources/imputationReference')
parser.add_argument('--list', help='List of all available reference panels either already downloaded, or available for downloading', action='store_true')
parser.add_argument('--dl_tools', help='download all necessary imputation tools', action='store_true')
parser.add_argument('--dl_reference', help='download and install an imputation reference panel')
parser.add_argument('--study', help='Absolute path of the directory off the study panel')
parser.add_argument('--output', help='Absolute path of the output (results) directory')
parser.add_argument('--chromosomes', help='comma separated values of chromosomes (If not set, imputation for all chromosomes will be performed')
parser.add_argument('--additional_shapeit_parameters', help='Extra command line arguments to pass to SHAPEIT tool', default=' ')
parser.add_argument('--additional_impute2_parameters', help='Extra command line arguments to pass to impute2 tool', default=' ')
parser.add_argument('--position_batch_size', help='Size of the chromosomal size of each imputation batch', default=5000000, type=int)
parser.add_argument('--sample_batch_size', help='Minimum number of samples in imputation batches', default=500, type=int)
parser.add_argument('--reference', help='name of the imputation reference panel')
parser.add_argument('--action', help='Action to do: liftover, phase, impute', choices=['liftover', 'phase', 'impute'])
parser.add_argument('--add_reference', help='Add a new reference panel', action='store_true')
parser.add_argument('--backend', help='Execution environment. Default: local', choices=['pbs', 'grid', 'local'], default='local')
parser.add_argument('--nosubmit', help='Create scripts but don\'t submit them for execution', action='store_true')
args = parser.parse_args()
imp = Imputation(tools_dir=args.tools_dir, reference_dir=args.reference_dir)
if args.dl_tools:
imp.install_imputation_tools()
elif args.list:
imp.list_reference_panels()
elif args.dl_reference:
imp.install_reference_panel(args.dl_reference)
elif args.add_reference:
imp.add_custom_reference_panels()
elif args.action:
if not args.study:
raise Exception('You need to define a directory where the study panel is, in order to perform this action (parameter --study)')
if not args.output:
raise Exception('You need to define a directory where the output results will be stored (parameter --output)')
if args.action == 'liftover':
imp.perform_liftover(args.study, args.output, backend=args.backend, submit=not args.nosubmit)
elif args.action == 'phase':
imp.perform_phase(args.study, args.output, additional_shapeit_parameters=args.additional_shapeit_parameters, backend=args.backend, submit=not args.nosubmit)
elif args.action == 'impute':
if not args.reference:
raise Exception('You need to define a reference panel. Use the --reference parameter. For a list for all available reference panels, use --list')
imp.perform_impute(args.study, args.output, args.reference,
additional_impute2_parameters=args.additional_impute2_parameters,
custom_chromosomes=args.chromosomes,
sample_batch_size=args.sample_batch_size,
position_batch_size=args.position_batch_size,
backend=args.backend,
submit=not args.nosubmit)
else:
print description
print 'For a full set of options run:'
print 'python molgenis-impute.py --help'
print 'For documentation check: https://github.com/molgenis/molgenis-imputation'
| bsd-2-clause | -6,199,077,299,045,178,000 | 47.255814 | 169 | 0.752289 | false | 3.611836 | false | false | false |
vintol/listal-dl | Listal.py | 1 | 7404 | # Listal.py
# 08/11/2016 - 31/03/2019
# v 1.2.2
import urllib.request, urllib.parse
import http.cookiejar, ssl
import bs4
import queue
import threading
import re
import os
import sys
import argparse
import time
# Scrapers
def get_ipages():
global IMG, STOP_AT
while not qq.empty():
local = threading.local()
local.url = qq.get()
local.keep_going = True
local.skip = False
if STOP_AT is not None and int(local.url.split('//')[2]) > STOP_AT:continue
while local.keep_going:
try:local.html = urllib.request.urlopen(local.url,timeout=10)
except urllib.error.HTTPError as HERR:
if HERR.code == 404:
local.keep_going = False
local.skip = True
continue
except:continue
if local.html.getcode() == 200:local.keep_going = False
if local.skip:continue
local.data = local.html.read()
local.soup = bs4.BeautifulSoup(local.data,'lxml')
for each in local.soup.find_all('div','imagewrap-inner'):
local.img = int(each.a.get('href').strip().split('/')[-1])
if IMG is None:ipages.append(local.img)
elif local.img > IMG:ipages.append(local.img)
elif local.img == IMG:STOP_AT = int(local.url.split('//')[2])
else:pass
def get_images():
while not qq.empty():
local = threading.local()
local.url = qq.get()
local.keep_going = True
local.skip = True
local.retry = 0
while local.keep_going and local.retry < 5:
try:
local.retry += 1
local.html = urllib.request.urlopen(local.url,timeout=25)
if local.html.getcode() == 200:
local.keep_going = False
local.skip = False
except urllib.error.HTTPError as HERR:
if HERR is not None and HERR.code == 404:
local.keep_going = False
continue
except:continue
if local.skip:continue
for i in range(2):
try:
local.data = local.html.read()
images.append(find_image(local.data))
except:continue
break
# Functions
def mksoup(url):
tmp = urllib.request.urlopen(url)
return bs4.BeautifulSoup(tmp.read(),"lxml")
def find_image(data):
return bs4.BeautifulSoup(data,"lxml").find('img','pure-img').get('src').replace("https:","http:")
def post_req():
tmp = urllib.parse.urlencode({ 'listid' : list_id , 'offset' : offset})
return urllib.request.urlopen("https://www.listal.com/item-list/",tmp.encode())
def mkqueue(url):
global no_pics,no_pages
no_pics = int(mksoup(url).find('a','picturesbutton').span.text.strip())
no_pages = no_pics/50
if no_pages.is_integer():no_pages = int(no_pages)
else:no_pages = int(no_pages) + 1
for i in range(int(args.first_page),no_pages+1):qq.put(url+"/pictures//"+str(i))
def enqueue():
global qq,ipages
if not qq.empty():print("WARNING : Queue was not empty.")
qq = queue.Queue()
ipages = sorted(set(ipages))
for each in ipages:
qq.put("http://www.listal.com/viewimage/"+str(each)+"h")
def stop_at(IMG):
global ipages
tmp = []
for each in ipages:
if each > IMG:tmp.append(each)
ipages = tmp
def update_progress():
progress = 100 - int((100*qq.qsize()) / len(ipages))
pbar = "\r {:0>3}% [{:<50}] ({},{}) ".format(progress, '#'*int((progress/2)), (len(ipages)-qq.qsize()), len(ipages))
sys.stdout.write(pbar)
sys.stdout.flush()
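# Worked example of the progress arithmetic above (assumed numbers, for
# illustration only): with len(ipages) == 200 and qq.qsize() == 50,
# progress = 100 - int((100 * 50) / 200) = 75, the bar prints 37 '#'
# characters (int(75 / 2)) and the counter shows "(150,200)".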
def get_listinfo(url):
global list_type,list_id,list_name,total_pic,offset
soup = mksoup(url)
list_type = soup.find(id='customlistitems').get('data-listformat')
if list_type != "images":
print("This is not a Image list. Currently listal.dl suppots only Image lists.")
quit()
list_id = int(soup.find(id='customlistitems').get('data-listid'))
try:list_name = soup.find('div','headertitle').text.strip()
except AttributeError:list_name = urls.path[6:].replace('-',' ').title()
total_pic = int(soup.find(id='customlistitems').div.get('data-itemtotal'))
offset = int(soup.find('div','loadmoreitems').get('data-offset'))
for each in soup.find_all('div','imagelistbox'):
ipages.append(int(each.a.get('href').strip().split('/')[-1]))
def get_list():
global offset
while True:
data = post_req().read()
for each in sorted(set(re.findall("viewimage\\\/([0-9]{4,10})'" ,data.decode()))):
ipages.append(int(each))
offset = offset + 1
if offset == total_pic:break
def write():
if urls.path.startswith("/list/"):fhand = open(list_name+".txt",'a')
else:fhand = open(name+".txt",'a')
fhand.write("### {} : {} Images\n".format(finished,len(images)))
for each in images:fhand.write(each+"\n")
fhand.close()
# Global
qq = queue.Queue()
threads = []
ipages = []
images = []
IMG = None
STOP_AT = None
started = time.time()
# Main
parser = argparse.ArgumentParser(description='Scrape Images from \'listal.com\'.')
parser.add_argument('url', type=str,
help='URL to the List or Profile on listal.com.')
parser.add_argument('--from', dest='first_page', type = int, default = None, required = False,
help='The profile page no to start scraping images from')
parser.add_argument('--upto', dest='last_page' , type = int, default = None, required = False,
help='Scrap images only upto the page no.')
parser.add_argument('--threads', dest='threads', type = int, default = 10, required = False,
help='No. of threads to use.')
args = parser.parse_args()
urls = urllib.parse.urlparse(args.url)
if urls.netloc != 'www.listal.com':
print ("Check the Entered URL.")
quit()
#CookieJar Initiation
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(http.cookiejar.CookieJar()))
urllib.request.install_opener(opener)
if urls.path.startswith("/list/"):
if args.first_page is not None:print("Entered URL is of a list. The '--from' option is ignored.")
if args.last_page is not None:print("Entered URL is of a list. The '--upto' option is ignored.")
get_listinfo(urls.geturl())
get_list()
else:
urls = urllib.parse.urlparse(urls.geturl().split('/picture')[0])
name = urls.path[1:].replace('-',' ').title()
if args.first_page is None:args.first_page = 1
if args.last_page is not None:
for i in range(args.first_page,args.last_page+1):qq.put(args.url+"/pictures//"+str(i))
else:mkqueue(urls.geturl())
for n in range(args.threads):
t = threading.Thread(target=get_ipages)
threads.append(t)
t.start()
for t in threads:t.join()
print("Phase I Complete.",len(ipages),"Images Found.")
print("Time Taken :",time.strftime("%H:%M:%S",time.gmtime(time.time()-started)))
print("Phase II :")
enqueue()
threads.clear()
for n in range(args.threads):
t = threading.Thread(target=get_images)
threads.append(t)
t.start()
while not qq.empty():
update_progress()
sys.stdout.flush()
time.sleep(1)
for t in threads:t.join()
time_taken = time.time() - started
finished = time.strftime("%d/%m/%Y %H:%M",time.localtime())
write()
print("Time Taken :",time.strftime("%H:%M:%S",time.gmtime(time_taken)))
# END
| gpl-3.0 | 646,103,235,721,944,200 | 33.437209 | 120 | 0.603052 | false | 3.360872 | false | false | false |
Hitatm/Concentrator | app/views.py | 1 | 61580 | #coding:UTF-8
__author__ = 'dj'
from app import app
from flask import Flask, render_template, request, flash, redirect, url_for, send_from_directory
from forms import Upload, ProtoFilter,User_and_pwd
from utils.upload_tools import allowed_file, get_filetype, random_name
from utils.gxn_topo_handler import getfile_content,getall_topo,showdata_from_id,topo_filter
from utils.gxn_topo_decode import TopoDecode
from utils.gxn_get_sys_config import Config
from utils.connect import Connect
from utils.db_operate import DBClass
from utils.display import multipledisplay,singledisplay,NetID_list,NetID_all,AppID_all,selectall,node_time_display,topo_display,energy_display,flowdisplay,protodisplay,nodesearch_display,appflowdisplay
from utils.error import data_error_new,syn_error
from utils.old_data_display import Display, Modify
from utils.gxn_supervisor import getAllProcessInfo,stopProcess,startProcess,startAllProcesses,stopAllProcesses
import os
import collections
import time,datetime
from time import strftime
# import sqlite3
import socket
import json
import math
#make enumerate() available inside Jinja templates
app.jinja_env.globals['enumerate'] = enumerate
#global variables
PCAP_NAME = '' #name of the uploaded file
# PD = PcapDecode() #packet decoder
PDF_NAME = ''
# ---------------------------------------------------------------------------
PCAPS = 'yeslogin' #login
HIT_USER ='root'#login user name
HIT_PWD ='xiaoming' #default password
TOPODATA = None #login
REALDATA = None #login
DATABASE =DBClass()
# TOPODATA_DICT =collections.OrderedDict()
# TPDECODE =TopoDecode()
NODE_DICT_NET=dict()
NODE_SET=set()
#--------------------------------------------------------Home page and upload---------------------------------------------
#home page
@app.route('/', methods=['POST', 'GET'])
@app.route('/index/', methods=['POST', 'GET'])
def index():
if PCAPS == None:
return redirect(url_for('login'))
else:
return render_template('./home/index.html')
# return render_template('./login/login.html')
#time-range selection for historical data
@app.route('/upload/', methods=['POST', 'GET'])
@app.route('/upload', methods=['POST', 'GET'])
def upload():
if PCAPS==None:
        return redirect(url_for('login'))
else:
json_dict = dict()
configfile = Connect()
json_dict = configfile.display_config()
return render_template('./upload/upload.html',json_dict = json_dict)
@app.route('/upload_modify/', methods=['POST', 'GET'])
@app.route('/upload_modify', methods=['POST', 'GET'])
def upload_modify():
c = Connect()
config_dicts = c.all_config_json() # read config.json and put all items in this dict
if request.method == 'POST':
val1 = request.form.get("id")
if val1:
config_dicts["id"] = val1
val2 = request.form.get("HeartIntSec")
if val2:
config_dicts["HeartIntSec"] = val2
val3 = request.form.get("AckHeartInt")
if val3:
config_dicts["AckHeartInt"] = val3
val4 = request.form.get("rootAddr")
if val4:
config_dicts["rootAddr"] = val4
val5 = request.form.get("ftpuser")
if val5:
config_dicts["ftpuser"] = val5
val6 = request.form.get("ftphost")
if val6:
config_dicts["ftphost"] = val6
val7 = request.form.get("ftpPwd")
if val7:
config_dicts["ftpPwd"] = val7
val8 = request.form.get("ftpPort")
if val8:
config_dicts["ftpPort"] = val8
val9 = request.form.get("serverIp")
if val9:
config_dicts["serverIp"] = val9
json_config_dicts = json.dumps(config_dicts,sort_keys=True,indent =4,separators=(',', ': '),encoding="gbk",ensure_ascii=True)
# print json_config_dicts
# conf_file = os.path.join(app.config['CONFIG_FOLDER'],"config.json")
# with open(conf_file, 'w') as f:
# f.write(json_config_dicts)
# f.close()
c.update_config(json_config_dicts)
return "It works"
else:
return "Error when writing to the config.json file"
# rtmetric display
@app.route('/rtmetricdisplay/', methods=['POST', 'GET'])
@app.route('/rtmetricdisplay', methods=['POST', 'GET'])
def rtmetricdisplay():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
time1=time.time()
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
rtxdata_list = multipledisplay(start_time,end_time,"rtimetric")
return render_template('./dataanalyzer/rtmetricdisplay.html',rtxdata_list=rtxdata_list[0],time=rtxdata_list[1])
else:
t = time.time()
current_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
previous_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(t - 6*60*60))
rtxdata_list = multipledisplay(previous_time,current_time,"rtimetric")
return render_template('./dataanalyzer/rtmetricdisplay.html',rtxdata_list=rtxdata_list[0],time=rtxdata_list[1])
#electric current over time
@app.route('/currentdisplay/', methods=['POST', 'GET'])
@app.route('/currentdisplay', methods=['POST', 'GET'])
def currentdisplay():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
currentdata_list = multipledisplay(start_time,end_time,"electric")
return render_template('./dataanalyzer/currentdisplay.html',currentdata_list=currentdata_list[0],time=currentdata_list[1])
else:
t = time.time()
current_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
previous_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(t - 6*60*60))
currentdata_list = multipledisplay(previous_time,current_time,"electric")
return render_template('./dataanalyzer/currentdisplay.html',currentdata_list=currentdata_list[0],time=currentdata_list[1])
#time synchronization display
@app.route('/syntime/', methods=['POST', 'GET'])
@app.route('/syntime', methods=['POST', 'GET'])
def syntime():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
syntimedata_list = multipledisplay(start_time,end_time,"syntime")
return render_template('./dataanalyzer/syntime.html',syntimedata_list=syntimedata_list[0],time=syntimedata_list[1])
else:
t = time.time()
current_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
previous_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(t - 6*60*60))
syntimedata_list = multipledisplay(previous_time,current_time,"syntime")
return render_template('./dataanalyzer/syntime.html',syntimedata_list=syntimedata_list[0],time=syntimedata_list[1])
# node energy consumption display
@app.route('/energydisplay/', methods=['POST', 'GET'])
@app.route('/energydisplay', methods=['POST', 'GET'])
def energydisplay():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
ID_list = NetID_list(start_time,end_time)
data = energy_display(start_time,end_time)
return render_template('./dataanalyzer/energydisplay.html', nodecount=len(ID_list), ID_list=ID_list, cpu_list=data[0], lpm_list=data[1], tx_list=data[2], rx_list=data[3],time=data[4])
else:
t = time.time()
current_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
previous_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(t - 6*60*60))
ID_list = NetID_list(previous_time,current_time)
data = energy_display(previous_time,current_time)
return render_template('./dataanalyzer/energydisplay.html', nodecount=len(ID_list), ID_list=ID_list, cpu_list=data[0], lpm_list=data[1], tx_list=data[2], rx_list=data[3],time=data[4])
# sampled voltage display
@app.route('/voltagedisplay/', methods=['POST', 'GET'])
@app.route('/voltagedisplay', methods=['POST', 'GET'])
def voltagedisplay():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
voltagedata_list = multipledisplay(start_time,end_time,"volage")
return render_template('./dataanalyzer/voltagedisplay.html',voltagedata_list=voltagedata_list[0],time=voltagedata_list[1])
else:
t = time.time()
current_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
previous_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(t - 6*60*60))
voltagedata_list = multipledisplay(previous_time,current_time,"volage")
return render_template('./dataanalyzer/voltagedisplay.html',voltagedata_list=voltagedata_list[0],time=voltagedata_list[1])
#reboot statistics display
@app.route('/restartdisplay/', methods=['POST', 'GET'])
@app.route('/restartdisplay', methods=['POST', 'GET'])
def restartdisplay():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
dataset = singledisplay(start_time,end_time,"reboot")
return render_template('./dataanalyzer/restartdisplay.html', nodecount = len(dataset[0]), ID_list = dataset[0], reboot_list = dataset[1],time=dataset[2])
else:
t = time.time()
current_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
previous_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(t - 6*60*60))
dataset = singledisplay(previous_time,current_time,"reboot")
return render_template('./dataanalyzer/restartdisplay.html', nodecount = len(dataset[0]), ID_list = dataset[0], reboot_list = dataset[1],time=dataset[2])
#node neighbour count display
@app.route('/nbdisplay/', methods=['POST', 'GET'])
@app.route('/nbdisplay', methods=['POST', 'GET'])
def nbdisplay():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
data_list = multipledisplay(start_time,end_time,"numneighbors")
return render_template('./dataanalyzer/nbdisplay.html',data_list=data_list[0],time=data_list[1])
else:
t = time.time()
current_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
previous_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(t - 6*60*60))
data_list = multipledisplay(previous_time,current_time,"numneighbors")
return render_template('./dataanalyzer/nbdisplay.html',data_list=data_list[0],time=data_list[1])
#beacon interval display
@app.route('/beacondisplay/', methods=['POST', 'GET'])
@app.route('/beacondisplay', methods=['POST', 'GET'])
def beacondisplay():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
data_list = multipledisplay(start_time,end_time,"beacon")
return render_template('./dataanalyzer/beacondisplay.html',data_list=data_list[0],time=data_list[1])
else:
t = time.time()
current_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
previous_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(t - 6*60*60))
data_list = multipledisplay(previous_time,current_time,"beacon")
return render_template('./dataanalyzer/beacondisplay.html',data_list=data_list[0],time=data_list[1])
# deployment information table
@app.route('/deploy_info/', methods=['POST', 'GET'])
@app.route('/deploy_info', methods=['POST', 'GET'])
def deploy_info():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
else:
nodeplace = DATABASE.my_db_execute("select ID, NodeID, MeterID, Place from NodePlace;",None)
return render_template('./dataanalyzer/deploy_info.html',nodeplace = nodeplace)
@app.route('/deploy_modify/', methods=['POST', 'GET'])
@app.route('/deploy_modify', methods=['POST', 'GET'])
def deploy_modify():
    flag = 0 #flag==0 unchanged; flag==1 modified (NodeID already 4 digits); flag==2 NodeID too long; flag==3 NodeID has 3 digits; flag==4 2 digits; flag==5 1 digit
if request.method == 'POST':
ID = request.form["ID"]
old_data = DATABASE.my_db_execute("select ID, NodeID, MeterID, Place from NodePlace where ID=?;",(ID,))
# conn.close()
NodeID = str(request.form["NodeID"])
MeterID = str(request.form["MeterID"])
Place = request.form["Place"]
if len(NodeID) == 4:
# print old_data[0]
if (str(old_data[0][1]) != NodeID):
flag = 1
elif (str(old_data[0][2]) != MeterID):
flag = 1
elif (old_data[0][3] != Place):
flag = 1
else:
flag = 0
elif len(NodeID) > 4:
flag = 2
elif len(NodeID) == 3:
flag = 3
elif len(NodeID) == 2:
flag = 4
elif len(NodeID) == 1:
flag = 5
# print ID, NodeID, MeterID, Place
if flag==0:
return "未进行更改"
elif flag==2:
return "节点ID长度过长,请重新输入!(4位)"
elif flag==3:
node=DATABASE.my_db_execute("select NodeID from NodePlace where NodeID=?;",("0"+str(NodeID),))
if node:
return "Error,节点已存在" #节点已存在
else:
DATABASE.db_del_or_insert("delete from NodePlace where ID = ?;",(ID,))
DATABASE.db_del_or_insert("insert into NodePlace (ID,NodeID,Place,MeterID) VALUES (?,?,?,?);",(ID,str("0"+str(NodeID)),Place,str(MeterID)))
return "更改成功"
elif flag==4:
node=DATABASE.my_db_execute("select NodeID from NodePlace where NodeID=?;",("00"+str(NodeID),))
if node:
return "Error,节点已存在" #节点已存在
else:
DATABASE.db_del_or_insert("delete from NodePlace where ID = ?;",(ID,))
DATABASE.db_del_or_insert("insert into NodePlace (ID,NodeID,Place,MeterID) VALUES (?,?,?,?);",(ID,str("00"+str(NodeID)),Place,str(MeterID)))
return "更改成功"
elif flag==5:
node=DATABASE.my_db_execute("select NodeID from NodePlace where NodeID=?;",("000"+str(NodeID),))
if node:
return "Error,节点已存在" #节点已存在
else:
DATABASE.db_del_or_insert("delete from NodePlace where ID = ?;",(ID,))
DATABASE.db_del_or_insert("insert into NodePlace (ID,NodeID,Place,MeterID) VALUES (?,?,?,?);",(ID,str("000"+str(NodeID)),Place,str(MeterID)))
return "更改成功"
elif flag==1:
node=DATABASE.my_db_execute("select NodeID from NodePlace where NodeID=?;",(NodeID,))
if node:
return "Error,节点已存在" #节点已存在
else:
DATABASE.db_del_or_insert("delete from NodePlace where ID = ?;",(ID,))
DATABASE.db_del_or_insert("insert into NodePlace (ID,NodeID,Place,MeterID) VALUES (?,?,?,?);",(ID,NodeID,Place,str(MeterID)))
return "更改成功"
else:
DATABASE.db_del_or_insert("delete from NodePlace where ID = ?;",(ID,))
DATABASE.db_del_or_insert("insert into NodePlace (ID,NodeID,Place,MeterID) VALUES (?,?,?,?);",(ID,str(NodeID),Place,str(MeterID)))
return "更改成功"
@app.route('/deploy_del/', methods=['POST', 'GET'])
@app.route('/deploy_del', methods=['POST', 'GET'])
def deploy_del():
del_list = list()
if request.method == 'POST':
get_list = request.form.getlist("del_list[]")
for item in get_list:
del_list.append(item.encode('ascii'))
# print del_list
for item in del_list:
if item:
DATABASE.db_del_or_insert("delete from NodePlace where ID=? ;",(item,))
nodeplace = DATABASE.my_db_execute("select ID, NodeID, MeterID, Place from NodePlace;",None)
return render_template('./dataanalyzer/deploy_info.html',nodeplace = nodeplace)
@app.route('/deploy_add/', methods=['POST', 'GET'])
@app.route('/deploy_add', methods=['POST', 'GET'])
def deploy_add():
databasepath = os.path.join(app.config['TOPO_FOLDER'],"topo3.db")
if request.method == 'POST':
NodeID = str(request.form["NodeID"])
MeterID = str(request.form["MeterID"])
Place = request.form["Place"]
# print NodeID, MeterID, Place
if len(NodeID) == 4:
node=DATABASE.my_db_execute("select NodeID from NodePlace where NodeID=?;",(NodeID,))
if node:
return "Error,节点已存在" #节点已存在
else:
DATABASE.db_del_or_insert("insert into NodePlace (NodeID,Place,MeterID) VALUES (?,?,?);",(str(NodeID),Place,str(MeterID)))
elif len(NodeID) > 4:
return "节点ID长度过长,请重新输入!(4位)"
elif len(NodeID) == 3:
node=DATABASE.my_db_execute("select NodeID from NodePlace where NodeID=?;",("0"+str(NodeID),))
if node:
return "Error,节点已存在" #节点已存在
else:
DATABASE.db_del_or_insert("insert into NodePlace (NodeID,Place,MeterID) VALUES (?,?,?);",("0"+str(NodeID),Place,str(MeterID)))
elif len(NodeID) == 2:
node=DATABASE.my_db_execute("select NodeID from NodePlace where NodeID=?;",("00"+str(NodeID),))
if node:
return "Error,节点已存在" #节点已存在
else:
DATABASE.db_del_or_insert("insert into NodePlace (NodeID,Place,MeterID) VALUES (?,?,?);",("00"+str(NodeID),Place,str(MeterID)))
elif len(NodeID) == 1:
node=DATABASE.my_db_execute("select NodeID from NodePlace where NodeID=?;",("000"+str(NodeID),))
if node:
return "Error,节点已存在" #节点已存在
else:
DATABASE.db_del_or_insert("insert into NodePlace (NodeID,Place,MeterID) VALUES (?,?,?);",("000"+str(NodeID),Place,str(MeterID)))
nodeplace = DATABASE.my_db_execute("select ID, NodeID, MeterID, Place from NodePlace;",None)
return "添加成功"
#node information query
@app.route('/node_search/', methods=['POST', 'GET'])
@app.route('/node_search', methods=['POST', 'GET'])
def node_search():
nodeid_list = NetID_all()
nodeid_list.sort()
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
nodepick = request.form['nodeselect']
data = nodesearch_display(start_time,end_time,nodepick)
return render_template('./dataanalyzer/node_search.html',
nodeid=nodepick,nodelist = data[0],cpu=data[1],lpm=data[2],tx=data[3],rx=data[4],
voltage_list=data[5],time_list_1=data[6],time_list_2=data[7],current_list=data[8],time_list_3=data[9],rtx_list=data[10],deploy=data[11],time=data[12])
else:
nodepick = nodeid_list[0]
end_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
start_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time() - 6*60*60))
data = nodesearch_display(start_time,end_time,nodepick)
return render_template('./dataanalyzer/node_search.html',
nodeid=str(nodepick),nodelist = data[0],cpu=data[1],lpm=data[2],tx=data[3],rx=data[4],
voltage_list=data[5],time_list_1=data[6],time_list_2=data[7],current_list=data[8],time_list_3=data[9],rtx_list=data[10],deploy=data[11],time=data[12])
#node deployment information query
@app.route('/deploysearch/', methods=['POST', 'GET'])
@app.route('/deploysearch', methods=['POST', 'GET'])
def deploysearch():
nodeid_list = list()
nodeid = DATABASE.my_db_execute('select distinct NodeID from NodePlace;',None)
for i in range(len(nodeid)):
nodeid_list.append(nodeid[i][0].encode('ascii'))
nodeid_list.sort()
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
nodepick = request.form['nodeselect']
# print nodepick
deploy_info = DATABASE.my_db_execute('select NodeID, MeterID, Place from NodePlace where NodeID == ?;',(nodepick,))
deploy = list()
deploy.append(deploy_info[0][0].encode('ascii'))
deploy.append(deploy_info[0][1].encode('ascii'))
deploy.append(deploy_info[0][2].encode('ascii'))
index_of_pick=nodeid_list.index(nodepick)
temp=nodeid_list[index_of_pick]
nodeid_list[index_of_pick]=nodeid_list[0]
nodeid_list[0]=temp
nodepick = "\""+nodepick+"\""
return render_template('./dataanalyzer/deploysearch.html',
nodeid=nodepick,nodelist = nodeid_list,deploy=deploy)
else:
return render_template('./dataanalyzer/deploysearch.html',
nodeid="",nodelist = nodeid_list,deploy=[])
@app.route('/network_data/', methods=['POST', 'GET'])
@app.route('/network_data', methods=['POST', 'GET'])
def network_data():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
select = request.form['filter_type']
nid = request.form['value']
if select == "all":
pcaps = DATABASE.my_db_execute("select * from NetMonitor where currenttime >= ? and currenttime <= ?;",(start_time, end_time))
timedisplay = ("\""+start_time + ' - ' + end_time+u"\",查询所有节点")
elif select == "ID":
pcaps = DATABASE.my_db_execute("select * from NetMonitor where currenttime >= ? and currenttime <= ? and NodeID == ?;",(start_time, end_time, nid))
timedisplay = ("\""+start_time + ' - ' + end_time+u"\",节点ID为:\""+nid+"\"")
elif select == "parentID":
pcaps = DATABASE.my_db_execute("select * from NetMonitor where currenttime >= ? and currenttime <= ? and ParentID == ?;",(start_time, end_time, nid))
timedisplay = ("\""+start_time + ' - ' + end_time+u"\",父节点ID为:\""+nid+"\"")
else:
pcaps = DATABASE.my_db_execute("select * from NetMonitor where currenttime >= ? and currenttime <= ?;",(start_time, end_time))
timedisplay = ("\""+start_time + ' - ' + end_time+u"\",查询所有节点")
return render_template('./dataanalyzer/network_data.html',pcaps=pcaps,length=len(pcaps),time=timedisplay)
else:
t = time.time()
current_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
previous_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(t - 6*60*60))
timedisplay = ("\""+previous_time + ' - ' + current_time+u"\",未选取节点")
pcaps = DATABASE.my_db_execute("select * from NetMonitor where currenttime >= ? and currenttime <= ?;",(previous_time, current_time))
return render_template('./dataanalyzer/network_data.html',pcaps=pcaps,length=len(pcaps),time=timedisplay)
@app.route('/app_data/', methods=['POST', 'GET'])
@app.route('/app_data', methods=['POST', 'GET'])
def app_data():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
select = request.form['filter_type']
nid = request.form['value']
if select == "all":
pcaps = DATABASE.my_db_execute("select * from ApplicationData where currenttime >= ? and currenttime <= ?;",(start_time, end_time))
timedisplay = ("\""+start_time + ' - ' + end_time+u"\",查询所有节点")
elif select == "ID":
pcaps = DATABASE.my_db_execute("select * from ApplicationData where currenttime >= ? and currenttime <= ? and NodeID == ?;",(start_time, end_time, nid))
timedisplay = ("\""+start_time + ' - ' + end_time+u"\",节点ID为:\""+nid+"\"")
else:
pcaps = DATABASE.my_db_execute("select * from ApplicationData where currenttime >= ? and currenttime <= ?;",(start_time, end_time))
timedisplay = ("\""+start_time + ' - ' + end_time+u"\",查询所有节点")
lendict = dict()
for pcap in pcaps:
lendict[int(pcap[0])] = len(str(pcap[3]))
return render_template('./dataanalyzer/app_data.html',appdata=pcaps,lendict = lendict,length=len(pcaps),time=timedisplay)
else:
t = time.time()
current_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
previous_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(t - 6*60*60))
timedisplay = ("\""+previous_time + ' - ' + current_time+u"\",未选取节点")
pcaps = DATABASE.my_db_execute("select * from ApplicationData where currenttime >= ? and currenttime <= ?;",(previous_time, current_time))
lendict = dict()
for pcap in pcaps:
lendict[int(pcap[0])] = len(str(pcap[3]))
return render_template('./dataanalyzer/app_data.html',appdata=pcaps,lendict = lendict,length=len(pcaps),time=timedisplay)
#--------------------------------------------Communication with the backend----------------------------------------------------
@app.route('/monitor/', methods=['POST', 'GET'])
@app.route('/monitor', methods=['POST', 'GET'])
def monitor():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
else:
display = Display()
        send_data = display.send_display() #display previously saved data
write_data = display.write_display()
adjtime_data = display.adjtime_display()
display_datadict = display.parameters_display()
# print display_datadict
return render_template('./client/monitor.html',send_data = send_data, write_data = write_data, adjtime_data = adjtime_data, display_datadict = display_datadict)
@app.route('/instruction_send/', methods=['POST', 'GET'])
@app.route('/instruction_send', methods=['POST', 'GET'])
def instruction_send():
    #send a command to the nodes
    modify = Modify() #persist the new configuration to the config file
sendins = Connect()
datalist = []
dicts = {}
datalist.append("80")
datalength = ""
if request.method == 'POST':
recvdata = request.form['emit_data']
if recvdata:
modify.send_modify(recvdata)
if (len(recvdata)%2 != 0):
recvdata = "0"+recvdata
if (len(recvdata)<32):
datalength = "0"+hex(len(recvdata)/2)[2:]
else:
datalength = hex(len(recvdata))[2:]
else:
display = Display()
            recvdata = display.send_display() #previously saved data
transmit_type = request.form['mySelect']
nodeip = request.form['nodeIP']
if datalength:
datalist.append(datalength)
datalist.append(recvdata)
data = ''.join(datalist)
dicts["type"] = transmit_type
dicts["pama_data"] = data
if (transmit_type=="mcast"):
ins = json.dumps(dicts)
else:
addrlist = []
addrlist.append(nodeip)
dicts["addrList"] = addrlist
ins = json.dumps(dicts)
sendins.TCP_send(ins)
# print ins
return render_template('./client/monitor.html',display_datadict=None)
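# Worked example of the frame assembled above (illustrative values only):
# for recvdata = "0A1B2C3D" (8 hex characters, i.e. 4 bytes) the length
# byte is "0" + hex(8/2)[2:] == "04", so the pama_data sent downstream is
# "80" + "04" + "0A1B2C3D" == "80040A1B2C3D".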
@app.route('/instruction_write/', methods=['POST', 'GET'])
@app.route('/instruction_write', methods=['POST', 'GET'])
def instruction_write():
    #flash/write command
    modify = Modify() #persist the new configuration to the config file
sendins = Connect()
datalist = []
datalist.append("82")
datalength = ""
dicts = {}
if request.method == 'POST':
recvdata = request.form['write_data']
if recvdata:
modify.write_modify(recvdata)
if (len(recvdata)%2 != 0):
recvdata = "0"+recvdata
if (len(recvdata)<32):
datalength = "0"+hex(len(recvdata)/2)[2:]
else:
datalength = hex(len(recvdata))[2:]
else:
display = Display()
            recvdata = display.write_display() #previously saved data
transmit_type = request.form['mySelect2']
nodeip = request.form['nodeIP2']
if datalength:
datalist.append(datalength)
datalist.append(recvdata)
data = ''.join(datalist)
dicts["type"] = transmit_type
dicts["pama_data"] = data
if (transmit_type=="mcast"):
ins = json.dumps(dicts)
else:
addrlist = []
addrlist.append(nodeip)
dicts["addrList"] = addrlist
ins = json.dumps(dicts)
sendins.TCP_send(ins)
return render_template('./client/monitor.html',display_datadict=None)
@app.route('/instruction_restart/', methods=['POST', 'GET'])
@app.route('/instruction_restart', methods=['POST', 'GET'])
#send a reboot command
def instruction_restart():
sendins = Connect()
dicts = {}
dicts["pama_data"] = "C0"
if request.method == 'POST':
transmit_type = request.form['mySelect4']
nodeip = request.form['nodeIP4']
dicts["type"] = transmit_type
if (transmit_type=="mcast"):
ins = json.dumps(dicts)
else:
addrlist = []
addrlist.append(nodeip)
dicts["addrList"] = addrlist
ins = json.dumps(dicts)
# print ins
sendins.TCP_send(ins)
return render_template('./client/monitor.html',display_datadict=None)
@app.route('/instruction_reset/', methods=['POST', 'GET'])
@app.route('/instruction_reset', methods=['POST', 'GET'])
#restore factory settings
def instruction_reset():
sendins = Connect()
dicts = {}
dicts["pama_data"] = "C1"
if request.method == 'POST':
transmit_type = request.form['mySelect5']
nodeip = request.form['nodeIP5']
dicts["type"] = transmit_type
if (transmit_type=="mcast"):
ins = json.dumps(dicts)
else:
addrlist = []
addrlist.append(nodeip)
dicts["addrList"] = addrlist
ins = json.dumps(dicts)
sendins.TCP_send(ins)
# print ins
return render_template('./client/monitor.html',display_datadict=None)
@app.route('/instruction_adjtime/', methods=['POST', 'GET'])
@app.route('/instruction_adjtime', methods=['POST', 'GET'])
def instruction_adjtime():
    #set the time-calibration period of the root node
    modify = Modify() #persist the new configuration to the config file
sendins = Connect()
dicts = {}
if request.method == 'POST':
recvdata = request.form['timeperiod']
if recvdata:
modify.adjtime_modify(recvdata)
else:
display = Display()
            recvdata = display.adjtime_display() #previously saved data
dicts["pama_data"] = recvdata
dicts["type"] = "pama_corr"
ins = json.dumps(dicts)
sendins.TCP_send(ins)
return render_template('./client/monitor.html',display_datadict=None)
@app.route('/instruction3/', methods=['POST', 'GET'])
@app.route('/instruction3', methods=['POST', 'GET'])
#send the network parameter configuration command
def instruction3():
    modify = Modify() #persist the new configuration to the config file
sendins = Connect()
dicts= {}
dicts["type"] = "mcast_ack"
data0 = "40"
datalist = []
datalist.append(data0)
if request.method == 'POST':
data1 = request.form['PANID']
if data1:
modify.PANID_modify(data1)
data1 = hex(int(data1))[2:]
else:
data1 = "ff"
datalist.append(data1)
data2 = request.form['channel']
if data2:
modify.channel_modify(data2)
data2 = hex(int(data2))[2:]
else:
data2 = "ff"
datalist.append(data2)
data3 = request.form['CCA']
if data3:
modify.CCA_modify(data3)
data3 = hex(int(data3))[2:]
else:
data3 = "ff"
datalist.append(data3)
data4 = request.form['emitpower']
if data4:
modify.emitpower_modify(data4)
data4 = hex(int(data4))[2:]
else:
data4 = "ff"
datalist.append(data4)
data5 = request.form['CCAcheckingperiod']
if data5:
modify.CCAcheckingperiod_modify(data5)
data5 = hex(int(data5))[2:]
else:
data5 = "ff"
datalist.append(data5)
data6 = request.form['inactive']
if data6:
modify.inactive_modify(data6)
data6 = hex(int(data6))[2:]
else:
data6 = "ff"
datalist.append(data6)
data7 = request.form['DIO_minlen']
if data7:
modify.DIO_minlen_modify(data7)
data7 = hex(int(data7))[2:]
else:
data7 = "ff"
datalist.append(data7)
data8 = request.form['DIO_max']
if data8:
modify.DIO_max_modify(data8)
data8 = hex(int(data8))[2:]
else:
data8 = "ff"
datalist.append(data8)
# cli.send(json.dumps(dicts).encode('utf-8'))
data = ''.join(datalist)
dicts["pama_data"] = data
ins = json.dumps(dicts)
# print "adsadsfasdf"
sendins.TCP_send(ins)
# return
return render_template('./client/monitor.html',display_datadict=None)
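# Illustrative example of the parameter frame built above (made-up form
# values): PANID=1 -> "1", channel=11 -> "b", CCA=3 -> "3",
# emitpower=31 -> "1f", CCAcheckingperiod=8 -> "8", inactive=2 -> "2",
# DIO_minlen=12 -> "c", DIO_max=8 -> "8"; every field left empty
# contributes "ff". pama_data is simply "40" followed by these values
# joined in the order they were appended.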
@app.route('/update_net/', methods=['POST', 'GET'])
@app.route('/update_net', methods=['POST', 'GET'])
#fetch network monitoring data
def update_net():
global NODE_DICT_NET
dicts= {}
for node ,value in NODE_DICT_NET.items():
# print node,value
temp = DATABASE.my_db_execute("select nodeID, count(nodeID) from NetMonitor where nodeID == ?", (node,))
# print temp
if int(temp[0][1])-value>0:
# NUMBER_NET+= 1
if(str(temp[0][0]) in NODE_SET):
NODE_SET.remove(str(temp[0][0]))
if len(NODE_DICT_NET):
dicts["total"] = len(NODE_DICT_NET)
dicts["now"] = dicts["total"] - len(NODE_SET)
else:
dicts["total"] = 1
dicts["now"] = 0
ins = json.dumps(dicts)
# print ins
return ins
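# The JSON returned above has the shape (values are illustrative):
#   {"total": 12, "now": 9}
# i.e. how many nodes are known and how many of them have reported new
# NetMonitor rows since post_monitor_data() took its snapshot of the counts.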
@app.route('/scheduling/',methods=['POST', 'GET'])
def scheduling():
syn_config = Config()
l=syn_config.get_active_list()
dicts={'lists':l}
lists= json.dumps(dicts,sort_keys=True,indent =4,separators=(',', ': '),encoding="gbk",ensure_ascii=True)
return render_template('./client/scheduling.html',scheduleNow=lists)
@app.route('/setall_schedule/',methods=['POST', 'GET'])
@app.route('/setall_schedule',methods=['POST', 'GET'])
def setall_schedule():
if request.method == 'POST':
syn_config = Config()
syn_config.bitmap_checkall()
return "1"
@app.route('/cancelall_schedule/',methods=['POST', 'GET'])
@app.route('/cancelall_schedule',methods=['POST', 'GET'])
def cancelall_schedule():
if request.method == 'POST':
syn_config = Config()
syn_config.bitmap_cancelall()
return "2"
@app.route('/recommend_schedule1/',methods=['POST', 'GET'])
@app.route('/recommend_schedule1',methods=['POST', 'GET'])
def recommend_schedule1():
if request.method == 'POST':
syn_config = Config()
syn_config.recommend_schedule1()
return "2"
@app.route('/recommend_schedule2/',methods=['POST', 'GET'])
@app.route('/recommend_schedule2',methods=['POST', 'GET'])
def recommend_schedule2():
if request.method == 'POST':
syn_config = Config()
syn_config.recommend_schedule2()
return "2"
@app.route('/recommend_schedule3/',methods=['POST', 'GET'])
@app.route('/recommend_schedule3',methods=['POST', 'GET'])
def recommend_schedule3():
if request.method == 'POST':
syn_config = Config()
syn_config.recommend_schedule3()
return "2"
@app.route('/update_schedule/',methods=['POST', 'GET'])
def update_schedule():
syn_config = Config()
sendins = Connect()
senddicts = {}
if request.method == 'POST':
data = request.get_json()
bitmap_array = data['x']
if not bitmap_array:
bitmap_array = [0]*18
syn_config.set_SynBitMap(bitmap_array)
config_dict =syn_config.get_New_Synconfig()
period = data['p']
config_dict["bitmap"]=syn_config.format_To_SendBitMap(config_dict["bitmap"])
if period:
syn_config.get_syn_period(period)
# config_dict["bitmap"]=syn_config.format_To_SendBitMap(config_dict["bitmap"])
senddicts["pama_data"] = config_dict
senddicts["type"] = "pama_syn"
update_synperiod_ins = json.dumps(senddicts)
sendins.TCP_send(update_synperiod_ins)
# print update_synperiod_ins
else:
bitmaplist = config_dict["bitmap"]
subkey = ['minute', 'seqNum', 'level', 'bitmap', 'second', 'hour']
update_schedule_dict = {key:config_dict[key] for key in subkey}
senddicts["pama_data"] = update_schedule_dict
senddicts["type"] = "schedule"
update_schedule_ins = json.dumps(senddicts)
config_dict["bitmap"] = bitmaplist
sendins.TCP_send(update_schedule_ins)
# print update_schedule_ins
l=syn_config.get_active_list()
dicts={'lists':l}
lists= json.dumps(dicts)
return render_template('./client/scheduling.html',scheduleNow=lists)
#reporting / monitoring control
@app.route('/sendmonitor/', methods=['POST', 'GET'])
@app.route('/sendmonitor', methods=['POST', 'GET'])
def sendmonitor():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
else:
display = Display()
        display_data = display.monitor_update_period_display() #display previously saved data
return render_template('./client/sendmonitor.html', display_data = display_data)
@app.route('/monitor_update_period/', methods=['POST', 'GET'])
@app.route('/monitor_update_period', methods=['POST', 'GET'])
# modify the reporting period of network monitoring data
def monitor_update_period():
    modify = Modify() #persist the new configuration to the config file
sendins = Connect()
dicts = {}
if request.method == 'POST':
recvdata = request.form['update_period']
if recvdata:
modify.monitor_update_period_modify(recvdata)
else:
display = Display()
recvdata = display.monitor_update_period_display()
if (int(recvdata)<16):
dicts["pama_data"] = "410" + hex(int(recvdata))[2:]
else:
dicts["pama_data"] = "41"+ hex(int(recvdata))[2:]
dicts["type"] = "mcast_ack"
ins = json.dumps(dicts)
sendins.TCP_send(ins)
# print ins
return render_template('./client/sendmonitor.html')
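# Example of the pama_data built above (illustrative values): an update
# period of 30 gives "41" + hex(30)[2:] == "411e", while a period of 5
# (< 16) is zero-padded to "4105".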
@app.route('/post_monitor_data/', methods=['POST', 'GET'])
@app.route('/post_monitor_data', methods=['POST', 'GET'])
#command: report network monitoring data
def post_monitor_data():
global NODE_DICT_NET
# global NUMBER_NET
global NODE_SET
NODE_SET = set()
# NUMBER_NET=0
nodes = list(DATABASE.my_db_execute("select distinct NodeID from NodePlace;",None))
# nodes = list(c.fetchall()) #tuple -- list
total = len(nodes)
previous = 0 #total - len(nodes)
now = previous
sendins = Connect()
dicts = {}
if request.method == 'GET':
for node in nodes:
NODE_SET.add(str(node[0]))
temp = DATABASE.my_db_execute("select nodeID, count(nodeID) from NetMonitor where nodeID == ?", (node))
NODE_DICT_NET[temp[0][0]] = temp[0][1]
dicts["pama_data"] = "00"
dicts["type"] = "mcast"
ins = json.dumps(dicts)
sendins.TCP_send(ins)
# print ins
return render_template('./client/sendmonitor.html')
@app.route('/post_config/', methods=['POST', 'GET'])
@app.route('/post_config', methods=['POST', 'GET'])
#command: report the network parameter configuration
def post_config():
sendins = Connect()
dicts = {}
if request.method == 'POST':
dicts["pama_data"] = "01"
dicts["type"] = "mcast"
ins = json.dumps(dicts)
sendins.TCP_send(ins)
return render_template('./client/sendmonitor.html')
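# For reference, the two broadcast opcodes used by the views above are
# "00" (post_monitor_data: ask nodes to report network-monitoring data) and
# "01" (post_config: ask nodes to report their parameter configuration);
# both are sent as {"type": "mcast", "pama_data": <opcode>}.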
#--------------------------------------------Authentication and login---------------------------------------------------
@app.route('/login/',methods=['POST', 'GET'])
def login():
login_msg=User_and_pwd()
if request.method == 'GET':
return render_template('./login/login.html')
elif request.method == 'POST':
USERNAME = login_msg.username.data
PASSWRD = login_msg.password.data
if USERNAME==HIT_USER and PASSWRD==HIT_PWD:
global PCAPS
PCAPS= 'yes:'
return render_template('./home/index.html')
else:
flash(u"用户名或密码错误!")
return render_template('./login/login.html')
@app.route('/logout/',methods=['POST', 'GET'])
def logout():
global PCAPS
PCAPS = None
return redirect(url_for('login'))
#-------------------------------------------Data analysis----------------------------------------------------
#protocol analysis
@app.route('/protoanalyzer/', methods=['POST', 'GET'])
def protoanalyzer():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
data = protodisplay(start_time,end_time)
return render_template('./dataanalyzer/protoanalyzer.html',num_of_nodes=data[0],postrate=data[1] ,post=data[2], thispostrate=data[3] , http_key=data[4], http_value=data[5] ,nodecount=len(data[4]),time=data[6])
else:
t = time.time()
current_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
previous_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(t - 6*60*60))
data = protodisplay(previous_time,current_time)
return render_template('./dataanalyzer/protoanalyzer.html',num_of_nodes=data[0],postrate=data[1] ,post=data[2], thispostrate=data[3] , http_key=data[4], http_value=data[5] ,nodecount=len(data[4]),time=data[6])
#traffic analysis
@app.route('/flowanalyzer/', methods=['POST', 'GET'])
def flowanalyzer():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
data = flowdisplay(start_time,end_time)
return render_template('./dataanalyzer/trafficanalyzer.html', timeline=data[0],templist=data[1], topo_traffic_key=data[2],topo_traffic_value=data[3],time=data[4])
else:
t = time.time()
current_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
previous_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(t - 6*60*60))
data = flowdisplay(previous_time,current_time)
return render_template('./dataanalyzer/trafficanalyzer.html', timeline=data[0],templist=data[1], topo_traffic_key=data[2],topo_traffic_value=data[3],time=data[4])
@app.route('/appflowanalyzer/', methods=['POST', 'GET'])
def appflowanalyzer():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
data = appflowdisplay(start_time,end_time)
return render_template('./dataanalyzer/appflowdisplay.html', timeline=data[0],templist=data[1], topo_traffic_key=data[2],topo_traffic_value=data[3],time=data[4])
else:
t = time.time()
current_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
previous_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(t - 6*60*60))
data = appflowdisplay(previous_time,current_time)
return render_template('./dataanalyzer/appflowdisplay.html', timeline=data[0],templist=data[1], topo_traffic_key=data[2],topo_traffic_value=data[3],time=data[4])
#analysis of the number of reported messages
@app.route('/count_appdata/', methods=['POST', 'GET'])
def count_appdata():
databasepath = os.path.join(app.config['TOPO_FOLDER'],"topo3.db")
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
dataset = selectall(start_time,end_time,"ApplicationData")
return render_template('./dataanalyzer/count_appdata.html',nodelist=dataset[0], countlist=dataset[1],time=dataset[2])
else:
t = time.time()
current_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
previous_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(t - 6*60*60))
dataset = selectall(previous_time,current_time,"ApplicationData")
return render_template('./dataanalyzer/count_appdata.html',nodelist=dataset[0], countlist=dataset[1],time=dataset[2])
# application data analysis
@app.route('/appdataanalyzer/', methods=['POST', 'GET'])
def appdataanalyzer():
nodeid_list = AppID_all()
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
nodepick = request.form['nodeselect']
timelist = node_time_display(start_time,end_time,"ApplicationData",nodepick)
return render_template('./dataanalyzer/appdataanalyzer.html',timelist=timelist[0], nodelist = nodeid_list,time=timelist[1],node=nodepick)
else:
node = DATABASE.my_db_execute('select distinct NodeID from ApplicationData limit 1;',None)
nodeid = (node[0][0].encode('ascii'))
t = time.time()
current_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
previous_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(t - 6*60*60))
timelist = node_time_display(previous_time,current_time,"ApplicationData",nodeid)
return render_template('./dataanalyzer/appdataanalyzer.html',timelist=timelist[0], nodelist = nodeid_list,time=timelist[1],node=nodeid)
#curve of network-data message count over time
@app.route('/netcountdisplay/', methods=['POST', 'GET'])
def netcountdisplay():
nodeid_list = list()
appdata = DATABASE.my_db_execute('select distinct NodeID from NetMonitor;',None)
for i in range(len(appdata)):
nodeid_list.append(appdata[i][0].encode('ascii'))
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
nodepick = request.form['nodeselect']
timelist = node_time_display(start_time,end_time,"NetMonitor",nodepick)
return render_template('./dataanalyzer/netcountdisplay.html',timelist=timelist[0], nodelist = nodeid_list,time=timelist[1],node=nodepick)
else:
node = DATABASE.my_db_execute('select distinct NodeID from NetMonitor limit 1;',None)
nodeid = (node[0][0].encode('ascii'))
t = time.time()
current_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
previous_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(t - 6*60*60))
timelist = node_time_display(previous_time,current_time,"NetMonitor",nodeid)
return render_template('./dataanalyzer/netcountdisplay.html',timelist=timelist[0], nodelist = nodeid_list,time=timelist[1],node=nodeid)
#synchronization time offset over time
@app.route('/syntimediffdisplay/', methods=['POST', 'GET'])
@app.route('/syntimediffdisplay', methods=['POST', 'GET'])
def syntimediffdisplay():
syntime_list = list()
time_list = list()
nodeid_list = NetID_all()
nodeid_list.sort()
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
nodepick = request.form['nodeselect']
syntime = DATABASE.my_db_execute('select currenttime, syntime from NetMonitor where currenttime >= ? and currenttime <= ? and NodeID == ?;',(start_time, end_time, nodepick))
for i in range(len(syntime)):
time_list.append(syntime[i][0].encode('ascii'))
syntime_list.append(syntime[i][1])
timedisplay = ("\""+start_time + ' - ' + end_time+"\"").encode('ascii')
return render_template('./dataanalyzer/syntimediffdisplay.html',
nodeid=nodepick,nodelist = nodeid_list,time_list=time_list,syntime_list=syntime_list,time=timedisplay)
else:
nodepick = nodeid_list[0]
end_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
start_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time() - 6*60*60))
syntime = DATABASE.my_db_execute('select currenttime, syntime from NetMonitor where currenttime >= ? and currenttime <= ? and NodeID == ?;',(start_time, end_time, nodepick))
for i in range(len(syntime)):
time_list.append(syntime[i][0].encode('ascii'))
syntime_list.append(syntime[i][1])
timedisplay = ("\""+start_time + ' - ' + end_time+"\"").encode('ascii')
# print nodepick,nodeid_list,cpu,lpm,tx,rx,voltage_list,time_list
return render_template('./dataanalyzer/syntimediffdisplay.html',
nodeid=nodepick,nodelist = nodeid_list,time_list=time_list,syntime_list=syntime_list,time=timedisplay)
# topology display
@app.route('/topodisplay/', methods=['POST', 'GET'])
def topodisplay():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
echarts_start_time = selectime.encode("utf-8")[0:19]
echarts_end_time = selectime.encode("utf-8")[22:41]
topodata = topo_display(echarts_start_time,echarts_end_time)
return render_template('./dataanalyzer/topodisplay.html',nodes = topodata[0], links = topodata[1],time=topodata[2])
else:
t = time.time()
current_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
previous_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(t - 6*60*60))
topodata = topo_display(previous_time,current_time)
# lasttime = DATABASE.my_db_execute("select currenttime from NetMonitor where currenttime >= ? and currenttime <= ? order by currenttime desc LIMIT 1;",(previous_time, current_time))
# if lasttime:
# real_end_time = time.mktime(time.strptime(lasttime[0][0],'%Y-%m-%d %H:%M:%S')) #取选定时间内的最后一个时间,算这个时间与它前十分钟内的数据
# real_start_time = real_end_time - 10 * 60
# start_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(real_start_time))
# end_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(real_end_time))
# ID_list = DATABASE.my_db_execute("select NodeID, ParentID from NetMonitor where currenttime >= ? and currenttime <= ?;",(start_time, end_time))
# for node in ID_list:
# ID = node[0] # ID
# ParentID = node[1] # parentID
# if ID in Parentnode:
# continue
# else:
# Parentnode[ID] = ParentID
# # 遍历Parentnode的key,绘制散点图;遍历Parentnode的key和value,画箭头
# nodes = list()
# links = list()
# n = dict()
# m = dict()
# if lasttime:
# if rootID not in Parentnode.keys():
# rootIDjson = {"category":3, "name":"root:"+str(rootID.encode('ascii'))}
# nodes.append(rootIDjson)
# for key ,value in Parentnode.items():
# n = {"category":1, "name":key.encode('ascii')}
# nodes.append(n)
# m = {"source":value.encode('ascii'), "target":key.encode('ascii'), "weight":1}
# links.append(m)
# else:
# for key ,value in Parentnode.items():
# if key==rootID:
# n = {"category":3, "name":key.encode('ascii')}
# nodes.append(n)
# m = {"source":value.encode('ascii'), "target":key.encode('ascii'), "weight":1}
# links.append(m)
# else:
# n = {"category":1, "name":key.encode('ascii')}
# nodes.append(n)
# m = {"source":value.encode('ascii'), "target":key.encode('ascii'), "weight":1}
# links.append(m)
return render_template('./dataanalyzer/topodisplay.html',nodes = topodata[0], links = topodata[1],time=topodata[2])
# ----------------------------------------------System configuration tool---------------------------------------------
@app.route('/terminaltool/', methods=['POST', 'GET'])
def terminaltool():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
else:
config=Connect()
url="http://"+config.all_config_json()["serverIp"]+":6175"
# print url
return redirect(url)
# return render_template('./systemctrl/index.html')
# ----------------------------------------------Exception information pages---------------------------------------------
#abnormal data
@app.route('/exceptinfo/', methods=['POST', 'GET'])
def exceptinfo():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
data = data_error_new(start_time,end_time)
return render_template('./exceptions/exception.html', vwarning=data[0],iwarning=data[1],lists=data[2],time=data[3])
else:
t = time.time()
current_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
previous_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(t - 6*60*60))
        # over-current check
data = data_error_new(previous_time,current_time)
return render_template('./exceptions/exception.html', vwarning=data[0],iwarning=data[1],lists=data[2],time=data[3])
#list of nodes with time-synchronization anomalies
@app.route('/synerror/', methods=['POST', 'GET'])
def synerror():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
        # time-synchronization node anomalies
warning_list = syn_error(start_time,end_time)
return render_template('./exceptions/synerror.html', warning=warning_list[0],lists=warning_list[1],time=warning_list[2])
else:
t = time.time()
current_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
previous_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(t - 6*60*60))
warning_list = syn_error(previous_time,current_time)
        return render_template('./exceptions/synerror.html', warning=warning_list[0],lists=warning_list[1],time=warning_list[2])
# ----------------------------------------------Process supervision---------------------------------------------
#process supervision
@app.route('/supervisor/', methods=['POST', 'GET'])
def supervisor():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
else:
processInfo = getAllProcessInfo()
return render_template('./supervisor/supervisor.html',processInfo=processInfo)
@app.route('/supervisor_set_status/', methods=['POST', 'GET'])
def supervisor_set_status():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
else:
deal_process = request.args.get('Processname')
handle = deal_process.split('_')[0]
Processname = deal_process.split('_')[1]
if handle=='stop':
stopProcess(Processname)
if handle=='start':
startProcess(Processname)
if handle=='restart':
stopProcess(Processname)
startProcess(Processname)
processInfo = getAllProcessInfo()
return render_template('./supervisor/supervisor.html',processInfo=processInfo)
@app.route('/supervisor_restart_all/', methods=['POST', 'GET'])
def supervisor_restart_all():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
else:
stopAllProcesses()
startAllProcesses()
processInfo = getAllProcessInfo()
return render_template('./supervisor/supervisor.html',processInfo=processInfo)
@app.route('/supervisor_start_all/', methods=['POST', 'GET'])
def supervisor_start_all():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
else:
startAllProcesses()
processInfo = getAllProcessInfo()
return render_template('./supervisor/supervisor.html',processInfo=processInfo)
@app.route('/supervisor_stop_all/', methods=['POST', 'GET'])
def supervisor_stop_all():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
else:
stopAllProcesses()
processInfo = getAllProcessInfo()
return render_template('./supervisor/supervisor.html',processInfo=processInfo)
@app.route('/test/', methods=['POST', 'GET'])
def test():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
data = data_error_new(start_time,end_time)
# print data
return render_template('./upload/timestamp.html', vwarning=data[0],iwarning=data[1])
else:
t = time.time()
current_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
previous_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(t - 6*60*60))
data = data_error_new(previous_time,current_time)
return render_template('./upload/timestamp.html', vwarning=data[0],iwarning=data[1])
# ----------------------------------------------Packet construction pages---------------------------------------------
#protocol description
@app.route('/nettools/', methods=['POST', 'GET'])
def nettools():
return u'网络工具'
@app.route('/protohelp/', methods=['POST', 'GET'])
def protohelp():
return u'协议说明'
# ----------------------------------------------Error handling pages---------------------------------------------
@app.errorhandler(404)
def internal_error(error):
return render_template('./error/404.html'), 404
@app.errorhandler(500)
def internal_server_error(error):
return render_template('./error/500.html'), 500
@app.route('/about/', methods=['POST', 'GET'])
def about():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
else:
return render_template('./home/about.html')
| gpl-3.0 | -7,940,013,802,798,929,000 | 40.75838 | 217 | 0.591809 | false | 3.210976 | true | false | false |
Rochester-NRT/RocAlphaGo | benchmarks/reinforcement_policy_training_benchmark.py | 6 | 1150 | from AlphaGo.training.reinforcement_policy_trainer import run_training
from AlphaGo.models.policy import CNNPolicy
import os
from cProfile import Profile
# make a miniature model for playing on a miniature 7x7 board
architecture = {'filters_per_layer': 32, 'layers': 4, 'board': 7}
features = ['board', 'ones', 'turns_since', 'liberties', 'capture_size',
'self_atari_size', 'liberties_after', 'sensibleness']
policy = CNNPolicy(features, **architecture)
datadir = os.path.join('benchmarks', 'data')
modelfile = os.path.join(datadir, 'mini_rl_model.json')
weights = os.path.join(datadir, 'init_weights.hdf5')
outdir = os.path.join(datadir, 'rl_output')
stats_file = os.path.join(datadir, 'reinforcement_policy_trainer.prof')
if not os.path.exists(datadir):
os.makedirs(datadir)
if not os.path.exists(weights):
policy.model.save_weights(weights)
policy.save_model(modelfile)
profile = Profile()
arguments = (modelfile, weights, outdir, '--learning-rate', '0.001', '--save-every', '2',
'--game-batch', '20', '--iterations', '10', '--verbose')
profile.runcall(run_training, arguments)
profile.dump_stats(stats_file)
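# The dumped profile can be inspected afterwards with the standard library,
# e.g. (a minimal sketch, run separately from this benchmark):
#
#   import pstats
#   pstats.Stats(stats_file).sort_stats('cumulative').print_stats(20)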
| mit | 2,693,906,516,910,817,000 | 38.655172 | 89 | 0.713913 | false | 3.142077 | false | false | false |
SKA-ScienceDataProcessor/integration-prototype | demos/02_running_a_workflow/generate_sbi_config.py | 1 | 1542 | # coding: utf-8
"""Script to generate an SBI configuration for this demo."""
import datetime
import json
import sys
from random import randint
def generate_sbi(index: int = None):
"""Generate a SBI config JSON string."""
date = datetime.datetime.utcnow().strftime('%Y%m%d')
if index is None:
index = randint(0, 999)
sbi_id = 'SBI-{}-sip-demo-{:03d}'.format(date, index)
sb_id = 'SBI-{}-sip-demo-{:03d}'.format(date, index)
pb_id = 'PB-{}-sip-demo-{:03d}'.format(date, index)
print('* Generating SBI: %s, PB: %s' % (sb_id, pb_id))
sbi = dict(
id=sbi_id,
version='1.0.0',
scheduling_block=dict(
id=sb_id,
project='sip',
programme_block='sip_demos'
),
processing_blocks=[
dict(
id=pb_id,
version='1.0.0',
type='offline',
priority=1,
dependencies=[],
resources_required=[],
workflow=dict(
id='mock_workflow',
version='1.0.0',
parameters=dict(
stage1=dict(duration=30),
stage2=dict(duration=30),
stage3=dict(duration=30)
)
)
)
]
)
return sbi
if __name__ == '__main__':
_index = int(sys.argv[1]) if len(sys.argv) == 2 else None
sbi_config = generate_sbi(_index)
sbi_json = json.dumps(sbi_config)
| bsd-3-clause | 4,506,671,685,868,727,000 | 28.653846 | 61 | 0.479248 | false | 3.671429 | false | false | false |
quiqueporta/django-rest-tools | tests/django_rest_tools_tests/settings.py | 2 | 2299 | """
Django settings for tests project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_1r4ul8i5s7_juk=n=kj_n)(e0q!w=ifq#mf78s2-&p3gfya%g'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.gis',
'rest_framework',
'django_rest_tools_app',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'django_rest_tools_tests.urls'
WSGI_APPLICATION = 'django_rest_tools_tests.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'django_rest_tools_tests',
'USER': 'postgres',
'PASSWORD': 'postgres',
'HOST': 'localhost',
'PORT': ''
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
}
| gpl-3.0 | 1,358,720,159,917,750,000 | 23.2 | 82 | 0.707699 | false | 3.260993 | false | false | false |
MDAnalysis/mdanalysis | package/MDAnalysis/converters/ParmEdParser.py | 1 | 10818 | # -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
ParmEd topology parser --- :mod:`MDAnalysis.converters.ParmEdParser`
====================================================================
Converts a `ParmEd <https://parmed.github.io/ParmEd/html>`_
:class:`parmed.structure.Structure` into a :class:`MDAnalysis.core.Topology`.
Example
-------
If you want to use an MDAnalysis-written ParmEd structure for simulation
in ParmEd, you need to first read your files with ParmEd to include the
necessary topology parameters. ::
>>> import parmed as pmd
>>> import MDAnalysis as mda
>>> from MDAnalysis.tests.datafiles import PRM7_ala2, RST7_ala2
>>> prm = pmd.load_file(PRM7_ala2, RST7_ala2)
>>> prm
<AmberParm 3026 atoms; 1003 residues; 3025 bonds; PBC (orthogonal); parametrized>
We can then convert this to an MDAnalysis structure, select only the
protein atoms, and then convert it back to ParmEd. ::
>>> u = mda.Universe(prm)
>>> u
<Universe with 3026 atoms>
>>> prot = u.select_atoms('protein')
>>> prm_prot = prot.convert_to('PARMED')
>>> prm_prot
<Structure 23 atoms; 2 residues; 22 bonds; PBC (orthogonal); parametrized>
From here you can create an OpenMM simulation system and minimize the
energy. ::
>>> import simtk.openmm as mm
>>> import simtk.openmm.app as app
>>> from parmed import unit as u
>>> system = prm_prot.createSystem(nonbondedMethod=app.NoCutoff,
... constraints=app.HBonds,
... implicitSolvent=app.GBn2)
>>> integrator = mm.LangevinIntegrator(
... 300*u.kelvin, # Temperature of heat bath
... 1.0/u.picoseconds, # Friction coefficient
... 2.0*u.femtoseconds, # Time step
... )
>>> sim = app.Simulation(prm_prot.topology, system, integrator)
>>> sim.context.setPositions(prm_prot.positions)
>>> sim.minimizeEnergy(maxIterations=500)
Now you can continue on and run a simulation, if you wish.
Classes
-------
.. autoclass:: ParmEdParser
:members:
:inherited-members:
.. versionchanged:: 2.0.0
The ParmEdParser class was moved from :mod:`~MDAnalysis.topology` to
:mod:`~MDAnalysis.converters`
"""
import logging
import numpy as np
from ..topology.base import TopologyReaderBase, change_squash
from ..topology.tables import Z2SYMB
from ..core.topologyattrs import (
Atomids,
Atomnames,
AltLocs,
ChainIDs,
Atomtypes,
Occupancies,
Tempfactors,
Elements,
Masses,
Charges,
Resids,
Resnums,
Resnames,
Segids,
GBScreens,
SolventRadii,
NonbondedIndices,
RMins,
Epsilons,
RMin14s,
Epsilon14s,
Bonds,
UreyBradleys,
Angles,
Dihedrals,
Impropers,
CMaps
)
from ..core.topology import Topology
logger = logging.getLogger("MDAnalysis.converters.ParmEdParser")
def squash_identical(values):
if len(values) == 1:
return values[0]
else:
return tuple(values)
class ParmEdParser(TopologyReaderBase):
"""
For ParmEd structures
"""
format = 'PARMED'
@staticmethod
def _format_hint(thing):
"""Can this Parser read object *thing*?
.. versionadded:: 1.0.0
"""
try:
import parmed as pmd
except ImportError: # if no parmed, probably not parmed
return False
else:
return isinstance(thing, pmd.Structure)
def parse(self, **kwargs):
"""Parse PARMED into Topology
Returns
-------
MDAnalysis *Topology* object
.. versionchanged:: 2.0.0
           Elements are no longer guessed. If the elements present in the
           parmed object are not recognised (usually given an atomic mass of 0),
           they will be assigned an empty string.
"""
structure = self.filename
#### === ATOMS === ####
names = []
masses = []
charges = []
types = []
atomic_numbers = []
serials = []
resnames = []
resids = []
chainids = []
segids = []
altLocs = []
bfactors = []
occupancies = []
screens = []
solvent_radii = []
nonbonded_indices = []
rmins = []
epsilons = []
rmin14s = []
epsilon14s = []
for atom in structure.atoms:
names.append(atom.name)
masses.append(atom.mass)
charges.append(atom.charge)
types.append(atom.type)
atomic_numbers.append(atom.atomic_number)
serials.append(atom.number)
resnames.append(atom.residue.name)
resids.append(atom.residue.number)
chainids.append(atom.residue.chain)
segids.append(atom.residue.segid)
altLocs.append(atom.altloc)
bfactors.append(atom.bfactor)
occupancies.append(atom.occupancy)
screens.append(atom.screen)
solvent_radii.append(atom.solvent_radius)
nonbonded_indices.append(atom.nb_idx)
rmins.append(atom.rmin)
epsilons.append(atom.epsilon)
rmin14s.append(atom.rmin_14)
epsilon14s.append(atom.epsilon_14)
attrs = []
n_atoms = len(names)
elements = []
for z, name in zip(atomic_numbers, names):
try:
elements.append(Z2SYMB[z])
except KeyError:
elements.append('')
# Make Atom TopologyAttrs
for vals, Attr, dtype in (
(names, Atomnames, object),
(masses, Masses, np.float32),
(charges, Charges, np.float32),
(types, Atomtypes, object),
(elements, Elements, object),
(serials, Atomids, np.int32),
(chainids, ChainIDs, object),
(altLocs, AltLocs, object),
(bfactors, Tempfactors, np.float32),
(occupancies, Occupancies, np.float32),
(screens, GBScreens, np.float32),
(solvent_radii, SolventRadii, np.float32),
(nonbonded_indices, NonbondedIndices, np.int32),
(rmins, RMins, np.float32),
(epsilons, Epsilons, np.float32),
(rmin14s, RMin14s, np.float32),
(epsilon14s, Epsilon14s, np.float32),
):
attrs.append(Attr(np.array(vals, dtype=dtype)))
resids = np.array(resids, dtype=np.int32)
resnames = np.array(resnames, dtype=object)
chainids = np.array(chainids, dtype=object)
segids = np.array(segids, dtype=object)
residx, (resids, resnames, chainids, segids) = change_squash(
(resids, resnames, chainids, segids),
(resids, resnames, chainids, segids))
n_residues = len(resids)
attrs.append(Resids(resids))
attrs.append(Resnums(resids.copy()))
attrs.append(Resnames(resnames))
segidx, (segids,) = change_squash((segids,), (segids,))
n_segments = len(segids)
attrs.append(Segids(segids))
#### === OTHERS === ####
bond_values = {}
bond_types = []
bond_orders = []
ub_values = {}
ub_types = []
angle_values = {}
angle_types = []
dihedral_values = {}
dihedral_types = []
improper_values = {}
improper_types = []
cmap_values = {}
cmap_types = []
for bond in structure.bonds:
idx = (bond.atom1.idx, bond.atom2.idx)
if idx not in bond_values:
bond_values[idx] = ([bond], [bond.order])
else:
bond_values[idx][0].append(bond)
bond_values[idx][1].append(bond.order)
try:
bond_values, values = zip(*list(bond_values.items()))
except ValueError:
bond_values, bond_types, bond_orders = [], [], []
else:
bond_types, bond_orders = zip(*values)
bond_types = list(map(squash_identical, bond_types))
bond_orders = list(map(squash_identical, bond_orders))
attrs.append(Bonds(bond_values, types=bond_types, guessed=False,
order=bond_orders))
for pmdlist, na, values, types in (
(structure.urey_bradleys, 2, ub_values, ub_types),
(structure.angles, 3, angle_values, angle_types),
(structure.dihedrals, 4, dihedral_values, dihedral_types),
(structure.impropers, 4, improper_values, improper_types),
(structure.cmaps, 5, cmap_values, cmap_types),
):
for p in pmdlist:
atoms = ['atom{}'.format(i) for i in range(1, na+1)]
idx = tuple(getattr(p, a).idx for a in atoms)
if idx not in values:
values[idx] = [p]
else:
values[idx].append(p)
for dct, Attr in (
(ub_values, UreyBradleys),
(angle_values, Angles),
(dihedral_values, Dihedrals),
(improper_values, Impropers),
(cmap_values, CMaps),
):
try:
vals, types = zip(*list(dct.items()))
except ValueError:
vals, types = [], []
types = list(map(squash_identical, types))
attrs.append(Attr(vals, types=types, guessed=False, order=None))
top = Topology(n_atoms, n_residues, n_segments,
attrs=attrs,
atom_resindex=residx,
residue_segindex=segidx)
return top
| gpl-2.0 | -8,937,481,923,130,624,000 | 29.645892 | 85 | 0.566741 | false | 3.598802 | false | false | false |
ayleph/mediagoblin-recaptcha | recaptcha/tools.py | 1 | 2772 | # GNU MediaGoblin -- federated, autonomous media hosting
# Copyright (C) 2011, 2012 MediaGoblin contributors. See AUTHORS.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from mediagoblin import messages
from mediagoblin.tools import pluginapi
from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
from recaptcha.client import captcha
import json
import urllib2
_log = logging.getLogger(__name__)
def extra_validation(register_form):
config = pluginapi.get_config('mediagoblin.plugins.recaptcha')
recaptcha_secret_key = config.get('RECAPTCHA_SECRET_KEY')
# Our hacky method of adding CAPTCHA fields to the form results
# in multiple fields with the same name. Check the raw_data for
# a non-empty string.
if 'g_recaptcha_response' in register_form:
recaptcha_response = register_form.g_recaptcha_response.data
if recaptcha_response == u'':
for raw_data in register_form.g_recaptcha_response.raw_data:
if raw_data != u'':
recaptcha_response = raw_data
if 'remote_address' in register_form:
remote_address = register_form.remote_address.data
if remote_address == u'':
for raw_data in register_form.remote_address.raw_data:
if raw_data != u'':
remote_address = raw_data
captcha_challenge_passes = False
server_response = ''
if recaptcha_response:
url = "https://www.google.com/recaptcha/api/siteverify?secret=%s&response=%s&remoteip=%s" % (recaptcha_secret_key, recaptcha_response, remote_address)
server_response = json.loads(urllib2.urlopen(url).read())
captcha_challenge_passes = server_response['success']
if not captcha_challenge_passes:
register_form.g_recaptcha_response.errors.append(
_('Sorry, CAPTCHA attempt failed.'))
_log.info('Failed registration CAPTCHA attempt from %r.', remote_address)
_log.debug('captcha response is: %r', recaptcha_response)
if server_response:
_log.debug('server response is: %r' % server_response)
return captcha_challenge_passes
| agpl-3.0 | 2,041,587,880,511,200,800 | 41.646154 | 158 | 0.699495 | false | 3.834025 | false | false | false |
ljchang/nltools | nltools/utils.py | 1 | 12415 | '''
NeuroLearn Utilities
====================
handy utilities.
'''
__all__ = ['get_resource_path',
'get_anatomical',
'set_algorithm',
'attempt_to_import',
'all_same',
'concatenate',
'_bootstrap_apply_func',
'set_decomposition_algorithm'
]
__author__ = ["Luke Chang"]
__license__ = "MIT"
from os.path import dirname, join, sep as pathsep
import nibabel as nib
import importlib
import os
from sklearn.pipeline import Pipeline
from sklearn.utils import check_random_state
from scipy.spatial.distance import squareform
import numpy as np
import pandas as pd
import collections
from types import GeneratorType
def _df_meta_to_arr(df):
"""Check what kind of data exists in pandas columns or index. If string return as numpy array 'S' type, otherwise regular numpy array. Used when saving Brain_Data objects to hdf5.
"""
if len(df.columns):
if isinstance(df.columns[0], str):
columns = df.columns.values.astype("S")
else:
columns = df.columns.values
else:
columns = []
if len(df.index):
if isinstance(df.index[0], str):
index = df.index.values.astype("S")
else:
index = df.index.values
else:
index = []
return columns, index
def get_resource_path():
""" Get path to nltools resource directory. """
return join(dirname(__file__), 'resources') + pathsep
def get_anatomical():
""" Get nltools default anatomical image.
DEPRECATED. See MNI_Template and resolve_mni_path from nltools.prefs
"""
return nib.load(os.path.join(get_resource_path(), 'MNI152_T1_2mm.nii.gz'))
def get_mni_from_img_resolution(brain, img_type='plot'):
"""
Get the path to the resolution MNI anatomical image that matches the resolution of a Brain_Data instance. Used by Brain_Data.plot() and .iplot() to set backgrounds appropriately.
Args:
brain: Brain_Data instance
Returns:
file_path: path to MNI image
"""
if img_type not in ['plot', 'brain']:
raise ValueError("img_type must be 'plot' or 'brain' ")
res_array = np.abs(np.diag(brain.nifti_masker.affine_)[:3])
voxel_dims = np.unique(abs(res_array))
if len(voxel_dims) != 1:
raise ValueError("Voxels are not isometric and cannot be visualized in standard space")
else:
dim = str(int(voxel_dims[0])) + 'mm'
if img_type == 'brain':
mni = f'MNI152_T1_{dim}_brain.nii.gz'
else:
mni = f'MNI152_T1_{dim}.nii.gz'
return os.path.join(get_resource_path(), mni)
def set_algorithm(algorithm, *args, **kwargs):
""" Setup the algorithm to use in subsequent prediction analyses.
Args:
algorithm: The prediction algorithm to use. Either a string or an
(uninitialized) scikit-learn prediction object. If string,
must be one of 'svm','svr', linear','logistic','lasso',
'lassopcr','lassoCV','ridge','ridgeCV','ridgeClassifier',
'randomforest', or 'randomforestClassifier'
kwargs: Additional keyword arguments to pass onto the scikit-learn
clustering object.
Returns:
predictor_settings: dictionary of settings for prediction
"""
# NOTE: function currently located here instead of analysis.py to avoid circular imports
predictor_settings = {}
predictor_settings['algorithm'] = algorithm
def load_class(import_string):
class_data = import_string.split(".")
module_path = '.'.join(class_data[:-1])
class_str = class_data[-1]
module = importlib.import_module(module_path)
return getattr(module, class_str)
algs_classify = {
'svm': 'sklearn.svm.SVC',
'logistic': 'sklearn.linear_model.LogisticRegression',
'ridgeClassifier': 'sklearn.linear_model.RidgeClassifier',
'ridgeClassifierCV': 'sklearn.linear_model.RidgeClassifierCV',
'randomforestClassifier': 'sklearn.ensemble.RandomForestClassifier'
}
algs_predict = {
'svr': 'sklearn.svm.SVR',
'linear': 'sklearn.linear_model.LinearRegression',
'lasso': 'sklearn.linear_model.Lasso',
'lassoCV': 'sklearn.linear_model.LassoCV',
'ridge': 'sklearn.linear_model.Ridge',
'ridgeCV': 'sklearn.linear_model.RidgeCV',
'randomforest': 'sklearn.ensemble.RandomForest'
}
if algorithm in algs_classify.keys():
predictor_settings['prediction_type'] = 'classification'
alg = load_class(algs_classify[algorithm])
predictor_settings['predictor'] = alg(*args, **kwargs)
elif algorithm in algs_predict:
predictor_settings['prediction_type'] = 'prediction'
alg = load_class(algs_predict[algorithm])
predictor_settings['predictor'] = alg(*args, **kwargs)
elif algorithm == 'lassopcr':
predictor_settings['prediction_type'] = 'prediction'
from sklearn.linear_model import Lasso
from sklearn.decomposition import PCA
predictor_settings['_lasso'] = Lasso()
predictor_settings['_pca'] = PCA()
predictor_settings['predictor'] = Pipeline(
steps=[('pca', predictor_settings['_pca']),
('lasso', predictor_settings['_lasso'])])
elif algorithm == 'pcr':
predictor_settings['prediction_type'] = 'prediction'
from sklearn.linear_model import LinearRegression
from sklearn.decomposition import PCA
predictor_settings['_regress'] = LinearRegression()
predictor_settings['_pca'] = PCA()
predictor_settings['predictor'] = Pipeline(
steps=[('pca', predictor_settings['_pca']),
('regress', predictor_settings['_regress'])])
else:
raise ValueError("""Invalid prediction/classification algorithm name.
Valid options are 'svm','svr', 'linear', 'logistic', 'lasso',
'lassopcr','lassoCV','ridge','ridgeCV','ridgeClassifier',
'randomforest', or 'randomforestClassifier'.""")
return predictor_settings
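# Example: a minimal sketch of how the returned settings dict is typically consumed
# downstream. The 'ridge' choice and the alpha value are purely illustrative; any
# keyword arguments are passed straight through to scikit-learn.
def _example_set_algorithm():
    settings = set_algorithm('ridge', alpha=1.0)
    assert settings['prediction_type'] == 'prediction'
    # settings['predictor'] is an unfitted sklearn Ridge estimator, ready for .fit(X, y).
    return settings['predictor']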
def set_decomposition_algorithm(algorithm, n_components=None, *args, **kwargs):
""" Setup the algorithm to use in subsequent decomposition analyses.
Args:
algorithm: The decomposition algorithm to use. Either a string or an
(uninitialized) scikit-learn decomposition object.
If string must be one of 'pca','nnmf', ica','fa',
'dictionary', 'kernelpca'.
kwargs: Additional keyword arguments to pass onto the scikit-learn
clustering object.
Returns:
predictor_settings: dictionary of settings for prediction
"""
# NOTE: function currently located here instead of analysis.py to avoid circular imports
def load_class(import_string):
class_data = import_string.split(".")
module_path = '.'.join(class_data[:-1])
class_str = class_data[-1]
module = importlib.import_module(module_path)
return getattr(module, class_str)
algs = {
'pca': 'sklearn.decomposition.PCA',
'ica': 'sklearn.decomposition.FastICA',
'nnmf': 'sklearn.decomposition.NMF',
'fa': 'sklearn.decomposition.FactorAnalysis',
'dictionary': 'sklearn.decomposition.DictionaryLearning',
'kernelpca': 'sklearn.decomposition.KernelPCA'}
if algorithm in algs.keys():
alg = load_class(algs[algorithm])
alg = alg(n_components, *args, **kwargs)
else:
raise ValueError("""Invalid prediction/classification algorithm name.
Valid options are 'pca','ica', 'nnmf', 'fa'""")
return alg
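# Example: a short sketch of building a decomposition object; the component count
# here is an arbitrary illustrative value.
def _example_set_decomposition_algorithm():
    pca = set_decomposition_algorithm('pca', n_components=5)
    # pca is an unfitted sklearn.decomposition.PCA(n_components=5) instance;
    # call pca.fit_transform(data) on a (samples x features) array to use it.
    return pca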
def isiterable(obj):
''' Returns True if the object is one of allowable iterable types. '''
return isinstance(obj, (list, tuple, GeneratorType))
module_names = {}
Dependency = collections.namedtuple('Dependency', 'package value')
def attempt_to_import(dependency, name=None, fromlist=None):
if name is None:
name = dependency
try:
mod = __import__(dependency, fromlist=fromlist)
except ImportError:
mod = None
module_names[name] = Dependency(dependency, mod)
return mod
def all_same(items):
    # Use the builtin all(); np.all() applied to a generator expression would
    # test the generator object itself (always truthy) rather than its items.
    return all(x == items[0] for x in items)
def concatenate(data):
'''Concatenate a list of Brain_Data() or Adjacency() objects'''
if not isinstance(data, list):
raise ValueError('Make sure you are passing a list of objects.')
if all([isinstance(x, data[0].__class__) for x in data]):
# Temporarily Removing this for circular imports (LC)
# if not isinstance(data[0], (Brain_Data, Adjacency)):
# raise ValueError('Make sure you are passing a list of Brain_Data'
# ' or Adjacency objects.')
out = data[0].__class__()
for i in data:
out = out.append(i)
else:
raise ValueError('Make sure all objects in the list are the same type.')
return out
def _bootstrap_apply_func(data, function, random_state=None, *args, **kwargs):
'''Bootstrap helper function. Sample with replacement and apply function'''
random_state = check_random_state(random_state)
data_row_id = range(data.shape()[0])
new_dat = data[random_state.choice(data_row_id,
size=len(data_row_id),
replace=True)]
return getattr(new_dat, function)(*args, **kwargs)
def check_square_numpy_matrix(data):
'''Helper function to make sure matrix is square and numpy array'''
from nltools.data import Adjacency
if isinstance(data, Adjacency):
data = data.squareform()
elif isinstance(data, pd.DataFrame):
data = data.values
else:
data = np.array(data)
if len(data.shape) != 2:
try:
data = squareform(data)
except ValueError:
raise ValueError("Array does not contain the correct number of elements to be square")
return data
def check_brain_data(data, mask=None):
'''Check if data is a Brain_Data Instance.'''
from nltools.data import Brain_Data
if not isinstance(data, Brain_Data):
if isinstance(data, nib.Nifti1Image):
data = Brain_Data(data, mask=mask)
else:
raise ValueError("Make sure data is a Brain_Data instance.")
else:
if mask is not None:
data = data.apply_mask(mask)
return data
def check_brain_data_is_single(data):
'''Logical test if Brain_Data instance is a single image
Args:
data: brain data
Returns:
(bool)
'''
data = check_brain_data(data)
if len(data.shape()) > 1:
return False
else:
return True
def _roi_func(brain, roi, algorithm, cv_dict, **kwargs):
'''Brain_Data.predict_multi() helper function'''
return brain.apply_mask(roi).predict(algorithm=algorithm, cv_dict=cv_dict, plot=False, **kwargs)
class AmbiguityError(Exception):
pass
def generate_jitter(n_trials, mean_time=5, min_time=2, max_time=12, atol=.2):
'''Generate jitter from exponential distribution with constraints
Draws from exponential distribution until the distribution satisfies the constraints:
np.abs(np.mean(min_time > data < max_time) - mean_time) <= atol
Args:
n_trials: (int) number of trials to generate jitter
mean_time: (float) desired mean of distribution
min_time: (float) desired min of distribution
max_time: (float) desired max of distribution
atol: (float) precision of deviation from mean
Returns:
data: (np.array) jitter for each trial
'''
    def generate_data(n_trials, scale=5, min_time=2, max_time=12):
        data = []
        i = 0
        while i < n_trials:
            # Rejection sampling: keep only draws inside the (min_time, max_time) window.
            datam = np.random.exponential(scale=scale)
            if (datam > min_time) and (datam < max_time):
                data.append(datam)
                i += 1
        return data
    mean_diff = False
    while not mean_diff:
        data = generate_data(n_trials, min_time=min_time, max_time=max_time)
        mean_diff = np.isclose(np.mean(data), mean_time, rtol=0, atol=atol)
    return data
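# Example: a quick sketch of generating and checking jitter for a 30-trial run
# (the argument values are illustrative defaults).
def _example_generate_jitter():
    jitter = generate_jitter(30, mean_time=5, min_time=2, max_time=12, atol=.2)
    # Every interval lies in (2, 12) s and the batch mean is within 0.2 s of the
    # requested 5 s mean.
    return jitter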
| mit | 8,238,618,207,601,119,000 | 33.29558 | 183 | 0.618284 | false | 4.013902 | false | false | false |
HG-Dev/tweetfeeder | tweetfeeder/tweeting.py | 1 | 10358 | """
Timed Tweet publishing
"""
from threading import Timer, Event
from datetime import datetime, timedelta
from queue import deque
from time import sleep
from random import uniform
from tweepy import API
from tweepy.models import Status
from tweepy.error import TweepError
from tweetfeeder.logs import Log
from tweetfeeder.file_io.models import Feed, Stats
from tweetfeeder.exceptions import TweetFeederError, LoadFeedError, NoTimerError, ExistingTimerError
from tweetfeeder.file_io.config import Config
class TweetLoop():
''' Interprets TweetFeeder configuration to publish Tweets on a schedule '''
def __init__(self, config: Config, feed: Feed, stats: Stats = None):
"""
Creates an object capable of timed publishing of Tweets.
Automatically starts if config.functionality.Tweet
"""
self.config = config
self.api = API(self.config.authorization, retry_count=1, retry_delay=10, wait_on_rate_limit=True)
self.feed: Feed = feed
self.stats: Stats = stats or Stats()
self.current_index: int = 0 #Set in start
self.current_timer: Timer = None
self._current_started = datetime.now()
self.lock: Event = Event()
self.timers: deque = deque()
if config.functionality.Tweet:
self.start()
def get_next_tweet_datetime(self):
''' Gets the next datetime at which tweeting will occur. '''
# Supply immediate times if no tweet times
if not self.config.tweet_times:
Log.debug("TWT.datetime", "No tweet times; tweet NOW")
return (
datetime.now() +
timedelta(seconds=self.config.min_tweet_delay*0.2)
)
if self.config.tweet_times:
final_time = self.config.tweet_times[-1]
now_t = datetime.now()
next_t = now_t.replace(
hour=final_time.hour,
minute=final_time.minute,
second=0,
microsecond=0)
Log.debug("TWT.datetime", "Compare now {} to next {}".format(now_t, next_t))
if now_t > next_t: #The final time lies before the current
next_t = next_t + timedelta(days=1)
if self.config.rand_deviation: #Add random deviation in minutes
next_t = next_t + timedelta(minutes=(self.config.rand_deviation * uniform(-1, 1)))
Log.debug("TWT.datatime", "Added random deviation to next {}".format(next_t))
for time in self.config.tweet_times:
next_t = next_t.replace(hour=time.hour, minute=time.minute)
if now_t < next_t: # If next_t is in the future
return next_t.replace(second=0)
#Failure
return None
def start(self):
''' Begin the tweet loop '''
if not self.is_running():
self.lock.set()
self.current_index = self.stats.last_feed_index
Log.debug("TWT.start", "Set current index to " + str(self.current_index))
# Add the next timer tweet starting from
# the last successfully tweeted index
self._next()
self.lock.clear()
else:
Log.warning("TWT.start", "Couldn't start: Loop is already running")
def _next(self):
''' When only one timer is left, queue up more '''
# Replenish timers when all queued timers have been popped off
if not self.timers:
Log.debug("TWT.next", "Creating next timers")
# Check to see that the current_index hasn't reached the end of the feed
if self.current_index >= self.feed.total_tweets:
if self.stats.times_rerun < self.config.looping_max_times:
# If looping's enabled, loop the index around
Log.info("TWT.next", "Looping back to start of feed.")
self.stats.times_rerun = self.stats.times_rerun + 1
self.stats.last_rerun_index = self.current_index
self.current_index = 0
else:
# Terminate loop
Log.info("TWT.next", "Reached end of feed, but not allowed to loop.")
self.stop()
return False
# Check to see that the current_index has not surpassed a previous rerun
if self.stats.last_rerun_index > 0 and self.current_index > self.stats.last_rerun_index:
self.stats.times_rerun = 0 # Restore normal tweeting mode
# make_tweet_timers will start searching from current_index,
# but will continue iterating down the feed until it finds timers
# it can actually use (this is important in rerun mode)
index_inc = 0
for timer in self._make_tweet_timers(self.current_index):
#_make_tweet_timers passes back None for spots where reruns are not allowed
index_inc += 1
if timer:
# Skip None, but count it as a passed index
self.timers.append(timer)
Log.debug("TWT.next", "Timer: " + str(timer))
if self.timers: # Set first timer to wait until next tweet time
self.timers[0].interval = (
(self.get_next_tweet_datetime() - datetime.now()).total_seconds()
)
# If a rest_period is required, add it as a final Timer
# This can be used to alternate between tweet times on different days
# This does not affect index_inc
if self.config.rest_period:
self.timers.append(
Timer(abs(self.config.rest_period), self._next)
)
# Update current index with the feed entries both used and skipped
self.current_index += index_inc
if self.current_timer and not self.lock.is_set() and self.current_timer.args:
# Current timer exists, but hasn't tweeted yet; fast forward
self.current_timer.cancel()
Log.debug("TWT.next", "Fast forward")
self._tweet(*self.current_timer.args)
# Update queued timer intervals
elif self.timers:
# current_timer is finishing up tweeting or doesn't exist;
# pop off a timer and start it
self.current_timer = self.timers.popleft()
self.current_timer.start()
self._current_started = datetime.now()
Log.debug("TWT.next", "Starting new timer with interval {}".format(self.current_timer.interval))
else:
# No timers were created or the last timer was just a delay
Log.debug("TWT.next", "Forced into recursion as no timers were produced")
return self._next()
return True
def stop(self):
''' Cancels the current timer, which prevents futher timers from starting. '''
Log.info("TWT.stop", "Stopping current timer and clearing timer list.")
if self.current_timer:
self.current_timer.cancel()
self.timers.clear()
def _make_tweet_timers(self, from_index: int):
''' Returns a tweet timer (multiple if chained), all with the same interval. '''
# This can throw a LoadFeedError
Log.debug("TWT.make_timers", "Making tweet timers starting from {}".format(from_index))
try:
next_tweets = self.feed.get_tweets(from_index)
except LoadFeedError:
return [None] #Returning one None will increase the current index, at least
timers = []
for idx, t_data in enumerate(next_tweets):
# If rerunning, skip tweets which don't have a True "rerun" trait
if self.stats.times_rerun > 0 and not t_data['rerun']:
timers.append(None)
else:
timers.append(
Timer(self.config.min_tweet_delay, self._tweet, (t_data, from_index+idx))
)
return timers
def _tweet(self, data: dict, index: int):
''' Tweet, then signal for the next to begin '''
assert not self.lock.is_set()
self.lock.set()
success = 1
if self.config.functionality.Online:
Log.debug("TWT.tweet", "update_status using {}".format(data['title']))
try:
status = self.api.update_status(data['text'])
except TweepError as e: #TODO: Switch over to Tweepy's retry system, configurable when creating API
Log.error("TWT.tweet", str(e))
success = 0
else:
Log.debug("TWT.tweet (id)", "Status ID: {}".format(status.id))
self.stats.register_tweet(status.id, data['title'])
else:
Log.info("TWT.tweet", data['title'])
self.stats.last_feed_index = index + success
self._next()
self.lock.clear()
def wait_for_tweet(self, timeout=None, timer_expected=True, last_timer=False):
''' Hangs up the calling thread while the CURRENT timer loops. '''
if self.current_timer and not self.current_timer.finished.is_set() and not last_timer:
return self.current_timer.finished.wait(timeout)
search = self.timers
if last_timer:
search = reversed(self.timers)
for timer in search:
if not timer.finished.is_set():
Log.debug("TWT.wait", "Selected timer: " + str(timer))
return timer.finished.wait(timeout)
if timer_expected:
raise NoTimerError("No tweet timers available to wait for")
def time_until_tweet(self):
''' Returns the amount of time until the current timer finishes naturally. '''
if self.is_running():
return self.current_timer.interval - (datetime.now() - self._current_started).total_seconds()
else:
return -1
def force_tweet(self):
''' Forces the oldest timer to finish immediately. '''
self._next()
def is_running(self):
''' Returns true if the TweetLoop has non-popped timers. '''
if self.lock.is_set() or (self.current_timer and not self.current_timer.finished.is_set()):
return True
return False
| gpl-3.0 | 7,369,639,674,734,697,000 | 44.034783 | 111 | 0.584379 | false | 4.151503 | true | false | false |
roskakori/vcdb | vcdb/command.py | 1 | 1897 | """
Command line interface for vcdb.
"""
# Copyright (C) 2016 Thomas Aglassinger.
# Distributed under the GNU Lesser General Public License v3 or later.
import argparse
import logging
import os
import sys
import tempfile
from sqlalchemy.exc import SQLAlchemyError
import vcdb
import vcdb.common
import vcdb.subversion
_log = logging.getLogger('vcdb')
def vcdb_command(arguments=None):
result = 1
if arguments is None:
arguments = sys.argv[1:]
default_database = 'sqlite:///' + os.path.join(tempfile.gettempdir(), 'vcdb.db')
parser = argparse.ArgumentParser(description='build SQL database from version control repository')
parser.add_argument('repository', metavar='REPOSITORY', help='URI to repository')
parser.add_argument(
'database', metavar='DATABASE', nargs='?', default=default_database,
help='URI for sqlalchemy database engine; default: %s' % default_database)
parser.add_argument('--verbose', '-v', action='store_true', help='explain what is being done')
parser.add_argument('--version', action='version', version='%(prog)s ' + vcdb.__version__)
args = parser.parse_args(arguments)
if args.verbose:
_log.setLevel(logging.DEBUG)
try:
_log.info('connect to database %s', args.database)
session = vcdb.common.vcdb_session(args.database)
vcdb.subversion.update_repository(session, args.repository)
_log.info('finished')
result = 0
except KeyboardInterrupt:
_log.error('interrupted as requested by user')
except OSError as error:
_log.error(error)
except SQLAlchemyError as error:
_log.error('cannot access database: %s', error)
except Exception as error:
_log.exception(error)
return result
def main():
logging.basicConfig(level=logging.INFO)
sys.exit(vcdb_command())
if __name__ == '__main__':
main()
| lgpl-3.0 | -774,233,551,628,635,300 | 31.152542 | 102 | 0.68213 | false | 3.927536 | false | false | false |
eman/tempodb-archive | setup.py | 1 | 1078 | import os
from setuptools import setup
project_dir = os.path.abspath(os.path.dirname(__file__))
long_descriptions = []
for rst in ('README.rst', 'LICENSE.rst'):
with open(os.path.join(project_dir, rst), 'r') as f:
long_descriptions.append(f.read())
setup(name='tempodb-archive',
version='1.0.0',
description='Archive TempoDB Datapoints',
long_description='\n\n'.join(long_descriptions),
author='Emmanuel Levijarvi',
author_email='[email protected]',
url='https://github.com/eman/tempodb-archive',
license='BSD',
py_modules=['tempodb_archive'],
install_requires=['tempodb'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Utilities',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
],
keywords='tempodb archive',
entry_points={
'console_scripts': ['tempodb-archive=tempodb_archive:main'],
})
| bsd-2-clause | -4,740,467,918,732,794,000 | 32.6875 | 70 | 0.608534 | false | 3.809187 | false | false | false |
bmispelon/weirdict | weirdict/base.py | 1 | 2306 | from abc import abstractmethod
from collections import MutableMapping, Mapping
from .decorators import apply_keyfunc
from functools import total_ordering
from itertools import repeat, izip
_SENTINEL = object()
@total_ordering
class AbstractNormalizedDict(MutableMapping):
"""A dictionary where keys are normalized through a given function
before being inserted in the dict.
All of dict's methods have been implemented so it should be possible to use
it as a drop-in replacement.
Subclasses should define a keyfunc method that takes one argument
(a key to be inserted/retrieved/deleted) and return a normalized version of it.
"""
@abstractmethod
def keyfunc(self, key):
pass
def __init__(self, map_or_seq=_SENTINEL, **kwargs):
"""Normalize the keys before inserting them in the internal dictionary.
The signature is (hopefully) the same as the one for dict.
"""
if map_or_seq is _SENTINEL:
args = []
elif isinstance(map_or_seq, Mapping):
args = [((self.keyfunc(k), v) for k, v in map_or_seq.items())]
else: # sequence of two-tuples
args = [((self.keyfunc(k), v) for k, v in map_or_seq)]
kwargs = {self.keyfunc(k): v for k, v in kwargs.iteritems()}
self._dict = dict(*args, **kwargs)
def copy(self):
return type(self)(self.iteritems())
@apply_keyfunc
def __getitem__(self, key):
return self._dict[key]
@apply_keyfunc
def __setitem__(self, key, value):
self._dict[key] = value
@apply_keyfunc
def __delitem__(self, key):
del self._dict[key]
@apply_keyfunc
def has_key(self, key):
return self._dict.has_key(key)
def __len__(self):
return len(self._dict)
def __iter__(self):
return iter(self._dict)
def viewitems(self):
return self._dict.viewitems()
def viewkeys(self):
return self._dict.viewkeys()
def viewvalues(self):
return self._dict.viewvalues()
@classmethod
def fromkeys(cls, seq, value=None):
return cls(izip(seq, repeat(value)))
def __cmp__(self, other):
return cmp(self._dict, other)
def __lt__(self, other):
return self._dict < other
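# Example: a minimal concrete subclass sketch. Normalizing keys to lowercase yields a
# case-insensitive mapping; any one-argument normalizer works the same way.
class _ExampleCaseInsensitiveDict(AbstractNormalizedDict):
    def keyfunc(self, key):
        return key.lower()
# _ExampleCaseInsensitiveDict([('Foo', 1)])['FOO'] returns 1, because both the stored
# key and the lookup key are normalized to 'foo'.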
| bsd-2-clause | 5,566,931,536,926,515,000 | 26.452381 | 83 | 0.619254 | false | 3.92845 | false | false | false |
nextstrain/augur | augur/utils.py | 1 | 24646 | import argparse
import Bio
import Bio.Phylo
import gzip
import os, json, sys
import pandas as pd
import subprocess
import shlex
from contextlib import contextmanager
from treetime.utils import numeric_date
from collections import defaultdict
from pkg_resources import resource_stream
from io import TextIOWrapper
from .__version__ import __version__
from augur.io import open_file
from augur.util_support.color_parser import ColorParser
from augur.util_support.date_disambiguator import DateDisambiguator
from augur.util_support.metadata_file import MetadataFile
from augur.util_support.node_data_reader import NodeDataReader
from augur.util_support.shell_command_runner import ShellCommandRunner
class AugurException(Exception):
pass
def is_vcf(fname):
"""Convenience method to check if a file is a vcf file.
>>> is_vcf("./foo")
False
>>> is_vcf("./foo.vcf")
True
>>> is_vcf("./foo.vcf.GZ")
True
"""
return fname.lower().endswith(".vcf") or fname.lower().endswith(".vcf.gz")
def myopen(fname, mode):
if fname.endswith('.gz'):
import gzip
return gzip.open(fname, mode, encoding='utf-8')
else:
return open(fname, mode, encoding='utf-8')
def get_json_name(args, default=None):
if args.output_node_data:
return args.output_node_data
else:
if default:
print("WARNING: no name for the output file was specified. Writing results to %s."%default, file=sys.stderr)
return default
else:
raise ValueError("Please specify a name for the JSON file containing the results.")
def ambiguous_date_to_date_range(uncertain_date, fmt, min_max_year=None):
return DateDisambiguator(uncertain_date, fmt=fmt, min_max_year=min_max_year).range()
def read_metadata(fname, query=None):
return MetadataFile(fname, query).read()
def is_date_ambiguous(date, ambiguous_by="any"):
"""
Returns whether a given date string in the format of YYYY-MM-DD is ambiguous by a given part of the date (e.g., day, month, year, or any parts).
Parameters
----------
date : str
Date string in the format of YYYY-MM-DD
ambiguous_by : str
Field of the date string to test for ambiguity ("day", "month", "year", "any")
"""
date_components = date.split('-', 2)
if len(date_components) == 3:
year, month, day = date_components
elif len(date_components) == 2:
year, month = date_components
day = "XX"
else:
year = date_components[0]
month = "XX"
day = "XX"
# Determine ambiguity hierarchically such that, for example, an ambiguous
# month implicates an ambiguous day even when day information is available.
return any((
"X" in year,
"X" in month and ambiguous_by in ("any", "month", "day"),
"X" in day and ambiguous_by in ("any", "day")
))
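# Example: a brief sketch of the hierarchical rule described above -- a masked month
# makes the day count as ambiguous even when the day digits are present.
def _example_is_date_ambiguous():
    assert is_date_ambiguous("2020-XX-01", ambiguous_by="day")
    assert is_date_ambiguous("2020-06-XX", ambiguous_by="day")
    assert not is_date_ambiguous("2020-06-01", ambiguous_by="any")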
def get_numerical_dates(meta_dict, name_col = None, date_col='date', fmt=None, min_max_year=None):
if fmt:
from datetime import datetime
numerical_dates = {}
for k,m in meta_dict.items():
v = m[date_col]
if type(v)!=str:
print("WARNING: %s has an invalid data string:"%k,v)
continue
elif 'XX' in v:
ambig_date = ambiguous_date_to_date_range(v, fmt, min_max_year)
if ambig_date is None or None in ambig_date:
numerical_dates[k] = [None, None] #don't send to numeric_date or will be set to today
else:
numerical_dates[k] = [numeric_date(d) for d in ambig_date]
else:
try:
numerical_dates[k] = numeric_date(datetime.strptime(v, fmt))
except:
numerical_dates[k] = None
else:
numerical_dates = {k:float(v) for k,v in meta_dict.items()}
return numerical_dates
class InvalidTreeError(Exception):
"""Represents an error loading a phylogenetic tree from a filename.
"""
pass
def read_tree(fname, min_terminals=3):
"""Safely load a tree from a given filename or raise an error if the file does
not contain a valid tree.
Parameters
----------
fname : str
name of a file containing a phylogenetic tree
min_terminals : int
minimum number of terminals required for the parsed tree as a sanity
check on the tree
Raises
------
InvalidTreeError
If the given file exists but does not seem to contain a valid tree format.
Returns
-------
Bio.Phylo :
BioPython tree instance
"""
T = None
supported_tree_formats = ["newick", "nexus"]
for fmt in supported_tree_formats:
try:
T = Bio.Phylo.read(fname, fmt)
# Check the sanity of the parsed tree to handle cases when non-tree
# data are still successfully parsed by BioPython. Too few terminals
# in a tree indicates that the input is not valid.
if T.count_terminals() < min_terminals:
T = None
else:
break
except ValueError:
# We cannot open the tree in the current format, so we will try
# another.
pass
# If the tree cannot be loaded, raise an error to that effect.
if T is None:
raise InvalidTreeError(
"Could not read the given tree %s using the following supported formats: %s" % (fname, ", ".join(supported_tree_formats))
)
return T
def read_node_data(fnames, tree=None):
return NodeDataReader(fnames, tree).read()
def write_json(data, file_name, indent=(None if os.environ.get("AUGUR_MINIFY_JSON") else 2), include_version=True):
"""
Write ``data`` as JSON to the given ``file_name``, creating parent directories
if necessary. The augur version is included as a top-level key "augur_version".
Parameters
----------
data : dict
data to write out to JSON
file_name : str
file name to write to
indent : int or None, optional
        JSON indentation level. Default is `None` if the environment variable `AUGUR_MINIFY_JSON`
        is truthy, else 2
include_version : bool, optional
Include the augur version. Default: `True`.
Raises
------
OSError
"""
#in case parent folder does not exist yet
parent_directory = os.path.dirname(file_name)
if parent_directory and not os.path.exists(parent_directory):
try:
os.makedirs(parent_directory)
except OSError: #Guard against race condition
if not os.path.isdir(parent_directory):
raise
if include_version:
data["generated_by"] = {"program": "augur", "version": get_augur_version()}
with open(file_name, 'w', encoding='utf-8') as handle:
json.dump(data, handle, indent=indent, sort_keys=True)
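# Example: a small sketch of writing node data (the file name is a placeholder).
# Parent directories are created as needed, and setting the AUGUR_MINIFY_JSON
# environment variable beforehand drops the indentation.
def _example_write_json():
    write_json({"nodes": {}}, "results/example_node_data.json")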
def load_features(reference, feature_names=None):
#read in appropriately whether GFF or Genbank
#checks explicitly for GFF otherwise assumes Genbank
if not os.path.isfile(reference):
print("ERROR: reference sequence not found. looking for", reference)
return None
features = {}
if '.gff' in reference.lower():
        #looks for 'gene' and 'locus_tag' as best for TB
try:
from BCBio import GFF #Package name is confusing - tell user exactly what they need!
except ImportError:
print("ERROR: Package BCBio.GFF not found! Please install using \'pip install bcbio-gff\' before re-running.")
return None
limit_info = dict( gff_type = ['gene'] )
with open(reference, encoding='utf-8') as in_handle:
for rec in GFF.parse(in_handle, limit_info=limit_info):
for feat in rec.features:
if feature_names is not None: #check both tags; user may have used either
if "gene" in feat.qualifiers and feat.qualifiers["gene"][0] in feature_names:
fname = feat.qualifiers["gene"][0]
elif "locus_tag" in feat.qualifiers and feat.qualifiers["locus_tag"][0] in feature_names:
fname = feat.qualifiers["locus_tag"][0]
else:
fname = None
else:
if "gene" in feat.qualifiers:
fname = feat.qualifiers["gene"][0]
else:
fname = feat.qualifiers["locus_tag"][0]
if fname:
features[fname] = feat
if feature_names is not None:
for fe in feature_names:
if fe not in features:
print("Couldn't find gene {} in GFF or GenBank file".format(fe))
else:
from Bio import SeqIO
for feat in SeqIO.read(reference, 'genbank').features:
if feat.type=='CDS':
if "locus_tag" in feat.qualifiers:
fname = feat.qualifiers["locus_tag"][0]
if feature_names is None or fname in feature_names:
features[fname] = feat
elif "gene" in feat.qualifiers:
fname = feat.qualifiers["gene"][0]
if feature_names is None or fname in feature_names:
features[fname] = feat
elif feat.type=='source': #read 'nuc' as well for annotations - need start/end of whole!
features['nuc'] = feat
return features
def read_config(fname):
if not (fname and os.path.isfile(fname)):
print("ERROR: config file %s not found."%fname)
return defaultdict(dict)
try:
with open(fname, 'rb') as ifile:
config = json.load(ifile)
except json.decoder.JSONDecodeError as err:
print("FATAL ERROR:")
print("\tCouldn't parse the JSON file {}".format(fname))
print("\tError message: '{}'".format(err.msg))
print("\tLine number: '{}'".format(err.lineno))
print("\tColumn number: '{}'".format(err.colno))
print("\tYou must correct this file in order to proceed.")
sys.exit(2)
return config
def read_lat_longs(overrides=None, use_defaults=True):
coordinates = {}
# TODO: make parsing of tsv files more robust while allow for whitespace delimiting for backwards compatibility
def add_line_to_coordinates(line):
if line.startswith('#') or line.strip() == "":
return
fields = line.strip().split() if not '\t' in line else line.strip().split('\t')
if len(fields) == 4:
geo_field, loc = fields[0].lower(), fields[1].lower()
lat, long = float(fields[2]), float(fields[3])
coordinates[(geo_field, loc)] = {
"latitude": lat,
"longitude": long
}
else:
print("WARNING: geo-coordinate file contains invalid line. Please make sure not to mix tabs and spaces as delimiters (use only tabs):",line)
if use_defaults:
with resource_stream(__package__, "data/lat_longs.tsv") as stream:
with TextIOWrapper(stream, "utf-8") as defaults:
for line in defaults:
add_line_to_coordinates(line)
if overrides:
if os.path.isfile(overrides):
with open(overrides, encoding='utf-8') as ifile:
for line in ifile:
add_line_to_coordinates(line)
else:
print("WARNING: input lat/long file %s not found." % overrides)
return coordinates
def read_colors(overrides=None, use_defaults=True):
return ColorParser(mapping_filename=overrides, use_defaults=use_defaults).mapping
def write_VCF_translation(prot_dict, vcf_file_name, ref_file_name):
"""
Writes out a VCF-style file (which seems to be minimally handleable
by vcftools and pyvcf) of the AA differences between sequences and the reference.
This is a similar format created/used by read_in_vcf except that there is one
of these dicts (with sequences, reference, positions) for EACH gene.
Also writes out a fasta of the reference alignment.
EBH 12 Dec 2017
"""
import numpy as np
#for the header
seqNames = list(prot_dict[list(prot_dict.keys())[0]]['sequences'].keys())
#prepare the header of the VCF & write out
header=["#CHROM","POS","ID","REF","ALT","QUAL","FILTER","INFO","FORMAT"]+seqNames
with open(vcf_file_name, 'w', encoding='utf-8') as the_file:
the_file.write( "##fileformat=VCFv4.2\n"+
"##source=NextStrain_Protein_Translation\n"+
"##FORMAT=<ID=GT,Number=1,Type=String,Description=\"Genotype\">\n")
the_file.write("\t".join(header)+"\n")
refWrite = []
vcfWrite = []
#go through for every gene/protein
for fname, prot in prot_dict.items():
sequences = prot['sequences']
ref = prot['reference']
positions = prot['positions']
#write out the reference fasta
refWrite.append(">"+fname)
refWrite.append(ref)
#go through every variable position
        #There are no deletions here, so it's simpler than for VCF nuc sequences!
for pi in positions:
pos = pi+1 #change numbering to match VCF not python
refb = ref[pi] #reference base at this position
#try/except is (much) faster than list comprehension!
pattern = []
for k,v in sequences.items():
try:
pattern.append(sequences[k][pi])
except KeyError:
pattern.append('.')
pattern = np.array(pattern)
#get the list of ALTs - minus any '.'!
uniques = np.unique(pattern)
uniques = uniques[np.where(uniques!='.')]
#Convert bases to the number that matches the ALT
j=1
for u in uniques:
pattern[np.where(pattern==u)[0]] = str(j)
j+=1
#Now convert these calls to #/# (VCF format)
calls = [ j+"/"+j if j!='.' else '.' for j in pattern ]
if len(uniques)==0:
print("UNEXPECTED ERROR WHILE CONVERTING TO VCF AT POSITION {}".format(str(pi)))
break
#put it all together and write it out
output = [fname, str(pos), ".", refb, ",".join(uniques), ".", "PASS", ".", "GT"] + calls
vcfWrite.append("\t".join(output))
#write it all out
with open(ref_file_name, 'w', encoding='utf-8') as the_file:
the_file.write("\n".join(refWrite))
with open(vcf_file_name, 'a', encoding='utf-8') as the_file:
the_file.write("\n".join(vcfWrite))
if vcf_file_name.lower().endswith('.gz'):
import os
#must temporarily remove .gz ending, or gzip won't zip it!
os.rename(vcf_file_name, vcf_file_name[:-3])
call = ["gzip", vcf_file_name[:-3]]
run_shell_command(" ".join(call), raise_errors = True)
shquote = shlex.quote
def run_shell_command(cmd, raise_errors=False, extra_env=None):
"""
Run the given command string via Bash with error checking.
Returns True if the command exits normally. Returns False if the command
exits with failure and "raise_errors" is False (the default). When
"raise_errors" is True, exceptions are rethrown.
If an *extra_env* mapping is passed, the provided keys and values are
overlayed onto the default subprocess environment.
"""
return ShellCommandRunner(cmd, raise_errors=raise_errors, extra_env=extra_env).run()
def first_line(text):
"""
Returns the first line of the given text, ignoring leading and trailing
whitespace.
"""
return text.strip().splitlines()[0]
def available_cpu_cores(fallback: int = 1) -> int:
"""
Returns the number (an int) of CPU cores available to this **process**, if
determinable, otherwise the number of CPU cores available to the
**computer**, if determinable, otherwise the *fallback* number (which
defaults to 1).
"""
try:
# Note that this is the correct function to use, not os.cpu_count(), as
# described in the latter's documentation.
#
# The reason, which the documentation does not detail, is that
# processes may be pinned or restricted to certain CPUs by setting
# their "affinity". This is not typical except in high-performance
# computing environments, but if it is done, then a computer with say
# 24 total cores may only allow our process to use 12. If we tried to
# naively use all 24, we'd end up with two threads across the 12 cores.
# This would degrade performance rather than improve it!
return len(os.sched_getaffinity(0))
except:
# cpu_count() returns None if the value is indeterminable.
return os.cpu_count() or fallback
def nthreads_value(value):
"""
Argument value validation and casting function for --nthreads.
"""
if value.lower() == 'auto':
return available_cpu_cores()
try:
return int(value)
except ValueError:
raise argparse.ArgumentTypeError("'%s' is not an integer or the word 'auto'" % value) from None
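# Example: a minimal sketch of wiring nthreads_value into argparse so that
# "--nthreads auto" resolves to the cores available to this process (the argument
# name and default shown are illustrative).
def _example_nthreads_argument(parser):
    parser.add_argument("--nthreads", type=nthreads_value, default="auto",
                        help="number of threads to use, or 'auto'")
    return parser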
def get_parent_name_by_child_name_for_tree(tree):
'''
Return dictionary mapping child node names to parent node names
'''
parents = {}
for clade in tree.find_clades(order='level'):
for child in clade:
parents[child.name] = clade.name
return parents
def annotate_parents_for_tree(tree):
"""Annotate each node in the given tree with its parent.
>>> import io
>>> tree = Bio.Phylo.read(io.StringIO("(A, (B, C))"), "newick")
>>> not any([hasattr(node, "parent") for node in tree.find_clades()])
True
>>> tree = annotate_parents_for_tree(tree)
>>> tree.root.parent is None
True
>>> all([hasattr(node, "parent") for node in tree.find_clades()])
True
"""
tree.root.parent = None
for node in tree.find_clades(order="level"):
for child in node.clades:
child.parent = node
# Return the tree.
return tree
def json_to_tree(json_dict, root=True):
"""Returns a Bio.Phylo tree corresponding to the given JSON dictionary exported
by `tree_to_json`.
Assigns links back to parent nodes for the root of the tree.
Test opening a JSON from augur export v1.
>>> import json
>>> json_fh = open("tests/data/json_tree_to_nexus/flu_h3n2_ha_3y_tree.json", "r")
>>> json_dict = json.load(json_fh)
>>> tree = json_to_tree(json_dict)
>>> tree.name
'NODE_0002020'
>>> len(tree.clades)
2
>>> tree.clades[0].name
'NODE_0001489'
>>> hasattr(tree, "attr")
True
>>> "dTiter" in tree.attr
True
>>> tree.clades[0].parent.name
'NODE_0002020'
>>> tree.clades[0].branch_length > 0
True
Test opening a JSON from augur export v2.
>>> json_fh = open("tests/data/zika.json", "r")
>>> json_dict = json.load(json_fh)
>>> tree = json_to_tree(json_dict)
>>> hasattr(tree, "name")
True
>>> len(tree.clades) > 0
True
>>> tree.clades[0].branch_length > 0
True
"""
# Check for v2 JSON which has combined metadata and tree data.
if root and "meta" in json_dict and "tree" in json_dict:
json_dict = json_dict["tree"]
node = Bio.Phylo.Newick.Clade()
# v1 and v2 JSONs use different keys for strain names.
if "name" in json_dict:
node.name = json_dict["name"]
else:
node.name = json_dict["strain"]
if "children" in json_dict:
# Recursively add children to the current node.
node.clades = [json_to_tree(child, root=False) for child in json_dict["children"]]
# Assign all non-children attributes.
for attr, value in json_dict.items():
if attr != "children":
setattr(node, attr, value)
# Only v1 JSONs support a single `attr` attribute.
if hasattr(node, "attr"):
node.numdate = node.attr.get("num_date")
node.branch_length = node.attr.get("div")
if "translations" in node.attr:
node.translations = node.attr["translations"]
elif hasattr(node, "node_attrs"):
node.branch_length = node.node_attrs.get("div")
if root:
node = annotate_parents_for_tree(node)
return node
def get_augur_version():
"""
Returns a string of the current augur version.
"""
return __version__
def read_bed_file(bed_file):
"""Read a BED file and return a list of excluded sites.
Note: This function assumes the given file is a BED file. On parsing
failures, it will attempt to skip the first line and retry, but no
other error checking is attempted. Incorrectly formatted files will
raise errors.
Parameters
----------
bed_file : str
Path to the BED file
Returns:
--------
list[int]:
Sorted list of unique zero-indexed sites
"""
mask_sites = []
try:
bed = pd.read_csv(bed_file, sep='\t', header=None, usecols=[1,2],
dtype={1:int,2:int})
except ValueError:
# Check if we have a header row. Otherwise, just fail.
bed = pd.read_csv(bed_file, sep='\t', header=None, usecols=[1,2],
dtype={1:int,2:int}, skiprows=1)
print("Skipped row 1 of %s, assuming it is a header." % bed_file)
for _, row in bed.iterrows():
mask_sites.extend(range(row[1], row[2]))
return sorted(set(mask_sites))
def read_mask_file(mask_file):
"""Read a masking file and return a list of excluded sites.
Masking files have a single masking site per line, either alone
or as the second column of a tab-separated file. These sites
are assumed to be one-indexed, NOT zero-indexed. Incorrectly
formatted lines will be skipped.
Parameters
----------
mask_file : str
Path to the masking file
Returns:
--------
list[int]:
Sorted list of unique zero-indexed sites
"""
mask_sites = []
with open(mask_file, encoding='utf-8') as mf:
for idx, line in enumerate(l.strip() for l in mf.readlines()):
if "\t" in line:
line = line.split("\t")[1]
try:
mask_sites.append(int(line) - 1)
except ValueError as err:
print("Could not read line %s of %s: '%s' - %s" %
(idx, mask_file, line, err), file=sys.stderr)
raise
return sorted(set(mask_sites))
def load_mask_sites(mask_file):
"""Load masking sites from either a BED file or a masking file.
Parameters
----------
mask_file: str
Path to the BED or masking file
Returns
-------
list[int]
Sorted list of unique zero-indexed sites
"""
if mask_file.lower().endswith(".bed"):
mask_sites = read_bed_file(mask_file)
else:
mask_sites = read_mask_file(mask_file)
print("%d masking sites read from %s" % (len(mask_sites), mask_file))
return mask_sites
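# Illustrative usage sketch (not part of the original module): dispatch is by
# file extension only, so a ".bed" path is parsed as a BED file and anything
# else as a one-site-per-line mask file. Both paths below are hypothetical.
def _example_load_mask_sites():  # documentation helper, not called by augur
    bed_sites = load_mask_sites("example.bed")
    txt_sites = load_mask_sites("example_mask.txt")
    return bed_sites, txt_sites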
VALID_NUCLEOTIDES = { # http://reverse-complement.com/ambiguity.html
"A", "G", "C", "T", "U", "N", "R", "Y", "S", "W", "K", "M", "B", "V", "D", "H", "-",
"a", "g", "c", "t", "u", "n", "r", "y", "s", "w", "k", "m", "b", "v", "d", "h", "-"
}
def read_strains(*files, comment_char="#"):
"""Reads strain names from one or more plain text files and returns the
set of distinct strains.
Strain names can be commented with full-line or inline comments. For
example, the following is a valid strain names file:
# this is a comment at the top of the file
strain1 # exclude strain1 because it isn't sequenced properly
strain2
# this is an empty line that will be ignored.
Parameters
----------
files : one or more str
one or more names of text files with one strain name per line
Returns
-------
set :
strain names from the given input files
"""
strains = set()
for input_file in files:
with open_file(input_file, 'r') as ifile:
for line in ifile:
# Allow comments anywhere in a given line.
strain_name = line.split(comment_char)[0].strip()
if len(strain_name) > 0:
strains.add(strain_name)
return strains
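# Illustrative usage sketch (not part of the original module): both file names
# are hypothetical; the distinct strain names across all files are returned as
# a single set.
def _example_read_strains():  # documentation helper, not called by augur
    excluded = read_strains("exclude.txt", "outliers.txt")
    print("%d strains excluded" % len(excluded))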
| agpl-3.0 | 4,288,972,644,168,853,000 | 33.615169 | 152 | 0.597095 | false | 3.88126 | false | false | false |
tdickers/mitmproxy | release/rtool.py | 2 | 12215 |
#!/usr/bin/env python
from __future__ import absolute_import, print_function, division
from os.path import join
import contextlib
import os
import shutil
import subprocess
import re
import shlex
import runpy
import zipfile
import tarfile
import platform
import click
import pysftp
import fnmatch
# https://virtualenv.pypa.io/en/latest/userguide.html#windows-notes
# scripts and executables on Windows go in ENV\Scripts\ instead of ENV/bin/
if platform.system() == "Windows":
VENV_BIN = "Scripts"
else:
VENV_BIN = "bin"
if platform.system() == "Windows":
def Archive(name):
a = zipfile.ZipFile(name, "w")
a.add = a.write
return a
else:
def Archive(name):
return tarfile.open(name, "w:gz")
RELEASE_DIR = join(os.path.dirname(os.path.realpath(__file__)))
DIST_DIR = join(RELEASE_DIR, "dist")
ROOT_DIR = os.path.normpath(join(RELEASE_DIR, ".."))
RELEASE_SPEC_DIR = join(RELEASE_DIR, "specs")
VERSION_FILE = join(ROOT_DIR, "netlib/version.py")
BUILD_DIR = join(RELEASE_DIR, "build")
PYINSTALLER_TEMP = join(BUILD_DIR, "pyinstaller")
PYINSTALLER_DIST = join(BUILD_DIR, "binaries")
VENV_DIR = join(BUILD_DIR, "venv")
VENV_PIP = join(VENV_DIR, VENV_BIN, "pip")
VENV_PYINSTALLER = join(VENV_DIR, VENV_BIN, "pyinstaller")
project = {
"name": "mitmproxy",
"tools": ["pathod", "pathoc", "mitmproxy", "mitmdump", "mitmweb"],
"bdists": {
"mitmproxy": ["mitmproxy", "mitmdump", "mitmweb"],
"pathod": ["pathoc", "pathod"]
},
"dir": ROOT_DIR,
"python_version": "py2"
}
if platform.system() == "Windows":
project["tools"].remove("mitmproxy")
project["bdists"]["mitmproxy"].remove("mitmproxy")
def get_version():
return runpy.run_path(VERSION_FILE)["VERSION"]
def get_snapshot_version():
last_tag, tag_dist, commit = git("describe --tags --long").strip().rsplit(b"-", 2)
tag_dist = int(tag_dist)
if tag_dist == 0:
return get_version()
else:
# The wheel build tag (we use the commit) must start with a digit, so we include "0x"
return "{version}dev{tag_dist:04}-0x{commit}".format(
version=get_version(), # this should already be the next version
tag_dist=tag_dist,
commit=commit
)
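# Illustrative example (not part of the original file): if `git describe
# --tags --long` reports something like "v0.17-17-gabc1234" and get_version()
# returns "0.18", get_snapshot_version() would produce a string shaped like
# "0.18dev0017-0xgabc1234"; the exact values depend on the repository state.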
def archive_name(project):
platform_tag = {
"Darwin": "osx",
"Windows": "win32",
"Linux": "linux"
}.get(platform.system(), platform.system())
if platform.system() == "Windows":
ext = "zip"
else:
ext = "tar.gz"
return "{project}-{version}-{platform}.{ext}".format(
project=project,
version=get_version(),
platform=platform_tag,
ext=ext
)
def wheel_name():
return "{project}-{version}-{py_version}-none-any.whl".format(
project=project["name"],
version=get_version(),
py_version=project["python_version"]
)
@contextlib.contextmanager
def empty_pythonpath():
"""
Make sure that the regular python installation is not on the python path,
which would give us access to modules installed outside of our virtualenv.
"""
pythonpath = os.environ.get("PYTHONPATH", "")
os.environ["PYTHONPATH"] = ""
yield
os.environ["PYTHONPATH"] = pythonpath
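# Illustrative usage sketch (not part of the original file): run a child
# Python process while PYTHONPATH is temporarily cleared, then restored.
def _example_empty_pythonpath():  # documentation helper, not called by rtool
    with empty_pythonpath():
        subprocess.check_call(["python", "-c", "import sys; print(sys.path)"])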
@contextlib.contextmanager
def chdir(path):
old_dir = os.getcwd()
os.chdir(path)
yield
os.chdir(old_dir)
def git(args):
with chdir(ROOT_DIR):
return subprocess.check_output(["git"] + shlex.split(args))
@click.group(chain=True)
def cli():
"""
mitmproxy build tool
"""
pass
@cli.command("contributors")
def contributors():
"""
Update CONTRIBUTORS.md
"""
with chdir(ROOT_DIR):
print("Updating CONTRIBUTORS...")
contributors_data = git("shortlog -n -s")
with open("CONTRIBUTORS", "w") as f:
f.write(contributors_data)
@cli.command("set-version")
@click.argument('version')
def set_version(version):
"""
Update version information
"""
print("Update versions...")
version = ", ".join(version.split("."))
print("Update %s..." % VERSION_FILE)
with open(VERSION_FILE, "rb") as f:
content = f.read()
new_content = re.sub(
r"IVERSION\s*=\s*\([\d,\s]+\)", "IVERSION = (%s)" % version,
content
)
with open(VERSION_FILE, "wb") as f:
f.write(new_content)
@cli.command("wheels")
def wheels():
"""
Build wheels
"""
with empty_pythonpath():
print("Building release...")
if os.path.exists(DIST_DIR):
shutil.rmtree(DIST_DIR)
print("Creating wheel for %s ..." % project["name"])
subprocess.check_call(
[
"python", "./setup.py", "-q",
"bdist_wheel", "--dist-dir", DIST_DIR,
],
cwd=project["dir"]
)
print("Creating virtualenv for test install...")
if os.path.exists(VENV_DIR):
shutil.rmtree(VENV_DIR)
subprocess.check_call(["virtualenv", "-q", VENV_DIR])
with chdir(DIST_DIR):
print("Installing %s..." % project["name"])
subprocess.check_call([VENV_PIP, "install", "-q", wheel_name()])
print("Running binaries...")
for tool in project["tools"]:
tool = join(VENV_DIR, VENV_BIN, tool)
print("> %s --version" % tool)
print(subprocess.check_output([tool, "--version"]))
print("Virtualenv available for further testing:")
print("source %s" % os.path.normpath(join(VENV_DIR, VENV_BIN, "activate")))
@cli.command("bdist")
@click.option("--use-existing-wheels/--no-use-existing-wheels", default=False)
@click.argument("pyinstaller_version", envvar="PYINSTALLER_VERSION", default="PyInstaller~=3.1.1")
@click.pass_context
def bdist(ctx, use_existing_wheels, pyinstaller_version):
"""
Build a binary distribution
"""
if os.path.exists(PYINSTALLER_TEMP):
shutil.rmtree(PYINSTALLER_TEMP)
if os.path.exists(PYINSTALLER_DIST):
shutil.rmtree(PYINSTALLER_DIST)
if not use_existing_wheels:
ctx.invoke(wheels)
print("Installing PyInstaller...")
subprocess.check_call([VENV_PIP, "install", "-q", pyinstaller_version])
for bdist_project, tools in project["bdists"].items():
with Archive(join(DIST_DIR, archive_name(bdist_project))) as archive:
for tool in tools:
# This is PyInstaller, so it messes up paths.
# We need to make sure that we are in the spec folder.
with chdir(RELEASE_SPEC_DIR):
print("Building %s binary..." % tool)
subprocess.check_call(
[
VENV_PYINSTALLER,
"--clean",
"--workpath", PYINSTALLER_TEMP,
"--distpath", PYINSTALLER_DIST,
# This is PyInstaller, so setting a
# different log level obviously breaks it :-)
# "--log-level", "WARN",
"%s.spec" % tool
]
)
# Test if it works at all O:-)
executable = join(PYINSTALLER_DIST, tool)
if platform.system() == "Windows":
executable += ".exe"
print("> %s --version" % executable)
subprocess.check_call([executable, "--version"])
archive.add(executable, os.path.basename(executable))
print("Packed {}.".format(archive_name(bdist_project)))
@cli.command("upload-release")
@click.option('--username', prompt=True)
@click.password_option(confirmation_prompt=False)
@click.option('--repository', default="pypi")
def upload_release(username, password, repository):
"""
Upload wheels to PyPI
"""
filename = wheel_name()
print("Uploading {} to {}...".format(filename, repository))
subprocess.check_call([
"twine",
"upload",
"-u", username,
"-p", password,
"-r", repository,
join(DIST_DIR, filename)
])
@cli.command("upload-snapshot")
@click.option("--host", envvar="SNAPSHOT_HOST", prompt=True)
@click.option("--port", envvar="SNAPSHOT_PORT", type=int, default=22)
@click.option("--user", envvar="SNAPSHOT_USER", prompt=True)
@click.option("--private-key", default=join(RELEASE_DIR, "rtool.pem"))
@click.option("--private-key-password", envvar="SNAPSHOT_PASS", prompt=True, hide_input=True)
@click.option("--wheel/--no-wheel", default=False)
@click.option("--bdist/--no-bdist", default=False)
def upload_snapshot(host, port, user, private_key, private_key_password, wheel, bdist):
"""
Upload snapshot to snapshot server
"""
with pysftp.Connection(host=host,
port=port,
username=user,
private_key=private_key,
private_key_pass=private_key_password) as sftp:
dir_name = "snapshots/v{}".format(get_version())
sftp.makedirs(dir_name)
with sftp.cd(dir_name):
files = []
if wheel:
files.append(wheel_name())
for bdist in project["bdists"].keys():
files.append(archive_name(bdist))
for f in files:
local_path = join(DIST_DIR, f)
remote_filename = f.replace(get_version(), get_snapshot_version())
symlink_path = "../{}".format(f.replace(get_version(), "latest"))
# Delete old versions
old_version = f.replace(get_version(), "*")
for f_old in sftp.listdir():
if fnmatch.fnmatch(f_old, old_version):
print("Removing {}...".format(f_old))
sftp.remove(f_old)
# Upload new version
print("Uploading {} as {}...".format(f, remote_filename))
with click.progressbar(length=os.stat(local_path).st_size) as bar:
sftp.put(
local_path,
"." + remote_filename,
callback=lambda done, total: bar.update(done - bar.pos)
)
# We hide the file during upload.
sftp.rename("." + remote_filename, remote_filename)
# update symlink for the latest release
if sftp.lexists(symlink_path):
print("Removing {}...".format(symlink_path))
sftp.remove(symlink_path)
sftp.symlink("v{}/{}".format(get_version(), remote_filename), symlink_path)
@cli.command("wizard")
@click.option('--next-version', prompt=True)
@click.option('--username', prompt="PyPI Username")
@click.password_option(confirmation_prompt=False, prompt="PyPI Password")
@click.option('--repository', default="pypi")
@click.pass_context
def wizard(ctx, next_version, username, password, repository):
"""
Interactive Release Wizard
"""
is_dirty = git("status --porcelain")
if is_dirty:
raise RuntimeError("Repository is not clean.")
# update contributors file
ctx.invoke(contributors)
# Build test release
ctx.invoke(bdist)
try:
click.confirm("Please test the release now. Is it ok?", abort=True)
except click.Abort:
# undo changes
git("checkout CONTRIBUTORS")
raise
# Everything ok - let's ship it!
git("tag v{}".format(get_version()))
git("push --tags")
ctx.invoke(
upload_release,
username=username, password=password, repository=repository
)
click.confirm("Now please wait until CI has built binaries. Finished?")
# version bump commit
ctx.invoke(set_version, version=next_version)
git("commit -a -m \"bump version\"")
git("push")
click.echo("All done!")
if __name__ == "__main__":
cli()
| mit | -2,785,414,311,183,450,600 | 30.97644 | 98 | 0.567745 | false | 3.880241 | true | false | false |
terrelln/python-zstandard | bench.py | 1 | 34426 |
#!/usr/bin/env python
# Copyright (c) 2016-present, Gregory Szorc
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
"""Very hacky script for benchmarking zstd.
Like most benchmarks, results should be treated with skepticism.
"""
import io
import os
import struct
import sys
import time
import zlib
if sys.version_info[0] >= 3:
bio = io.BytesIO
else:
import cStringIO
bio = cStringIO.StringIO
import zstandard as zstd
def timer(fn, miniter=3, minwall=3.0):
"""Runs fn() multiple times and returns the results.
Runs for at least ``miniter`` iterations and ``minwall`` wall time.
"""
results = []
count = 0
# Ideally a monotonic clock, but doesn't matter too much.
wall_begin = time.time()
while True:
wstart = time.time()
start = os.times()
fn()
end = os.times()
wend = time.time()
count += 1
user = end[0] - start[0]
system = end[1] - start[1]
cpu = user + system
wall = wend - wstart
results.append((cpu, user, system, wall))
# Ensure we run at least ``miniter`` times.
if count < miniter:
continue
# And for ``minwall`` seconds.
elapsed = wend - wall_begin
if elapsed < minwall:
continue
break
return results
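# Illustrative usage sketch (not part of the original script): time a
# hypothetical workload and report the best wall time, mirroring how
# format_results() later picks min(results).
def _example_timer():  # documentation helper, not called by the benchmark
    results = timer(lambda: sum(range(100000)), miniter=3, minwall=0.5)
    best = min(results)
    print('best wall time: %.6f s (of %d runs)' % (best[3], len(results)))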
BENCHES = []
def bench(mode, title, require_content_size=False,
simple=False, zlib=False, threads_arg=False,
chunks_as_buffer=False, decompressed_sizes_arg=False):
def wrapper(fn):
if not fn.__name__.startswith(('compress_', 'decompress_')):
raise ValueError('benchmark function must begin with '
'compress_ or decompress_')
fn.mode = mode
fn.title = title
fn.require_content_size = require_content_size
fn.simple = simple
fn.zlib = zlib
fn.threads_arg = threads_arg
fn.chunks_as_buffer = chunks_as_buffer
fn.decompressed_sizes_arg = decompressed_sizes_arg
BENCHES.append(fn)
return fn
return wrapper
@bench('discrete', 'compress() single use zctx')
def compress_one_use(chunks, opts):
for chunk in chunks:
zctx = zstd.ZstdCompressor(**opts)
zctx.compress(chunk)
@bench('discrete', 'compress() reuse zctx', simple=True)
def compress_reuse(chunks, opts):
zctx = zstd.ZstdCompressor(**opts)
for chunk in chunks:
zctx.compress(chunk)
@bench('discrete', 'multi_compress_to_buffer() w/ buffer input',
simple=True, threads_arg=True, chunks_as_buffer=True)
def compress_multi_compress_to_buffer_buffer(chunks, opts, threads):
    zctx = zstd.ZstdCompressor(**opts)
zctx.multi_compress_to_buffer(chunks, threads=threads)
@bench('discrete', 'multi_compress_to_buffer() w/ list input',
threads_arg=True)
def compress_multi_compress_to_buffer_list(chunks, opts, threads):
zctx = zstd.ZstdCompressor(**opts)
zctx.multi_compress_to_buffer(chunks, threads=threads)
@bench('discrete', 'stream_reader()')
def compress_stream_reader(chunks, opts):
zctx = zstd.ZstdCompressor(**opts)
for chunk in chunks:
with zctx.stream_reader(chunk) as reader:
while reader.read(16384):
pass
@bench('discrete', 'write_to()')
def compress_write_to(chunks, opts):
zctx = zstd.ZstdCompressor(**opts)
for chunk in chunks:
b = bio()
with zctx.write_to(b) as compressor:
compressor.write(chunk)
@bench('discrete', 'write_to() w/ input size')
def compress_write_to_size(chunks, opts):
zctx = zstd.ZstdCompressor(**opts)
for chunk in chunks:
b = bio()
with zctx.write_to(b, size=len(chunk)) as compressor:
compressor.write(chunk)
@bench('discrete', 'read_to_iter()')
def compress_read_to_iter(chunks, opts):
zctx = zstd.ZstdCompressor(**opts)
for chunk in chunks:
for d in zctx.read_to_iter(chunk):
pass
@bench('discrete', 'read_to_iter() w/ input size')
def compress_read_to_iter_size(chunks, opts):
zctx = zstd.ZstdCompressor(**opts)
for chunk in chunks:
for d in zctx.read_to_iter(chunk, size=len(chunk)):
pass
@bench('discrete', 'compressobj()')
def compress_compressobj(chunks, opts):
zctx = zstd.ZstdCompressor(**opts)
for chunk in chunks:
cobj = zctx.compressobj()
cobj.compress(chunk)
cobj.flush()
@bench('discrete', 'compressobj() w/ input size')
def compress_compressobj_size(chunks, opts):
zctx = zstd.ZstdCompressor(**opts)
for chunk in chunks:
cobj = zctx.compressobj(size=len(chunk))
cobj.compress(chunk)
cobj.flush()
@bench('discrete', 'compress()', simple=True, zlib=True)
def compress_zlib_discrete(chunks, opts):
level = opts['zlib_level']
c = zlib.compress
for chunk in chunks:
c(chunk, level)
@bench('stream', 'compressobj()', simple=True, zlib=True)
def compress_zlib_compressobj(chunks, opts):
compressor = zlib.compressobj(opts['zlib_level'])
f = zlib.Z_SYNC_FLUSH
for chunk in chunks:
compressor.compress(chunk)
compressor.flush(f)
compressor.flush()
@bench('stream', 'write_to()')
def compress_stream_write_to(chunks, opts):
zctx = zstd.ZstdCompressor(**opts)
b = bio()
with zctx.write_to(b) as compressor:
for chunk in chunks:
compressor.write(chunk)
compressor.flush()
@bench('stream', 'compressobj()', simple=True)
def compress_stream_compressobj(chunks, opts):
zctx = zstd.ZstdCompressor(**opts)
compressor = zctx.compressobj()
flush = zstd.COMPRESSOBJ_FLUSH_BLOCK
for chunk in chunks:
compressor.compress(chunk)
compressor.flush(flush)
@bench('content-dict', 'compress()', simple=True)
def compress_content_dict_compress(chunks, opts):
zstd.ZstdCompressor(**opts).compress(chunks[0])
for i, chunk in enumerate(chunks[1:]):
d = zstd.ZstdCompressionDict(chunks[i])
zstd.ZstdCompressor(dict_data=d, **opts).compress(chunk)
@bench('content-dict', 'write_to()')
def compress_content_dict_write_to(chunks, opts, use_size=False):
zctx = zstd.ZstdCompressor(**opts)
b = bio()
with zctx.write_to(b, size=len(chunks[0]) if use_size else 0) as compressor:
compressor.write(chunks[0])
for i, chunk in enumerate(chunks[1:]):
d = zstd.ZstdCompressionDict(chunks[i])
b = bio()
zctx = zstd.ZstdCompressor(dict_data=d, **opts)
with zctx.write_to(b, size=len(chunk) if use_size else 0) as compressor:
compressor.write(chunk)
@bench('content-dict', 'write_to() w/ input size')
def compress_content_dict_write_to_size(chunks, opts):
compress_content_dict_write_to(chunks, opts, use_size=True)
@bench('content-dict', 'read_to_iter()')
def compress_content_dict_read_to_iter(chunks, opts, use_size=False):
zctx = zstd.ZstdCompressor(**opts)
size = len(chunks[0]) if use_size else 0
for o in zctx.read_to_iter(chunks[0], size=size):
pass
for i, chunk in enumerate(chunks[1:]):
d = zstd.ZstdCompressionDict(chunks[i])
zctx = zstd.ZstdCompressor(dict_data=d, **opts)
size = len(chunk) if use_size else 0
for o in zctx.read_to_iter(chunk, size=size):
pass
@bench('content-dict', 'read_to_iter() w/ input size')
def compress_content_dict_read_to_iter_size(chunks, opts):
compress_content_dict_read_to_iter(chunks, opts, use_size=True)
@bench('content-dict', 'compressobj()')
def compress_content_dict_compressobj(chunks, opts, use_size=False):
zctx = zstd.ZstdCompressor(**opts)
cobj = zctx.compressobj(size=len(chunks[0]) if use_size else 0)
cobj.compress(chunks[0])
cobj.flush()
for i, chunk in enumerate(chunks[1:]):
d = zstd.ZstdCompressionDict(chunks[i])
zctx = zstd.ZstdCompressor(dict_data=d, **opts)
cobj = zctx.compressobj(len(chunk) if use_size else 0)
cobj.compress(chunk)
cobj.flush()
@bench('content-dict', 'compressobj() w/ input size')
def compress_content_dict_compressobj_size(chunks, opts):
compress_content_dict_compressobj(chunks, opts, use_size=True)
@bench('discrete', 'decompress() single use zctx', require_content_size=True)
def decompress_one_use(chunks, opts):
for chunk in chunks:
zctx = zstd.ZstdDecompressor(**opts)
zctx.decompress(chunk)
@bench('discrete', 'decompress() reuse zctx', require_content_size=True,
simple=True)
def decompress_reuse(chunks, opts):
zctx = zstd.ZstdDecompressor(**opts)
for chunk in chunks:
zctx.decompress(chunk)
@bench('discrete', 'decompress()', simple=True, zlib=True)
def decompress_zlib_decompress(chunks):
d = zlib.decompress
for chunk in chunks:
d(chunk)
@bench('discrete', 'multi_decompress_to_buffer() w/ buffer input + sizes',
simple=True, threads_arg=True, decompressed_sizes_arg=True,
chunks_as_buffer=True)
def decompress_multi_decompress_to_buffer_buffer_and_size(chunks, opts, threads,
decompressed_sizes):
zctx = zstd.ZstdDecompressor(**opts)
zctx.multi_decompress_to_buffer(chunks,
decompressed_sizes=decompressed_sizes,
threads=threads)
@bench('discrete', 'multi_decompress_to_buffer() w/ buffer input',
require_content_size=True, threads_arg=True, chunks_as_buffer=True)
def decompress_multi_decompress_to_buffer_buffer(chunks, opts, threads):
zctx = zstd.ZstdDecompressor(**opts)
zctx.multi_decompress_to_buffer(chunks, threads=threads)
@bench('discrete', 'multi_decompress_to_buffer() w/ list of bytes input + sizes',
threads_arg=True, decompressed_sizes_arg=True)
def decompress_multi_decompress_to_buffer_list_and_sizes(chunks, opts, threads,
decompressed_sizes):
zctx = zstd.ZstdDecompressor(**opts)
zctx.multi_decompress_to_buffer(chunks,
decompressed_sizes=decompressed_sizes,
threads=threads)
@bench('discrete', 'multi_decompress_to_buffer() w/ list of bytes input',
require_content_size=True, threads_arg=True)
def decompress_multi_decompress_to_buffer_list(chunks, opts, threads):
zctx = zstd.ZstdDecompressor(**opts)
zctx.multi_decompress_to_buffer(chunks, threads=threads)
@bench('discrete', 'stream_reader()')
def decompress_stream_reader(chunks, opts):
zctx = zstd.ZstdDecompressor(**opts)
for chunk in chunks:
with zctx.stream_reader(chunk) as reader:
while reader.read(16384):
pass
@bench('discrete', 'write_to()')
def decompress_write_to(chunks, opts):
zctx = zstd.ZstdDecompressor(**opts)
for chunk in chunks:
with zctx.write_to(bio()) as decompressor:
decompressor.write(chunk)
@bench('discrete', 'read_to_iter()')
def decompress_read_to_iter(chunks, opts):
zctx = zstd.ZstdDecompressor(**opts)
for chunk in chunks:
for d in zctx.read_to_iter(chunk):
pass
@bench('discrete', 'decompressobj()')
def decompress_decompressobj(chunks, opts):
zctx = zstd.ZstdDecompressor(**opts)
for chunk in chunks:
decompressor = zctx.decompressobj()
decompressor.decompress(chunk)
@bench('stream', 'decompressobj()', simple=True, zlib=True)
def decompress_zlib_stream(chunks):
dobj = zlib.decompressobj()
for chunk in chunks:
dobj.decompress(chunk)
dobj.flush()
@bench('stream', 'write_to()')
def decompress_stream_write_to(chunks, opts):
zctx = zstd.ZstdDecompressor(**opts)
with zctx.write_to(bio()) as decompressor:
for chunk in chunks:
decompressor.write(chunk)
@bench('stream', 'decompressobj()', simple=True)
def decompress_stream_decompressobj(chunks, opts):
zctx = zstd.ZstdDecompressor(**opts)
decompressor = zctx.decompressobj()
for chunk in chunks:
decompressor.decompress(chunk)
@bench('content-dict', 'decompress()', require_content_size=True)
def decompress_content_dict_decompress(chunks, opts):
zctx = zstd.ZstdDecompressor(**opts)
last = zctx.decompress(chunks[0])
for chunk in chunks[1:]:
d = zstd.ZstdCompressionDict(last)
zctx = zstd.ZstdDecompressor(dict_data=d, **opts)
last = zctx.decompress(chunk)
@bench('content-dict', 'write_to()')
def decompress_content_dict_write_to(chunks, opts):
zctx = zstd.ZstdDecompressor(**opts)
b = bio()
with zctx.write_to(b) as decompressor:
decompressor.write(chunks[0])
last = b.getvalue()
for chunk in chunks[1:]:
d = zstd.ZstdCompressionDict(last)
zctx = zstd.ZstdDecompressor(dict_data=d, **opts)
b = bio()
with zctx.write_to(b) as decompressor:
decompressor.write(chunk)
last = b.getvalue()
@bench('content-dict', 'read_to_iter()')
def decompress_content_dict_read_to_iter(chunks, opts):
zctx = zstd.ZstdDecompressor(**opts)
last = b''.join(zctx.read_to_iter(chunks[0]))
for chunk in chunks[1:]:
d = zstd.ZstdCompressionDict(last)
zctx = zstd.ZstdDecompressor(dict_data=d, **opts)
last = b''.join(zctx.read_to_iter(chunk))
@bench('content-dict', 'decompressobj()')
def decompress_content_dict_decompressobj(chunks, opts):
zctx = zstd.ZstdDecompressor(**opts)
last = zctx.decompressobj().decompress(chunks[0])
for chunk in chunks[1:]:
d = zstd.ZstdCompressionDict(last)
zctx = zstd.ZstdDecompressor(dict_data=d, **opts)
last = zctx.decompressobj().decompress(chunk)
@bench('content-dict', 'decompress_content_dict_chain()',
simple=True)
def decompress_content_dict_chain_api(chunks, opts):
zctx = zstd.ZstdDecompressor(**opts)
zctx.decompress_content_dict_chain(chunks)
def get_chunks(paths, limit_count, encoding):
chunks = []
def process_file(p):
with open(p, 'rb') as fh:
data = fh.read()
if not data:
return
if encoding == 'raw':
pass
elif encoding == 'zlib':
data = zlib.decompress(data)
else:
raise Exception('unexpected chunk encoding: %s' % encoding)
chunks.append(data)
for path in paths:
if os.path.isdir(path):
for root, dirs, files in os.walk(path):
dirs.sort()
for f in sorted(files):
try:
process_file(os.path.join(root, f))
if limit_count and len(chunks) >= limit_count:
return chunks
except IOError:
pass
else:
process_file(path)
if limit_count and len(chunks) >= limit_count:
return chunks
return chunks
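# Illustrative usage sketch (not part of the original script): load at most
# 100 raw chunks from a hypothetical directory of sample files.
def _example_get_chunks():  # documentation helper, not called by the benchmark
    chunks = get_chunks(['testdata/'], 100, 'raw')
    print('%d chunks; %d bytes' % (len(chunks), sum(map(len, chunks))))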
def get_benches(mode, direction, zlib=False):
assert direction in ('compress', 'decompress')
prefix = '%s_' % direction
fns = []
for fn in BENCHES:
if not fn.__name__.startswith(prefix):
continue
if fn.mode != mode:
continue
if fn.zlib != zlib:
continue
fns.append(fn)
return fns
def format_results(results, title, prefix, total_size):
best = min(results)
rate = float(total_size) / best[3]
print('%s %s' % (prefix, title))
print('%.6f wall; %.6f CPU; %.6f user; %.6f sys %.2f MB/s (best of %d)' % (
best[3], best[0], best[1], best[2], rate / 1000000.0, len(results)))
def bench_discrete_zlib_compression(chunks, opts):
total_size = sum(map(len, chunks))
for fn in get_benches('discrete', 'compress', zlib=True):
results = timer(lambda: fn(chunks, opts))
format_results(results, fn.title, 'compress discrete zlib', total_size)
def bench_discrete_zlib_decompression(chunks, total_size):
for fn in get_benches('discrete', 'decompress', zlib=True):
results = timer(lambda: fn(chunks))
format_results(results, fn.title, 'decompress discrete zlib',
total_size)
def bench_discrete_compression(chunks, opts, cover=False, threads=None):
total_size = sum(map(len, chunks))
if 'dict_data' in opts:
if cover:
prefix = 'compress discrete cover dict'
else:
prefix = 'compress discrete dict'
else:
prefix = 'compress discrete'
for fn in get_benches('discrete', 'compress'):
chunks_arg = chunks
kwargs = {}
if fn.threads_arg:
kwargs['threads'] = threads
if fn.chunks_as_buffer:
s = struct.Struct('=QQ')
offsets = io.BytesIO()
current_offset = 0
for chunk in chunks:
offsets.write(s.pack(current_offset, len(chunk)))
current_offset += len(chunk)
chunks_arg = zstd.BufferWithSegments(b''.join(chunks),
offsets.getvalue())
results = timer(lambda: fn(chunks_arg, opts, **kwargs))
format_results(results, fn.title, prefix, total_size)
def bench_discrete_decompression(orig_chunks, compressed_chunks,
total_size, opts, cover=False,
threads=None):
dopts = {}
if opts.get('dict_data'):
dopts['dict_data'] = opts['dict_data']
if cover:
prefix = 'decompress discrete cover dict'
else:
prefix = 'decompress discrete dict'
else:
prefix = 'decompress discrete'
for fn in get_benches('discrete', 'decompress'):
if not opts.get('write_content_size') and fn.require_content_size:
continue
chunks_arg = compressed_chunks
kwargs = {}
if fn.threads_arg:
kwargs['threads'] = threads
# Pass compressed frames in a BufferWithSegments rather than a list
# of bytes.
if fn.chunks_as_buffer:
s = struct.Struct('=QQ')
offsets = io.BytesIO()
current_offset = 0
for chunk in compressed_chunks:
offsets.write(s.pack(current_offset, len(chunk)))
current_offset += len(chunk)
chunks_arg = zstd.BufferWithSegments(b''.join(compressed_chunks),
offsets.getvalue())
if fn.decompressed_sizes_arg:
# Ideally we'd use array.array here. But Python 2 doesn't support the
# Q format.
s = struct.Struct('=Q')
kwargs['decompressed_sizes'] = b''.join(s.pack(len(c)) for c in orig_chunks)
results = timer(lambda: fn(chunks_arg, dopts, **kwargs))
format_results(results, fn.title, prefix, total_size)
def bench_stream_compression(chunks, opts):
total_size = sum(map(len, chunks))
for fn in get_benches('stream', 'compress'):
results = timer(lambda: fn(chunks, opts))
format_results(results, fn.title, 'compress stream', total_size)
def bench_stream_decompression(chunks, total_size, opts):
for fn in get_benches('stream', 'decompress'):
results = timer(lambda: fn(chunks, {}))
format_results(results, fn.title, 'decompress stream', total_size)
def bench_stream_zlib_compression(chunks, opts):
total_size = sum(map(len, chunks))
for fn in get_benches('stream', 'compress', zlib=True):
results = timer(lambda: fn(chunks, opts))
format_results(results, fn.title, 'compress stream zlib', total_size)
def bench_stream_zlib_decompression(chunks, total_size):
for fn in get_benches('stream', 'decompress', zlib=True):
results = timer(lambda: fn(chunks))
format_results(results, fn.title, 'decompress stream zlib', total_size)
def bench_content_dict_compression(chunks, opts):
total_size = sum(map(len, chunks))
for fn in get_benches('content-dict', 'compress'):
results = timer(lambda: fn(chunks, opts))
format_results(results, fn.title, 'compress content dict', total_size)
def bench_content_dict_decompression(chunks, total_size, opts):
for fn in get_benches('content-dict', 'decompress'):
if not opts.get('write_content_size') and fn.require_content_size:
continue
results = timer(lambda: fn(chunks, {}))
format_results(results, fn.title, 'decompress content dict', total_size)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
group = parser.add_argument_group('Compression Modes')
group.add_argument('--discrete', action='store_true',
help='Compress each input independently')
group.add_argument('--stream', action='store_true',
help='Feed each input into a stream and emit '
'flushed blocks')
group.add_argument('--content-dict', action='store_true',
help='Compress each input using the previous as a '
'content dictionary')
group.add_argument('--discrete-dict', action='store_true',
help='Compress each input independently with a '
'dictionary')
group.add_argument('--discrete-cover-dict', action='store_true',
help='Compress each input independently with a '
'dictionary generated using the COVER algorithm')
group = parser.add_argument_group('Benchmark Selection')
group.add_argument('--no-compression', action='store_true',
help='Do not test compression performance')
group.add_argument('--no-decompression', action='store_true',
help='Do not test decompression performance')
group.add_argument('--only-simple', action='store_true',
help='Only run the simple APIs')
group.add_argument('--zlib', action='store_true',
help='Benchmark against zlib')
group = parser.add_argument_group('Compression Parameters')
group.add_argument('-l', '--level', type=int, default=3,
help='Compression level')
group.add_argument('--write-size', action='store_true',
help='Write content size to zstd frames')
group.add_argument('--write-checksum', action='store_true',
help='Write checksum data to zstd frames')
group.add_argument('--dict-size', type=int, default=128 * 1024,
help='Maximum size of trained dictionary')
group.add_argument('--compress-threads', type=int,
help='Use multi-threaded compression with this many '
'threads')
group.add_argument('--batch-threads', type=int, default=0,
help='Use this many threads for batch APIs')
group.add_argument('--cover-k', type=int, default=0,
help='Segment size parameter to COVER algorithm')
group.add_argument('--cover-d', type=int, default=0,
help='Dmer size parameter to COVER algorithm')
group.add_argument('--zlib-level', type=int, default=6,
help='zlib compression level')
group = parser.add_argument_group('Input Processing')
group.add_argument('--limit-count', type=int,
help='limit number of input files added')
group.add_argument('--dict-sample-limit', type=int,
help='limit how many samples are fed into dictionary '
'training')
group.add_argument('--chunk-encoding', choices=['raw', 'zlib'], default='raw',
help='How input chunks are encoded. Can be used to '
'pass compressed chunks for benchmarking')
parser.add_argument('path', metavar='PATH', nargs='+')
args = parser.parse_args()
# If no compression mode defined, assume discrete.
if not args.stream and not args.content_dict and not args.discrete_dict:
args.discrete = True
# It is easier to filter here than to pass arguments to multiple
# functions.
if args.only_simple:
BENCHES[:] = [fn for fn in BENCHES if fn.simple]
opts = {}
opts['level'] = args.level
if args.write_size:
opts['write_content_size'] = True
if args.write_checksum:
opts['write_checksum'] = True
if args.compress_threads:
opts['threads'] = args.compress_threads
chunks = get_chunks(args.path, args.limit_count, args.chunk_encoding)
orig_size = sum(map(len, chunks))
print('%d chunks; %d bytes' % (len(chunks), orig_size))
if args.discrete_dict:
if args.dict_sample_limit:
training_chunks = chunks[0:args.dict_sample_limit]
else:
training_chunks = chunks
dict_data = zstd.train_dictionary(args.dict_size, training_chunks,
level=opts['level'])
print('trained dictionary of size %d (wanted %d) (l=%d)' % (
len(dict_data), args.dict_size, opts['level']))
if args.discrete_cover_dict:
if args.dict_sample_limit:
training_chunks = chunks[0:args.dict_sample_limit]
else:
training_chunks = chunks
cover_args = {
'k': args.cover_k,
'd': args.cover_d,
'optimize': False,
# Always use all available threads in optimize mode.
'threads': -1,
'level': opts['level'],
}
if not args.cover_k and not args.cover_d:
cover_args['optimize'] = True
cover_dict_data = zstd.train_cover_dictionary(args.dict_size,
training_chunks,
**cover_args)
print('trained cover dictionary of size %d (wanted %d); k=%d; d=%d' % (
len(cover_dict_data), args.dict_size,
cover_dict_data.k, cover_dict_data.d))
if args.zlib and args.discrete:
compressed_discrete_zlib = []
ratios = []
for chunk in chunks:
c = zlib.compress(chunk, args.zlib_level)
compressed_discrete_zlib.append(c)
ratios.append(float(len(c)) / float(len(chunk)))
compressed_size = sum(map(len, compressed_discrete_zlib))
ratio = float(compressed_size) / float(orig_size) * 100.0
bad_count = sum(1 for r in ratios if r >= 1.00)
good_ratio = 100.0 - (float(bad_count) / float(len(chunks)) * 100.0)
print('zlib discrete compressed size (l=%d): %d (%.2f%%); smaller: %.2f%%' % (
args.zlib_level, compressed_size, ratio, good_ratio))
# In discrete mode, each input is compressed independently, possibly
# with a dictionary.
if args.discrete:
zctx = zstd.ZstdCompressor(**opts)
compressed_discrete = []
ratios = []
# Always use multiple threads here so we complete faster.
for i, c in enumerate(zctx.multi_compress_to_buffer(chunks, threads=-1)):
compressed_discrete.append(c.tobytes())
ratios.append(float(len(c)) / float(len(chunks[i])))
compressed_size = sum(map(len, compressed_discrete))
ratio = float(compressed_size) / float(orig_size) * 100.0
bad_count = sum(1 for r in ratios if r >= 1.00)
good_ratio = 100.0 - (float(bad_count) / float(len(chunks)) * 100.0)
print('discrete compressed size (l=%d): %d (%.2f%%); smaller: %.2f%%' % (
opts['level'], compressed_size, ratio, good_ratio))
# Discrete dict mode is like discrete but trains a dictionary.
if args.discrete_dict:
dict_opts = dict(opts)
dict_opts['dict_data'] = dict_data
zctx = zstd.ZstdCompressor(**dict_opts)
compressed_discrete_dict = []
ratios = []
for i, c in enumerate(zctx.multi_compress_to_buffer(chunks, threads=-1)):
compressed_discrete_dict.append(c.tobytes())
ratios.append(float(len(c)) / float(len(chunks[i])))
compressed_size = sum(map(len, compressed_discrete_dict))
ratio = float(compressed_size) / float(orig_size) * 100.0
bad_count = sum(1 for r in ratios if r >= 1.00)
good_ratio = 100.0 - (float(bad_count) / float(len(chunks)) * 100.0)
print('discrete dict compressed size (l=%d): %d (%.2f%%); smaller: %.2f%%' % (
opts['level'], compressed_size, ratio, good_ratio))
if args.discrete_cover_dict:
cover_dict_opts = dict(opts)
cover_dict_opts['dict_data'] = cover_dict_data
zctx = zstd.ZstdCompressor(**cover_dict_opts)
compressed_discrete_cover_dict = []
ratios = []
for i, c in enumerate(zctx.multi_compress_to_buffer(chunks, threads=-1)):
compressed_discrete_cover_dict.append(c.tobytes())
ratios.append(float(len(c)) / float(len(chunks[i])))
compressed_size = sum(map(len, compressed_discrete_cover_dict))
ratio = float(compressed_size) / float(orig_size) * 100.0
bad_count = sum(1 for r in ratios if r >= 1.00)
good_ratio = 100.0 - (float(bad_count) / float(len(chunks)) * 100.0)
print('discrete cover dict compressed size (l=%d): %d (%.2f%%); smaller: %.2f%%' % (
opts['level'], compressed_size, ratio, good_ratio))
# In stream mode the inputs are fed into a streaming compressor and
# blocks are flushed for each input.
if args.zlib and args.stream:
compressed_stream_zlib = []
ratios = []
compressor = zlib.compressobj(args.zlib_level)
for chunk in chunks:
output = compressor.compress(chunk)
output += compressor.flush(zlib.Z_SYNC_FLUSH)
compressed_stream_zlib.append(output)
compressed_size = sum(map(len, compressed_stream_zlib))
ratio = float(compressed_size) / float(orig_size) * 100.0
print('stream zlib compressed size (l=%d): %d (%.2f%%)' % (
args.zlib_level, compressed_size, ratio))
if args.stream:
zctx = zstd.ZstdCompressor(**opts)
compressed_stream = []
ratios = []
compressor = zctx.compressobj()
for chunk in chunks:
output = compressor.compress(chunk)
output += compressor.flush(zstd.COMPRESSOBJ_FLUSH_BLOCK)
compressed_stream.append(output)
compressed_size = sum(map(len, compressed_stream))
ratio = float(compressed_size) / float(orig_size) * 100.0
print('stream compressed size (l=%d): %d (%.2f%%)' % (
opts['level'], compressed_size, ratio))
if args.content_dict:
compressed_content_dict = []
ratios = []
# First chunk is compressed like normal.
c = zstd.ZstdCompressor(**opts).compress(chunks[0])
compressed_content_dict.append(c)
ratios.append(float(len(c)) / float(len(chunks[0])))
# Subsequent chunks use previous chunk as a dict.
for i, chunk in enumerate(chunks[1:]):
d = zstd.ZstdCompressionDict(chunks[i])
zctx = zstd.ZstdCompressor(dict_data=d, **opts)
c = zctx.compress(chunk)
compressed_content_dict.append(c)
ratios.append(float(len(c)) / float(len(chunk)))
compressed_size = sum(map(len, compressed_content_dict))
ratio = float(compressed_size) / float(orig_size) * 100.0
bad_count = sum(1 for r in ratios if r >= 1.00)
good_ratio = 100.0 - (float(bad_count) / float(len(chunks)) * 100.0)
print('content dict compressed size (l=%d): %d (%.2f%%); smaller: %.2f%%' % (
opts['level'], compressed_size, ratio, good_ratio))
print('')
if not args.no_compression:
if args.zlib and args.discrete:
bench_discrete_zlib_compression(chunks,
{'zlib_level': args.zlib_level})
if args.discrete:
bench_discrete_compression(chunks, opts,
threads=args.batch_threads)
if args.discrete_dict:
bench_discrete_compression(chunks, dict_opts,
threads=args.batch_threads)
if args.discrete_cover_dict:
bench_discrete_compression(chunks, cover_dict_opts,
cover=True, threads=args.batch_threads)
if args.zlib and args.stream:
bench_stream_zlib_compression(chunks,
{'zlib_level': args.zlib_level})
if args.stream:
bench_stream_compression(chunks, opts)
if args.content_dict:
bench_content_dict_compression(chunks, opts)
if not args.no_decompression:
print('')
if not args.no_decompression:
if args.zlib and args.discrete:
bench_discrete_zlib_decompression(compressed_discrete_zlib,
orig_size)
if args.discrete:
bench_discrete_decompression(chunks, compressed_discrete, orig_size,
opts, threads=args.batch_threads)
if args.discrete_dict:
bench_discrete_decompression(chunks, compressed_discrete_dict,
orig_size, dict_opts,
threads=args.batch_threads)
if args.discrete_cover_dict:
bench_discrete_decompression(chunks, compressed_discrete_cover_dict,
orig_size, cover_dict_opts, cover=True,
threads=args.batch_threads)
if args.zlib and args.stream:
bench_stream_zlib_decompression(compressed_stream_zlib, orig_size)
if args.stream:
bench_stream_decompression(compressed_stream, orig_size, opts)
if args.content_dict:
bench_content_dict_decompression(compressed_content_dict,
orig_size, opts)
| bsd-3-clause | 887,005,041,969,168,800 | 34.674611 | 92 | 0.601057 | false | 3.776437 | false | false | false |
talbrecht/pism_pik07 | site-packages/PISM/ssa.py | 1 | 22015 |
# Copyright (C) 2011, 2012, 2013, 2014, 2015 David Maxwell and Constantine Khroulev
#
# This file is part of PISM.
#
# PISM is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
#
# PISM is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License
# along with PISM; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Module containing classes managing SSA forward runs and
SSA verification test cases."""
import PISM
import math
from PISM import util, model
# Conversion from command-line arguments to classes of SSA solver.
SSAAlgorithms = {"fem": PISM.SSAFEM, "fd": PISM.SSAFD}
class SSARun(object):
"""Mediates solving PISM's SSA model from a minimal set of data, without the constrution of an :cpp:class:`iceModel`.
It codifies the steps needed to put together the data for an SSA run; subclasses do the work of
implementing the steps in :meth:`_setFromOptions`, :meth:`_initGrid`, etc. Uses include:
* Running SSA test cases.
* Running the SSA in standalone mode (e.g. via :command:`ssaforward.py`)
* The SSA inversion code.
Usage: After construction (of a subclass),
1. Call :meth:`setup` to run through the various
steps needed to set up an environment for solving the SSA.
2. Solve the SSA with :meth:`solve`.
    3. Optionally write the model vectors and solution to a file with :meth:`write`."""
def __init__(self):
"""Do little constructor. Real work is done by :meth:`setup` which should be called prior to :meth:`solve`."""
self.grid = None #: The computation grid; will be set by :meth:`_initGrid`
self.config = None #: Placeholder for config dictionary; set indirectly by :meth:`_constructModelData`
#: Instance of :class:`PISM.model.ModelData` that stores all data needed for solving the SSA. Much of the work of
        #: the :class:`SSARun` is involved in setting up this object. Tasks include setting up :cpp:class:`IceModelVec`
#: variables as well as model physics (e.g. :cpp:class:`EnthalpyConverter`).
self.modeldata = None
        self.ssa = None  #: Subclass of :cpp:class:`SSA` that solves the SSA.
def setup(self):
"""Orchestrates the steps of setting up an environment for running the SSA. The following methods
        are called in order, and should be implemented by a subclass.
1. :meth:`_setFromOptions` to set any parameters from command-line options
2. :meth:`_initGrid` to determine the computation grid, to be stored as :attr:`grid`
3. :meth:`_constructModelData` provide a :class:`ModelData` object (a default implementation is provided)
4. :meth:`_initPhysics` to set the non-vec members of the :class:`ModelData`, e.g. the :cpp:class:`EnthalpyConverter`.
5. :meth:`_constructSSA` to build the actual subclass of :cpp:class:`SSA` that will be used to solve the SSA
        6. :meth:`_initSSACoefficients` enters all of the vecs needed for solving the SSA into the :class:`ModelData`.
7. :meth:`_initSSA` initialize the :cpp:class:`SSA` returned in step 5
"""
self._setFromOptions()
self._initGrid()
if self.grid is None:
raise RuntimeError("SSARun failed to provide a grid.")
self.modeldata = self._constructModelData()
if self.modeldata is None:
raise RuntimeError("SSARun._constructModelData failed to provide a ModelData.")
self.config = self.modeldata.config
self._initPhysics()
if self.modeldata.enthalpyconverter is None:
raise RuntimeError("SSARun._initPhysics failed to initialize the physics of the underlying SSA solver.")
self.ssa = self._constructSSA()
if self.ssa is None:
raise RuntimeError("SSARun._constructSSA failed to provide an SSA.")
self._initSSACoefficients()
# FIXME: is there a reasonable check to do here?
self._initSSA()
def solve(self):
"""Solve the SSA by calling the underlying PISM :cpp:class:`SSA`'s
:cpp:member:`update` method. Returns the solution vector (owned by
self.ssa, but you should not need to know about ownership).
"""
vecs = self.modeldata.vecs
# make sure vecs is locked!
self.ssa.init()
if vecs.has('vel_bc'):
self.ssa.set_boundary_conditions(vecs.bc_mask, vecs.vel_bc)
melange_back_pressure = PISM.IceModelVec2S()
melange_back_pressure.create(self.grid, "melange_back_pressure", PISM.WITHOUT_GHOSTS)
melange_back_pressure.set_attrs("diagnostic",
"melange back pressure fraction", "1", "")
PISM.verbPrintf(2, self.grid.com, "* Solving the SSA stress balance ...\n")
fast = False
self.ssa.update(fast, melange_back_pressure)
return self.ssa.velocity()
def write(self, filename):
"""Saves all of :attr:`modeldata`'s vecs (and the solution) to an
output file."""
grid = self.grid
vecs = self.modeldata.vecs
pio = PISM.PIO(grid.com, "netcdf3")
pio.open(filename, PISM.PISM_READWRITE_MOVE)
PISM.define_time(pio, grid.ctx().config().get_string("time_dimension_name"),
grid.ctx().config().get_string("calendar"),
grid.ctx().time().units_string(),
grid.ctx().unit_system())
PISM.append_time(pio, grid.ctx().config().get_string("time_dimension_name"), 0.0)
pio.close()
# Save time & command line
PISM.util.writeProvenance(filename)
vecs.writeall(filename)
vel_ssa = self.ssa.velocity()
vel_ssa.write(filename)
sys = self.grid.ctx().unit_system()
velbar_mag = model.createCBarVec(self.grid)
velbar_mag.set_to_magnitude(vel_ssa)
velbar_mag.mask_by(vecs.thk, PISM.convert(sys, -0.01, "m/year", "m/second"))
velbar_mag.write(filename)
def _setFromOptions(self):
"""Optionally override to set any data from command line variables."""
pass
def _constructModelData(self):
"""Optionally override to return a custom :class:`PISM.model.ModelData` instance."""
return model.ModelData(self.grid)
def _initGrid(self):
"""Override to return the computation grid."""
raise NotImplementedError()
def _initPhysics(self):
"""Override to set the non-var parts of :attr:`modeldata` (e.g. the basal yeild stress model and the enthalpy converter)"""
raise NotImplementedError()
def _allocStdSSACoefficients(self):
"""Helper method that allocates the standard :cpp:class:`IceModelVec` variables used to solve the SSA and stores them
        in :attr:`modeldata` ``.vecs``:
* ``surface``
* ``thickness``
* ``bed``
* ``tauc``
* ``enthalpy``
* ``mask``
* ``age`` if -age is given
Intended to be called from custom implementations of :meth:`_initSSACoefficients` if desired."""
vecs = self.modeldata.vecs
grid = self.grid
vecs.add(model.createIceSurfaceVec(grid))
vecs.add(model.createIceThicknessVec(grid))
vecs.add(model.createBedrockElevationVec(grid))
vecs.add(model.createYieldStressVec(grid), 'tauc')
vecs.add(model.createEnthalpyVec(grid), 'enthalpy')
vecs.add(model.createIceMaskVec(grid), 'mask')
# The SIA model might need the "age" field
if grid.ctx().config().get_boolean("do_age"):
vecs.add(model.createAgeVec(grid), "age")
def _allocateBCs(self, velname='_bc', maskname='bc_mask'):
"""Helper method that allocates standard Dirichlet data
:cpp:class:`IceModelVec` variable and stores them in
:attr:`modeldata` ``.vecs``:
* ``vel_bc``
* ``bc_mask``
"""
vecs = self.modeldata.vecs
vecs.add(model.create2dVelocityVec(self.grid,
name=velname,
desc='SSA velocity boundary condition',
intent='intent'),
"vel_bc")
vecs.add(model.createBCMaskVec(self.grid, name=maskname),
"bc_mask")
def _initSSACoefficients(self):
"""Override to allocate and initialize all :cpp:class:`IceModelVec` variables in :attr:`modeldata` ``.vecs``
needed for solving the SSA."""
raise NotImplementedError()
def _constructSSA(self):
"""Optionally override to return an instance of :cpp:class:`SSA` (e.g. :cpp:class:`SSAFD` or :cpp:class:`SSAFEM`)
that will be used for solving the SSA."""
md = self.modeldata
return SSAAlgorithms[md.config.get_string("ssa_method")](md.grid, md.enthalpyconverter)
def _initSSA(self):
"""Optionally perform any final initialization of :attr:`ssa`."""
pass
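# Illustrative usage sketch (not part of the original module): the intended
# call sequence for a concrete SSARun subclass, following the class docstring.
# The ``run`` argument and the output file name are hypothetical.
def _example_ssarun_workflow(run):  # documentation helper, not called by PISM
    """Sketch: drive a concrete SSARun subclass from setup to output."""
    run.setup()                 # build grid, physics, coefficients and solver
    vel_ssa = run.solve()       # solve the SSA; returns the velocity field
    run.write("ssa_result.nc")  # hypothetical output file name
    return vel_ssa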
class SSAExactTestCase(SSARun):
"""Base class for implmentation of specific SSA test cases. Provides a mechanism for comparing
computed and exact values. Simply construct with a grid size and then call :meth:`run`"""
def __init__(self, Mx, My):
"""Initialize with a grid of the specified size."""
SSARun.__init__(self)
self.Mx = Mx
self.My = My
# For convenience, provide a grid. It will get initialized later
# on when _initGrid is called by our setup method.
self.grid = None
def run(self, output_file):
"""Main command intended to be called by whatever code executes the test case.
Calls :meth:`setup`, :meth:`solve`, :meth:`report`, and :meth:`write`."""
self.setup()
self.solve()
self.report()
self.write(output_file)
def report(self):
"""Compares computed and exact solution values and displays a summary report."""
grid = self.grid
ssa_stdout = self.ssa.stdout_report()
PISM.verbPrintf(3, grid.com, ssa_stdout)
maxvecerr = 0.0
avvecerr = 0.0
avuerr = 0.0
avverr = 0.0
maxuerr = 0.0
maxverr = 0.0
if (self.config.get_boolean("do_pseudo_plastic_till") and
self.config.get_double("pseudo_plastic_q") != 1.0):
PISM.verbPrintf(1, grid.com, "WARNING: numerical errors not valid for pseudo-plastic till\n")
PISM.verbPrintf(1, grid.com, "NUMERICAL ERRORS in velocity relative to exact solution:\n")
vel_ssa = self.ssa.velocity()
vel_ssa.begin_access()
exactvelmax = 0
gexactvelmax = 0
for (i, j) in self.grid.points():
x = grid.x(i)
y = grid.y(j)
(uexact, vexact) = self.exactSolution(i, j, x, y)
exactnormsq = math.sqrt(uexact * uexact + vexact * vexact)
exactvelmax = max(exactnormsq, exactvelmax)
solution = vel_ssa[i, j]
uerr = abs(solution.u - uexact)
verr = abs(solution.v - vexact)
avuerr += uerr
avverr += verr
maxuerr = max(maxuerr, uerr)
maxverr = max(maxverr, verr)
vecerr = math.sqrt(uerr * uerr + verr * verr)
maxvecerr = max(maxvecerr, vecerr)
avvecerr = avvecerr + vecerr
vel_ssa.end_access()
N = grid.Mx() * grid.My()
gexactvelmax = PISM.GlobalMax(grid.com, exactvelmax)
gmaxuerr = PISM.GlobalMax(grid.com, maxuerr)
gmaxverr = PISM.GlobalMax(grid.com, maxverr)
gavuerr = PISM.GlobalSum(grid.com, avuerr) / N
gavverr = PISM.GlobalSum(grid.com, avverr) / N
gmaxvecerr = PISM.GlobalMax(grid.com, maxvecerr)
gavvecerr = PISM.GlobalSum(grid.com, avvecerr) / N
sys = grid.ctx().unit_system()
m_year = PISM.UnitConverter(sys, "m / second", "m / year")
if abs(gexactvelmax) > 0.0:
relative_vel_error = (gavvecerr / gexactvelmax) * 100.0
else:
relative_vel_error = 0.0
PISM.verbPrintf(1, grid.com, "velocity : maxvector prcntavvec maxu maxv avu avv\n")
PISM.verbPrintf(1, grid.com,
" %11.4f%13.5f%10.4f%10.4f%10.4f%10.4f\n",
m_year(gmaxvecerr),
relative_vel_error,
m_year(gmaxuerr),
m_year(gmaxverr),
m_year(gavuerr),
m_year(gavverr))
PISM.verbPrintf(1, grid.com, "NUM ERRORS DONE\n")
def exactSolution(self, i, j, xi, xj):
"""Override to provide the exact value of the solution at grid index (``i``, ``j``) with
coordinates (``xi``, ``xj``)."""
raise NotImplementedError()
def write(self, filename):
"""Override of :meth:`SSARun.write`. Does all of the above, and saves a copy of the exact solution."""
SSARun.write(self, filename)
grid = self.grid
exact = model.create2dVelocityVec(grid, name="_exact", desc="SSA exact solution", intent="diagnostic")
exact.begin_access()
for (i, j) in grid.points():
exact[i, j] = self.exactSolution(i, j, grid.x(i), grid.y(j))
exact.end_access()
exact.write(filename)
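# Illustrative usage sketch (not part of the original module): driving a
# concrete SSAExactTestCase subclass. ``test_case_class`` is assumed to
# override exactSolution(); the grid size and output file name are hypothetical.
def _example_exact_test_case(test_case_class, Mx=61, My=61):  # documentation helper
    tc = test_case_class(Mx, My)
    tc.run("ssa_test_output.nc")  # setup, solve, error report and output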
class SSAFromInputFile(SSARun):
"""Class for running the SSA based on data provided in an input file."""
def __init__(self, boot_file):
SSARun.__init__(self)
self.grid = None
self.config = PISM.Context().config
self.boot_file = boot_file
self.phi_to_tauc = False
self.is_regional = False
def _setFromOptions(self):
self.phi_to_tauc = PISM.OptionBool("-phi_to_tauc",
"Recompute pseudo yield stresses from till friction angles.")
self.is_regional = PISM.OptionBool("-regional", "enable 'regional' mode")
def _initGrid(self):
"""Override of :meth:`SSARun._initGrid`. Sets periodicity based on
``-periodicity`` command-line option."""
# FIXME: allow specification of Mx and My different from what's
# in the boot_file.
periodicity = PISM.XY_PERIODIC
(pstring, pflag) = PISM.optionsListWasSet('-periodicity', "Grid periodicity",
'x,y,xy,none', 'xy')
if pflag:
pdict = {'x': PISM.X_PERIODIC, 'y': PISM.Y_PERIODIC,
'xy': PISM.XY_PERIODIC, 'none': PISM.NOT_PERIODIC}
periodicity = pdict[pstring]
else:
if self.is_regional and (self.config.get_string("ssa_method") == "fem"):
periodicity = PISM.NOT_PERIODIC
self.grid = PISM.IceGrid.FromFile(PISM.Context().ctx, self.boot_file, "enthalpy",
periodicity)
def _initPhysics(self):
"""Override of :meth:`SSARun._initPhysics` that sets the physics based on command-line flags."""
config = self.config
enthalpyconverter = PISM.EnthalpyConverter(config)
if PISM.OptionString("-ssa_glen", "SSA flow law Glen exponent").is_set():
config.set_string("ssa_flow_law", "isothermal_glen")
config.scalar_from_option("ice_softness", "ice_softness")
else:
config.set_string("ssa_flow_law", "gpbld")
self.modeldata.setPhysics(enthalpyconverter)
def _allocExtraSSACoefficients(self):
"""Allocate storage for SSA coefficients."""
vecs = self.modeldata.vecs
if util.fileHasVariable(self.boot_file, 'ssa_driving_stress_x'):
vecs.add(model.createDrivingStressXVec(self.grid))
if util.fileHasVariable(self.boot_file, 'ssa_driving_stress_y'):
vecs.add(model.createDrivingStressYVec(self.grid))
no_model_mask = None
# For a regional run we'll need no_model_mask, usurfstore, thkstore
if self.is_regional:
no_model_mask = model.createNoModelMaskVec(self.grid)
vecs.add(no_model_mask, 'no_model_mask')
vecs.add(model.createIceSurfaceStoreVec(self.grid))
vecs.add(model.createIceThicknessStoreVec(self.grid))
if self.config.get_boolean('ssa_dirichlet_bc'):
vecs.add(model.create2dVelocityVec(self.grid, name='_ssa_bc',
desc='SSA velocity boundary condition',
intent='intent'),
"vel_ssa_bc")
if self.is_regional:
vecs.add(no_model_mask, 'bc_mask')
else:
vecs.add(model.createBCMaskVec(self.grid), 'bc_mask')
if self.phi_to_tauc:
vecs.add(PISM.model.createBasalMeltRateVec(self.grid))
vecs.add(PISM.model.createTillPhiVec(self.grid))
vecs.add(PISM.model.createBasalWaterVec(self.grid))
def _initSSACoefficients(self):
"""Override of :meth:`SSARun._initSSACoefficients` that initializes variables from the
contents of the input file."""
# Build the standard thickness, bed, etc
self._allocStdSSACoefficients()
self._allocExtraSSACoefficients()
vecs = self.modeldata.vecs
thickness = vecs.land_ice_thickness
bed = vecs.bedrock_altitude
enthalpy = vecs.enthalpy
mask = vecs.mask
surface = vecs.surface_altitude
# Read in the PISM state variables that are used directly in the SSA solver
for v in [thickness, bed, enthalpy]:
v.regrid(self.boot_file, True)
# The SIA model might need the age field.
if self.config.get_boolean("do_age"):
vecs.age.regrid(self.boot_file, True)
# variables mask and surface are computed from the geometry previously read
sea_level = 0 # FIXME setFromOption?
gc = PISM.GeometryCalculator(sea_level, self.config)
gc.compute(bed, thickness, mask, surface)
if util.fileHasVariable(self.boot_file, 'ssa_driving_stress_x'):
vecs.ssa_driving_stress_x.regrid(self.boot_file, critical=True)
if util.fileHasVariable(self.boot_file, 'ssa_driving_stress_y'):
vecs.ssa_driving_stress_y.regrid(self.boot_file, critical=True)
# For a regional run we'll need no_model_mask, usurfstore, thkstore
if self.is_regional:
vecs.no_model_mask.regrid(self.boot_file, True)
if util.fileHasVariable(self.boot_file, 'usurfstore'):
vecs.usurfstore.regrid(self.boot_file, True)
else:
vecs.usurfstore.copy_from(vecs.surface_altitude)
if util.fileHasVariable(self.boot_file, 'thkstore'):
vecs.thkstore.regrid(self.boot_file, True)
else:
vecs.thkstore.copy_from(vecs.land_ice_thickness)
# Compute yield stress from PISM state variables
# (basal melt rate, tillphi, and basal water height)
grid = self.grid
if self.phi_to_tauc:
for v in [vecs.bmr, vecs.tillphi, vecs.bwat]:
v.regrid(self.boot_file, True)
vecs.add(v)
if self.is_regional:
yieldstress = PISM.RegionalDefaultYieldStress(self.modeldata.grid)
else:
yieldstress = PISM.MohrCoulombYieldStress(self.modeldata.grid)
# make sure vecs is locked!
yieldstress.init()
yieldstress.set_till_friction_angle(vecs.tillphi)
yieldstress.update(0, 1)
vecs.tauc.copy_from(yieldstress.basal_material_yield_stress())
else:
vecs.tauc.regrid(self.boot_file, True)
if self.config.get_boolean('ssa_dirichlet_bc'):
has_u_ssa_bc = util.fileHasVariable(self.boot_file, 'u_ssa_bc')
has_v_ssa_bc = util.fileHasVariable(self.boot_file, 'v_ssa_bc')
if (not has_u_ssa_bc) or (not has_v_ssa_bc):
PISM.verbPrintf(2, grid.com,
"Input file '%s' missing Dirichlet boundary data u/v_ssa_bc;"
" using zero default instead." % self.boot_file)
vecs.vel_ssa_bc.set(0.0)
else:
vecs.vel_ssa_bc.regrid(self.boot_file, True)
if not self.is_regional:
bc_mask_name = vecs.bc_mask.metadata().get_string("short_name")
if util.fileHasVariable(self.boot_file, bc_mask_name):
vecs.bc_mask.regrid(self.boot_file, True)
else:
PISM.verbPrintf(2, grid.com,
"Input file '%s' missing Dirichlet location mask '%s'."
" Default to no Dirichlet locations." % (self.boot_file, bc_mask_name))
vecs.bc_mask.set(0)
def _constructSSA(self):
"""Constructs an instance of :cpp:class:`SSA` for solving the SSA based on command-line flags ``-regional`` and ``-ssa_method``"""
md = self.modeldata
if self.is_regional and (md.config.get_string("ssa_method") == "fd"):
algorithm = PISM.SSAFD_Regional
else:
algorithm = SSAAlgorithms[md.config.get_string("ssa_method")]
return algorithm(md.grid, md.enthalpyconverter)
| gpl-3.0 | 5,655,968,930,728,700,000 | 41.17433 | 138 | 0.599909 | false | 3.583171 | true | false | false |
axt/angr | angr/storage/file.py | 4 | 10323 | from ..state_plugins.plugin import SimStatePlugin
from ..state_plugins.sim_action_object import SimActionObject
from .. import sim_options
import claripy
import logging
l = logging.getLogger("angr.storage.file")
# TODO: symbolic file positions
import itertools
file_counter = itertools.count()
dialogue_counter = itertools.count()
class Flags: # pylint: disable=W0232,
O_RDONLY = 0
O_WRTONLY = 1
O_RDWR = 2
O_APPEND = 4096
O_ASYNC = 64
O_CLOEXEC = 512
# TODO mode for this flag
O_CREAT = 256
O_DIRECT = 262144
O_DIRECTORY = 2097152
O_EXCL = 2048
O_LARGEFILE = 1048576
O_NOATIME = 16777216
O_NOCTTY = 1024
O_NOFOLLOW = 4194304
O_NONBLOCK = 8192
O_NODELAY = 8192
O_SYNC = 67174400
O_TRUNC = 1024
def _deps_unpack(a):
if isinstance(a, SimActionObject):
return a.ast, a.reg_deps, a.tmp_deps
else:
return a, None, None
class SimFile(SimStatePlugin):
"""
Represents a file.
"""
# Creates a SimFile
def __init__(self, name, mode, pos=0, content=None, size=None, closed=None):
super(SimFile, self).__init__()
self.name = name
self.mode = mode
self.pos = pos
self.size = size
self.content = SimSymbolicMemory(memory_id="file_%s_%d" % (name, file_counter.next())) if content is None else content
self.closed = False if closed is None else closed
@property
def read_pos(self):
return self.pos
@read_pos.setter
def read_pos(self, val):
self.pos = val
@property
def write_pos(self):
return self.pos
@write_pos.setter
def write_pos(self, val):
self.pos = val
def set_state(self, st):
super(SimFile, self).set_state(st)
if isinstance(self.pos, (int, long)):
self.pos = claripy.BVV(self.pos, st.arch.bits)
if isinstance(self.size, (int, long)):
self.size = claripy.BVV(self.size, st.arch.bits)
self.content.set_state(st)
def variables(self):
"""
:return: the symbolic variable names associated with the file.
"""
return self.content.mem._name_mapping.keys()
def close(self):
l.debug("File %s closed.", self.name)
self.closed = True
return 0
def read(self, dst_addr, length):
"""
Reads some data from the current (or provided) position of the file.
:param dst_addr: If specified, the data is written to that address.
:param length: The length of the read.
:return: The length of the read.
"""
orig_length = length
real_length = length
max_length = length
if self.size is not None:
max_length = self.size - self.pos
# TODO: check file close status
# check if we need to concretize the length
if (
sim_options.CONCRETIZE_SYMBOLIC_FILE_READ_SIZES in self.state.options and
(self.state.se.symbolic(orig_length) or self.state.se.symbolic(max_length))
):
orig_max = self.state.se.max_int(orig_length)
self.state.add_constraints(orig_length == orig_max)
real_length = min(orig_max, self.state.se.max_int(max_length))
if self.size is not None:
length_constraint = self.pos + real_length <= self.size
if (self.state.se.symbolic(real_length) or self.state.se.symbolic(max_length)) and \
self.state.se.satisfiable(extra_constraints=(length_constraint,)):
self.state.add_constraints(length_constraint)
elif not self.state.se.symbolic(real_length) or not self.state.se.symbolic(max_length):
real_length = min(self.state.se.eval(max_length), self.state.se.eval(real_length))
self.content.copy_contents(dst_addr, self.pos, real_length , dst_memory=self.state.memory)
self.read_pos += _deps_unpack(real_length)[0]
return real_length
def read_from(self, length):
# TODO: check file close status
read_length = length
if self.size is not None:
remaining = self.size - self.pos
read_length = self.state.se.If(remaining < length, remaining, length)
data = self.content.load(self.pos, read_length)
self.read_pos += _deps_unpack(read_length)[0]
return data
# Writes some data to the current position of the file.
def write(self, content, length):
# TODO: something about length
# TODO: check file close status
self.content.store(self.pos, content)
self.write_pos += _deps_unpack(length)[0]
return length
# Seeks to a position in the file.
def seek(self, where):
# TODO: check file close status
if isinstance(where, (int, long)):
where = self.state.se.BVV(where, self.state.arch.bits)
self.pos = where
# Copies the SimFile object.
def copy(self):
return SimFile(self.name, self.mode, pos=self.pos, content=self.content.copy(), size=self.size, closed=self.closed)
def all_bytes(self):
indexes = self.content.mem.keys()
if len(indexes) == 0:
return self.state.se.BVV("")
min_idx = min(indexes)
max_idx = max(indexes)
buff = [ ]
for i in range(min_idx, max_idx+1):
buff.append(self.content.load(i, 1))
return self.state.se.Concat(*buff)
def concretize(self, **kwargs):
"""
Returns a concrete value for this file satisfying the current state constraints.
Or: generate a testcase for this file.
"""
return self.state.se.eval(self.all_bytes(), cast_to=str, **kwargs)
def merge(self, others, merge_conditions, common_ancestor=None):
"""
Merges the SimFile object with `others`.
"""
if not all(isinstance(oth, SimFile) for oth in others):
raise SimMergeError("merging files of different types is not supported")
all_files = list(others) + [ self ]
if len(set(o.pos for o in all_files)) > 1:
l.warning("Cheap HACK to support multiple file positions in a merge.")
# self.pos = max(o.pos for o in all_files)
# max cannot be used as file positions might be symbolic.
#max_pos = None
#for o in all_files:
# if max_pos is not None:
# comp = self.state.se.simplify(max_pos >= o.pos)
# #if self.state.se.symbolic(comp):
# # #import ipdb; ipdb.set_trace()
# # raise SimMergeError("merging file positions with symbolic max position is not ye supported (TODO)")
# max_pos = o.pos if self.state.se.is_false(comp) else max_pos
# else:
# max_pos = o.pos
self.pos = max(
self.state.se.max(self.pos),
max(o.state.se.max(o.pos) for o in others)
)
#if len(set(o.name for o in all_files)) > 1:
# raise SimMergeError("merging file names is not yet supported (TODO)")
#if len(set(o.mode for o in all_files)) > 1:
# raise SimMergeError("merging modes is not yet supported (TODO)")
return self.content.merge(
[ o.content for o in others ], merge_conditions, common_ancestor=common_ancestor
)
def widen(self, others):
return self.merge(others, [])
class SimDialogue(SimFile):
"""
Emulates a dialogue with a program. Enables us to perform concrete short reads.
"""
def __init__(self, name, mode=None, pos=0, content=None, size=None, dialogue_entries=None):
super(SimDialogue, self).__init__(name, mode=mode, pos=pos, content=content, size=size)
self.dialogue_entries = [ ] if dialogue_entries is None else dialogue_entries
def set_state(self, st):
super(SimDialogue, self).set_state(st)
if isinstance(self.pos, (int, long)):
self.pos = claripy.BVV(self.pos, st.arch.bits)
if isinstance(self.size, (int, long)):
self.size = claripy.BVV(self.size, st.arch.bits)
self.content.set_state(st)
def add_dialogue_entry(self, dialogue_len):
"""
Add a new dialogue piece to the end of the dialogue.
"""
self.dialogue_entries.append(dialogue_len)
def read(self, dst_addr, length):
"""
Reads some data from current dialogue entry, emulates short reads.
"""
# make sure there is a current dialogue
try:
# this should always be a concrete value
current_pkt_length = self.dialogue_entries.pop(0)
except IndexError:
return 0
# two things can happen here:
# * we have a less than or equal amount of concrete content than the request read length
# * we have more concrete content than what was requested
# we assume the length passed to read can always be concretized to a single value
# because our dialogue entries will always be preconstrained
lengths = self.state.se.eval_upto(length, 2)
if len(lengths) > 1:
raise ValueError("read called with a symbolic length which can be more than a single value")
length_c = lengths[0]
if current_pkt_length <= length_c:
self.content.copy_contents(dst_addr, self.pos, current_pkt_length, dst_memory=self.state.memory)
return_length = current_pkt_length
else:
self.content.copy_contents(dst_addr, self.pos, length_c, dst_memory=self.state.memory)
return_length = length_c
# now add the remaining content as a new dialogue on top of the dialogue list
leftovers = current_pkt_length - length_c
self.dialogue_entries.insert(0, leftovers)
self.pos += return_length
return return_length
# Copies the SimDialogue object.
def copy(self):
return SimDialogue(self.name, mode=self.mode, pos=self.pos, content=self.content.copy(), size=self.size, dialogue_entries=list(self.dialogue_entries))
from ..state_plugins.symbolic_memory import SimSymbolicMemory
from ..errors import SimMergeError
| bsd-2-clause | 6,460,877,108,234,902,000 | 32.625407 | 158 | 0.606316 | false | 3.709307 | false | false | false |
TheAlgorithms/Python | project_euler/problem_067/sol1.py | 1 | 1261 | """
Problem Statement:
By starting at the top of the triangle below and moving to adjacent numbers on
the row below, the maximum total from top to bottom is 23.
3
7 4
2 4 6
8 5 9 3
That is, 3 + 7 + 4 + 9 = 23.
Find the maximum total from top to bottom in triangle.txt (right click and
'Save Link/Target As...'), a 15K text file containing a triangle with
one-hundred rows.
"""
import os
def solution():
"""
Finds the maximum total in a triangle as described by the problem statement
above.
>>> solution()
7273
"""
script_dir = os.path.dirname(os.path.realpath(__file__))
triangle = os.path.join(script_dir, "triangle.txt")
with open(triangle) as f:
triangle = f.readlines()
a = map(lambda x: x.rstrip("\r\n").split(" "), triangle)
a = list(map(lambda x: list(map(lambda y: int(y), x)), a))
for i in range(1, len(a)):
for j in range(len(a[i])):
if j != len(a[i - 1]):
number1 = a[i - 1][j]
else:
number1 = 0
if j > 0:
number2 = a[i - 1][j - 1]
else:
number2 = 0
a[i][j] += max(number1, number2)
return max(a[-1])
if __name__ == "__main__":
print(solution())
| mit | 8,428,645,675,244,132,000 | 24.734694 | 79 | 0.555115 | false | 3.42663 | false | false | false |
FireBladeNooT/Medusa_1_6 | medusa/server/web/core/error_logs.py | 1 | 5951 | # coding=utf-8
"""Route to error logs web page."""
from __future__ import unicode_literals
from datetime import datetime, timedelta
from mako.filters import html_escape
from six import text_type
from tornroutes import route
from .base import PageTemplate, WebRoot
from .... import logger, ui
from ....classes import ErrorViewer, WarningViewer
from ....issue_submitter import IssueSubmitter
from ....logger import filter_logline, read_loglines
from ....version_checker import CheckVersion
log_name_filters = {
None: html_escape('<No Filter>'),
'DAILYSEARCHER': 'Daily Searcher',
'BACKLOG': 'Backlog',
'SHOWUPDATER': 'Show Updater',
'CHECKVERSION': 'Check Version',
'SHOWQUEUE': 'Show Queue (All)',
'SEARCHQUEUE': 'Search Queue (All)',
'SEARCHQUEUE-DAILY-SEARCH': 'Search Queue (Daily Searcher)',
'SEARCHQUEUE-BACKLOG': 'Search Queue (Backlog)',
'SEARCHQUEUE-MANUAL': 'Search Queue (Manual)',
'SEARCHQUEUE-FORCED': 'Search Queue (Forced)',
'SEARCHQUEUE-RETRY': 'Search Queue (Retry/Failed)',
'SEARCHQUEUE-RSS': 'Search Queue (RSS)',
'SHOWQUEUE-UPDATE': 'Show Queue (Update)',
'SHOWQUEUE-REFRESH': 'Show Queue (Refresh)',
'FINDPROPERS': 'Find Propers',
'POSTPROCESSOR': 'PostProcessor',
'FINDSUBTITLES': 'Find Subtitles',
'TRAKTCHECKER': 'Trakt Checker',
'EVENT': 'Event',
'ERROR': 'Error',
'TORNADO': 'Tornado',
'Thread': 'Thread',
'MAIN': 'Main',
}
thread_names = {
'SHOWQUEUE': {name for name in log_name_filters if name and name.startswith('SHOWQUEUE-')},
'SEARCHQUEUE': {name for name in log_name_filters if name and name.startswith('SEARCHQUEUE-')}
}
log_periods = {
'all': None,
'one_day': timedelta(days=1),
'three_days': timedelta(days=3),
'one_week': timedelta(days=7),
}
@route('/errorlogs(/?.*)')
class ErrorLogs(WebRoot):
"""Route to errorlogs web page."""
# @TODO: Move this route to /log(/?)
# GitHub Issue submitter
issue_submitter = IssueSubmitter()
def __init__(self, *args, **kwargs):
"""Default constructor."""
super(ErrorLogs, self).__init__(*args, **kwargs)
def _create_menu(self, level):
return [
{ # Clear Errors
'title': 'Clear Errors',
'path': 'errorlogs/clearerrors/',
'requires': self._has_errors() and level == logger.ERROR,
'icon': 'ui-icon ui-icon-trash'
},
{ # Clear Warnings
'title': 'Clear Warnings',
'path': 'errorlogs/clearerrors/?level={level}'.format(level=logger.WARNING),
'requires': self._has_warnings() and level == logger.WARNING,
'icon': 'ui-icon ui-icon-trash'
},
{ # Submit Errors
'title': 'Submit Errors',
'path': 'errorlogs/submit_errors/',
'requires': self._has_errors() and level == logger.ERROR,
'class': 'submiterrors',
'confirm': True,
'icon': 'ui-icon ui-icon-arrowreturnthick-1-n'
},
]
def index(self, level=logger.ERROR, **kwargs):
"""Default index page."""
try:
level = int(level)
except (TypeError, ValueError):
level = logger.ERROR
t = PageTemplate(rh=self, filename='errorlogs.mako')
return t.render(header='Logs & Errors', title='Logs & Errors', topmenu='system',
submenu=self._create_menu(level), logLevel=level, controller='errorlogs', action='index')
@staticmethod
def _has_errors():
return bool(ErrorViewer.errors)
@staticmethod
def _has_warnings():
return bool(WarningViewer.errors)
def clearerrors(self, level=logger.ERROR):
"""Clear the errors or warnings."""
# @TODO: Replace this with DELETE /api/v2/log/{logLevel} or /api/v2/log/
if int(level) == logger.WARNING:
WarningViewer.clear()
else:
ErrorViewer.clear()
return self.redirect('/errorlogs/viewlog/')
def viewlog(self, min_level=logger.INFO, log_filter=None, log_search=None, max_lines=1000, log_period='one_day', **kwargs):
"""View the log given the specified filters."""
# @TODO: Replace index with this or merge it so ?search=true or ?query={queryString} enables this "view"
min_level = int(min_level)
log_filter = log_filter if log_filter in log_name_filters else None
t = PageTemplate(rh=self, filename='viewlogs.mako')
period = log_periods.get(log_period)
modification_time = datetime.now() - period if period else None
data = [line for line in read_loglines(modification_time=modification_time, formatter=text_type, max_lines=max_lines,
predicate=lambda l: filter_logline(l, min_level=min_level,
thread_name=thread_names.get(log_filter, log_filter),
search_query=log_search))]
return t.render(header='Log File', title='Logs', topmenu='system', log_lines='\n'.join([html_escape(line) for line in data]),
min_level=min_level, log_name_filters=log_name_filters, log_filter=log_filter, log_search=log_search, log_period=log_period,
controller='errorlogs', action='viewlogs')
def submit_errors(self):
"""Create an issue in medusa issue tracker."""
results = self.issue_submitter.submit_github_issue(CheckVersion())
for submitter_result, issue_id in results:
submitter_notification = ui.notifications.error if issue_id is None else ui.notifications.message
submitter_notification(submitter_result)
return self.redirect('/errorlogs/')
| gpl-3.0 | -6,083,476,195,281,966,000 | 38.673333 | 148 | 0.596202 | false | 3.904856 | false | false | false |
annoviko/pyclustering | pyclustering/cluster/elbow.py | 1 | 9785 | """!
@brief Elbow method to determine the optimal number of clusters for k-means clustering.
@details Implementation based on paper @cite article::cluster::elbow::1.
@authors Andrei Novikov ([email protected])
@date 2014-2020
@copyright BSD-3-Clause
"""
import math
from pyclustering.cluster.kmeans import kmeans
from pyclustering.cluster.center_initializer import kmeans_plusplus_initializer, random_center_initializer
from pyclustering.core.wrapper import ccore_library
import pyclustering.core.elbow_wrapper as wrapper
class elbow:
"""!
@brief Class represents Elbow method that is used to find out appropriate amount of clusters in a dataset.
@details The elbow is a heuristic method of interpretation and validation of consistency within cluster analysis
designed to help find the appropriate number of clusters in a dataset.Elbow method performs clustering
using K-Means algorithm for each K and estimate clustering results using sum of square erros. By default
K-Means++ algorithm is used to calculate initial centers that are used by K-Means algorithm.
The Elbow is determined by max distance from each point (x, y) to segment from kmin-point (x0, y0) to kmax-point (x1, y1),
where 'x' is K (amount of clusters), and 'y' is within-cluster error. Following expression is used to calculate Elbow
length:
\f[Elbow_{k} = \frac{\left ( y_{0} - y_{1} \right )x_{k} + \left ( x_{1} - x_{0} \right )y_{k} + \left ( x_{0}y_{1} - x_{1}y_{0} \right )}{\sqrt{\left ( x_{1} - x_{0} \right )^{2} + \left ( y_{1} - y_{0} \right )^{2}}}\f]
Usage example of Elbow method for cluster analysis:
@code
from pyclustering.cluster.kmeans import kmeans, kmeans_visualizer
from pyclustering.cluster.center_initializer import kmeans_plusplus_initializer
from pyclustering.cluster.elbow import elbow
from pyclustering.utils import read_sample
from pyclustering.samples.definitions import SIMPLE_SAMPLES
# read sample 'Simple3' from file (sample contains four clusters)
sample = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE3)
# create instance of Elbow method using K value from 1 to 10.
kmin, kmax = 1, 10
elbow_instance = elbow(sample, kmin, kmax)
# process input data and obtain results of analysis
elbow_instance.process()
amount_clusters = elbow_instance.get_amount() # most probable amount of clusters
wce = elbow_instance.get_wce() # total within-cluster errors for each K
# perform cluster analysis using K-Means algorithm
centers = kmeans_plusplus_initializer(sample, amount_clusters,
amount_candidates=kmeans_plusplus_initializer.FARTHEST_CENTER_CANDIDATE).initialize()
kmeans_instance = kmeans(sample, centers)
kmeans_instance.process()
# obtain clustering results and visualize them
clusters = kmeans_instance.get_clusters()
centers = kmeans_instance.get_centers()
kmeans_visualizer.show_clusters(sample, clusters, centers)
@endcode
By default Elbow uses K-Means++ initializer to calculate initial centers for K-Means algorithm, it can be changed
using argument 'initializer':
@code
# perform analysis using Elbow method with random center initializer for K-Means algorithm inside of the method.
kmin, kmax = 1, 10
elbow_instance = elbow(sample, kmin, kmax, initializer=random_center_initializer)
elbow_instance.process()
@endcode
@image html elbow_example_simple_03.png "Elbows analysis with further K-Means clustering."
"""
def __init__(self, data, kmin, kmax, **kwargs):
"""!
@brief Construct Elbow method.
@param[in] data (array_like): Input data that is presented as array of points (objects), each point should be represented by array_like data structure.
@param[in] kmin (int): Minimum amount of clusters that should be considered.
@param[in] kmax (int): Maximum amount of clusters that should be considered.
@param[in] **kwargs: Arbitrary keyword arguments (available arguments: `ccore`, `initializer`, `random_state`, `kstep`).
<b>Keyword Args:</b><br>
- ccore (bool): If `True` then C++ implementation of pyclustering library is used (by default `True`).
- initializer (callable): Center initializer that is used by K-Means algorithm (by default K-Means++).
- random_state (int): Seed for random state (by default is `None`, current system time is used).
- kstep (int): Search step in the interval [kmin, kmax] (by default is `1`).
"""
self.__initializer = kwargs.get('initializer', kmeans_plusplus_initializer)
self.__random_state = kwargs.get('random_state', None)
self.__kstep = kwargs.get('kstep', 1)
self.__ccore = kwargs.get('ccore', True) or \
isinstance(self.__initializer, kmeans_plusplus_initializer) or \
isinstance(self.__initializer, random_center_initializer)
if self.__ccore:
self.__ccore = ccore_library.workable()
self.__data = data
self.__kmin = kmin
self.__kmax = kmax
self.__wce = []
self.__elbows = []
self.__kvalue = -1
self.__verify_arguments()
def process(self):
"""!
@brief Performs analysis to find out appropriate amount of clusters.
@return (elbow) Returns itself (Elbow instance).
@return
"""
if self.__ccore:
self.__process_by_ccore()
else:
self.__process_by_python()
return self
def __process_by_ccore(self):
"""!
@brief Performs processing using C++ implementation.
"""
if isinstance(self.__initializer, kmeans_plusplus_initializer):
initializer = wrapper.elbow_center_initializer.KMEANS_PLUS_PLUS
else:
initializer = wrapper.elbow_center_initializer.RANDOM
result = wrapper.elbow(self.__data, self.__kmin, self.__kmax, self.__kstep, initializer, self.__random_state)
self.__kvalue = result[0]
self.__wce = result[1]
def __process_by_python(self):
"""!
@brief Performs processing using python implementation.
"""
for amount in range(self.__kmin, self.__kmax + 1, self.__kstep):
centers = self.__initializer(self.__data, amount, random_state=self.__random_state).initialize()
instance = kmeans(self.__data, centers, ccore=False)
instance.process()
self.__wce.append(instance.get_total_wce())
self.__calculate_elbows()
self.__find_optimal_kvalue()
def get_amount(self):
"""!
@brief Returns appropriate amount of clusters.
"""
return self.__kvalue
def get_wce(self):
"""!
@brief Returns list of total within cluster errors for each K-value, for example, in case of `kstep = 1`:
(kmin, kmin + 1, ..., kmax).
"""
return self.__wce
def __calculate_elbows(self):
"""!
@brief Calculates potential elbows.
@details Elbow is calculated as a distance from each point (x, y) to segment from kmin-point (x0, y0) to kmax-point (x1, y1).
"""
x0, y0 = 0.0, self.__wce[0]
x1, y1 = float(len(self.__wce)), self.__wce[-1]
for index_elbow in range(1, len(self.__wce) - 1):
x, y = float(index_elbow), self.__wce[index_elbow]
segment = abs((y0 - y1) * x + (x1 - x0) * y + (x0 * y1 - x1 * y0))
norm = math.sqrt((x1 - x0) ** 2 + (y1 - y0) ** 2)
distance = segment / norm
self.__elbows.append(distance)
def __find_optimal_kvalue(self):
"""!
@brief Finds elbow and returns corresponding K-value.
"""
optimal_elbow_value = max(self.__elbows)
self.__kvalue = (self.__elbows.index(optimal_elbow_value) + 1) * self.__kstep + self.__kmin
def __verify_arguments(self):
"""!
@brief Verify input parameters for the algorithm and throw exception in case of incorrectness.
"""
if len(self.__data) == 0:
raise ValueError("Input data is empty (size: '%d')." % len(self.__data))
if self.__kmin < 1:
raise ValueError("K min value (current value '%d') should be greater or equal to 1." % self.__kmin)
if self.__kstep < 1:
raise ValueError("K step value (current value '%d') should be greater or equal to 1." % self.__kstep)
if self.__kmax - self.__kmin + 1 < 3:
raise ValueError("Amount of K (" + str(self.__kmax - self.__kmin) + ") is too small for analysis. "
"It is require to have at least three K to build elbow.")
steps_to_process = math.floor((self.__kmax - self.__kmin) / self.__kstep) + 1
if steps_to_process < 3:
raise ValueError("The search step is too high '%d' for analysis (amount of K for analysis is '%d'). "
"It is require to have at least three K to build elbow." % (self.__kstep, steps_to_process))
if len(self.__data) < self.__kmax:
raise ValueError("K max value '%d' is greater than amount of points in data '%d'." %
(self.__kmax, len(self.__data)))
| gpl-3.0 | -6,176,914,998,356,930,000 | 38.770833 | 225 | 0.601635 | false | 3.961538 | false | false | false |
alabid/blinkdb | bin/dev/clear-buffer-cache.py | 12 | 1406 | #!/usr/bin/python
# Copyright (C) 2012 The Regents of The University California.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Clear OS buffer cache for mesos clusters on EC2.
import os
import thread
import time
machinesFile = "/root/spark-ec2/slaves"
machs = open(machinesFile).readlines()
machs = map(lambda s: s.strip(),machs)
machCount = len(machs)
machID = 0
cmd = "sync; echo 3 > /proc/sys/vm/drop_caches"
done = {}
def dropCachesThread( mach, myID, *args ):
print "SSH to machine %i" % (myID)
os.system("ssh %s '%s'" % (mach, cmd))
done[mach] = "done"
for mach in ( machs ):
thread.start_new_thread(dropCachesThread, (mach, machID))
machID = machID + 1
time.sleep(0.2)
while (len(done.keys()) < machCount):
print "waiting for %d tasks to finish..." % (machCount - len(done.keys()))
time.sleep(1)
print "Done with %i threads" % (len(done.keys()))
| apache-2.0 | -2,378,912,866,519,992,300 | 28.914894 | 76 | 0.706259 | false | 3.247113 | false | false | false |
matsumoto-r/synciga | src/build/android/pylib/base/base_test_runner.py | 2 | 7812 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import contextlib
import httplib
import logging
import os
import tempfile
import time
from pylib import android_commands
from pylib import constants
from pylib import ports
from pylib.chrome_test_server_spawner import SpawningServer
from pylib.flag_changer import FlagChanger
from pylib.forwarder import Forwarder
from pylib.valgrind_tools import CreateTool
# TODO(frankf): Move this to pylib/utils
import lighttpd_server
# A file on device to store ports of net test server. The format of the file is
# test-spawner-server-port:test-server-port
NET_TEST_SERVER_PORT_INFO_FILE = 'net-test-server-ports'
class BaseTestRunner(object):
"""Base class for running tests on a single device.
A subclass should implement RunTests() with no parameter, so that calling
the Run() method will set up tests, run them and tear them down.
"""
def __init__(self, device, tool, build_type):
"""
Args:
device: Tests will run on the device of this ID.
shard_index: Index number of the shard on which the test suite will run.
build_type: 'Release' or 'Debug'.
"""
self.device = device
self.adb = android_commands.AndroidCommands(device=device)
self.tool = CreateTool(tool, self.adb)
self._http_server = None
self._forwarder = None
self._forwarder_device_port = 8000
self.forwarder_base_url = ('http://localhost:%d' %
self._forwarder_device_port)
self.flags = FlagChanger(self.adb)
self.flags.AddFlags(['--disable-fre'])
self._spawning_server = None
self._spawner_forwarder = None
# We will allocate port for test server spawner when calling method
# LaunchChromeTestServerSpawner and allocate port for test server when
# starting it in TestServerThread.
self.test_server_spawner_port = 0
self.test_server_port = 0
self.build_type = build_type
def _PushTestServerPortInfoToDevice(self):
"""Pushes the latest port information to device."""
self.adb.SetFileContents(self.adb.GetExternalStorage() + '/' +
NET_TEST_SERVER_PORT_INFO_FILE,
'%d:%d' % (self.test_server_spawner_port,
self.test_server_port))
def RunTest(self, test):
"""Runs a test. Needs to be overridden.
Args:
test: A test to run.
Returns:
Tuple containing: (test_result.TestResults, tests to rerun or None)
"""
raise NotImplementedError
def SetUp(self):
"""Run once before all tests are run."""
Forwarder.KillDevice(self.adb, self.tool)
def TearDown(self):
"""Run once after all tests are run."""
self.ShutdownHelperToolsForTestSuite()
def CopyTestData(self, test_data_paths, dest_dir):
"""Copies |test_data_paths| list of files/directories to |dest_dir|.
Args:
test_data_paths: A list of files or directories relative to |dest_dir|
which should be copied to the device. The paths must exist in
|CHROME_DIR|.
dest_dir: Absolute path to copy to on the device.
"""
for p in test_data_paths:
self.adb.PushIfNeeded(
os.path.join(constants.CHROME_DIR, p),
os.path.join(dest_dir, p))
def LaunchTestHttpServer(self, document_root, port=None,
extra_config_contents=None):
"""Launches an HTTP server to serve HTTP tests.
Args:
document_root: Document root of the HTTP server.
port: port on which we want to the http server bind.
extra_config_contents: Extra config contents for the HTTP server.
"""
self._http_server = lighttpd_server.LighttpdServer(
document_root, port=port, extra_config_contents=extra_config_contents)
if self._http_server.StartupHttpServer():
logging.info('http server started: http://localhost:%s',
self._http_server.port)
else:
logging.critical('Failed to start http server')
self.StartForwarderForHttpServer()
return (self._forwarder_device_port, self._http_server.port)
def _CreateAndRunForwarder(
self, adb, port_pairs, tool, host_name, build_type):
"""Creates and run a forwarder."""
forwarder = Forwarder(adb, build_type)
forwarder.Run(port_pairs, tool, host_name)
return forwarder
def StartForwarder(self, port_pairs):
"""Starts TCP traffic forwarding for the given |port_pairs|.
Args:
host_port_pairs: A list of (device_port, local_port) tuples to forward.
"""
if self._forwarder:
self._forwarder.Close()
self._forwarder = self._CreateAndRunForwarder(
self.adb, port_pairs, self.tool, '127.0.0.1', self.build_type)
def StartForwarderForHttpServer(self):
"""Starts a forwarder for the HTTP server.
The forwarder forwards HTTP requests and responses between host and device.
"""
self.StartForwarder([(self._forwarder_device_port, self._http_server.port)])
def RestartHttpServerForwarderIfNecessary(self):
"""Restarts the forwarder if it's not open."""
# Checks to see if the http server port is being used. If not forwards the
# request.
# TODO(dtrainor): This is not always reliable because sometimes the port
# will be left open even after the forwarder has been killed.
if not ports.IsDevicePortUsed(self.adb,
self._forwarder_device_port):
self.StartForwarderForHttpServer()
def ShutdownHelperToolsForTestSuite(self):
"""Shuts down the server and the forwarder."""
# Forwarders should be killed before the actual servers they're forwarding
# to as they are clients potentially with open connections and to allow for
# proper hand-shake/shutdown.
Forwarder.KillDevice(self.adb, self.tool)
if self._forwarder:
self._forwarder.Close()
if self._http_server:
self._http_server.ShutdownHttpServer()
if self._spawning_server:
self._spawning_server.Stop()
self.flags.Restore()
def CleanupSpawningServerState(self):
"""Tells the spawning server to clean up any state.
If the spawning server is reused for multiple tests, this should be called
after each test to prevent tests affecting each other.
"""
if self._spawning_server:
self._spawning_server.CleanupState()
def LaunchChromeTestServerSpawner(self):
"""Launches test server spawner."""
server_ready = False
error_msgs = []
# Try 3 times to launch test spawner server.
for i in xrange(0, 3):
# Do not allocate port for test server here. We will allocate
# different port for individual test in TestServerThread.
self.test_server_spawner_port = ports.AllocateTestServerPort()
self._spawning_server = SpawningServer(self.test_server_spawner_port,
self.adb,
self.tool,
self.build_type)
self._spawning_server.Start()
server_ready, error_msg = ports.IsHttpServerConnectable(
'127.0.0.1', self.test_server_spawner_port, path='/ping',
expected_read='ready')
if server_ready:
break
else:
error_msgs.append(error_msg)
self._spawning_server.Stop()
# Wait for 2 seconds then restart.
time.sleep(2)
if not server_ready:
logging.error(';'.join(error_msgs))
raise Exception('Can not start the test spawner server.')
self._PushTestServerPortInfoToDevice()
self._spawner_forwarder = self._CreateAndRunForwarder(
self.adb,
[(self.test_server_spawner_port, self.test_server_spawner_port)],
self.tool, '127.0.0.1', self.build_type)
| bsd-3-clause | -5,498,488,929,738,747,000 | 36.37799 | 80 | 0.670635 | false | 3.869242 | true | false | false |
ncgr/lis_gis | grin_app/views.py | 2 | 18316 | import logging
import simplejson as json
import re
from functools import reduce
from decimal import Decimal
from django.conf import settings
from django.db import connection
from django.shortcuts import render
from django.http import HttpResponse
from django.views.decorators.csrf import ensure_csrf_cookie
from grin_app.ensure_nocache import ensure_nocache
# SRID 4326 is WGS 84 long lat unit=degrees, also the specification of the
# geoometric_coord field in the grin_accessions table.
SRID = 4326
DEFAULT_LIMIT = 200
TWO_PLACES = Decimal('0.01')
ACCESSION_TAB = 'lis_germplasm.grin_accession'
ACC_SELECT_COLS = (
'gid', 'taxon', 'latdec', 'longdec', 'accenumb', 'elevation', 'cropname',
'collsite', 'acqdate', 'origcty'
)
# Brewer nominal category colors from chroma.js set1,2,3 concatenated:
NOMINAL_COLORS = [
"#e41a1c", "#377eb8", "#4daf4a", "#984ea3", "#ff7f00", "#ffff33",
"#a65628", "#f781bf", "#999999", "#66c2a5", "#fc8d62", "#8da0cb",
"#e78ac3", "#a6d854", "#ffd92f", "#e5c494", "#b3b3b3", "#8dd3c7",
"#ffffb3", "#bebada", "#fb8072", "#80b1d3", "#fdb462", "#b3de69",
"#fccde5", "#d9d9d9", "#bc80bd", "#ccebc5", "#ffed6f"
]
NOMINAL_THRESHOLD = 10
DEFAULT_COLOR = 'lightgrey'
ORDER_BY_FRAG = '''
ORDER BY ST_Distance(
geographic_coord::geography,
ST_Centroid(
ST_MakeEnvelope(%(minx)s, %(miny)s, %(maxx)s, %(maxy)s, %(srid)s)
)
) ASC, taxon, gid
'''
LIMIT_FRAG = 'LIMIT %(limit)s'
COUNTRY_REGEX = re.compile(r'[a-z]{3}', re.I)
TAXON_FTS_BOOLEAN_REGEX = re.compile(r'^(\w+\s*[\||&]\s*\w+)+$')
logger = logging.getLogger(__name__)
GRIN_ACC_WHERE_FRAGS = {
'fts': {
'include': lambda p: TAXON_FTS_BOOLEAN_REGEX.match(
p.get('taxon_query', '')),
'sql': "taxon_fts @@ to_tsquery('english', %(taxon_query)s)",
},
'fts_simple': {
'include': lambda p: p.get('taxon_query', None) and not
GRIN_ACC_WHERE_FRAGS['fts']['include'](p),
'sql': "taxon_fts @@ plainto_tsquery('english', %(taxon_query)s)",
},
'country': {
'include': lambda p: p.get('country', None),
'sql': 'origcty = %(country)s',
},
'geocoded_only': {
'include': lambda p: p.get('limit_geo_bounds', None) in (
True, 'true') or p.get('geocoded_only', None) in (True, 'true'),
'sql': 'latdec <> 0 AND longdec <> 0',
},
'limit_geo_bounds': {
'include': lambda p: p.get('limit_geo_bounds', None) in (True, 'true'),
'sql': '''
latdec <> 0 AND longdec <> 0 AND
ST_Contains(
ST_MakeEnvelope(%(minx)s, %(miny)s, %(maxx)s, %(maxy)s, %(srid)s),
geographic_coord::geometry
)''',
},
}
GRIN_EVAL_WHERE_FRAGS = {
'descriptor_name': {
'include': lambda p: p.get('descriptor_name', None),
'sql': 'descriptor_name = %(descriptor_name)s',
},
'accession prefix': {
'include': lambda p: p.get('prefix', None),
'sql': 'accession_prefix = %(prefix)s',
},
'accession number': {
'include': lambda p: p.get('acc_num', None),
'sql': 'accession_number = %(acc_num)s',
},
'accession surfix': {
'include': lambda p: p.get('suffix', None),
'sql': 'accession_surfix = %(suffix)s',
},
}
@ensure_csrf_cookie
@ensure_nocache
def index(req):
"""Render the index template, which will boot up angular-js.
"""
return render(req, 'grin_app/index.html', context=settings.BRANDING)
@ensure_csrf_cookie
@ensure_nocache
def evaluation_descr_names(req):
"""Return JSON for all distinct trait descriptor names matching the
given taxon. (the trait overlay choice is only available after a
taxon is selected). Join on the grin_accession table to use the
FTS index on taxon there.
"""
assert req.method == 'GET', 'GET request method required'
params = req.GET.dict()
assert 'taxon' in params, 'missing taxon param'
assert params['taxon'], 'empty taxon param'
params['taxon_query'] = params['taxon']
where_clauses = [
val['sql'] for key, val in GRIN_ACC_WHERE_FRAGS.items()
if val['include'](params)
]
if len(where_clauses) == 0:
where_sql = ''
else:
where_sql = 'WHERE %s' % ' AND '.join(where_clauses)
sql = '''
SELECT DISTINCT descriptor_name
FROM lis_germplasm.legumes_grin_evaluation_data
JOIN lis_germplasm.grin_accession
USING (accenumb)
%s
ORDER BY descriptor_name
''' % where_sql
sql_params = {'taxon_query': params['taxon']}
cursor = connection.cursor()
# logger.info(cursor.mogrify(sql, sql_params))
cursor.execute(sql, sql_params)
names = [row[0] for row in cursor.fetchall()]
result = json.dumps(names)
response = HttpResponse(result, content_type='application/json')
return response
@ensure_csrf_cookie
@ensure_nocache
def evaluation_search(req):
"""Return JSON array of observation_value for all trait records
matching a set of accession ids, and matching the descriptor_name
field. Used for creating map markers or map overlays with specific
accesions' trait data.
"""
assert req.method == 'POST', 'POST request method required'
params = json.loads(req.body)
assert 'accession_ids' in params, 'missing accession_ids param'
assert 'descriptor_name' in params, 'missing descriptor_name param'
sql = '''
SELECT accenumb, descriptor_name, observation_value
FROM lis_germplasm.legumes_grin_evaluation_data
WHERE descriptor_name = %(descriptor_name)s
AND accenumb IN %(accession_ids)s
'''
sql_params = {
'descriptor_name': params['descriptor_name'],
'accession_ids': tuple(params['accession_ids'])
}
cursor = connection.cursor()
# logger.info(cursor.mogrify(sql, sql_params))
cursor.execute(sql, sql_params)
rows = _dictfetchall(cursor)
# observation_value is a string field, so cast to int or float as necessary
rows_clean = []
for row in rows:
row['observation_value'] = _string2num(row['observation_value'])
rows_clean.append(row)
result = json.dumps(rows_clean, use_decimal=True)
response = HttpResponse(result, content_type='application/json')
return response
def _string2num(s):
"""
Convert a string to int or float if possible.
"""
try:
return int(s)
except ValueError:
pass
try:
return float(s)
except ValueError:
pass
return s
@ensure_csrf_cookie
@ensure_nocache
def evaluation_metadata(req):
"""Return JSON with trait metadata for the given taxon and trait
descriptor_name. This enables the client to display a legend, and
colorize accessions by either numeric or category traits.
"""
assert req.method == 'POST', 'POST request method required'
params = json.loads(req.body)
assert 'taxon' in params, 'missing taxon param'
assert 'descriptor_name' in params, 'missing descriptor_name param'
assert 'trait_scale' in params, 'missing trait_scale param'
assert 'accession_ids' in params, 'missing accession_ids param'
assert params['taxon'], 'empty taxon param'
result = None
cursor = connection.cursor()
# full text search on the taxon field in accessions table, also
# joining on taxon to get relevant evaluation metadata.
sql_params = {
'taxon_query': params['taxon'],
'descriptor_name': params['descriptor_name']
}
where_clauses = [
val['sql'] for
key, val in {**GRIN_ACC_WHERE_FRAGS, **GRIN_EVAL_WHERE_FRAGS}.items()
if val['include'](sql_params)
]
if len(where_clauses) == 0:
where_sql = ''
else:
where_sql = 'WHERE %s' % ' AND '.join(where_clauses)
sql = '''
SELECT DISTINCT taxon, descriptor_name, obs_type, obs_min, obs_max,
obs_nominal_values
FROM lis_germplasm.grin_evaluation_metadata
JOIN lis_germplasm.grin_accession
USING (taxon)
%s
''' % where_sql
# logger.info(cursor.mogrify(sql, sql_params))
cursor.execute(sql, sql_params)
trait_metadata = _dictfetchall(cursor)
if len(trait_metadata) == 0:
# early out if there were no matching metadata records
return HttpResponse({}, content_type='application/json')
obs_type = trait_metadata[0]['obs_type']
if obs_type == 'numeric':
if params['trait_scale'] == 'local':
# must perform another query to restrict observations to this
# set of accessions (local, not global)
sql = '''
SELECT observation_value
FROM lis_germplasm.legumes_grin_evaluation_data
WHERE accenumb IN %(accession_ids)s
AND descriptor_name = %(descriptor_name)s
'''
sql_params = {
'descriptor_name': params['descriptor_name'],
'accession_ids': tuple(params['accession_ids'])
}
# logger.info(cursor.mogrify(sql, sql_params))
cursor.execute(sql, sql_params)
obs_values = [_string2num(row[0]) for row in cursor.fetchall()]
result = {
'taxon_query': params['taxon'],
'descriptor_name': params['descriptor_name'],
'trait_type': 'numeric',
'min': min(obs_values) if obs_values else 0,
'max': max(obs_values) if obs_values else 0,
}
elif params['trait_scale'] == 'global':
mins = [rec['obs_min'] for rec in trait_metadata]
maxes = [rec['obs_max'] for rec in trait_metadata]
result = {
'taxon_query': params['taxon'],
'descriptor_name': params['descriptor_name'],
'trait_type': 'numeric',
'min': reduce(lambda x, y: x + y, mins) / len(mins),
'max': reduce(lambda x, y: x + y, maxes) / len(maxes),
}
elif obs_type == 'nominal':
vals = set()
for rec in trait_metadata:
vals |= set(rec['obs_nominal_values'])
num_preset_colors = len(NOMINAL_COLORS)
colors = {}
for i, val in enumerate(vals):
if i < num_preset_colors:
colors[val] = NOMINAL_COLORS[i]
else:
colors[val] = DEFAULT_COLOR
result = {
'taxon_query': params['taxon'],
'descriptor_name': params['descriptor_name'],
'trait_type': 'nominal',
'obs_nominal_values': sorted(vals),
'colors': colors,
}
response = HttpResponse(json.dumps(result, use_decimal=True),
content_type='application/json')
return response
@ensure_csrf_cookie
@ensure_nocache
def evaluation_detail(req):
"""Return JSON for all evalation/trait records matching this accession id.
"""
assert req.method == 'GET', 'GET request method required'
params = req.GET.dict()
assert 'accenumb' in params, 'missing accenumb param'
prefix = ''
acc_num = ''
suffix = ''
parts = params['accenumb'].split()
parts_len = len(parts)
if parts_len > 2:
prefix, acc_num, rest = parts[0], parts[1], parts[2:] # suffix optional
suffix = ' '.join(rest)
elif parts_len == 2:
prefix, acc_num = parts[0], parts[1]
elif parts_len == 1:
acc_num = parts[0]
else:
acc_num = params['accenumb']
cursor = connection.cursor()
sql_params = {
'prefix': prefix,
'acc_num': acc_num,
'suffix': suffix,
}
where_clauses = [
val['sql'] for key, val in GRIN_EVAL_WHERE_FRAGS.items()
if val['include'](sql_params)
]
where_sql = ' AND '.join(where_clauses)
sql = '''
SELECT accession_prefix,
accession_number,
accession_surfix,
observation_value,
descriptor_name,
method_name,
plant_name,
taxon,
origin,
original_value,
frequency,
low,
hign,
mean,
sdev,
ssize,
inventory_prefix,
inventory_number,
inventory_suffix,
accession_comment
FROM lis_germplasm.legumes_grin_evaluation_data
WHERE %s
ORDER BY descriptor_name
''' % where_sql
# logger.info(cursor.mogrify(sql, sql_params))
cursor.execute(sql, sql_params)
rows = _dictfetchall(cursor)
result = json.dumps(rows, use_decimal=True)
response = HttpResponse(result, content_type='application/json')
return response
@ensure_csrf_cookie
@ensure_nocache
def accession_detail(req):
"""Return JSON for all columns for a accession id."""
assert req.method == 'GET', 'GET request method required'
params = req.GET.dict()
assert 'accenumb' in params, 'missing accenumb param'
# fix me: name the columns dont select *!
sql = '''
SELECT * FROM lis_germplasm.grin_accession WHERE accenumb = %(accenumb)s
'''
cursor = connection.cursor()
# logger.info(cursor.mogrify(sql, params))
cursor.execute(sql, params)
rows = _dictfetchall(cursor)
return _acc_search_response(rows)
@ensure_csrf_cookie
@ensure_nocache
def countries(req):
"""Return a json array of countries for search filtering ui.
"""
cursor = connection.cursor()
sql = '''
SELECT DISTINCT origcty FROM lis_germplasm.grin_accession ORDER by origcty
'''
cursor.execute(sql)
# flatten into array, filter out bogus records like '' or 3 number codes
results = [row[0] for row in cursor.fetchall()
if row[0] and COUNTRY_REGEX.match(row[0])]
return HttpResponse(json.dumps(results), content_type='application/json')
@ensure_csrf_cookie
@ensure_nocache
def search(req):
"""Search by map bounds and return GeoJSON results."""
assert req.method == 'POST', 'POST request method required'
params = json.loads(req.body)
# logger.info(params)
if 'limit' not in params:
params['limit'] = DEFAULT_LIMIT
else:
params['limit'] = int(params['limit'])
where_clauses = [
val['sql'] for key, val in GRIN_ACC_WHERE_FRAGS.items()
if val['include'](params)
]
if len(where_clauses) == 0:
where_sql = ''
else:
where_sql = 'WHERE (%s)' % ' AND '.join(where_clauses)
cols_sql = ' , '.join(ACC_SELECT_COLS)
sql = '''SELECT %s FROM %s %s %s %s''' % (
cols_sql,
ACCESSION_TAB,
where_sql,
ORDER_BY_FRAG,
LIMIT_FRAG
)
cursor = connection.cursor()
sql_params = {
'taxon_query': params.get('taxon_query', None),
'country': params.get('country', None),
'minx': float(params.get('sw_lng', 0)),
'miny': float(params.get('sw_lat', 0)),
'maxx': float(params.get('ne_lng', 0)),
'maxy': float(params.get('ne_lat', 0)),
'limit': params['limit'],
'srid': SRID,
}
# logger.info(cursor.mogrify(sql, sql_params))
cursor.execute(sql, sql_params)
rows = _dictfetchall(cursor)
# when searching for a set of accessionIds, the result needs to
# either get merged in addition to the SQL LIMIT results, or just
# returned instead
if params.get('accession_ids', None):
if ',' in params['accession_ids']:
sql_params = {'accession_ids': params['accession_ids'].split(',')}
else:
sql_params = {'accession_ids': [params['accession_ids']]}
where_sql = 'WHERE accenumb = ANY( %(accession_ids)s )'
sql = 'SELECT %s FROM %s %s' % (
cols_sql,
ACCESSION_TAB,
where_sql
)
cursor.execute(sql, sql_params)
rows_with_requested_accessions = _dictfetchall(cursor)
if params.get('accession_ids_inclusive', None):
# merge results with previous set
uniq = set()
def is_unique(r):
k = r.get('accenumb', None)
if k in uniq:
return False
uniq.add(k)
return True
rows = [row for row in rows_with_requested_accessions + rows
if is_unique(row)]
else:
# simple replace with these results
rows = rows_with_requested_accessions
return _acc_search_response(rows)
def _acc_search_response(rows):
geo_json = []
# logger.info('results: %d' % len(rows))
for rec in rows:
# fix up properties which are not json serializable
if rec.get('acqdate', None):
rec['acqdate'] = str(rec['acqdate'])
else:
rec['acqdate'] = None
if rec.get('colldate', None):
rec['colldate'] = str(rec['colldate'])
else:
rec['colldate'] = None
# geojson can have null coords, so output this for
# non-geocoded search results (e.g. full text search w/ limit
# to current map extent turned off
if rec.get('longdec', 0) == 0 and rec.get('latdec', 0) == 0:
coords = None
else:
lat = Decimal(rec['latdec']).quantize(TWO_PLACES)
lng = Decimal(rec['longdec']).quantize(TWO_PLACES)
coords = [lng, lat]
del rec['latdec'] # have been translated into geojson coords,
del rec['longdec'] # so these keys are extraneous now.
geo_json_frag = {
'type': 'Feature',
'geometry': {
'type': 'Point',
'coordinates': coords
},
'properties': rec # rec happens to be a dict of properties. yay
}
# tag this accession with something to distinguish it from
# user provided accession ids
geo_json_frag['properties']['from_api'] = True
geo_json.append(geo_json_frag)
result = json.dumps(geo_json, use_decimal=True)
response = HttpResponse(result, content_type='application/json')
return response
def _dictfetchall(cursor):
"""Return all rows from a cursor as a dict"""
columns = [col[0] for col in cursor.description]
return [
dict(zip(columns, row))
for row in cursor.fetchall()
]
| gpl-3.0 | 8,975,848,377,664,379,000 | 33.887619 | 80 | 0.589102 | false | 3.548925 | false | false | false |
testmana2/test | Helpviewer/Network/QtHelpAccessHandler.py | 2 | 4066 | # -*- coding: utf-8 -*-
# Copyright (c) 2009 - 2015 Detlev Offenbach <[email protected]>
#
"""
Module implementing a scheme access handler for QtHelp.
"""
from __future__ import unicode_literals
import mimetypes
import os
from PyQt5.QtCore import QByteArray
from .SchemeAccessHandler import SchemeAccessHandler
from .NetworkReply import NetworkReply
QtDocPath = "qthelp://com.trolltech."
ExtensionMap = {
".bmp": "image/bmp",
".css": "text/css",
".gif": "image/gif",
".html": "text/html",
".htm": "text/html",
".ico": "image/x-icon",
".jpeg": "image/jpeg",
".jpg": "image/jpeg",
".js": "application/x-javascript",
".mng": "video/x-mng",
".pbm": "image/x-portable-bitmap",
".pgm": "image/x-portable-graymap",
".pdf": "application/pdf",
".png": "image/png",
".ppm": "image/x-portable-pixmap",
".rss": "application/rss+xml",
".svg": "image/svg+xml",
".svgz": "image/svg+xml",
".text": "text/plain",
".tif": "image/tiff",
".tiff": "image/tiff",
".txt": "text/plain",
".xbm": "image/x-xbitmap",
".xml": "text/xml",
".xpm": "image/x-xpm",
".xsl": "text/xsl",
".xhtml": "application/xhtml+xml",
".wml": "text/vnd.wap.wml",
".wmlc": "application/vnd.wap.wmlc",
}
class QtHelpAccessHandler(SchemeAccessHandler):
"""
Class implementing a scheme access handler for QtHelp.
"""
def __init__(self, engine, parent=None):
"""
Constructor
@param engine reference to the help engine (QHelpEngine)
@param parent reference to the parent object (QObject)
"""
SchemeAccessHandler.__init__(self, parent)
self.__engine = engine
def __mimeFromUrl(self, url):
"""
Private method to guess the mime type given an URL.
@param url URL to guess the mime type from (QUrl)
@return mime type for the given URL (string)
"""
path = url.path()
ext = os.path.splitext(path)[1].lower()
if ext in ExtensionMap:
return ExtensionMap[ext]
else:
return "application/octet-stream"
def createRequest(self, op, request, outgoingData=None):
"""
Public method to create a request.
@param op the operation to be performed
(QNetworkAccessManager.Operation)
@param request reference to the request object (QNetworkRequest)
@param outgoingData reference to an IODevice containing data to be sent
(QIODevice)
@return reference to the created reply object (QNetworkReply)
"""
url = request.url()
strUrl = url.toString()
# For some reason the url to load is already wrong (passed from webkit)
# though the css file and the references inside should work that way.
# One possible problem might be that the css is loaded at the same
# level as the html, thus a path inside the css like
# (../images/foo.png) might cd out of the virtual folder
if not self.__engine.findFile(url).isValid():
if strUrl.startswith(QtDocPath):
newUrl = request.url()
if not newUrl.path().startswith("/qdoc/"):
newUrl.setPath("qdoc" + newUrl.path())
url = newUrl
strUrl = url.toString()
mimeType = mimetypes.guess_type(strUrl)[0]
if mimeType is None:
# do our own (limited) guessing
mimeType = self.__mimeFromUrl(url)
if self.__engine.findFile(url).isValid():
data = self.__engine.fileData(url)
else:
data = QByteArray(self.tr(
"""<title>Error 404...</title>"""
"""<div align="center"><br><br>"""
"""<h1>The page could not be found</h1><br>"""
"""<h3>'{0}'</h3></div>""").format(strUrl).encode("utf-8"))
return NetworkReply(request, data, mimeType, self.parent())
| gpl-3.0 | -7,076,045,622,750,163,000 | 31.528 | 79 | 0.570831 | false | 3.79291 | false | false | false |
Jumpscale/jumpscale6_core | apps/gridportal/base/system__packagemanager/methodclass/system_packagemanager.py | 1 | 1921 | from JumpScale import j
import JumpScale.grid.agentcontroller
class system_packagemanager(j.code.classGetBase()):
def __init__(self):
self._te = {}
self.actorname = "packagemanager"
self.appname = "system"
self.client = j.clients.agentcontroller.get()
self.gid = j.application.whoAmI.gid
def execute(self, script, nid, gid=None, **kwargs):
return self.client.execute('jumpscale', script, nid=nid, gid=gid, args=kwargs)
def getJPackages(self, **args):
nid = args.get('nid')
domain = args.get('domain', None)
return self.execute('jpackage_list', nid=nid, domain=domain)
def getJPackageInfo(self, **args):
nid = args.get('nid')
domain = args.get('domain', None)
name = args.get('pname', None)
version = args.get('version', None)
return self.execute('jpackage_info', nid=nid, domain=domain, pname=name, version=version)
def getJPackageFilesInfo(self, **args):
"""
ask the right processmanager on right node to get the information (will query jpackages underneath)
returns all relevant info about files of jpackage
param:nid id of node
param:domain domain name for jpackage
param:pname name for jpackage
result json
"""
nid = args.get('nid')
domain = args.get('domain', None)
name = args.get('pname', None)
version = args.get('version', None)
return self.execute('jpackage_fileinfo', nid=nid, domain=domain, pname=name, version=version)
def action(self, **args):
nid = args.get('nid')
domain = args.get('domain', None)
name = args.get('pname', None)
action = args.get('action', None)
version = args.get('version', None)
return self.execute('jpackage_action', nid=nid, domain=domain, pname=name, version=version, action=action)
| bsd-2-clause | 6,941,479,301,493,565,000 | 35.245283 | 114 | 0.624154 | false | 3.737354 | false | false | false |
Marcelpv96/SITWprac2017 | sportsBetting/features/steps/register_events.py | 1 | 1797 | from behave import *
use_step_matcher('parse')
@given('Exist a event created by "{username}"')
def step_impl(context, username):
from sportsBetting.models import Event, Team
from django.contrib.auth.models import User
for row in context.table:
name = row['local'] + ' v ' + row['visitor']
if not Event.objects.filter(name=name).exists():
e = Event()
e.name = name
e.user = User.objects.get(username=username)
e.team1 = Team.objects.get(name=row['local'])
e.team2 = Team.objects.get(name=row['visitor'])
e.save()
@when('I add a new event')
def step_impl(context):
for row in context.table:
context.browser.visit(context.get_url('/events/create/'))
if context.browser.url == context.get_url('/events/create/'):
form = context.browser.find_by_tag('form').first
context.browser.fill('name', row['local'] + ' v ' + row['visitor'])
context.browser.find_by_xpath(
'//select[@id="id_team1"]//option[text()="' + row['local'] + '"]', ).first.click()
context.browser.find_by_xpath(
'//select[@id="id_team2"]//option[text()="' + row['visitor'] + '"]', ).first.click()
form.find_by_id('team_submit').first.click()
@when('I want to delete the event "{event_name}"')
def step_impl(context, event_name):
from sportsBetting.models import Event
id = Event.objects.get(name=event_name).id
context.browser.visit(context.get_url('/events/delete/' + str(id)))
@when('I delete the event')
def step_impl(context):
form = context.browser.find_by_tag('form').first
form.find_by_css('.btn').first.click()
assert context.browser.url == context.get_url('/events/list_events/')
| gpl-3.0 | 9,157,351,948,259,877,000 | 34.94 | 100 | 0.604897 | false | 3.496109 | false | false | false |
cleinias/Homeo | src/VREP/VREPDetermTest.py | 1 | 27074 | '''
Created on Feb 22, 2015
@author: stefano
Script that tests V-REP deterministic runs.
Runs V-REP repeatedly with a deterministic
series of random motor commands over TCP/IP
Also includes related tests (such as light readings and Braitenberg-like simulations)
Assumes:
1. The V-REP world ("Scene") "$HOMEO/src/VREP/Khepera-J-Proximity-only.SF.ttt" is already running
2. V-REP listens on port 19997 (for main control)
3. The V-REP robot to be controlled is called "Khepera"
4. Other V-REP assumptions about lights and other features of the V-REP world hold (see method comments and V-REP world description)
5. A SimulationsData subdir exists at /home/stefano/Documents/Projects/Homeostat/Simulator/Python-port/Homeo/SimulationsData
'''
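
# Hedged usage sketch (illustration only, not part of the original file). Assuming the
# scene named above is open and its remote API server is listening on port 19997, the
# tests are presumably driven roughly like this (connect(), getHandles() and stopRobot()
# are defined further down in this file):
#
#   tests = VREPTests(noSteps=5000, noRuns=5, robotName="Khepera")
#   tests.connectAll()         # open the remote API connection and cache object handles
#   tests.testDetermMomvt()    # run the seeded random-motor-command trials
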
import vrep
from Helpers.SimulationThread import SimulationThread
import math
import numpy as np
# import matplotlib.pyplot as plt
import os, sys
import subprocess
import datetime
from numpy import dot, arccos, degrees
from math import pi
from numpy.linalg import norm
from time import sleep,time, strftime,localtime
from Helpers.General_Helper_Functions import scaleTo
from ctypes import c_ubyte
def distance(pointA3D, pointB3D):
"Return Euclidean distance between two 3D points"
return math.sqrt((pointA3D[0]-pointB3D[0])**2 + (pointA3D[1]-pointB3D[1])**2 + (pointA3D[2]-pointB3D[2])**2)
def distanceFromOrig(point3D):
"Return Euclidean distance"
return math.sqrt((0 - point3D[0])**2 + (0 - point3D[1])**2 + (0 - point3D[2])**2)
def clip(clipValue,minV,maxV):
if clipValue < minV:
return minV
elif clipValue > maxV:
return maxV
return clipValue
def asByteArray(m_string):
    "Return the string as a ctypes unsigned byte array (the format V-REP string signals expect)"
    return (c_ubyte * len(m_string)).from_buffer_copy(m_string)
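
# Example of how the helper is used below; clientID stands for the id returned by
# vrep.simxStart and the directory path is just a placeholder:
#   vrep.simxSetStringSignal(clientID, "HOMEO_SIGNAL_SIM_DATA_DIR",
#                            asByteArray("/tmp/SimsData"), vrep.simx_opmode_oneshot_wait)
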
class VREPTests(object):
def __init__(self, noSteps = 5000, noRuns=5, robotName = "Khepera"):
"Parameters"
#VREP_scene_file ="/home/stefano/Documents/Projects/Homeostat/Simulator/Python-port/Homeo/src/VREP/Khepera-J-Proximity-only.SF.ttt"
self.simulation_port = 19997
self.trajectoryPort = 19998
self.robot_host = '127.0.0.1'
self.VREP_HOME = '/home/stefano/builds/from-upstream-sources/V-REP_PRO_EDU_V3_2_0_64_Linux/'
self.robotName = robotName
self.noRuns = noRuns
self.noSteps = noSteps
self.targetPose = [7,7]
self.initPose = [4,4,0.0191]
self.initOrient = [-90,0,-90]
self.betwCmdDelays = 0
self.maxSpeed = 50
self.trajStateSignalName = "HOMEO_SIGNAL_"+ self.robotName + "_TRAJECTORY_RECORDER"
def startTrajRecorder(self):
pass
def connectAll(self):
self.connect()
self.getHandles()
# self.startTrajRecorder()
def testDetermMomvt(self):
self.moveRandomly()
def testLightSensors(self):
self.moveAndReadLights()
def moveReadLights(self):
self.moveAndReadProxSensors()
def moveRandomly(self):
"Set trajectory data directory and communicate to V-REP"
HOMEODIR = '/home/stefano/Documents/Projects/Homeostat/Simulator/Python-port/Homeo/'
dataDir = 'SimsData-'+strftime("%Y-%m-%d-%H-%M-%S", localtime(time()))
simsDataDir = os.path.join(HOMEODIR,"SimulationsData",dataDir)
os.mkdir(simsDataDir)
print "Saving to: ", simsDataDir
e = vrep.simxSetStringSignal(self.simulID,"HOMEO_SIGNAL_SIM_DATA_DIR" ,asByteArray(simsDataDir), vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
print "Message sent, error code: ", e
for run in xrange(self.noRuns):
eCode = vrep.simxStartSimulation(self.simulID, vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
e = vrep.simxSetStringSignal(self.simulID,"HOMEO_SIGNAL_SIM_DATA_DIR" ,asByteArray(simsDataDir), vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
print "Simulation started: run number %d, error code: %d"% (run+1, eCode)
"Wait until simulation is ready, otherwise we will miss a few movement commands"
# sleep(2)
np.random.seed(64)
# resetRobotInitPose(initPose, self.simulID, ePuckHandle)
eCode = vrep.simxSetStringSignal(self.simulID, self.trajStateSignalName, asByteArray("NEWFILE"), vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
if eCode == 0:
print "Starting a new trajectory file"
else:
print "ERROR: Could not start a new trajectory file"
for step in xrange(self.noSteps):
timeStart = time()
rightSpeed = np.random.uniform(self.maxSpeed * 2) # - self.maxSpeed
leftSpeed = np.random.uniform(self.maxSpeed * 2) # -maxSpeed
eCode = vrep.simxSetJointTargetVelocity(self.simulID, self.rightMotor, rightSpeed, vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
eCode = vrep.simxSetJointTargetVelocity(self.simulID, self.leftMotor, leftSpeed, vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
for i in xrange(self.betwCmdDelays):
vrep.simxSynchronousTrigger(self.simulID)
timeElapsed = time() - timeStart
"Stop the robot"
self.stopRobot(self.simulID, [self.rightMotor, self.leftMotor])
eCode = vrep.simxSetStringSignal(self.simulID, self.trajStateSignalName, asByteArray("SAVE"), vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
if eCode == 0:
print "Saving trajectory file"
else:
print "ERROR: Could not save a new trajectory file"
sleep(.5)
robotPose = vrep.simxGetObjectPosition(self.simulID, self.robotHandle, -1, vrep.simx_opmode_oneshot_wait)[1][:2]
vrep.simxSynchronousTrigger(self.simulID)
print "%d: Robot is at: %.3f, %.3f Distance from target is: %.4f. Run took exactly %.3f seconds" % (run,
robotPose[0],
robotPose[1],
self.computeDistance(self.targetPose, robotPose),
timeElapsed) #
eCode = vrep.simxStopSimulation(self.simulID, vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
sleep(1)
# eCode = vrep.simxStartSimulation(self.simulID, vrep.simx_opmode_oneshot_wait)
# vrep.simxSynchronousTrigger(self.simulID)
eCode = vrep.simxSetStringSignal(self.simulID, self.trajStateSignalName, asByteArray("CLOSEFILE"), vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
if eCode == 0:
print "Starting a new trajectory file"
else:
print "ERROR: Could not close a new trajectory file"
print "Done"
def moveAndReadLights(self):
"rotate in place and print light readings"
eCode, res, rightEyeRead = vrep.simxGetVisionSensorImage(self.simulID, self.rightEye, 0, vrep.simx_opmode_streaming)
ecode, res, leftEyeRead = vrep.simxGetVisionSensorImage(self.simulID, self.leftEye, 0, vrep.simx_opmode_streaming)
vrep.simxSynchronousTrigger(self.simulID)
for step in xrange(self.noSteps):
rightSpeed = 25
leftSpeed = rightSpeed
eCode = vrep.simxSetJointTargetVelocity(self.simulID, self.rightMotor, rightSpeed, vrep.simx_opmode_oneshot_wait)
eCode = vrep.simxSetJointTargetVelocity(self.simulID, self.leftMotor, leftSpeed, vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
eCodeR, res, rightEyeRead = vrep.simxGetVisionSensorImage(self.simulID, self.rightEye, 0, vrep.simx_opmode_buffer)
eCodeL, res, leftEyeRead = vrep.simxGetVisionSensorImage(self.simulID, self.leftEye, 0, vrep.simx_opmode_buffer)
vrep.simxSynchronousTrigger(self.simulID)
# print "Right eCode:\t", eCodeR,
# print "Left eCode:\t", eCodeL
# leftImg = np.array(leftEyeRead, np.uint8)
# rightImg.resize(res[0],res[1],3)
print "Right:\t%d, %d\tLeft:\t%d, %d"% (len(rightEyeRead),sum(rightEyeRead), len(leftEyeRead),sum(leftEyeRead))
# print rightImg.shape
# plt.imshow(rightImg)#, origin="lower")
# for run in xrange(self.noRuns):
# np.random.seed(64)
#
# for step in xrange(self.noSteps):
# rightSpeed = np.random.uniform(self.maxSpeed * 2) # - self.maxSpeed
# leftSpeed = np.random.uniform(self.maxSpeed * 2) # -maxSpeed
# eCode = vrep.simxSetJointTargetVelocity(self.simulID, self.rightMotor, rightSpeed, vrep.simx_opmode_oneshot_wait)
# eCode = vrep.simxSetJointTargetVelocity(self.simulID, self.leftMotor, leftSpeed, vrep.simx_opmode_oneshot_wait)
# vrep.simxSynchronousTrigger(self.simulID)
# eCode, res, rightEyeRead = vrep.simxGetVisionSensorImage(self.simulID, self.rightEye, 1, vrep.simx_opmode_buffer)
# ecode, res, leftEyeRead = vrep.simxGetVisionSensorImage(self.simulID, self.leftEye, 1, vrep.simx_opmode_buffer)
# vrep.simxSynchronousTrigger(self.simulID)
# print "Right eye reads: \t", rightEyeRead
# print "Left eye reads: \t", leftEyeRead
def moveAndReadProxSensors(self):
"rotate in place and print sensor distance and normal vector readings"
for step in xrange(self.noSteps):
if step>self.noSteps / 2:
rightSpeed = -1
leftSpeed = -rightSpeed
else:
rightSpeed = 1
leftSpeed = -rightSpeed
eCode = vrep.simxSetJointTargetVelocity(self.simulID, self.rightMotor, rightSpeed, vrep.simx_opmode_oneshot_wait)
eCode = vrep.simxSetJointTargetVelocity(self.simulID, self.leftMotor, leftSpeed, vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
rightInput = vrep.simxReadProximitySensor(self.simulID, self.rightEye, vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
leftInput = vrep.simxReadProximitySensor(self.simulID, self.leftEye, vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
print "Left-->err:%s - Detct'd: %s\t%s\t\tRight--> err:%s - Detct'd: %s\t\t\t%s" % (leftInput[0],
leftInput[3],
leftInput[2],
rightInput[0],
rightInput[3],
rightInput[2])
sleep(.1)
self.stopRobot(self.simulID,[self.rightMotor,self.leftMotor])
vrep.simxSynchronousTrigger(self.simulID)
def braiten1a(self):
"slowly move forward and print normal vector readings"
intens = 50
ambientIntens = 0
attVect = [0,0,1]
print "Proximity sensor readings error codes: "
for step in xrange(self.noSteps):
rightInput = vrep.simxReadProximitySensor(self.simulID, self.rightEye, vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
leftInput = vrep.simxReadProximitySensor(self.simulID, self.leftEye, vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
centerInput = vrep.simxReadProximitySensor(self.simulID, self.KJcenterEye, vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
angle = degrees(self.angleBetVecs([0,0,1], centerInput[2]))
lightReading = self.irradAtSensor(intens, ambientIntens, centerInput[2], attVect)
print "Center-->err:%s - Detct'd: %s\tAngle:%.3f\tIrrad:%.3f\tNorm: %.3f\tVector:%s\t" % (centerInput[0],
centerInput[3],
angle,
lightReading,
norm(centerInput[2]),
centerInput[2])
eCode = vrep.simxSetJointTargetVelocity(self.simulID, self.rightMotor, lightReading, vrep.simx_opmode_oneshot_wait)
eCode = vrep.simxSetJointTargetVelocity(self.simulID, self.leftMotor, lightReading, vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
sleep(0)
def braiten1b(self):
"slowly move forward and print normal vector readings"
intens = 100
ambientIntensRatio = 0.2
attVect = [0,0,pi *4]
for step in xrange(self.noSteps):
rightInput = vrep.simxReadProximitySensor(self.simulID, self.rightEye, vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
leftInput = vrep.simxReadProximitySensor(self.simulID, self.leftEye, vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
centerInput = vrep.simxReadProximitySensor(self.simulID, self.KJcenterEye, vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
angle = degrees(self.angleBetVecs([0,0,1], centerInput[2]))
lightReading = self.irradAtSensor(intens, ambientIntensRatio, centerInput[2], attVect)
print "Center-->err:%s - Detct'd: %s\tAngle:%.3f\tIrrad:%.3f\tNorm: %.3f\tVector:%s\t" % (centerInput[0],
centerInput[3],
angle,
lightReading,
norm(centerInput[2]),
centerInput[2])
eCode = vrep.simxSetJointTargetVelocity(self.simulID, self.rightMotor, 1/lightReading, vrep.simx_opmode_oneshot_wait)
eCode = vrep.simxSetJointTargetVelocity(self.simulID, self.leftMotor, 1/lightReading, vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
sleep(0)
def braiten2a(self):
"Seek light source"
"PARAMETERS"
intens = 100
ambientIntensRatio = 0
attVect = [0,0,1]
HOMEODIR = '/home/stefano/Documents/Projects/Homeostat/Simulator/Python-port/Homeo/'
dataDir = 'SimsData-'+strftime("%Y-%m-%d-%H-%M-%S", localtime(time()))
simsDataDir = os.path.join(HOMEODIR,"SimulationsData",dataDir)
os.mkdir(simsDataDir)
print "Saving to: ", simsDataDir
e = vrep.simxSetStringSignal(self.simulID,"HOMEO_SIGNAL_SIM_DATA_DIR" ,asByteArray(simsDataDir), vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
print "Message sent, error code: ", e
"END PARAMETERS"
for run in xrange(self.noRuns):
eCode = vrep.simxStartSimulation(self.simulID, vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
e = vrep.simxSetStringSignal(self.simulID,"HOMEO_SIGNAL_SIM_DATA_DIR" ,asByteArray(simsDataDir), vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
print "Simulation started: run number %d, error code: %d"% (run+1, eCode)
"Wait until simulation is ready, otherwise we will miss a few movement commands"
# sleep(2)
np.random.seed(64)
# resetRobotInitPose(initPose, self.simulID, ePuckHandle)
eCode = vrep.simxSetStringSignal(self.simulID, self.trajStateSignalName, asByteArray("NEWFILE"), vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
if eCode == 0:
print "Starting a new trajectory file"
else:
print "ERROR: Could not start a new trajectory file"
timeStart = time()
for step in xrange(self.noSteps):
rightLight = vrep.simxGetFloatSignal(self.simulID, "HOMEO_SIGNAL_rightEye_LIGHT_READING", vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
leftLight = vrep.simxGetFloatSignal(self.simulID, "HOMEO_SIGNAL_leftEye_LIGHT_READING", vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
# print "rightLight %.3f\t left light: %.3f" %(rightLight[1],leftLight[1])
eCode = vrep.simxSetJointTargetVelocity(self.simulID, self.rightMotor, clip(leftLight[1],0,self.maxSpeed), vrep.simx_opmode_oneshot_wait)
eCode = vrep.simxSetJointTargetVelocity(self.simulID, self.leftMotor, clip(rightLight[1],0, self.maxSpeed), vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
sleep(0)
timeElapsed = time() - timeStart
"Stop the robot"
self.stopRobot(self.simulID, [self.rightMotor, self.leftMotor])
eCode = vrep.simxSetStringSignal(self.simulID, self.trajStateSignalName, asByteArray("SAVE"), vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
if eCode == 0:
print "Saving trajectory file"
else:
print "ERROR: Could not save a new trajectory file"
sleep(.5)
robotPose = vrep.simxGetObjectPosition(self.simulID, self.robotHandle, -1, vrep.simx_opmode_oneshot_wait)[1][:2]
vrep.simxSynchronousTrigger(self.simulID)
print "%d: Robot is at: %.3f, %.3f Distance from target is: %.4f. Run took exactly %.3f seconds" % (run,
robotPose[0],
robotPose[1],
self.computeDistance(self.targetPose, robotPose),
timeElapsed) #
eCode = vrep.simxStopSimulation(self.simulID, vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
sleep(1)
# eCode = vrep.simxStartSimulation(self.simulID, vrep.simx_opmode_oneshot_wait)
# vrep.simxSynchronousTrigger(self.simulID)
eCode = vrep.simxSetStringSignal(self.simulID, self.trajStateSignalName, asByteArray("CLOSEFILE"), vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
if eCode == 0:
print "Starting a new trajectory file"
else:
print "ERROR: Could not close a new trajectory file"
print "Done"
def cleanUp(self):
print "About to stop simulation connected to self.simulID: ", self.simulID
vrep.simxStopSimulation(self.simulID, vrep.simx_opmode_oneshot)
vrep.simxSynchronousTrigger(self.simulID)
# vrep.simxFinish(robotID)
vrep.simxFinish(self.simulID)
vrep.simxFinish(-1)
print "Disconnected from V-REP"
def computeDistance(self,a, b):
return math.sqrt((a[0]-b[0])**2 + (a[1]-b[1])**2)
def stopRobot(self,simulHandle, motorHandles):
for motor in motorHandles:
eCode = vrep.simxSetJointTargetVelocity(simulHandle, motor, 0, vrep.simx_opmode_oneshot)
vrep.simxSynchronousTrigger(self.simulID)
def connect(self):
#os.chdir(VREP_HOME)
#subprocess.call([os.path.join(VREP_HOME,'vrep.sh'), VREP_scene_file], shell = True, cwd = VREP_HOME)
"Close existing connections"
vrep.simxFinish(-1)
"Connect to Simulation"
self.simulID = vrep.simxStart(self.robot_host,self.simulation_port,True,True, 5000,5)
eCode = vrep.simxSynchronous(self.simulID, True)
if eCode != 0:
print "Could not get V-REP to synchronize operation with me"
if not self.simulID == -1:
eCode = vrep.simxStartSimulation(self.simulID, vrep.simx_opmode_oneshot)
vrep.simxSynchronousTrigger(self.simulID)
print "my SimulID is ", self.simulID
else:
sys.exit("Failed to connect to VREP simulation. Bailing out")
def getHandles(self):
"Get handles for epuck and motors"
ecodeE, self.robotHandle = vrep.simxGetObjectHandle(self.simulID, "Khepera", vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
eCodeR, self.rightMotor = vrep.simxGetObjectHandle(self.simulID, "rightWheel", vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
eCodeL, self.leftMotor = vrep.simxGetObjectHandle(self.simulID, "leftWheel", vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
eCodeR, self.rightEye = vrep.simxGetObjectHandle(self.simulID, "rightEye", vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
eCodeL, self.leftEye = vrep.simxGetObjectHandle(self.simulID, "leftEye", vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
# eCodeL, self.KJcenterEye = vrep.simxGetObjectHandle(self.simulID, "Khepera_proxSensor3", vrep.simx_opmode_oneshot_wait)
# vrep.simxSynchronousTrigger(self.simulID)
eCode,self.targetID = vrep.simxGetObjectHandle(self.simulID,"TARGET", vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
if (self.rightMotor == 0 or self.leftMotor == 0 or self.rightEye == 0 or self.leftEye == 0):
self.cleanUp()
sys.exit("Exiting: Could not connect to motors or sensors")
else:
print " I am connected to Right Motor: %d, leftMotor: %d, Right eye: %d, Left eye: %d, and my target has ID:%d" % (self.rightMotor,
self.leftMotor,
self.rightEye,
self.leftEye,
self.targetID)
def angleBetVecs(self,vecA,vecB):
vecA_norm = vecA/norm(vecA)
vecB_norm = vecB/norm(vecB)
return arccos(dot(vecA_norm,vecB_norm))
def irradAtSensor(self,intens,ambIntensRatio,vecToLight, attenVect):
"""Compute the irradiance at the light sensor surface
Intens is the directional component of the light intensity,
ambIntensRatio is ambient component (not subject to attenuation) of the light's intensity. Must be in [0,1]
vecToLight is the 3D vector to the light source in the sensor's frame of reference
attenVect is a 3 element vector with the direct, linear, and quadratic attenuation coefficients """
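        # Note (derived from the code below, not from external documentation):
        # the returned value is effectively
        #   (I*(1-ambRatio)*cos(theta) + I*ambRatio) / (c0 + c1*d + c2*d**2)
        # with d = |vecToLight| and theta measured against the sensor normal [0,0,1];
        # the ambient term is attenuated too, despite what the docstring says.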
cosAngle = (dot([0,0,1],vecToLight)/norm(vecToLight))
directIntens = (intens * (1-ambIntensRatio)) * cosAngle
distance = norm(vecToLight)
attenuation = 1/(attenVect[0]+(attenVect[1]*distance)+(attenVect[2]*distance**2))
return (directIntens + (intens*ambIntensRatio)) * attenuation
def testMaxSpeed(self, maxSpeed, mode):
"""test max speed of khepera-like robot in V-Rep
revving the motors up to maxSpeed in the self.noSteps and then backward.
mode--> 1, both motors, 2: right only, 3: left only"""
if mode == 1:
rightOn = leftOn = 1
elif mode == 2:
rightOn = 1
leftOn = 0
elif mode == 3:
rightOn = 0
leftOn = 1
unitSpeed = maxSpeed /self.noSteps
for i in xrange(self.noSteps):
eCode = vrep.simxSetJointTargetVelocity(self.simulID, self.rightMotor, unitSpeed *(i+1)*rightOn, vrep.simx_opmode_oneshot_wait)
eCode = vrep.simxSetJointTargetVelocity(self.simulID, self.leftMotor, unitSpeed *(i+1)*leftOn, vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
print "Step: %s\t Speed now: %.2f" %(str(i),(unitSpeed *(i+1)))
for i in xrange(self.noSteps):
eCode = vrep.simxSetJointTargetVelocity(self.simulID, self.rightMotor, -(maxSpeed/(i+1))*rightOn, vrep.simx_opmode_oneshot_wait)
eCode = vrep.simxSetJointTargetVelocity(self.simulID, self.leftMotor, -(maxSpeed/(i+1))*leftOn, vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
print "Step: %s\t Speed now: %.2f" % (str(i), (maxSpeed/(i+1))*rightOn)
if __name__ == "__main__":
test = VREPTests(noSteps=100, noRuns=5)
test.connectAll()
# test.testDetermMomvt()
# test.testLightSensors()
# test.moveReadLights()
# test.testMaxSpeed(300,1)
test.braiten2a()
test.cleanUp()
| gpl-3.0 | 6,535,370,862,703,530,000 | 55.640167 | 162 | 0.582182 | false | 3.613239 | true | false | false |
ndawe/pyAMI | pyAMI/query.py | 1 | 28966 | # Author: Noel Dawe
from __future__ import division
import re
import sys
from pyAMI.objects import DatasetInfo, RunPeriod
from pyAMI.schema import *
from pyAMI.utils import *
from pyAMI.defaults import YEAR, STREAM, TYPE, PROJECT, PRODSTEP
DATA_PATTERN = re.compile(
'^(?P<project>\w+).(?P<run>[0-9]+).'
'(?P<stream>[a-zA-Z_\-0-9]+).(recon|merge).'
'(?P<type>[a-zA-Z_\-0-9]+).(?P<version>\w+)$')
ESD_VERSION_PATTERN = '(?P<la>f|r)(?P<lb>[0-9]+)'
AOD_VERSION_PATTERN = ESD_VERSION_PATTERN + '_(?P<ma>m|p)(?P<mb>[0-9]+)'
NTUP_VERSION_PATTERN = AOD_VERSION_PATTERN + '_p(?P<rb>[0-9]+)'
ESD_VERSION_PATTERN = re.compile('^%s$' % ESD_VERSION_PATTERN)
AOD_VERSION_PATTERN = re.compile('^%s$' % AOD_VERSION_PATTERN)
NTUP_VERSION_PATTERN = re.compile('^%s$' % NTUP_VERSION_PATTERN)
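# Illustrative decomposition (the dataset name below is a made-up example of the naming scheme):
#   'data11_7TeV.00186877.physics_Muons.merge.NTUP_SMWZ.f396_m946_p833'
#   -> project='data11_7TeV', run='00186877', stream='physics_Muons',
#      type='NTUP_SMWZ', version='f396_m946_p833' (matches NTUP_VERSION_PATTERN)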
def _clean_dataset(dataset):
"""
Remove trailing slashes
*dataset*: str
dataset name
"""
if dataset is None:
return None
return dataset.rstrip('/')
def _expand_period_contraints(periods):
"""
period=B -> period like B%
period=B2 -> period=B2
"""
if isinstance(periods, basestring):
periods = periods.split(',')
selection = []
# single character
single_chars = [p for p in periods if len(p) == 1]
selection += ["period like '%s%%'" % p for p in single_chars]
# multiple characters
mult_chars = [p for p in periods if len(p) > 1]
selection += ["period='%s'" % p for p in mult_chars]
return " OR ".join(selection)
def search_query(client,
entity,
cmd='SearchQuery',
cmd_args=None,
pattern=None,
order=None,
limit=None,
fields=None,
flatten=False,
mode='defaultField',
project_name='Atlas_Production',
processing_step_name='Atlas_Production',
show_archived=False,
literal_match=False,
**kwargs):
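    """
    Build and run a generic AMI query against `entity` (see TABLES in pyAMI.schema).
    `pattern` is matched on the table's primary field with SQL LIKE (or equality if
    literal_match), extra kwargs become additional constraints, and the result is
    returned as (query_fields, rows).
    """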
try:
table = TABLES[entity]
except KeyError:
raise TypeError('Entity %s does not exist' % entity)
primary_field = table.primary
query_fields = parse_fields(fields, table)
if primary_field not in query_fields:
query_fields.append(primary_field)
query_fields_str = ', '.join(query_fields)
if cmd_args is None:
cmd_args = {}
if not isinstance(pattern, list):
patterns = [pattern]
else:
patterns = pattern
constraints = []
for pattern in patterns:
        # If the user has not put any '%' characters,
        # add them to the beginning and the end of the pattern;
        # otherwise assume the user knows what he/she is doing.
        # Always adding '%' would make it impossible to search for
        # strings that start with a given character sequence.
if pattern is None:
if literal_match:
raise ValueError(
'pattern must not be None for literal matches')
pattern = '%'
elif '%' not in pattern and not literal_match:
pattern = '%' + pattern + '%'
elif not literal_match:
# replace repeated % with a single %
pattern = re.sub('%+', '%', pattern)
if literal_match:
constraints.append("%s='%s'" % (primary_field, pattern))
else:
constraints.append("%s like '%s'" % (primary_field, pattern))
constraints = ' OR '.join(constraints)
constraints = '(%s)' % constraints
if kwargs:
for name, value in kwargs.items():
if value is not None:
name = validate_field(name, table)
"""
Case of multiple values for a given field -> search with OR
"""
if name == 'period':
constraints += " AND (%s)" % _expand_period_contraints(value)
else:
if isinstance(value, (list, tuple)):
constraints += " AND (%s)" % (" OR ".join(["%s='%s'" %
(name, val) for val in value]))
else:
constraints += " AND %s='%s'" % (name, value)
if order is None:
order_field = primary_field
else:
order_field = validate_field(order, table)
if isinstance(limit, (list, tuple)):
limit = ' LIMIT %i,%i' % tuple(limit)
elif limit is not None:
limit = ' LIMIT 0,%i' % limit
else:
limit = ''
args = [cmd,
"entity=%s" % entity,
"glite=SELECT "
+ query_fields_str
+ (" WHERE (%s)" % constraints)
+ (" ORDER BY %s" % order_field)
+ limit,
"project=%s" % project_name,
"processingStep=%s" % processing_step_name,
"mode=%s" % mode]
for item in cmd_args.items():
args.append("%s=%s" % item)
if show_archived:
args.append('showArchived=true')
result = client.execute(args)
things = [thing for thing in result.rows()]
if flatten:
things = flatten_results(things, query_fields)
return query_fields, things
def get_types(client,
pattern,
order=None,
limit=None,
fields=None,
flatten=False,
show_archived=False,
**kwargs):
"""
A command to list all ATLAS types.
Only those with writeStatus=valid can be used for new names.
"""
if 'write_status' not in kwargs:
kwargs['write_status'] = 'valid'
query_fields, types = search_query(
client=client, entity='data_type', pattern=pattern,
processing_step_name='*',
order=order, limit=limit, fields=fields, show_archived=show_archived, **kwargs)
if flatten:
types = flatten_results(types, query_fields)
return types
def get_subtypes(client,
pattern,
order=None,
limit=None,
fields=None,
flatten=False,
show_archived=False,
**kwargs):
"""
A command to list all ATLAS subtypes.
Only those with writeStatus=valid can be used for new names.
"""
if 'write_status' not in kwargs:
kwargs['write_status'] = 'valid'
query_fields, types = search_query(
client=client, entity='subData_type', pattern=pattern,
processing_step_name='*',
order=order, limit=limit, fields=fields, show_archived=show_archived, **kwargs)
if flatten:
types = flatten_results(types, query_fields)
return types
def add_type(client, type):
"""
Add a type
"""
args = ['Addtype', type]
return client.execute(args)
def get_nomenclatures(client,
pattern,
order=None,
limit=None,
fields=None,
flatten=False,
show_archived=False,
**kwargs):
"""
Return list of ATLAS nomenclatures
"""
if 'write_status' not in kwargs:
kwargs['write_status'] = 'valid'
query_fields, nomens = search_query(
client=client, entity='nomenclature', pattern=pattern,
processing_step_name='*',
order=order, limit=limit, fields=fields, show_archived=show_archived, **kwargs)
if flatten:
nomens = flatten_results(nomens, query_fields)
return nomens
def get_projects(client,
pattern,
order=None,
limit=None,
fields=None,
flatten=False,
show_archived=False,
**kwargs):
if 'write_status' not in kwargs:
kwargs['write_status'] = 'valid'
query_fields, projects = search_query(
client=client, entity='projects', pattern=pattern,
processing_step_name='*',
order=order, limit=limit, fields=fields, show_archived=show_archived, **kwargs)
if flatten:
projects = flatten_results(projects, query_fields)
return projects
def get_subprojects(client,
pattern,
order=None,
limit=None,
fields=None,
flatten=False,
show_archived=False,
**kwargs):
if 'write_status' not in kwargs:
kwargs['write_status'] = 'valid'
query_fields, projects = search_query(
client=client, entity='subProjects', pattern=pattern,
processing_step_name='*',
order=order, limit=limit, fields=fields, show_archived=show_archived, **kwargs)
if flatten:
projects = flatten_results(projects, query_fields)
return projects
def get_prodsteps(client,
pattern,
order=None,
limit=None,
fields=None,
flatten=False,
show_archived=False,
**kwargs):
if 'write_status' not in kwargs:
kwargs['write_status'] = 'valid'
query_fields, steps = search_query(
client=client, entity='productionStep', pattern=pattern,
processing_step_name='*',
order=order, limit=limit, fields=fields, show_archived=show_archived, **kwargs)
if flatten:
steps = flatten_results(steps, query_fields)
return steps
def get_datasets(client,
pattern,
parent_type=None,
order=None,
limit=None,
fields=None,
flatten=False,
show_archived=False,
from_file=False,
**kwargs):
"""
Return list of datasets matching pattern
"""
if 'ami_status' not in kwargs:
kwargs['ami_status'] = 'VALID'
cmd_args = {}
if parent_type is not None and 'parent_type' not in kwargs:
cmd_args['parentType'] = parent_type
if from_file:
patterns = read_patterns_from(pattern)
else:
patterns = [pattern]
patterns = [_clean_dataset(p) for p in patterns]
query_fields, datasets = search_query(
client=client,
cmd='DatasetSearchQuery',
cmd_args=cmd_args,
entity='dataset',
pattern=patterns,
order=order, limit=limit,
fields=fields,
show_archived=show_archived, **kwargs)
if flatten:
datasets = flatten_results(datasets, query_fields)
return datasets
def get_periods_for_run(client, run):
"""
Return data periods which contain this run
"""
result = client.execute(['GetDataPeriodsForRun', '-runNumber=%i' % run])
periods = sorted([
RunPeriod(
level=int(e['periodLevel']),
name=str(e['period']),
project=str(e['project']),
status=str(e['status']),
description=str(e['description']))
for e in result.to_dict()['Element_Info'].values()])
return periods
def get_periods(client, year=YEAR, level=2):
"""
Return all periods at a specified detail level in the given year
"""
cmd = ['ListDataPeriods', '-createdSince=2009-01-01 00:00:00' ]
if year > 2000:
year %= 1000
cmd += [ '-projectName=data%02i%%' % year]
if level in [1, 2, 3]:
cmd += [ '-periodLevel=%i' % level ]
else:
raise ValueError('level must be 1, 2, or 3')
result = client.execute(cmd)
periods = [RunPeriod(project=e['projectName'],
year=year,
name=str(e['period']),
level=level,
status=e['status'],
description=e['description']) \
for e in result.to_dict()['Element_Info'].values()]
periods.sort()
return periods
def get_all_periods(client):
"""
Return all periods
"""
all_periods = []
p = re.compile("(?P<period>(?P<periodletter>[A-Za-z]+)(?P<periodnumber>\d+)?)$")
result = get_periods(client, year=0, level=0)
for period, projectName in result:
m = p.match(period)
if not m:
continue
year = int(projectName[4:6])
period_letter = m.group('periodletter')
if m.group('periodnumber'):
period_number = int(m.group('periodnumber'))
else:
period_number = 0
if len(period_letter) != 1:
pc = 0
else:
pc = 10000 * year + 100 * (ord(period_letter.upper()) - 65) + period_number
all_periods += [ ((year, period, pc), projectName + ".period" + period) ]
all_periods.sort()
return all_periods
def print_periods(periods, wrap_desc=True, wrap_width=50, stream=None):
if stream is None:
stream = sys.stdout
table = [['Project', 'Name', 'Status', 'Description']]
for period in periods:
table.append([period.project,
period.name,
period.status,
period.description])
print_table(table,
wrap_last=wrap_desc,
wrap_width=wrap_width,
vsep='-',
stream=stream)
def get_runs(client, periods=None, year=YEAR):
"""
Return all runs contained in the given periods in the specified year
"""
if year > 2000:
year %= 1000
if not periods:
periods = [period.name for period in get_periods(client, year=year, level=1)]
elif isinstance(periods, basestring):
periods = periods.split(',')
runs = []
# remove duplicate periods
for period in set(periods):
cmd = ['GetRunsForDataPeriod', '-period=%s' % period]
cmd += [ '-projectName=data%02i%%' % year ]
result = client.execute(cmd)
runs += [ int(e['runNumber']) for e in result.to_dict()['Element_Info'].values() ]
# remove duplicates
runs = list(set(runs))
runs.sort()
return runs
def get_provenance(client, dataset, type=None, **kwargs):
"""
Return all parent dataset of the given dataset
"""
dataset = _clean_dataset(dataset)
args = ["ListDatasetProvenance",
"logicalDatasetName=%s" % dataset,
'output=xml']
if kwargs:
args += ['%s=%s' % item for item in kwargs.items()]
result = client.execute(args)
dom = result.dom
graph = dom.getElementsByTagName('graph')
dictOfLists = {}
for line in graph:
nodes = line.getElementsByTagName('node')
for node in nodes:
level = int(node.attributes['level'].value)
dataset = node.attributes['name'].value
if type and (type in dataset):
levelList = dictOfLists.get(level, [])
levelList.append(dataset)
dictOfLists[level] = levelList
elif not type:
levelList = dictOfLists.get(level, [])
levelList.append(dataset)
dictOfLists[level] = levelList
return dictOfLists
def print_provenance(result):
for key in sorted(result.keys()):
print "generation =", key
for dataset in sorted(result[key]):
print " ", dataset
def get_dataset_info(client, dataset, **kwargs):
"""
Return a DatasetInfo instance (the dataset metadata)
*client*: AMIClient
*dataset*: str
*kwargs*: dict
"""
dataset = _clean_dataset(dataset)
args = ["GetDatasetInfo",
"logicalDatasetName=%s" % dataset]
if kwargs:
args += ['%s=%s' % item for item in kwargs.items()]
dataset_info = DatasetInfo(dataset=dataset)
result = client.execute(args)
dom = result.dom
# get the rowsets
rowsets = dom.getElementsByTagName('rowset')
for rowset in rowsets:
rowsetLabel = ""
if "type" in rowset.attributes.keys():
rowsetLabel = rowsetLabel + rowset.attributes['type'].value
rows = rowset.getElementsByTagName('row')
if (rowsetLabel == "Element_Info"):
for row in rows:
fields = row.getElementsByTagName("field")
for field in fields:
if field.firstChild:
tableName = field.attributes['table'].value
if tableName == "dataset":
value = field.firstChild.nodeValue
name = field.attributes['name'].value
dataset_info.info[name] = value
elif tableName == "dataset_extra":
value = field.firstChild.nodeValue
name = field.attributes['name'].value
dataset_info.extra[name] = value
elif (tableName == "dataset_added_comment") or \
(tableName == "dataset_comment"):
value = field.firstChild.nodeValue
name = field.attributes['name'].value
dataset_info.comments[name] = value
elif (tableName == "dataset_property"):
propertyName = field.attributes['name'].value.split('_')[0]
if propertyName in dataset_info.properties:
tmpDict = dataset_info.properties[propertyName]
else:
tmpDict = {"type": "",
"min": "",
"max": "",
"unit": "",
"description": ""}
propertyNameSubField = field.attributes['name'].value
try:
propertyNameSubValue = field.firstChild.nodeValue
except:
propertyNameSubValue = ""
if propertyNameSubField == propertyName + "_type":
tmpDict["type"] = propertyNameSubValue
if propertyNameSubField == propertyName + "_min":
tmpDict["min"] = propertyNameSubValue
if propertyNameSubField == propertyName + "_max":
tmpDict["max"] = propertyNameSubValue
if propertyNameSubField == propertyName + "_unit":
tmpDict["unit"] = propertyNameSubValue
if propertyNameSubField == propertyName + "_desc":
tmpDict["description"] = propertyNameSubValue
dataset_info.properties[propertyName] = tmpDict
return dataset_info
def get_event_info(client, dataset, **kwargs):
"""
Return the metadata of the parent event generator dataset
*client*: AMIClient
*dataset*: str
*kwargs*: dict
"""
dataset = _clean_dataset(dataset)
if 'EVNT' not in dataset:
prov = get_provenance(client, dataset, type='EVNT', **kwargs)
evgen_datasets = []
for key, dsets in prov.items():
evgen_datasets += dsets
else:
evgen_datasets = [dataset]
results = []
for dset in set(evgen_datasets):
results.append(get_dataset_info(client, dset, **kwargs))
return results
def get_dataset_xsec_effic(client, dataset, **kwargs):
"""
Return the cross section and generator filter efficiency
*client*: AMIClient
*dataset*: str
*kwargs*: dict
"""
infos = get_event_info(client, dataset, **kwargs)
if len(infos) > 1:
raise ValueError('Dataset %s has multiple parent event generator datasets' % dataset)
elif not infos:
raise ValueError('Event info not found for dataset %s' % dataset)
info = infos[0]
try:
xsec = float(info.extra['crossSection_mean'])
except KeyError:
raise ValueError('No cross section listed for dataset %s' % dataset)
try:
effic = float(info.extra['GenFiltEff_mean'])
except KeyError:
raise ValueError('No generator filter efficiency listed for dataset %s' % dataset)
return xsec, effic
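# Usage sketch (given an AMIClient instance `client`; the dataset name is a placeholder):
#   xsec, effic = get_dataset_xsec_effic(client, 'mc11_7TeV.<dsid>.<physics_short>.merge.NTUP_X.<tag>')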
def get_dataset_xsec_min_max_effic(client, dataset, **kwargs):
"""
Return the cross section mean, min, max, and generator filter efficiency
*client*: AMIClient
*dataset*: str
*kwargs*: dict
"""
infos = get_event_info(client, dataset, **kwargs)
if len(infos) > 1:
raise ValueError('Dataset %s has multiple parent event generator datasets' % dataset)
elif not infos:
raise ValueError('Event info not found for dataset %s' % dataset)
info = infos[0]
try:
xsec = float(info.extra['crossSection_mean'])
except KeyError:
raise ValueError('No cross section listed for dataset %s' % dataset)
try:
xsec_min = float(info.properties['crossSection']['min'])
xsec_max = float(info.properties['crossSection']['max'])
except KeyError:
raise ValueError('No cross section min or max listed for dataset %s' % dataset)
try:
effic = float(info.extra['GenFiltEff_mean'])
except KeyError:
raise ValueError('No generator filter efficiency listed for dataset %s' % dataset)
return xsec, xsec_min, xsec_max, effic
def get_data_datasets(client,
tag_pattern=None,
periods=None,
project=PROJECT,
stream=STREAM,
type=TYPE,
prod_step=PRODSTEP,
parent_type=None,
grl=None,
fields=None,
latest=False,
flatten=False,
**kwargs):
"""
*client*: AMIClient
*tag_pattern*: [ str | None ]
*periods*: [ list | tuple | str | None ]
*project*: str
*stream*: str
*type*: str
*prod_step*: str
*parent_type*: str
*fields*: [ list | tuple | str | None ]
*latest*: bool
*flatten*: bool
Returns a list of dicts if flatten==False
else list of tuples with elements in same order as fields
"""
# Transmit period(s) as kwargs in order to do only one query
if periods is not None:
if isinstance(periods, basestring):
periods = periods.split(',')
kwargs['period'] = periods
if grl is not None:
# need to be compatible with Python 2.4
# so no ElementTree here...
from xml.dom import minidom
doc = minidom.parse(grl)
run_nodes = doc.getElementsByTagName('Run')
runs = []
for node in run_nodes:
runs.append(int(node.childNodes[0].data))
kwargs['run'] = runs
datasets = get_datasets(client, tag_pattern, fields=fields,
project=project, stream=stream, type=type,
prod_step=prod_step,
parent_type=parent_type,
**kwargs)
if latest:
if type.startswith('NTUP'):
VERSION_PATTERN = NTUP_VERSION_PATTERN
elif type.startswith('AOD'):
VERSION_PATTERN = AOD_VERSION_PATTERN
elif type.startswith('ESD'):
VERSION_PATTERN = ESD_VERSION_PATTERN
else:
raise TypeError('\'latest\' not implemented for type %s' % type)
ds_unique = {}
for ds in datasets:
name = ds['logicalDatasetName']
match = re.match(DATA_PATTERN, name)
if match:
new_version = re.match(VERSION_PATTERN, match.group('version'))
if not new_version:
continue
run = int(match.group('run'))
if run not in ds_unique:
ds_unique[run] = ds
else:
curr_version = re.match(VERSION_PATTERN, re.match(DATA_PATTERN, ds_unique[run]['logicalDatasetName']).group('version'))
if type.startswith('NTUP'):
if new_version.group('la') == 'r' and curr_version.group('la') == 'f' or \
((new_version.group('la') == curr_version.group('la') and \
int(new_version.group('lb')) >= int(curr_version.group('lb')) and \
int(new_version.group('mb')) >= int(curr_version.group('mb')) and \
int(new_version.group('rb')) >= int(curr_version.group('rb')))):
ds_unique[run] = ds
elif type.startswith('AOD'):
if new_version.group('la') == 'r' and curr_version.group('la') == 'f' or \
((new_version.group('la') == curr_version.group('la') and \
int(new_version.group('lb')) >= int(curr_version.group('lb')) and \
int(new_version.group('mb')) >= int(curr_version.group('mb')))):
ds_unique[run] = ds
elif type.startswith('ESD'):
if new_version.group('la') == 'r' and curr_version.group('la') == 'f' or \
((new_version.group('la') == curr_version.group('la') and \
int(new_version.group('lb')) >= int(curr_version.group('lb')))):
ds_unique[run] = ds
datasets = ds_unique.values()
datasets.sort()
if flatten:
fields = parse_fields(fields, DATASET_TABLE)
fields.append('logicalDatasetName')
return flatten_results(datasets, fields)
return datasets
# does not work...
def get_configtagfields(client, tag, *args, **kwargs):
"""
*client*: AMIClient
*tag*: str
*args*: tuple
tuple of args to add to AMI command
*kwargs*: dict
        dict of keyword args to add to AMI command as key=value
"""
argv = ['ListConfigTagFields',
'configTag=%s' % tag]
argv.extend(args)
for name, value in kwargs.items():
argv.append("%s='%s'" % (name, value))
result = client.execute(argv)
return result
def get_configtags(client, tag, *args, **kwargs):
"""
*client*: AMIClient
*tag*: str
*args*: tuple
tuple of args to add to AMI command
*kwargs*: dict
dict of keyword args to add to AMI commmand as key=value
"""
argv = ['ListConfigurationTag',
'configTag=%s' % tag]
argv.extend(args)
for name, value in kwargs.items():
argv.append("%s='%s'" % (name, value))
result = client.execute(argv)
return [row for row in result.rows()]
def get_files(client, dataset, limit=None):
"""
*client*: AMIClient
*dataset*: str
*limit*: [ tuple | list | int | None ]
"""
dataset = _clean_dataset(dataset)
args = ['ListFiles', 'logicalDatasetName=%s' % dataset]
if limit is not None:
if isinstance(limit, (list, tuple)):
limit = 'limit=%i,%i' % tuple(limit)
else:
limit = 'limit=0,%i' % limit
args.append(limit)
result = client.execute(args)
return result.rows()
def list_files(client, dataset, limit=None, total=False, human_readable=False, long=False, stream=None):
"""
*client*: AMIClient
*dataset*: str
*limit*: [ tuple | list | int | None ]
*total*: bool
*human_readable*: bool
*long*: bool
*stream*: file
"""
if stream is None:
stream = sys.stdout
if long:
table = []
total_size = 0
total_events = 0
for file in get_files(client, dataset, limit=limit):
size = file['fileSize']
if size != 'NULL':
total_size += int(size)
if human_readable:
size = humanize_bytes(int(size))
events = file['events']
if events != 'NULL':
total_events += int(events)
table.append(["size: %s" % size, "events: %s" % events, file['LFN'], "GUID: %s" % file['fileGUID']])
if total:
if human_readable:
total_size = humanize_bytes(total_size)
table.append(["size: %s" % total_size, "events: %i" % total_events, "total", ""])
print_table(table, stream=stream)
else:
for file in get_files(client, dataset, limit=limit):
print >> stream, file['LFN']
| gpl-3.0 | -2,716,299,493,972,413,000 | 32.720605 | 139 | 0.528378 | false | 4.271641 | false | false | false |
Arpaso/alphabetic-simple | src/alphabetic/models.py | 1 | 1038 | ### -*- coding: utf-8 -*- ####################################################
from django.core.cache import cache
SESSION_GROUP_KEY = 'alphabetic_default_group'
DEFAULT_GROUP = 'rus'
CACHE_SECOND_PREFIX = 'alphabetic_second'
def get_group(request):
return request.session.get(SESSION_GROUP_KEY, DEFAULT_GROUP)
def set_group(request, group_key):
request.session[SESSION_GROUP_KEY] = group_key
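# Usage sketch (hypothetical view code; 'eng' is an example group key):
#   set_group(request, 'eng')
#   group = get_group(request)   # falls back to DEFAULT_GROUP ('rus')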
def get_cache_key(queryset, letter, cache_params):
"""Generates unique cache key"""
try:
ident_class = queryset.model.__name__
except AttributeError:
ident_class = ''
return "_".join([CACHE_SECOND_PREFIX, ident_class, letter]+list(cache_params))
def get_second_level(queryset, letter, cache_params):
key = get_cache_key(queryset, letter, cache_params)
if key:
return cache.get(key)
def set_second_level(queryset, letter, second_level, timeout, cache_params):
key = get_cache_key(queryset, letter, cache_params)
if key:
cache.set(key, second_level, timeout)
| mit | 2,268,775,377,863,402,800 | 31.4375 | 82 | 0.647399 | false | 3.591696 | false | false | false |
tilezen/joerd | setup.py | 2 | 1627 | from setuptools import setup, find_packages
version = '0.0.1'
setup(name='joerd',
version=version,
description="A tool for downloading and generating elevation data.",
long_description=open('README.md').read(),
classifiers=[
# strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Scientific/Engineering :: GIS',
'Topic :: Utilities',
],
keywords='map dem elevation raster',
author='Matt Amos, Mapzen',
author_email='[email protected]',
url='https://github.com/mapzen/joerd',
license='MIT',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
'GDAL',
'beautifulsoup4',
'requests',
'numpy',
'PyYAML',
'pyqtree',
'geographiclib',
'boto3',
'contextlib2',
],
test_suite='tests',
tests_require=[
'httptestserver',
],
entry_points=dict(
console_scripts=[
'joerd = joerd.command:joerd_main',
]
)
)
| mit | -4,738,945,808,598,778,000 | 30.901961 | 79 | 0.553165 | false | 4.193299 | false | false | false |
yuanming-hu/taichi | examples/mpm_lagrangian_forces.py | 1 | 5908 | import numpy as np
import taichi as ti
ti.init(arch=ti.gpu)
dim = 2
quality = 1 # Use a larger integral number for higher quality
n_particle_x = 100 * quality
n_particle_y = 8 * quality
n_particles = n_particle_x * n_particle_y
n_elements = (n_particle_x - 1) * (n_particle_y - 1) * 2
n_grid = 64 * quality
dx = 1 / n_grid
inv_dx = 1 / dx
dt = 1e-4 / quality
E = 25000
p_mass = 1
p_vol = 1
mu = 1
la = 1
x = ti.Vector.field(dim, dtype=float, shape=n_particles, needs_grad=True)
v = ti.Vector.field(dim, dtype=float, shape=n_particles)
C = ti.Matrix.field(dim, dim, dtype=float, shape=n_particles)
grid_v = ti.Vector.field(dim, dtype=float, shape=(n_grid, n_grid))
grid_m = ti.field(dtype=float, shape=(n_grid, n_grid))
restT = ti.Matrix.field(dim, dim, dtype=float, shape=n_particles)
total_energy = ti.field(dtype=float, shape=(), needs_grad=True)
vertices = ti.field(dtype=ti.i32, shape=(n_elements, 3))
@ti.func
def mesh(i, j):
return i * n_particle_y + j
@ti.func
def compute_T(i):
a = vertices[i, 0]
b = vertices[i, 1]
c = vertices[i, 2]
ab = x[b] - x[a]
ac = x[c] - x[a]
return ti.Matrix([[ab[0], ac[0]], [ab[1], ac[1]]])
@ti.kernel
def initialize():
for i in range(n_particle_x):
for j in range(n_particle_y):
t = mesh(i, j)
x[t] = [0.1 + i * dx * 0.5, 0.7 + j * dx * 0.5]
v[t] = [0, -1]
# build mesh
for i in range(n_particle_x - 1):
for j in range(n_particle_y - 1):
# element id
eid = (i * (n_particle_y - 1) + j) * 2
vertices[eid, 0] = mesh(i, j)
vertices[eid, 1] = mesh(i + 1, j)
vertices[eid, 2] = mesh(i, j + 1)
eid = (i * (n_particle_y - 1) + j) * 2 + 1
vertices[eid, 0] = mesh(i, j + 1)
vertices[eid, 1] = mesh(i + 1, j + 1)
vertices[eid, 2] = mesh(i + 1, j)
for i in range(n_elements):
restT[i] = compute_T(i) # Compute rest T
@ti.kernel
def compute_total_energy():
for i in range(n_elements):
currentT = compute_T(i)
F = currentT @ restT[i].inverse()
# NeoHookean
I1 = (F @ F.transpose()).trace()
J = F.determinant()
element_energy = 0.5 * mu * (
I1 - 2) - mu * ti.log(J) + 0.5 * la * ti.log(J)**2
total_energy[None] += E * element_energy * dx * dx
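# Energy density used above (Neo-Hookean), per element and scaled by E and the element area dx*dx:
#   psi(F) = mu/2 * (tr(F F^T) - 2) - mu * log(J) + la/2 * log(J)**2,  with J = det(F)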
@ti.kernel
def p2g():
for p in x:
base = ti.cast(x[p] * inv_dx - 0.5, ti.i32)
fx = x[p] * inv_dx - ti.cast(base, float)
w = [0.5 * (1.5 - fx)**2, 0.75 - (fx - 1)**2, 0.5 * (fx - 0.5)**2]
affine = p_mass * C[p]
for i in ti.static(range(3)):
for j in ti.static(range(3)):
I = ti.Vector([i, j])
dpos = (float(I) - fx) * dx
weight = w[i].x * w[j].y
grid_v[base + I] += weight * (p_mass * v[p] - dt * x.grad[p] +
affine @ dpos)
grid_m[base + I] += weight * p_mass
bound = 3
@ti.kernel
def grid_op():
for i, j in grid_m:
if grid_m[i, j] > 0:
inv_m = 1 / grid_m[i, j]
grid_v[i, j] = inv_m * grid_v[i, j]
grid_v[i, j].y -= dt * 9.8
# center collision circle
dist = ti.Vector([i * dx - 0.5, j * dx - 0.5])
if dist.norm_sqr() < 0.005:
dist = dist.normalized()
grid_v[i, j] -= dist * min(0, grid_v[i, j].dot(dist))
# box
if i < bound and grid_v[i, j].x < 0:
grid_v[i, j].x = 0
if i > n_grid - bound and grid_v[i, j].x > 0:
grid_v[i, j].x = 0
if j < bound and grid_v[i, j].y < 0:
grid_v[i, j].y = 0
if j > n_grid - bound and grid_v[i, j].y > 0:
grid_v[i, j].y = 0
@ti.kernel
def g2p():
for p in x:
base = ti.cast(x[p] * inv_dx - 0.5, ti.i32)
fx = x[p] * inv_dx - float(base)
w = [0.5 * (1.5 - fx)**2, 0.75 - (fx - 1.0)**2, 0.5 * (fx - 0.5)**2]
new_v = ti.Vector([0.0, 0.0])
new_C = ti.Matrix([[0.0, 0.0], [0.0, 0.0]])
for i in ti.static(range(3)):
for j in ti.static(range(3)):
I = ti.Vector([i, j])
dpos = float(I) - fx
g_v = grid_v[base + I]
weight = w[i].x * w[j].y
new_v += weight * g_v
new_C += 4 * weight * g_v.outer_product(dpos) * inv_dx
v[p] = new_v
x[p] += dt * v[p]
C[p] = new_C
gui = ti.GUI("MPM", (640, 640), background_color=0x112F41)
def main():
initialize()
vertices_ = vertices.to_numpy()
while gui.running and not gui.get_event(gui.ESCAPE):
for s in range(int(1e-2 // dt)):
grid_m.fill(0)
grid_v.fill(0)
# Note that we are now differentiating the total energy w.r.t. the particle position.
# Recall that F = - \partial (total_energy) / \partial x
with ti.Tape(total_energy):
# Do the forward computation of total energy and backward propagation for x.grad, which is later used in p2g
compute_total_energy()
# It's OK not to use the computed total_energy at all, since we only need x.grad
p2g()
grid_op()
g2p()
gui.circle((0.5, 0.5), radius=45, color=0x068587)
particle_pos = x.to_numpy()
a = vertices_.reshape(n_elements * 3)
b = np.roll(vertices_, shift=1, axis=1).reshape(n_elements * 3)
gui.lines(particle_pos[a], particle_pos[b], radius=1, color=0x4FB99F)
gui.circles(particle_pos, radius=1.5, color=0xF2B134)
gui.line((0.00, 0.03 / quality), (1.0, 0.03 / quality),
color=0xFFFFFF,
radius=3)
gui.show()
if __name__ == '__main__':
main()
| mit | 2,220,515,973,470,853,600 | 30.094737 | 124 | 0.488152 | false | 2.80932 | false | false | false |
dm6718/RITSAR | ritsar/phsRead.py | 1 | 12945 | #Include dependencies
import numpy as np
from numpy import pi
from numpy.linalg import norm
from scipy.io import loadmat
from scipy.stats import linregress
from fnmatch import fnmatch
import os
import sys
import xml.etree.ElementTree as ET
def AFRL(directory, pol, start_az, n_az=3):
##############################################################################
# #
# This function reads in the AFRL *.mat files from the user supplied #
# directory and exports both the phs and a Python dictionary compatible #
# with ritsar. #
# #
##############################################################################
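    # Usage sketch (path and polarization below are illustrative, not shipped with ritsar):
    #   phs, platform = phsRead.AFRL('/path/to/AFRL/pass1', pol='HH', start_az=37, n_az=3)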
#Check Python version
version = sys.version_info
#Get filenames
walker = os.walk(directory+'/'+pol)
if version.major < 3:
w = walker.next()
else:
w = walker.__next__()
prefix = '/'+pol+'/'+w[2][0][0:19]
az_str = []
fnames = []
az = np.arange(start_az, start_az+n_az)
[az_str.append(str('%03d_'%a)) for a in az]
[fnames.append(directory+prefix+a+pol+'.mat') for a in az_str]
#Grab n_az phase histories
phs = []; platform = []
for fname in fnames:
#Convert MATLAB structure to Python dictionary
MATdata = loadmat(fname)['data'][0][0]
data =\
{
'fp' : MATdata[0],
'freq' : MATdata[1][:,0],
'x' : MATdata[2].T,
'y' : MATdata[3].T,
'z' : MATdata[4].T,
'r0' : MATdata[5][0],
'th' : MATdata[6][0],
'phi' : MATdata[7][0],
}
#Define phase history
phs_tmp = data['fp'].T
phs.append(phs_tmp)
#Transform data to be compatible with ritsar
c = 299792458.0
nsamples = int(phs_tmp.shape[1])
npulses = int(phs_tmp.shape[0])
freq = data['freq']
pos = np.hstack((data['x'], data['y'], data['z']))
k_r = 4*pi*freq/c
B_IF = data['freq'].max()-data['freq'].min()
delta_r = c/(2*B_IF)
delta_t = 1.0/B_IF
t = np.linspace(-nsamples/2, nsamples/2, nsamples)*delta_t
chirprate, f_0, r, p, s\
= linregress(t, freq)
#Vector to scene center at synthetic aperture center
if np.mod(npulses,2)>0:
R_c = pos[npulses/2]
else:
R_c = np.mean(
pos[npulses/2-1:npulses/2+1],
axis = 0)
#Save values to dictionary for export
platform_tmp = \
{
'f_0' : f_0,
'freq' : freq,
'chirprate' : chirprate,
'B_IF' : B_IF,
'nsamples' : nsamples,
'npulses' : npulses,
'pos' : pos,
'delta_r' : delta_r,
'R_c' : R_c,
't' : t,
'k_r' : k_r,
}
platform.append(platform_tmp)
#Stack data from different azimuth files
phs = np.vstack(phs)
npulses = int(phs.shape[0])
pos = platform[0]['pos']
for i in range(1, n_az):
pos = np.vstack((pos, platform[i]['pos']))
if np.mod(npulses,2)>0:
R_c = pos[npulses/2]
else:
R_c = np.mean(
pos[npulses/2-1:npulses/2+1],
axis = 0)
#Replace Dictionary values
platform = platform_tmp
platform['npulses'] = npulses
platform['pos'] = pos
platform['R_c'] = R_c
#Synthetic aperture length
L = norm(pos[-1]-pos[0])
#Add k_y
platform['k_y'] = np.linspace(-npulses/2,npulses/2,npulses)*2*pi/L
return(phs, platform)
def Sandia(directory):
##############################################################################
# #
# This function reads in the Sandia *.phs and *.au2 files from the user #
    # supplied directory and exports both the phs and a Python dictionary       #
# compatible with ritsar. #
# #
##############################################################################
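    # Usage sketch (path is illustrative; note the trailing slash, since filenames
    # are appended to `directory` directly):
    #   phs, platform = phsRead.Sandia('/path/to/sandia/pass1/')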
    #get filename containing auxiliary data
for file in os.listdir(directory):
if fnmatch(file, '*.au2'):
aux_fname = directory+file
    #import auxiliary data
f=open(aux_fname,'rb')
#initialize tuple
record=['blank'] #first record blank to ensure
#indices match record numbers
#record 1
data = np.fromfile(f, dtype = np.dtype([
('version','S6'),
('phtype','S6'),
('phmode','S6'),
('phgrid','S6'),
('phscal','S6'),
('cbps','S6')
]),count=1)
record.append(data[0])
#record 2
f.seek(44)
data = np.fromfile(f, dtype = np.dtype([
('npulses','i4'),
('nsamples','i4'),
('ipp_start','i4'),
('ddas','f4',(5,)),
('kamb','i4')
]),count=1)
record.append(data[0])
#record 3
f.seek(44*2)
data = np.fromfile(f, dtype = np.dtype([
('fpn','f4',(3,)),
('grp','f4',(3,)),
('cdpstr','f4'),
('cdpstp','f4')
]),count=1)
record.append(data[0])
#record 4
f.seek(44*3)
data = np.fromfile(f, dtype = np.dtype([
('f0','f4'),
('fs','f4'),
('fdot','f4'),
('r0','f4')
]),count=1)
record.append(data[0])
    #record 5 (blank); cf. rvr_au_read.py
f.seek(44*4)
data = []
record.append(data)
#record 6
npulses = record[2]['npulses']
rpoint = np.zeros([npulses,3])
deltar = np.zeros([npulses,])
fscale = np.zeros([npulses,])
c_stab = np.zeros([npulses,3])
#build up arrays for record(npulses+6)
for n in range(npulses):
f.seek((n+5)*44)
data = np.fromfile(f, dtype = np.dtype([
('rpoint','f4',(3,)),
('deltar','f4'),
('fscale','f4'),
('c_stab','f8',(3,))
]),count=1)
rpoint[n,:] = data[0]['rpoint']
deltar[n] = data[0]['deltar']
fscale[n] = data[0]['fscale']
c_stab[n,:] = data[0]['c_stab']
#consolidate arrays into a 'data' dataype
dt = np.dtype([
('rpoint','f4',(npulses,3)),
('deltar','f4',(npulses,)),
('fscale','f4',(npulses,)),
('c_stab','f8',(npulses,3))
])
data = np.array((rpoint,deltar,fscale,c_stab)
,dtype=dt)
#write to record file
record.append(data)
#import phase history
for file in os.listdir(directory):
if fnmatch(file, '*.phs'):
phs_fname = directory+file
nsamples = record[2][1]
npulses = record[2][0]
f=open(phs_fname,'rb')
dt = np.dtype('i2')
phs = np.fromfile(f, dtype=dt, count=-1)
real = phs[0::2].reshape([npulses,nsamples])
imag = phs[1::2].reshape([npulses,nsamples])
phs = real+1j*imag
#Create platform dictionary
c = 299792458.0
pos = record[6]['rpoint']
n_hat = record[3]['fpn']
delta_t = record[4]['fs']
t = np.linspace(-nsamples/2, nsamples/2, nsamples)*1.0/delta_t
chirprate = record[4]['fdot']*1.0/(2*pi)
f_0 = record[4]['f0']*1.0/(2*pi) + chirprate*nsamples/(2*delta_t)
B_IF = (t.max()-t.min())*chirprate
delta_r = c/(2*B_IF)
freq = f_0+chirprate*t
omega = 2*pi*freq
k_r = 2*omega/c
if np.mod(npulses,2)>0:
R_c = pos[npulses/2]
else:
R_c = np.mean(
pos[npulses/2-1:npulses/2+1],
axis = 0)
platform = \
{
'f_0' : f_0,
'chirprate' : chirprate,
'B_IF' : B_IF,
'nsamples' : nsamples,
'npulses' : npulses,
'delta_r' : delta_r,
'pos' : pos,
'R_c' : R_c,
't' : t,
'k_r' : k_r,
'n_hat' : n_hat
}
return(phs, platform)
##############################################################################
# #
# This function reads in the DIRSIG xml data as well as the envi header #
# file from the user supplied directory. The phs and a Python dictionary #
# compatible with ritsar are returned to the function caller. #
# #
##############################################################################
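# Usage sketch (path is illustrative; note the trailing slash, since filenames are
# appended to `directory` directly; the directory must hold the DIRSIG *.hdr, *.ppd
# and *.platform outputs):
#   phs, platform = phsRead.DIRSIG('/path/to/dirsig/output/')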
def get(root, entry):
for entry in root.iter(entry):
out = entry.text
return(out)
def getWildcard(directory, char):
for file in os.listdir(directory):
if fnmatch(file, char):
fname = directory+file
return(fname)
def DIRSIG(directory):
from spectral.io import envi
#get phase history
phs_fname = getWildcard(directory, '*.hdr')
phs = envi.open(phs_fname).load(dtype = np.complex128)
phs = np.squeeze(phs)
#get platform geometry
ppd_fname = getWildcard(directory, '*.ppd')
tree = ET.parse(ppd_fname)
root = tree.getroot()
pos_dirs = []
for children in root.iter('point'):
pos_dirs.append(float(children[0].text))
pos_dirs.append(float(children[1].text))
pos_dirs.append(float(children[2].text))
pos_dirs = np.asarray(pos_dirs).reshape([len(pos_dirs)/3,3])
t_dirs=[]
for children in root.iter('datetime'):
t_dirs.append(float(children.text))
t_dirs = np.asarray(t_dirs)
#get platform system paramters
platform_fname = getWildcard(directory, '*.platform')
tree = ET.parse(platform_fname)
root = tree.getroot()
#put metadata into a dictionary
metadata = root[0]
keys = []; vals = []
for children in metadata:
keys.append(children[0].text)
vals.append(children[1].text)
metadata = dict(zip(keys,vals))
#obtain key parameters
c = 299792458.0
nsamples = int(phs.shape[1])
npulses = int(phs.shape[0])
vp = float(get(root, 'speed'))
delta_t = float(get(root, 'delta'))
t = np.linspace(-nsamples/2, nsamples/2, nsamples)*delta_t
prf = float(get(root, 'clockrate'))
chirprate = float(get(root, 'chirprate'))/pi
T_p = float(get(root, 'pulseduration'))
B = T_p*chirprate
B_IF = (t.max() - t.min())*chirprate
delta_r = c/(2*B_IF)
f_0 = float(get(root, 'center'))*1e9
freq = f_0+chirprate*t
omega = 2*pi*freq
k_r = 2*omega/c
T0 = float(get(root, 'min'))
T1 = float(get(root, 'max'))
#compute slowtime position
ti = np.linspace(0,1.0/prf*npulses, npulses)
x = np.array([np.interp(ti, t_dirs, pos_dirs[:,0])]).T
y = np.array([np.interp(ti, t_dirs, pos_dirs[:,1])]).T
z = np.array([np.interp(ti, t_dirs, pos_dirs[:,2])]).T
pos = np.hstack((x,y,z))
L = norm(pos[-1]-pos[0])
k_y = np.linspace(-npulses/2,npulses/2,npulses)*2*pi/L
#Vector to scene center at synthetic aperture center
if np.mod(npulses,2)>0:
R_c = pos[npulses/2]
else:
R_c = np.mean(
pos[npulses/2-1:npulses/2+1],
axis = 0)
#Derived Parameters
if np.mod(nsamples,2)==0:
T = np.arange(T0, T1+0*delta_t, delta_t)
else:
T = np.arange(T0, T1, delta_t)
#Mix signal
signal = np.zeros(phs.shape)+0j
for i in range(0,npulses,1):
r_0 = norm(pos[i])
tau_c = 2*r_0/c
ref = np.exp(-1j*(2*pi*f_0*(T-tau_c)+pi*chirprate*(T-tau_c)**2))
signal[i,:] = ref*phs[i,:]
platform = \
{
'f_0' : f_0,
'freq' : freq,
'chirprate' : chirprate,
'B' : B,
'B_IF' : B_IF,
'nsamples' : nsamples,
'npulses' : npulses,
'delta_r' : delta_r,
'delta_t' : delta_t,
'vp' : vp,
'pos' : pos,
'R_c' : R_c,
't' : t,
'k_r' : k_r,
'k_y' : k_y,
'metadata' : metadata
}
return(signal, platform)
| mit | -1,367,512,936,624,855,000 | 30.49635 | 78 | 0.449903 | false | 3.285533 | false | false | false |
jfelectron/python-driver | cassandra/murmur3.py | 15 | 2387 | from six.moves import range
import struct
def body_and_tail(data):
l = len(data)
nblocks = l // 16
tail = l % 16
if nblocks:
return struct.unpack_from('qq' * nblocks, data), struct.unpack_from('b' * tail, data, -tail), l
else:
return tuple(), struct.unpack_from('b' * tail, data, -tail), l
def rotl64(x, r):
# note: not a general-purpose function because it leaves the high-order bits intact
# suitable for this use case without wasting cycles
mask = 2 ** r - 1
rotated = (x << r) | ((x >> 64 - r) & mask)
return rotated
def fmix(k):
    # keep only the 31 bits a 64-bit value would retain after >> 33 (Python ints are unbounded, so mask explicitly)
k ^= (k >> 33) & 0x7fffffff
k *= 0xff51afd7ed558ccd
k ^= (k >> 33) & 0x7fffffff
k *= 0xc4ceb9fe1a85ec53
k ^= (k >> 33) & 0x7fffffff
return k
INT64_MAX = int(2 ** 63 - 1)
INT64_MIN = -INT64_MAX - 1
INT64_OVF_OFFSET = INT64_MAX + 1
INT64_OVF_DIV = 2 * INT64_OVF_OFFSET
def truncate_int64(x):
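    # wrap values into the signed 64-bit range so results match C's int64 overflow behaviour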
if not INT64_MIN <= x <= INT64_MAX:
x = (x + INT64_OVF_OFFSET) % INT64_OVF_DIV - INT64_OVF_OFFSET
return x
def _murmur3(data):
h1 = h2 = 0
c1 = -8663945395140668459 # 0x87c37b91114253d5
c2 = 0x4cf5ad432745937f
body, tail, total_len = body_and_tail(data)
# body
for i in range(0, len(body), 2):
k1 = body[i]
k2 = body[i + 1]
k1 *= c1
k1 = rotl64(k1, 31)
k1 *= c2
h1 ^= k1
h1 = rotl64(h1, 27)
h1 += h2
h1 = h1 * 5 + 0x52dce729
k2 *= c2
k2 = rotl64(k2, 33)
k2 *= c1
h2 ^= k2
h2 = rotl64(h2, 31)
h2 += h1
h2 = h2 * 5 + 0x38495ab5
# tail
k1 = k2 = 0
len_tail = len(tail)
if len_tail > 8:
for i in range(len_tail - 1, 7, -1):
k2 ^= tail[i] << (i - 8) * 8
k2 *= c2
k2 = rotl64(k2, 33)
k2 *= c1
h2 ^= k2
if len_tail:
for i in range(min(7, len_tail - 1), -1, -1):
k1 ^= tail[i] << i * 8
k1 *= c1
k1 = rotl64(k1, 31)
k1 *= c2
h1 ^= k1
# finalization
h1 ^= total_len
h2 ^= total_len
h1 += h2
h2 += h1
h1 = fmix(h1)
h2 = fmix(h2)
h1 += h2
return truncate_int64(h1)
try:
from cassandra.cmurmur3 import murmur3
except ImportError:
murmur3 = _murmur3
| apache-2.0 | -2,561,335,672,573,787,600 | 20.123894 | 103 | 0.518224 | false | 2.664063 | false | false | false |
drogenlied/qudi | logic/laser_logic.py | 1 | 5941 | #-*- coding: utf-8 -*-
"""
Laser management.
Qudi is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Qudi is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Qudi. If not, see <http://www.gnu.org/licenses/>.
Copyright (c) the Qudi Developers. See the COPYRIGHT.txt file at the
top-level directory of this distribution and at <https://github.com/Ulm-IQO/qudi/>
"""
from qtpy import QtCore
import numpy as np
import time
from logic.generic_logic import GenericLogic
from interface.simple_laser_interface import ControlMode, ShutterState, LaserState
class LaserLogic(GenericLogic):
""" Logic module agreggating multiple hardware switches.
"""
_modclass = 'laser'
_modtype = 'logic'
_in = {'laser': 'SimpleLaserInterface'}
_out = {'laserlogic': 'LaserLogic'}
sigUpdate = QtCore.Signal()
def on_activate(self, e):
""" Prepare logic module for work.
@param object e: Fysom state change notification
"""
self._laser = self.get_in_connector('laser')
self.stopRequest = False
self.bufferLength = 100
self.data = {}
        # waiting time between queries in milliseconds
self.queryInterval = 100
# delay timer for querying laser
self.queryTimer = QtCore.QTimer()
self.queryTimer.setInterval(self.queryInterval)
self.queryTimer.setSingleShot(True)
self.queryTimer.timeout.connect(self.check_laser_loop, QtCore.Qt.QueuedConnection)
# get laser capabilities
self.laser_shutter = self._laser.get_shutter_state()
self.laser_power_range = self._laser.get_power_range()
self.laser_extra = self._laser.get_extra_info()
self.laser_state = self._laser.get_laser_state()
self.laser_can_turn_on = self.laser_state.value <= LaserState.ON.value
self.laser_can_power = ControlMode.POWER in self._laser.allowed_control_modes()
self.laser_can_current = ControlMode.CURRENT in self._laser.allowed_control_modes()
if ControlMode.MIXED in self._laser.allowed_control_modes():
self.laser_can_power = True
self.laser_can_current = True
self.has_shutter = self._laser.get_shutter_state() != ShutterState.NOSHUTTER
self.init_data_logging()
#QtCore.QTimer.singleShot(100, self.start_query_loop)
self.start_query_loop()
def on_deactivate(self, e):
""" Deactivate modeule.
@param object e: Fysom state change notification
"""
self.stop_query_loop()
@QtCore.Slot()
def check_laser_loop(self):
""" """
if self.stopRequest:
self.stop()
self.stopRequest = False
return
self.laser_state = self._laser.get_laser_state()
self.laser_shutter = self._laser.get_shutter_state()
self.laser_power = self._laser.get_power()
self.laser_current = self._laser.get_current()
self.laser_temps = self._laser.get_temperatures()
for k in self.data:
self.data[k] = np.roll(self.data[k], -1)
self.data['power'][-1] = self.laser_power
self.data['current'][-1] = self.laser_current
self.data['time'][-1] = time.time()
for k,v in self.laser_temps.items():
self.data[k][-1] = v
self.queryTimer.start(self.queryInterval)
self.sigUpdate.emit()
@QtCore.Slot()
def start_query_loop(self):
""" start the loop """
self.run()
self.queryTimer.start(self.queryInterval)
@QtCore.Slot()
def stop_query_loop(self):
""" stop loop """
self.stopRequest = True
for i in range(10):
if not self.stopRequest:
return
QtCore.QCoreApplication.processEvents()
time.sleep(self.queryInterval/1000)
def init_data_logging(self):
""" """
self.data['current'] = np.zeros(self.bufferLength)
self.data['power'] = np.zeros(self.bufferLength)
self.data['time'] = np.ones(self.bufferLength) * time.time()
temps = self._laser.get_temperatures()
for name in temps:
self.data[name] = np.zeros(self.bufferLength)
@QtCore.Slot(ControlMode)
def set_control_mode(self, mode):
""" """
if mode in self._laser.allowed_control_modes():
if mode == ControlMode.POWER:
self.laser_power = self._laser.get_power()
self._laser.set_power(self.laser_power)
self._laser.set_control_mode(mode)
elif mode == ControlMode.CURRENT:
self.laser_current = self._laser.get_current()
self._laser.set_current(self.laser_current)
self._laser.set_control_mode(mode)
@QtCore.Slot(float)
def set_laser_state(self, state):
if state and self.laser_state == LaserState.OFF:
self._laser.on()
if not state and self.laser_state == LaserState.ON:
self._laser.off()
@QtCore.Slot(bool)
def set_shutter_state(self, state):
if state and self.laser_shutter == ShutterState.CLOSED:
self._laser.set_shutter_state(ShutterState.OPEN)
if not state and self.laser_shutter == ShutterState.OPEN:
self._laser.set_shutter_state(ShutterState.CLOSED)
@QtCore.Slot(float)
def set_power(self, power):
self._laser.set_power(power)
@QtCore.Slot(float)
def set_current(self, current):
self._laser.set_current(current) | gpl-3.0 | 3,949,672,181,347,411,500 | 34.795181 | 91 | 0.63188 | false | 3.596247 | false | false | false |