text
stringlengths
29
850k
from network.server import FarProtocol, FarFactory
from network.messages import Tag
from models.mob import Player, NPC, Attack
from models.world import Room, Exit, Direction
from twisted.internet import reactor, task
from copy import deepcopy
from guppy import hpy


class Game(object):
    """Top-level MUD state: players, NPCs, mobs, rooms and command dispatch.

    Instances are driven by the Twisted reactor: FarFactory calls
    connection()/disconnection()/parser() as network events arrive.
    """

    def __init__(self):
        self.name = "FAR v0.01"
        self.players = []
        self.npcs = []       # NPC templates loaded from models/mobs1.txt
        self.mobs = []       # live (deep-copied) NPC instances placed in rooms
        self.rooms = []
        # Map wire-protocol tags to their handler methods.
        self.commandtags = [Tag("[SAY]", self.saymsg),
                            Tag("[QUIT]", self.quit),
                            Tag("[FIGHT]", self.startfight),
                            Tag("[FLEE]", self.stopfight),
                            Tag("[IDENTIFY]", self.identify),
                            Tag("[LOOK]", self.look),
                            Tag("[EXITS]", self.exits),
                            Tag("[MOVETO]", self.moveto),
                            Tag("[GO]", self.go),
                            Tag("[NPCS]", self.listnpcs),
                            Tag("[MOBS]", self.listmobs),
                            Tag("[STATS]", self.stats)]
        self.exit = False

    def tick(self):
        """Periodic world update: let wandering mobs take a step."""
        #for player in self.players:
        #player.addmessage('Tick!')
        for room in self.rooms:
            for mob in room.mobs:
                if mob.mobile:
                    mob.walk()
        print("Tick")
        #print hpy().heap()

    def combat(self):
        """Run one combat round for every fighting player and mob."""
        for player in self.players:
            if player.fighting:
                player.combat()
        for mob in self.mobs:
            if mob.fighting:
                mob.combat()

    def connection(self, connect):
        """A new client connected: create a Player and drop them in room 1."""
        p = Player(connect)
        p.addmessage('Welcome!')
        p.goto(self.rooms[1])
        self.players.append(p)

    def disconnection(self, connect):
        """A client disconnected: find and remove its Player, if any."""
        killplayer = None
        for p in self.players:
            if p.connection == connect:
                killplayer = p
        if killplayer is None:
            print("Could not find player")
        else:
            killplayer.room.player_left(killplayer)
            self.players.remove(killplayer)

    def saymsg(self, player, args):
        """Broadcast a chat line from `player` to every other player."""
        player.addmessage('You said: %s' % '|'.join(args))
        for p in self.players:
            if p != player:
                p.addmessage('%s: %s' % (player.name, ' '.join(args)))

    def quit(self, player, args):
        player.addmessage('Bye!')
        player.exit = True

    def startfight(self, player, args):
        """Begin combat against the mob numbered args[0] in the player's room."""
        if len(args) == 1:
            player.target = self.findmob(args[0], player.room)
            if player.target is None:
                player.addmessage("I dont see them.")
            else:
                player.addmessage("You attack %s!" % player.target.name)
                player.fighting = True
                player.target.target = player
                player.target.fighting = True
        else:
            player.addmessage("Who do you want to fight?")

    def stopfight(self, player, args):
        """Flee from combat, releasing both sides of the fight."""
        player.addmessage("You run away screaming.")
        player.fighting = False
        # BUG FIX: fleeing with no active target used to crash with an
        # AttributeError on None; only untangle the target if one exists.
        if player.target is not None:
            player.target.fighting = False
            player.target.target = None
            player.target = None

    def identify(self, player, args):
        """Set the player's display name from args[0]."""
        if len(args) == 1:
            player.name = args[0]
            player.identified = True
            player.addmessage("Welcome, %s" % player.name)

    def look(self, player, args):
        if len(args) == 0:
            player.look()
            #player.addmessage("[%d] %s\r\n%s" % (player.room.number,
            #                                     player.room.short_description,
            #                                     player.room.long_description))

    def stats(self, player, args):
        if len(args) == 0:
            player.stats()

    def moveto(self, player, args):
        """Teleport the player to the room numbered args[0]."""
        if len(args) == 1:
            newroom = self.findroom(args[0])
            print("Player %s moving to [%s]%s" % (player.name, args[0],
                                                  newroom.short_description))
            player.goto(newroom)

    def go(self, player, args):
        """Move the player through the exit whose direction is args[0]."""
        if len(args) == 1:
            direction = int(args[0])
            dest = None
            for e in player.room.exits:
                if e.direction == direction:
                    dest = e.room
            # BUG FIX: the original called moveto() before checking for a
            # missing exit, dereferencing None; check first instead.
            if dest is None:
                player.addmessage("[BADEXIT]")
            else:
                self.moveto(player, [dest.number])

    def listnpcs(self, player, args):
        for n in self.npcs:
            if n is not None:
                player.addmessage("[%s] %s" % (n.number, n.name))

    def listmobs(self, player, args):
        for m in self.mobs:
            player.addmessage("[%s] %s : [%s] %s" % (m.number, m.name,
                                                     m.room.number,
                                                     m.room.short_description))

    def exits(self, player, args):
        if len(args) == 0:
            for e in player.room.exits:
                player.addmessage("[%s] %s" % (Direction.STRINGS[e.direction],
                                               e.room.short_description))

    def findroom(self, roomnum):
        """Return the room with number `roomnum`, or room 0 ('Nowhere')."""
        for r in self.rooms:
            if r.number == int(roomnum):
                return r
        return self.rooms[0]

    def findnpc(self, npcnum):
        """Return the NPC template numbered `npcnum`, or the first template."""
        for n in self.npcs:
            if n.number == int(npcnum):
                return n
        return self.npcs[0]

    def findmob(self, npcnum, room):
        """Return the live mob numbered `npcnum` in `room`, or None."""
        for m in room.mobs:
            if m.number == int(npcnum):
                return m
        return None

    def parser(self, player, line):
        """Dispatch a pipe-delimited protocol line to its tag handler."""
        parts = line.split('|')
        success = False
        for t in self.commandtags:
            if t.name == parts[0]:
                if len(parts) > 1:
                    p = parts[1:]
                else:
                    p = []
                t.action(player, p)
                success = True
        if not success:
            print('No such tag')

    def loadstatic(self):
        """Load rooms, NPC templates and mob placements from the data files.

        Each file is a sequence of records; `iterator` counts non-comment
        lines within a record.  When the record-terminating count is reached
        the record is committed and `iterator` resets to 1, so the same line
        is immediately re-parsed as the first field of the next record.
        """
        # BUG FIX: files were opened and never closed; use `with` blocks.
        iterator = 0
        exits = []
        with open('models/world1.txt', 'r') as f:
            for line in f:
                if line[0] != "#":
                    iterator += 1
                    if iterator == 5:
                        print("Adding room: [%d] %s" % (rnum, sdesc))
                        self.rooms.append(Room(rnum, sdesc, ldesc))
                        iterator = 1
                    if iterator == 1:
                        rnum = int(line)
                    if iterator == 2:
                        sdesc = line.rstrip()
                    if iterator == 3:
                        ldesc = line.rstrip()
                    if iterator == 4:
                        # Pipe-separated destination room numbers, one slot
                        # per direction; 0 means no exit that way.
                        direction = 1
                        for e in line.split('|'):
                            if int(e) > 0:
                                exits.append([rnum, direction, int(e)])
                            direction += 1
        # Exits can reference rooms defined later, so wire them up only
        # after every room exists.
        for e in exits:
            fromroom = self.findroom(e[0])
            toroom = self.findroom(e[2])
            fromroom.connect_room(e[1], toroom)
            print("Adding exit from %s to %s" % (fromroom.number, toroom.number))

        iterator = 0
        attacks = []
        with open('models/mobs1.txt', 'r') as f:
            for line in f:
                if line[0] != "#":
                    iterator += 1
                    if iterator == 13:
                        print("Adding NPC: [%d] %s" % (number, name))
                        self.npcs.append(NPC(number, name, desc, level, hp,
                                             attacks, mobile))
                        attacks = []
                        iterator = 1
                    if iterator == 1:
                        number = int(line)
                    if iterator == 2:
                        name = line.rstrip()
                    if iterator == 3:
                        desc = line.rstrip()
                    if iterator == 4:
                        level = int(line)
                    if iterator == 5:
                        hp = int(line)
                    if iterator in range(6, 11):
                        # Up to five attack lines of "count|sides|bonus" dice.
                        dice = line.split('|')
                        if len(dice) == 3:
                            attacks.append(Attack(int(dice[0]), int(dice[1]),
                                                  int(dice[2])))
                    if iterator == 12:
                        if int(line) == 1:
                            mobile = True
                        else:
                            mobile = False

        with open('models/populate1.txt', 'r') as f:
            for line in f:
                data = line.split('|')
                if len(data) == 2:
                    npcnum = int(data[0])
                    roomnum = int(data[1])
                    # Deep-copy the template so each placed mob has
                    # independent state (hp, target, room...).
                    newmob = deepcopy(self.findnpc(npcnum))
                    newmob.goto(self.findroom(roomnum))
                    self.mobs.append(newmob)
                    print("Placed [%d] %s in room %d" % (newmob.number,
                                                         newmob.name, roomnum))


if __name__ == '__main__':
    g = Game()
    # Room 0 is the fallback "Nowhere"; real rooms come from the static
    # load file via loadstatic().
    g.rooms = [Room(0, 'Nowhere', 'This is nowhere, man.')]
    #Room(1, 'The Square', 'This is the center of town.',
    #     [Exit(Direction.EAST, 2), Exit(Direction.WEST, 3)]),
    #Room(2, 'Main Street', 'Walking along the main street', [Exit(Direction.WEST, 1)]),
    #Room(3, 'Main Street', 'Walking along the main street', [Exit(Direction.EAST, 1)])]
    g.loadstatic()
    reactor.listenTCP(4000, FarFactory(g))
    reactor.run()
I was searching for a Property and found this listing (MLS® #A4425045). Please send me more information regarding 1810 E Beach Dr, Bradenton, FL, 34207. Thank you! I'd like to request a showing of 1810 E Beach Dr, Bradenton, FL, 34207 (MLS® #A4425045). Thank you!
import discord
import discord.utils
import asyncio
import os.path
import re
import sys       # BUG FIX: used by the error paths below but never imported
import time      # BUG FIX: used by the error paths below but never imported
import markovify
import random
import datetime
from chatBot.settings import JSONSettings

prog_path = os.path.dirname(os.path.abspath(__file__))
default_settings = {
    "Discord token": "",
    "Source type (channel or user)": "",
    "Source": [""],
    "Target channel": "",
    "Response frequency (%)": "25",
    "Chat idle allowed (m)": "10",
    "Sample size per source": "10000",
    "Allow Mentions": "false"
}

# Load information
settings = JSONSettings(prog_path, default_settings)

# Create new discord client
client = discord.Client()
last_recieved = datetime.datetime.now()


def remove_emojii(text):
    """Strip emoji/pictograph codepoints so text prints safely to a console."""
    emoji_pattern = re.compile(
        "["
        u"\U0001F600-\U0001F64F"  # emoticons
        u"\U0001F300-\U0001F5FF"  # symbols & pictographs
        u"\U0001F680-\U0001F6FF"  # transport & map symbols
        u"\U0001F1E0-\U0001F1FF"  # flags (iOS)
        u"\U00002702-\U000027B0"  # dingbats
        u"\U000024C2-\U0001F251"  # enclosed characters
        u"\U0001F681-\U0001F6C5"  # additional transport
        u"\U0001F30D-\U0001F567"  # additional symbols
        u"\U0001F600-\U0001F636"  # additional emoticons
        "]+",
        flags=re.UNICODE)
    return emoji_pattern.sub(r'', text)


def remove_mentions(text):
    """Defuse @everyone/@here and turn <@id> user mentions into '-name'."""
    # BUG FIX: str.translate() was given multi-character string keys, which
    # does nothing in Python 3 (it needs a maketrans table of single chars).
    # Insert a zero-width space instead so the pings can never fire.
    text = text.replace('@everyone', '@\u200beveryone')
    text = text.replace('@here', '@\u200bhere')
    for mention_id in re.findall(r'<@!?([0-9]+)>', text):
        member = discord.utils.find(
            lambda m: str(m.id) == str(mention_id),
            find_channel(settings.get_setting('Target channel')).server.members)
        if member is not None:
            # BUG FIX: replace the whole <@id> token, not just the digits,
            # so no dangling '<@...>' wrapper is left in the output.
            text = re.sub(r'<@!?%s>' % mention_id, "-" + member.name, text)
    return text


async def auto_message_check():
    """Background task: post a message whenever the channel has idled too long."""
    global last_recieved
    while True:
        idle_limit = datetime.timedelta(
            minutes=int(settings.get_setting('Chat idle allowed (m)')))
        if ((datetime.datetime.now() - (last_recieved + idle_limit)).days >= 0):
            asyncio.ensure_future(send_response())
        await asyncio.sleep(10)


def response_roll():
    """Return True with 'Response frequency (%)' probability."""
    x = random.randint(0, 100)
    return (x <= int(settings.get_setting('Response frequency (%)')))


def safe_print(text):
    print(remove_emojii(text))


def find_channel(target_channel_name):
    """Return the first visible channel with the given name, or None."""
    channel = discord.utils.get(client.get_all_channels(),
                                name=target_channel_name)
    return channel


async def retrieve_source_text():
    """Collect sample text from the configured source channels or users."""
    source = settings.get_setting('Source')
    source_text = ""
    if (settings.get_setting('Source type (channel or user)') == "channel"):
        for channel in source:
            target_channel = find_channel(channel)
            async for message in client.logs_from(
                    target_channel,
                    limit=int(settings.get_setting('Sample size per source'))):
                source_text += message.content + "\n"
        return source_text
    elif (settings.get_setting('Source type (channel or user)') == "user"):
        # TODO: user-based sampling is not implemented; falls through and
        # returns None, which generate_sentence() cannot use.
        for user in source:
            pass
    else:
        print("Error: Invalid source type! Please choose either 'channel' "
              "or 'user' in settings file.")
        time.sleep(3)
        sys.exit()


async def generate_sentence():
    """Build a markov model from the source text and make one sentence."""
    source_text = await retrieve_source_text()
    text_model = markovify.NewlineText(source_text)
    new_sentence = None
    # NOTE(review): make_sentence() can return None forever on degenerate
    # input, which would spin here -- consider a retry cap.
    while not new_sentence:
        new_sentence = text_model.make_sentence()
    if (settings.get_setting('Allow Mentions') != "true"):
        new_sentence = remove_mentions(new_sentence)
    return new_sentence


async def send_response():
    """Generate and send a sentence unless newer activity arrived meanwhile."""
    global last_recieved
    target_channel_name = settings.get_setting('Target channel')
    last_recieved = datetime.datetime.now()
    start_last_recieved = last_recieved
    sentence = await generate_sentence()
    # Only send if no other response was triggered while we were generating.
    if (start_last_recieved == last_recieved):
        await client.send_message(find_channel(target_channel_name), sentence)


@client.event
async def on_message(message):
    target_channel_name = settings.get_setting('Target channel')
    if ((message.channel.name == target_channel_name)
            and (message.author.id != client.user.id)):
        if (response_roll()):
            asyncio.ensure_future(send_response())


@client.event
async def on_ready():
    # BUG FIX: wrap the id in str() -- concatenating a non-string id raises.
    print('Logged in as: ' + client.user.name + '[' + str(client.user.id) + '].')


print("Logging in to bot...")

# Run client (connect and login) ~ Blocking (must be last) ~ This is an
# unabstracted version of client.run() to give more control
try:
    if (not settings.get_setting('Discord token')):
        print("Please enter a discord bot token in 'settings.JSON' before running")
        time.sleep(3)
        sys.exit()
    else:
        client.loop.run_until_complete(asyncio.gather(
            client.start(settings.get_setting('Discord token')),
            auto_message_check()
        ))
except KeyboardInterrupt:
    # Set exit flag to allow wakeup() to close properly
    exit_flag = True
    client.loop.run_until_complete(client.logout())
    pending = asyncio.Task.all_tasks()
    gathered = asyncio.gather(*pending)
    try:
        gathered.cancel()
        client.loop.run_until_complete(gathered)
        gathered.exception()
    except Exception:
        # Best-effort shutdown; cancelled tasks are expected to raise here.
        pass
finally:
    client.loop.close()
In a previous post, we began developing an Alexa Gadget Skill and set up a simple roll call dialog. One thing that our sample could really benefit from is providing visual feedback to the user when the Echo Buttons are pressed or selected during the roll call process. The Alexa Gadget Skills API gives us control over each gadget’s light. In this post we dive into this functionality and have our skill take advantage of it. So far, we have been working with the GameEngineInterface. This interface allows us to set input handlers on gadgets and to receive users’ gadget input events. The interface that lets us control the device itself is the GadgetControllerInterface. This interface contains one directive called SetLight and one request called System.ExceptionEncountered. The System.ExceptionEncountered request is sent to our skill when the SetLight directive has failed for whatever reason. In this post, we take a focused look at the SetLight directive. The directive can act on either a specified collection of gadgets or, if the targetGadgets array remains empty, all paired gadgets. The parameters field contains the details of what event triggers the animation and the animation’s definition. The triggerEvent can be a button up, down or none, in which case the animation begins playing immediately. triggerEventTimeMs is a delay in milliseconds after the event occurs before the animation begins. The animations object includes instructions on how many times to repeat a sequence (repeat field), which lights on the gadget the animation is for (targetLights field) and a list of step by step instructions on how to execute the animation (sequence field). Each sequence step has a duration in milliseconds, a color in HEX without the # character and a blend flag indicating whether the device should interpolate the color from its current state to the step color. A few additional items to note. The targetLights array is simply [ "1" ] because the Echo Buttons have one light.
Future gadgets might have more. This field will provide fine tuned control over each light when those gadgets come out. The number of sequence steps allowed is limited by the length of the targetGadgets array. The formula for the limit is: 38 - targetGadgets.length * 3. Of course, that might be subject to change, so please consult the official docs. Each Echo Button can have one animation set per trigger. Any directive that sends a different animation for a trigger will overwrite whatever animation was set before. Set all lights to an animation when the skill launches. Set all lights to some color and fade out when the roll call initializes. Set a light to a solid color if a button has been selected. Once roll call is finished, set the buttons that are not used to black and set the used buttons to some animation indicating that they are in the game. We first create something to help us build the animations JSON. As it turns out, the trivia game sample has a really cool animations helper that we can use. I went ahead and translated it to TypeScript. The helper now has two modules: BasicAnimations and ComplexAnimations. BasicAnimations contains many functions such as setting a static color (SolidAnimation), fade in/out (FadeInAnimation/FadeOutAnimation) or alternating between a color and black (BlinkAnimation). ComplexAnimations contains two functions, one of which is SpectrumAnimation, an animation that takes the light through any number of color transitions. The code for all of these is fairly easy to follow. Here is what two of them look like. Lastly, we create a SetLightDirectiveBuilder module to build the SetLight directive instances. The goal of this code is to generate the directives given an animation, an optional array of targetGadgetIds, the triggering event and the delay. We create three helpers so we do not have to pass the TiggerEventType parameter. A minor convenience. and receive the following JSON to send back as part of our response from a skill. 
Excellent! Let us integrate these new features into our skill to see the lights in action! We’ve actually done most of the work we needed to do already. The Roll Call code we had created in the previous part of this series is in a state where we can easily add the SetLight directive. The LaunchHandler changes only to add directives. Note that not only do we add the skillLaunch animation, which cycles through white, purple, and yellow, for 5 cycles. We also add the default button up and down animations. We do this so that whenever a user presses any button, we provide some sort of color feedback. The other interesting change is specific to feedback when a button is selected during the roll call. Recall, when a user pressed the first button in the roll call, our skill acknowledges this via a voice response and asks the user to press the second button. We would like to have the light perform an animation at this point; a good visual cue for the user to know which buttons are selected and which are not. We modify the handleButtonCheckin function in the RollCall module to send the directive for each gadgetId that was selected in the current request. We also do the math to ensure that the button count is reflected correctly if the skill received multiple button inputs simultaneously. I’m not certain this can actually occur, but since inputEvents is an array… better safe than sorry. Beyond that, it’s smooth sailing. You can deploy this code into a skill by using ask deploy. Here is a short video of the current code working on my desk. Code can be found in the Github repo.
#based off of neuralnetworksanddeeplearning.com import numpy as np import gzip import struct import random epochs = 10 #number of training cycles y_train = np.zeros((60000,10)) #initialize for one-hot encoding alpha = 100 #learning rate batchsize = 6 num_neurons def main(): print "Test" #read in data images_train, images_test, labels_train, labels_test = readData() #randomly initialize weights and biases weights = .01*np.random.rand(784,10) bias = .01*np.random.rand(10000,10) #one-hot encode labels y_train[np.arange(60000), labels_train] = 1 #group training data training_data = zip(images_train, labels_train) #train classifier weights_t, bias_t = trainClassifier(epochs, images_train_b, y_train_b, weights, bias) #test classifier accuracy = testClassifier(images_test, labels_test, weights_t, bias_t) print "Accuracy: " + str(accuracy) + "%" def readData(): image_train_filename = 'MNIST_data/train-images-idx3-ubyte.gz' label_train_filename = 'MNIST_data/train-labels-idx1-ubyte.gz' image_test_filename = 'MNIST_data/t10k-images-idx3-ubyte.gz' label_test_filename = 'MNIST_data/t10k-labels-idx1-ubyte.gz' print "Opening files" #uncompress files and read data with gzip.open(image_train_filename, 'r') as f: magicnum, numimage, row, col = struct.unpack('>IIII', f.read(16)) images = np.fromstring(f.read(), dtype='uint8').reshape(numimage, row * col) with gzip.open(label_train_filename, 'r') as f: magicnum, numlabel = struct.unpack('>II', f.read(8)) labels = np.fromstring(f.read(), dtype='int8') with gzip.open(image_test_filename, 'r') as f: magicnum, numimage, row, col = struct.unpack('>IIII', f.read(16)) images_t = np.fromstring(f.read(), dtype='uint8').reshape(numimage, row * col) with gzip.open(label_test_filename, 'r') as f: magicnum, numlabel = struct.unpack('>II', f.read(8)) labels_t = np.fromstring(f.read(), dtype='int8') return images, images_t, labels, labels_t def forwardPass(weights, x, bias): y_pred = [] #linear model y_i = x.dot(weights) + bias #activation function 
for i in range(len(y_i)): y_probs = softmax(y_i[i]) y_pred.append(y_probs) return y_pred # def softmax(y): # y_s = np.exp(y-np.max(y)) # y_soft = y_s/y_s.sum() # return y_soft def loss(y_pred, y_actual): #cross entropy loss #y_actual multiplied by log of y_pred #error_sum = y_actual * np.log10(y_pred-y_actual) #sum #error = -np.sum(error_sum) #Least squares error error = np.sum((y_pred-y_actual)**2) return error def sgd(training_data, weights, biases): #train using stochastic gradient descent for i in range(0,epochs): #randomly shuffle data random.shuffle(training_data) #partition into batches batches = np.split(training_data, batchsize) #apply gradient descent for each batch for batch in batches: weights, biases = gradientUpdate(weights, biases) print "Epoch " + str(i) + " complete" return weights, biases def gradientUpdate(weights, bias): nabla_b = [np.zeros(b.shape) for b in bias] nabla_w = [np.zeros(w.shape) for w in weights] #obtain gradients deltaW, deltaB = backprop() deltaW = deltaW + nabla_w deltaB = deltaB + nabla_b #update weights & biases w = (weights - (alpha/len(miniBatch))*deltaw) b = (bias - (alpha/len(minibatch))*deltaB) return w, b def backprop(x, y, weights, bias): nabla_b = [np.zeros(b.shape) for b in bias] nabla_w = [np.zeros(w.shape) for w in weights] #feedforward activation = x activation_list = [x] z_list = [] for w, b in zip(weights, bias): z = np.dot(w, activation) + b z_list.append(z) activation = softmax(z) activation_list.append(activation) #backward pass delta = cost_derivative(activation_list[-1], y) * sigmoid_deriv(z_list[-1]) nabla_b[-1] = delta nabla_w[-1] = np.dot(delta, activation_list[-2].T) for l in xrange(2, num_neurons): z = z_list[-1] sd = sigmoid_deriv(z) delta = np.dot(weights[-l + 1].T, delta) * sd nabla_b[-1] = delta nabla_w[-1] = np.dot(delta, activation_list[-l-1].T) return (nabla_w, nabla_b) def cost_derivative(output, y): return (output - y) def softmax(z): return 1.0/(1.0 + np.exp(-z)) def softmax_deriv(z): return 
softmax(z) * (1 - softmax(z)) def testClassifier(images, labels, weights, bias): correct = 0 total = 0 prediction = [] print "Testing" y_pred= forwardPass(weights, images, bias) #predictions for test images for i in range(len(y_pred)): prediction.append(np.argmax(y_pred[i])) #Check accuracy of guess for i in range(0,len(y_pred)): if prediction[i] == labels[i]: correct += 1 total += 1 accuracy = (correct/ float(total))*100 return accuracy if __name__ == '__main__': main()
At 24, you haven’t stopped happening in my life. I hope you never do. Yesterday, I informed readers of my recent run in with ingrown toenails…I apologize if I grossed anybody out, but from the beginning I promised I would be real on here. Welcome! That being said, after something as significant as receiving anesthetic, spending 40 minutes a day with my feet in salt water to eliminate infection possibilities and being ridiculed in the process, some sort of lesson had to have occurred. Good news, one did! The issue with my toes was my nails were growing where they weren’t intended to grow. This picture is a bit disgusting, but it does a great job depicting my lesson learned. My doctor let me keep the pieces of nail he cut out. I had both insides of my big toes operated on, so the nails below are sitting how they were on my feet. If you look closely the inside portions of the nails are what any Joe Schmo would have. Then, the outer parts of the nails are the issue. The left toe was just beginning to be ingrown again, the right toe was infected. It’s gross, but stick with me. Gross, but seeing these allowed me, and every person who knew about my ingrown toenails that gave me looks of disgust, to understand what was wrong with my feet. How the heck does a centimeter (if that) chunk of nail cause infection and severe pain? I’ll never understand, but as I was thinking about it I paralleled it to the big picture of life. How many times do I allow my little mistakes to build up and create much bigger issues in my world? We were created to live in harmony with God. Adam and Eve sinned. Now, we all inherit a sin nature, but God didn’t choose to leave us in our sinful state. God sent His Son, Jesus, to live a perfect life, become our sin on the cross and defeat death, so we may live in harmony with God once again. So, once I accept Christ, I’ll never sin again. Awesome, but absolutely not true. I’m going to fail, sin, mess up, make mistakes.
The difference is my sin has been redeemed, and I want to rid myself of the junk in my life that grieves the Lord. So, how does this blip of theology have anything to do with my feet?! Just like my toenails were growing in places they shouldn’t be, sometimes I let sin fester in my life where it doesn’t need to be. It starts small, and I allow it to grow and remain a stronghold in my life. I fail to confess my sin, to humble myself and admit I’m wrong. So, it continues to grow where it’s not supposed to be. The longer we allow it to be present, the more painful removing it is…just like my toes. The consequences of our sin are greater the longer we allow it to remain. Our sins are forgiven, but they don’t come without consequence. As I physically had my ingrown toenails ripped out, I couldn’t help but wonder what sin needs to be ripped out of my life as well.
""" Tests use cases related to LMS Entrance Exam behavior, such as gated content access (TOC) """ from mock import patch, Mock from django.core.urlresolvers import reverse from django.test.client import RequestFactory from nose.plugins.attrib import attr from capa.tests.response_xml_factory import MultipleChoiceResponseXMLFactory from courseware.model_data import FieldDataCache from courseware.module_render import toc_for_course, get_module, handle_xblock_callback from courseware.tests.factories import UserFactory, InstructorFactory, StaffFactory from courseware.tests.helpers import ( LoginEnrollmentTestCase, get_request_for_user ) from courseware.entrance_exams import ( course_has_entrance_exam, get_entrance_exam_content, get_entrance_exam_score, user_can_skip_entrance_exam, user_has_passed_entrance_exam, ) from student.models import CourseEnrollment from student.tests.factories import CourseEnrollmentFactory, AnonymousUserFactory from util.milestones_helpers import ( add_milestone, add_course_milestone, get_namespace_choices, generate_milestone_namespace, add_course_content_milestone, get_milestone_relationship_types, ) from milestones.tests.utils import MilestonesTestCaseMixin from xmodule.modulestore.django import modulestore from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory @attr('shard_1') @patch.dict('django.conf.settings.FEATURES', {'ENTRANCE_EXAMS': True, 'MILESTONES_APP': True}) class EntranceExamTestCases(LoginEnrollmentTestCase, ModuleStoreTestCase, MilestonesTestCaseMixin): """ Check that content is properly gated. Creates a test course from scratch. The tests below are designed to execute workflows regardless of the feature flag settings. 
""" @patch.dict('django.conf.settings.FEATURES', {'ENTRANCE_EXAMS': True, 'MILESTONES_APP': True}) def setUp(self): """ Test case scaffolding """ super(EntranceExamTestCases, self).setUp() self.course = CourseFactory.create( metadata={ 'entrance_exam_enabled': True, } ) self.chapter = ItemFactory.create( parent=self.course, display_name='Overview' ) ItemFactory.create( parent=self.chapter, display_name='Welcome' ) ItemFactory.create( parent=self.course, category='chapter', display_name="Week 1" ) self.chapter_subsection = ItemFactory.create( parent=self.chapter, category='sequential', display_name="Lesson 1" ) chapter_vertical = ItemFactory.create( parent=self.chapter_subsection, category='vertical', display_name='Lesson 1 Vertical - Unit 1' ) ItemFactory.create( parent=chapter_vertical, category="problem", display_name="Problem - Unit 1 Problem 1" ) ItemFactory.create( parent=chapter_vertical, category="problem", display_name="Problem - Unit 1 Problem 2" ) ItemFactory.create( category="instructor", parent=self.course, data="Instructor Tab", display_name="Instructor" ) self.entrance_exam = ItemFactory.create( parent=self.course, category="chapter", display_name="Entrance Exam Section - Chapter 1", is_entrance_exam=True, in_entrance_exam=True ) self.exam_1 = ItemFactory.create( parent=self.entrance_exam, category='sequential', display_name="Exam Sequential - Subsection 1", graded=True, in_entrance_exam=True ) subsection = ItemFactory.create( parent=self.exam_1, category='vertical', display_name='Exam Vertical - Unit 1' ) problem_xml = MultipleChoiceResponseXMLFactory().build_xml( question_text='The correct answer is Choice 3', choices=[False, False, True, False], choice_names=['choice_0', 'choice_1', 'choice_2', 'choice_3'] ) self.problem_1 = ItemFactory.create( parent=subsection, category="problem", display_name="Exam Problem - Problem 1", data=problem_xml ) self.problem_2 = ItemFactory.create( parent=subsection, category="problem", display_name="Exam Problem - 
Problem 2" ) add_entrance_exam_milestone(self.course, self.entrance_exam) self.course.entrance_exam_enabled = True self.course.entrance_exam_minimum_score_pct = 0.50 self.course.entrance_exam_id = unicode(self.entrance_exam.scope_ids.usage_id) self.anonymous_user = AnonymousUserFactory() self.request = get_request_for_user(UserFactory()) modulestore().update_item(self.course, self.request.user.id) # pylint: disable=no-member self.client.login(username=self.request.user.username, password="test") CourseEnrollment.enroll(self.request.user, self.course.id) self.expected_locked_toc = ( [ { 'active': True, 'sections': [ { 'url_name': u'Exam_Sequential_-_Subsection_1', 'display_name': u'Exam Sequential - Subsection 1', 'graded': True, 'format': '', 'due': None, 'active': True } ], 'url_name': u'Entrance_Exam_Section_-_Chapter_1', 'display_name': u'Entrance Exam Section - Chapter 1', 'display_id': u'entrance-exam-section-chapter-1', } ] ) self.expected_unlocked_toc = ( [ { 'active': False, 'sections': [ { 'url_name': u'Welcome', 'display_name': u'Welcome', 'graded': False, 'format': '', 'due': None, 'active': False }, { 'url_name': u'Lesson_1', 'display_name': u'Lesson 1', 'graded': False, 'format': '', 'due': None, 'active': False } ], 'url_name': u'Overview', 'display_name': u'Overview', 'display_id': u'overview' }, { 'active': False, 'sections': [], 'url_name': u'Week_1', 'display_name': u'Week 1', 'display_id': u'week-1' }, { 'active': False, 'sections': [], 'url_name': u'Instructor', 'display_name': u'Instructor', 'display_id': u'instructor' }, { 'active': True, 'sections': [ { 'url_name': u'Exam_Sequential_-_Subsection_1', 'display_name': u'Exam Sequential - Subsection 1', 'graded': True, 'format': '', 'due': None, 'active': True } ], 'url_name': u'Entrance_Exam_Section_-_Chapter_1', 'display_name': u'Entrance Exam Section - Chapter 1', 'display_id': u'entrance-exam-section-chapter-1' } ] ) def test_view_redirect_if_entrance_exam_required(self): """ Unit Test: if 
        entrance exam is required. Should return a redirect. """
        url = reverse('courseware', kwargs={'course_id': unicode(self.course.id)})
        # The courseware landing page should bounce the learner straight to the
        # first section of the entrance exam chapter.
        expected_url = reverse('courseware_section', kwargs={
            'course_id': unicode(self.course.id),
            'chapter': self.entrance_exam.location.name,
            'section': self.exam_1.location.name
        })
        resp = self.client.get(url)
        self.assertRedirects(resp, expected_url, status_code=302, target_status_code=200)

    @patch.dict('django.conf.settings.FEATURES', {'ENTRANCE_EXAMS': False})
    def test_entrance_exam_content_absence(self):
        """
        Unit Test: If entrance exam is not enabled then page should be redirected with
        chapter contents.
        """
        url = reverse('courseware', kwargs={'course_id': unicode(self.course.id)})
        # With the feature flag off the redirect targets the ordinary course
        # chapter, not the entrance exam.
        expected_url = reverse('courseware_section', kwargs={
            'course_id': unicode(self.course.id),
            'chapter': self.chapter.location.name,
            'section': self.chapter_subsection.location.name
        })
        resp = self.client.get(url)
        self.assertRedirects(resp, expected_url, status_code=302, target_status_code=200)
        resp = self.client.get(expected_url)
        # Exam problems must not leak into the regular chapter page.
        self.assertNotIn('Exam Problem - Problem 1', resp.content)
        self.assertNotIn('Exam Problem - Problem 2', resp.content)

    def test_entrance_exam_content_presence(self):
        """
        Unit Test: If entrance exam is enabled then its content e.g. problems should be loaded
        and redirection will occur with entrance exam contents.
        """
        url = reverse('courseware', kwargs={'course_id': unicode(self.course.id)})
        expected_url = reverse('courseware_section', kwargs={
            'course_id': unicode(self.course.id),
            'chapter': self.entrance_exam.location.name,
            'section': self.exam_1.location.name
        })
        resp = self.client.get(url)
        self.assertRedirects(resp, expected_url, status_code=302, target_status_code=200)
        resp = self.client.get(expected_url)
        self.assertIn('Exam Problem - Problem 1', resp.content)
        self.assertIn('Exam Problem - Problem 2', resp.content)

    def test_get_entrance_exam_content(self):
        """
        test get entrance exam content method
        """
        # Before answering, the exam chapter is still the gating content.
        exam_chapter = get_entrance_exam_content(self.request, self.course)
        self.assertEqual(exam_chapter.url_name, self.entrance_exam.url_name)
        self.assertFalse(user_has_passed_entrance_exam(self.request, self.course))

        answer_entrance_exam_problem(self.course, self.request, self.problem_1)
        answer_entrance_exam_problem(self.course, self.request, self.problem_2)

        # After fulfilling both problems the gate is lifted (None returned).
        exam_chapter = get_entrance_exam_content(self.request, self.course)
        self.assertEqual(exam_chapter, None)
        self.assertTrue(user_has_passed_entrance_exam(self.request, self.course))

    def test_entrance_exam_score(self):
        """
        test entrance exam score. we will hit the method get_entrance_exam_score to verify exam score.
        """
        # assertNumQueries(1) also guards against N+1 regressions in scoring.
        with self.assertNumQueries(1):
            exam_score = get_entrance_exam_score(self.request, self.course)
        self.assertEqual(exam_score, 0)

        answer_entrance_exam_problem(self.course, self.request, self.problem_1)
        answer_entrance_exam_problem(self.course, self.request, self.problem_2)

        with self.assertNumQueries(1):
            exam_score = get_entrance_exam_score(self.request, self.course)
        # exam_score is a fraction in [0, 1]; more than 50 percent should be achieved.
        self.assertGreater(exam_score * 100, 50)

    def test_entrance_exam_requirement_message(self):
        """
        Unit Test: entrance exam requirement message should be present in response
        """
        url = reverse(
            'courseware_section',
            kwargs={
                'course_id': unicode(self.course.id),
                'chapter': self.entrance_exam.location.name,
                'section': self.exam_1.location.name
            }
        )
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertIn('To access course materials, you must score', resp.content)

    def test_entrance_exam_requirement_message_with_correct_percentage(self):
        """
        Unit Test: entrance exam requirement message should be present in response
        and percentage of required score should be rounded as expected
        """
        # 0.29 must render as "29%", not e.g. "28%" from float truncation.
        minimum_score_pct = 29
        self.course.entrance_exam_minimum_score_pct = float(minimum_score_pct) / 100
        modulestore().update_item(self.course, self.request.user.id)  # pylint: disable=no-member
        url = reverse(
            'courseware_section',
            kwargs={
                'course_id': unicode(self.course.id),
                'chapter': self.entrance_exam.location.name,
                'section': self.exam_1.location.name
            }
        )
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertIn('To access course materials, you must score {required_score}% or higher'.format(
            required_score=minimum_score_pct
        ), resp.content)

    def test_entrance_exam_requirement_message_hidden(self):
        """
        Unit Test: entrance exam message should not be present outside the context of entrance exam subsection.
        """
        # Login as staff to avoid redirect to entrance exam
        self.client.logout()
        staff_user = StaffFactory(course_key=self.course.id)
        self.client.login(username=staff_user.username, password='test')
        CourseEnrollment.enroll(staff_user, self.course.id)
        url = reverse(
            'courseware_section',
            kwargs={
                'course_id': unicode(self.course.id),
                'chapter': self.chapter.location.name,
                'section': self.chapter_subsection.location.name
            }
        )
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertNotIn('To access course materials, you must score', resp.content)
        self.assertNotIn('You have passed the entrance exam.', resp.content)

    def test_entrance_exam_passed_message_and_course_content(self):
        """
        Unit Test: exam passing message and rest of the course section should be present
        when user achieves the entrance exam milestone/pass the exam.
        """
        url = reverse(
            'courseware_section',
            kwargs={
                'course_id': unicode(self.course.id),
                'chapter': self.entrance_exam.location.name,
                'section': self.exam_1.location.name
            }
        )
        answer_entrance_exam_problem(self.course, self.request, self.problem_1)
        answer_entrance_exam_problem(self.course, self.request, self.problem_2)
        resp = self.client.get(url)
        self.assertNotIn('To access course materials, you must score', resp.content)
        self.assertIn('You have passed the entrance exam.', resp.content)
        # 'Lesson 1' belongs to the regular course content, now unlocked.
        self.assertIn('Lesson 1', resp.content)

    def test_entrance_exam_gating(self):
        """
        Unit Test: test_entrance_exam_gating
        """
        # This user helps to cover a discovered bug in the milestone fulfillment logic
        chaos_user = UserFactory()
        locked_toc = self._return_table_of_contents()
        for toc_section in self.expected_locked_toc:
            self.assertIn(toc_section, locked_toc)

        # Set up the chaos user
        answer_entrance_exam_problem(self.course, self.request, self.problem_1, chaos_user)
        answer_entrance_exam_problem(self.course, self.request, self.problem_1)
        answer_entrance_exam_problem(self.course, self.request, self.problem_2)

        unlocked_toc = self._return_table_of_contents()
        for toc_section in self.expected_unlocked_toc:
            self.assertIn(toc_section, unlocked_toc)

    def test_skip_entrance_exam_gating(self):
        """
        Tests gating is disabled if skip entrance exam is set for a user.
        """
        # make sure toc is locked before allowing user to skip entrance exam
        locked_toc = self._return_table_of_contents()
        for toc_section in self.expected_locked_toc:
            self.assertIn(toc_section, locked_toc)

        # hit skip entrance exam api in instructor app
        instructor = InstructorFactory(course_key=self.course.id)
        self.client.login(username=instructor.username, password='test')
        url = reverse('mark_student_can_skip_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
        response = self.client.post(url, {
            'unique_student_identifier': self.request.user.email,
        })
        self.assertEqual(response.status_code, 200)

        unlocked_toc = self._return_table_of_contents()
        for toc_section in self.expected_unlocked_toc:
            self.assertIn(toc_section, unlocked_toc)

    def test_entrance_exam_gating_for_staff(self):
        """
        Tests gating is disabled if user is member of staff.
        """
        # Login as member of staff
        self.client.logout()
        staff_user = StaffFactory(course_key=self.course.id)
        staff_user.is_staff = True
        self.client.login(username=staff_user.username, password='test')

        # assert staff has access to all toc
        self.request.user = staff_user
        unlocked_toc = self._return_table_of_contents()
        for toc_section in self.expected_unlocked_toc:
            self.assertIn(toc_section, unlocked_toc)

    @patch('courseware.entrance_exams.user_has_passed_entrance_exam', Mock(return_value=False))
    def test_courseware_page_access_without_passing_entrance_exam(self):
        """
        Test courseware access page without passing entrance exam
        """
        url = reverse(
            'courseware_chapter',
            kwargs={'course_id': unicode(self.course.id), 'chapter': self.chapter.url_name}
        )
        response = self.client.get(url)
        redirect_url = reverse('courseware', args=[unicode(self.course.id)])
        # Two-hop redirect: chapter -> courseware index -> entrance exam.
        self.assertRedirects(response, redirect_url, status_code=302, target_status_code=302)
        response = self.client.get(redirect_url)
        exam_url = response.get('Location')
        self.assertRedirects(response, exam_url)

    @patch('courseware.entrance_exams.user_has_passed_entrance_exam', Mock(return_value=False))
    def test_courseinfo_page_access_without_passing_entrance_exam(self):
        """
        Test courseware access page without passing entrance exam
        """
        url = reverse('info', args=[unicode(self.course.id)])
        response = self.client.get(url)
        redirect_url = reverse('courseware', args=[unicode(self.course.id)])
        self.assertRedirects(response, redirect_url, status_code=302, target_status_code=302)
        response = self.client.get(redirect_url)
        exam_url = response.get('Location')
        self.assertRedirects(response, exam_url)

    @patch('courseware.entrance_exams.user_has_passed_entrance_exam', Mock(return_value=True))
    def test_courseware_page_access_after_passing_entrance_exam(self):
        """
        Test courseware access page after passing entrance exam
        """
        # Mocking get_required_content with empty list to assume user has passed entrance exam
        self._assert_chapter_loaded(self.course, self.chapter)

    @patch('util.milestones_helpers.get_required_content', Mock(return_value=['a value']))
    def test_courseware_page_access_with_staff_user_without_passing_entrance_exam(self):
        """
        Test courseware access page without passing entrance exam but with staff user
        """
        self.logout()
        staff_user = StaffFactory.create(course_key=self.course.id)
        self.login(staff_user.email, 'test')
        CourseEnrollmentFactory(user=staff_user, course_id=self.course.id)
        self._assert_chapter_loaded(self.course, self.chapter)

    def test_courseware_page_access_with_staff_user_after_passing_entrance_exam(self):
        """
        Test courseware access page after passing entrance exam but with staff user
        """
        self.logout()
        staff_user = StaffFactory.create(course_key=self.course.id)
        self.login(staff_user.email, 'test')
        CourseEnrollmentFactory(user=staff_user, course_id=self.course.id)
        self._assert_chapter_loaded(self.course, self.chapter)

    @patch.dict("django.conf.settings.FEATURES", {'ENTRANCE_EXAMS': False})
    def test_courseware_page_access_when_entrance_exams_disabled(self):
        """
        Test courseware page access when ENTRANCE_EXAMS feature is disabled
        """
        self._assert_chapter_loaded(self.course, self.chapter)

    def test_can_skip_entrance_exam_with_anonymous_user(self):
        """
        Test can_skip_entrance_exam method with anonymous user
        """
        self.assertFalse(user_can_skip_entrance_exam(self.request, self.anonymous_user, self.course))

    def test_has_passed_entrance_exam_with_anonymous_user(self):
        """
        Test has_passed_entrance_exam method with anonymous user
        """
        self.request.user = self.anonymous_user
        self.assertFalse(user_has_passed_entrance_exam(self.request, self.course))

    def test_course_has_entrance_exam_missing_exam_id(self):
        """
        A course flagged entrance_exam_enabled but lacking an exam id must not
        report that it has an entrance exam.
        """
        course = CourseFactory.create(
            metadata={
                'entrance_exam_enabled': True,
            }
        )
        self.assertFalse(course_has_entrance_exam(course))

    def test_user_has_passed_entrance_exam_short_circuit_missing_exam(self):
        """
        A course with no entrance exam at all counts as passed (nothing gates it).
        """
        course = CourseFactory.create(
        )
        self.assertTrue(user_has_passed_entrance_exam(self.request, course))
    @patch.dict("django.conf.settings.FEATURES", {'ENABLE_MASQUERADE': False})
    def test_entrance_exam_xblock_response(self):
        """
        Tests entrance exam xblock has `entrance_exam_passed` key in json response.
        """
        request_factory = RequestFactory()
        # 'choice_2' is the expected answer key for problem_1 in this fixture.
        data = {'input_{}_2_1'.format(unicode(self.problem_1.location.html_id())): 'choice_2'}
        request = request_factory.post(
            'problem_check',
            data=data
        )
        request.user = self.user
        response = handle_xblock_callback(
            request,
            unicode(self.course.id),
            unicode(self.problem_1.location),
            'xmodule_handler',
            'problem_check',
        )
        self.assertEqual(response.status_code, 200)
        self.assertIn('entrance_exam_passed', response.content)

    def _assert_chapter_loaded(self, course, chapter):
        """
        Asserts courseware chapter load successfully.
        """
        url = reverse(
            'courseware_chapter',
            kwargs={'course_id': unicode(course.id), 'chapter': chapter.url_name}
        )
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)

    def _return_table_of_contents(self):
        """
        Returns table of content for the entrance exam specific to this test

        Returns the table of contents for course self.course, for chapter
        self.entrance_exam, and for section self.exam1
        """
        self.field_data_cache = FieldDataCache.cache_for_descriptor_descendents(  # pylint: disable=attribute-defined-outside-init
            self.course.id,
            self.request.user,
            self.entrance_exam
        )
        return toc_for_course(
            self.request.user,
            self.request,
            self.course,
            self.entrance_exam.url_name,
            self.exam_1.url_name,
            self.field_data_cache
        )


def answer_entrance_exam_problem(course, request, problem, user=None):
    """
    Takes a required milestone `problem` in a `course` and fulfills it.

    Args:
        course (Course): Course object, the course the required problem is in
        request (Request): request Object
        problem (xblock): xblock object, the problem to be fulfilled
        user (User): User object in case it is different from request.user
    """
    if not user:
        user = request.user

    # Publish a perfect grade (1/1) directly, bypassing the problem UI.
    grade_dict = {'value': 1, 'max_value': 1, 'user_id': user.id}
    field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
        course.id,
        user,
        course,
        depth=2
    )
    # pylint: disable=protected-access
    module = get_module(
        user,
        request,
        problem.scope_ids.usage_id,
        field_data_cache,
    )._xmodule
    module.system.publish(problem, 'grade', grade_dict)


def add_entrance_exam_milestone(course, entrance_exam):
    """
    Adds the milestone for given `entrance_exam` in `course`

    Args:
        course (Course): Course object in which the entrance_exam is located
        entrance_exam (xblock): the entrance exam to be added as a milestone
    """
    namespace_choices = get_namespace_choices()
    milestone_relationship_types = get_milestone_relationship_types()

    milestone_namespace = generate_milestone_namespace(
        namespace_choices.get('ENTRANCE_EXAM'),
        course.id
    )
    milestone = add_milestone(
        {
            'name': 'Test Milestone',
            'namespace': milestone_namespace,
            'description': 'Testing Courseware Entrance Exam Chapter',
        }
    )
    # Course REQUIRES the milestone; the exam content FULFILLS it.
    add_course_milestone(
        unicode(course.id),
        milestone_relationship_types['REQUIRES'],
        milestone
    )
    add_course_content_milestone(
        unicode(course.id),
        unicode(entrance_exam.location),
        milestone_relationship_types['FULFILLS'],
        milestone
    )
This is a placeholder page for Lisa Hinshelwood, which means this person is not currently on this site. We do suggest using the tools below to find Lisa Hinshelwood. You are visiting the placeholder page for Lisa Hinshelwood. This page is here because someone used our placeholder utility to look for Lisa Hinshelwood. We created this page automatically in hopes that Lisa Hinshelwood would find it. If you are not Lisa Hinshelwood, but are an alumnus of Buffalo Grove High School, register on this site for free now.
#!/usr/bin/env python3
"""
This is not necessarily an efficient way of doing things by downloading ASCII per
Madrigal remote filter and then converting to hdf5 locally, rather we want to
download just HDF5, but it's a OK way to start.

Tests loading of globalisprint ascii file, resaving as HDF5 for fast data processing

first I clunkily used
globalIsprint.py --verbose --url=http://isr.sri.com/madrigal --parms=DNE,AZM,ELM,NE,UT1
--output=example.txt --startDate="01/01/1950" --endDate="10/31/2007" --inst=61
--kindat=0 --filter azm,90,270
then I ran the code below.

Finally, we demonstrate reading HDF5 into an array.
"""
from numpy import loadtxt  # should consider perhaps genfromtxt to handle "missing" values
import h5py
from os.path import splitext, expanduser
from pandas import DataFrame
from time import time


def txt2h5(fn):
    """Convert the globalIsprint ASCII dump *fn* to an HDF5 file; return its path."""
    out_fn = splitext(expanduser(fn))[0] + '.h5'
    print('saving to ' + out_fn)

    # Column indices chosen a priori for the specific globalisprint command above,
    # because numpy.loadtxt can't handle the non-numeric columns.
    good_cols = (1, 2, 4)

    # First header line carries the column names.
    with open(fn, 'r') as txt:
        header = txt.readline().split()

    start = time()
    data = loadtxt(fn, skiprows=1, usecols=good_cols)
    print('loading text data took {:.4f} seconds'.format(time() - start))

    with h5py.File(out_fn, 'w', libver='latest') as h5:
        # data's columns are packed in good_cols order, hence enumerate().
        for packed_idx, col in enumerate(good_cols):
            h5[header[col]] = data[:, packed_idx]

    return out_fn


def readh5(h5fn):
    """Load AZM and ELM, indexed by UT1, from *h5fn* into a pandas DataFrame."""
    start = time()
    with h5py.File(h5fn, 'r', libver='latest') as h5:
        frame = DataFrame(index=h5['UT1'],
                          data={'AZM': h5['AZM'],
                                'ELM': h5['ELM']})
    print('loading HDF5 data took {:.4f} seconds'.format(time() - start))
    return frame


if __name__ == '__main__':
    from sys import argv

    h5fn = txt2h5(argv[1])  # ascii to hdf5
    df = readh5(h5fn)
Private investigators are an interesting group. A combination of rugged individualists, mousy ex-librarians, storied ex-law enforcement officers, ex-arson investigators, doggedly determined ex-claims adjusters and even people who saw one too many episodes of CSI and decided to jump into the business. Many of these folks arrived with ego to spare. I’ve known private investigators that were television journalists, recent widows and radio personalities. If you put 10 private investigators in a room you’re probably going to get 11 different opinions on the best way to accomplish a task. This gallimaufry of personalities carries over into the world of private investigator internet newsgroups. For the uninitiated, newsgroups are public (and private) on-line discussion forums. There are literally hundreds of internet newsgroups for private investigators covering every conceivable topic of the private eye world. I’ll go over a few general rules first and then list some of the best internet newsgroups you should be a member of. A few things to remember when you’re participating (and here’s where the good, bad and ugly come into play): DON’T TYPE IN ALL CAPITALS. IT IS THE SAME THING AS YELLING! Use spell check. It’s free and well worth it. Don’t just sit on the sidelines. Make sure you contribute. But give your ideas some careful consideration before you commit them to the permanency of the internet. As Mark Twain said “It is better to keep your mouth shut and appear stupid than to open it and remove all doubt.” Also, as Bill O’Reilly, that bold, fresh piece of humanity is wont to say, “Keep it pithy.” It never ceases to amaze me the lengthy epistles that some participants write. I don’t know about you but I’m busy. I don’t have time to read a 12 page missive complete with addendums and an index. Your signature line should contain more than your first name. People, especially private eyes, like to know who they’re dealing with.
Consider a signature line that, at the very least, includes your name, company name, address (because it’s nice to know where you’re from), your license number and a way to contact you, such as a telephone, email or website address. And finally, we know that you are a rare and delicate flower. So avoid personal attacks, flaming and ad hominem comments. It’s not nice and it’s rarely germane to the conversation. There are many sources for good private investigator newsgroups. Some of the more popular newsgroups are on Linkedin and Yahoo Groups. I am not necessarily recommending any of these newsgroups to you (although I am on several of them myself). I just wanted you to be aware of them. You may or may not find value in subscribing. As far as the Yahoo Groups, go to www.Groups.Yahoo.com. From there you can search for just about any type of newsgroup you want. For example, check out PIcases, GroupsThePIgroup, PIdomestic, PImarketing, BlackBookOnline, Private-Investigators-PInow, PIweekly, Private Investigator and Surveillance, to name just a few. On Linkedin make sure to navigate to the “Interests” tab (on top) and then click on “Groups.” The Linkedin Groups tend to be much more professional. You can find newsgroups for all aspects of our chosen profession. While you’re there check out Professional Private Investigators, PI Marketing, PI Partner Search, Surveillance Investigators, Worldwide Covert Surveillance Network, Investigation Network and PI Magazine. You can find newsgroups in both Yahoo Groups and Linkedin that are based on certifications (such as the CFE), your state and region, and the type of investigative work you do. There’s something for everyone. Interacting with other professional private investigators in these newsgroups will help you learn about new techniques and methods. You’ll be able to ask questions and get some good feedback. And you may find other investigators you can turn to in other parts of your state, region or the world.
This is Scott Fulmer, the Utah Gumshoe, reminding you that the game…is afoot!
import time
import numpy as np
import tkinter as tk
from PIL import ImageTk, Image

np.random.seed(1)
PhotoImage = ImageTk.PhotoImage
UNIT = 100  # pixels per grid cell
HEIGHT = 5  # grid-world rows
WIDTH = 5  # grid-world columns


class Env(tk.Tk):
    """
    5x5 grid-world environment rendered with tkinter.

    The agent (red rectangle) starts in the top-left cell; two triangles give
    reward -100 and the circle gives reward +100, both terminal. All other
    moves give reward 0. States are [column, row] cell indices.
    """

    def __init__(self):
        super(Env, self).__init__()
        self.action_space = ['u', 'd', 'l', 'r']  # up, down, left, right
        self.n_actions = len(self.action_space)
        self.title('monte carlo')
        # NOTE(review): both window dimensions use HEIGHT; harmless while
        # HEIGHT == WIDTH, but confirm if the grid ever becomes non-square.
        self.geometry('{0}x{1}'.format(HEIGHT * UNIT, HEIGHT * UNIT))
        self.shapes = self.load_images()
        self.canvas = self._build_canvas()
        self.texts = []

    def _build_canvas(self):
        """Create the canvas, draw grid lines, and place the sprites."""
        canvas = tk.Canvas(self, bg='white',
                           height=HEIGHT * UNIT,
                           width=WIDTH * UNIT)
        # draw the grid lines (0..500 in steps of UNIT=100)
        for c in range(0, WIDTH * UNIT, UNIT):  # vertical lines
            x0, y0, x1, y1 = c, 0, c, HEIGHT * UNIT
            canvas.create_line(x0, y0, x1, y1)
        for r in range(0, HEIGHT * UNIT, UNIT):  # horizontal lines
            x0, y0, x1, y1 = 0, r, HEIGHT * UNIT, r
            canvas.create_line(x0, y0, x1, y1)

        # add the images to the canvas; coordinates are cell centres
        self.rectangle = canvas.create_image(50, 50, image=self.shapes[0])
        self.triangle1 = canvas.create_image(250, 150, image=self.shapes[1])
        self.triangle2 = canvas.create_image(150, 250, image=self.shapes[1])
        self.circle = canvas.create_image(250, 250, image=self.shapes[2])

        canvas.pack()
        return canvas

    def load_images(self):
        """Load and resize the three sprite images used on the canvas."""
        rectangle = PhotoImage(
            Image.open("../img/rectangle.png").resize((65, 65)))
        triangle = PhotoImage(
            Image.open("../img/triangle.png").resize((65, 65)))
        circle = PhotoImage(
            Image.open("../img/circle.png").resize((65, 65)))
        return rectangle, triangle, circle

    @staticmethod
    def coords_to_state(coords):
        """Convert canvas pixel coordinates (cell centre) to [col, row] indices."""
        x = int((coords[0] - 50) / 100)
        y = int((coords[1] - 50) / 100)
        return [x, y]

    def reset(self):
        """Move the agent back to the top-left cell and return its state."""
        self.update()
        time.sleep(0.5)
        x, y = self.canvas.coords(self.rectangle)
        # Translate whatever the current position is back to the (50, 50) centre.
        self.canvas.move(self.rectangle, UNIT / 2 - x, UNIT / 2 - y)
        return self.coords_to_state(self.canvas.coords(self.rectangle))

    def step(self, action):
        """
        Apply integer `action` (0=up, 1=down, 2=left, 3=right) and return
        (next_state, reward, done). Moves off the grid are ignored.
        """
        state = self.canvas.coords(self.rectangle)
        base_action = np.array([0, 0])
        self.render()
        if action == 0:  # up
            if state[1] > UNIT:
                base_action[1] -= UNIT
        elif action == 1:  # down
            if state[1] < (HEIGHT - 1) * UNIT:
                base_action[1] += UNIT
        elif action == 2:  # left
            if state[0] > UNIT:
                base_action[0] -= UNIT
        elif action == 3:  # right
            if state[0] < (WIDTH - 1) * UNIT:
                base_action[0] += UNIT

        # move the agent
        self.canvas.move(self.rectangle, base_action[0], base_action[1])
        # keep the agent (red rectangle) above the other sprites
        self.canvas.tag_raise(self.rectangle)
        next_state = self.canvas.coords(self.rectangle)

        # reward function: +100 at the circle, -100 at a triangle, else 0
        if next_state == self.canvas.coords(self.circle):
            reward = 100
            done = True
        elif next_state in [self.canvas.coords(self.triangle1),
                            self.canvas.coords(self.triangle2)]:
            reward = -100
            done = True
        else:
            reward = 0
            done = False

        next_state = self.coords_to_state(next_state)
        return next_state, reward, done

    def render(self):
        """Refresh the tkinter window, throttled so the motion is visible."""
        time.sleep(0.03)
        self.update()
It is bothering me that I don’t bleed anymore. The monthly circulation system which had homed my body for the last twenty-five years is no more. Now, should I say “Rest in peace” and let it go? But my mind isn’t accepting the change, although my body kept reminding me that it was about to go. The time it entered my body, I had staring looks. Our Indian culture doesn’t allow us to talk about these things and they are kept secretive. Acceptance was not easy at that time as well. It pained and bled a lot. The suffering was unbearable. I was not to touch any kitchen utensils in those days and neither was I allowed to go to the temple, or even the room which had a mini temple at home. I was told to remain conscious when I sat and walked. Talking to boys or males, be it your father or brother, had to be ceased, and that was the major part of counseling. Leaving all the psychological problems, I started enjoying the bleeding days by the age of sixteen. Maybe because by that time I knew that this gives me the ability to give birth to another organism and makes me look beautiful. I had a perfect curvy body. But the withdrawal part is unacceptable at all. The strength it has given to my body is leading to weakness. The bones have become weaker. I apply homemade oil, sometimes branded oils too, specifically on my knees. I feel insecure at times about my facial beauty too. My body and face curves are going away. My abdomen has become bulgy too. The body fat doesn’t allow me to wear my previously kept, loveable clothes collection. U r absolutely right shilpa, well written. At this age women need extra care . Even being physically weak they must try to feel young mentally. It ll make them happy which ll support their hormone system to stay fit n healthy. You are absolutely right shilpa, well written. At this age women need extra care . Even being physically weak they must try to feel young mentally. It ll make them happy which ll support their hormone system to stay fit and healthy.
Being a beauty product dealer, please suggest some good beauty products to be used and mention some beauty tips too! Stay positive and happy. Surround yourself with happy, warm and genuine people.
# Python 2 / TensorFlow 1.x inference script: runs a trained ENet model over
# the PNGs in image_dir and writes colourised segmentation maps to disk.
import tensorflow as tf
import os
import matplotlib.pyplot as plt
from enet import ENet, ENet_arg_scope
from preprocessing import preprocess
from scipy.misc import imsave
import numpy as np

slim = tf.contrib.slim

image_dir = './dataset/test/'
images_list = sorted([os.path.join(image_dir, file) for file in os.listdir(image_dir) if file.endswith('.png')])

checkpoint_dir = "./checkpoint_mfb"
checkpoint = tf.train.latest_checkpoint(checkpoint_dir)

# Architecture hyperparameters; must match the values the checkpoint was trained with.
num_initial_blocks = 1
skip_connections = False
stage_two_repeat = 2

'''
#Labels to colours are obtained from here:
https://github.com/alexgkendall/SegNet-Tutorial/blob/c922cc4a4fcc7ce279dd998fb2d4a8703f34ebd7/Scripts/test_segmentation_camvid.py

However, the road_marking class is collapsed into the road class in the dataset provided.

Classes:
------------
Sky = [128,128,128]
Building = [128,0,0]
Pole = [192,192,128]
Road_marking = [255,69,0]
Road = [128,64,128]
Pavement = [60,40,222]
Tree = [128,128,0]
SignSymbol = [192,128,128]
Fence = [64,64,128]
Car = [64,0,128]
Pedestrian = [64,64,0]
Bicyclist = [0,128,192]
Unlabelled = [0,0,0]
'''
# Class index -> RGB colour for visualisation (CamVid palette above).
label_to_colours = {0: [128,128,128],
                    1: [128,0,0],
                    2: [192,192,128],
                    3: [128,64,128],
                    4: [60,40,222],
                    5: [128,128,0],
                    6: [192,128,128],
                    7: [64,64,128],
                    8: [64,0,128],
                    9: [64,64,0],
                    10: [0,128,192],
                    11: [0,0,0]}

#Create the photo directory
photo_dir = checkpoint_dir + "/test_images"
if not os.path.exists(photo_dir):
    os.mkdir(photo_dir)

#Create a function to convert each pixel label to colour.
def grayscale_to_colour(image):
    # Expand a (360, 480) label map to (360, 480, 3) and map each label to its RGB colour.
    # NOTE(review): the nested per-pixel loop is O(H*W); a vectorised lookup would be faster.
    print 'Converting image...'
    image = image.reshape((360, 480, 1))
    image = np.repeat(image, 3, axis=-1)
    for i in xrange(image.shape[0]):
        for j in xrange(image.shape[1]):
            label = int(image[i][j][0])
            image[i][j] = np.array(label_to_colours[label])
    return image

with tf.Graph().as_default() as graph:
    # TF1 queue-based input pipeline: feed the file list in order, 10 at a time.
    images_tensor = tf.train.string_input_producer(images_list, shuffle=False)
    reader = tf.WholeFileReader()
    key, image_tensor = reader.read(images_tensor)
    image = tf.image.decode_png(image_tensor, channels=3)
    # image = tf.image.resize_image_with_crop_or_pad(image, 360, 480)
    # image = tf.cast(image, tf.float32)
    image = preprocess(image)
    images = tf.train.batch([image], batch_size = 10, allow_smaller_final_batch=True)

    #Create the model inference
    # NOTE(review): is_training=True at inference time — presumably deliberate
    # (e.g. to keep batch-norm in training mode); confirm against the training code.
    with slim.arg_scope(ENet_arg_scope()):
        logits, probabilities = ENet(images,
                                     num_classes=12,
                                     batch_size=10,
                                     is_training=True,
                                     reuse=None,
                                     num_initial_blocks=num_initial_blocks,
                                     stage_two_repeat=stage_two_repeat,
                                     skip_connections=skip_connections)

    variables_to_restore = slim.get_variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

    def restore_fn(sess):
        # Restore weights from the latest checkpoint when the Supervisor starts the session.
        return saver.restore(sess, checkpoint)

    # Per-pixel predicted class index; cast to float so grayscale_to_colour can reshape it.
    predictions = tf.argmax(probabilities, -1)
    predictions = tf.cast(predictions, tf.float32)
    print 'HERE', predictions.get_shape()

    sv = tf.train.Supervisor(logdir=None, init_fn=restore_fn)

    with sv.managed_session() as sess:
        for i in xrange(len(images_list) / 10 + 1):
            segmentations = sess.run(predictions)
            # print segmentations.shape

            for j in xrange(segmentations.shape[0]):
                # Stop once the input queue wraps around and repeats images.
                # NOTE(review): the guard checks index 223 but the original note said
                # "233rd"; confirm which index is actually the first repeat. Also,
                # `break` only exits the inner loop — the outer loop keeps running.
                if i*10 + j == 223:
                    break

                converted_image = grayscale_to_colour(segmentations[j])
                print 'Saving image %s/%s' %(i*10 + j, len(images_list))
                plt.axis('off')
                plt.imshow(converted_image)
                imsave(photo_dir + "/image_%s.png" %(i*10 + j), converted_image)
                # plt.show()
3. UberSELECT (similar to UberBLACK, but the car does not have to be black. More premium than UberX but cheaper than BLACK. Vehicles seat up to four passengers). Up to $750 sign-up bonus for Uber with my referral. Drivers must provide valid TX driver’s license (cannot be out of state). Drivers must pass a screening process (details below). Drivers must have an accepted vehicle (check below for additional info). Your vehicle must be from the year 2003 or newer. All XL vehicles must seat the driver AND at least 6 additional passengers. Vehicle Insurance (vehicular insurance must list a vehicle & driver’s name directly on the policy document itself). Once your driver’s license has been submitted and approved, and your account is activated, you will be asked to enter your social security number in the Driver Dashboard system. This will allow new drivers to undergo a screening process (the process is provided by a third-party system called Checkr, which will contact you directly. You can look into the status of your application here: applicant.checkr.com). VIN (which should match your insurance records). If your name is listed on your insurance, then your name DOES NOT have to appear on your vehicle registration (i.e., it can be the vehicle of a friend or family member). Temporary registration documents (from new vehicle purchases) are permitted, but permanent vehicle registration documents must be uploaded after the temporary term has ceased. Uber requires new drivers to receive a 19-point vehicle inspection of the car they intend to use as their primary ridesharing vehicle. You’ll need to provide Uber with your banking and tax information (so they will have a means to pay you). You MUST provide Uber with a checking account (Credit Unions and Savings Accounts are NOT permitted). Make sure to double check routing & account numbers – they must be correct. Payment information can sometimes take a few days (up to a week) to be added to the system.
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import compat_urllib_parse_urlparse
from ..utils import (
    determine_ext,
    int_or_none,
    xpath_attr,
    xpath_text,
)


class RuutuIE(InfoExtractor):
    """Extractor for ruutu.fi videos, driven by the site's XML media cache."""
    _VALID_URL = r'https?://(?:www\.)?ruutu\.fi/video/(?P<id>\d+)'
    _TESTS = [
        {
            'url': 'http://www.ruutu.fi/video/2058907',
            'md5': 'ab2093f39be1ca8581963451b3c0234f',
            'info_dict': {
                'id': '2058907',
                'ext': 'mp4',
                'title': 'Oletko aina halunnut tietää mitä tapahtuu vain hetki ennen lähetystä? - Nyt se selvisi!',
                'description': 'md5:cfc6ccf0e57a814360df464a91ff67d6',
                'thumbnail': 're:^https?://.*\.jpg$',
                'duration': 114,
                'age_limit': 0,
            },
        },
        {
            'url': 'http://www.ruutu.fi/video/2057306',
            'md5': '065a10ae4d5b8cfd9d0c3d332465e3d9',
            'info_dict': {
                'id': '2057306',
                'ext': 'mp4',
                'title': 'Superpesis: katso koko kausi Ruudussa',
                'description': 'md5:da2736052fef3b2bd5e0005e63c25eac',
                'thumbnail': 're:^https?://.*\.jpg$',
                'duration': 40,
                'age_limit': 0,
            },
        },
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # The media-xml-cache endpoint returns an XML tree describing all
        # available renditions (HLS, HDS, progressive HTTP, RTMP).
        video_xml = self._download_xml(
            'http://gatling.ruutu.fi/media-xml-cache?id=%s' % video_id, video_id)

        formats = []
        processed_urls = []

        def extract_formats(node):
            # Walk the XML tree: '*Files' nodes are containers to recurse into,
            # '*File' nodes carry an actual media URL as their text.
            for child in node:
                if child.tag.endswith('Files'):
                    extract_formats(child)
                elif child.tag.endswith('File'):
                    video_url = child.text
                    # NOTE(review): this `return` abandons the remaining siblings
                    # of the current node once one duplicate/placeholder URL is
                    # seen — presumably intentional for this feed, but a
                    # `continue` would only skip the offending entry; confirm.
                    if (not video_url or video_url in processed_urls or
                            any(p in video_url for p in ('NOT_USED', 'NOT-USED'))):
                        return
                    processed_urls.append(video_url)
                    ext = determine_ext(video_url)
                    if ext == 'm3u8':
                        formats.extend(self._extract_m3u8_formats(
                            video_url, video_id, 'mp4', m3u8_id='hls', fatal=False))
                    elif ext == 'f4m':
                        formats.extend(self._extract_f4m_formats(
                            video_url, video_id, f4m_id='hds', fatal=False))
                    else:
                        # Plain progressive (HTTP*) or RTMP rendition.
                        proto = compat_urllib_parse_urlparse(video_url).scheme
                        if not child.tag.startswith('HTTP') and proto != 'rtmp':
                            continue
                        # Prefer HTTP formats over RTMP.
                        preference = -1 if proto == 'rtmp' else 1
                        label = child.get('label')
                        tbr = int_or_none(child.get('bitrate'))
                        format_id = '%s-%s' % (proto, label if label else tbr) if label or tbr else proto
                        if not self._is_valid_url(video_url, video_id, format_id):
                            continue
                        # 'resolution' is "WxH"; default 'x' yields (None, None).
                        width, height = [int_or_none(x) for x in child.get('resolution', 'x').split('x')[:2]]
                        formats.append({
                            'format_id': format_id,
                            'url': video_url,
                            'width': width,
                            'height': height,
                            'tbr': tbr,
                            'preference': preference,
                        })

        extract_formats(video_xml.find('./Clip'))

        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': xpath_attr(video_xml, './/Behavior/Program', 'program_name', 'title', fatal=True),
            'description': xpath_attr(video_xml, './/Behavior/Program', 'description', 'description'),
            'thumbnail': xpath_attr(video_xml, './/Behavior/Startpicture', 'href', 'thumbnail'),
            'duration': int_or_none(xpath_text(video_xml, './/Runtime', 'duration')),
            'age_limit': int_or_none(xpath_text(video_xml, './/AgeLimit', 'age limit')),
            'formats': formats,
        }
Rear screen taxi advertising is a fantastic way to advertise your brand. This can be a stand-alone service or part of a broader managed marketing campaign. It can also be used to reinforce any experiential marketing experience. This type of advertising has a fantastic effect when viewed by shoppers and office workers around town. It mainly helps with visual brand reinforcement as it is seen at eye level for most consumers. The technology used also allows the taxi driver unimpaired vision through the advertisement. It can be displayed as a full window or as a top or bottom strip depending on your design. We can create a fresh design for your campaign or utilise your artwork. Whatever works for the product or service that you would like to showcase for your company. You can also extend the brand reinforcement with a rear screen and full livery combined for maximum effect. Contact us for prices and availability for either full livery, supersides, tip seat advertising or rear screen advertising.
import threading
import re

from bs4 import BeautifulSoup
import requests

from global_variable import HAS_QT, HEADERS

if HAS_QT:
    from global_variable import SENDER


class Novel():
    """Scrape one light-novel volume from lknovel.lightnovel.cn for epub creation.

    Attributes:
        url: A string, the volume's index page URL.
        single_thread: A bool; when True chapters are fetched sequentially.
        chapters: A list of (number, name, content) tuples, one per chapter.
        volume_name: A string, the volume title.
        volume_number: A string, the volume number.
        author: A string, the author name.
        illustrator: A string, the illustrator name.
        introduction: A string, the volume introduction (HTML-stripped).
        cover_url: A string, the absolute cover image URL.
        chapters_links: A list of chapter page URLs.
        base_path: A string, the epub temp path (set externally).
    """

    def __init__(self, url, single_thread):
        self.url = url
        self.single_thread = single_thread

        self.chapters = []
        self.volume_name = ''
        self.volume_number = ''
        self.author = ''
        self.illustrator = ''
        self.introduction = ''
        self.cover_url = ''
        self.chapters_links = []
        self.base_path = ''

    @staticmethod
    def parse_page(url):
        """Fetch *url* and parse it with BeautifulSoup.

        Args:
            url: A string, the page URL to fetch.

        Returns:
            A BeautifulSoup document.
        """
        r = requests.get(url, headers=HEADERS)
        # Force utf-8: the site does not always declare its encoding.
        r.encoding = 'utf-8'
        return BeautifulSoup(r.text)

    @staticmethod
    def find_chapter_links(soup):
        """Extract chapter links from the volume index page.

        Args:
            soup: A parsed index page.

        Returns:
            A list of chapter URL strings, in page order.
        """
        temp_chapter_links = soup.select(
            'body div.content div.container div.row-fluid div.span9 div.well div.row-fluid ul.lk-chapter-list li')
        # The href is pulled out of the serialized <li> markup with a regex
        # rather than via the tag's attributes.
        find_chapter_links = re.compile(r'<a href="(.*)">')
        chapter_links = []
        for i in temp_chapter_links:
            chapter_links.append(find_chapter_links.search(str(i)).group(1))
        return chapter_links

    def find_volume_name_number(self, soup):
        # The <h1> text is "<name>\n<number>\n..."; indexes 1 and 2 of the
        # newline split hold the name and number respectively.
        name_and_number = str(soup.select('h1.ft-24 strong'))[1:-1].replace('</strong>', '').split('\n')
        self.volume_name = name_and_number[1].strip()
        self.volume_number = name_and_number[2].strip()
        self.print_info('Volume_name:' + self.volume_name + ',Volume_number:' + self.volume_number)

    @property
    def book_name(self):
        """Full book name: volume name followed by volume number."""
        return self.volume_name + ' ' + self.volume_number

    def find_author_illustrator(self, soup):
        # Detail-table cells 3 and 5 hold author and illustrator
        # respectively — assumes the site's fixed table layout.
        temp_author_name = soup.select('table.lk-book-detail td')
        find_author_name = re.compile(r'target="_blank">(.*)</a></td>')
        find_illustrator_name = re.compile(r'<td>(.*)</td>')
        self.author = find_author_name.search(str(temp_author_name[3])).group(1)
        self.illustrator = find_illustrator_name.search(str(temp_author_name[5])).group(1)
        self.print_info('Author:' + self.author + '\nillustrator:' + self.illustrator)

    def find_introduction(self, soup):
        # Newlines are stripped first so the regex can match across what
        # were multiple source lines.
        temp_introduction = soup.select(
            'html body div.content div.container div.row-fluid div.span9 div.well div.row-fluid div.span10 p')
        find_introduction = re.compile(r'<p style="width:42em; text-indent: 2em;">(.*)</p>')
        self.introduction = find_introduction.search(str(temp_introduction).replace('\n', '')).group(1)

    def find_cover_url(self, soup):
        temp_cover_url = soup.select(
            'div.container div.row-fluid div.span9 div.well div.row-fluid div.span2 div.lk-book-cover a')
        find_cover_url = re.compile(r'<img src="(.*)"/>')
        # The scraped src is site-relative; prefix the host to get an
        # absolute URL.
        self.cover_url = 'http://lknovel.lightnovel.cn' + find_cover_url.search(str(temp_cover_url)).group(1)

    def extract_epub_info(self):
        """Fetch the index page and populate the volume's basic info.

        Fills volume name/number, author, illustrator, introduction,
        cover URL and the chapter link list from self.url.
        """
        soup = self.parse_page(self.url)
        self.find_volume_name_number(soup)
        self.find_author_illustrator(soup)
        self.find_introduction(soup)
        self.find_cover_url(soup)
        self.chapters_links = self.find_chapter_links(soup)

    @staticmethod
    def get_new_chapter_name(soup):
        """Get the formal chapter name from a chapter page.

        Args:
            soup: A parsed chapter page.

        Returns:
            A string: the chapter heading with a space inserted after the
            chapter-number marker ('章').
        """
        chapter_name = soup.select('h3.ft-20')[0].get_text()
        new_chapter_name = chapter_name[:chapter_name.index('章') + 1] + ' ' + chapter_name[chapter_name.index('章') + 1:]
        return new_chapter_name

    @staticmethod
    def print_info(info):
        """Print a status line and, when running under Qt, emit it to the GUI."""
        try:
            print(info)
            if HAS_QT:
                SENDER.sigChangeStatus.emit(info)
        except UnicodeDecodeError as e:
            # Console encodings that cannot represent the text are ignored.
            print('Ignored:', e)

    @staticmethod
    def get_content(soup):
        """Extract chapter contents from a chapter page.

        Args:
            soup: A parsed chapter page.

        Returns:
            A list of strings: plain-text paragraphs, with image lines
            replaced by their picture URL.
        """
        content = []
        temp_chapter_content = soup.select('div.lk-view-line')
        find_picture_url = re.compile(r'data-cover="(.*)" src="')
        for line in temp_chapter_content:
            if 'lk-view-img' not in str(line):
                content.append(line.get_text().strip())
            else:
                picture_url = find_picture_url.search(str(line)).group(1)
                content.append(picture_url)
        return content

    def add_chapter(self, chapter):
        """Append one chapter.

        chapter structure: a tuple (chapter number, chapter name, content).
        """
        self.chapters.append(chapter)

    def extract_chapter(self, url, number):
        """Download one chapter and store it on this Novel.

        Args:
            url: A string, the chapter page URL.
            number: An int, the chapter's position (used later for ordering).

        Raises:
            Re-raises any scraping error after notifying the GUI (if any).
        """
        try:
            soup = self.parse_page(url)
            new_chapter_name = self.get_new_chapter_name(soup)
            self.print_info(new_chapter_name)
            content = self.get_content(soup)
            self.add_chapter((number, new_chapter_name, content))
        except Exception as e:
            if HAS_QT:
                SENDER.sigWarningMessage.emit('错误', str(e) + '\nat:' + url)
                SENDER.sigButton.emit()
            print(self.url)
            raise e

    def get_chapter_content(self):
        """Download every chapter, one thread per chapter unless single_thread.

        NOTE(review): in threaded mode chapters complete in arbitrary order;
        downstream code presumably sorts by the chapter number stored in each
        tuple — confirm at the epub-assembly site.
        """
        th = []

        if not self.single_thread:
            for i, link in enumerate(self.chapters_links):
                t = threading.Thread(target=self.extract_chapter, args=(link, i))
                t.start()
                th.append(t)
            for t in th:
                t.join()
        else:
            for i, link in enumerate(self.chapters_links):
                self.extract_chapter(link, i)

    def get_novel_information(self):
        """Fetch volume metadata and all chapter contents."""
        self.extract_epub_info()
        self.get_chapter_content()
        self.print_info('novel信息获取完成')

    def novel_information(self):
        """Return all collected volume data as a dict."""
        return {'chapter': self.chapters, 'volume_name': self.volume_name, 'volume_number': self.volume_number,
                'book_name': self.book_name, 'author': self.author, 'illustrator': self.illustrator,
                'introduction': self.introduction, 'cover_url': self.cover_url}
Executive Mosaic is pleased to introduce Kevin Phillips, CEO and president of ManTech, as an inductee into the 2019 Wash100 — Executive Mosaic’s annual selection of the most influential voices in the government contracting arena — for his leadership and vision to advance ManTech in cybersecurity, business development and numerous other government sectors. This marks the third consecutive Wash100 award for Phillips. ManTech secured major contracts in 2018, including a potential 10-year, $959M award in July to manage enterprise information technology systems for a Department of Defense agency; and a potential six-year, $668M task order in September to deliver cybersecurity support to Group E federal agencies under the Department of Homeland Security’s Continuous Diagnostics and Mitigation program. Phillips noted that ManTech has supported over 65 agencies for the two phases of the CDM effort for more than three years. In November 2018, Phillips discussed the implications of an increased defense budget for fiscal years 2018 and 2019 based on the need for openness to address the government’s current challenges. “We see a growing desire for new thought leadership and innovation around technology government-wide, and a much broader dialogue between government and industry on how to accomplish mission objectives as partners,” he said in an interview with The Voice of Technology magazine. “We are seeing customers apply strong focus within the space and cyber domains with both policy and execution,” Phillips told investors during ManTech’s earnings call in August. Phillips mentioned that the company’s investments in engineering, systems, workforce, business development and acquisitions are priorities to continue supporting growth and make a potential impact in the government contracting sector. “There’s a potential for fiscal 2019 to begin under a continuing resolution, which could have a slight impact on the timing of new contract awards,” he added.
Phillips joined ManTech following its acquisition of CTX in 2002, and has served across various leadership positions such as chief financial officer, corporate vice president, chief of staff and assistant chairman. In January 2018, he was appointed CEO of ManTech after holding the roles of president and chief operating officer with the company since November 2016. He is a member of the Northern Virginia Technology Council’s board of directors, as well as the William & Mary Foundation’s board of trustees. He spent 10 years with the U.S. Army Reserves prior to joining the private sector.
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals

import unittest
import frappe
from frappe.utils import global_search
from frappe.test_runner import make_test_objects
import frappe.utils


class TestGlobalSearch(unittest.TestCase):
    """Tests for frappe's __global_search table: indexing, updating and
    deleting Event documents, and HTML-stripping of indexed field values."""

    def setUp(self):
        # Ensure the search table exists and start from an empty index.
        global_search.setup_global_search_table()
        self.assertTrue('__global_search' in frappe.db.get_tables())
        doctype = "Event"
        global_search.reset()
        from frappe.custom.doctype.property_setter.property_setter import make_property_setter
        # Mark which Event fields participate in global search for these tests.
        make_property_setter(doctype, "subject", "in_global_search", 1, "Int")
        make_property_setter(doctype, "event_type", "in_global_search", 1, "Int")
        make_property_setter(doctype, "roles", "in_global_search", 1, "Int")
        make_property_setter(doctype, "repeat_on", "in_global_search", 0, "Int")

    def tearDown(self):
        # Remove the property setters and all test data, then restore the
        # standard Event fixtures for subsequent tests.
        frappe.db.sql('delete from `tabProperty Setter` where doc_type="Event"')
        frappe.clear_cache(doctype='Event')
        frappe.db.sql('delete from `tabEvent`')
        frappe.db.sql('delete from __global_search')
        make_test_objects('Event')
        frappe.db.commit()

    def insert_test_events(self):
        """Insert three Events with known, searchable subjects."""
        frappe.db.sql('delete from tabEvent')
        phrases = ['"The Sixth Extinction II: Amor Fati" is the second episode of the seventh season of the American science fiction.',
            'After Mulder awakens from his coma, he realizes his duty to prevent alien colonization. ',
            'Carter explored themes of extraterrestrial involvement in ancient mass extinctions in this episode, the third in a trilogy.']
        for text in phrases:
            frappe.get_doc(dict(
                doctype='Event',
                subject=text,
                repeat_on='Every Month',
                starts_on=frappe.utils.now_datetime())).insert()
        frappe.db.commit()

    def test_search(self):
        # Searching a word from a subject must surface that document's content.
        self.insert_test_events()
        results = global_search.search('awakens')
        self.assertTrue('After Mulder awakens from his coma, he realizes his duty to prevent alien colonization. ' in results[0].content)

        results = global_search.search('extraterrestrial')
        self.assertTrue('Carter explored themes of extraterrestrial involvement in ancient mass extinctions in this episode, the third in a trilogy.' in results[0].content)

    def test_update_doc(self):
        # Saving a doc must re-index it so the new subject is searchable.
        self.insert_test_events()
        test_subject = 'testing global search'
        event = frappe.get_doc('Event', frappe.get_all('Event')[0].name)
        event.subject = test_subject
        event.save()
        frappe.db.commit()

        results = global_search.search('testing global search')
        self.assertTrue('testing global search' in results[0].content)

    def test_update_fields(self):
        # 'repeat_on' is excluded in setUp, so its value is unsearchable
        # until the field is flagged and the doctype re-indexed.
        self.insert_test_events()
        results = global_search.search('Every Month')
        self.assertEquals(len(results), 0)
        doctype = "Event"
        from frappe.custom.doctype.property_setter.property_setter import make_property_setter
        make_property_setter(doctype, "repeat_on", "in_global_search", 1, "Int")
        global_search.rebuild_for_doctype(doctype)
        results = global_search.search('Every Month')
        self.assertEquals(len(results), 3)

    def test_delete_doc(self):
        # Deleting a doc must remove its entry from the search index.
        self.insert_test_events()

        event_name = frappe.get_all('Event')[0].name
        event = frappe.get_doc('Event', event_name)
        test_subject = event.subject
        results = global_search.search(test_subject)
        self.assertEquals(len(results), 1)

        frappe.delete_doc('Event', event_name)
        results = global_search.search(test_subject)
        self.assertEquals(len(results), 0)

    def test_insert_child_table(self):
        # NOTE(review): despite the name this only inserts documents and
        # makes no assertions — it passes as long as insertion raises nothing.
        frappe.db.sql('delete from tabEvent')
        phrases = ['Hydrus is a small constellation in the deep southern sky. ',
            'It was first depicted on a celestial atlas by Johann Bayer in his 1603 Uranometria. ',
            'The French explorer and astronomer Nicolas Louis de Lacaille charted the brighter stars and gave their Bayer designations in 1756. ',
            'Its name means "male water snake", as opposed to Hydra, a much larger constellation that represents a female water snake. ',
            'It remains below the horizon for most Northern Hemisphere observers.',
            'The brightest star is the 2.8-magnitude Beta Hydri, also the closest reasonably bright star to the south celestial pole. ',
            'Pulsating between magnitude 3.26 and 3.33, Gamma Hydri is a variable red giant some 60 times the diameter of our Sun. ',
            'Lying near it is VW Hydri, one of the brightest dwarf novae in the heavens. ',
            'Four star systems have been found to have exoplanets to date, most notably HD 10180, which could bear up to nine planetary companions.']

        for text in phrases:
            doc = frappe.get_doc({
                'doctype':'Event',
                'subject': text,
                'starts_on': frappe.utils.now_datetime()
            })
            doc.insert()

        frappe.db.commit()

    def test_get_field_value(self):
        """Indexed HTML field values must have <style>/<script> and markup
        stripped, leaving only labelled plain text."""
        cases = [
            {
                "case_type": "generic",
                "data": '''
                    <style type="text/css">
                    p.p1
                        {margin: 0.0px 0.0px 0.0px 0.0px; font: 14.0px 'Open Sans'; -webkit-text-stroke: #000000}
                    span.s1
                        {font-kerning: none}
                    </style>
                    <script>
                    var options = {
                        foo: "bar"
                    }
                    </script>
                    <p class="p1"><span class="s1">Contrary to popular belief, Lorem Ipsum is not simply random text. It has roots in a piece of classical Latin literature from 45 BC, making it over 2000 years old. Richard McClintock, a Latin professor at Hampden-Sydney College in Virginia, looked up one of the more obscure Latin words, consectetur, from a Lorem Ipsum passage, and going through the cites of the word in classical literature, discovered the undoubtable source. Lorem Ipsum comes from sections 1.10.32 and 1.10.33 of "de Finibus Bonorum et Malorum" (The Extremes of Good and Evil) by Cicero, written in 45 BC. This book is a treatise on the theory of ethics, very popular during the Renaissance. The first line of Lorem Ipsum, "Lorem ipsum dolor sit amet..", comes from a line in section 1.10.32.</span></p>
                ''',
                "result": ('Description : Contrary to popular belief, Lorem Ipsum is not simply random text. It has roots in a piece of classical '
                    'Latin literature from 45 BC, making it over 2000 years old. Richard McClintock, a Latin professor at Hampden-Sydney College in Virginia, '
                    'looked up one of the more obscure Latin words, consectetur, from a Lorem Ipsum passage, and going through the cites of the word '
                    'in classical literature, discovered the undoubtable source. Lorem Ipsum comes from sections 1.10.32 and 1.10.33 of "de Finibus Bonorum '
                    'et Malorum" (The Extremes of Good and Evil) by Cicero, written in 45 BC. This book is a treatise on the theory of ethics, very popular '
                    'during the Renaissance. The first line of Lorem Ipsum, "Lorem ipsum dolor sit amet..", comes from a line in section 1.10.32.')
            },
            {
                "case_type": "with_style",
                "data": '''
                    <style type="text/css">
                    p.p1
                        {margin: 0.0px 0.0px 0.0px 0.0px; font: 14.0px 'Open Sans'; -webkit-text-stroke: #000000}
                    span.s1
                        {font-kerning: none}
                    </style>Lorem Ipsum Dolor Sit Amet
                ''',
                "result": "Description : Lorem Ipsum Dolor Sit Amet"
            },
            {
                "case_type": "with_script",
                "data": '''
                    <script>
                    var options = {
                        foo: "bar"
                    }
                    </script>
                    Lorem Ipsum Dolor Sit Amet
                ''',
                "result": "Description : Lorem Ipsum Dolor Sit Amet"
            }
        ]
        for case in cases:
            doc = frappe.get_doc({
                'doctype':'Event',
                'subject': 'Lorem Ipsum',
                'starts_on': frappe.utils.now_datetime(),
                'description': case["data"]
            })
            field_as_text = ''
            for field in doc.meta.fields:
                if field.fieldname == 'description':
                    field_as_text = global_search.get_formatted_value(doc.description, field)
            self.assertEquals(case["result"], field_as_text)
Papaw: Top of the DAD game! When you're at the top of the DAD game, they call you Papaw! The perfect Father's Day gift for the Papaw in your life!
#!/usr/bin/env python

import sys
import os
import argparse
import netCDF4 as nc
import numpy as np
import pandas as pd
import re
import datetime

import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

from lib_util import time_dim_to_pandas_periods

"""
What this script does:

Plot a full timeseries fields from mom ocean_scalar.nc

Example:
"""


def main():
    """Read a scalar field from one or more MOM ocean_scalar.nc files and
    write a PNG timeseries plot to --output_dir.

    Returns None (success) to sys.exit().
    """

    parser = argparse.ArgumentParser()
    parser.add_argument('input_files', nargs='+',
                        help='The MOM ocean_scalar.nc input data files.')
    parser.add_argument('--field', default='temp_global_ave',
                        help='The fields included in this plot.')
    parser.add_argument('--output_dir', default='./',
                        help='Directory where plots will be written.')

    args = parser.parse_args()

    title = None
    ylabel = None
    pieces = []

    # Go through input files one at a time, collecting a Series per file.
    # (The title/units from the last file win; all files are assumed to
    # describe the same field.)
    for path in args.input_files:
        with nc.Dataset(path) as f:
            data_var = f.variables[args.field]
            title = data_var.long_name
            ylabel = data_var.units

            # Calculate the times/dates, these will be our indices.
            periods = time_dim_to_pandas_periods(f.variables['time'])

            data = data_var[:]
            # Scalar diagnostics are (time, 1); flatten to 1-D.
            assert data.shape[1] == 1
            data = data.flatten()

            pieces.append(pd.Series(data, periods))

    # Concatenate once instead of Series.append() in the loop:
    # append was quadratic and has been removed in pandas 2.0.
    ts = pd.concat(pieces).sort_index()

    ts.plot()
    plt.xlabel('Time (years)')
    plt.ylabel(ylabel)
    plt.title(title)
    fig = plt.gcf()
    fig.set_size_inches(9, 4.5)

    plt.savefig(os.path.join(args.output_dir, '{}.png'.format(args.field)))


if __name__ == '__main__':
    sys.exit(main())
SBEM is the National Calculation Methodology (NCM) tool used for non-domestic buildings to comply with the Energy Performance of Buildings Directive (EPBD) and Building Regulations Part L2A and L2B. The simplified tool produces comparative and indicative CO2 emissions rates for commercial buildings based on regulated energy use and can also be used to produce Energy Performance Certificates (EPC). SBEM was developed by BRE for the DCLG department as a compliance tool for regulatory purposes. The same issues persist with SBEM as with SAP. Whilst it undoubtedly increased the energy efficiency of the construction industry, it has since outgrown its original purpose and intentions. We offer a choice between SBEM and Dynamic Simulation Modelling (DSM), also known as Level 5, to produce the regulatory documentation and issue EPCs. How could Consultergy help you with SBEM? Consultergy could help you achieve compliance with Building Regulations Part L for all your non-domestic projects in order to comply with Building Control, local planning and mandatory government regulations. If you are looking for a quick turnaround and better economic value, SBEM Levels 3 and 4 could be the right solution for you. We are fully dedicated to offering an honest, open and objective view of the positive aspects and drawbacks of the methodology, and would be more than happy to guide you through these in detail for your next project.
##########################################################################
#
#   MRC FGU Computational Genomics Group
#
#   $Id$
#
#   Copyright (C) 2009 Andreas Heger
#
#   This program is free software; you can redistribute it and/or
#   modify it under the terms of the GNU General Public License
#   as published by the Free Software Foundation; either version 2
#   of the License, or (at your option) any later version.
#
#   This program is distributed in the hope that it will be useful,
#   but WITHOUT ANY WARRANTY; without even the implied warranty of
#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#   GNU General Public License for more details.
#
#   You should have received a copy of the GNU General Public License
#   along with this program; if not, write to the Free Software
#   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
##########################################################################
'''
gpipe/translate_forward2backward.py -
======================================================

:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python

Purpose
-------

Translate forward-strand coordinates to backward-strand coordinates.

Reads tab-separated prediction records from stdin; for records on the
negative strand, the from/to coordinates are mirrored using the contig
sizes read from the file "contig_sizes". All records are re-emitted to
stdout.

NOTE: this is legacy Python 2 code (print statement, dict.has_key,
"except cls, var" syntax); it will not run under Python 3.

Usage
-----

Example::

   python gpipe/translate_forward2backward.py --help

Type::

   python gpipe/translate_forward2backward.py --help

for command line help.

Documentation
-------------

Code
----

'''
import os
import sys
import string
import re
import getopt
import tempfile
import time
import popen2

USAGE = """python %s [OPTIONS]

Version: $Id: gpipe/translate_forward2backward.py 18 2005-08-09 15:32:24Z andreas $

Wrapper for running gene predictions.

Options:
-h, --help                      print this message.
-v, --verbose=                  loglevel.
""" % sys.argv[0]

# Accepted long/short options. NOTE(review): most of these options are
# parsed but never acted upon below — presumably copied from a sibling
# gpipe script; confirm before relying on any of them.
param_long_options = ["verbose=", "help",
                      "bracket-increment=", "query-border=",
                      "border-refinement=",
                      "exit-identical", "min-score=", "method=",
                      "recursive", "refinement", "probe", "incremental",
                      "exons=", "mask-probe", "format=",
                      "probe-options=",
                      "version"]

param_short_options = "v:hi:b:em:procx:af:"

# Column indices (0-based) of sbjct_token, strand, from, to in the input.
param_columns = (1, 2, 3, 4)

# File mapping contig name -> (size, offset), tab-separated.
param_filename_contigs = "contig_sizes"


def main(argv=None):
    """script main.

    parses command line options in sys.argv, unless *argv* is given.
    """

    if argv is None:
        argv = sys.argv

    try:
        optlist, args = getopt.getopt(
            sys.argv[1:], param_short_options, param_long_options)
    except getopt.error, msg:
        print USAGE, msg
        sys.exit(2)

    for o, a in optlist:
        if o in ("-v", "--verbose"):
            # NOTE(review): assigns a local that is never read afterwards.
            param_loglevel = int(a)
        elif o in ("--version", ):
            print "version="
            sys.exit(0)
        elif o in ("-h", "--help"):
            print USAGE
            sys.exit(0)
        elif o in ("-b", "--query-border"):
            # NOTE(review): local assignment, also unused below.
            param_query_border = int(a)

    # Read contig sizes: "name<TAB>size<TAB>offset", '#' lines are comments.
    contig_sizes = {}

    infile = open(param_filename_contigs, "r")
    for line in infile:
        if line[0] == "#":
            continue
        sbjct_token, size, offset = line[:-1].split("\t")
        contig_sizes[sbjct_token] = int(size)

    for line in sys.stdin:
        if line[0] == "#":
            continue

        data = line[:-1].split("\t")

        sbjct_token, sbjct_strand, sbjct_from, sbjct_to = (
            data[param_columns[0]], data[param_columns[1]],
            data[param_columns[2]], data[param_columns[3]])

        sbjct_from, sbjct_to = int(sbjct_from), int(sbjct_to)

        # Mirror coordinates for negative-strand records; both "-" and
        # "-1" spellings of the strand are accepted.
        if sbjct_strand == "-" or sbjct_strand == "-1":
            if contig_sizes.has_key(sbjct_token):
                size = contig_sizes[sbjct_token]
                sbjct_from, sbjct_to = size - sbjct_to, size - sbjct_from

        data[param_columns[2]] = sbjct_from
        data[param_columns[3]] = sbjct_to

        print string.join(map(str, data), "\t")


if __name__ == "__main__":
    sys.exit(main(sys.argv))
Waiting at a red light at the intersection of Rt 97 and Conant St. Imagine my shock when I came across this ghost of a racecar stopped at a traffic light just in front of me. I wasn’t really chasing it, as it really seemed to drive along in front of me. The driver took it down my regular home commute route through part of Beverly, MA. Sneaking snapshots the best I can. My best guess is that it’s a Bugatti Type 35 (A, B, C?), a French racing car from the 1920’s. The owner keeps it in “Bleu de France” — the French counterpart to “British Racing Green.” Just as the British colored their racing cars green, and the Italians used reds (like red Ferrari’s), the French racing color was blue with white letters. Here's where he got away. Had to wait for traffic. I was on my way to work, when I saw this Ford De Luxe sitting in the Stop n Shop parking lot. In a smell-the-roses moment, I got into a different lane, and waited for an extra red light to take this shot. No big deal really, I still got to work well on time. Fortunately, it was still fairly early in the morning with the sun still rising. The car itself was facing east, so the lighting was right. Apparently, a convertible version was used in the movie, Back to The Future. So I drove my son over to Burnett’s Garage in Danvers, Mass, just for a drive by. Burnett’s specializes in MG’s. In my high school days, the lot would be full of British two-seaters. I’m sure MG’s are getting tougher and tougher to come by these days. At this time, there were merely a few Midgets on the lot. Sitting off on its own was this green MG Magnette from the 1950’s or 1960’s. I’m guessing that this is a 1952 Mercury Monterey waiting for a new windshield and rear window.
import csv
import os
import json
import gspread
import datetime
import re
# NOTE(review): unicodecsv deliberately shadows the stdlib csv import above
# (Python 2 unicode-safe drop-in); the first import is effectively dead.
import unicodecsv as csv

from app import db
from util import safe_commit
from emailer import send
from emailer import create_email
from endpoint import Endpoint
from repository import Repository
from repo_request import RepoRequest


def get_repo_request_rows():
    """Read all rows of the repository-request Google Sheet.

    Returns a list of row lists (including the header row) from the
    sheet's first worksheet. Requires GOOGLE_SHEETS_CREDS_JSON in the
    environment.
    """
    from oauth2client.service_account import ServiceAccountCredentials

    # this file inspired by https://www.twilio.com/blog/2017/02/an-easy-way-to-read-and-write-to-a-google-spreadsheet-in-python.html

    # use creds to create a client to interact with the Google Drive API
    scopes = ['https://spreadsheets.google.com/feeds']
    json_creds = os.getenv("GOOGLE_SHEETS_CREDS_JSON")

    creds_dict = json.loads(json_creds)

    # hack to get around ugly new line escaping issues
    # this works for me, but later found links to what might be cleaner solutions:
    # use ast.literal_eval?  https://github.com/googleapis/google-api-go-client/issues/185#issuecomment-422732250
    # or maybe dumping like this might fix it? https://coreyward.svbtle.com/how-to-send-a-multiline-file-to-heroku-config

    creds_dict["private_key"] = creds_dict["private_key"].replace("\\\\n", "\n")

    # now continue
    creds = ServiceAccountCredentials.from_json_keyfile_dict(creds_dict, scopes)
    client = gspread.authorize(creds)

    # Find a workbook by url
    spreadsheet = client.open_by_url("https://docs.google.com/spreadsheets/d/1RcQuetbKVYRRf0GhGZQi38okY8gT1cPUs6l3RM94yQo/edit#gid=704459328")
    sheet = spreadsheet.sheet1

    # Extract and print all of the values
    rows = sheet.get_all_values()
    print(rows[0:1])
    return rows


def save_repo_request_rows(rows):
    """Persist sheet rows as RepoRequest records (merge by seeded id).

    Also writes the parsed rows to out.csv as a local audit copy.
    """

    with open('out.csv','wb') as f:

        w = csv.DictWriter(f, fieldnames=RepoRequest.list_fieldnames(), encoding='utf-8-sig')

        for row in rows[1:]:  # skip header row
            my_repo_request = RepoRequest()
            my_repo_request.set_id_seed(row[0])
            column_num = 0
            for fieldname in RepoRequest.list_fieldnames():
                if fieldname != "id":
                    setattr(my_repo_request, fieldname, row[column_num])
                    column_num += 1

            w.writerow(my_repo_request.to_dict())
            print u"adding repo request {}".format(my_repo_request)
            db.session.merge(my_repo_request)

    safe_commit(db)


def add_endpoint(my_request):
    """Create an Endpoint (and Repository, if none matches) for a RepoRequest.

    Skips requests without a pmh_url or with an existing endpoint.
    Normalizes the OAI-PMH URL, merges the records, commits, and sends the
    automated welcome email. Returns the Endpoint, or None when skipped.
    """

    if not my_request.pmh_url:
        return None

    endpoint_with_this_id = Endpoint.query.filter(Endpoint.repo_request_id==my_request.id).first()
    if endpoint_with_this_id:
        print u"one already matches {}".format(my_request.id)
        return None

    raw_endpoint = my_request.pmh_url
    clean_endpoint = raw_endpoint.strip()
    clean_endpoint = clean_endpoint.strip("?")
    # Drop any "?verb=..." query and a pasted-in test-harness prefix.
    clean_endpoint = re.sub(u"\?verb=.*$", "", clean_endpoint, re.IGNORECASE)
    clean_endpoint = re.sub(u"^https?://api\.unpaywall\.org/repository/endpoint/test/", "", clean_endpoint, re.IGNORECASE)
    print u"raw endpoint is {}, clean endpoint is {}".format(raw_endpoint, clean_endpoint)

    matching_endpoint = Endpoint()
    matching_endpoint.pmh_url = clean_endpoint

    repo_matches = my_request.matching_repositories()
    if repo_matches:
        matching_repo = repo_matches[0]
        print u"yay! for {} {} matches repository {}".format(
            my_request.institution_name, my_request.repo_name, matching_repo)
    else:
        print u"no matching repository for {}: {}".format(
            my_request.institution_name, my_request.repo_name)
        matching_repo = Repository()

    # overwrite stuff with request
    matching_repo.institution_name = my_request.institution_name
    matching_repo.repository_name = my_request.repo_name
    matching_repo.home_page = my_request.repo_home_page
    matching_endpoint.repo_unique_id = matching_repo.id
    matching_endpoint.email = my_request.email
    matching_endpoint.repo_request_id = my_request.id
    matching_endpoint.ready_to_run = True
    matching_endpoint.set_identify_and_initial_query()

    db.session.merge(matching_endpoint)
    db.session.merge(matching_repo)
    print u"added {} {}".format(matching_endpoint, matching_repo)
    print u"see at url http://unpaywall.org/sources/repository/{}".format(matching_endpoint.id)

    safe_commit(db)
    print "saved"

    print "now sending email"
    # get the endpoint again, so it gets with all the meta info etc
    matching_endpoint = Endpoint.query.get(matching_endpoint.id)
    matching_endpoint.contacted_text = "automated welcome email"
    matching_endpoint.contacted = datetime.datetime.utcnow().isoformat()
    safe_commit(db)
    send_announcement_email(matching_endpoint)

    print "email sent"
    return matching_endpoint


def send_announcement_email(my_endpoint):
    """Send the automated 'your repository is being indexed' welcome email."""
    my_endpoint_id = my_endpoint.id
    email_address = my_endpoint.email
    repo_name = my_endpoint.repo.repository_name
    institution_name = my_endpoint.repo.institution_name
    print my_endpoint_id, email_address, repo_name, institution_name
    # prep email
    email = create_email(email_address,
                 "Update on your Unpaywall indexing request (ref: {} )".format(my_endpoint_id),
                 "repo_pulse",
                 {"data": {"endpoint_id": my_endpoint_id, "repo_name": repo_name, "institution_name": institution_name}},
                 [])
    send(email, for_real=True)


if __name__ == "__main__":
    rows = get_repo_request_rows()
    save_repo_request_rows(rows)

    my_requests = RepoRequest.query.all()
    for my_request in my_requests:
        if not my_request.is_duplicate:
            add_endpoint(my_request)

    # my_endpoints = Endpoint.query.filter(Endpoint.contacted_text=="automated welcome email")
    # for my_endpoint in my_endpoints:
    #     print "would send an email to {}".format(my_endpoint)
    #     send_announcement_email(my_endpoint)
Welcome to Lovelace Internet Media Marketing. Founded in 2000, this web development and management firm, at first specialists in the marine industry, has expanded into consumer products, retail sales and financial marketing. Our services include website design, construction, management and Internet marketing including SEO and PPC. Visit "Clients" to view a past and present sampling of several special interest websites in our portfolio. "WebMart" is a virtual marketplace of favorite products, and eBusinesses. "eMarket" contains interesting marketing information on the fast moving digital world, a multi-year perspective and recreational boating trends. "PhotoLog" is a collection of snapshots of friends at work or having fun. Copyright ©2000-2018 LIMMS LLC. All rights reserved.
import os, sys, tempfile


class Scratch (object):
    """A disk-backed scratch file that can be written, read back and reopened.

    The backing file is created with tempfile.mkstemp() and persists until
    destruct() is called; `__del__` deliberately does NOT clean it up.
    """

    def __init__(self):
        # mkstemp returns (fd, path); keep the path, wrap the fd so it is
        # not leaked, and close immediately — the file is reopened lazily.
        tup = tempfile.mkstemp()
        self._path = tup[1]
        self._file = os.fdopen(tup[0])
        self._file.close()

    def __del__(self):
        # Intentionally a no-op: deleting the temp file during interpreter
        # shutdown is unreliable, so callers must invoke destruct() themselves.
        pass

    def destruct(self):
        """Close and delete the backing file; the object is unusable after."""
        self.close()
        os.unlink(self._path)
        self._path = None
        self._file = None

    def close(self):
        """Flush and close the current file handle if it is open."""
        if not self._file.closed:
            self._file.flush()
            self._file.close()

    def read(self):
        """Return the file's contents from the beginning.

        NOTE(review): if the handle was closed, _reopen() uses mode 'w+',
        which truncates — reading after close() returns ''. Kept as-is to
        preserve existing behavior; confirm whether 'r+' was intended.
        """
        if self._file.closed:
            self._reopen()
        self._file.seek(0)
        return self._file.read()

    def _reopen(self):
        # 'w+' truncates and allows both writing and reading back.
        self._file = open(self._path, 'w+')

    def getopened(self):
        """Close any existing handle and return a freshly (re)opened one."""
        self.close()
        self._reopen()
        return self._file
    opened = property(getopened, NotImplemented, NotImplemented, "opened file - read only")

    def getfile(self):
        """Return the current file handle without reopening."""
        return self._file
    file = property(getfile, NotImplemented, NotImplemented, "file - read only")
Fruit crisps are delicious bags of crispy air dried fruit, containing 100% fruit and nothing else. Each bag is one of your 5 a day and they are available in Sweet Apple, Tangy Apple and Pear flavours. "insert a quote/description from one of the tuck shop managers here" Perry Court Farm crisps are 100% home grown in Kent. Visit our character village to read Brad Pip's story. Visit our character village to read Penelope Pear's story.
from rest_framework import serializers

from librairy.models import Picture
from portfolio.models import Portfolio, PortfolioPicture


class PortfolioSerializer(serializers.ModelSerializer):
    """
    Full serializer for Portfolio objects: exposes the ordered list of
    picture primary keys and a slug-based hyperlinked detail URL.
    """
    pub_date = serializers.DateTimeField(required=False, allow_null=True)
    pictures = serializers.SerializerMethodField()
    url = serializers.HyperlinkedIdentityField(
            view_name='portfolio-detail',
            lookup_field='slug'
    )

    class Meta:
        model = Portfolio
        fields = ('url', 'title', 'draft', 'author', 'pictures',
                  'pub_date', 'slug', 'order',
        )
        read_only_fields = ('slug', 'author')

    def get_pictures(self, obj):
        """
        Return the portfolio's picture primary keys in portfolio order.

        Because many-to-many relation order is not respected by DRF,
        we get the list manually from the through model.
        """
        # Parameter renamed from `object` (shadowed the builtin).
        return obj.get_pictures().values_list('picture', flat=True)


class PortfolioPictureSerializer(serializers.ModelSerializer):
    """
    Serializer for the Portfolio<->Picture through model; portfolios are
    referenced by slug rather than primary key.
    """
    portfolio = serializers.SlugRelatedField(
            slug_field="slug",
            queryset=Portfolio.objects.all()
    )

    class Meta:
        model = PortfolioPicture
        fields = ('portfolio', 'picture', 'order')


class PortfolioHeadSerializer(PortfolioSerializer):
    """Lightweight variant exposing only title and slug (e.g. for lists)."""
    class Meta:
        model = Portfolio
        fields = ('title', 'slug')
Journalistic Criticism of Richard Nixon's Watergate Speaking of 1973. Eiland, Millard Fayne, "Journalistic Criticism of Richard Nixon's Watergate Speaking of 1973." (1974). LSU Historical Dissertations and Theses. 2722.
# -*- coding: utf-8 -*-

"""
    HMS Hospital Status Assessment and Request Management System
"""

# NOTE(review): this is a web2py/Sahana-Eden controller -- names such as
# request, response, settings, db, s3db, s3, s3mgr, s3base, T, DIV, URL, HTTP,
# redirect and s3_rest_controller are injected by the framework environment,
# not imported here.

module = request.controller
resourcename = request.function

if not settings.has_module(module):
    raise HTTP(404, body="Module disabled: %s" % module)

# -----------------------------------------------------------------------------
def s3_menu_postp():
    # Post-process the menu: add an "Open recent" entry for the hospital
    # currently stored in the session, if any.
    # @todo: rewrite this for new framework
    if len(request.args) > 0 and request.args[0].isdigit():
        newreq = dict(from_record="hms_hospital.%s" % request.args[0],
                      from_fields="hospital_id$id")
        #selreq = {"req.hospital_id":request.args[0]}
    else:
        newreq = dict()
        selreq = {"req.hospital_id__ne":"NONE"}
    # NOTE(review): newreq/selreq are built but never used below --
    # presumably left over from an older menu implementation; confirm.
    menu_selected = []
    hospital_id = s3mgr.get_session("hms", "hospital")
    if hospital_id:
        hospital = s3db.hms_hospital
        query = (hospital.id == hospital_id)
        record = db(query).select(hospital.id,
                                  hospital.name,
                                  limitby=(0, 1)).first()
        if record:
            name = record.name
            menu_selected.append(["%s: %s" % (T("Hospital"), name), False,
                                  URL(f="hospital", args=[record.id])])
    if menu_selected:
        menu_selected = [T("Open recent"), True, None, menu_selected]
        response.menu_options.append(menu_selected)

# -----------------------------------------------------------------------------
def index():
    """ Module's Home Page """

    return s3db.cms_index(module, alt_function="index_alt")

# -----------------------------------------------------------------------------
def index_alt():
    """
        Module homepage for non-Admin users when no CMS content found
    """

    # Just redirect to the Hospitals Map
    redirect(URL(f="hospital", args=["map"]))

# -----------------------------------------------------------------------------
def ltc():
    """ Filtered REST Controller: hospitals of facility_type 31 only """

    s3.filter = (s3db.hms_hospital.facility_type == 31)
    return hospital()

# -----------------------------------------------------------------------------
def marker_fn(record):
    """
        Function to decide which Marker to use for Hospital Map

        @param record: the hms_hospital record to pick a marker for
        @return: the matching gis_marker row (image/height/width), or
                 None if no marker of the computed name exists

        @ToDo: Legend
        @ToDo: Move to Templates
        @ToDo: Use Symbology
    """

    # Latest status record for this hospital (drives the marker colour)
    stable = db.hms_status
    status = db(stable.hospital_id == record.id).select(stable.facility_status,
                                                        limitby=(0, 1)
                                                        ).first()
    # facility_type 31 is the special-needs facility type (see ltc() above)
    if record.facility_type == 31:
        marker = "special_needs"
    else:
        marker = "hospital"
    if status:
        if status.facility_status == 1:
            # Normal
            marker = "%s_green" % marker
        elif status.facility_status in (3, 4):
            # Evacuating or Closed
            marker = "%s_red" % marker
        elif status.facility_status == 2:
            # Compromised
            marker = "%s_yellow" % marker
    # Resolve the marker name into the actual gis_marker record
    mtable = db.gis_marker
    marker = db(mtable.name == marker).select(mtable.image,
                                              mtable.height,
                                              mtable.width,
                                              cache=s3db.cache,
                                              limitby=(0, 1)).first()
    return marker

# -----------------------------------------------------------------------------
def hospital():
    """ Main REST controller for hospital data """

    table = s3db.hms_hospital

    # Load Models to add tabs
    if settings.has_module("inv"):
        s3db.table("inv_inv_item")
    elif settings.has_module("req"):
        # (gets loaded by Inv if available)
        s3db.table("req_req")

    # Pre-processor
    def prep(r):
        # Customise the request before the REST method runs:
        # per-component field setup, tooltips and map-marker configuration.
        # Location Filter
        s3db.gis_location_filter(r)

        if r.interactive:
            if r.component:
                if r.component.name == "inv_item" or \
                   r.component.name == "recv" or \
                   r.component.name == "send":
                    # Filter out items which are already in this inventory
                    s3db.inv_prep(r)
                elif r.component.name == "human_resource":
                    # Filter out people which are already staff for this hospital
                    s3base.s3_filter_staff(r)
                    # Make it clear that this is for adding new staff, not assigning existing
                    s3.crud_strings.hrm_human_resource.label_create_button = T("Add New Staff Member")
                    # Cascade the organisation_id from the hospital to the staff
                    field = s3db.hrm_human_resource.organisation_id
                    field.default = r.record.organisation_id
                    field.writable = False
                elif r.component.name == "req":
                    if r.method != "update" and r.method != "read":
                        # Hide fields which don't make sense in a Create form
                        # inc list_create (list_fields over-rides)
                        s3db.req_create_form_mods()
                elif r.component.name == "status":
                    # Attach help tooltips to every status field
                    # (`table` here deliberately shadows the outer hms_hospital table)
                    table = db.hms_status
                    table.facility_status.comment = DIV(_class="tooltip",
                        _title="%s|%s" % (T("Facility Status"),
                                          T("Status of the facility.")))
                    table.facility_operations.comment = DIV(_class="tooltip",
                        _title="%s|%s" % (T("Facility Operations"),
                                          T("Overall status of the facility operations.")))
                    table.clinical_status.comment = DIV(_class="tooltip",
                        _title="%s|%s" % (T("Clinical Status"),
                                          T("Status of the clinical departments.")))
                    table.clinical_operations.comment = DIV(_class="tooltip",
                        _title="%s|%s" % (T("Clinical Operations"),
                                          T("Overall status of the clinical operations.")))
                    table.ems_status.comment = DIV(_class="tooltip",
                        _title="%s|%s" % (T("Emergency Medical Services"),
                                          T("Status of operations/availability of emergency medical services at this facility.")))
                    table.ems_reason.comment = DIV(_class="tooltip",
                        _title="%s|%s" % (T("EMS Status Reasons"),
                                          T("Report the contributing factors for the current EMS status.")))
                    table.or_status.comment = DIV(_class="tooltip",
                        _title="%s|%s" % (T("OR Status"),
                                          T("Status of the operating rooms of this facility.")))
                    table.or_reason.comment = DIV(_class="tooltip",
                        _title="%s|%s" % (T("OR Status Reason"),
                                          T("Report the contributing factors for the current OR status.")))
                    table.morgue_status.comment = DIV(_class="tooltip",
                        _title="%s|%s" % (T("Morgue Status"),
                                          T("Status of morgue capacity.")))
                    table.morgue_units.comment = DIV(_class="tooltip",
                        _title="%s|%s" % (T("Morgue Units Available"),
                                          T("Number of vacant/available units to which victims can be transported immediately.")))
                    table.security_status.comment = DIV(_class="tooltip",
                        _title="%s|%s" % (T("Security Status"),
                                          T("Status of security procedures/access restrictions for the facility.")))
                    table.staffing.comment = DIV(_class="tooltip",
                        _title="%s|%s" % (T("Staffing Level"),
                                          T("Current staffing level at the facility.")))
                    table.access_status.comment = DIV(_class="tooltip",
                        _title="%s|%s" % (T("Road Conditions"),
                                          T("Describe the condition of the roads from/to the facility.")))
                elif r.component.name == "bed_capacity":
                    table = db.hms_bed_capacity
                    table.bed_type.comment = DIV(_class="tooltip",
                        _title="%s|%s" % (T("Bed Type"),
                                          T("Specify the bed type of this unit.")))
                    table.beds_baseline.comment = DIV(_class="tooltip",
                        _title="%s|%s" % (T("Baseline Number of Beds"),
                                          T("Baseline number of beds of that type in this unit.")))
                    table.beds_available.comment = DIV(_class="tooltip",
                        _title="%s|%s" % (T("Available Beds"),
                                          T("Number of available/vacant beds of that type in this unit at the time of reporting.")))
                    table.beds_add24.comment = DIV(_class="tooltip",
                        _title="%s|%s" % (T("Additional Beds / 24hrs"),
                                          T("Number of additional beds of that type expected to become available in this unit within the next 24 hours.")))
                elif r.component.name == "activity":
                    table = db.hms_activity
                    table.date.comment = DIV(_class="tooltip",
                        _title="%s|%s" % (T("Date & Time"),
                                          T("Date and time this report relates to.")))
                    table.patients.comment = DIV(_class="tooltip",
                        _title="%s|%s" % (T("Patients"),
                                          T("Number of in-patients at the time of reporting.")))
                    table.admissions24.comment = DIV(_class="tooltip",
                        _title="%s|%s" % (T("Admissions/24hrs"),
                                          T("Number of newly admitted patients during the past 24 hours.")))
                    table.discharges24.comment = DIV(_class="tooltip",
                        _title="%s|%s" % (T("Discharges/24hrs"),
                                          T("Number of discharged patients during the past 24 hours.")))
                    table.deaths24.comment = DIV(_class="tooltip",
                        _title="%s|%s" % (T("Deaths/24hrs"),
                                          T("Number of deaths during the past 24 hours.")))
                elif r.component.name == "contact":
                    table = db.hms_contact
                    table.title.comment = DIV(_class="tooltip",
                        _title="%s|%s" % (T("Title"),
                                          T("The Role this person plays within this hospital.")))
                elif r.component.name == "image":
                    # Images attach to the hospital record itself, so hide
                    # the generic doc_image link fields
                    table = s3db.doc_image
                    table.location_id.readable = table.location_id.writable = False
                    table.organisation_id.readable = table.organisation_id.writable = False
                    table.person_id.readable = table.person_id.writable = False
                elif r.component.name == "ctc":
                    # Cholera Treatment Center reporting fields
                    table = db.hms_ctc
                    table.ctc.comment = DIV(_class="tooltip",
                        _title="%s|%s" % (T("Cholera Treatment Center"),
                                          T("Does this facility provide a cholera treatment center?")))
                    table.number_of_patients.comment = DIV(_class="tooltip",
                        _title="%s|%s" % (T("Current number of patients"),
                                          T("How many patients with the disease are currently hospitalized at this facility?")))
                    table.cases_24.comment = DIV(_class="tooltip",
                        _title="%s|%s" % (T("New cases in the past 24h"),
                                          T("How many new cases have been admitted to this facility in the past 24h?")))
                    table.deaths_24.comment = DIV(_class="tooltip",
                        _title="%s|%s" % (T("Deaths in the past 24h"),
                                          T("How many of the patients with the disease died in the past 24h at this facility?")))
                    table.icaths_available.comment = DIV(_class="tooltip",
                        _title="%s|%s" % (T("Infusion catheters available"),
                                          T("Specify the number of available sets")))
                    table.icaths_needed_24.comment = DIV(_class="tooltip",
                        _title="%s|%s" % (T("Infusion catheters need per 24h"),
                                          T("Specify the number of sets needed per 24h")))
                    table.infusions_available.comment = DIV(_class="tooltip",
                        _title="%s|%s" % (T("Infusions available"),
                                          T("Specify the number of available units (litres) of Ringer-Lactate or equivalent solutions")))
                    table.infusions_needed_24.comment = DIV(_class="tooltip",
                        _title="%s|%s" % (T("Infusions needed per 24h"),
                                          T("Specify the number of units (litres) of Ringer-Lactate or equivalent solutions needed per 24h")))
                    table.antibiotics_available.comment = DIV(_class="tooltip",
                        _title="%s|%s" % (T("Antibiotics available"),
                                          T("Specify the number of available units (adult doses)")))
                    table.antibiotics_needed_24.comment = DIV(_class="tooltip",
                        _title="%s|%s" % (T("Antibiotics needed per 24h"),
                                          T("Specify the number of units (adult doses) needed per 24h")))
                    table.problem_types.comment = DIV(_class="tooltip",
                        _title="%s|%s" % (T("Current problems, categories"),
                                          T("Select all that apply")))
                    table.problem_details.comment = DIV(_class="tooltip",
                        _title="%s|%s" % (T("Current problems, details"),
                                          T("Please specify any problems and obstacles with the proper handling of the disease, in detail (in numbers, where appropriate). You may also add suggestions the situation could be improved.")))
            else:
                # Main hospital record (no component selected)
                table = r.table
                if r.id:
                    table.obsolete.readable = table.obsolete.writable = True
                elif r.method == "map":
                    # Tell the client to request per-feature markers
                    s3db.configure("hms_hospital", marker_fn=marker_fn)
                    s3.formats["have"] = r.url() # .have added by JS
                # Add comments
                table.gov_uuid.comment = DIV(_class="tooltip",
                    _title="%s|%s" % (T("Government UID"),
                                      T("The Unique Identifier (UUID) as assigned to this facility by the government.")))
                table.total_beds.comment = DIV(_class="tooltip",
                    _title="%s|%s" % (T("Total Beds"),
                                      T("Total number of beds in this facility. Automatically updated from daily reports.")))
                table.available_beds.comment = DIV(_class="tooltip",
                    _title="%s|%s" % (T("Available Beds"),
                                      T("Number of vacant/available beds in this facility. Automatically updated from daily reports.")))
        elif r.representation == "aadata":
            pass
            # Hide the Implied fields here too to make columns match
            #db.rms_req.shelter_id.readable = False
            #db.rms_req.organisation_id.readable = False
        elif r.representation == "plain":
            # Duplicates info in the other fields
            r.table.location_id.readable = False
        elif r.representation == "geojson":
            # Load these models now as they'll be needed when we encode
            mtable = s3db.gis_marker
            stable = s3db.hms_status
            s3db.configure("hms_hospital", marker_fn=marker_fn)
        return True
    s3.prep = prep

    if "map" in request.args:
        # S3Map has migrated
        hide_filter = False
    else:
        # Not yet ready otherwise
        hide_filter = True

    output = s3_rest_controller(rheader=s3db.hms_hospital_rheader,
                                hide_filter=hide_filter,
                                )
    return output

# -----------------------------------------------------------------------------
def incoming():
    """ Incoming Shipments """

    # NOTE(review): inv_incoming is not defined in this file -- presumably
    # provided by the framework's global environment; confirm.
    return inv_incoming()

# -----------------------------------------------------------------------------
def req_match():
    """ Match Requests """

    return s3db.req_match()

# END =========================================================================
John Krasinski didn’t just have one role in his movie, “A Quiet Place,” which he also directed. He also played the monsters in the horror film. During his appearance on “Jimmy Kimmel Live!” on Tuesday night, host Jimmy Kimmel held up a picture of Krasinski in his motion-capture suit that would ultimately transform him into the creatures that hunt him and his family in the movie. “A Quiet Place” stars Krasinski’s wife Emily Blunt, Millicent Simmonds and Noah Jupe. It is about a family that has to live in complete silence; otherwise, these creatures will hunt them. Even the most normal tasks become daunting: eating dinner with the family, playing board games or going fishing makes the characters fear for their lives.
#!/usr/bin/python
# -*- coding: utf-8 -*-
#########################################################################
# File Name: PyCCP.py
# Author: Carson Wang
# mail: [email protected]
# Created Time: 2017-03-04 21:48:51
#########################################################################
# Python 2 client for the Academia Sinica (CKIP) online Chinese parser.

import urllib, urllib2, cookielib, re


def parseTree(string):
    """
    Submit a Chinese sentence to the sinica.edu.tw web parser and return
    the list of parse-tree strings extracted from the result page.

    :param string: a unicode object or a UTF-8 encoded str
    :raises UnicodeError: if a str is passed that is not valid UTF-8
    """
    if not isinstance(string, unicode):
        try:
            string = string.decode('utf-8')
        # Narrowed from a bare `except:` -- only decoding failures should
        # be translated into this error.
        except UnicodeError:
            raise UnicodeError('Input encoding should be UTF-8 or unicode')

    # The parser site expects Big5/CP950-encoded form data.
    string = string.encode('cp950')

    URL = 'http://parser.iis.sinica.edu.tw/'

    # Cookie-aware opener used for the whole session, so the form id handed
    # out by the server stays associated with our subsequent POST.
    cj = cookielib.CookieJar()
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
    opener.addheaders = [
            ('User-Agent', 'Mozilla/5.0 Gecko/20100101 Firefox/29.0'),
            ('referer', 'http://parser.iis.sinica.edu.tw/'),
            ('Host', 'parser.iis.sinica.edu.tw')
    ]

    # Fetch the entry page through the same opener.  (The original used
    # urllib.urlopen here, which bypassed the session cookies.)
    raw = opener.open(URL).read()
    match = re.search(r'name="id" value="(\d+)"', raw)
    if match is None:
        # Site layout changed or the request was rejected -- fail loudly
        # instead of an obscure AttributeError on .group().
        raise ValueError('Could not find the form id on the parser page')
    fid = match.group(1)

    postdata = dict()
    postdata['myTag'] = string
    postdata['id'] = fid
    postdata = urllib.urlencode(postdata)

    resURL = 'http://parser.iis.sinica.edu.tw/svr/webparser.asp'
    res = opener.open(resURL, postdata).read()
    res = res.decode('cp950')
    # Each parse tree is rendered as "<nobr>#N:tree</nobr>" in the result.
    res = re.findall('<nobr>#\d+:(.*?)</nobr>', res)

    return res
More than half of motorists screened during a summer crackdown on drug-driving failed roadside tests, figures have shown. An average of 37 drivers a day were caught driving under the influence of banned substances, or 57% of the 1,962 motorists tested. Data from 38 police forces in England and Wales, from 14 June to 15 July, show there was a wide gap between the numbers of people tested for alcohol and other drugs. During that period, 36,675 breath tests for alcohol were carried out, with 3,667 – about one in 10 drivers – being either positive, refused or failed by the driver. Last year, 1,084 of the 2,022 tests for drug-driving came back positive (53.6%), while in 2016, 1,028 of 2,588 tests were failed (39.7%), figures from the National Police Chiefs’ Council (NPCC) show. Officers use so-called drugalysers to check for cocaine and cannabis after swabbing a suspect’s mouth, while a blood test can be used at a police station to check for ecstasy and heroin. They can be used for motorists seen driving erratically or who have been involved in an accident. Ch Con Anthony Bangham, the NPCC lead for roads policing, said: “Driving under the influence of drink or drugs is an incredibly dangerous and selfish decision to take, and it can have devastating consequences on people’s lives.
# Views for django_messages: inbox/outbox/trash listings and message
# compose/reply/delete/undelete/view actions.  All views require login.

import re

from django.http import Http404, HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.utils.translation import ugettext as _
from django.utils import timezone
from django.core.urlresolvers import reverse
from django.conf import settings

from django_messages.models import Message
from django_messages.forms import ComposeForm, ComposeToForm
from django_messages.utils import format_quote, get_user_model, get_username_field

User = get_user_model()

# Optional integration with django-notification; `notification` stays None
# when the app is not installed or notifications are disabled in settings.
if "notification" in settings.INSTALLED_APPS and getattr(settings, 'DJANGO_MESSAGES_NOTIFY', True):
    from notification import models as notification
else:
    notification = None


@login_required
def inbox(request, template_name='django_messages/inbox.html'):
    """
    Displays a list of received messages for the current user.
    Optional Arguments:
        ``template_name``: name of the template to use.
    """
    message_list = Message.objects.inbox_for(request.user)
    return render_to_response(template_name, {
        'message_list': message_list,
    }, context_instance=RequestContext(request))


@login_required
def outbox(request, template_name='django_messages/outbox.html'):
    """
    Displays a list of sent messages by the current user.
    Optional arguments:
        ``template_name``: name of the template to use.
    """
    message_list = Message.objects.outbox_for(request.user)
    return render_to_response(template_name, {
        'message_list': message_list,
    }, context_instance=RequestContext(request))


@login_required
def trash(request, template_name='django_messages/trash.html'):
    """
    Displays a list of deleted messages.
    Optional arguments:
        ``template_name``: name of the template to use
    Hint: A Cron-Job could periodically clean up old messages, which are
    deleted by sender and recipient.
    """
    message_list = Message.objects.trash_for(request.user)
    return render_to_response(template_name, {
        'message_list': message_list,
    }, context_instance=RequestContext(request))


@login_required
def compose(request, recipient=None, form_class=ComposeForm,
        recipient_form_class=ComposeToForm,
        template_name='django_messages/compose.html', success_url=None,
        recipient_filter=None, recipient_format=None):
    """
    Displays and handles the ``form_class`` form to compose new messages.
    Required Arguments: None
    Optional Arguments:
        ``recipient``: username of a `django.contrib.auth` User, who should
                       receive the message, optionally multiple usernames
                       could be separated by a '+'
        ``form_class``: the form-class to use
        ``template_name``: the template to use
        ``success_url``: where to redirect after successful submission
    """
    if recipient:
        # Split the preset recipient string on '+', ',' or whitespace and
        # resolve each fragment to a User via the configured username field.
        recipients = User.objects.filter(
            **{
                '%s__in' % get_username_field(): [
                    rr for rr in re.split(r'[+,\s]+', recipient) if rr
                ]
            }
        )
    else:
        recipients = None
    if request.method == "POST":
        # NOTE(review): `sender` is assigned but unused; form.save() below
        # uses request.user directly.
        sender = request.user
        if recipients:
            form = recipient_form_class(
                request.POST,
                recipients=recipients,
                recipient_filter=recipient_filter,
                recipient_format=recipient_format
            )
        else:
            form = form_class(request.POST, recipient_filter=recipient_filter)
        if form.is_valid():
            form.save(sender=request.user)
            messages.info(request, _(u"Message successfully sent."))
            if success_url is None:
                success_url = reverse('messages_inbox')
            if 'next' in request.GET:
                # ?next=... overrides the default redirect target
                success_url = request.GET['next']
            return HttpResponseRedirect(success_url)
    else:
        if recipient is not None:
            form = recipient_form_class(recipients = recipients,
                                        recipient_format=recipient_format)
        else:
            form = form_class()
    return render_to_response(template_name, {
        'form': form,
    }, context_instance=RequestContext(request))


@login_required
def reply(request, message_id, form_class=ComposeToForm,
        template_name='django_messages/compose.html', success_url=None,
        recipient_filter=None, recipient_format=None,
        quote_helper=format_quote,
        subject_template=_(u"Re: %(subject)s"),):
    """
    Prepares the ``form_class`` form for writing a reply to a given message
    (specified via ``message_id``). Uses the ``format_quote`` helper from
    ``messages.utils`` to pre-format the quote. To change the quote format
    assign a different ``quote_helper`` kwarg in your url-conf.
    """
    parent = get_object_or_404(Message, id=message_id)

    # Only the two participants of the conversation may reply
    if parent.sender != request.user and parent.recipient != request.user:
        raise Http404

    if request.method == "POST":
        # NOTE(review): `sender` is assigned but unused; form.save() below
        # uses request.user directly.
        sender = request.user
        form = form_class(request.POST,
            recipients=[parent.sender],
            recipient_filter=recipient_filter,
            recipient_format=recipient_format
        )
        if form.is_valid():
            form.save(sender=request.user, parent_msg=parent)
            messages.info(request, _(u"Message successfully sent."))
            if success_url is None:
                success_url = reverse('messages_inbox')
            return HttpResponseRedirect(success_url)
    else:
        # Pre-fill the reply with a quoted body and a "Re:" subject
        form = form_class(recipients=[parent.sender],
            initial={
                'body': quote_helper(parent.sender, parent.body),
                'subject': subject_template % {'subject': parent.subject},
                'recipient': [parent.sender,]
            },
            recipient_format=recipient_format)
    return render_to_response(template_name, {
        'form': form,
    }, context_instance=RequestContext(request))


@login_required
def delete(request, message_id, success_url=None):
    """
    Marks a message as deleted by sender or recipient. The message is not
    really removed from the database, because two users must delete a
    message before it's safe to remove it completely.
    A cron-job should prune the database and remove old messages which
    are deleted by both users.
    As a side effect, this makes it easy to implement a trash with undelete.

    You can pass ?next=/foo/bar/ via the url to redirect the user to a
    different page (e.g. `/foo/bar/`) than ``success_url`` after deletion
    of the message.
    """
    user = request.user
    now = timezone.now()
    message = get_object_or_404(Message, id=message_id)
    deleted = False
    if success_url is None:
        success_url = reverse('messages_inbox')
    if 'next' in request.GET:
        success_url = request.GET['next']
    if message.sender == user:
        message.sender_deleted_at = now
        deleted = True
    if message.recipient == user:
        message.recipient_deleted_at = now
        deleted = True
    if deleted:
        message.save()
        messages.info(request, _(u"Message successfully deleted."))
        if notification:
            notification.send([user], "messages_deleted", {'message': message,})
        return HttpResponseRedirect(success_url)
    # Neither sender nor recipient: pretend the message does not exist
    raise Http404


@login_required
def undelete(request, message_id, success_url=None):
    """
    Recovers a message from trash. This is achieved by removing the
    ``(sender|recipient)_deleted_at`` from the model.
    """
    user = request.user
    message = get_object_or_404(Message, id=message_id)
    undeleted = False
    if success_url is None:
        success_url = reverse('messages_inbox')
    if 'next' in request.GET:
        success_url = request.GET['next']
    if message.sender == user:
        message.sender_deleted_at = None
        undeleted = True
    if message.recipient == user:
        message.recipient_deleted_at = None
        undeleted = True
    if undeleted:
        message.save()
        messages.info(request, _(u"Message successfully recovered."))
        if notification:
            notification.send([user], "messages_recovered", {'message': message,})
        return HttpResponseRedirect(success_url)
    # Neither sender nor recipient: pretend the message does not exist
    raise Http404


@login_required
def view(request, message_id, form_class=ComposeToForm,
         quote_helper=format_quote, subject_template=_(u"Re: %(subject)s"),
         recipient_format=None, template_name='django_messages/view.html'):
    """
    Shows a single message. ``message_id`` argument is required.
    The user is only allowed to see the message, if he is either
    the sender or the recipient. If the user is not allowed a 404
    is raised. If the user is the recipient and the message is unread
    ``read_at`` is set to the current datetime.
    If the user is the recipient a reply form will be added to the
    template context, otherwise 'reply_form' will be None.
    """
    user = request.user
    now = timezone.now()
    message = get_object_or_404(Message, id=message_id)
    if (message.sender != user) and (message.recipient != user):
        raise Http404
    if message.read_at is None and message.recipient == user:
        # First time the recipient opens the message: mark it as read
        message.read_at = now
        message.save()

    context = {'message': message, 'reply_form': None}
    if message.recipient == user:
        # Pre-build a reply form with quoted body and "Re:" subject
        form = form_class(
            recipients = [message.sender,],
            initial={
                'body': quote_helper(message.sender, message.body),
                'subject': subject_template % {'subject': message.subject},
                'recipient': [message.sender,]
            },
            recipient_format = recipient_format
        )
        context['reply_form'] = form
    return render_to_response(template_name, context,
        context_instance=RequestContext(request))
Admission to Devonport High School for Girls is made through Plymouth City Council. Pupils are selected through academic ability with all candidates required to sit the 11-plus examination. The examination is designed to measure a child’s academic ability and determine whether she would be suited to a grammar school education. The 11-plus examinations assess your child’s academic ability by using GL Assessment English Comprehension and Mathematics paper. The 11-plus examination will take place before the closing date for submission of the application form for a school place and parents / carers are required to register their child to take the examination. Further details of this procedure and information to help you apply for a Year 7 secondary place within Plymouth can be found in the ‘Next Step Parents Guide’ which is available from the Plymouth City Council website. The school’s published admission number (PAN) for Year 7 is 120. For the last few years the school has received more applications than places available. There may be places available for students seeking transfer in years 8, 9 and 10. In year admissions are made via an application through Plymouth City Council and the satisfactory completion of a school set admissions test to clarify candidates’ ability and aptitude. For further details on admission criteria please see link below. Devonport High School for Girls welcomes applications from students transferring from other schools. The school published admission number (PAN) excluding those transferring from the school’s own year 11 is 35. Applications should be made direct to the school by the end of the Autumn Term. The number of students who have applied for each course will be taken into account, meaning that extra students can sometimes be accommodated over the admission number if the student’s chosen course is not full. 
The admission arrangements apply equally to those seeking admission from Year 11 within the school and to those seeking transfer from other schools. All those seeking admission to Year 12 must meet the basic entry requirements of 6 GCSEs at grade 5 or above, which must include English and Mathematics. In addition to subject specific entry requirements you will require a minimum grade 6 in the subjects you wish to study. For the subject of Psychology you will need a grade 6 in Mathematics or a Science, with a grade 6 in English. For the subject of Business you will need a grade 6 in Mathematics and a grade 6 in English. For specific entry requirements please see the Sixth Form prospectus or contact the school on 01752 705024. The Sixth Form Prospectus and application forms to apply to join the Sixth Form can be found on our Prospectus Page.
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Implements the Keras Sequential model.""" from builtins import range import keras from keras import backend as K from keras import layers from keras import models from keras.backend import relu import pandas as pd import tensorflow as tf from tensorflow.python.saved_model import builder as saved_model_builder from tensorflow.python.saved_model import signature_constants from tensorflow.python.saved_model import tag_constants from tensorflow.python.saved_model.signature_def_utils_impl import \ predict_signature_def # CSV columns in the input file. 
CSV_COLUMNS = ('age', 'workclass', 'fnlwgt', 'education', 'education_num',
               'marital_status', 'occupation', 'relationship', 'race',
               'gender', 'capital_gain', 'capital_loss', 'hours_per_week',
               'native_country', 'income_bracket')

# Per-column defaults used when parsing CSV records (parallel to
# CSV_COLUMNS): numeric columns default to 0, string columns to ''.
CSV_COLUMN_DEFAULTS = [[0], [''], [0], [''], [0], [''], [''], [''], [''],
                       [''], [0], [0], [0], [''], ['']]

# Categorical columns with vocab size.
# native_country and fnlwgt are ignored.
# NOTE: only the column *names* (first element of each pair) are used in
# this file; the second element is informational.
CATEGORICAL_COLS = (('education', 16), ('marital_status', 7),
                    ('relationship', 6), ('workclass', 9), ('occupation', 15),
                    ('gender', [' Male', ' Female']), ('race', 5))

CONTINUOUS_COLS = ('age', 'education_num', 'capital_gain', 'capital_loss',
                   'hours_per_week')

LABELS = [' <=50K', ' >50K']
LABEL_COLUMN = 'income_bracket'

# Everything that is neither categorical, continuous nor the label.
UNUSED_COLUMNS = set(CSV_COLUMNS) - set(
    list(zip(*CATEGORICAL_COLS))[0] + CONTINUOUS_COLS + (LABEL_COLUMN,))


def model_fn(input_dim, labels_dim, hidden_units=(100, 70, 50, 20),
             learning_rate=0.1):
    """Create a Keras Sequential model with layers.

    Args:
      input_dim: (int) Input dimensions for input layer.
      labels_dim: (int) Label dimensions for input layer.
      hidden_units: (sequence of int) the layer sizes of the DNN
        (input layer first).  A tuple default replaces the original
        mutable-list default argument.
      learning_rate: (float) the learning rate for the optimizer.

    Returns:
      A compiled Keras model.
    """
    # "set_learning_phase" to False to avoid:
    # AbortionError(code=StatusCode.INVALID_ARGUMENT during online prediction.
    K.set_learning_phase(False)
    model = models.Sequential()

    for units in hidden_units:
        model.add(
            layers.Dense(units=units, input_dim=input_dim, activation=relu))
        # Subsequent layers infer their input size from the previous layer
        input_dim = units

    # Add a dense final layer with sigmoid function.
    model.add(layers.Dense(labels_dim, activation='sigmoid'))
    compile_model(model, learning_rate)
    return model


def compile_model(model, learning_rate):
    """Compile `model` in place (Adam + binary cross-entropy) and return it."""
    model.compile(
        loss='binary_crossentropy',
        optimizer=keras.optimizers.Adam(lr=learning_rate),
        metrics=['accuracy'])
    return model


def to_savedmodel(model, export_path):
    """Convert the Keras HDF5 model into TensorFlow SavedModel."""
    builder = saved_model_builder.SavedModelBuilder(export_path)

    signature = predict_signature_def(
        inputs={'input': model.inputs[0]}, outputs={'income': model.outputs[0]})

    with K.get_session() as sess:
        builder.add_meta_graph_and_variables(
            sess=sess,
            tags=[tag_constants.SERVING],
            signature_def_map={
                signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature
            })
        builder.save()


def to_numeric_features(features, feature_cols=None):
    """Converts the pandas input features to numeric values.

    Categorical columns are one-hot encoded (first level dropped) and
    the unused columns are removed.

    Args:
      features: Input features in the data
        age (continuous)
        workclass (categorical)
        fnlwgt (continuous)
        education (categorical)
        education_num (continuous)
        marital_status (categorical)
        occupation (categorical)
        relationship (categorical)
        race (categorical)
        gender (categorical)
        capital_gain (continuous)
        capital_loss (continuous)
        hours_per_week (continuous)
        native_country (categorical)
      feature_cols: Column list of converted features to be returned.
        Optional, may be used to ensure schema consistency over multiple
        executions (missing columns are re-added filled with 0).

    Returns:
      A pandas dataframe.
    """
    for col in CATEGORICAL_COLS:
        # Append the dummy columns, then drop the original categorical one
        features = pd.concat(
            [features, pd.get_dummies(features[col[0]], drop_first=True)],
            axis=1)
        features.drop(col[0], axis=1, inplace=True)

    # Remove the unused columns from the dataframe.
    for col in UNUSED_COLUMNS:
        features.pop(col)

    # Re-index dataframe (if categories list changed from the previous dataset)
    if feature_cols is not None:
        features = features.T.reindex(feature_cols).T.fillna(0)
    return features


def generator_input(filenames, chunk_size, batch_size=64):
    """Produce features and labels needed by keras fit_generator.

    Loops forever over the first file in `filenames`, yielding
    (features, one-hot labels) DataFrame batches of up to `batch_size` rows.
    """
    feature_cols = None
    while True:
        input_reader = pd.read_csv(
            tf.gfile.Open(filenames[0]),
            names=CSV_COLUMNS,
            chunksize=chunk_size,
            na_values=' ?')

        for input_data in input_reader:
            input_data = input_data.dropna()
            label = pd.get_dummies(input_data.pop(LABEL_COLUMN))

            input_data = to_numeric_features(input_data, feature_cols)

            # Retains schema for next chunk processing.
            if feature_cols is None:
                feature_cols = input_data.columns

            idx_len = input_data.shape[0]
            for index in range(0, idx_len, batch_size):
                yield (input_data.iloc[index:min(idx_len, index + batch_size)],
                       label.iloc[index:min(idx_len, index + batch_size)])
Last night the House approved the current incarnation of our ed finance reform bill – HB 2261. I’m including links to some summaries of the bill, including the AP story from the Seattle PI site (out of nostalgia). It’s depressing that Curt Woodward was the only reporter physically present on the floor when we passed the bill – there used to be many, many more. The bill includes an amendment from me that creates the strong legal definition of “basic education,” a key element of requiring the Legislature to step up to the level of funding required to provide students with the opportunity to earn a meaningful diploma. We have more work to do as you can tell from the following comparison of this bill to our initial proposal. I can guarantee that we won’t do a bill exactly like our first proposal, but we need to address all the same categories of decisions. We are making progress and I hope to continue.
""" A data structure describing the publication of an LCA data resource. Each publication has two forms: (1) the serialized form lives in the antelope directory and provides enough information to reconstitute the This object is supposed to provide the basic information and functionality common to both v1 and v2 resources, each of which is a subclass with specialized properties. """ import os import json from .authorization import allowed_interfaces, PrivacyDeclaration class CatalogRequired(Exception): pass class LcPub(object): """ Abstract class that handles de/serialization and common features """ _type = None @property def name(self): raise NotImplementedError def serialize(self): raise NotImplementedError def write_to_file(self, path): if os.path.exists(os.path.join(path, self.name)): raise FileExistsError('Resource is already specified') with open(os.path.join(path, self.name), 'w') as fp: json.dump(self.serialize(), fp, indent=2, sort_keys=True) class AntelopeV1Pub(LcPub): """ An Antelope V1 publication is a record of a ForegroundStudy and a list of supported LCIA methods. In order to create it, we need to pass the things necessary to create the ForegroundStudy. but since that class doesn't exist yet, neither does this. 
Conceptually, we need: - a CatalogRef for the study's top level fragment - an iterable of lcia methods, being either caby ref (or by uuid * given that lcia methods should be uniquely determined) - an optional mapping between entity refs and indices for 'flows', 'flowproperties', 'processes', 'fragments' : otherwise these are determined by the order encountered when traversing the top level fragment and children """ _type = 'Antelope_v1' @property def name(self): return self._foreground def __init__(self, foreground, fragment_ref, lcia_methods=None, mapping=None): """ :param foreground: :param fragment_ref: :param lcia_methods: :param mapping: """ self._foreground = foreground if not fragment_ref.resolved: raise CatalogRequired('Fragment ref is not grounded!') self._fragment = fragment_ref self._lcia = lcia_methods or [] mapping = mapping or dict() if not isinstance(mapping, dict): raise TypeError('Mapping must be a dict') self._mapping = mapping # ultimately this needs to be populated by traversing the fragment self._reverse_mapping = dict() self._populate_mapping() self._reverse_map() def _populate_mapping(self): """ Beginning at the top-level fragment, traverse the model and identify all local fragments (parent + child) encountered during a traversal. From that, derive a list of stage names, flows, processes, and flow properties, and ensure that all are present in the mapping. :return: """ @staticmethod def _enum(lst): return {k: i for i, k in enumerate(lst)} def _reverse_map(self): self._reverse_mapping['lcia'] = self._enum(self._lcia) for k in 'flow', 'flowproperty', 'fragment', 'process', 'stage': self._reverse_mapping[k] = self._enum(self._mapping[k]) def serialize(self): return { 'type': self._type, 'name': self.name, 'fragment': self._fragment.link, 'lcia': self._lcia, 'mapping': self._mapping } class AntelopeV2Pub(LcPub): """ An Antelope V2 publication is a catalog-supported publication of a complete LCA data resource, denoted by semantic origin. 
It is instantiated essentially in the form of a CatalogQuery, which very little else to do, other than a privacy specification. """ _type = 'Antelope_v2' @property def name(self): return self._query.origin @property def query(self): return self._query def __init__(self, query, interfaces=allowed_interfaces, privacy=None): """ :param query: a grounded query :param interfaces: interfaces to allow access :param privacy: a privacy specification: either a blanket number or a dict. if None, all information is public (though limited to the named interfaces) if a number, all queries must be authorized with a privacy score lower than or equal to the number if a dict, queries having the specified scope must authorize with a privacy score lower than or equal to the corresponding value. The lowest privacy score is 0, so a negative number means authorization is not possible. Only keys in the list of known scopes are retained """ self._query = query if isinstance(interfaces, str): interfaces = (interfaces,) self._interfaces = tuple(k for k in interfaces if k in allowed_interfaces) if isinstance(privacy, dict): self._scopes = PrivacyDeclaration.from_dict(privacy) else: self._scopes = PrivacyDeclaration(privacy) def serialize(self): return { 'type': self._type, 'name': self.name, 'interfaces': self._interfaces, 'privacy': self._scopes.serialize() }
And she’s back on a roll! Today will be a double feature of hot hits from the 70s. First up, 1974.
""" TestCmd.py: a testing framework for commands and scripts. The TestCmd module provides a framework for portable automated testing of executable commands and scripts (in any language, not just Python), especially commands and scripts that require file system interaction. In addition to running tests and evaluating conditions, the TestCmd module manages and cleans up one or more temporary workspace directories, and provides methods for creating files and directories in those workspace directories from in-line data, here-documents), allowing tests to be completely self-contained. A TestCmd environment object is created via the usual invocation: test = TestCmd() The TestCmd module provides pass_test(), fail_test(), and no_result() unbound methods that report test results for use with the Aegis change management system. These methods terminate the test immediately, reporting PASSED, FAILED or NO RESULT respectively and exiting with status 0 (success), 1 or 2 respectively. This allows for a distinction between an actual failed test and a test that could not be properly evaluated because of an external condition (such as a full file system or incorrect permissions). """ # Copyright 2000 Steven Knight # This module is free software, and you may redistribute it and/or modify # it under the same terms as Python itself, so long as this copyright message # and disclaimer are retained in their original form. # # IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, # SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF # THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH # DAMAGE. # # THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, # AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE, # SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. 
# Copyright 2002-2003 Vladimir Prus. # Copyright 2002-2003 Dave Abrahams. # Copyright 2006 Rene Rivera. # Distributed under the Boost Software License, Version 1.0. # (See accompanying file LICENSE_1_0.txt or copy at # http://www.boost.org/LICENSE_1_0.txt) from string import join, split __author__ = "Steven Knight <[email protected]>" __revision__ = "TestCmd.py 0.D002 2001/08/31 14:56:12 software" __version__ = "0.02" from types import * import os import os.path import re import shutil import stat import subprocess import sys import tempfile import traceback tempfile.template = 'testcmd.' _Cleanup = [] def _clean(): global _Cleanup list = _Cleanup[:] _Cleanup = [] list.reverse() for test in list: test.cleanup() sys.exitfunc = _clean def caller(tblist, skip): string = "" arr = [] for file, line, name, text in tblist: if file[-10:] == "TestCmd.py": break arr = [(file, line, name, text)] + arr atfrom = "at" for file, line, name, text in arr[skip:]: if name == "?": name = "" else: name = " (" + name + ")" string = string + ("%s line %d of %s%s\n" % (atfrom, line, file, name)) atfrom = "\tfrom" return string def fail_test(self=None, condition=True, function=None, skip=0): """Cause the test to fail. By default, the fail_test() method reports that the test FAILED and exits with a status of 1. If a condition argument is supplied, the test fails only if the condition is true. """ if not condition: return if not function is None: function() of = "" desc = "" sep = " " if not self is None: if self.program: of = " of " + join(self.program, " ") sep = "\n\t" if self.description: desc = " [" + self.description + "]" sep = "\n\t" at = caller(traceback.extract_stack(), skip) sys.stderr.write("FAILED test" + of + desc + sep + at + """ in directory: """ + os.getcwd() ) sys.exit(1) def no_result(self=None, condition=True, function=None, skip=0): """Causes a test to exit with no valid result. 
By default, the no_result() method reports NO RESULT for the test and exits with a status of 2. If a condition argument is supplied, the test fails only if the condition is true. """ if not condition: return if not function is None: function() of = "" desc = "" sep = " " if not self is None: if self.program: of = " of " + self.program sep = "\n\t" if self.description: desc = " [" + self.description + "]" sep = "\n\t" at = caller(traceback.extract_stack(), skip) sys.stderr.write("NO RESULT for test" + of + desc + sep + at) sys.exit(2) def pass_test(self=None, condition=True, function=None): """Causes a test to pass. By default, the pass_test() method reports PASSED for the test and exits with a status of 0. If a condition argument is supplied, the test passes only if the condition is true. """ if not condition: return if not function is None: function() sys.stderr.write("PASSED\n") sys.exit(0) class MatchError(object): def __init__(self, message): self.message = message def __nonzero__(self): return False def __bool__(self): return False def match_exact(lines=None, matches=None): """ Returns whether the given lists or strings containing lines separated using newline characters contain exactly the same data. """ if not type(lines) is ListType: lines = split(lines, "\n") if not type(matches) is ListType: matches = split(matches, "\n") if len(lines) != len(matches): return for i in range(len(lines)): if lines[i] != matches[i]: return MatchError("Mismatch at line %d\n- %s\n+ %s\n" % (i+1, matches[i], lines[i])) if len(lines) < len(matches): return MatchError("Missing lines at line %d\n- %s" % (len(lines), "\n- ".join(matches[len(lines):]))) if len(lines) > len(matches): return MatchError("Extra lines at line %d\n+ %s" % (len(matches), "\n+ ".join(lines[len(matches):]))) return 1 def match_re(lines=None, res=None): """ Given lists or strings contain lines separated using newline characters. 
This function matches those lines one by one, interpreting the lines in the res parameter as regular expressions. """ if not type(lines) is ListType: lines = split(lines, "\n") if not type(res) is ListType: res = split(res, "\n") for i in range(min(len(lines), len(res))): if not re.compile("^" + res[i] + "$").search(lines[i]): return MatchError("Mismatch at line %d\n- %s\n+ %s\n" % (i+1, res[i], lines[i])) if len(lines) < len(res): return MatchError("Missing lines at line %d\n- %s" % (len(lines), "\n- ".join(res[len(lines):]))) if len(lines) > len(res): return MatchError("Extra lines at line %d\n+ %s" % (len(res), "\n+ ".join(lines[len(res):]))) return 1 class TestCmd: def __init__(self, description=None, program=None, workdir=None, subdir=None, verbose=False, match=None, inpath=None): self._cwd = os.getcwd() self.description_set(description) self.program_set(program, inpath) self.verbose_set(verbose) if match is None: self.match_func = match_re else: self.match_func = match self._dirlist = [] self._preserve = {'pass_test': 0, 'fail_test': 0, 'no_result': 0} env = os.environ.get('PRESERVE') if env: self._preserve['pass_test'] = env self._preserve['fail_test'] = env self._preserve['no_result'] = env else: env = os.environ.get('PRESERVE_PASS') if env is not None: self._preserve['pass_test'] = env env = os.environ.get('PRESERVE_FAIL') if env is not None: self._preserve['fail_test'] = env env = os.environ.get('PRESERVE_PASS') if env is not None: self._preserve['PRESERVE_NO_RESULT'] = env self._stdout = [] self._stderr = [] self.status = None self.condition = 'no_result' self.workdir_set(workdir) self.subdir(subdir) def __del__(self): self.cleanup() def __repr__(self): return "%x" % id(self) def cleanup(self, condition=None): """ Removes any temporary working directories for the specified TestCmd environment. If the environment variable PRESERVE was set when the TestCmd environment was created, temporary working directories are not removed. 
If any of the environment variables PRESERVE_PASS, PRESERVE_FAIL or PRESERVE_NO_RESULT were set when the TestCmd environment was created, then temporary working directories are not removed if the test passed, failed or had no result, respectively. Temporary working directories are also preserved for conditions specified via the preserve method. Typically, this method is not called directly, but is used when the script exits to clean up temporary working directories as appropriate for the exit status. """ if not self._dirlist: return if condition is None: condition = self.condition if self._preserve[condition]: for dir in self._dirlist: print("Preserved directory %s" % dir) else: list = self._dirlist[:] list.reverse() for dir in list: self.writable(dir, 1) shutil.rmtree(dir, ignore_errors=1) self._dirlist = [] self.workdir = None os.chdir(self._cwd) try: global _Cleanup _Cleanup.remove(self) except (AttributeError, ValueError): pass def description_set(self, description): """Set the description of the functionality being tested.""" self.description = description def fail_test(self, condition=True, function=None, skip=0): """Cause the test to fail.""" if not condition: return self.condition = 'fail_test' fail_test(self = self, condition = condition, function = function, skip = skip) def match(self, lines, matches): """Compare actual and expected file contents.""" return self.match_func(lines, matches) def match_exact(self, lines, matches): """Compare actual and expected file content exactly.""" return match_exact(lines, matches) def match_re(self, lines, res): """Compare file content with a regular expression.""" return match_re(lines, res) def no_result(self, condition=True, function=None, skip=0): """Report that the test could not be run.""" if not condition: return self.condition = 'no_result' no_result(self = self, condition = condition, function = function, skip = skip) def pass_test(self, condition=True, function=None): """Cause the test to pass.""" if not 
condition: return self.condition = 'pass_test' pass_test(self, condition, function) def preserve(self, *conditions): """ Arrange for the temporary working directories for the specified TestCmd environment to be preserved for one or more conditions. If no conditions are specified, arranges for the temporary working directories to be preserved for all conditions. """ if conditions is (): conditions = ('pass_test', 'fail_test', 'no_result') for cond in conditions: self._preserve[cond] = 1 def program_set(self, program, inpath): """Set the executable program or script to be tested.""" if not inpath and program and not os.path.isabs(program[0]): program[0] = os.path.join(self._cwd, program[0]) self.program = program def read(self, file, mode='rb'): """ Reads and returns the contents of the specified file name. The file name may be a list, in which case the elements are concatenated with the os.path.join() method. The file is assumed to be under the temporary working directory unless it is an absolute path name. The I/O mode for the file may be specified and must begin with an 'r'. The default is 'rb' (binary read). """ if type(file) is ListType: file = apply(os.path.join, tuple(file)) if not os.path.isabs(file): file = os.path.join(self.workdir, file) if mode[0] != 'r': raise ValueError, "mode must begin with 'r'" return open(file, mode).read() def run(self, program=None, arguments=None, chdir=None, stdin=None, universal_newlines=True): """ Runs a test of the program or script for the test environment. Standard output and error output are saved for future retrieval via the stdout() and stderr() methods. 'universal_newlines' parameter controls how the child process input/output streams are opened as defined for the same named Python subprocess.POpen constructor parameter. 
""" if chdir: if not os.path.isabs(chdir): chdir = os.path.join(self.workpath(chdir)) if self.verbose: sys.stderr.write("chdir(" + chdir + ")\n") else: chdir = self.workdir cmd = [] if program and program[0]: if program[0] != self.program[0] and not os.path.isabs(program[0]): program[0] = os.path.join(self._cwd, program[0]) cmd += program else: cmd += self.program if arguments: cmd += arguments.split(" ") if self.verbose: sys.stderr.write(join(cmd, " ") + "\n") p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=chdir, universal_newlines=universal_newlines) if stdin: if type(stdin) is ListType: stdin = "".join(stdin) out, err = p.communicate(stdin) self._stdout.append(out) self._stderr.append(err) self.status = p.returncode if self.verbose: sys.stdout.write(self._stdout[-1]) sys.stderr.write(self._stderr[-1]) def stderr(self, run=None): """ Returns the error output from the specified run number. If there is no specified run number, then returns the error output of the last run. If the run number is less than zero, then returns the error output from that many runs back from the current run. """ if not run: run = len(self._stderr) elif run < 0: run = len(self._stderr) + run run -= 1 if run < 0: return '' return self._stderr[run] def stdout(self, run=None): """ Returns the standard output from the specified run number. If there is no specified run number, then returns the standard output of the last run. If the run number is less than zero, then returns the standard output from that many runs back from the current run. """ if not run: run = len(self._stdout) elif run < 0: run = len(self._stdout) + run run -= 1 if run < 0: return '' return self._stdout[run] def subdir(self, *subdirs): """ Create new subdirectories under the temporary working directory, one for each argument. An argument may be a list, in which case the list elements are concatenated using the os.path.join() method. 
Subdirectories multiple levels deep must be created using a separate argument for each level: test.subdir('sub', ['sub', 'dir'], ['sub', 'dir', 'ectory']) Returns the number of subdirectories actually created. """ count = 0 for sub in subdirs: if sub is None: continue if type(sub) is ListType: sub = apply(os.path.join, tuple(sub)) new = os.path.join(self.workdir, sub) try: os.mkdir(new) except: pass else: count += 1 return count def unlink(self, file): """ Unlinks the specified file name. The file name may be a list, in which case the elements are concatenated using the os.path.join() method. The file is assumed to be under the temporary working directory unless it is an absolute path name. """ if type(file) is ListType: file = apply(os.path.join, tuple(file)) if not os.path.isabs(file): file = os.path.join(self.workdir, file) os.unlink(file) def verbose_set(self, verbose): """Set the verbose level.""" self.verbose = verbose def workdir_set(self, path): """ Creates a temporary working directory with the specified path name. If the path is a null string (''), a unique directory name is created. """ if os.path.isabs(path): self.workdir = path else: if path != None: if path == '': path = tempfile.mktemp() if path != None: os.mkdir(path) self._dirlist.append(path) global _Cleanup try: _Cleanup.index(self) except ValueError: _Cleanup.append(self) # We would like to set self.workdir like this: # self.workdir = path # But symlinks in the path will report things differently from # os.getcwd(), so chdir there and back to fetch the canonical # path. cwd = os.getcwd() os.chdir(path) self.workdir = os.getcwd() os.chdir(cwd) else: self.workdir = None def workpath(self, *args): """ Returns the absolute path name to a subdirectory or file within the current temporary working directory. Concatenates the temporary working directory name with the specified arguments using os.path.join(). 
""" return apply(os.path.join, (self.workdir,) + tuple(args)) def writable(self, top, write): """ Make the specified directory tree writable (write == 1) or not (write == None). """ def _walk_chmod(arg, dirname, names): st = os.stat(dirname) os.chmod(dirname, arg(st[stat.ST_MODE])) for name in names: fullname = os.path.join(dirname, name) st = os.stat(fullname) os.chmod(fullname, arg(st[stat.ST_MODE])) _mode_writable = lambda mode: stat.S_IMODE(mode|0200) _mode_non_writable = lambda mode: stat.S_IMODE(mode&~0200) if write: f = _mode_writable else: f = _mode_non_writable try: os.path.walk(top, _walk_chmod, f) except: pass # Ignore any problems changing modes. def write(self, file, content, mode='wb'): """ Writes the specified content text (second argument) to the specified file name (first argument). The file name may be a list, in which case the elements are concatenated using the os.path.join() method. The file is created under the temporary working directory. Any subdirectories in the path must already exist. The I/O mode for the file may be specified and must begin with a 'w'. The default is 'wb' (binary write). """ if type(file) is ListType: file = apply(os.path.join, tuple(file)) if not os.path.isabs(file): file = os.path.join(self.workdir, file) if mode[0] != 'w': raise ValueError, "mode must begin with 'w'" open(file, mode).write(content)
PURPOSE: Cyclooxygenase (COX)-2 and matrix metalloproteinase (MMP)-9 play a key role in the pathogenesis of in-stent restenosis. We investigated the effect of a short-term therapy of celecoxib, a COX-2 inhibitor, with or without doxycycline, an MMP inhibitor, after coronary stenting on inflammatory biomarkers and neointimal hyperplasia. MATERIALS AND METHODS: A total of 75 patients (86 lesions) treated with bare metal stents were randomized into three groups: 1) combination therapy (200 mg celecoxib and 20 mg doxycycline, both twice daily), 2) celecoxib (200 mg twice daily) only, and 3) non-therapy control. Celecoxib and doxycycline were administered for 3 weeks after coronary stenting. The primary endpoint was neointimal volume obstruction by intravascular ultrasound (IVUS) at 6 months. The secondary endpoints included clinical outcomes, angiographic data, and changes in blood levels of inflammatory biomarkers. RESULTS: Follow-up IVUS revealed no significant difference in the neointimal volume obstruction among the three treatment groups. There was no difference in cardiac deaths, myocardial infarctions, target lesion revascularization or stent thrombosis among the groups. Blood levels of high-sensitivity C-reactive protein, soluble CD40 ligand, and MMP-9 varied widely 48 hours and 3 weeks after coronary stenting, however, they did not show any significant difference among the groups. CONCLUSION: Our study failed to demonstrate any beneficial effects of the short-term therapy with celecoxib and doxycycline or with celecoxib alone in the suppression of inflammatory biomarkers or in the inhibition of neointimal hyperplasia. Large scale randomized trials are necessary to define the role of anti-inflammatory therapy in the inhibition of neointimal hyperplasia.
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2012-2013 Uwe Hermann <[email protected]>
## Copyright (C) 2019 Stephan Thiele <[email protected]>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##

from common.srdhelper import bitpack_msb
import sigrokdecode as srd

class SamplerateError(Exception):
    # Raised when decoding is attempted without a known samplerate.
    pass

def dlc2len(dlc):
    """Map a 4-bit DLC field value (0..15) to the CAN-FD payload length in bytes."""
    return [0, 1, 2, 3, 4, 5, 6, 7, 8, 12, 16, 20, 24, 32, 48, 64][dlc]

class Decoder(srd.Decoder):
    """CAN bus protocol decoder (sigrok PD API v3)."""
    api_version = 3
    id = 'can'
    name = 'CAN'
    longname = 'Controller Area Network'
    desc = 'Field bus protocol for distributed realtime control.'
    license = 'gplv2+'
    inputs = ['logic']
    outputs = ['can']
    tags = ['Automotive']
    channels = (
        {'id': 'can_rx', 'name': 'CAN RX', 'desc': 'CAN bus line'},
    )
    options = (
        {'id': 'nominal_bitrate', 'desc': 'Nominal bitrate (bits/s)', 'default': 1000000},
        {'id': 'fast_bitrate', 'desc': 'Fast bitrate (bits/s)', 'default': 2000000},
        {'id': 'sample_point', 'desc': 'Sample point (%)', 'default': 70.0},
    )
    # Annotation class indices below are referenced by number in put* calls.
    annotations = (
        ('data', 'Payload data'),
        ('sof', 'Start of frame'),
        ('eof', 'End of frame'),
        ('id', 'Identifier'),
        ('ext-id', 'Extended identifier'),
        ('full-id', 'Full identifier'),
        ('ide', 'Identifier extension bit'),
        ('reserved-bit', 'Reserved bit 0 and 1'),
        ('rtr', 'Remote transmission request'),
        ('srr', 'Substitute remote request'),
        ('dlc', 'Data length count'),
        ('crc-sequence', 'CRC sequence'),
        ('crc-delimiter', 'CRC delimiter'),
        ('ack-slot', 'ACK slot'),
        ('ack-delimiter', 'ACK delimiter'),
        ('stuff-bit', 'Stuff bit'),
        ('warning', 'Warning'),
        ('bit', 'Bit'),
    )
    annotation_rows = (
        ('bits', 'Bits', (15, 17)),
        ('fields', 'Fields', tuple(range(15))),
        ('warnings', 'Warnings', (16,)),
    )

    def __init__(self):
        self.reset()

    def reset(self):
        # Full reset: forget the samplerate as well as per-frame state.
        self.samplerate = None
        self.reset_variables()

    def start(self):
        # Register both annotation and Python (stacked-decoder) outputs.
        self.out_ann = self.register(srd.OUTPUT_ANN)
        self.out_python = self.register(srd.OUTPUT_PYTHON)

    def set_bit_rate(self, bitrate):
        # bit_width: samples per CAN bit; sample_point: offset (in samples)
        # into the bit at which the line is sampled.
        self.bit_width = float(self.samplerate) / float(bitrate)
        self.sample_point = (self.bit_width / 100.0) * self.options['sample_point']

    def set_nominal_bitrate(self):
        self.set_bit_rate(self.options['nominal_bitrate'])

    def set_fast_bitrate(self):
        self.set_bit_rate(self.options['fast_bitrate'])

    def metadata(self, key, value):
        # Called by the frontend; derive bit timing from the samplerate.
        if key == srd.SRD_CONF_SAMPLERATE:
            self.samplerate = value
            self.bit_width = float(self.samplerate) / float(self.options['nominal_bitrate'])
            self.sample_point = (self.bit_width / 100.0) * self.options['sample_point']

    # Generic helper for CAN bit annotations.
    def putg(self, ss, es, data):
        """Annotate from sample-point positions, widened to full bit spans."""
        # ss/es are sample-point samplenums; extend left to the bit start and
        # right to the bit end so the annotation covers whole bits.
        left, right = int(self.sample_point), int(self.bit_width - self.sample_point)
        self.put(ss - left, es + right, self.out_ann, data)

    # Single-CAN-bit annotation using the current samplenum.
    def putx(self, data):
        self.putg(self.samplenum, self.samplenum, data)

    # Single-CAN-bit annotation using the samplenum of CAN bit 12.
    def put12(self, data):
        self.putg(self.ss_bit12, self.ss_bit12, data)

    # Single-CAN-bit annotation using the samplenum of CAN bit 32.
    def put32(self, data):
        self.putg(self.ss_bit32, self.ss_bit32, data)

    # Multi-CAN-bit annotation from self.ss_block to current samplenum.
    def putb(self, data):
        self.putg(self.ss_block, self.samplenum, data)

    def putpy(self, data):
        # Emit the decoded frame to stacked decoders (Python output).
        self.put(self.ss_packet, self.es_packet, self.out_python, data)

    def reset_variables(self):
        """Reset all per-frame decoding state back to IDLE."""
        self.state = 'IDLE'
        self.sof = self.frame_type = self.dlc = None
        self.rawbits = [] # All bits, including stuff bits
        self.bits = [] # Only actual CAN frame bits (no stuff bits)
        self.curbit = 0 # Current bit of CAN frame (bit 0 == SOF)
        self.last_databit = 999 # Positive value that bitnum+x will never match
        self.ss_block = None
        self.ss_bit12 = None
        self.ss_bit32 = None
        self.ss_databytebits = []
        self.frame_bytes = []
        self.rtr_type = None
        self.fd = False # True once a CAN-FD frame is detected
        self.rtr = None

    # Poor man's clock synchronization. Use signal edges which change to
    # dominant state in rather simple ways. This naive approach is neither
    # aware of the SYNC phase's width nor the specific location of the edge,
    # but improves the decoder's reliability when the input signal's bitrate
    # does not exactly match the nominal rate.
    def dom_edge_seen(self, force = False):
        # Record where (sample) and when (bit count) the dominant edge occurred;
        # get_sample_point() measures bit positions relative to this anchor.
        self.dom_edge_snum = self.samplenum
        self.dom_edge_bcount = self.curbit

    # Determine the position of the next desired bit's sample point.
def get_sample_point(self, bitnum):
    """Return the absolute sample number of bit 'bitnum's sample point,
    anchored at the last dominant edge seen (see dom_edge_seen())."""
    samplenum = self.dom_edge_snum
    samplenum += self.bit_width * (bitnum - self.dom_edge_bcount)
    samplenum += self.sample_point
    return int(samplenum)

def is_stuff_bit(self):
    """Return True (and drop the bit from self.bits) if the bit just
    appended is a stuff bit."""
    # CAN uses NRZ encoding and bit stuffing.
    # After 5 identical bits, a stuff bit of opposite value is added.
    # But not in the CRC delimiter, ACK, and end of frame fields.
    if len(self.bits) > self.last_databit + 17:
        return False
    last_6_bits = self.rawbits[-6:]
    if last_6_bits not in ([0, 0, 0, 0, 0, 1], [1, 1, 1, 1, 1, 0]):
        return False
    # Stuff bit. Keep it in self.rawbits, but drop it from self.bits.
    self.bits.pop() # Drop last bit.
    return True

def is_valid_crc(self, crc_bits):
    # NOTE(review): always reports success; CRC checking is unimplemented.
    return True # TODO

def decode_error_frame(self, bits):
    pass # TODO

def decode_overload_frame(self, bits):
    pass # TODO

# Both standard and extended frames end with CRC, CRC delimiter, ACK,
# ACK delimiter, and EOF fields. Handle them in a common function.
# Returns True if the frame ended (EOF), False otherwise.
def decode_frame_end(self, can_rx, bitnum):
    # Remember start of CRC sequence (see below).
    if bitnum == (self.last_databit + 1):
        self.ss_block = self.samplenum
        if self.fd:
            if dlc2len(self.dlc) < 16:
                self.crc_len = 27 # 17 + SBC + stuff bits
            else:
                self.crc_len = 32 # 21 + SBC + stuff bits
        else:
            self.crc_len = 15
    # CRC sequence (15 bits, 17 bits or 21 bits)
    elif bitnum == (self.last_databit + self.crc_len):
        if self.fd:
            if dlc2len(self.dlc) < 16:
                crc_type = "CRC-17"
            else:
                crc_type = "CRC-21"
        else:
            crc_type = "CRC-15"
        x = self.last_databit + 1
        crc_bits = self.bits[x:x + self.crc_len + 1]
        self.crc = bitpack_msb(crc_bits)
        self.putb([11, ['%s sequence: 0x%04x' % (crc_type, self.crc),
                        '%s: 0x%04x' % (crc_type, self.crc),
                        '%s' % crc_type]])
        if not self.is_valid_crc(crc_bits):
            self.putb([16, ['CRC is invalid']])
    # CRC delimiter bit (recessive)
    elif bitnum == (self.last_databit + self.crc_len + 1):
        self.putx([12, ['CRC delimiter: %d' % can_rx,
                        'CRC d: %d' % can_rx, 'CRC d']])
        if can_rx != 1:
            self.putx([16, ['CRC delimiter must be a recessive bit']])
        # After the FD data phase the bus returns to the nominal bitrate.
        if self.fd:
            self.set_nominal_bitrate()
    # ACK slot bit (dominant: ACK, recessive: NACK)
    elif bitnum == (self.last_databit + self.crc_len + 2):
        ack = 'ACK' if can_rx == 0 else 'NACK'
        self.putx([13, ['ACK slot: %s' % ack, 'ACK s: %s' % ack, 'ACK s']])
    # ACK delimiter bit (recessive)
    elif bitnum == (self.last_databit + self.crc_len + 3):
        self.putx([14, ['ACK delimiter: %d' % can_rx,
                        'ACK d: %d' % can_rx, 'ACK d']])
        if can_rx != 1:
            self.putx([16, ['ACK delimiter must be a recessive bit']])
    # Remember start of EOF (see below).
    elif bitnum == (self.last_databit + self.crc_len + 4):
        self.ss_block = self.samplenum
    # End of frame (EOF), 7 recessive bits
    elif bitnum == (self.last_databit + self.crc_len + 10):
        self.putb([2, ['End of frame', 'EOF', 'E']])
        if self.rawbits[-7:] != [1, 1, 1, 1, 1, 1, 1]:
            self.putb([16, ['End of frame (EOF) must be 7 recessive bits']])
        self.es_packet = self.samplenum
        py_data = tuple([self.frame_type, self.fullid, self.rtr_type,
                         self.dlc, self.frame_bytes])
        self.putpy(py_data)
        self.reset_variables()
        return True
    return False

# Returns True if the frame ended (EOF), False otherwise.
def decode_standard_frame(self, can_rx, bitnum):
    # Bit 14: FDF (Flexible data format)
    # Has to be sent dominant when FD frame, has to be sent recessive
    # when classic CAN frame.
    if bitnum == 14:
        self.fd = True if can_rx else False
        if self.fd:
            self.putx([7, ['Flexible data format: %d' % can_rx,
                           'FDF: %d' % can_rx, 'FDF']])
        else:
            self.putx([7, ['Reserved bit 0: %d' % can_rx,
                           'RB0: %d' % can_rx, 'RB0']])
        if self.fd:
            # Bit 12: Substitute remote request (SRR) bit
            self.put12([8, ['Substitute remote request', 'SRR']])
            self.dlc_start = 18
        else:
            # Bit 12: Remote transmission request (RTR) bit
            # Data frame: dominant, remote frame: recessive
            # Remote frames do not contain a data field.
            rtr = 'remote' if self.bits[12] == 1 else 'data'
            self.put12([8, ['Remote transmission request: %s frame' % rtr,
                            'RTR: %s frame' % rtr, 'RTR']])
            self.rtr_type = rtr
            self.dlc_start = 15
    if bitnum == 15 and self.fd:
        self.putx([7, ['Reserved: %d' % can_rx, 'R0: %d' % can_rx, 'R0']])
    if bitnum == 16 and self.fd:
        self.putx([7, ['Bit rate switch: %d' % can_rx,
                       'BRS: %d' % can_rx, 'BRS']])
    if bitnum == 17 and self.fd:
        self.putx([7, ['Error state indicator: %d' % can_rx,
                       'ESI: %d' % can_rx, 'ESI']])
    # Remember start of DLC (see below).
    elif bitnum == self.dlc_start:
        self.ss_block = self.samplenum
    # Bits 15-18: Data length code (DLC), in number of bytes (0-8).
    elif bitnum == self.dlc_start + 3:
        self.dlc = bitpack_msb(self.bits[self.dlc_start:self.dlc_start + 4])
        self.putb([10, ['Data length code: %d' % self.dlc,
                        'DLC: %d' % self.dlc, 'DLC']])
        self.last_databit = self.dlc_start + 3 + (dlc2len(self.dlc) * 8)
        if self.dlc > 8 and not self.fd:
            self.putb([16, ['Data length code (DLC) > 8 is not allowed']])
    # Remember all databyte bits, except the very last one.
    elif bitnum in range(self.dlc_start + 4, self.last_databit):
        self.ss_databytebits.append(self.samplenum)
    # Bits 19-X: Data field (0-8 bytes, depending on DLC)
    # The bits within a data byte are transferred MSB-first.
    elif bitnum == self.last_databit:
        self.ss_databytebits.append(self.samplenum) # Last databyte bit.
        for i in range(dlc2len(self.dlc)):
            x = self.dlc_start + 4 + (8 * i)
            b = bitpack_msb(self.bits[x:x + 8])
            self.frame_bytes.append(b)
            ss = self.ss_databytebits[i * 8]
            es = self.ss_databytebits[((i + 1) * 8) - 1]
            self.putg(ss, es, [0, ['Data byte %d: 0x%02x' % (i, b),
                                   'DB %d: 0x%02x' % (i, b), 'DB']])
        self.ss_databytebits = []
    elif bitnum > self.last_databit:
        return self.decode_frame_end(can_rx, bitnum)
    return False

# Returns True if the frame ended (EOF), False otherwise.
def decode_extended_frame(self, can_rx, bitnum):
    # Remember start of EID (see below).
    if bitnum == 14:
        self.ss_block = self.samplenum
        self.fd = False
        self.dlc_start = 35
    # Bits 14-31: Extended identifier (EID[17..0])
    elif bitnum == 31:
        self.eid = bitpack_msb(self.bits[14:])
        s = '%d (0x%x)' % (self.eid, self.eid)
        self.putb([4, ['Extended Identifier: %s' % s,
                       'Extended ID: %s' % s, 'Extended ID', 'EID']])
        self.fullid = self.ident << 18 | self.eid
        s = '%d (0x%x)' % (self.fullid, self.fullid)
        self.putb([5, ['Full Identifier: %s' % s,
                       'Full ID: %s' % s, 'Full ID', 'FID']])
        # Bit 12: Substitute remote request (SRR) bit
        self.put12([9, ['Substitute remote request: %d' % self.bits[12],
                        'SRR: %d' % self.bits[12], 'SRR']])
    # Bit 32: Remote transmission request (RTR) bit
    # Data frame: dominant, remote frame: recessive
    # Remote frames do not contain a data field.
    # Remember start of RTR (see below).
    if bitnum == 32:
        self.ss_bit32 = self.samplenum
        self.rtr = can_rx
        if not self.fd:
            rtr = 'remote' if can_rx == 1 else 'data'
            self.putx([8, ['Remote transmission request: %s frame' % rtr,
                           'RTR: %s frame' % rtr, 'RTR']])
            self.rtr_type = rtr
    # Bit 33: RB1 (reserved bit)
    elif bitnum == 33:
        self.fd = True if can_rx else False
        if self.fd:
            self.dlc_start = 37
            self.putx([7, ['Flexible data format: %d' % can_rx,
                           'FDF: %d' % can_rx, 'FDF']])
            self.put32([7, ['Reserved bit 1: %d' % self.rtr,
                            'RB1: %d' % self.rtr, 'RB1']])
        else:
            self.putx([7, ['Reserved bit 1: %d' % can_rx,
                           'RB1: %d' % can_rx, 'RB1']])
    # Bit 34: RB0 (reserved bit)
    elif bitnum == 34:
        self.putx([7, ['Reserved bit 0: %d' % can_rx,
                       'RB0: %d' % can_rx, 'RB0']])
    elif bitnum == 35 and self.fd:
        self.putx([7, ['Bit rate switch: %d' % can_rx,
                       'BRS: %d' % can_rx, 'BRS']])
    elif bitnum == 36 and self.fd:
        self.putx([7, ['Error state indicator: %d' % can_rx,
                       'ESI: %d' % can_rx, 'ESI']])
    # Remember start of DLC (see below).
    elif bitnum == self.dlc_start:
        self.ss_block = self.samplenum
    # Bits 35-38: Data length code (DLC), in number of bytes (0-8).
    elif bitnum == self.dlc_start + 3:
        self.dlc = bitpack_msb(self.bits[self.dlc_start:self.dlc_start + 4])
        self.putb([10, ['Data length code: %d' % self.dlc,
                        'DLC: %d' % self.dlc, 'DLC']])
        self.last_databit = self.dlc_start + 3 + (dlc2len(self.dlc) * 8)
    # Remember all databyte bits, except the very last one.
    elif bitnum in range(self.dlc_start + 4, self.last_databit):
        self.ss_databytebits.append(self.samplenum)
    # Bits 39-X: Data field (0-8 bytes, depending on DLC)
    # The bits within a data byte are transferred MSB-first.
    elif bitnum == self.last_databit:
        self.ss_databytebits.append(self.samplenum) # Last databyte bit.
        for i in range(dlc2len(self.dlc)):
            x = self.dlc_start + 4 + (8 * i)
            b = bitpack_msb(self.bits[x:x + 8])
            self.frame_bytes.append(b)
            ss = self.ss_databytebits[i * 8]
            es = self.ss_databytebits[((i + 1) * 8) - 1]
            self.putg(ss, es, [0, ['Data byte %d: 0x%02x' % (i, b),
                                   'DB %d: 0x%02x' % (i, b), 'DB']])
        self.ss_databytebits = []
    elif bitnum > self.last_databit:
        return self.decode_frame_end(can_rx, bitnum)
    return False

def handle_bit(self, can_rx):
    """Process one sampled CAN bit: stuff-bit filtering, then dispatch
    on the bit's position within the frame."""
    self.rawbits.append(can_rx)
    self.bits.append(can_rx)
    # Get the index of the current CAN frame bit (without stuff bits).
    bitnum = len(self.bits) - 1
    if self.fd and can_rx:
        if bitnum == 16 and self.frame_type == 'standard' \
            or bitnum == 35 and self.frame_type == 'extended':
            self.dom_edge_seen(force=True)
            self.set_fast_bitrate()
    # If this is a stuff bit, remove it from self.bits and ignore it.
    if self.is_stuff_bit():
        self.putx([15, [str(can_rx)]])
        self.curbit += 1 # Increase self.curbit (bitnum is not affected).
        return
    else:
        self.putx([17, [str(can_rx)]])
    # Bit 0: Start of frame (SOF) bit
    if bitnum == 0:
        self.ss_packet = self.samplenum
        self.putx([1, ['Start of frame', 'SOF', 'S']])
        if can_rx != 0:
            self.putx([16, ['Start of frame (SOF) must be a dominant bit']])
    # Remember start of ID (see below).
    elif bitnum == 1:
        self.ss_block = self.samplenum
    # Bits 1-11: Identifier (ID[10..0])
    # The bits ID[10..4] must NOT be all recessive.
    elif bitnum == 11:
        # BEWARE! Don't clobber the decoder's .id field which is
        # part of its boiler plate!
        self.ident = bitpack_msb(self.bits[1:])
        self.fullid = self.ident
        # NOTE(review): the trailing comma makes 's' a 1-tuple; the
        # '%s' % s formats still work (one arg, one placeholder), but
        # the comma looks unintentional — consider removing it.
        s = '%d (0x%x)' % (self.ident, self.ident),
        self.putb([3, ['Identifier: %s' % s, 'ID: %s' % s, 'ID']])
        if (self.ident & 0x7f0) == 0x7f0:
            self.putb([16, ['Identifier bits 10..4 must not be all recessive']])
    # RTR or SRR bit, depending on frame type (gets handled later).
    elif bitnum == 12:
        # self.putx([0, ['RTR/SRR: %d' % can_rx]]) # Debug only.
        self.ss_bit12 = self.samplenum
    # Bit 13: Identifier extension (IDE) bit
    # Standard frame: dominant, extended frame: recessive
    elif bitnum == 13:
        ide = self.frame_type = 'standard' if can_rx == 0 else 'extended'
        self.putx([6, ['Identifier extension bit: %s frame' % ide,
                       'IDE: %s frame' % ide, 'IDE']])
    # Bits 14-X: Frame-type dependent, passed to the resp. handlers.
    elif bitnum >= 14:
        if self.frame_type == 'standard':
            done = self.decode_standard_frame(can_rx, bitnum)
        else:
            done = self.decode_extended_frame(can_rx, bitnum)
        # The handlers return True if a frame ended (EOF).
        if done:
            return
    # After a frame there are 3 intermission bits (recessive).
    # After these bits, the bus is considered free.
    self.curbit += 1

def decode(self):
    """Main decode loop: sync on dominant edges, sample each bit at its
    computed sample point, and feed it to handle_bit()."""
    if not self.samplerate:
        raise SamplerateError('Cannot decode without samplerate.')
    while True:
        # State machine.
        if self.state == 'IDLE':
            # Wait for a dominant state (logic 0) on the bus.
            (can_rx,) = self.wait({0: 'l'})
            self.sof = self.samplenum
            self.dom_edge_seen(force = True)
            self.state = 'GET BITS'
        elif self.state == 'GET BITS':
            # Wait until we're in the correct bit/sampling position.
            pos = self.get_sample_point(self.curbit)
            (can_rx,) = self.wait([{'skip': pos - self.samplenum}, {0: 'f'}])
            if self.matched[1]:
                self.dom_edge_seen()
            if self.matched[0]:
                self.handle_bit(can_rx)
we’re all goin over some day. where does this take us. i wan my golden arm. she listens to her mother. (this time my hat was larger). (the oldest had no glasses).
import sys import _mysql as mysql def get_mysql_credentials(): # read in credentials file lines = tuple(open('mysqlcreds.txt', 'r')) # return the tuple of the lines in the file # # host # dbname # username # password # return lines def main(argv): print "Starting application."; # pull from the database a list of all of the incidents to date print "Connecting to Database and pulling all incidents." # get our db info from our local file dbcreds = get_mysql_credentials() # decode responce host = dbcreds[0].rstrip() dbname = dbcreds[1].rstrip() username = dbcreds[2].rstrip() password = dbcreds[3].rstrip() # connect to our database database = mysql.connect(host=host,user=username,passwd=password,db=dbname) # generate query, and get the number of rows returned query = 'SELECT DISTINCT itemid FROM incidents' database.query(query) dbresult=database.store_result() #(count,),=dbresult.fetch_row() # get all of the incident itemid's from the result itemids = [] for row in dbresult.fetch_row(maxrows=0): itemids.append(row[0]) print "\tRetrieved {0} items".format(len(itemids)) print "... Done." print "Generating list of unique agencies ..." agencies = [] # iterate through and genereate a list of only uniuque agencies for itemid in itemids: # get short name of agency ( first four leters of the incident id ) shortname = itemid[0:4] # see if we have added it already if any(shortname is a for a in agencies) == False: # need to add the new agency to the list of agencies print "\tNew Agency Found! Shortname = {0}".format(shortname) agencies.append(shortname) print "... Done." print "Pushing {0} agencies to database ...".format(len(agencies)) for agency in agencies: query = 'INSERT INTO agencies (shortname,longname,description,websiteurl) VALUES("{0}","","","")'.format(agency) database.query(query) print "... Done." if __name__ == '__main__': sys.exit(main(sys.argv))
Are you ready to step up your health, relieve some stress and win a meal at Beefsteak or a new Apple Watch? From October 16th through the 26th, join GW students, faculty and staff to face off in a challenge to crown our new steps champion. We know that exercise is good for our health, and studies show that walking not only boosts energy, but also relieves stress. So, we are challenging you to #StepItUpGW and participate in the GW Steps Challenge. Registration ends October 15th, 2017, at 11:59PM and the competition starts October 16th. Winners will be announced on Twitter @GWTweets and on Facebook at the George Washington University on October 30, 2017. Across the University, all departments and schools will compete against each other in the team challenge to win a Beefsteak lunch. The team with the highest cumulative step average wins. So, the more steps you have each day, the more you can help boost your team’s average. You must participate in the contest to be eligible to win. Not only can you compete with your team, but as an individual, you can try to achieve a higher step count than President and Mrs. LeBlanc. For each day that you get more steps than their average step count, you will receive one entry into a raffle to win an Apple Watch (or similar device). For example, if you beat their average step count for all 11 days of the challenge, you will receive 11 entries into the raffle. You will automatically be entered into the Chase Mode challenge when joining a team using the registration instructions below. You can download the Stridekick app from the Apple App Store/Google Play Store or use the mobile-friendly site to access your profile and get started. On your Stridekick dashboard, you can check your team stats, follow the scoreboard, and encourage your friends, as well as keep up with President and Mrs. LeBlanc’s step count average. Click here to find and select your team. Students should join their school. 
Sign-in to or sign-up for your Stridekick account by pressing "Join." Follow the on-screen instructions to complete setup. Once you are on a team, you will also automatically be entered into the Chase challenge as an individual. For a full list of supported devices, click here. Criteria for Winning - You must have a Stridekick account and participate in the challenge by registering before the registration deadline to be eligible for either of the prizes. Read the official rules here. Tracking Device Account - Make sure to create an account for your tracking device in addition to the Stridekick account. You will need your tracking device account information to create a Stridekick account. Only One Tracking Device per Participant - You can only have one tracking device connected to a Stridekick account at a time. Only One Stridekick Account per Participant - You can only have one Stridekick account with the same device. Otherwise, sync errors may occur. Switching Accounts - If you need to switch the device to another account, please visit the Stridekick portal and click the blue bubble in the bottom right hand corner of the screen for assistance from Stridekick’s support team. Joining the Challenge Late - If you join the GW Steps Challenge after the start date and time on October 16th, your steps will still be accounted for as long as you have been syncing and capturing steps on your device. You can still participate, but will be ineligible to win any prizes. Other Questions - Have an issue with your device or Stridekick account? Check out the Help Center or contact support by clicking on the question mark [?] on the bottom right side of your Stridekick dashboard.
# Copyright 2020 by Kurt Rathjen. All Rights Reserved.
#
# This library is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. This library is distributed in the
# hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
import logging

from studiovendor.Qt import QtGui
from studiovendor.Qt import QtCore
from studiovendor.Qt import QtWidgets

import studioqt
import studiolibrary


logger = logging.getLogger(__name__)


class LineEdit(QtWidgets.QLineEdit):
    """A QLineEdit for search input with an embedded search icon on the
    left and a clear ("x") button on the right that appears only while
    the field contains text."""

    def __init__(self, *args):
        """Build the two overlay buttons and wire up their signals."""
        QtWidgets.QLineEdit.__init__(self, *args)

        # Left-hand magnifier icon; clicking it focuses the field.
        icon = studiolibrary.resource.icon("search.svg")
        self._iconButton = QtWidgets.QPushButton(self)
        self._iconButton.setObjectName("icon")
        self._iconButton.clicked.connect(self._iconClicked)
        self._iconButton.setIcon(icon)
        self._iconButton.setStyleSheet("QPushButton{background-color: transparent;}")

        # Right-hand clear button; hidden while the field is empty.
        icon = studiolibrary.resource.icon("times.svg")
        self._clearButton = QtWidgets.QPushButton(self)
        self._clearButton.setObjectName("clear")
        self._clearButton.setCursor(QtCore.Qt.ArrowCursor)
        self._clearButton.setIcon(icon)
        self._clearButton.setToolTip("Clear all search text")
        self._clearButton.clicked.connect(self._clearClicked)
        self._clearButton.setStyleSheet("QPushButton{background-color: transparent;}")

        self.textChanged.connect(self._textChanged)
        color = studioqt.Color.fromString("rgb(250,250,250,115)")
        self.setIconColor(color)
        self.update()

    def update(self):
        """Refresh the icon color and clear-button visibility.

        NOTE(review): this overrides QWidget.update() (the repaint
        scheduler) with different semantics — confirm that is intended.
        """
        self.updateIconColor()
        self.updateClearButton()

    def _textChanged(self, text):
        """ Triggered when the text changes.

        :type text: str
        :rtype: None
        """
        self.updateClearButton()

    def _clearClicked(self):
        """ Triggered when the user clicks the cross icon.

        :rtype: None
        """
        self.setText("")
        self.setFocus()

    def _iconClicked(self):
        """ Triggered when the user clicks on the icon.

        :rtype: None
        """
        if not self.hasFocus():
            self.setFocus()

    def updateClearButton(self):
        """ Update the clear button depending on the current text.

        :rtype: None
        """
        text = self.text()
        if text:
            self._clearButton.show()
        else:
            self._clearButton.hide()

    def contextMenuEvent(self, event):
        """ Triggered when the user right clicks on the search widget.

        :type event: QtCore.QEvent
        :rtype: None
        """
        self.showContextMenu()

    def setIcon(self, icon):
        """ Set the icon for the search widget.

        :type icon: QtWidgets.QIcon
        :rtype: None
        """
        self._iconButton.setIcon(icon)

    def setIconColor(self, color):
        """ Set the icon color for the search widget icon.

        :type color: QtGui.QColor
        :rtype: None
        """
        # Recolor both overlay buttons so they match.
        icon = self._iconButton.icon()
        icon = studioqt.Icon(icon)
        icon.setColor(color)
        self._iconButton.setIcon(icon)

        icon = self._clearButton.icon()
        icon = studioqt.Icon(icon)
        icon.setColor(color)
        self._clearButton.setIcon(icon)

    def updateIconColor(self):
        """ Update the icon colors to the current foregroundRole.

        :rtype: None
        """
        color = self.palette().color(self.foregroundRole())
        color = studioqt.Color.fromColor(color)
        self.setIconColor(color)

    def settings(self):
        """ Return a dictionary of the current widget state.

        :rtype: dict
        """
        settings = {
            "text": self.text(),
        }
        return settings

    def setSettings(self, settings):
        """ Restore the widget state from a settings dictionary.

        :type settings: dict
        :rtype: None
        """
        text = settings.get("text", "")
        self.setText(text)

    def resizeEvent(self, event):
        """ Reimplemented so the icon maintains the same height as the widget.

        :type event: QtWidgets.QResizeEvent
        :rtype: None
        """
        QtWidgets.QLineEdit.resizeEvent(self, event)

        height = self.height()
        size = QtCore.QSize(16, 16)
        # Reserve space on the left for the search icon.
        self.setTextMargins(20, 0, 0, 0)

        self._iconButton.setIconSize(size)
        self._iconButton.setGeometry(0, 0, height, height)

        # Pin the clear button to the right edge.
        x = self.width() - height
        self._clearButton.setIconSize(size)
        self._clearButton.setGeometry(x, 0, height, height)
Description: The Gaminator Casino is still quite a young casino — it went online in 2015 — but it can already compete with the big online casinos. The Gaminator Casino presents itself in a very serious and stylish ambience. This casino offers over 40 different games from Novoline. Games from Merkur and NetEnt are also expected to be available at Gaminator shortly.
"""Module that builds the Graphical User Interface.""" from uiplib.scheduler import scheduler from uiplib.setWallpaper import change_background from uiplib.utils.utils import update_settings, check_sites from uiplib.gui.gallery import Gallery from uiplib.gui import generalTab, settingsTab from tkinter import * from tkinter import messagebox from tkinter.ttk import * from PIL import Image, ImageTk from queue import Queue import os class MainWindow: """The main window that houses the app.""" def __init__(self, settings): """Initialize the Main Window.""" # configuration self.settings = settings # base window self.root = Tk() self.root.resizable(width=False, height=False) # set window title self.root.title("UIP") # self.root.wm_iconbitmap() sets icon bitmap self.queue = Queue() self.index = 0 self.images = [] self.update_images() # create the UI self.create_ui() def create_ui(self): """Method to initialize UI.""" self.notebook = Notebook(self.root) self.notebook.pack() generalTab.create_general_tab(self) settingsTab.create_settings_tab(self) def show_progess(self, show): """Method to display download progress.""" if show: self.progressBar = Progressbar(self.headerFrame, orient=HORIZONTAL, length='300', variable=self.progress, mode='determinate') self.progressBar.pack(fill=BOTH, padx=5, pady=5) else: self.progressBar = None def push(self, x): """Method to push onto UI Queue.""" self.queue.push(x) def run(self): """Method that runs the main event loop.""" self.update_ui() # run the main event loop of UI self.root.mainloop() def update_ui(self): """Method that updates UI periodically.""" # update UI with data received while self.queue and not self.queue.empty(): pass # update UI after every 200ms self.root.after(200, self.update_ui) def next_wallpaper(self): """Preview next wallpaper.""" self.index = (self.index + 1) % len(self.images) self.gallery.set_image(self.images[self.index]) def prev_wallpaper(self): """Preview previous wallpaper.""" self.index -= 1 
self.gallery.set_image(self.images[self.index]) def set_wallpaper(self): """Set the wallpaper which is being previewed.""" image = self.images[self.index] change_background(image) def download(self): """Method to start download.""" pass def flush(self): """Method to flush all images.""" print("Flush Clicked!") def update_images(self): """Method to get images from directory.""" directory = self.settings['pics-folder'] files = os.listdir(directory) self.images = [os.path.join(directory, file) for file in files if (file.endswith('.png') or file.endswith('.jpg'))]
I am not talking about our lovely little girl who will turn 2 years soon, even if she is very independent. Sometimes even too much. I am glad to announce that our two feathered guests finally learned to fly and left their nest. It happened so quickly that I did not even realize they abandoned us. I have to admit that I started worrying about them because my husband gave them food and it seemed they were lazy in taking flying lessons. Anyway, one day I went on the balcony and did not find them anymore. They did not come not even for the night. I soon realized they went to live on their own. Good luck little baby pigeons! This entry was posted in Uncategorized and tagged pigeons on July 22, 2014 by Claudia. Today I discovered the “Italian Gestures Rap” on YouTube and I thought to share it with you. It was made by the US Consulate in Milan. It is about a young American diplomat coming to Italy. He is disoriented because he does not understand what the Italians say. So he takes Italian lessons and he learns how to manage 16 different Italian gestures. I never tought that “speaking with the hands” could be so difficult to understand for a foreigner. As an Italian, I have to say that I do not use the hands so frequently. When I ask for a coffee in a bar, I simply order a “caffè”. The gesture can be made when the place is crowded and it is difficult to be heard. Do you have similar gestures in your country too? This entry was posted in Uncategorized on July 2, 2014 by Claudia. I have just learnt of the imminent wedding here in Florence (Firenze, in Italian) between Kim Kardashian and Kanye West. They are going to get married on Saturday, May 24 at the Forte Belvedere. This Fort has been built between 1590 – 1595 under Ferdinando I De’ Medici, Grand Duke of Tuscany. The main purposes of this building were to protect the city and to show the power of the De’ Medici dinasty. 
Today it is possible to visit the Palazzina di Belvedere, inside the Fort, which is open for exhibitions or other particular events. From the exterior walls it is possible to have a gorgeous view of the city. Kim, listen, I am very sorry that I will not be able to attend your wedding ceremony, but next time (and I am sure there will be a next time) please let me know in advance so that I will not take on other commitments. Joking aside, this city is much loved among foreigners and there are many couples who choose it for their marriage or honeymoon or simply for a trip. This entry was posted in Uncategorized and tagged Kim Kardashian's Wedding on May 21, 2014 by Claudia.
# -*- coding: utf-8 -*- from django.db import models class Profissao(models.Model): descricao = models.CharField(max_length=20) class Meta: ordering = ["descricao"] verbose_name_plural = "profissoes" def __unicode__(self): return self.descricao class Membro(models.Model): nome_completo = models.CharField(max_length=50) apelido = models.CharField(max_length=20) apresentacao = models.TextField() membro_desde = models.DateField() profissao_membro = models.ForeignKey(Profissao) class Meta: ordering = ["membro_desde"] def __unicode__(self): return self.nome_completo class Servico(models.Model): titulo = models.CharField(max_length=50) descricao = models.TextField() def __unicode__(self): return self.titulo class Produto(models.Model): titulo = models.CharField(max_length=50) descricao = models.TextField() def __unicode__(self): return self.titulo class Portifolio(models.Model): titulo = models.CharField(max_length=50) descricao = models.TextField() def __unicode__(self): return self.titulo # class Dados_Contato(models.Model): # nome_empresa = models.CharField(max_length=50) # email = models.EmailField(max_length=75) # telefone = models.CharField(max_length=15) # def __unicode__(self): # return self.nome_empresa
The Sir Thomas White Loan Charity has been in existence since 1542 and was founded by one of the City’s most generous (but least well known) benefactors, Sir Thomas White. Although he never actually set foot in Leicester, Sir Thomas is honoured by being included as one of the four statues around the Clock Tower in Leicester city centre. We are an Equal Opportunities organisation providing loans for new business ideas and young businesses needing investment. Eligible candidates may borrow up to £20,000 (business) or £10,000 (education) interest free for 9 years repayable by equal instalments after 3 years to establish themselves in their chosen career. To be eligible for consideration for a loan you must be over 18 and under 35; you must conduct your business in Leicester, Leicestershire or Rutland for the duration of the loan, and must be able to demonstrate that the business has the potential to succeed.
# ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### """ This addon implements a subdivision scheme called deathguppie. It is ideal for creating more detail locally when sculpting. Documentation First go to User Preferences->Addons and enable the DeathGuppie addon in the Mesh category. Go to EditMode, select some elements and invoke the addon (button in the Mesh Tool panel). The selected area will be subdivided according to the deathguppie algorithm. Subdivision is destructive so this is no modifier but a mesh operation. Selected area after operation allows for further sudividing the area. The smooth tickbox chooses between smooth and non-smooth subdivision. The Select inner only tickbox sets what is left selected after operation, only inner faces or everything. BEWARE - deathguppie will only subdivide grids of quads! If you wish to hotkey DeathGuppie: In the Input section of User Preferences at the bottom of the 3D View > Mesh section click 'Add New' button. In the Operator Identifier box put 'mesh.deathguppie'. Assign a hotkey. Save as Default (Optional). 
""" bl_info = { "name": "DeathGuppie", "author": "Gert De Roost", "version": (0, 3, 0), "blender": (2, 63, 0), "location": "View3D > Tools", "description": "Deathguppie subdivision operation", "warning": "", "wiki_url": "", "tracker_url": "", "category": "Mesh"} import bpy import bmesh bpy.types.Scene.Smooth = bpy.props.BoolProperty( name = "Smoothing", description = "Subdivide smooth", default = True) bpy.types.Scene.Inner = bpy.props.BoolProperty( name = "Select inner only", description = "After operation only inner verts selected", default = True) class DeathGuppie(bpy.types.Operator): bl_idname = "mesh.deathguppie" bl_label = "DeathGuppie" bl_description = "Deathguppie subdivision operation" bl_options = {'REGISTER', 'UNDO'} @classmethod def poll(cls, context): obj = context.active_object return (obj and obj.type == 'MESH' and context.mode == 'EDIT_MESH') def invoke(self, context, event): self.do_deathguppie(context) return {'FINISHED'} def do_deathguppie(self, context): scn = context.scene selobj = context.active_object bpy.ops.object.editmode_toggle() bpy.ops.object.duplicate() projobj = bpy.context.active_object bpy.ops.object.editmode_toggle() bpy.ops.mesh.subdivide(number_cuts=5, smoothness=1.0) bpy.ops.object.editmode_toggle() projobj.hide = 1 context.scene.objects.active = selobj bpy.ops.object.editmode_toggle() mesh = selobj.data bm = bmesh.from_edit_mesh(mesh) bmkeep = bm.copy() facelist = [] for f1 in bm.faces: if f1.select: linked = [] for e in f1.edges: for f2 in e.link_faces: if f2 != f1: if f2.select: linked.append(f2.index) break facelist.insert(0, []) facelist[0].append(f1) facelist[0].append(linked) transfer = {} holdlist = [] for [f, linked] in facelist: bpy.ops.mesh.select_all(action = 'DESELECT') f.select = 1 transfer[f.calc_center_median()[:]] = [f.index, linked] bpy.ops.mesh.split() bpy.ops.object.editmode_toggle() bpy.ops.object.editmode_toggle() bm = bmesh.from_edit_mesh(mesh) facelist = [] for f in bm.faces: num = 0 for e in f.edges: if 
len(e.link_faces) == 1: num += 1 if num == 4: if f.calc_center_median()[:] in transfer.keys(): f.select = 1 facelist.insert(0, []) facelist[0].append(f) facelist[0].append(transfer[f.calc_center_median()[:]]) def createinnerlists(f): for l in f.loops: self.cornerlist.append(l.vert) self.vselset.add(l.vert) v1 = l.vert vnext = l.link_loop_next.vert vprev = l.link_loop_prev.vert vnextnext = l.link_loop_next.link_loop_next.vert vprevprev = l.link_loop_prev.link_loop_prev.vert tempco1 = v1.co + (vprev.co - v1.co) / 3 tempco2 = vnext.co + (vnextnext.co - vnext.co) / 3 vert = bm.verts.new(tempco1 + ((tempco2 - tempco1) / 3)) self.innerlist.append(vert) self.smoothset.add(vert) self.vselset = set([]) fselset = set([]) self.smoothset = set([]) for [f, [foldidx, linked]] in facelist: fold = bmkeep.faces[foldidx] linked2 = [] for idx in linked: linked2.append(bmkeep.faces[idx]) self.cornerlist = [] self.innerlist = [] if len(linked) == 4: createinnerlists(f) for e in f.edges: ne, vert1 = bmesh.utils.edge_split(e, e.verts[0], 0.66) ne, vert2 = bmesh.utils.edge_split(ne, vert1, 0.5) self.vselset.add(vert1) self.vselset.add(vert2) self.smoothset.add(vert1) self.smoothset.add(vert2) for idx in range(len(self.cornerlist)): cv = self.cornerlist[idx] for l in f.loops: if l.vert == cv: fs = bm.faces.new((cv, l.link_loop_next.vert, self.innerlist[idx], l.link_loop_prev.vert)) fselset.add(fs) fs = bm.faces.new((l.link_loop_prev.vert, l.link_loop_prev.link_loop_prev.vert, self.innerlist[idx - 1], self.innerlist[idx])) fselset.add(fs) fs = bm.faces.new((self.innerlist[0], self.innerlist[1], self.innerlist[2], self.innerlist[3])) fselset.add(fs) bm.faces.remove(f) elif len(linked) == 3: fedges = fold.edges[:] for e1 in fedges: for f1 in e1.link_faces: if len(e1.link_faces) == 1 or (f1 != fold and not(f1 in linked2)): edge = f.edges[fedges.index(e1)] createinnerlists(f) for e in f.edges: if e != edge: ne, vert1 = bmesh.utils.edge_split(e, e.verts[0], 0.66) ne, vert2 = 
bmesh.utils.edge_split(ne, vert1, 0.5) self.vselset.add(vert1) self.vselset.add(vert2) self.smoothset.add(vert1) self.smoothset.add(vert2) for l in edge.link_loops: if l.face == f: if l.edge == edge: v1 = l.vert vnext = l.link_loop_next.vert vprev = l.link_loop_prev.vert vnextnext = l.link_loop_next.link_loop_next.vert vprevprev = l.link_loop_prev.link_loop_prev.vert for idx in range(4): if self.cornerlist[idx] == v1: co1 = self.innerlist[idx].co + ((self.innerlist[idx].co - self.innerlist[idx-1].co) / 2) co2 = self.innerlist[idx-3].co + ((self.innerlist[idx-3].co - self.innerlist[idx-2].co) / 2) sidev1 = bm.verts.new(co1) sidev2 = bm.verts.new(co2) fs = bm.faces.new((v1, vnext, sidev2, sidev1)) if not(scn.Inner): fselset.add(fs) fs = bm.faces.new((v1, sidev1, self.innerlist[idx], vprev)) if not(scn.Inner): fselset.add(fs) fs = bm.faces.new((sidev2, vnext, vnextnext, self.innerlist[idx-3])) if not(scn.Inner): fselset.add(fs) fs = bm.faces.new((sidev1, sidev2, self.innerlist[idx-3], self.innerlist[idx])) if not(scn.Inner): fselset.add(fs) fs = bm.faces.new((self.innerlist[idx], self.innerlist[idx-1], vprevprev, vprev)) fselset.add(fs) self.cornerlist[self.cornerlist.index(v1)] = None self.cornerlist[self.cornerlist.index(vnext)] = None break for idx in range(len(self.cornerlist)): cv = self.cornerlist[idx] if cv != None: for l in f.loops: if l.vert == cv: fs = bm.faces.new((cv, l.link_loop_next.vert, self.innerlist[idx], l.link_loop_prev.vert)) fselset.add(fs) fs = bm.faces.new((l.link_loop_prev.vert, l.link_loop_prev.link_loop_prev.vert, self.innerlist[idx - 1], self.innerlist[idx])) fselset.add(fs) fs = bm.faces.new((self.innerlist[0], self.innerlist[1], self.innerlist[2], self.innerlist[3])) fselset.add(fs) bm.faces.remove(f) self.smoothset.add(sidev1) self.smoothset.add(sidev2) elif len(linked) == 2: case = 'BRIDGE' for vert in linked2[0].verts: if vert in linked2[1].verts: case = 'CORNER' break if case == 'CORNER': fedges = fold.edges[:] edges = [] for e1 in 
fedges: for f1 in e1.link_faces: if len(e1.link_faces) == 1 or (f1 != fold and not(f1 in linked2)): edges.append(f.edges[fedges.index(e1)]) for l in edges[1].link_loops: if l.face == f: if l.edge == edges[1] and l.link_loop_next.edge == edges[0]: edges.reverse() break createinnerlists(f) for e in f.edges: if not(e in edges): ne, vert1 = bmesh.utils.edge_split(e, e.verts[0], 0.66) ne, vert2 = bmesh.utils.edge_split(ne, vert1, 0.5) self.vselset.add(vert1) self.vselset.add(vert2) self.smoothset.add(vert1) self.smoothset.add(vert2) for l in edges[0].link_loops: if l.face == f: if l.edge == edges[0]: if l.link_loop_next.edge == edges[1]: v1 = l.vert vnext = l.link_loop_next.vert vprev = l.link_loop_prev.vert vnextnext = l.link_loop_next.link_loop_next.vert vnnn = l.link_loop_next.link_loop_next.link_loop_next.vert vprevprev = l.link_loop_prev.link_loop_prev.vert vppp = l.link_loop_prev.link_loop_prev.link_loop_prev.vert vpppp = l.link_loop_prev.link_loop_prev.link_loop_prev.link_loop_prev.vert for idx in range(4): if self.cornerlist[idx] == v1: delta1 = (self.innerlist[idx].co - self.innerlist[idx-1].co) / 2 co1 = self.innerlist[idx].co + delta1 delta2 = (self.innerlist[idx-3].co - self.innerlist[idx].co) / 2 delta3 = (self.innerlist[idx-3].co - self.innerlist[idx-2].co) / 2 co2 = self.innerlist[idx-3].co + delta1 + delta2 sidev1 = bm.verts.new(co1) sidev2 = bm.verts.new(co2) sidev3 = bm.verts.new(self.innerlist[idx-2].co + ((self.innerlist[idx-2].co - self.innerlist[idx-1].co) / 2)) fs = bm.faces.new((v1, vnext, sidev2, sidev1)) if not(scn.Inner): fselset.add(fs) fs = bm.faces.new((sidev3, sidev2, vnext, vnextnext)) if not(scn.Inner): fselset.add(fs) fs = bm.faces.new((v1, sidev1, self.innerlist[idx], vprev)) if not(scn.Inner): fselset.add(fs) fs = bm.faces.new((self.innerlist[idx-2], sidev3, vnextnext, vnnn)) if not(scn.Inner): fselset.add(fs) fs = bm.faces.new((sidev1, sidev2, self.innerlist[idx-3], self.innerlist[idx])) if not(scn.Inner): fselset.add(fs) fs = 
bm.faces.new((sidev2, sidev3, self.innerlist[idx-2], self.innerlist[idx-3])) if not(scn.Inner): fselset.add(fs) fs = bm.faces.new((vprevprev, vprev, self.innerlist[idx], self.innerlist[idx-1])) fselset.add(fs) fs = bm.faces.new((vpppp, vppp, vprevprev, self.innerlist[idx-1])) fselset.add(fs) fs = bm.faces.new((vnnn, vpppp, self.innerlist[idx-1], self.innerlist[idx-2])) fselset.add(fs) break break fs = bm.faces.new((self.innerlist[0], self.innerlist[1], self.innerlist[2], self.innerlist[3])) fselset.add(fs) bm.faces.remove(f) self.smoothset.add(sidev1) self.smoothset.add(sidev2) self.smoothset.add(sidev3) else: fedges = fold.edges[:] edges = [] for e1 in fedges: for f1 in e1.link_faces: if len(e1.link_faces) == 1 or (f1 != fold and not(f1 in linked2)): edges.append(f.edges[fedges.index(e1)]) createinnerlists(f) for e in f.edges: if not(e in edges): ne, vert1 = bmesh.utils.edge_split(e, e.verts[0], 0.66) ne, vert2 = bmesh.utils.edge_split(ne, vert1, 0.5) self.vselset.add(vert1) self.vselset.add(vert2) self.smoothset.add(vert1) self.smoothset.add(vert2) for l in f.loops: if l.edge == edges[0]: v1 = l.vert vnext = l.link_loop_next.vert vprev = l.link_loop_prev.vert vnextnext = l.link_loop_next.link_loop_next.vert vnnn = l.link_loop_next.link_loop_next.link_loop_next.vert vnnnn = l.link_loop_next.link_loop_next.link_loop_next.link_loop_next.vert vprevprev = l.link_loop_prev.link_loop_prev.vert vppp = l.link_loop_prev.link_loop_prev.link_loop_prev.vert vpppp = l.link_loop_prev.link_loop_prev.link_loop_prev.link_loop_prev.vert for idx in range(4): if self.cornerlist[idx] == v1: delta1 = (self.innerlist[idx].co - self.innerlist[idx-1].co) / 2 co1 = self.innerlist[idx].co + delta1 sidev1 = bm.verts.new(co1) delta2 = (self.innerlist[idx-3].co - self.innerlist[idx-2].co) / 2 co2 = self.innerlist[idx-3].co + delta2 sidev2 = bm.verts.new(co2) delta3 = (self.innerlist[idx-2].co - self.innerlist[idx-3].co) / 2 co3 = self.innerlist[idx-2].co + delta3 sidev3 = bm.verts.new(co3) 
delta4 = (self.innerlist[idx-1].co - self.innerlist[idx].co) / 2 co4 = self.innerlist[idx-1].co + delta4 sidev4 = bm.verts.new(co4) fs = bm.faces.new((v1, vnext, sidev2, sidev1)) if not(scn.Inner): fselset.add(fs) fs = bm.faces.new((v1, sidev1, self.innerlist[idx], vprev)) if not(scn.Inner): fselset.add(fs) fs = bm.faces.new((vnext, vnextnext, self.innerlist[idx-3], sidev2)) if not(scn.Inner): fselset.add(fs) fs = bm.faces.new((sidev1, sidev2, self.innerlist[idx-3], self.innerlist[idx])) if not(scn.Inner): fselset.add(fs) fs = bm.faces.new((vppp, sidev4, sidev3, vnnnn)) if not(scn.Inner): fselset.add(fs) fs = bm.faces.new((vppp, vprevprev, self.innerlist[idx-1], sidev4)) if not(scn.Inner): fselset.add(fs) fs = bm.faces.new((sidev3, self.innerlist[idx-2], vnnn, vnnnn)) if not(scn.Inner): fselset.add(fs) fs = bm.faces.new((sidev3, sidev4, self.innerlist[idx-1], self.innerlist[idx-2])) if not(scn.Inner): fselset.add(fs) fs = bm.faces.new((vprevprev, vprev, self.innerlist[idx], self.innerlist[idx-1])) fselset.add(fs) fs = bm.faces.new((vnextnext, vnnn, self.innerlist[idx-2], self.innerlist[idx-3])) fselset.add(fs) fs = bm.faces.new((self.innerlist[0], self.innerlist[1], self.innerlist[2], self.innerlist[3])) fselset.add(fs) bm.faces.remove(f) self.smoothset.add(sidev1) self.smoothset.add(sidev2) self.smoothset.add(sidev3) self.smoothset.add(sidev4) elif len(linked) == 1: fedges = fold.edges[:] edges = [] for e1 in fedges: for f1 in e1.link_faces: if len(e1.link_faces) == 1 or (f1 != fold and not(f1 in linked2)): edges.append(f.edges[fedges.index(e1)]) for l in f.loops: if not(l.edge in edges): edges = [l.link_loop_next.edge, l.link_loop_next.link_loop_next.edge, l.link_loop_next.link_loop_next.link_loop_next.edge] createinnerlists(f) for e in f.edges: if not(e in edges): ne, vert1 = bmesh.utils.edge_split(e, e.verts[0], 0.66) ne, vert2 = bmesh.utils.edge_split(ne, vert1, 0.5) self.vselset.add(vert1) self.vselset.add(vert2) self.smoothset.add(vert1) 
self.smoothset.add(vert2) for l in f.loops: if l.edge == edges[0]: v1 = l.vert vnext = l.link_loop_next.vert vprev = l.link_loop_prev.vert vnextnext = l.link_loop_next.link_loop_next.vert vnnn = l.link_loop_next.link_loop_next.link_loop_next.vert vprevprev = l.link_loop_prev.link_loop_prev.vert vppp = l.link_loop_prev.link_loop_prev.link_loop_prev.vert vpppp = l.link_loop_prev.link_loop_prev.link_loop_prev.link_loop_prev.vert for idx in range(4): if self.cornerlist[idx] == v1: delta1 = (self.innerlist[idx].co - self.innerlist[idx-1].co) / 2 co1 = self.innerlist[idx].co + delta1 delta2 = (self.innerlist[idx-3].co - self.innerlist[idx].co) / 2 delta3 = (self.innerlist[idx-3].co - self.innerlist[idx-2].co) / 2 co2 = self.innerlist[idx-3].co + delta1 + delta2 sidev1 = bm.verts.new(co1) sidev2 = bm.verts.new(co2) delta4 = (self.innerlist[idx-2].co - self.innerlist[idx-1].co) / 2 delta5 = (self.innerlist[idx-2].co - self.innerlist[idx-3].co) / 2 co3 = self.innerlist[idx-2].co + delta4 + delta5 sidev3 = bm.verts.new(co3) delta6 = (self.innerlist[idx-1].co - self.innerlist[idx].co) / 2 co4 = self.innerlist[idx-1].co + delta6 sidev4 = bm.verts.new(co4) fs = bm.faces.new((v1, vnext, sidev2, sidev1)) if not(scn.Inner): fselset.add(fs) fs = bm.faces.new((sidev3, sidev2, vnext, vnextnext)) if not(scn.Inner): fselset.add(fs) fs = bm.faces.new((v1, sidev1, self.innerlist[idx], vprev)) if not(scn.Inner): fselset.add(fs) fs = bm.faces.new((sidev1, sidev2, self.innerlist[idx-3], self.innerlist[idx])) if not(scn.Inner): fselset.add(fs) fs = bm.faces.new((sidev2, sidev3, self.innerlist[idx-2], self.innerlist[idx-3])) if not(scn.Inner): fselset.add(fs) fs = bm.faces.new((sidev4, sidev3, vnextnext, vppp)) if not(scn.Inner): fselset.add(fs) fs = bm.faces.new((self.innerlist[idx-2], self.innerlist[idx-1], sidev4, sidev3)) if not(scn.Inner): fselset.add(fs) fs = bm.faces.new((vprevprev, vppp, sidev4, self.innerlist[idx-1])) if not(scn.Inner): fselset.add(fs) fs = bm.faces.new((vprev, 
vprevprev, self.innerlist[idx-1], self.innerlist[idx])) fselset.add(fs) fs = bm.faces.new((self.innerlist[0], self.innerlist[1], self.innerlist[2], self.innerlist[3])) fselset.add(fs) bm.faces.remove(f) self.smoothset.add(sidev1) self.smoothset.add(sidev2) self.smoothset.add(sidev3) self.smoothset.add(sidev4) elif len(linked) == 0: createinnerlists(f) l = f.loops[0] v1 = l.vert vnext = l.link_loop_next.vert vprev = l.link_loop_prev.vert vnextnext = l.link_loop_next.link_loop_next.vert for idx in range(4): if self.cornerlist[idx] == v1: sidev1 = bm.verts.new((self.cornerlist[idx].co + self.innerlist[idx].co) / 2) sidev2 = bm.verts.new((self.cornerlist[idx-3].co + self.innerlist[idx-3].co) / 2) sidev3 = bm.verts.new((self.cornerlist[idx-2].co + self.innerlist[idx-2].co) / 2) sidev4 = bm.verts.new((self.cornerlist[idx-1].co + self.innerlist[idx-1].co) / 2) fs = bm.faces.new((v1, vnext, sidev2, sidev1)) if not(scn.Inner): fselset.add(fs) fs = bm.faces.new((sidev3, sidev2, vnext, vnextnext)) if not(scn.Inner): fselset.add(fs) fs = bm.faces.new((sidev4, sidev3, vnextnext, vprev)) if not(scn.Inner): fselset.add(fs) fs = bm.faces.new((sidev1, sidev4, vprev, v1)) if not(scn.Inner): fselset.add(fs) fs = bm.faces.new((sidev1, sidev2, self.innerlist[idx-3], self.innerlist[idx])) if not(scn.Inner): fselset.add(fs) fs = bm.faces.new((sidev2, sidev3, self.innerlist[idx-2], self.innerlist[idx-3])) if not(scn.Inner): fselset.add(fs) fs = bm.faces.new((sidev3, sidev4, self.innerlist[idx-1], self.innerlist[idx-2])) if not(scn.Inner): fselset.add(fs) fs = bm.faces.new((sidev4, sidev1, self.innerlist[idx], self.innerlist[idx-1])) if not(scn.Inner): fselset.add(fs) fs = bm.faces.new((self.innerlist[0], self.innerlist[1], self.innerlist[2], self.innerlist[3])) fselset.add(fs) bm.faces.remove(f) self.smoothset.add(sidev1) self.smoothset.add(sidev2) self.smoothset.add(sidev3) self.smoothset.add(sidev4) if scn.Smooth: for v in self.smoothset: v.co = projobj.closest_point_on_mesh(v.co)[0] 
bpy.ops.mesh.select_all(action ='SELECT') bm.normal_update() bpy.ops.mesh.normals_make_consistent() bpy.ops.mesh.select_all(action = 'DESELECT') for f in fselset: f.select = 1 for e in f.edges: e.select = 1 for v in f.verts: v.select = 1 for e in bm.edges: if len(e.link_faces) == 1: e.verts[0].select = 1 e.verts[1].select = 1 bpy.ops.mesh.remove_doubles() for e in bm.edges: if len(e.link_faces) == 1: e.verts[0].select = 0 e.verts[1].select = 0 e.select = 0 mesh.update() bm.free() bmkeep.free() bpy.ops.object.editmode_toggle() bpy.ops.object.select_all(action = 'DESELECT') context.scene.objects.active = projobj projobj.hide = 0 bpy.ops.object.delete() selobj.select = 1 context.scene.objects.active = selobj bpy.ops.object.editmode_toggle() def panel_func(self, context): scn = bpy.context.scene self.layout.label(text="DeathGuppie:") self.layout.operator("mesh.deathguppie", text="Subdivide DG") self.layout.prop(scn, "Smooth") self.layout.prop(scn, "Inner") def register(): bpy.utils.register_module(__name__) bpy.types.VIEW3D_PT_tools_meshedit.append(panel_func) def unregister(): bpy.utils.unregister_module(__name__) bpy.types.VIEW3D_PT_tools_meshedit.remove(panel_func) if __name__ == "__main__": register()
The year has begun tremendously well for Powerflex Corporation. January just past has been our best January since the company was formed in 1989. We are extremely pleased and excited by this result. In fact this month just finished is the second best ever. David Bennett Director of Technical Services commented that "January is traditionally a slow month for sales as Australians take their summer break during this time and sales suffer. But not this year - we are now more than ever looking forward to a great year." Our product line is now mature with our Windows versions of PFXplus and PFX C-lib now well tested in the market place. We are now in a position to consolidate the very hard work that has gone into the development of these products. These principles are equally valid when applied to source code, especially when moving a character based application to the Windows environment. The cost and delay of a re-write can cripple a business and is absolutely unnecessary when using the latest release of PFXplus. Reduce the lines of code required to perform a task wherever possible. Powerflex Corporation is right behind you in your efforts to conserve. The motivation behind PFXplus for Windows is to provide a path to move existing character based code in an orderly fashion to the Windows, Windows 95 and Windows/NT environments. Not only does PFXplus allow you to re-use your existing code, providing the tools for conversion to Windows based code, it also allows you to continue using the procedural programming techniques you are familiar with. Then add Events and Objects at your leisure when you are fully comfortable with programming under Windows. The latest release of PFXplus provides some exciting new capabilities with the Common Controls Architecture. The feature article beginning on the next page provides an in-depth look at the benefits available in 32-bit Windows. 
Please contact Powerflex Corporation or your local dealer for more information about PFXplus with CCA. The PFXplus Common Controls Architecture (CCA) is a set of development tools and modules for creating PFXplus programs to run under 32-bit Windows 95 or Windows/NT. Whether you convert an existing program or write a new one using familiar techniques, the resulting program will have the unmistakable look and feel of genuine Microsoft Windows. a selection of powerful new controls, including the Progress Bar, Animation Control, and Bitmap Buttons, as well as the traditional Edit control, List box, Combo box, and various types of button. Correct operation of the Common Controls requires Windows 95 or Windows/NT 3.51 or later (4.0 or later recommended). PFXplus 4.2x is required, together with the new and updated versions of OBJECT.PFI, WINOBJ.PFI, and other files. supports the Common Controls listed in the following table. Powerflex Corporation is committed to providing tools that make success a reality. Read on for a list of new features provided in version 4.22 to help make the job of Windows application programming that little bit easier. A Toolbar is a control window that contains one or more buttons. Each button sends a command message when it is pressed, to carry out some action. Often a Toolbar contains the same commands as the menu but in a more directly accessible location. A Toolbar is defined as an object within PFXplus. Simply add a TOOLBAR object to a FORM to include a Toolbar control. This picture shows a typical Toolbar which can be created using the tools provided with PFXplus version 4.22. A Status Bar control is a horizontal window, usually displayed at the bottom of a form, in which an application can display various kinds of status information. The Status Bar control can appear in two forms. A normal Status Bar is divided into several parts to display more than one type of information. 
A simple Status Bar has no divisions and is typically used to display an error or notification message. This Status Bar is implemented as a standard PFXplus object called STATUSBAR. You need simply define it as an object within the main form definition. The Tab control is the essential element in constructing a Tabbed Dialog. The tabs and frame you see on a Tabbed Dialog are all generated by a single Tab control. A Tab control is analogous to the dividers in a notebook or the labels in a file cabinet. By using a Tab control, an application can define multiple pages for the same area of a form. The appearance of a Tab control is achieved by drawing a frame around a specified portion of the form display area to which the Tab control is attached. A Tab control can also have tooltips that are identical in nature to those available with a Toolbar. A Progress Bar is a very simple control that an application uses to indicate the progress of some activity towards completion. Visually, it consists of a rectangle that is gradually filled, from left to right, with the system highlight colour. The following illustration shows a Progress Bar. A Progress Bar has a range and a current position. The range represents the entire duration of the activity, and the current position represents the progress that the application has made toward completing the activity. The Progress Bar control uses the range and the current position to determine the percentage of the Progress Bar to fill with the highlight colour. Add class to your application with animation. Just a little effort gives great rewards. An Animation Control is a window that displays an Audio Video Interleaved (AVI) clip. An AVI clip is a series of bitmap frames like a movie. Although AVI clips can have sound, you can use only silent AVI clips with an Animation Control. 
Because the thread continues executing while the AVI clip is displayed, a common use for an Animation Control is to indicate system activity during a lengthy operation. A typical Animation Control is shown below. A Bitmap Button is a regular Windows button which displays a bitmap picture instead of text. They look great and are really simple to implement using our CCA tools. Most image editors can be used for the creation of Button Bitmap files. The program IMAGEDIT.EXE that is shipped with PFXplus is quite suitable for the task. This program has a limitation of 256 pixels for the width and height of a bitmap file. A bitmap with a size of 256 by 256 pixels makes for a large button, enough for most purposes. The bitmap file BMPBTN.BMP shown here was created with IMAGEDIT.EXE. The Common Controls Architecture (CCA) MENUBARS system is an integrated set of tools based on the Windows 32 bit Toolbar and Status Bar common controls, also incorporating the Windows menu system. The data defining these controls and the menu system is drawn from the one data file CCAMENU, which is similar to the standard MENU.DAT data file. Programs gain access to these controls by including CCA.PFI, which is designed to simplify the creation of Forms, Subforms and Popups. CCA.PFI uses WCOMCTL.PFI, which is responsible for the specific class creations and definitions relating to the Common Controls. When you update to PFXplus version 4.22 you will receive the CCA Add-On which includes all the tools described above. Remember you can take your existing Xflex character based code and convert it to Windows using the tools provided with PFXplus version 4.22. One of the key factors in so called power programming in any language is a good text editor. For development work in the Windows environment you will not get much better than ED for Windows, a locally produced product which is finding wide acceptance both here in Australia and overseas. 
The purpose of this article is to describe the way we have configured ED for Windows for our Powerflex development work. We have been using this system for over twelve months now and find that we rarely need to drop out to MS-DOS. If ED has been installed correctly, the necessary additions will have been made to the Windows configuration. While some will want to add the ED for Windows directory to the PATH statement, this is not really necessary if you launch the editor from a Desktop icon. In the Properties section of the icon we specify the full path for ED for Windows and it takes it from there. ED supports a range of file extensions and allows for Programs to be set up for each file type. For our purposes, it is beneficial to set up programs to compile various types of Powerflex code and to run the program we are currently working on. It is not necessary to repeat the programs for each file extension we may use, .SRC, .PFX, .FRM, .RPT etc. Once a compile program has been transferred to a Toolbar button, it will adapt to any extension you may use. We suggest you select one file type to set up an initial set of programs, then use the copy function to produce variations on the theme. Open a source file, for example GMEM3.PFX. From the Tool menu, select Programs, or alternatively use ALT+F10. From the Program Titles dialog select ADD and then proceed to complete the details along the lines of the example shown. This example shows how to set up a program to compile the current program, irrespective of extension, producing both a .PTC and .PR3 file. By specifying <NAME>, we will get the name of the current file including its drive and path. If we use the <FILE> option here we will get only the name of the current file, excluding any drive\path and file extension. If PFCN.EXE cannot be found in your PATH, you will need to specify the full path for that file. Make sure you check both the Save edited files and Unlock & reload files as All. 
We suggest that you also check the Goto first error option and select the Powerflex Error track type. Once you have saved this program you can use it as a template to produce other variations for the compiler. For example, Compile .PTC would have the Command line PFCN <NAME>. Similarly, you could set up a program to run the current program. Using the above as a template you might produce a Program with the Command Line PFLND <FILE>. Note that with this we use <FILE> to specify the program to run because we do not want to pass the full path to the runtime. We also have not checked the Goto first error option as this is not relevant. By just setting up all your favourite programs like this you will improve your working environment. To make things even easier, you can attach any or all of the programs you set up to the Toolbar. The only restriction here is that we are limited to the Toolbar button images that come with ED. Once a Program has been attached to a Toolbar button, it becomes independent of the Programs menu and hence the file extension. It is also more convenient to click a button to compile or run a program than to select a program from the Program menu. From the ED main menu, select Options and then Toolbars. The toolbar that you are currently using will be checked. If this is the one you want to change, note the name and then select Customize. From the Toolbar Configuration dialog, select the toolbar you want to modify. Then locate and select a button image. From the Command Type listbox, select Programs, and from there locate and select the program to be associated with the button image selected. Now locate the position you want to place the new button and select Add Before or Add After as appropriate. We would suggest that you use the Copy option before you embark on this exercise to avoid errors. You might also remove existing buttons that you would not use in order to make space for your new buttons. 
ED for Windows is available from Neville Franks at Soft As It Gets by telephone on +61 3 9885 4445 or by fax on +61 3 9885 4444, or by e-mail at [email protected] The Web site can be visited at www.getsoft.com. For existing users Neville tells me that version 3.80 is now released. In order to rationalise and streamline our e-mail services Powerflex will from the end of February discontinue our cc:Mail service. If you need to contact us please use the following Internet e-mail addresses. Support: [email protected] Information: [email protected] General Enquiries: [email protected] Sales: [email protected] Individuals: Individuals responding to your messages will provide you their direct e-mail addresses where necessary. It is important to provide a return e-mail or fax address when you correspond with us. The fax machine and postal service are still working for those who prefer non-electronic communication. The coming of the year 2000 presents a problem for computer software quite unlike any other. Although getting working software to keep working when the calendar clicks over should be easy enough, I know several computer professionals who are making unusual preparations. They swear they are going to take all their money out of the bank, run up a bill on the credit card, take a long holiday and go nowhere near an automatic machine on the magic day. Strange people! The year 2000 problem has two quite unusual features. First, the deadline has been known a long time and many people are not ready, but it will not slip. Not by one second. Secondly, no-one has ever done it before. There are no dry runs and no second chances. Fortunately, the problems are fairly well understood by now and PFXplus provides a good set of capabilities to encourage and support year-2000-compliant programming. As discussed in the previous issue of POWERlines, the main thing you must do is to convert all your existing data to use so-called "high-range" dates, if it doesn't already. 
That means that your data files should contain dates like 08/08/1998 and not 08/08/98. Unfortunately the fragment of code we published last issue was incomplete. It did not handle zero dates correctly. Here is the correct way to do it. So all you still have to do is make sure you are running PFXplus 2.63 or later, convert your data and literals in your programs into high range format and add these two lines to your PFX.INI. Then relax and enjoy the millennium with our compliments! Further information is available from Powerflex or your local dealer.
#!/usr/bin/env python
"""Reads iPhoto library info, and exports photos and movies."""

# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys

import phoshare.phoshare_ui
import phoshare.phoshare_main


def main():
    """Main routine for Phoshare. Decides on UI vs. non-UI version.

    With no command-line arguments the graphical UI is launched;
    otherwise control is handed to the command-line driver.
    """
    # py2app launches the bundled app with a process serial number
    # argument of the form -psn_x_xxx; strip it so it is not mistaken
    # for a real command-line flag.
    if len(sys.argv) > 1 and sys.argv[1].startswith('-psn'):
        del sys.argv[1]
    if len(sys.argv) <= 1:
        phoshare.phoshare_ui.main()
    else:
        phoshare.phoshare_main.main()

if __name__ == "__main__":
    main()
There are a lot of disabilities that are not visible on the outside. People with invisible illnesses come in all forms, sizes and ages. You can be young and in need of a mobility scooter in the store. We look fine, but still need the handicapped parking spot. You cannot see on the outside how we really feel inside, or how much energy and effort it takes simply to go out of the house. This design shows on the outside how you feel inside. It shows the people around you that you have an invisible disability. It makes visible what was invisible before.
#!/usr/bin/python """This is a quick script I wrote up to count how many messages each of my friends posted in a big group chat. This script depends on the Facebook Platform Python SDK found here: https://github.com/pythonforfacebook/facebook-sdk """ from __future__ import division import argparse import facebook import json import time from collections import defaultdict from urlparse import urlsplit def main(): """Uses the Facebook Graph API to get chat messages for the given ID and prints the number of messages each chat participant posted. Will write the chat messages in JSON format to a file if specified. """ args = get_arguments() data = [] graph = facebook.GraphAPI(args.token) chat_id = args.chat_id try: comments = graph.get_object(chat_id + "/comments") except facebook.GraphAPIError as e: print e else: more_comments = True while more_comments: comments_data = comments.get('data', []) data.extend(comments_data) paging_next = comments.get('paging', {}).get('next') if paging_next: next_page_query = urlsplit(paging_next)[3] # Prevents hammering the Graph API and getting # locked out. time.sleep(args.timeout) comments = graph.get_object( chat_id + "/comments?" + next_page_query) else: more_comments = False if len(data) and args.output_file: with open(args.output_file, 'w+') as f: f.write(json.dumps(data)) print_results(data) def get_arguments(): parser = argparse.ArgumentParser( description='Grabs messages in a given Facebook chat and provides a ' 'numerical break-down of the participants\' messages.') parser.add_argument('token', help='A Facebook access token. Can be retrieved from ' ' the Graph API Explorer: ' 'https://developers.facebook.com/tools/explorer') parser.add_argument('chat_id', help='The Facebook ID of the chat you want to analyze.' 
' You can get these IDs from /me/inbox.') parser.add_argument('-o', '--output_file', help='Writes the chat messages in JSON format to the ' 'specified file.') parser.add_argument('-t', '--timeout', default=1, help='Provide a timeout (in seconds) between ' 'successive Graph API calls to prevent being ' 'locked out due to too many. Defaults to 1.') return parser.parse_args() def print_results(data): """Print the number of messages for each user in the chat. Calculate how many messages each participant in the chat has sent, along with what percentage of the chat's messages are theirs. """ mapping = defaultdict(lambda: {'count': 0, 'ratio': 0}) for comment in data: # Sometimes there are chats which are missing a 'from' field in # the messages. try: author = comment['from']['name'] except KeyError as e: author = '<UNKNOWN_AUTHOR>' mapping[author]['count'] += 1 for key, value in mapping.items(): value['ratio'] = value['count'] / len(data) print "{}: {} messages ({:.2%} of the chat)".format( key, value['count'], value['ratio']) if __name__ == '__main__': main()
Leave the bandage in place for 2 HOURS after work has been finished. ALWAYS wash your hands BEFORE touching your tattoo until it’s fully healed. Wash your tattoo with warm water to remove all of the ointment. The warmer the water, the easier it is to get the ointment off. DON’T use a washcloth, loofah, or any other abrasive material. Use a mild cleaner to wash the tattoo. We recommend using SimpleCleanse, which is an all-organic, vegan cleanser that is gentle to the skin and fresh tattoos. Gently massage over the area with your bare, freshly washed, hand and a small amount of SimpleCleanse. Rinse thoroughly, however DON’T soak the area. Keep shower time to a minimum. Pat dry with clean paper towels. DON’T use a towel or any other abrasive material. DON’T re-bandage. Once it’s off leave it off. You should repeat this every morning and every night until completely healed. This will ensure our work and your tattoo will look its best. *OPTIONAL* Some artists recommend applying an ointment like “Aquaphor” to your tattoo. Apply a small amount to the tattoo 2-3 times a day for the first 3-4 days. Massage the ointment completely into the tattoo. After a few days, the tattoo will start to form light scabbing tissue. At this time, you may begin to use SimpleShield which is an organic, vegan moisturizer that we offer. Apply a small amount of Simple Shield to your tattoo 2 to 3 times per day until the tattoo is completely healed. DON’T use bacitracin, neosporin, bactine, or any other ointments. Use SimpleShield sparingly and no more than the recommended use. More isn’t better. Avoid direct sunlight and tanning for at least 2 weeks. After the tattoo is completely healed you may apply sunscreen to the area in order to tan or have extended exposure to the sun. This keeps the color of your tattoo brighter. We recommend a minimum of SPF 50 or higher. DON’T go swimming in any natural body of water or chlorinated water for at least 2 weeks. 
If you have any other questions please feel free to call us at (865) 951-1486!
#!/usr/bin/python # mergetex.py # # Script for merging tex files into a single monolithic file. This # script should make it easy to generate an ArXiV-friendly single # .tex file from a paper that is broken into subfiles using LaTeX's # \input{} command. # # USAGE: # python mergetex.py [input] [output] # python mergetex.py mypaper.tex mypaperMerged.tex # # mergetex takes two arguments, the [input] file and the [output] # file into which the merged files should go. It recursively # searches [input] and adds any file given by uncommented \input{} # commands. # # # # v0.1 by Anand Sarwate ([email protected]) import argparse import string import re import sys import os.path def parseinclude(includefile,outfh): try: with open(includefile) as file: print("Found " + includefile + ". Merging...\n") except IOError as e: print('Unable to open ' + includefile + ': does not exist or no read permissions') fincl = open(includefile, 'r') # parse file line by line for line in fincl: # strip out comments in the line, if any dc = line.split('\\%') # look for escaped \% if (len(dc) == 1): # then there is no \% to be escaped first_comm = dc[0].find('%') if (first_comm == -1): decom = line else: decom = line[:(first_comm+1)] + '\n' else: # we had to escape a \% decom = "" # construct the uncommented part dc = line.split('%') for chunk in dc: # look in each chunk to see if there is a % if (chunk[-1] == '\\'): # if % is escaped... 
decom = decom + chunk + '%' else: if (chunk[-1] == '\n'): decom = decom + chunk else: decom = decom + chunk + '%\n' break # search for the line containing an \input{} command sec = re.match('\\\\input{(.*?)}', decom) if sec: # if the match is nonempty, then fname = re.sub('\\\\input{', '', sec.group(0)) fname = re.sub('}', '', fname) if (fname.find('.tex') == -1): fname = fname + '.tex' print('\tFound include for ' + fname + '\n') parseinclude(fname,outfh) # if no \input{}, print the line to the output file else: outfh.write(decom) fincl.close() # input argument parser # args.format will contain filename for format file # args.bibfile will contain filename of bibliography inparser = argparse.ArgumentParser(description='Parses argument list') inparser.add_argument('texfile', metavar='texfile', help='main .tex file') inparser.add_argument('output', metavar='output', help='desired target output file') args = inparser.parse_args() # INPUT PARSING AND WARNING GENERATION try: with open(args.texfile) as file: pass except IOError as e: print('Unable to open ' + args.texfile + ': does not exist or no read permissions') fin = open(args.texfile, 'r') fout = open(args.output, 'w') parseinclude(args.texfile,fout)
During Lead The Way 2017, youth leaders shared how outdoor activities provided by Outdoor Outreach have empowered them to overcome hardship and adversity. Check out this video of a group of teens and young adults seeing snow for the first time on a snowboarding trip with Outdoor Outreach. The video was created by an Outdoor Outreach participant, Lawrence Vallejos.
#encoding: utf-8
"""
remapping -- Remapping figure showing orthogonalization from initial phase reset

Created by Joe Monaco on 2010-10-12.
Copyright (c) 2009-2011 Johns Hopkins University. All rights reserved.

This software is provided AS IS under the terms of the Open Source MIT License.
See http://www.opensource.org/licenses/mit-license.php.
"""

# NOTE(review): this module is Python 2 (`xrange`, builtin `file()`); it will
# not run unmodified on Python 3 — confirm the project's interpreter version.

# Library imports
import os
import numpy as np
import matplotlib as mpl
import matplotlib.pylab as plt

# Package imports
from ..core.analysis import BaseAnalysis
from ..vmo import VMOModel
from ..session import VMOSession
# NOTE(review): correlation_* and circle_diff_vec appear unused in this
# module — verify against sibling modules before removing.
from ..compare import (correlation_matrix, correlation_diagonals,
    population_spatial_correlation)
from ..tools.images import array_to_image
from ..tools.radians import circle_diff_vec


class RemappingFigure(BaseAnalysis):

    """
    Run complete remapping experiment based on random initial reset
    """

    # Label used by BaseAnalysis (presumably for data/figure directories and
    # titles — confirm in ..core.analysis).
    label = "remapping"

    def collect_data(self, N_samples=2, **kwargs):
        """Run basic VMOModel remapping experiment by randomly initializing
        the phase code of a network of oscillators and place units.

        Keyword arguments:
        N_samples -- total number of simulations to run (N-1 remapped from 1st)

        Additional keyword arguments are passed on to VMOModel.
        """
        self.results['N_samples'] = N_samples

        # Set up model parameters; any caller-supplied kwargs override these
        # defaults via the update() below.
        pdict = dict(   N_outputs=500,
                        N_theta=1000,
                        N_cues=1,
                        C_W=0.05,
                        gamma_local=0,
                        gamma_distal=0,
                        num_trials=N_samples,
                        refresh_fixed_points=False )
        pdict.update(kwargs)

        # Set up and run the path integration model
        self.out('Running remapping simulations...')
        model = VMOModel(**pdict)
        model.advance_all()
        sessions = VMOSession.get_session_list(model)
        VMOSession.save_session_list(sessions,
            os.path.join(self.datadir, 'samples'))

        # Get unit ordering based on first environment; units missing from
        # sortix are appended so the ordering covers every unit exactly once.
        sortix = list(sessions[0].sortix)
        sortix += list(set(range(sessions[0].num_units)) - set(sortix))
        self.results['sortix'] = np.array(sortix)

        # Save multi-session population responses and activity patterns
        self.out('Computing and storing population responses...')
        R = [SD.get_population_matrix(clusters=sortix) for SD in sessions]
        np.save(os.path.join(self.datadir, 'R.npy'), np.asarray(R))

        # Good-bye
        self.out('All done!')

    def create_plots(self, N_examples=4, examples=None):
        """Create figure(s) with basic data panels

        N_examples -- number of example units to plot (when examples is None)
        examples -- explicit unit indices to plot; randomly drawn from the
            active units (peak rate >= 1 in any environment) when omitted.
        """
        # Change to data directory and start logging
        os.chdir(self.datadir)
        self.out.outfd = file('figure.log', 'w')

        # Set up main figure for plotting
        self.figure = {}
        figsize = 9, 12
        plt.rcParams['figure.figsize'] = figsize
        self.figure['remapping'] = f = plt.figure(figsize=figsize)
        f.suptitle(self.label.title())

        # Load the data saved by collect_data().
        # NOTE(review): R is assumed to be (N_samples, units, bins) — the
        # indexing below (R[j, ex], R[:, ex].max()) depends on that layout.
        R = np.load(os.path.join(self.datadir, 'R.npy'))
        N = self.results['N_samples']

        # Example active unit responses across environments; "active" means
        # a peak response of at least 1 (Hz, presumably) in some environment.
        if examples is None:
            active = set()
            for j in xrange(N):
                active = active.union(set((R[j].max(axis=1)>=1).nonzero()[0]))
            active = list(active)
            active.sort()
            examples = np.random.permutation(len(active))[:N_examples]
            examples = np.array(active)[examples]
        self.out('Plotting example responses: %s'%repr(examples))
        for i,ex in enumerate(examples):
            self.out('Unit %d max response = %.2f Hz'%(ex, R[:,ex].max()))
            for j in xrange(N):
                # Top half of the figure: one row per example unit, one
                # column per environment, sharing a y-scale per unit.
                ax = plt.subplot(2*N_examples, N, N*i+j+1)
                ax.plot(R[j,ex], c='k', lw=1.5)
                ax.set_xlim(0, 360)
                ax.set_ylim(-0.1*R[:,ex].max(), 1.1*R[:,ex].max())
                ax.set_axis_off()

        # Bottom half: full population responses, one panel per environment,
        # also dumped to grayscale PNGs.
        for j in xrange(N):
            self.out('Environment %d population max = %.2f Hz'%(j+1, R[j].max()))
            ax = plt.subplot(2, N, j+1+N)
            ax.imshow(R[j], aspect='auto', interpolation='nearest')
            array_to_image(R[j], 'pop_env_%02d.png'%(j+1), cmap=mpl.cm.gray_r)

        plt.draw()
        # Restore the global figure size so later figures are unaffected.
        plt.rcParams['figure.figsize'] = plt.rcParamsDefault['figure.figsize']
        self.out.outfd.close()
There are so many students who normally need proper assistance when they have an assignment to work on. This is one of the most common experiences that students actually have to go through from time to time, and it is important for you to ensure that you have someone to help you out or somewhere to check just in case you are looking for some help with your homework. One thing that you need to realize is that in as far as assignments are concerned, the nature of the help that you get will go a long way in determining whether you can pass the task or not. If you get some proper help with your homework, there is a good chance that you will pass the task. However, if you get terrible help, you will most certainly fail in the task at hand. One of your best options in as far as this task is concerned is to speak to your parents for help. You might be surprised when you realize how efficient they can be in dealing with this particular task. Because of this reason therefore, make it a point to interact with your parents or even your guardians to assist you in any way that they can. There is so much that you can be able to achieve when you are working on this task with your friends. Not only are you able to discuss some of the items that you have been given, but this interaction will actually come in handy for you when you are looking to study for your exams. There is a lot that you can find in the library. All you have to do is to make sure that you are able to get some time off to study, or at least to visit the library and get this work done. Immediately you have been given the task, take the opportunity and run into the library.
#
# Copyright (C) 2006-2016  Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

# NOTE(review): Python 2 code throughout (`raise E, msg`, `iteritems`,
# `xrange`, `long`, list-returning `filter`).

from collections import defaultdict
import neo.lib.pt
from neo.lib.protocol import CellStates, ZERO_TID


class Cell(neo.lib.pt.Cell):
    """Master-side cell: tracks replication progress for its partition."""

    # TID up to which this cell has been replicated (class default used
    # until a per-instance value is set).
    replicating = ZERO_TID

    def setState(self, state):
        readable = self.isReadable()
        super(Cell, self).setState(state)
        # When a cell transitions from readable to non-readable, its backup
        # and replication bookkeeping is stale: drop the per-instance
        # attributes so the class defaults apply again.
        if readable and not self.isReadable():
            try:
                del self.backup_tid, self.replicating
            except AttributeError:
                pass

# Monkey-patch the base module so every cell created anywhere in neo.lib.pt
# uses this master-specific subclass.
neo.lib.pt.Cell = Cell


class MappedNode(object):
    """Proxy around a node that records which partitions make() assigned it.

    Used by tweak() to compare the ideal assignment against the current one;
    all other attribute access is forwarded to the wrapped node.
    """

    def __init__(self, node):
        self.node = node
        self.assigned = set()

    def __getattr__(self, attr):
        return getattr(self.node, attr)


class PartitionTable(neo.lib.pt.PartitionTable):
    """This class manages a partition table for the primary master node"""

    def setID(self, id):
        # Set the partition table ID (PTID); None means "unknown yet".
        assert isinstance(id, (int, long)) or id is None, id
        self._id = id

    def setNextID(self):
        # Allocate and return the next PTID; requires a known current ID.
        if self._id is None:
            raise RuntimeError, 'I do not know the last Partition Table ID'
        self._id += 1
        return self._id

    def make(self, node_list):
        """Make a new partition table from scratch."""
        # start with the first PTID
        self._id = 1
        # First, filter the list of nodes: only running nodes with a UUID
        # can be assigned partitions.
        node_list = [n for n in node_list if n.isRunning() \
                and n.getUUID() is not None]
        if len(node_list) == 0:
            # Impossible.
            raise RuntimeError, 'cannot make a partition table with an ' \
                    'empty storage node list'

        # Take it into account that the number of storage nodes may be less
        # than the number of replicas.
        repeats = min(self.nr + 1, len(node_list))
        index = 0
        # Round-robin assignment: each partition gets `repeats` cells,
        # cycling through the node list so the load is balanced.
        for offset in xrange(self.np):
            row = []
            for _ in xrange(repeats):
                node = node_list[index]
                row.append(Cell(node))
                self.count_dict[node] = self.count_dict.get(node, 0) + 1
                index += 1
                if index == len(node_list):
                    index = 0
            self.partition_list[offset] = row
        self.num_filled_rows = self.np

    def dropNodeList(self, node_list, simulate=False):
        """Remove nodes from the table, refusing to lose the last readable
        copy of any partition.  With simulate=True, only compute and return
        the change list without mutating the table.
        """
        partition_list = []
        change_list = []
        feeding_list = []
        for offset, row in enumerate(self.partition_list):
            new_row = []
            partition_list.append(new_row)
            feeding = None
            drop_readable = uptodate = False
            for cell in row:
                node = cell.getNode()
                if node in node_list:
                    change_list.append((offset, node.getUUID(),
                        CellStates.DISCARDED))
                    if cell.isReadable():
                        drop_readable = True
                else:
                    new_row.append(cell)
                    if cell.isFeeding():
                        feeding = cell
                    elif cell.isUpToDate():
                        uptodate = True
            if feeding is not None:
                # A feeding cell can be promoted to replace a dropped one.
                if len(new_row) < len(row):
                    change_list.append((offset, feeding.getUUID(),
                        CellStates.UP_TO_DATE))
                    feeding_list.append(feeding)
            elif drop_readable and not uptodate:
                raise neo.lib.pt.PartitionTableException(
                    "Refuse to drop nodes that contain the only readable"
                    " copies of partition %u" % offset)
        if not simulate:
            self.partition_list = partition_list
            for cell in feeding_list:
                cell.setState(CellStates.UP_TO_DATE)
                self.count_dict[cell.getNode()] += 1
            for node in node_list:
                self.count_dict.pop(node, None)
            self.num_filled_rows = len(filter(None, self.partition_list))
        return change_list

    def load(self, ptid, row_list, nm):
        """
        Load a partition table from a storage node during the recovery.
        Return the new storage nodes registered
        """
        # check offsets
        for offset, _row in row_list:
            if offset >= self.getPartitions():
                raise IndexError, offset
        # store the partition table
        self.clear()
        self._id = ptid
        new_nodes = []
        for offset, row in row_list:
            for uuid, state in row:
                node = nm.getByUUID(uuid)
                if node is None:
                    # Unknown UUID: register a new storage node on the fly.
                    node = nm.createStorage(uuid=uuid)
                    new_nodes.append(node.asTuple())
                self.setCell(offset, node, state)
        return new_nodes

    def setUpToDate(self, node, offset):
        """Set a cell as up-to-date"""
        uuid = node.getUUID()
        # check the partition is assigned and known as outdated
        # (an early `return` means the cell is already readable: no-op).
        for cell in self.getCellList(offset):
            if cell.getUUID() == uuid:
                if cell.isOutOfDate():
                    break
                return
        else:
            raise neo.lib.pt.PartitionTableException('Non-assigned partition')

        # update the partition table
        cell_list = [self.setCell(offset, node, CellStates.UP_TO_DATE)]

        # If the partition contains a feeding cell, drop it now.
        for feeding_cell in self.getCellList(offset):
            if feeding_cell.isFeeding():
                cell_list.append(self.removeCell(offset,
                    feeding_cell.getNode()))
                break

        return cell_list

    def addNodeList(self, node_list):
        """Add nodes"""
        added_list = []
        for node in node_list:
            if node not in self.count_dict:
                self.count_dict[node] = 0
                added_list.append(node)
        return added_list

    def tweak(self, drop_list=()):
        """Optimize partition table

        This is done by computing a minimal diff between current partition
        table and what make() would do.
        """
        # Snapshot the current assignment: per-node {offset: cell}, and the
        # set of readable cells per partition.
        assigned_dict = {x: {} for x in self.count_dict}
        readable_list = [set() for x in xrange(self.np)]
        for offset, row in enumerate(self.partition_list):
            for cell in row:
                if cell.isReadable():
                    readable_list[offset].add(cell)
                assigned_dict[cell.getNode()][offset] = cell
        # Build the ideal table on proxy nodes; each MappedNode records the
        # partitions make() would give it.
        pt = PartitionTable(self.np, self.nr)
        drop_list = set(drop_list).intersection(assigned_dict)
        node_set = {MappedNode(x) for x in assigned_dict
                      if x not in drop_list}
        pt.make(node_set)
        for offset, row in enumerate(pt.partition_list):
            for cell in row:
                if cell.isReadable():
                    cell.getNode().assigned.add(offset)
        def map_nodes():
            # Pair each real node with the ideal assignment that differs
            # least from what it already holds, most-loaded nodes first.
            node_list = []
            for node, assigned in assigned_dict.iteritems():
                if node in drop_list:
                    yield node, frozenset()
                    continue
                readable = {offset for offset, cell in assigned.iteritems()
                                   if cell.isReadable()}
                # the criterion on UUID is purely cosmetic
                node_list.append((len(readable), len(assigned),
                                  -node.getUUID(), readable, node))
            node_list.sort(reverse=1)
            for _, _, _, readable, node in node_list:
                assigned = assigned_dict[node]
                mapped = min(node_set, key=lambda m: (
                    len(m.assigned.symmetric_difference(assigned)),
                    len(m.assigned ^ readable)))
                node_set.remove(mapped)
                yield node, mapped.assigned
            assert not node_set

        # Apply the diff: keep/promote cells that stay, mark new cells
        # OUT_OF_DATE, and collect cells to remove per partition.
        changed_list = []
        uptodate_set = set()
        remove_dict = defaultdict(list)
        for node, mapped in map_nodes():
            uuid = node.getUUID()
            assigned = assigned_dict[node]
            for offset, cell in assigned.iteritems():
                if offset in mapped:
                    if cell.isReadable():
                        uptodate_set.add(offset)
                        readable_list[offset].remove(cell)
                        if cell.isFeeding():
                            self.count_dict[node] += 1
                            state = CellStates.UP_TO_DATE
                            cell.setState(state)
                            changed_list.append((offset, uuid, state))
                else:
                    if not cell.isFeeding():
                        self.count_dict[node] -= 1
                    remove_dict[offset].append(cell)
            for offset in mapped.difference(assigned):
                self.count_dict[node] += 1
                state = CellStates.OUT_OF_DATE
                self.partition_list[offset].append(Cell(node, state))
                changed_list.append((offset, uuid, state))
        # Second pass over removals: if a partition loses all up-to-date
        # cells, keep one readable cell as FEEDING (preferring the least
        # loaded node) instead of discarding it.
        count_dict = self.count_dict.copy()
        for offset, cell_list in remove_dict.iteritems():
            row = self.partition_list[offset]
            feeding = None if offset in uptodate_set else min(
                readable_list[offset], key=lambda x: count_dict[x.getNode()])
            for cell in cell_list:
                if cell is feeding:
                    count_dict[cell.getNode()] += 1
                    if cell.isFeeding():
                        continue
                    state = CellStates.FEEDING
                    cell.setState(state)
                else:
                    state = CellStates.DISCARDED
                    row.remove(cell)
                changed_list.append((offset, cell.getUUID(), state))
        assert self.num_filled_rows == len(filter(None, self.partition_list))
        return changed_list

    def outdate(self, lost_node=None):
        """Outdate all non-working nodes

        Do not outdate cells of 'lost_node' for partitions it was the last
        node to serve. This allows a cluster restart.
        """
        change_list = []
        for offset, row in enumerate(self.partition_list):
            lost = lost_node
            cell_list = []
            for cell in row:
                if cell.isReadable():
                    if cell.getNode().isRunning():
                        # At least one running readable cell: lost_node is
                        # not the last server of this partition.
                        lost = None
                    else :
                        cell_list.append(cell)
            for cell in cell_list:
                if cell.getNode() is not lost:
                    cell.setState(CellStates.OUT_OF_DATE)
                    change_list.append((offset, cell.getUUID(),
                        CellStates.OUT_OF_DATE))
        return change_list

    def iterNodeCell(self, node):
        # Yield (offset, cell) for every partition assigned to `node`
        # (at most one cell per partition for a given node).
        for offset, row in enumerate(self.partition_list):
            for cell in row:
                if cell.getNode() is node:
                    yield offset, cell
                    break

    def getOperationalNodeSet(self):
        """
        Return a set of all nodes which are part of at least one UP TO DATE
        partition. An empty list is returned if these nodes aren't enough to
        become operational.
        """
        node_set = set()
        for row in self.partition_list:
            if not any(cell.isReadable() and cell.getNode().isPending()
                       for cell in row):
                return () # not operational
            node_set.update(cell.getNode() for cell in row
                            if cell.isReadable())
        return node_set

    def clearReplicating(self):
        # Drop per-instance replication markers so class defaults apply.
        for row in self.partition_list:
            for cell in row:
                try:
                    del cell.replicating
                except AttributeError:
                    pass

    def setBackupTidDict(self, backup_tid_dict):
        # Record, per readable cell, the last TID backed up by its node;
        # missing nodes default to ZERO_TID (nothing backed up yet).
        for row in self.partition_list:
            for cell in row:
                if cell.isReadable():
                    cell.backup_tid = backup_tid_dict.get(cell.getUUID(),
                                                          ZERO_TID)

    def getBackupTid(self, mean=max):
        # Aggregate backup_tid per partition with `mean` (max by default),
        # then take the minimum across partitions: the TID up to which the
        # whole database is backed up.
        try:
            return min(mean(x.backup_tid for x in row if x.isReadable())
                       for row in self.partition_list)
        except ValueError:
            return ZERO_TID

    def getCheckTid(self, partition_list):
        # Lowest backup_tid among readable cells of the given partitions.
        try:
            return min(min(cell.backup_tid
                           for cell in self.partition_list[offset]
                           if cell.isReadable())
                       for offset in partition_list)
        except ValueError:
            return ZERO_TID
Ian Francis has worked for 17 years in the Resilience sector with a wide range of experience as a specialist in Business Continuity, Service Continuity and Data Protection, with implementation projects in the Telecommunication, Finance, Education, Charity and Government sectors. His early career included 13 years in the Royal Air Force as an avionics trade manager followed by 11 years in the Telecommunications sector. Ian's career in BC and SC started as UK Head of Business Continuity Planning for Orange, he successfully developed and implemented strategies, plans and testing programmes that enabled the company to achieve ISO 17799 accreditation in 2003. He was promoted to Global Head of BC/SC as the leader of the international business continuity programme for Orange Group. Ian has been a Member of the BCI (MBCI) since 2002 and obtained the CBCP certification in 2005 from DRI International. Ian holds the ISO 22301 Lead Implementer Certification and is a PECB Certified Trainer. He has delivered numerous courses to the public and various organisations since 2006. Running his own consultancy business for Business and Service Continuity, Ian has worked with numerous private and public sector clients including AOL, Capital One, Fortis Investments and BNP Paribas Asset Management in various European locations. Keen to maintain his wide knowledge in technology, his projects have involved the implementation of Data Protection strategies whilst considering geographical requirements and standards. These implementations have demonstrated their effectiveness in data breach incidents. He has delivered various programmes for the UK Government and Public sector organisations including those in Prison Services, Education, Defence, Vehicle and other Government support services. Ian has conducted numerous practical courses in the development and implementation of Incident and Business Continuity Exercises. 
His specialism includes the delivery of Integrated Crisis Management, Service Management and Business Continuity Exercises for his clients. His Crisis Management experience has included leading live incidents with loss of access of buildings or threats to staff such as 7/7, G20 Protests and the Brussels Bombings.
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Recurrent self attention models for VQA."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import functools

from tensor2tensor.layers import common_attention
from tensor2tensor.layers import common_layers
from tensor2tensor.layers import vqa_layers
from tensor2tensor.models.research import universal_transformer
from tensor2tensor.models.research import universal_transformer_util
from tensor2tensor.models.research import vqa_attention
from tensor2tensor.utils import registry
# from tensor2tensor.utils import restore_hook

import tensorflow as tf

from tensorflow.contrib.layers.python.layers import utils


@registry.register_model
class VqaRecurrentSelfAttention(vqa_attention.VqaAttentionBaseline):
  """Recurrent Self attention both on image and question."""

  # @staticmethod
  # def train_hooks():
  #   restore_resnet_hook = restore_hook.RestoreHook(
  #       # TODO(zichaoy): hard code the path given static function.
  #       checkpoint_path="/home/zichaoy/resnet_v1_152.ckpt",
  #       new_model_scope="vqa_recurrent_self_attention/body/",
  #       old_model_scope="resnet_v1_152/",
  #   )
  #   return [restore_resnet_hook]

  def body(self, features):
    """Encode image+question with a recurrent (universal) transformer, then
    attend over the encoding with a single learned query vector.

    Returns the decoder output expanded to 4-D for the downstream head.
    """
    hp = self.hparams
    # pylint: disable=eval-used
    if hp.image_input_type == "image":
      # Raw images: run the (optionally frozen) resnet named by
      # hp.image_model_fn to get image features.
      image_feat = vqa_layers.image_embedding(
          features["inputs"],
          model_fn=eval(hp.image_model_fn),
          trainable=hp.train_resnet,
          is_training=hp.mode == tf.estimator.ModeKeys.TRAIN)
    else:
      # Pre-extracted features are fed directly.
      image_feat = features["inputs"]

    image_feat = common_layers.flatten4d3d(image_feat)
    # Project image features to the model width before concatenation.
    image_feat = common_layers.dense(image_feat, hp.hidden_size)
    utils.collect_named_outputs("norms", "image_feat_after_proj",
                                tf.norm(image_feat, axis=-1))

    question = common_layers.flatten4d3d(features["question"])
    utils.collect_named_outputs("norms", "question_embedding",
                                tf.norm(question, axis=-1))
    (encoder_input, encoder_self_attention_bias,
     encoder_decoder_attention_bias) = prepare_image_question_encoder(
         image_feat, question, hp)

    encoder_input = tf.nn.dropout(
        encoder_input, keep_prob=1.-hp.layer_prepostprocess_dropout)

    # Self-attention encoder over the concatenated image+question sequence
    # (decoder function reused with no encoder output / no enc-dec bias).
    encoder_output, _ = recurrent_transformer_decoder(
        encoder_input, None, encoder_self_attention_bias, None,
        hp, name="encoder")
    utils.collect_named_outputs(
        "norms", "encoder_output", tf.norm(encoder_output, axis=-1))

    # scale query by sqrt(hidden_size)
    query = tf.get_variable("query", [hp.hidden_size]) * hp.hidden_size **0.5
    query = tf.expand_dims(tf.expand_dims(query, axis=0), axis=0)
    batch_size = common_layers.shape_list(encoder_input)[0]
    query = tf.tile(query, [batch_size, 1, 1])
    query = tf.nn.dropout(
        query, keep_prob=1.-hp.layer_prepostprocess_dropout)

    # Single-step decoder: the learned query attends over encoder_output.
    decoder_output, _ = recurrent_transformer_decoder(
        query, encoder_output, None, encoder_decoder_attention_bias,
        hp, name="decoder")
    utils.collect_named_outputs("norms", "decoder_output",
                                tf.norm(decoder_output, axis=-1))

    norm_tensors = utils.convert_collection_to_dict("norms")
    vqa_layers.summarize_tensors(norm_tensors, tag="norms/")

    # Expand dimension 1 and 2
    # NOTE(review): only axis=1 is expanded here despite the comment —
    # confirm the expected rank of the head's input.
    return tf.expand_dims(decoder_output, axis=1)


def prepare_image_question_encoder(image_feat, question, hparams):
  """Prepare encoder.

  Args:
    image_feat: a Tensor.
    question: a Tensor.
    hparams: run hyperparameters

  Returns:
    encoder_input: a Tensor, bottom of encoder stack
    encoder_self_attention_bias: a bias tensor for use in encoder
      self-attention
  """
  encoder_input = tf.concat([image_feat, question], axis=1)
  encoder_padding = common_attention.embedding_to_padding(encoder_input)
  ignore_padding = common_attention.attention_bias_ignore_padding(
      encoder_padding)
  encoder_self_attention_bias = ignore_padding
  encoder_decoder_attention_bias = ignore_padding
  # Usual case - not a packed dataset.
  # Positional signal is added to the question only; the concat below
  # rebuilds encoder_input with the position-augmented question.
  if hparams.pos == "timing":
    question = common_attention.add_timing_signal_1d(question)
  elif hparams.pos == "emb":
    question = common_attention.add_positional_embedding(
        question, hparams.max_length, "inputs_positional_embedding",
        None)
  encoder_input = tf.concat([image_feat, question], axis=1)

  return (encoder_input, encoder_self_attention_bias,
          encoder_decoder_attention_bias)


def recurrent_transformer_decoder(
    decoder_input,
    encoder_output,
    decoder_self_attention_bias,
    encoder_decoder_attention_bias,
    hparams,
    name="decoder",
    nonpadding=None,
    save_weights_to=None,
    make_image_summary=True):
  """Recurrent decoder function.

  Wraps universal_transformer_layer with a decoder attention unit and an
  encoder-style FFN unit.  Also used as an encoder by passing
  encoder_output=None (see VqaRecurrentSelfAttention.body).
  """
  x = decoder_input
  attention_dropout_broadcast_dims = (
      common_layers.comma_separated_string_to_integer_list(
          getattr(hparams, "attention_dropout_broadcast_dims", "")))
  with tf.variable_scope(name):
    ffn_unit = functools.partial(
        # use encoder ffn, since decoder ffn use left padding
        universal_transformer_util.transformer_encoder_ffn_unit,
        hparams=hparams,
        nonpadding_mask=nonpadding)

    attention_unit = functools.partial(
        universal_transformer_util.transformer_decoder_attention_unit,
        hparams=hparams,
        encoder_output=encoder_output,
        decoder_self_attention_bias=decoder_self_attention_bias,
        encoder_decoder_attention_bias=encoder_decoder_attention_bias,
        attention_dropout_broadcast_dims=attention_dropout_broadcast_dims,
        save_weights_to=save_weights_to,
        make_image_summary=make_image_summary)

    x, extra_output = universal_transformer_util.universal_transformer_layer(
        x, hparams, ffn_unit, attention_unit)

    return common_layers.layer_preprocess(x, hparams), extra_output


@registry.register_hparams
def vqa_recurrent_self_attention_base():
  """VQA attention baseline hparams."""
  hparams = universal_transformer.universal_transformer_base()
  hparams.batch_size = 1024
  hparams.use_fixed_batch_size = True
  hparams.weight_decay = 0.
  hparams.clip_grad_norm = 0.
  # use default initializer
  # hparams.initializer = "xavier"
  hparams.learning_rate_schedule = (
      "constant*linear_warmup*rsqrt_normalized_decay")
  hparams.learning_rate_warmup_steps = 8000
  hparams.learning_rate_constant = 7e-4
  hparams.learning_rate_decay_rate = 0.5
  hparams.learning_rate_decay_steps = 50000
  # hparams.dropout = 0.5
  hparams.summarize_grads = True
  hparams.summarize_vars = True

  # not used hparams
  hparams.label_smoothing = 0.1
  hparams.multiply_embedding_mode = "sqrt_depth"

  # add new hparams
  # use raw image as input
  hparams.add_hparam("image_input_type", "feature")
  hparams.add_hparam("image_model_fn", "resnet_v1_152")
  hparams.add_hparam("resize_side", 512)
  hparams.add_hparam("height", 448)
  hparams.add_hparam("width", 448)
  hparams.add_hparam("distort", True)
  hparams.add_hparam("train_resnet", False)

  # question hidden size
  # hparams.hidden_size = 512
  # hparams.filter_size = 1024
  # hparams.num_hidden_layers = 4

  # self attention parts
  # hparams.norm_type = "layer"
  # hparams.layer_preprocess_sequence = "n"
  # hparams.layer_postprocess_sequence = "da"
  # hparams.layer_prepostprocess_dropout = 0.1
  # hparams.attention_dropout = 0.1
  # hparams.relu_dropout = 0.1
  # hparams.add_hparam("pos", "timing")
  # hparams.add_hparam("num_encoder_layers", 0)
  # hparams.add_hparam("num_decoder_layers", 0)
  # hparams.add_hparam("num_heads", 8)
  # hparams.add_hparam("attention_key_channels", 0)
  # hparams.add_hparam("attention_value_channels", 0)
  # hparams.add_hparam("self_attention_type", "dot_product")

  # iterative part
  hparams.transformer_ffn_type = "fc"

  return hparams


@registry.register_hparams
def vqa_recurrent_self_attention_small():
  """Base hparams with a smaller model and a higher learning rate."""
  hparams = vqa_recurrent_self_attention_base()
  hparams.learning_rate_constant = 1e-3
  hparams.hidden_size = 512
  hparams.filter_size = 2048
  hparams.num_heads = 8
  hparams.layer_prepostprocess_dropout = 0.1
  return hparams


@registry.register_hparams
def vqa_recurrent_self_attention_big():
  """Base hparams scaled up (2048 hidden, 8192 filter)."""
  hparams = vqa_recurrent_self_attention_base()
  hparams.learning_rate_constant = 5e-4
  hparams.hidden_size = 2048
  hparams.filter_size = 8192
  return hparams


@registry.register_hparams
def vqa_recurrent_self_attention_big_l4():
  """Big hparams with 4 recurrence steps."""
  hparams = vqa_recurrent_self_attention_big()
  hparams.num_rec_steps = 4
  return hparams


@registry.register_hparams
def vqa_recurrent_self_attention_highway():
  """Base hparams with highway recurrence."""
  hparams = vqa_recurrent_self_attention_base()
  hparams.recurrence_type = "highway"
  return hparams


@registry.register_hparams
def vqa_recurrent_self_attention_gru():
  """Base hparams with GRU recurrence."""
  hparams = vqa_recurrent_self_attention_base()
  hparams.recurrence_type = "gru"
  return hparams


@registry.register_hparams
def vqa_recurrent_self_attention_l8():
  """Base hparams with 8 recurrence steps."""
  hparams = vqa_recurrent_self_attention_base()
  hparams.num_rec_steps = 8
  return hparams


@registry.register_hparams
def vqa_recurrent_self_attention_mix_before_ut():
  """Base hparams mixing with a vanilla transformer before the UT stack."""
  hparams = vqa_recurrent_self_attention_base()
  hparams.mix_with_transformer = "before_ut"
  return hparams


@registry.register_hparams
def vqa_recurrent_self_attention_l4():
  """Base hparams with 4 recurrence steps."""
  hparams = vqa_recurrent_self_attention_base()
  hparams.num_rec_steps = 4
  return hparams


@registry.register_hparams
def vqa_recurrent_self_attention_ls2():
  """Base hparams with label smoothing 0.2."""
  hparams = vqa_recurrent_self_attention_base()
  hparams.label_smoothing = 0.2
  return hparams


@registry.register_hparams
def vqa_recurrent_self_attention_drop1():
  """Base hparams with layer pre/postprocess dropout 0.1."""
  hparams = vqa_recurrent_self_attention_base()
  hparams.layer_prepostprocess_dropout = 0.1
  return hparams
@registry.register_hparams
def vqa_recurrent_self_attention_drop3():
  """Recurrent self-attention VQA hparams with attention/relu dropout 0.3."""
  hparams = vqa_recurrent_self_attention_base()
  hparams.relu_dropout = 0.3
  hparams.attention_dropout = 0.3
  return hparams
We congratulate all of the citizens who chose to stand for election March 11. It takes quite a bit of intestinal fortitude to put yourself out there in front of every resident and offer yourself up for election. We’ve noted in the past how unpleasant some of our towns’ boards can be, with backbiting and sniping and downright nastiness, rather than a polite expression of views and the normal disagreements among gentlemen and ladies. We’ve also noted that serving on a board takes time, commitment and a thick skin. Most of us like to complain. Few of us actually choose to do something about what upsets us. People who pursue elected office put themselves in position to make that leap, and we thank them for it. And we want to acknowledge everyone who took time from their busy schedule to participate by voting. But this election took a little more of an ugly turn in a few cases. YouTube videos concerning one of the Derry Town Council races may have brought viewers a chuckle at one of the candidate’s expense, but they were nothing if not mean. And of course, they were anonymous. Londonderry blogs were filled with – again anonymous – attacks on candidates. It’s so easy to say anything you want, particularly if it’s mean spirited, when you can hide behind a false name, or better yet, no name. It’s so much harder to offer up those views when you have to take “credit” for them. Making fun of people, calling them names, questioning their motives or intelligence – if you need to do that to make a point, do it in public and fully identified. We call it cowardice for hiding behind anonymity or false names in a misguided effort to make a point. So do we have any doubt as to why more people don’t run for office? We know that not everyone has the time or the inclination, but for those who do, they next have to consider whether they wish to be subjected to verbal abuse and ridicule from people who don’t have the guts to use their own names. 
So much for “meaningful dialogue” and honesty, and especially, transparency. Anonymous attacks may demonstrate a clever use of words and a sharp wit, but their goal is to hurt. That’s not something to chuckle at. And if that’s the best shot that can be levied against someone seeking election, it’s not a very substantial one.
"""Django Endless Pagination template tags.""" import re from django import template from django.utils.encoding import iri_to_uri from simple_pagination import settings from django.core.paginator import ( EmptyPage, Page, PageNotAnInteger, Paginator, ) from simple_pagination import utils from simple_pagination import models PAGINATE_EXPRESSION = re.compile(r""" ^ # Beginning of line. (((?P<first_page>\w+)\,)?(?P<per_page>\w+)\s+)? # First page, per page. (?P<objects>[\.\w]+) # Objects / queryset. (\s+starting\s+from\s+page\s+(?P<number>[\-]?\d+|\w+))? # Page start. (\s+using\s+(?P<key>[\"\'\-\w]+))? # Querystring key. (\s+with\s+(?P<override_path>[\"\'\/\w]+))? # Override path. (\s+as\s+(?P<var_name>\w+))? # Context variable name. $ # End of line. """, re.VERBOSE) SHOW_CURRENT_NUMBER_EXPRESSION = re.compile(r""" ^ # Beginning of line. (starting\s+from\s+page\s+(?P<number>\w+))?\s* # Page start. (using\s+(?P<key>[\"\'\-\w]+))?\s* # Querystring key. (as\s+(?P<var_name>\w+))? # Context variable name. $ # End of line. """, re.VERBOSE) register = template.Library() @register.tag def paginate(parser, token, paginator_class=None): """Paginate objects. Usage: .. code-block:: html+django {% paginate entries %} After this call, the *entries* variable in the template context is replaced by only the entries of the current page. You can also keep your *entries* original variable (usually a queryset) and add to the context another name that refers to entries of the current page, e.g.: .. code-block:: html+django {% paginate entries as page_entries %} The *as* argument is also useful when a nested context variable is provided as queryset. In this case, and only in this case, the resulting variable name is mandatory, e.g.: .. code-block:: html+django {% paginate entries.all as entries %} The number of paginated entries is taken from settings, but you can override the default locally, e.g.: .. code-block:: html+django {% paginate 20 entries %} Of course you can mix it all: .. 
code-block:: html+django {% paginate 20 entries as paginated_entries %} By default, the first page is displayed the first time you load the page, but you can change this, e.g.: .. code-block:: html+django {% paginate entries starting from page 3 %} When changing the default page, it is also possible to reference the last page (or the second last page, and so on) by using negative indexes, e.g: .. code-block:: html+django {% paginate entries starting from page -1 %} This can be also achieved using a template variable that was passed to the context, e.g.: .. code-block:: html+django {% paginate entries starting from page page_number %} If the passed page number does not exist, the first page is displayed. If you have multiple paginations in the same page, you can change the querydict key for the single pagination, e.g.: .. code-block:: html+django {% paginate entries using article_page %} In this case *article_page* is intended to be a context variable, but you can hardcode the key using quotes, e.g.: .. code-block:: html+django {% paginate entries using 'articles_at_page' %} Again, you can mix it all (the order of arguments is important): .. code-block:: html+django {% paginate 20 entries starting from page 3 using page_key as paginated_entries %} Additionally you can pass a path to be used for the pagination: .. code-block:: html+django {% paginate 20 entries using page_key with pagination_url as paginated_entries %} This way you can easily create views acting as API endpoints, and point your Ajax calls to that API. In this case *pagination_url* is considered a context variable, but it is also possible to hardcode the URL, e.g.: .. code-block:: html+django {% paginate 20 entries with "/mypage/" %} If you want the first page to contain a different number of items than subsequent pages, you can separate the two values with a comma, e.g. if you want 3 items on the first page and 10 on other pages: .. 
code-block:: html+django {% paginate 3,10 entries %} You must use this tag before calling the {% show_more %} one. """ # Validate arguments. try: tag_name, tag_args = token.contents.split(None, 1) except ValueError: msg = '%r tag requires arguments' % token.contents.split()[0] raise template.TemplateSyntaxError(msg) # Use a regexp to catch args. match = PAGINATE_EXPRESSION.match(tag_args) if match is None: msg = 'Invalid arguments for %r tag' % tag_name raise template.TemplateSyntaxError(msg) # Retrieve objects. kwargs = match.groupdict() objects = kwargs.pop('objects') # The variable name must be present if a nested context variable is passed. if '.' in objects and kwargs['var_name'] is None: msg = ( '%(tag)r tag requires a variable name `as` argumnent if the ' 'queryset is provided as a nested context variable (%(objects)s). ' 'You must either pass a direct queryset (e.g. taking advantage ' 'of the `with` template tag) or provide a new variable name to ' 'store the resulting queryset (e.g. `%(tag)s %(objects)s as ' 'objects`).' ) % {'tag': tag_name, 'objects': objects} raise template.TemplateSyntaxError(msg) # Call the node. return PaginateNode(paginator_class, objects, **kwargs) @register.tag def lazy_paginate(parser, token): """Lazy paginate objects. Paginate objects without hitting the database with a *select count* query. Use this the same way as *paginate* tag when you are not interested in the total number of pages. """ return paginate(parser, token, paginator_class=LazyPaginator) class PaginateNode(template.Node): """Add to context the objects of the current page. Also add the Django paginator's *page* object. """ def __init__( self, paginator_class, objects, first_page=None, per_page=None, var_name=None, number=None, key=None, override_path=None): self.paginator = paginator_class or Paginator self.objects = template.Variable(objects) # If *var_name* is not passed, then the queryset name will be used. 
self.var_name = objects if var_name is None else var_name # If *per_page* is not passed then the default value form settings # will be used. self.per_page_variable = None if per_page is None: self.per_page = settings.PER_PAGE elif per_page.isdigit(): self.per_page = int(per_page) else: self.per_page_variable = template.Variable(per_page) # Handle first page: if it is not passed then *per_page* is used. self.first_page_variable = None if first_page is None: self.first_page = None elif first_page.isdigit(): self.first_page = int(first_page) else: self.first_page_variable = template.Variable(first_page) # Handle page number when it is not specified in querystring. self.page_number_variable = None if number is None: self.page_number = 1 else: try: self.page_number = int(number) except ValueError: self.page_number_variable = template.Variable(number) # Set the querystring key attribute. self.querystring_key_variable = None if key is None: self.querystring_key = settings.PAGE_LABEL elif key[0] in ('"', "'") and key[-1] == key[0]: self.querystring_key = key[1:-1] else: self.querystring_key_variable = template.Variable(key) # Handle *override_path*. self.override_path_variable = None if override_path is None: self.override_path = None elif ( override_path[0] in ('"', "'") and override_path[-1] == override_path[0]): self.override_path = override_path[1:-1] else: self.override_path_variable = template.Variable(override_path) def render(self, context): # Handle page number when it is not specified in querystring. if self.page_number_variable is None: default_number = self.page_number else: default_number = int(self.page_number_variable.resolve(context)) # Calculate the number of items to show on each page. if self.per_page_variable is None: per_page = self.per_page else: per_page = int(self.per_page_variable.resolve(context)) # Calculate the number of items to show in the first page. 
if self.first_page_variable is None: first_page = self.first_page or per_page else: first_page = int(self.first_page_variable.resolve(context)) # User can override the querystring key to use in the template. # The default value is defined in the settings file. if self.querystring_key_variable is None: querystring_key = self.querystring_key else: querystring_key = self.querystring_key_variable.resolve(context) # Retrieve the override path if used. if self.override_path_variable is None: override_path = self.override_path else: override_path = self.override_path_variable.resolve(context) # Retrieve the queryset and create the paginator object. objects = self.objects.resolve(context) paginator = self.paginator( objects, per_page) # Normalize the default page number if a negative one is provided. if default_number < 0: default_number = utils.normalize_page_number( default_number, paginator.page_range) # The current request is used to get the requested page number. page_number = utils.get_page_number_from_request( context['request'], querystring_key, default=default_number) # Get the page. try: page = paginator.page(page_number) except EmptyPage: page = paginator.page(1) # Populate the context with required data. data = { 'default_number': default_number, 'override_path': override_path, 'page': page, 'querystring_key': querystring_key, } context.update({'endless': data, self.var_name: page.object_list}) return '' @register.tag def show_pages(parser, token): """Show page links. Usage: .. code-block:: html+django {% show_pages %} It is just a shortcut for: .. code-block:: html+django {% get_pages %} {{ pages }} You can set ``ENDLESS_PAGINATION_PAGE_LIST_CALLABLE`` in your *settings.py* to a callable, or to a dotted path representing a callable, used to customize the pages that are displayed. See the *__unicode__* method of ``endless_pagination.models.PageList`` for a detailed explanation of how the callable can be used. Must be called after ``{% paginate objects %}``. 
""" # Validate args. if len(token.contents.split()) != 1: msg = '%r tag takes no arguments' % token.contents.split()[0] raise template.TemplateSyntaxError(msg) # Call the node. return ShowPagesNode() class ShowPagesNode(template.Node): """Show the pagination.""" def render(self, context): # This template tag could raise a PaginationError: you have to call # *paginate* or *lazy_paginate* before including the getpages template. data = utils.get_data_from_context(context) print data # Return the string representation of the sequence of pages. pages = models.PageList( context['request'], data['page'], data['querystring_key'], default_number=data['default_number'], override_path=data['override_path'], ) return utils.text(pages)
This entry was posted in Writing and tagged Book Launch, childbirth, Heart Wide Open, Shellie Rushing Tomlinson by Shellie Rushing Tomlinson. Bookmark the permalink. Known as The Belle of All Things Southern, Shellie Rushing Tomlinson is a national best-selling author, speaker, radio host and newspaper columnist. Shellie’s last nonfiction humor title from Penguin Group USA, "Sue Ellen's Girl Ain't Fat, She Just Weighs Heavy" came out with a hearty endorsement from legendary comedian Jeff Foxworthy, a blurb Shellie adamantly denies purchasing. Watch for Shellie's first faith book to be released from Random House in 2014! So true! I also think launching a book is like what Moses’ mother did sending him off in the basket. If she didn’t let go of him and trust him to the Lord, he would have died. But in being obedient and letting go, she prepared him to lead. Great analogy, Connie. 🙂 THX for sharing today. I would take it back “9 months” and say that trying to get published can be like trying to get pregnant. Doesn’t always happen within our timeline! I have just received a contract and so I am officially “pregnant,” although I do not know my due date 🙂 I appreciate all of the advice you shared about having this baby and look forward to learning from other “parents” about the process.
import fileinput
import io
import os
import shutil
import subprocess
import zipfile

import click
import requests


def _get_version():
    """Read and return ``__version__`` from flexget/_version.py."""
    with open('flexget/_version.py') as f:
        g = globals()
        l = {}
        exec(f.read(), g, l)  # pylint: disable=W0122
    if not l['__version__']:
        raise click.ClickException('Could not find __version__ from flexget/_version.py')
    return l['__version__']


@click.group()
def cli():
    """Root command group for the release helper."""
    pass


@cli.command()
def version():
    """Prints the version number of the source"""
    click.echo(_get_version())


@cli.command()
@click.argument('bump_type', type=click.Choice(['dev', 'release']))
def bump_version(bump_type):
    """Bumps version to the next release, or development version."""
    cur_ver = _get_version()
    click.echo('current version: %s' % cur_ver)
    ver_split = cur_ver.split('.')
    if 'dev' in ver_split[-1]:
        if bump_type == 'dev':
            # If this is already a development version, increment the dev
            # count by 1.
            ver_split[-1] = 'dev%d' % (int(ver_split[-1].strip('dev') or 0) + 1)
        else:
            # Just strip off dev tag for next release version.
            ver_split = ver_split[:-1]
    else:
        # Increment the revision number by one.
        if len(ver_split) == 2:
            # We don't have a revision number, assume 0.
            ver_split.append('1')
        else:
            if 'b' in ver_split[2]:
                # Beta version: bump only the beta counter.
                minor, beta = ver_split[-1].split('b')
                ver_split[-1] = '%sb%s' % (minor, int(beta) + 1)
            else:
                ver_split[-1] = str(int(ver_split[-1]) + 1)
        if bump_type == 'dev':
            ver_split.append('dev')
    new_version = '.'.join(ver_split)
    # Rewrite the __version__ assignment in place.
    for line in fileinput.FileInput('flexget/_version.py', inplace=1):
        if line.startswith('__version__ ='):
            line = "__version__ = '%s'\n" % new_version
        print(line, end='')
    click.echo('new version: %s' % new_version)


@cli.command()
def bundle_webui():
    """Bundle webui for release packaging"""
    ui_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'flexget', 'ui')

    def download_extract(url, dest_path):
        """Download a zip from *url* and extract it into *dest_path*."""
        click.echo(dest_path)
        r = requests.get(url)
        z = zipfile.ZipFile(io.BytesIO(r.content))
        z.extractall(dest_path)

    # WebUI V1
    click.echo('Bundle WebUI v1...')
    try:
        # Remove existing
        app_path = os.path.join(ui_path, 'v1', 'app')
        if os.path.exists(app_path):
            shutil.rmtree(app_path)
        # Just stashed the old webui zip on a random github release for easy hosting.
        # It doesn't get updated anymore, we should probably stop bundling it with releases soon.
        download_extract('https://github.com/Flexget/Flexget/releases/download/v3.0.6/webui_v1.zip',
                         os.path.join(ui_path, 'v1'))
    except IOError as e:
        # BUG FIX: the original used '%e', which is a float conversion and
        # raises TypeError when given a string; '%s' is intended here.
        click.echo('Unable to download and extract WebUI v1 due to %s' % str(e))
        raise click.Abort()

    # WebUI V2
    try:
        click.echo('Bundle WebUI v2...')
        # Remove existing
        app_path = os.path.join(ui_path, 'v2', 'dist')
        if os.path.exists(app_path):
            shutil.rmtree(app_path)
        release = requests.get('https://api.github.com/repos/Flexget/webui/releases/latest').json()
        v2_package = None
        for asset in release['assets']:
            if asset['name'] == 'dist.zip':
                v2_package = asset['browser_download_url']
                break
        if not v2_package:
            click.echo('Unable to find dist.zip in assets')
            raise click.Abort()
        download_extract(v2_package, os.path.join(ui_path, 'v2'))
    except (IOError, ValueError) as e:
        click.echo('Unable to download and extract WebUI v2 due to %s' % str(e))
        raise click.Abort()


@cli.command()
@click.argument('files', nargs=-1)
def autoformat(files):
    """Reformat code with black and isort"""
    if not files:
        project_root = os.path.dirname(os.path.realpath(__file__))
        files = (project_root,)
    # BUG FIX: os.environ['VIRTUAL_ENV'] raised KeyError before the guard
    # below could run; .get() lets the intended error message fire instead.
    venv_path = os.environ.get('VIRTUAL_ENV')
    if not venv_path:
        raise Exception('Virtualenv and activation required')

    # black and isort config are in pyproject.toml
    subprocess.call(('black',) + files)
    subprocess.call(
        (
            'isort',
            '--virtual-env',
            venv_path,
            '-rc',
            '--skip',
            'flexget/__init__.py',
            '--skip',
            'flexget/manager.py',
        )
        + files
    )


if __name__ == '__main__':
    cli()
Learn to make beautiful scented Cupcake Candles and scented Teacup Candles in this fun candle class! The cost of our Teacup & Cupcake candle making workshop includes all materials (except for the teacups), equipment and expert tuition. All you need to bring is yourself! At the end of the class you will have exquisite candles, plus a set of scented tealights to take home. Enjoy them – or give away as special gifts to friends and family! This class lasts around 2 1/2 hours with a break halfway through for complimentary tea and biscuits; the relaxed pace is ideal for beginners. Come and learn a new, hands-on skill in a fun and relaxed environment. This workshop is run on demand for small groups and can also be taught one on one. Payment is required at the time of booking for all our workshops. Simply send us an email stating the date you are interested in and we’ll get right back to you with all the information you need. Candle Making Workshops take place in an historic, Victorian Schoolhouse accessed by a flight of stone steps.
#
# Copyright 2017 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""
This helps loading http_pb2 and annotations_pb2.
Without this, the Python importer will not be able to process the lines:
from google.api import http_pb2 or
from google.api import annotations_pb2
(Without importing these, the protobuf loader will not recognize http
options in the protobuf definitions.)
"""

from importlib import import_module
import os
import sys


class GoogleApiImporter(object):
    """Meta-path hook that redirects ``google.api`` imports to the
    bundled third-party copies under ``ofagent.protos.third_party``."""

    def find_module(self, full_name, path=None):
        # Only claim the 'google.api' package; everything else falls
        # through to the normal import machinery (implicit None return).
        if full_name != 'google.api':
            return None
        self.path = [os.path.dirname(__file__)]
        return self

    def load_module(self, name):
        # Re-use an already-loaded module if present.
        try:
            return sys.modules[name]
        except KeyError:
            pass
        # Import the bundled copy, then alias it under the requested name.
        bundled_name = 'ofagent.protos.third_party.' + name
        import_module(bundled_name)
        sys.modules[name] = sys.modules[bundled_name]
        return sys.modules[name]


sys.meta_path.append(GoogleApiImporter())
from google.api import http_pb2, annotations_pb2
_ = http_pb2, annotations_pb2
In 2013 he was wearing lime green. The Canberra winger with “white blond hair” remembers towelling up Melbourne that day. A few months later he would be rubbed out of the sport for performance enhancing drugs offences, and he hasn’t been seen in a professional rugby league game since. On Friday night he returns to Kardinia Park to make his long-awaited first-grade return. This time he’ll be wearing purple, and a bit more muscle. “I guess I’m trying to use everything that’s happened in the past as motivation and fuel, but also just keep it really simple,” Earl told media on Thursday. “That’s my main motivation. It’s not as much about me as it might have been a year ago. “The support I’ve had from fans, family, friends, all the coaches here and the staff, they’ve gone above and beyond. “Everything you see, whether it’s in the trials or during the year in the NRL is a by-product of them. After his lengthy NRL suspension was handed down, Earl escaped to Thailand where he started his own business and stayed out of the Australian public’s eye. Then, three weeks into his first pre-season in half a decade, Earl ruptured his anterior cruciate ligament and the dream was shattered ... again. Melbourne hierarchy kept the faith. They overlooked his rehabilitation and then handed him a new two-year contract, which Earl grabbed with both hands. He will finally step out in a Storm jumper on Friday night. An NRL return can’t be too far away. “Last year was definitely a disappointment. The biggest thing was in 2017 I put so much hard work in by myself, training, just preparing to be ready for the pre-season. Then three weeks in to do what I did, tear my ACL, last year was a write-off,” Earl said. “But credit to the club I’m here with another opportunity to play first grade and I’m so grateful. I’m using that as fuel - everything the club has done for me, all the staff, all the players who I now have as mates. 
“I’m looking forward to running out with them and putting it all behind me to be honest. “It’s been the toughest pre-season I’ve ever done, (which is) typical of the Melbourne Storm. “The best word to explain it is ‘prepared’. I feel like I’ve done everything I can.”
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 10 17:14:37 2014

@author: Mehmet Emre

Parser for command-line arguments, params will use this for setting it's values.
"""

import argparse

parser = argparse.ArgumentParser(description="rasim - A radio network simulator")

parser.add_argument('--batch-run', action='store_true',
                    help='run simulator in batch-mode, no graph windows will be produced')
parser.add_argument('--N-runs', action='store', default=10,
                    help='number of runs per agent', type=int)
parser.add_argument('--t-total', action='store', default=6000,
                    help='total simulation time (time slots) per run, default = 6000', type=int)
# Each of the following flags appends an agent class name to the *agents* list.
parser.add_argument('--individual-q', action='append_const', dest='agents', const='IndividualQ',
                    help='run individual Q-learning agents')
parser.add_argument('--random-channel', action='append_const', dest='agents', const='RandomChannel',
                    help='run randomly channel selecting agents')
# BUG FIX: help said 'run agents selecting constant' (truncated); the const
# 'OptHighestSNR' shows these agents pick the highest-SNR channel.
parser.add_argument('--highest-snr', action='append_const', dest='agents', const='OptHighestSNR',
                    help='run agents selecting the channel with highest SNR')
parser.add_argument('--output-dir', action='store', default='data/',
                    help='set output directory, it must be already created')
parser.add_argument('--n-agent', action='store', default=5,
                    help='number of agents', type=int)
parser.add_argument('--n-stationary', action='store',
                    help='number of stationary agents', type=int)
# BUG FIX: help was copy-pasted from --n-good-channel; this flag is the total
# channel count.
parser.add_argument('--n-channel', action='store', default=5,
                    help='number of channels, default = 5', type=int)
parser.add_argument('--n-good-channel', action='store', default=2,
                    help='number of good (type-1) channels among 5 channels', type=int)
# BUG FIX: help claimed default 1024 while the actual default is 512.
parser.add_argument('--buffer-size', action='store', default=512,
                    help='size of buffer, default: 512 packets', type=int)
parser.add_argument('--buffer-levels', action='store', default=10,
                    help='# of buffer levels in Q-learning, default: 10', type=int)
parser.add_argument('--packet-size', action='store', default=1024,
                    help='size of a packet, default: 1024 bits', type=int)
parser.add_argument('--min-packet-rate', action='store', default=0,
                    help='minimum packet rate per timeslot per agent, default = 0', type=int)
parser.add_argument('--max-packet-rate', action='store', default=6,
                    help='maximum packet rate per timeslot per agent, default = 6', type=int)
# BUG FIX: help claimed default 4 while the actual default is 10.
parser.add_argument('--beta-idle', action='store', default=10,
                    help='cost coefficient of staying idle for Q Learning default = 10', type=float)
parser.add_argument('--verbose', action='store_true',
                    help='increase verbosity, give statistics about each run')
parser.add_argument('-v', '--version', action='version', version='%(prog)s 0.1')

# Parsed at import time; other modules read option values from this namespace.
argv = parser.parse_args()
It's considered one of the highest honors in the military: protecting the Tomb of the Unknown Soldier at Arlington National Cemetery. It’s considered one of the highest honors in the military: protecting the Tomb of the Unknown Soldier at Arlington National Cemetery. These elite volunteers are handpicked and rigorously trained. But now, one of their own has a rare, incurable disease. Military doctors have told former Sentinel Andy Selga, out of the 17 people in the world with his precise diagnosis, he is the only one still alive. Selga has eosinophilic vasculitis isolated to the small bowel. The disease causes painful inflammation of the blood vessels. "It’s emotional. And it was always emotional," he said. Before his diagnosis, he'd been living his dream, walking 21 steps at a time. "It just like pumped me up. I was like, alright, I get to go guard the Unknown Soldiers today. For an entire 26-hour period! Cool!" Selga said. He loved serving as a tomb guard, saying, "This is way bigger than anything I’ve ever done in my life." The number 21 is a symbolic nod to the 21-gun salute, our nation’s highest honor. The Tomb of the Unknown Soldier is guarded 24 hours a day, 365 days a year, and in any weather condition. "To me, the only time I ever felt the true honor and respect was like at nighttime, when it’s like there’s nobody out there and America’s sleeping. But you get a sliver in the night to guard the Unknown Soldiers when nobody else is around and America’s entrusting you to be there," he said. Tomb Guards march 21 steps down the mat, turn and face east for 21 seconds. They turn again and face north for 21 seconds. Then, they take 21 more steps down the mat before repeating the process. It is haunting that Selga was 21 when doctors at Walter Reed National Military Medical Center decided on emergency surgery. They raced to find answers to his relentless abdominal pain. It had come on suddenly while he was walking the mat. 
"Almost as if somebody took a knife and drove it into my stomach. It wasn’t coming and going. It wasn’t here some days and here other days. It was constant," Selga described. "After the surgery, I wake up and they’re telling me I have eosinophilic vasculitis," he said. The condition is so rare that only two of every one million people in the world are diagnosed with it each year. There is no cure. "I sit back at the end of the day like, ‘Why me?’ What did I do? They ask me questions, ‘Is any of this stuff in your family?’ No. ‘Anywhere? Anywhere in your family?’ No." Selga’s found strength in his wife, Emily. High school sweethearts, the newlyweds tied the knot when Emily was just 17 years old. He was 20. "I got a crash course in adulthood, marriage and caregiving all at once at the age of 17," said Emily. "Marriage is 'til death. He’s my best friend. I’m going to be there til the end. We got the sickness part done! Sickness and in health. I’m really hoping health comes soon!" Despite the constant pain, it’s impossible to find a photograph of Selga where he isn’t smiling. "We’re making the best of it," he said. " I’m gonna live every day to the fullest." The couple was even tossed out of an emergency room once for uncontrollable giggling. "I love my husband!!" Emily laughed. "I appreciate every second I get with him!" And with Emily’s steadfast support, he’s transitioned from a wheelchair to a cane and is now able to walk again, on his own. "I still wake up feeling just like somebody hit me with a bat. On the inside," Selga said. But he’s responding well to treatment. "They expect me to live." He is fighting this disease with the same discipline and energy he mustered to protect the Tomb. Selga was forced to leave the military because of his illness. But recently, he has started an internship at the NASA Goddard Space Flight Center.
# ========================================================================
# Copyright (c) 2015 The University of Washington
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
#
#
# IAM messaging tools - AWS interface
#

from sys import exit
from copy import deepcopy

import logging
import json

from .dao import AWS_DAO


class AWS(object):
    """Thin facade over AWS_DAO: every call builds a fresh DAO from the
    stored configuration and delegates to the method of the same name."""

    def __init__(self, conf):
        self._conf = conf

    def _dao(self):
        # One fresh DAO per operation, mirroring the original behavior.
        return AWS_DAO(self._conf)

    # SNS actions

    def create_topic(self, name):
        """Create an SNS topic called *name*."""
        return self._dao().create_topic(name)

    def send_message(self, msg, context, cryptid, signid):
        """Publish *msg* with the given context/crypt/sign identifiers."""
        return self._dao().send_message(msg, context, cryptid, signid)

    # SQS actions

    def get_queue(self):
        """Return the configured SQS queue."""
        return self._dao().get_queue()

    def get_all_queues(self):
        """Return every visible SQS queue."""
        return self._dao().get_all_queues()

    def create_queue(self, name):
        """Create an SQS queue called *name*."""
        return self._dao().create_queue(name)

    def recv_message(self):
        """Receive a single message from the queue."""
        return self._dao().recv_message()

    def recv_and_process(self, handler, max=1):
        """Receive up to *max* messages and pass each to *handler*."""
        return self._dao().recv_and_process(handler, max)

    def purge_queue(self):
        """Delete all messages currently in the queue."""
        return self._dao().purge_queue()

    # multi-actions

    def subscribe_queue(self, topic_name, queue_name):
        """Subscribe queue *queue_name* to topic *topic_name*."""
        return self._dao().subscribe_queue(topic_name, queue_name)
Stunning beautiful view of city and lake. Condo boasts floor-to-ceiling windows, extended hardwoods, quartz counters, washer/dryer, large walk-in closet in master. Amenities include heated pool, outdoor covered terrace, gas grills, club room w/ kitchen, business center, dog park + wash, 24hr concierge. Please add $50 to the monthly rent if you want one reserved parking space. Seaholm itself is adjacent to Trader Joe’s and the new Central Library. LOCKBOX AT CONCIERGE.
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Module clearHistory
"""

# noinspection PyUnresolvedReferences
import logging

import mari
import traceback
from PySide.QtGui import QMessageBox

from stkMariTools.lib.ui_utils import MariToolsMenuItem


def registerMenuItem():
    """
    This method acts as an identifier method to be run automatically
    when detected. It adds the menu item to the Mari menubar.

    :return:
    """
    ClearHistoryMenuItem()


class ClearHistoryMenuItem(MariToolsMenuItem):
    """
    This class adds a Clear History action.
    """

    logger = logging.getLogger(__name__)

    def __init__(self):
        """
        The constructor.

        :return:
        """
        super(ClearHistoryMenuItem, self).__init__()

        # Expose this instance on the mari module so the menu action
        # command string below can reach it.
        mari.ClearHistoryMenuItem = self

        self.actionIdentifier = 'Clear cached history'
        self.actionCommand = 'mari.ClearHistoryMenuItem.clearHistory()'
        self.actionPath = 'MainWindow/&Scripts/&Cache'
        self.addMariToolsMenuItem()

    def clearHistory(self):
        """
        This method clears the Mari undo stack and cache.

        :return:
        """
        try:
            mari.history.clear()
        except RuntimeError:
            # BUG FIX: the original passed traceback.print_exc() to
            # str.format(); print_exc() writes to stderr and returns None,
            # so the log line always recorded "None". format_exc() returns
            # the traceback text instead.
            self.logger.error('### Could not clear the project history!!!\n{0}'
                              .format(traceback.format_exc()))

            # Display user prompt
            mari.utils.message(text='Could not clear the project history!\n'
                                    'Check if there is no project open, '
                                    'or if the current project requires saving.',
                               title='Could not clear project history!',
                               icon=QMessageBox.Icon.Warning)
            return

        mari.ddi.garbageCollect()
        mari.ddi.clearMemoryCache()
Read more about Pancho Ramos Stierle, now known worldwide as one of the men arrested for meditating to reduce the growing tension between police and protesters during Occupy Oakland. Casa de Paz is part of the Canticle Farm community. Learn more about the founders, their philosophy, and the practices that anchor the community here. This video first appeared on Karmatube and is re-posted here with permission.
#!/usr/bin/python from xml.sax.saxutils import XMLFilterBase, XMLGenerator from xml.sax.xmlreader import AttributesImpl from xml.sax import make_parser import sys def AttributesUnion(base, **values): baseitems = dict(base) baseitems.update(values) return AttributesImpl(baseitems) class AnnotateType(XMLFilterBase): scopes = [] map = dict([(name, [name]) for name in [ 'BOOL', 'BYTE', 'CARD8', 'CARD16', 'CARD32', 'INT8', 'INT16', 'INT32', 'char', 'void', 'float', 'double', 'XID', ]]) def startScope(self, name): self.scopes.insert(0, name) def declareType(self, name): assert ':' not in name qname = self.scopes[0] + ':' + name self.map.setdefault(name, []).insert(0, qname) def getQualifiedType(self, name): if ':' in name: return name names = self.map.get(name, []) return names[0] def endScope(self): self.scopes.pop(0) def startElement(self, name, attrs): attnames = [] if name == 'xcb': self.startScope(attrs['header']) elif name in ['struct', 'union', 'xidtype', 'enum', 'event', 'eventcopy', 'error', 'errorcopy']: self.declareType(attrs['name']) attnames = ['name'] if name.endswith('copy'): attnames.append('ref') elif name == 'typedef': self.declareType(attrs['newname']) attnames = ['oldname', 'newname'] elif name == 'valueparam': attnames = ['value-mask-type'] elif attrs.has_key('type'): attnames = ['type'] newattrs = {} for attname in attnames: newattrs[attname] = self.getQualifiedType(attrs[attname]) if newattrs: attrs = AttributesUnion(attrs, **newattrs) XMLFilterBase.startElement(self, name, attrs) def endElement(self, name): XMLFilterBase.endElement(self, name) if name == 'xcb': self.endScope() annotator = AnnotateType(make_parser()) annotator.setContentHandler(XMLGenerator()) if len(sys.argv) > 1: annotator.parse(sys.argv[1]) else: annotator.parse(sys.stdin) for name,names in annotator.map.iteritems(): if len(names) != 1: print "<!-- warning:", name, "has the following definitions:", names, "-->"
A continent is one of several very large landmasses of the world. Generally identified by convention rather than any strict criteria, up to seven regions are commonly regarded as continents: Asia, Africa, North America, South America, Antarctica, Europe, and Australia. Depending on the convention and model, some continents may be consolidated or subdivided: For example, Eurasia is most often subdivided into Asia and Europe, while North America and South America are sometimes recognised as one American continent, The Americas. As for me, I would like to apply yet another convention and divide the contents of this site into the regions / continents Americas, Asia, Europe and Scandinavia. I hope you don’t mind, as this arrangement fits the presentation of all the pictures I have taken while traveling the world (so far) much better. Scandinavia is a region in Northern Europe, with strong historical, cultural, and linguistic ties. The majority national languages of the region, and their many dialects, belong to the Scandinavian dialect continuum, and are mutually intelligible North Germanic languages. Find more info on Wikipedia.
from gi.repository import Gtk
import json, os
import re
import urllib.parse

from log import logger
import settings
import cloudapi
import utils
from Spinner import SpinnerDialog
from VcodeDialog import VcodeDialog


class Singleton(type):
    """Metaclass implementing the singleton pattern: the first call to the
    class constructs the instance; later calls return the same object."""

    def __init__(cls, name, bases, dict):
        super(Singleton, cls).__init__(name, bases, dict)
        cls._instance = None

    def __call__(cls, *args, **kw):
        if cls._instance is None:
            # BUG FIX: referenced the undefined name 'Singleton2' here,
            # which raised NameError on first instantiation.
            cls._instance = super(Singleton, cls).__call__(*args, **kw)
        return cls._instance

    def __new__(cls, name, bases, dct):
        return type.__new__(cls, name, bases, dct)


class TaskDialog(Gtk.Dialog):
    """Dialog listing Baidu cloud offline-download tasks and letting the
    user add magnet/ed2k link files, select and remove tasks."""

    # NOTE(review): this Python-2 style declaration has no effect under
    # Python 3 (this module uses urllib.parse, i.e. Python 3).  Moving it
    # to `class TaskDialog(Gtk.Dialog, metaclass=Singleton)` would clash
    # with the GObject metaclass, so it is left untouched — confirm
    # whether singleton behaviour is actually required.
    __metaclass__ = Singleton

    def __init__(self, parent, tokens, save_path):
        """
        :param parent: transient parent window
        :param tokens: (bdstoken, sign1, sign3, timestamp) tuple
        :param save_path: remote directory new tasks are saved into
        """
        Gtk.Dialog.__init__(self, "Download Task", parent, 0)
        self.file_list = []
        self.tokens = tokens
        self.bdstoken, sign1, sign3, timestamp = self.tokens
        self.current_selection = None
        self.save_path = save_path
        # BUG FIX: self.task_list was read by the add-link handler but
        # never initialised (AttributeError).  Start empty; refreshed by
        # populate_view().
        self.task_list = []

        self.set_default_size(800, 500)
        self.set_border_width(10)
        box = self.get_content_area()

        # Columns: num, filename, size, status, path, task_id
        #             0         1     2       3     4        5
        self.liststore = Gtk.ListStore(int, str, str, str, str, str)

        self.spinn = SpinnerDialog(self)
        self.spinn.show()
        self.init_view(self.bdstoken)

        # Tree view over the list store.
        self.treeview = Gtk.TreeView(model=self.liststore)
        for i, column_title in enumerate(["Num", "File", "Size", "Status", "Path"]):
            renderer = Gtk.CellRendererText()
            column = Gtk.TreeViewColumn(column_title, renderer, text=i)
            self.treeview.append_column(column)
        self.treeview.props.activate_on_single_click = False
        self.treeview.connect("row-activated", self.on_row_double_click)

        self.selection = self.treeview.get_selection()
        self.selection.connect("changed", self.on_tree_selection_changed)
        self.selection.set_mode(Gtk.SelectionMode.MULTIPLE)

        # Action buttons; handler names are derived from the labels.
        self.buttons = list()
        for act in ["Add Magnet or Ed2k Link File", "Select All",
                    "Unselect All", "Remove Task"]:
            button = Gtk.Button(act)
            self.buttons.append(button)
            funcname = "on_%s_button_clicked" % act.lower().replace(" ", "_")
            func = getattr(self, funcname)
            button.connect("clicked", func)

        self.scrollable_treelist = Gtk.ScrolledWindow()
        self.scrollable_treelist.set_vexpand(True)
        box.pack_start(self.scrollable_treelist, True, True, 0)
        for i, button in enumerate(self.buttons):
            self.add_action_widget(self.buttons[i], i + 1)
        self.scrollable_treelist.add(self.treeview)

        # Info bar used for progress / error feedback.
        self.infobar = Gtk.InfoBar()
        self.infobar.set_message_type(Gtk.MessageType.ERROR)
        box.add(self.infobar)
        info_content = self.infobar.get_content_area()
        self.info_label = Gtk.Label.new(
            "Add magnet/ed2k file to add offline download task")
        info_content.pack_start(self.info_label, False, False, 0)
        self.infobar.hide()
        box.show_all()

    def on_tree_selection_changed(self, *arg):
        """Remember the current (store, treepaths) selection."""
        self.current_selection = self.selection.get_selected_rows()

    def populate_view(self, *arg):
        """async_call callback: fill the list store from a list_task reply."""
        listjson, error = arg
        logger.debug("listjson: %s " % str(listjson))
        if 'task_info' in list(listjson.keys()):
            task_list = listjson['task_info']
            # Keep the raw rows for duplicate detection when adding links.
            # NOTE(review): assumes each row carries 'source_url' — confirm
            # against the cloudapi.list_task response schema.
            self.task_list = task_list
            file_list = []
            for i, row in enumerate(task_list):
                if int(row['status']) == 0:
                    status = "Success"
                else:
                    status = "Not Finised"
                nrow = (i, row['task_name'], '0B', status,
                        row['save_path'], row['task_id'])
                file_list.append(nrow)
            self.fill_liststore(file_list)
        elif 'error_msg' in list(listjson.keys()):
            info = listjson['error_msg']
            logger.info(info)
            self.info_label.set_text(info)
        self.spinn.destroy()

    def init_view(self, bdstoken):
        """Asynchronously (re)load the task list."""
        utils.async_call(cloudapi.list_task, bdstoken,
                         callback=self.populate_view)
        self.fill_liststore([])

    def fill_liststore(self, file_list):
        """Replace store contents with *file_list*; a no-op when empty."""
        if file_list:
            self.liststore.clear()
            for i, filerow in enumerate(file_list):
                self.liststore.append(list(filerow))

    def on_select_all_button_clicked(self, *arg):
        self.selection.select_all()

    def on_unselect_all_button_clicked(self, *arg):
        self.selection.unselect_all()

    def on_remove_task_button_clicked(self, *arg):
        """Delete every selected task via the cloud API."""

        def is_current_selection_null():
            # Warn the user when nothing is selected.
            if not self.current_selection or not self.current_selection[1]:
                dialog = Gtk.MessageDialog(self, 0, Gtk.MessageType.INFO,
                                           Gtk.ButtonsType.OK, "Attention.......")
                dialog.format_secondary_text("NO File is selected.!")
                dialog.run()
                dialog.destroy()
                return True
            else:
                return False

        def after_delete_task(data, error):
            self.info_label.set_text("Deletion is done")
            self.init_view(self.bdstoken)
            self.spinn.destroy()

        if is_current_selection_null():
            return
        store, treepaths = self.current_selection
        for tpath in treepaths:
            task = ()
            for i in store[tpath]:
                task = task + (i,)
            task_id = task[5]
            self.spinn = SpinnerDialog(self)
            self.spinn.show()
            self.info_label.set_text("Deleting task %s " % task[1])
            utils.async_call(cloudapi.delete_task, self.bdstoken, task_id,
                             callback=after_delete_task)

    def on_row_double_click(self, *arg):
        pass

    def after_cancel_task(self, *arg):
        """async_call callback: report a cancelled task and refresh."""
        taskdata, error = arg
        canceljson, task_id, task_name = taskdata
        logger.debug("canceljson: %s " % canceljson)
        info = "Task:%s,id:%s is cancelled." % (task_name, task_id)
        logger.info(info)
        self.info_label.set_text(info)
        self.init_view(self.bdstoken)
        self.spinn.destroy()

    def after_query_task(self, *arg):
        """async_call callback: cancel a task that is not fully finished."""
        taskdata, error = arg
        taskjson, task_id = taskdata
        logger.debug("taskjson: %s " % taskjson)
        file_size = int(taskjson['task_info'][task_id]['file_size'])
        finished_size = int(taskjson['task_info'][task_id]['finished_size'])
        task_name = taskjson['task_info'][task_id]['task_name']
        logger.debug("file_size: %s " % file_size)
        logger.debug("finished_size: %s " % finished_size)
        # BUG FIX: guard against ZeroDivisionError when the reported file
        # size is 0 (treat as unfinished and cancel).
        # NOTE(review): the message mentions a 0.6 threshold but the code
        # cancels anything below 1.0 — confirm intended threshold.
        if file_size == 0 or finished_size / file_size < 1:
            info = "%s : Finished rate is less than 0.6, canceling." % task_name
            logger.info(info)
            self.info_label.set_text(info)
            utils.async_call(cloudapi.cancel_task, self.bdstoken, task_id,
                             task_name, callback=self.after_cancel_task)
        else:
            info = "Task:%s,id:%s is successfully created." % (task_name, task_id)
            logger.info(info)
            self.info_label.set_text(info)
        self.spinn.destroy()

    def after_add_task(self, *arg):
        """async_call callback: follow a freshly added task with a query."""
        taskjson, error = arg
        logger.debug("taskjson: %s " % taskjson)
        if 'task_id' in taskjson.keys():
            task_id = str(taskjson['task_id'])
            utils.async_call(cloudapi.query_task, self.bdstoken, task_id,
                             callback=self.after_query_task)
        else:
            error = taskjson['error_msg']
            logger.info(error)
            self.info_label.set_text(error)
            self.spinn.destroy()

    def on_add_magnet_or_ed2k_link_file_button_clicked(self, *arg):
        """Read a file of magnet/ed2k links and queue each as a task."""
        dialog = Gtk.FileChooserDialog("Please choose a file", self,
                                       Gtk.FileChooserAction.OPEN,
                                       (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
                                        Gtk.STOCK_OPEN, Gtk.ResponseType.OK))
        response = dialog.run()
        if response != Gtk.ResponseType.OK:
            # BUG FIX: the original returned on CANCEL without destroying
            # the dialog (widget leak) and raised NameError for any other
            # response (e.g. window close) because 'filename' was unbound.
            dialog.destroy()
            return
        filename = dialog.get_filename()
        logger.debug("File selected: %s" % filename)
        dialog.destroy()

        with open(filename) as fh:
            link_list = fh.read()

        # Split the file into valid links and everything else.
        task_list = []
        invalid_list = []
        for line in link_list.split("\n"):
            line = line.strip()
            if line and (line.startswith("magnet:?xt=urn") or
                         line.startswith("ed2k://")):
                task_list.append(line)
            elif line:
                invalid_list.append(line)
        if invalid_list:
            dialog = Gtk.MessageDialog(self, 0, Gtk.MessageType.QUESTION,
                                       Gtk.ButtonsType.OK, "Attention")
            dialog.format_secondary_text(
                "Only magnet or ed2k protocal is support! Invalid lines :%s"
                % str(invalid_list))
            response = dialog.run()
            dialog.destroy()
            return

        logger.debug("save_path: %s" % self.save_path)
        # Magnet links already queued remotely (self.task_list is refreshed
        # by populate_view()).
        maglist = [i['source_url'] for i in self.task_list
                   if "magnet:?xt=urn:" in i['source_url']]
        logger.debug("maglist: %s " % str(maglist))

        for i, l in enumerate(task_list):
            # BUG FIX: the original called re.search(...).group(1)
            # unconditionally, which raised AttributeError for ed2k links
            # (no '&' and no magnet prefix).  Fall back to the raw link.
            params = {}
            mag_match = re.search(r'(&.*$)', l)
            if mag_match:
                params = dict(urllib.parse.parse_qsl(mag_match.group(1)))
            task_name = params.get('dn', l)
            txt = "%s out of %s | %s is running." % (str(i), len(task_list),
                                                     str(task_name))
            logger.info(txt)
            self.info_label.set_text(txt)
            maglink_match = re.search(r"(magnet[^&]*)", l)
            maglink = maglink_match.group(1) if maglink_match else l
            logger.debug("maglink: %s " % maglink)
            self.spinn = SpinnerDialog(self)
            self.spinn.show()
            if maglink not in maglist:
                self.info_label.set_text("Adding task: %s " % task_name)
                taskjson = cloudapi.add_task(self.bdstoken, l,
                                             self.save_path, self)
                self.init_view(self.bdstoken)
                self.spinn.destroy()
                logger.debug("taskjson: %s " % taskjson)
                if 'task_id' in taskjson.keys():
                    self.spinn = SpinnerDialog(self)
                    self.spinn.show()
                    self.info_label.set_text("Querying task: %s " % task_name)
                    task_id = str(taskjson['task_id'])
                    utils.async_call(cloudapi.query_task, self.bdstoken,
                                     task_id, callback=self.after_query_task)
                    self.spinn.destroy()
                else:
                    error = taskjson['error_msg']
                    logger.info(error)
                    self.info_label.set_text(error)
            else:
                info = "Already existed,pass"
                logger.info(info)
                self.info_label.set_text(info)
                self.spinn.destroy()
UnCommon Evil brings you 20 of the most horrifying stories our deviant authors’ minds can conceive. From the monster under your bed, to the very real reason for that oily sinking feeling in the pit of your stomach, our UnCommon Authors bring you a whole new way of looking at the true nature of evil. I am not normally a fan of horror. That being said I found these stories held more of the psychological thriller than slasher. More of the uncanny than crude. Clever and frightening visions that were yes violent and horrific, but that made you think and consider evil from new perspectives.
## \file state.py # \brief python package for state # \author Trent Lukaczyk, Aerospace Design Laboratory (Stanford University) <http://su2.stanford.edu>. # \version 2.0.6 # # Stanford University Unstructured (SU2) Code # Copyright (C) 2012 Aerospace Design Laboratory # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ---------------------------------------------------------------------- # Imports # ---------------------------------------------------------------------- import os, sys, shutil, copy, time from ..io import expand_part, get_adjointSuffix, add_suffix, \ get_specialCases from ..util import bunch from ..util import ordered_bunch # ---------------------------------------------------------------------- # State Factory # ---------------------------------------------------------------------- def State_Factory(state=None): """ state = SU2.io.State() Starts a state class, an extension of ordered_bunch(). 
Stores data generated while traversing SU2 tool chain Parameters: FUNCTIONS - ordered bunch of objective function values GRADIENTS - ordered bunch of gradient value lists VARIABLES - ordered bunch of variables FILES - ordered bunch of file types HISTORY - ordered bunch of history information Parameters can be accessed by item or attribute ie: state['FUNCTIONS'] or state.FUNCTIONS Methods: update() - updates self with another state pullnlink() - returns files to pull and link design_vector() - vectorizes design variables find_files() - finds existing mesh and solutions Example of a filled state: FUNCTIONS: LIFT: 0.2353065809 DRAG: 0.042149736 SIDEFORCE: 0.0 MOMENT_X: 0.0 MOMENT_Y: 0.0 MOMENT_Z: 0.046370243 FORCE_X: 0.0370065195 FORCE_Y: 0.2361700759 FORCE_Z: 0.0 EFFICIENCY: 5.5826347517 GRADIENTS: DRAG: [0.133697, 0.41473, 0.698497, (...) VARIABLES: DV_VALUE_NEW: [0.002, 0.002, 0.002, (...) FILES: MESH: mesh.su2 DIRECT: solution_flow.dat ADJOINT_DRAG: solution_adj_cd.dat HISTORY: DIRECT: {ITERATION=[1.0, 2.0, 3.0, (...) ADJOINT_DRAG: {ITERATION=[1.0, 2.0, 3.0, (...) 
""" if not state is None: assert isinstance(state,State) , 'input is must be a state instance' return state NewClass = State() for key in ['FUNCTIONS','GRADIENTS','VARIABLES','FILES','HISTORY']: NewClass[key] = ordered_bunch() return NewClass # ---------------------------------------------------------------------- # State Class # ---------------------------------------------------------------------- class State(ordered_bunch): """ state = SU2.io.state.State() This is the State class that should be generated with the Factory Function SU2.io.state.State_Factory() Parameters: none, should be loaded with State_Factory() Methods: update() - updates self with another state pullnlink() - returns files to pull and link design_vector() - vectorizes design variables find_files() - finds existing mesh and solutions """ _timestamp = 0 def update(self,ztate): """ Updates self given another state """ if not ztate: return assert isinstance(ztate,State) , 'must update with another State-type' for key in self.keys(): if isinstance(ztate[key],dict): self[key].update( ztate[key] ) elif ztate[key]: self[key] = ztate[key] self.set_timestamp() def __repr__(self): return self.__str__() def __str__(self): output = 'STATE:' for k1,v1 in self.iteritems(): output += '\n %s:' % k1 if isinstance(v1,dict): for k2,v2 in v1.iteritems(): output += '\n %s: %s' % (k2,v2) else: output += '\n %s' % v1 return output def pullnlink(self,config): """ pull,link = SU2.io.State.pullnlink(config) returns lists pull and link of files for folder redirection, based on a given config """ pull = []; link = [] # choose files to pull and link for key,value in self.FILES.iteritems(): # link big files if key == 'MESH': # mesh (merged and partitions) if config.DECOMPOSED: value = expand_part(value,config) # hack - twl else: value = [value] link.extend(value) elif key == 'DIRECT': #if config.RESTART_SOL == 'YES': # direct solution link.append(value) elif 'ADJOINT_' in key: #if config.RESTART_SOL == 'YES': # adjoint 
solution link.append(value) # copy all other files else: pull.append(value) #: for each filename return pull,link def design_vector(self): """ vectorizes State.VARIABLES """ vector = [] for value in self.VARIABLES.values(): if isinstance(value,dict): for v in value.values(): vector.append(v) elif not isinstance(value,list): value = [value] vector.extend(value) return vector def find_files(self,config): """ SU2.io.State.find_files(config) finds mesh and solution files for a given config. updates state.FILES with filenames. files already logged in state are not overridden. will ignore solutions if config.RESTART_SOL == 'NO'. """ files = self.FILES mesh_name = config.MESH_FILENAME direct_name = config.SOLUTION_FLOW_FILENAME adjoint_name = config.SOLUTION_ADJ_FILENAME targetea_name = 'TargetEA.dat' adj_map = get_adjointSuffix() restart = config.RESTART_SOL == 'YES' special_cases = get_specialCases(config) def register_file(label,filename): if not files.has_key(label): if os.path.exists(filename): files[label] = filename print 'found: %s' % filename else: assert os.path.exists(files[label]) , 'state expected file: %s' % filename #: register_file() # mesh register_file('MESH',mesh_name) # direct solution if restart: register_file('DIRECT',direct_name) # adjoint solutions if restart: for obj,suff in adj_map.iteritems(): ADJ_LABEL = 'ADJOINT_' + obj adjoint_name_suffixed = add_suffix(adjoint_name,suff) register_file(ADJ_LABEL,adjoint_name_suffixed) # equivalent area if 'EQUIV_AREA' in special_cases: register_file('TARGET_EA',targetea_name) return def __setitem__(self,k,v): if self._initialized: self.set_timestamp() super(State,self).__setitem__(k,v) def set_timestamp(self): self._timestamp = time.time() def tic(self): """ timestamp = State.tic() returns the time that this state was last modified """ return self._timestamp def toc(self,timestamp): """ updated = State.toc(timestamp) returns True if state was modified since last timestamp """ return self._timestamp > 
timestamp #: def State
The 18th IMLAM was held from 30 June to 5 July 2017 culminating in a fantastic final between University of Queensland and the National University of Singapore, whose team ultimately prevailed. Please see attached the announcement of the various prizes. The AMTAC “Spirit of the Moot” prize was this year awarded to Koç University – Congratulations! The Moot will be held again from 29 June 2018 with the University of Queensland hosting.
# Copyright 2013 komola GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Robots middleware denies access for search engines
"""

from webob import Request, Response


class RobotsMiddleware(object):
    """WSGI middleware that denies access for search engines.

    Requests for /robots.txt are answered directly with a deny-all
    robots policy; every other request is passed to the wrapped app.
    """

    def __init__(self, app, *args, **kwargs):
        self.app = app

    def GET(self, req):
        """Return a 200 text/plain response disallowing all user agents."""
        return Response(request=req,
                        body="User-agent: *\nDisallow: /",
                        content_type="text/plain")

    def __call__(self, env, start_response):
        request = Request(env)
        try:
            if request.path == '/robots.txt':
                return self.GET(request)(env, start_response)
        except UnicodeError:
            # An undecodable path is definitely not /robots.txt.
            pass
        return self.app(env, start_response)


def filter_factory(global_conf, **local_conf):
    def robots_filter(app):
        return RobotsMiddleware(app)
    return robots_filter
Having worked on the renovation of this 1920’s Californian brick bungalow in three incremental stages over the years, the most recent one included the kitchen and dining area and laundry, plus upgrading the bathrooms. In addition, floorboards were stained and polished to usher a cohesive flow between rooms, walls were re-painted, and ornate wallpaper was applied in the upstairs bedrooms. Forming the hub of a family home, the open-plan dining area bracketed by a galley-style kitchen and corridor of custom-designed joinery in a palette of steel blue allows family members to flow to other areas of the house with ease. Shunning a heavy block-like appearance, the kitchen’s island bar incorporates considered custom-design features including a chiseled geometric cut-out motif in its base (inspired by period details in the room’s original doors and echoed in the wall unit opposite it) plus an extended marble service lip counter-balanced by an open corner ‘cut-out’ displaying treasured pieces lit by concealed LED strip lights. These dynamic ‘off-centre’ eye diversions are also incorporated in the floating wall shelf opposite the galley. Together with the circular glass dining table and quilted dining chairs, the weighty presence of the hard surfaces and concealed cupboards are seamlessly lifted.
#!/usr/bin/env/python """Minimal example of OpenGL/CL interaction using textures. @author: Kai Ruhl @since 2013-02""" import sys import numpy as np import pyopencl as cl from glitter import Texture2D from glitter.raw import gl cl_source = """ const sampler_t T_RAW_SAMPLER = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP_TO_EDGE | CLK_FILTER_NEAREST; __kernel void run(uint wid, uint hei, __read_only image2d_t img0) { } """ def get_gl_context(option="g"): """Returns an OpenGL context. Options: g(lut), q(t)""" if "g" == option: print "Creating GLUT context." from glitter.contexts.glut import GlutWindow gl_context = GlutWindow(shape=(1,1), hide=True) elif "q" == option: print "Creating QT context." from PySide import QtGui from glitter.contexts.qt import QtWidget app = QtGui.QApplication.instance() if app is None: app = QtGui.QApplication(sys.argv) gl_context = QtWidget(None) else: raise Exception("Unknown option: %s" % option) return gl_context def get_cl_context(gl_context): """Creates a CL context, with or without given GL context.""" if gl_context is not None: # ... with OpenGL interop? with gl_context: assert cl.have_gl(), "GL interoperability not enabled." from pyopencl.tools import get_gl_sharing_context_properties cl_platform = cl.get_platforms()[0] cl_properties = [(cl.context_properties.PLATFORM, cl_platform)] + get_gl_sharing_context_properties() cl_devices = [cl_platform.get_devices()[-1]] # Only one is allowed! cl_context = cl.Context(properties=cl_properties, devices=cl_devices) else: # ... or in stand-alone mode, CL context without GL? cl_platform = cl.get_platforms()[0] # @UndefinedVariable cl_properties = [(cl.context_properties.PLATFORM, cl_platform)] cl_devices = [cl_platform.get_devices()[-1]] # Only one is allowed! 
cl_context = cl.Context(properties=cl_properties, devices=cl_devices) return cl_context def test_clgl_texture_interop(gl_context, cl_context): """Tests that an OpenGL texture can be used in an OpenCL kernel.""" from scipy.misc import lena; img = np.dstack([lena() / 256.] * 3).astype(np.float32); hei, wid = img.shape[:2] gl_img = Texture2D(img, mipmap=True, context=gl_context) cl_img = cl.GLTexture(cl_context, cl.mem_flags.READ_ONLY, gl.GL_TEXTURE_2D, 1, gl_img._id, 2) cl_queue = cl.CommandQueue(cl_context) cl_program = cl.Program(cl_context, cl_source).build() if True: # usable in loop cl_gl_data = [cl_img] cl.enqueue_acquire_gl_objects(cl_queue, cl_gl_data) cl_args = [np.uint32(wid), np.uint32(hei), cl_img]; assert 3 == len(cl_args) cl_program.run(cl_queue, (wid, hei), None, *cl_args) cl.enqueue_release_gl_objects(cl_queue, cl_gl_data) cl_queue.flush() cl_queue.finish() if __name__ == "__main__": gl_context = get_gl_context("q" if len(sys.argv) < 2 else sys.argv[1]) cl_context = get_cl_context(gl_context) test_clgl_texture_interop(gl_context, cl_context); w, h = 800, 600; if False: from glitter.framebuffers.framebuffer import Framebuffer gl_frame_buffer = Framebuffer(Texture2D(shape=(h, w, 3), context=gl_context), depth=Texture2D(shape=(h, w, 1), depth=True, context=gl_context), context=self) if False: import glitter.utils.dtypes as gdtype gl_int_mipmap_texture = Texture2D(shape=(h, w, 3), dtype=gdtype.uint8, mipmap=True, context=gl_context) gl_int_mipmap_texture.min_filter = Texture2D.min_filters.LINEAR_MIPMAP_LINEAR gl_data = gl_int_mipmap_texture.get_data(level=2) print "Finished."
used conveyor belts exporters in australia used conveyor belts exporters in australia. rock stones are crushed and some other products are packaged, our belt conveyors can be used to transport them. Read More >> exporters of used conveyor belt in australia . We offer used conveyor belt. We are exported of all kinds of scrap, plastic, metals. New and Used Conveyor insights. There are 122 Conveyor for sale in Australia from which to choose. Overall 72% of Conveyor buyers enquire on only used listings, 27% on new and 4.86% on both new and used Conveyor items.Buyers usually enquire on 1.95 different Conveyor classifieds before organising finance for Conveyor. China conveyor belt export to Australia used for local iron ore mine . US $20-150 / Meter . 100 Meters (Min. Order) 1 YR . Shanxi RYOMA Conveyor Belt Manufacturing Co., Ltd. (1) 80.0%. Germany Conveyor Belts from German Manufacturers and Exporters - Germany B2B Marketplace providing Conveyor Belts Offers and Catalogs from pre-verified Germany Suppliers and Manufacturers. exporters of used conveyor belt in australia Crusher ... Australia Conveyor Belt, Australia Conveyor Belt Manufacturers Conveyor belt wholesalers,exporters ... Chat Online. belt australia rubber conveyor belt ... australia conveyor belt distridutor. United Kingdom Endless Conveyor Belt from England Manufacturers and Exporters - United Kingdom B2B Marketplace providing Endless Conveyor Belt Offers and Catalogs from pre-verified United Kingdom Suppliers and Manufacturers.
# -*- coding: utf-8 -*-

import os
import random
import sys
from subprocess import Popen, PIPE, STDOUT


def message(msg, msgtype='progress'):
    """Send a message to console.

    Args:
        msg (str): the text to display
        msgtype (str): one of 'progress', 'warning', 'error', or 'debug'

    Exits the program with status 1 when msgtype == 'error'.
    """
    # Renamed local so it no longer shadows this function's own name.
    text = "[%(level)s]: %(text)s" % dict(level=msgtype.upper(), text=msg)
    sys.stderr.write(text.strip() + "\n")
    if msgtype == 'error':
        sys.exit(1)


def random_ports(port, n):
    """Generate a list of n random ports near the given port.

    The first 5 ports will be sequential, and the remaining n-5 will be
    randomly selected in the range [port-2*n, port+2*n].
    (copied from IPython notebookapp.py)
    """
    for i in range(min(5, n)):
        yield port + i
    for i in range(n - 5):
        yield max(1, port + random.randint(-2 * n, 2 * n))


def ParseTargets(targetfile):
    """Return the list of targets, each a dict with region, name, coords.

    Args:
        targetfile (str): path to a tab-separated file with 4 columns:
            chrom, start, end, name

    Exits with an error message on a malformed (non-4-column) line.
    """
    # BUG FIX: the original called f.readline() inside the `for line in f`
    # loop, silently consuming (and skipping) every other target line, and
    # then pointlessly reopened the file to read one line.
    targets = []
    with open(targetfile, "r") as f:
        for line in f:
            if not line.strip():
                continue  # tolerate blank/trailing lines
            items = line.strip().split("\t")
            if len(items) != 4:
                message("invalid target file. should have 4 columns", "error")
            chrom, start, end, name = items
            region = "%s:%s" % (chrom, start)
            targets.append({"name": name,
                            "region": region,
                            "coords": (chrom, int(start), int(end))})
    return targets


def WriteParamFile(paramfile, jspath, filetype, reference_track,
                   samples, alignments_by_sample, fromindex, toindex):
    """Generate paramfile (a node.js module) for creating snapshots
    from the command line.
    """
    with open(paramfile, "w") as f:
        f.write('var exports = module.exports = {};\n')
        f.write('exports.reference_track = "%s";\n' % reference_track)
        f.write('exports.samples = %s;\n' % str(samples))
        f.write('exports.alignBySample = {\n')
        for sample in alignments_by_sample:
            f.write('"%s": "%s",\n' % (sample, alignments_by_sample[sample]))
        f.write('};\n')
        f.write('exports.fromindex = %s;\n' % fromindex)
        f.write('exports.toindex = %s;\n' % toindex)
        f.write('exports.jspath = "%s";\n' % jspath)
        # PDF is rendered from SVG; anything else unknown gets "none".
        if filetype in ["html", "svg"]:
            f.write('exports.filetype = "%s";\n' % filetype)
        elif filetype == "pdf":
            f.write('exports.filetype = "svg";\n')
        else:
            f.write('exports.filetype = "none";\n')


def RunCommand(cmd):
    """Run *cmd* in a shell; return its exit status.

    On failure, reports the command's combined output via message().
    """
    p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE,
              stderr=STDOUT, close_fds=True)
    ex = p.wait()
    if ex != 0:
        stdout, stderr = "", ""
        if p.stdout is not None:
            stdout = p.stdout.read()
        if p.stderr is not None:
            stderr = p.stderr.read()
        message("ERROR: command '%s' failed.\n\nSTDOUT:%s\nSTDERR:%s"
                % (cmd, stdout, stderr))
    return ex


def CheckProgram(program):
    """Check whether a program is installed (on PATH and executable)."""
    def is_exe(fpath):
        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
    for path in os.environ["PATH"].split(os.pathsep):
        path = path.strip('"')
        exe_file = os.path.join(path, program)
        if is_exe(exe_file):
            return True
    return False


def CheckNodeJSPackage(package):
    """Check whether a node.js package is installed (requirable)."""
    cmd = "node -e \"var d3=require('%s');\"" % package
    x = RunCommand(cmd)
    return x == 0
The Theology of the Body let me order a beer. After studying TOB for two years at the suggestion of my wonderful bride Katie, I recently found myself in Germany at a restaurant nursing a traveler’s thirst. Without thinking twice, I ordered a Radler (a lager mixed with lemonade). When I say that TOB let me order a beer, perhaps I need to clarify. You see, the FIRST time I ordered a Radler in Germany was 6 years prior. I had been traveling to Baden-Wüttenburg extensively for business, and had fallen in love with both the local wheat beer and especially the nearby Bavarian brews. A local suggested the Radler as a change from the Hefeweizen and Pils I had been downing. It. Was. Fantastic. About as refreshing a drink as you could imagine. The SECOND time I ordered a Radler, I was in Germany again, this time accompanied by my team of U.S. folks. I was excited to share this heavenly beverage. However, as soon as I explained what the drink was, I received quite the ribbing for ordering a “girl drink” and became pretty self-conscious about it. I didn’t order any more Radlers over the trip, and for the next dozen or so trips to Germany my urge to appear “manly” squashed my thirst for the lemony-beery goodness. My conception of what it was to be a man was (at that point anyway), if not 180 degrees away from the truth, at least 120 degrees off. I had allowed the culture, the media, and my peers to define for me what a man was, how a man acted, and how a man presented himself. I had allowed my identity to be found not in relation to God, but in relation to my buddies. I had allowed the culture, the media, and my peers to define for me what a man was, how a man acted, and how a man presented himself. I had allowed my identity to be found not in relation to God, but in relation to my buddies. The Theology of the Body has shown me another way – an identity defined by God, a lifestyle based on who God intended a man to BE: A real man gives of himself and doesn’t take. 
A man sacrifices and doesn’t expect to be waited on. A man loves and doesn’t lust. A man is tender and doesn’t seek to harm. A man loves his bride and wants to help her get to heaven. A man yearns for God, not material things. A man prepares his heart for heaven, not his possessions for the future. A man is not defined by having a Grizzly Adams-esque beard, a good golf swing, a sense of humor, a sharp wit, or fishing prowess…He is not defined by looking good in a suit, and certainly not by what he drinks. This new “TOB lens” through which I came to understand the Gospels has taught me that I am a child of God, specifically chosen to be male and to live in relation to my bride in a particularly self-donative way. I have learned to orient my relationships in this more authentic way and to stop worrying about my apparent manliness to others. The teachings of St. John Paul the Great realigned my expectations of myself, and fundamentally set me free to be a better me – a better Justin. These teachings allowed me to take up my identity not as the world would have it, but as God desires – free to live as my true self, and free to order that Radler. In addition to teaching me who I am as a man, TOB also showed me that all things that are true, good and beautiful—things like that Radler—point past themselves and can direct us to God (should we let them). In giving me a thirst for this beer, and giving me great satisfaction in it, God was telling me that there is an infinitely satisfying fountain of His mercy and grace beyond this beer. It was as if God reminded me : Whoever drinks this Radler will be thirsty again, but no one who drinks the water that I give will ever be thirsty again. All things that are true, good and beautiful—things like that Radler—point past themselves and can direct us to God (should we let them). TOB allowed me to see an image of my redemption in that beer. 
You see, the Radler is made of two parts: lemonade, normally understood as a child’s drink, and beer, an adult beverage. TOB let me understand that in this tasty mixture, God had given me (in one draught) a simultaneous icon of both my youth and my adulthood. In this drink God reminded me that He had redeemed the totality of my whole life—not just Past Justin, but Present Justin and Future Justin as well. Moreover, there is an image of sanctification in the Radler. God gives us lemons, and we quite literally make lemonade. God gives us barley, hops, and water, and we make beer (as is commonly attributed to Saint Arnold of Metz: “From man’s sweat and God’s love, beer came into the world”). The “delicious-ification” of God’s raw ingredients requires our hard work, just as our sanctification requires our participation & cooperation, even though the grace all comes from God. I would not have seen these icons nor understood their significance before studying TOB. In yet another display of God’s goodness, it was unusual that the drink I had recently was made not with bottled juice, but with fresh squeezed lemonade. And it was made of not just “normal” beer, but from a fresh keg of Paulaner (tapped not even an hour prior to the pour). God didn’t merely give me a beer to drink, He gave me the best of the fruit of the earth, and with TOB I understood that in doing so, He wanted to show me a glimmer of His goodness and ask me to give my goodness back to Him as a gift. Certainly, the Theology of the Body has transformed my life in more important ways than what happened at that restaurant in Germany – especially in regards to my relationship with my bride and children. But on that day, TOB let me order a beer. And it was delicious. JUSTIN SVEC resides in Noblesville, IN with his best friend Katie and their six children. He is a volunteer high school youth minister and teen bible study leader by night, and a mechanical engineer by day. 
He enjoys playing with his kids, holding hands with his wife, hiking, playing guitar, using the Oxford Comma, and being rather tall. He wants to pray, read, write, and ponder more than he does now.
#!/usr/bin/env python
# Smoke-test script for the ``sgf`` parser package: parses every bundled
# example file, then checks round-tripping, escaping, lenient-input cases,
# error cases, and the 0.5 ``root``/``rest`` API.
import glob
import sgf

# Python 2 / Python 3 compatible StringIO import.
try:
    from StringIO import StringIO  # pragma: no cover
except ImportError:  # pragma: no cover
    from io import StringIO  # pragma: no cover

# Every shipped example file must parse without raising.
for filename in glob.glob("examples/*.sgf"):
    with open(filename) as f:
        sgf.parse(f.read())

# A simple linear game: parse, iterate, and round-trip it.
example = "(;FF[4]GM[1]SZ[19];B[aa];W[bb];B[cc];W[dd];B[ad];W[bd])"
collection = sgf.parse(example)
for game in collection:
    for node in game:
        pass

# A single node serializes back to its source text.
out = StringIO()
collection[0].nodes[1].output(out)
assert out.getvalue() == ";B[aa]"
out.close()

# The whole collection round-trips byte-for-byte.
out = StringIO()
collection.output(out)
assert out.getvalue() == example
out.close()

# A game with two variations also round-trips.
example2 = "(;FF[4]GM[1]SZ[19];B[aa];W[bb](;B[cc];W[dd];B[ad];W[bd])" \
    "(;B[hh];W[hg]))"
collection = sgf.parse(example2)
out = StringIO()
collection.output(out)
assert out.getvalue() == example2
out.close()

# Escaped ']' and '\' inside a property value are unescaped on parse and
# re-escaped on output.
example3 = "(;C[foo\\]\\\\])"
collection = sgf.parse(example3)
assert collection[0].nodes[0].properties["C"] == ["foo]\\"]
out = StringIO()
collection.output(out)
assert out.getvalue() == example3
out.close()

# Lenient inputs that must be accepted.
sgf.parse("foo(;)")  # junk before first ( is supported
sgf.parse("( ;)")  # whitespace after ( is allowed
sgf.parse("(;;)")  # a node after an empty node is allowed
sgf.parse("(;(;))")  # a gametree after an empty node is allowed

# errors -- each malformed input must raise sgf.ParseException.
try:
    sgf.parse("()")  # games must have a node
    assert False  # pragma: no cover
except sgf.ParseException:
    pass
try:
    sgf.parse("(W[tt])")  # a property has to be in a node
    assert False  # pragma: no cover
except sgf.ParseException:
    pass
try:
    sgf.parse("(;)W[tt]")  # a property has to be in a game
    assert False  # pragma: no cover
except sgf.ParseException:
    pass
try:
    sgf.parse("(;1)")  # property names can't start with numbers
    assert False  # pragma: no cover
except sgf.ParseException:
    pass
try:
    sgf.parse("(;A5[])")  # property names can't have numbers at all
    assert False  # pragma: no cover
except sgf.ParseException:
    pass
try:
    sgf.parse("(;FOO[bar]5)")  # bad character after a property value
    assert False  # pragma: no cover
except sgf.ParseException:
    pass
try:
    sgf.parse("(;")  # finished mid-gametree
    assert False  # pragma: no cover
except sgf.ParseException:
    pass

# new features for 0.5: game.root and game.rest accessors.
with open("examples/ff4_ex.sgf") as f:
    ff4_ex = sgf.parse(f.read())
assert len(ff4_ex) == 2
game1 = ff4_ex[0]
assert game1.root.properties["SZ"] == ["19"]
# ``rest`` iterates every node after the root.
count = 0
for node in game1.rest:
    count += 1
assert count == 13
collection = sgf.parse(example2)
count = 0
for node in collection[0].rest:
    count += 1
assert count == 6
# test game.rest if only one node
assert sgf.parse("(;)")[0].rest is None
The New York Times article below is a nice tribute to an influential scientist who fundamentally changed the field of infectious disease research by applying the newly emerging tools and strategies of molecular genetics. Perhaps just as important was the legacy of his remarkable track record of mentorship, with a long trainee list that includes M&I Professor Virginia Miller, Duke Professor Raphael Valdivia, and NCSU Professor Emeritus Paul Orndorff. Stanley, we’ll miss you.
#! /usr/bin/env python
"""
Add decorator to time pipeline steps.

See the following links for more info:

https://github.com/bunbun/ruffus/issues/15
https://github.com/daler/pipeline-example/blob/master/pipeline-2/helpers.py
"""
import functools
import sys
import time


class time_job(object):
    """
    @time_job decorator.

    Wraps a function and prints elapsed time to standard out, or any
    other file-like object with a .write() method, or appends it to a
    file named by a string.
    """

    def __init__(self, stream=sys.stdout, new_stream=False):
        """Configure where the timing line is written.

        :param stream: a file-like object with a ``.write()`` method, or
            a filename (``str``) to write the timing line to.
        :param new_stream: only used when ``stream`` is a filename; open
            the file in ``'w'`` (truncate) mode instead of ``'a'``.
            NOTE(review): the file is re-opened on *every* call, so with
            ``new_stream=True`` only the most recent timing line survives
            -- confirm this is the intended semantics.
        """
        self.stream = stream
        self.new_stream = new_stream

    def __call__(self, func):
        """Return a wrapper around *func* that logs its wall-clock time.

        The wrapper writes one tab-separated line per call:
        ``<function name>\\t<elapsed seconds to 4 decimals>\\n``.
        """
        @functools.wraps(func)
        def inner(*args, **kwargs):
            start = time.time()
            ret = func(*args, **kwargs)
            elapsed = time.time() - start
            runtime = "{0}\t{1:.4f}\n".format(func.__name__, elapsed)
            # isinstance (rather than ``type(...) == str``) so str
            # subclasses are also treated as filenames.
            if isinstance(self.stream, str):
                mode = 'w' if self.new_stream else 'a'
                with open(self.stream, mode) as log:
                    log.write(runtime)
            else:
                self.stream.write(runtime)
            # Return the decorated function's return value.
            return ret

        # functools.wraps already copies __name__/__doc__/__dict__ (the
        # latter includes any ruffus ``pipeline_task`` attribute), but keep
        # the explicit copy to preserve the original module's behavior.
        if hasattr(func, "pipeline_task"):
            inner.pipeline_task = func.pipeline_task
        return inner
The use of discrete-event simulators in the design and development of distributed systems is appealing due to their efficiency and scalability. Their core abstractions of process and event map neatly to the components and interactions of modern-day distributed systems and allow designing realistic simulation scenarios. MONARC, a multithreaded, process oriented simulation framework designed for modelling large scale distributed systems, allows the realistic simulation of a wide-range of distributed system technologies, with respect to their specific components and characteristics. In this paper we present an innovative solution to the problem of evaluating the dependability characteristic of distributed systems. Our solution is based on several proposed extensions to the simulation model of the MONARC simulation framework. These extensions refer to fault tolerance and system orchestration mechanisms being added in order to asses the reliability and availability of distributed systems. The extended simulation model includes the necessary components to describe various actual failure situations and provides the mechanisms to evaluate different strategies for replication and redundancy procedures, as well as security enforcement mechanisms.
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):
    # South schema migration for the 'shakti' app: replaces the single
    # free-text 'JobDescriptor.who_can' column with three fixed-length
    # flag columns ('who_can_o', 'who_can_b', 'who_can_h'), one per
    # candidate category, each a 5-char CharField defaulting to 'N'.

    def forwards(self, orm):
        # Apply the migration: drop the old combined column ...
        # Deleting field 'JobDescriptor.who_can'
        db.delete_column(u'shakti_jobdescriptor', 'who_can')

        # ... and add one flag column per category.  keep_default=False
        # means the default is used to fill existing rows once but is not
        # stored in the database schema.
        # Adding field 'JobDescriptor.who_can_o'
        db.add_column(u'shakti_jobdescriptor', 'who_can_o',
                      self.gf('django.db.models.fields.CharField')(default='N', max_length=5),
                      keep_default=False)

        # Adding field 'JobDescriptor.who_can_b'
        db.add_column(u'shakti_jobdescriptor', 'who_can_b',
                      self.gf('django.db.models.fields.CharField')(default='N', max_length=5),
                      keep_default=False)

        # Adding field 'JobDescriptor.who_can_h'
        db.add_column(u'shakti_jobdescriptor', 'who_can_h',
                      self.gf('django.db.models.fields.CharField')(default='N', max_length=5),
                      keep_default=False)

    def backwards(self, orm):
        # Reverse the migration: restore the combined column ...
        # NOTE(review): default=-1 on a TextField (an int default for a
        # text column) looks suspect -- reproduced unchanged; confirm
        # against the migration that originally created 'who_can'.
        # Adding field 'JobDescriptor.who_can'
        db.add_column(u'shakti_jobdescriptor', 'who_can',
                      self.gf('django.db.models.fields.TextField')(default=-1, max_length=20),
                      keep_default=False)

        # ... and drop the three flag columns again.
        # Deleting field 'JobDescriptor.who_can_o'
        db.delete_column(u'shakti_jobdescriptor', 'who_can_o')

        # Deleting field 'JobDescriptor.who_can_b'
        db.delete_column(u'shakti_jobdescriptor', 'who_can_b')

        # Deleting field 'JobDescriptor.who_can_h'
        db.delete_column(u'shakti_jobdescriptor', 'who_can_h')

    # Frozen ORM snapshot South uses to build the 'orm' object passed to
    # forwards()/backwards().  Auto-generated; do not edit by hand.
    models = {
        u'shakti.constraints': {
            'Meta': {'object_name': 'Constraints'},
            'assistance_descr': ('django.db.models.fields.CharField', [], {'default': "' May need help'", 'max_length': '200'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'night_shift': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'relocatable': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
            'special_assistance': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
            'uid': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['shakti.PersonalInfo']"})
        },
        u'shakti.hearing': {
            'Meta': {'object_name': 'Hearing'},
            'hearing_aid': ('django.db.models.fields.CharField', [], {'default': "'N'", 'max_length': '5'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'uid': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['shakti.PersonalInfo']"})
        },
        u'shakti.jobdescriptor': {
            'Meta': {'object_name': 'JobDescriptor'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'location': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'night_shift': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
            'post': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'qualification': ('django.db.models.fields.TextField', [], {}),
            'skills_required': ('django.db.models.fields.TextField', [], {}),
            'who_can_b': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
            'who_can_h': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
            'who_can_o': ('django.db.models.fields.CharField', [], {'max_length': '5'})
        },
        u'shakti.orthopedic': {
            'Meta': {'object_name': 'Orthopedic'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'lhand_amputee': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'lleg_amputee': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'orthopedic_aid': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'rhand_amputee': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'rleg_amputee': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'uid': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['shakti.PersonalInfo']"})
        },
        u'shakti.other': {
            'Meta': {'object_name': 'Other'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'other_description': ('django.db.models.fields.TextField', [], {}),
            'uid': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['shakti.PersonalInfo']"})
        },
        u'shakti.personalinfo': {
            'Meta': {'object_name': 'PersonalInfo'},
            'code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'dob': ('django.db.models.fields.DateField', [], {}),
            'email': ('django.db.models.fields.EmailField', [], {'default': "'[email protected]'", 'max_length': '30'}),
            'gender': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'location': ('django.db.models.fields.TextField', [], {}),
            'maritial_status': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'mobile_num': ('django.db.models.fields.CharField', [], {'default': "'+9129089998'", 'max_length': '15'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
        },
        u'shakti.qualification': {
            'Meta': {'object_name': 'Qualification'},
            'eduIndex': ('django.db.models.fields.IntegerField', [], {'max_length': '10'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'other_desc': ('django.db.models.fields.TextField', [], {'default': 'None', 'blank': 'True'}),
            'uid': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['shakti.PersonalInfo']"})
        },
        u'shakti.skills': {
            'Meta': {'object_name': 'Skills'},
            'computer_skills': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'projects': ('django.db.models.fields.TextField', [], {}),
            'speciality': ('django.db.models.fields.TextField', [], {}),
            'uid': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['shakti.PersonalInfo']"})
        },
        u'shakti.tracker': {
            'Meta': {'object_name': 'Tracker'},
            'details': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '10'}),
            'doj': ('django.db.models.fields.DateField', [], {'default': 'None'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'placed': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'uid': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['shakti.PersonalInfo']"})
        },
        u'shakti.vision': {
            'Meta': {'object_name': 'Vision'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'severity': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'uid': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['shakti.PersonalInfo']"})
        }
    }

    complete_apps = ['shakti']
New Natures Sleep Coupon Codes 2019: Up To $41 Off & Free Shipping. Use these new Natures Sleep promo codes to save on your current purchase. Our exclusive Natures Sleep coupon codes are updated frequently, so please bookmark this page! Also, check out other jaw dropping discounts for your favorite brands and online stores! Hot Natures Sleep coupon code: 65% off Sitewide. Order now to claim your discount! Try one of our exclusive Natures Sleep coupons before they expire! Huge savings with this Natures Sleep promo code. Grab up to 50% off sitewide discount on entire order. Use a latest Natures Sleep coupon and save your order instantly. Multiple use Natures Sleep promo codes for new and repeat customers! Get 70% off on any luxurious gel infused memory foam mattress. New Natures Sleep coupon codes expire soon. Hot Natures Sleep coupon code: Like Natures Sleep on Facebook and win a free mattress from Natures Sleep. Try one of our exclusive Natures Sleep coupons before they expire!
# coding=utf-8 """Multi Exposure Tool.""" import logging from collections import OrderedDict from qgis.PyQt.QtCore import Qt from qgis.PyQt.QtWidgets import ( QDialog, QComboBox, QLabel, QSizePolicy, QTreeWidgetItem, QListWidgetItem ) from qgis.PyQt.QtGui import QIcon from qgis.PyQt.QtXml import QDomDocument from qgis.core import QgsProject, QgsApplication from qgis.utils import iface as iface_object from safe import messaging as m from safe.common.exceptions import ( NoKeywordsFoundError, KeywordNotFoundError, MetadataReadError, ) from safe.common.signals import send_error_message from safe.definitions.constants import ( inasafe_keyword_version_key, ANALYSIS_FAILED_BAD_INPUT, PREPARE_SUCCESS, ANALYSIS_FAILED_BAD_CODE, entire_area_item_aggregation, MULTI_EXPOSURE_ANALYSIS_FLAG, ) from safe.definitions.exposure import exposure_all from safe.definitions.font import bold_font from safe.definitions.layer_purposes import ( layer_purpose_hazard, layer_purpose_exposure, layer_purpose_aggregation, ) from safe.definitions.reports.components import ( standard_impact_report_metadata_html, standard_multi_exposure_impact_report_metadata_html) from safe.definitions.utilities import definition from safe.gis.tools import full_layer_uri from safe.gui.analysis_utilities import ( add_impact_layers_to_canvas, add_layers_to_canvas_with_custom_orders, ) from safe.gui.gui_utilities import layer_from_combo, add_ordered_combo_item from safe.gui.widgets.message import ( enable_messaging, send_static_message, ready_message, ) from safe.impact_function.impact_function_utilities import ( LAYER_ORIGIN_ROLE, FROM_CANVAS, FROM_ANALYSIS, LAYER_PARENT_ANALYSIS_ROLE, LAYER_PURPOSE_KEY_OR_ID_ROLE, ) from safe.impact_function.multi_exposure_wrapper import ( MultiExposureImpactFunction) from safe.messaging import styles from safe.report.impact_report import ImpactReport from safe.utilities.extent import Extent from safe.utilities.gis import qgis_version, layer_icon from safe.utilities.i18n import tr 
from safe.utilities.keyword_io import KeywordIO from safe.utilities.qgis_utilities import display_warning_message_bar from safe.utilities.qt import disable_busy_cursor, enable_busy_cursor from safe.utilities.resources import ( get_ui_class, resources_path, ) from safe.utilities.settings import setting from safe.utilities.utilities import ( is_keyword_version_supported, basestring_to_message, get_error_message, ) LOGGER = logging.getLogger('InaSAFE') FORM_CLASS = get_ui_class('multi_exposure_dialog_base.ui') INFO_STYLE = styles.BLUE_LEVEL_4_STYLE LOGO_ELEMENT = m.Brand() class MultiExposureDialog(QDialog, FORM_CLASS): """Dialog for multi exposure tool.""" def __init__(self, parent=None, iface=iface_object): """Constructor for the multi exposure dialog. :param parent: Parent widget of this dialog. :type parent: QWidget :param iface: An instance of QgisInterface :type iface: QgisInterface """ QDialog.__init__(self, parent) self.use_selected_only = setting( 'useSelectedFeaturesOnly', expected_type=bool) self.parent = parent self.iface = iface self.setupUi(self) icon = resources_path('img', 'icons', 'show-multi-exposure.svg') self.setWindowIcon(QIcon(icon)) self.tab_widget.setCurrentIndex(0) self.combos_exposures = OrderedDict() self.keyword_io = KeywordIO() self._create_exposure_combos() self._multi_exposure_if = None self._extent = Extent(iface) self._extent.show_rubber_bands = setting( 'showRubberBands', False, bool) enable_messaging(self.message_viewer, self) self.btn_back.clicked.connect(self.back_clicked) self.btn_next.clicked.connect(self.next_clicked) self.btn_cancel.clicked.connect(self.reject) self.btn_run.clicked.connect(self.accept) self.validate_impact_function() self.tab_widget.currentChanged.connect(self._tab_changed) self.tree.itemSelectionChanged.connect(self._tree_selection_changed) self.list_layers_in_map_report.itemSelectionChanged.connect( self._list_selection_changed) self.add_layer.clicked.connect(self._add_layer_clicked) 
self.remove_layer.clicked.connect(self._remove_layer_clicked) self.move_up.clicked.connect(self.move_layer_up) self.move_down.clicked.connect(self.move_layer_down) self.cbx_hazard.currentIndexChanged.connect( self.validate_impact_function) self.cbx_aggregation.currentIndexChanged.connect( self.validate_impact_function) # Keep track of the current panel self._current_index = 0 self.tab_widget.setCurrentIndex(self._current_index) def _tab_changed(self): """Triggered when the current tab is changed.""" current = self.tab_widget.currentWidget() if current == self.analysisTab: self.btn_back.setEnabled(False) self.btn_next.setEnabled(True) elif current == self.reportingTab: if self._current_index == 0: # Only if the user is coming from the first tab self._populate_reporting_tab() self.reporting_options_layout.setEnabled( self._multi_exposure_if is not None) self.btn_back.setEnabled(True) self.btn_next.setEnabled(True) else: self.btn_back.setEnabled(True) self.btn_next.setEnabled(False) self._current_index = current def back_clicked(self): """Back button clicked.""" self.tab_widget.setCurrentIndex(self.tab_widget.currentIndex() - 1) def next_clicked(self): """Next button clicked.""" self.tab_widget.setCurrentIndex(self.tab_widget.currentIndex() + 1) def ordered_expected_layers(self): """Get an ordered list of layers according to users input. From top to bottom in the legend: [ ('FromCanvas', layer name, full layer URI, QML), ('FromAnalysis', layer purpose, layer group, None), ... ] The full layer URI is coming from our helper. :return: An ordered list of layers following a structure. 
:rtype: list """ registry = QgsProject.instance() layers = [] count = self.list_layers_in_map_report.count() for i in range(count): layer = self.list_layers_in_map_report.item(i) origin = layer.data(LAYER_ORIGIN_ROLE) if origin == FROM_ANALYSIS['key']: key = layer.data(LAYER_PURPOSE_KEY_OR_ID_ROLE) parent = layer.data(LAYER_PARENT_ANALYSIS_ROLE) layers.append(( FROM_ANALYSIS['key'], key, parent, None )) else: layer_id = layer.data(LAYER_PURPOSE_KEY_OR_ID_ROLE) layer = registry.mapLayer(layer_id) style_document = QDomDocument() error = '' layer.exportNamedStyle(style_document, error) layers.append(( FROM_CANVAS['key'], layer.name(), full_layer_uri(layer), style_document.toString() )) return layers def _add_layer_clicked(self): """Add layer clicked.""" layer = self.tree.selectedItems()[0] origin = layer.data(0, LAYER_ORIGIN_ROLE) if origin == FROM_ANALYSIS['key']: parent = layer.data(0, LAYER_PARENT_ANALYSIS_ROLE) key = layer.data(0, LAYER_PURPOSE_KEY_OR_ID_ROLE) item = QListWidgetItem('%s - %s' % (layer.text(0), parent)) item.setData(LAYER_PARENT_ANALYSIS_ROLE, parent) item.setData(LAYER_PURPOSE_KEY_OR_ID_ROLE, key) else: item = QListWidgetItem(layer.text(0)) layer_id = layer.data(0, LAYER_PURPOSE_KEY_OR_ID_ROLE) item.setData(LAYER_PURPOSE_KEY_OR_ID_ROLE, layer_id) item.setData(LAYER_ORIGIN_ROLE, origin) self.list_layers_in_map_report.addItem(item) self.tree.invisibleRootItem().removeChild(layer) self.tree.clearSelection() def _remove_layer_clicked(self): """Remove layer clicked.""" layer = self.list_layers_in_map_report.selectedItems()[0] origin = layer.data(LAYER_ORIGIN_ROLE) if origin == FROM_ANALYSIS['key']: key = layer.data(LAYER_PURPOSE_KEY_OR_ID_ROLE) parent = layer.data(LAYER_PARENT_ANALYSIS_ROLE) parent_item = self.tree.findItems( parent, Qt.MatchContains | Qt.MatchRecursive, 0)[0] item = QTreeWidgetItem(parent_item, [definition(key)['name']]) item.setData(0, LAYER_PARENT_ANALYSIS_ROLE, parent) else: parent_item = self.tree.findItems( FROM_CANVAS['name'], 
Qt.MatchContains | Qt.MatchRecursive, 0)[0] item = QTreeWidgetItem(parent_item, [layer.text()]) layer_id = layer.data(LAYER_PURPOSE_KEY_OR_ID_ROLE) item.setData(0, LAYER_PURPOSE_KEY_OR_ID_ROLE, layer_id) item.setData(0, LAYER_ORIGIN_ROLE, origin) index = self.list_layers_in_map_report.indexFromItem(layer) self.list_layers_in_map_report.takeItem(index.row()) self.list_layers_in_map_report.clearSelection() def move_layer_up(self): """Move the layer up.""" layer = self.list_layers_in_map_report.selectedItems()[0] index = self.list_layers_in_map_report.indexFromItem(layer).row() item = self.list_layers_in_map_report.takeItem(index) self.list_layers_in_map_report.insertItem(index - 1, item) self.list_layers_in_map_report.item(index - 1).setSelected(True) def move_layer_down(self): """Move the layer down.""" layer = self.list_layers_in_map_report.selectedItems()[0] index = self.list_layers_in_map_report.indexFromItem(layer).row() item = self.list_layers_in_map_report.takeItem(index) self.list_layers_in_map_report.insertItem(index + 1, item) self.list_layers_in_map_report.item(index + 1).setSelected(True) def _list_selection_changed(self): """Selection has changed in the list.""" items = self.list_layers_in_map_report.selectedItems() self.remove_layer.setEnabled(len(items) >= 1) if len(items) == 1 and self.list_layers_in_map_report.count() >= 2: index = self.list_layers_in_map_report.indexFromItem(items[0]) index = index.row() if index == 0: self.move_up.setEnabled(False) self.move_down.setEnabled(True) elif index == self.list_layers_in_map_report.count() - 1: self.move_up.setEnabled(True) self.move_down.setEnabled(False) else: self.move_up.setEnabled(True) self.move_down.setEnabled(True) else: self.move_up.setEnabled(False) self.move_down.setEnabled(False) def _tree_selection_changed(self): """Selection has changed in the tree.""" self.add_layer.setEnabled(len(self.tree.selectedItems()) >= 1) def _populate_reporting_tab(self): """Populate trees about layers.""" 
self.tree.clear() self.add_layer.setEnabled(False) self.remove_layer.setEnabled(False) self.move_up.setEnabled(False) self.move_down.setEnabled(False) self.tree.setColumnCount(1) self.tree.setRootIsDecorated(False) self.tree.setHeaderHidden(True) analysis_branch = QTreeWidgetItem( self.tree.invisibleRootItem(), [FROM_ANALYSIS['name']]) analysis_branch.setFont(0, bold_font) analysis_branch.setExpanded(True) analysis_branch.setFlags(Qt.ItemIsEnabled) if self._multi_exposure_if: expected = self._multi_exposure_if.output_layers_expected() for group, layers in list(expected.items()): group_branch = QTreeWidgetItem(analysis_branch, [group]) group_branch.setFont(0, bold_font) group_branch.setExpanded(True) group_branch.setFlags(Qt.ItemIsEnabled) for layer in layers: layer = definition(layer) if layer.get('allowed_geometries', None): item = QTreeWidgetItem( group_branch, [layer.get('name')]) item.setData( 0, LAYER_ORIGIN_ROLE, FROM_ANALYSIS['key']) item.setData(0, LAYER_PARENT_ANALYSIS_ROLE, group) item.setData( 0, LAYER_PURPOSE_KEY_OR_ID_ROLE, layer['key']) item.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable) canvas_branch = QTreeWidgetItem( self.tree.invisibleRootItem(), [FROM_CANVAS['name']]) canvas_branch.setFont(0, bold_font) canvas_branch.setExpanded(True) canvas_branch.setFlags(Qt.ItemIsEnabled) # List layers from the canvas loaded_layers = list(QgsProject.instance().mapLayers().values()) canvas_layers = self.iface.mapCanvas().layers() flag = setting('visibleLayersOnlyFlag', expected_type=bool) for loaded_layer in loaded_layers: if flag and loaded_layer not in canvas_layers: continue title = loaded_layer.name() item = QTreeWidgetItem(canvas_branch, [title]) item.setData(0, LAYER_ORIGIN_ROLE, FROM_CANVAS['key']) item.setData(0, LAYER_PURPOSE_KEY_OR_ID_ROLE, loaded_layer.id()) item.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable) self.tree.resizeColumnToContents(0) def _create_exposure_combos(self): """Create one combobox for each exposure and insert them in the 
UI.""" # Map registry may be invalid if QGIS is shutting down project = QgsProject.instance() canvas_layers = self.iface.mapCanvas().layers() # MapLayers returns a QMap<QString id, QgsMapLayer layer> layers = list(project.mapLayers().values()) # Sort by name for tests layers.sort(key=lambda x: x.name()) show_only_visible_layers = setting( 'visibleLayersOnlyFlag', expected_type=bool) # For issue #618 if len(layers) == 0: # self.message_viewer.setHtml(getting_started_message()) return for one_exposure in exposure_all: label = QLabel(one_exposure['name']) combo = QComboBox() combo.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed) combo.addItem(tr('Do not use'), None) self.form_layout.addRow(label, combo) self.combos_exposures[one_exposure['key']] = combo for layer in layers: if (show_only_visible_layers and (layer not in canvas_layers)): continue try: layer_purpose = self.keyword_io.read_keywords( layer, 'layer_purpose') keyword_version = str(self.keyword_io.read_keywords( layer, inasafe_keyword_version_key)) if not is_keyword_version_supported(keyword_version): continue except BaseException: # pylint: disable=W0702 # continue ignoring this layer continue # See if there is a title for this layer, if not, # fallback to the layer's filename # noinspection PyBroadException try: title = self.keyword_io.read_keywords(layer, 'title') except (NoKeywordsFoundError, KeywordNotFoundError, MetadataReadError): # Skip if there are no keywords at all, or missing keyword continue except BaseException: # pylint: disable=W0702 pass else: # Lookup internationalised title if available title = self.tr(title) # Register title with layer set_layer_from_title = setting( 'set_layer_from_title_flag', True, bool) if title and set_layer_from_title: if qgis_version() >= 21800: layer.setName(title) else: # QGIS 2.14 layer.setLayerName(title) source = layer.id() icon = layer_icon(layer) if layer_purpose == layer_purpose_hazard['key']: add_ordered_combo_item( self.cbx_hazard, title, source, 
                    # NOTE(review): this closes a call started in a method
                    # whose definition begins earlier in the file; only the
                    # tail of that method is visible from here.
                    icon=icon)
            elif layer_purpose == layer_purpose_aggregation['key']:
                if self.use_selected_only:
                    count_selected = layer.selectedFeatureCount()
                    if count_selected > 0:
                        add_ordered_combo_item(
                            self.cbx_aggregation,
                            title,
                            source,
                            count_selected,
                            icon=icon
                        )
                    else:
                        add_ordered_combo_item(
                            self.cbx_aggregation, title, source, None, icon)
                else:
                    add_ordered_combo_item(
                        self.cbx_aggregation, title, source, None, icon)
            elif layer_purpose == layer_purpose_exposure['key']:
                # fetching the exposure
                try:
                    exposure_type = self.keyword_io.read_keywords(
                        layer, layer_purpose_exposure['key'])
                except BaseException:  # pylint: disable=W0702
                    # continue ignoring this layer
                    continue
                # Place the layer in the combo matching its exposure type.
                for key, combo in list(self.combos_exposures.items()):
                    if key == exposure_type:
                        add_ordered_combo_item(
                            combo, title, source, icon=icon)

        # The "entire area" pseudo-aggregation is always offered.
        self.cbx_aggregation.addItem(entire_area_item_aggregation, None)
        # Re-validate the impact function whenever any exposure combo changes.
        for combo in list(self.combos_exposures.values()):
            combo.currentIndexChanged.connect(self.validate_impact_function)

    def progress_callback(self, current_value, maximum_value, message=None):
        """GUI based callback implementation for showing progress.

        :param current_value: Current progress.
        :type current_value: int

        :param maximum_value: Maximum range (point at which the task is
            complete).
        :type maximum_value: int

        :param message: Optional message dictionary to containing content
            we can display to the user. See
            safe.definitions.analysis_steps for an example of the expected
            format
        :type message: dict
        """
        # Build a rich status message: logo, heading, then the current
        # analysis step (if any) and the running performance log.
        report = m.Message()
        report.add(LOGO_ELEMENT)
        report.add(m.Heading(
            self.tr('Analysis status'), **INFO_STYLE))
        if message is not None:
            report.add(m.ImportantText(message['name']))
            report.add(m.Paragraph(message['description']))
        report.add(
            self._multi_exposure_if
            .current_impact_function.performance_log_message())
        send_static_message(self, report)
        # Reflect progress in the progress bar and keep the GUI responsive.
        self.progress_bar.setMaximum(maximum_value)
        self.progress_bar.setValue(current_value)
        QgsApplication.processEvents()

    def validate_impact_function(self):
        """Check validity of the current impact function.

        Builds a fresh MultiExposureImpactFunction from the current combo
        selections, prepares it, and enables the Run button only on
        success. On success the prepared instance is stored in
        ``self._multi_exposure_if``; on failure it is reset to ``None``.
        """
        # Always set it to False
        self.btn_run.setEnabled(False)
        # A combo with a single entry offers no real choice; lock it.
        for combo in list(self.combos_exposures.values()):
            if combo.count() == 1:
                combo.setEnabled(False)
        hazard = layer_from_combo(self.cbx_hazard)
        aggregation = layer_from_combo(self.cbx_aggregation)
        exposures = []
        for combo in list(self.combos_exposures.values()):
            exposures.append(layer_from_combo(combo))
        # Drop empty selections.
        exposures = [layer for layer in exposures if layer]
        multi_exposure_if = MultiExposureImpactFunction()
        multi_exposure_if.hazard = hazard
        multi_exposure_if.exposures = exposures
        multi_exposure_if.debug = False
        multi_exposure_if.callback = self.progress_callback
        if aggregation:
            multi_exposure_if.use_selected_features_only = (
                self.use_selected_only)
            multi_exposure_if.aggregation = aggregation
        else:
            # No aggregation layer: analyse the whole canvas extent in the
            # current map CRS.
            multi_exposure_if.crs = (
                self.iface.mapCanvas().mapSettings().destinationCrs())
        if len(self.ordered_expected_layers()) != 0:
            # NOTE(review): this writes to the PREVIOUS impact function
            # (self._multi_exposure_if), not the freshly built local
            # multi_exposure_if that is prepared below. It also raises
            # AttributeError when self._multi_exposure_if is None (e.g.
            # after a failed validation). This looks like it should be
            # `multi_exposure_if.output_layers_ordered = ...` — confirm
            # against upstream before changing.
            self._multi_exposure_if.output_layers_ordered = (
                self.ordered_expected_layers())
        status, message = multi_exposure_if.prepare()
        if status == PREPARE_SUCCESS:
            self._multi_exposure_if = multi_exposure_if
            self.btn_run.setEnabled(True)
            send_static_message(self, ready_message())
            self.list_layers_in_map_report.clear()
            return
        else:
            disable_busy_cursor()
            send_error_message(self, message)
            self._multi_exposure_if = None

    def accept(self):
        """Launch the multi exposure analysis."""
        if not isinstance(
                self._multi_exposure_if, MultiExposureImpactFunction):
            # This should not happen as the "accept" button must be disabled if
            # the impact function is not ready.
            return ANALYSIS_FAILED_BAD_CODE, None
        # Jump to the progress tab and freeze the navigation buttons while
        # the analysis is running.
        self.tab_widget.setCurrentIndex(2)
        self.set_enabled_buttons(False)
        enable_busy_cursor()
        try:
            code, message, exposure = self._multi_exposure_if.run()
            message = basestring_to_message(message)
            if code == ANALYSIS_FAILED_BAD_INPUT:
                LOGGER.warning(tr(
                    'The impact function could not run because of the inputs.'
                ))
                send_error_message(self, message)
                LOGGER.warning(message.to_text())
                disable_busy_cursor()
                self.set_enabled_buttons(True)
                return code, message
            elif code == ANALYSIS_FAILED_BAD_CODE:
                LOGGER.warning(tr(
                    'The impact function could not run because of a bug.'))
                LOGGER.exception(message.to_text())
                send_error_message(self, message)
                disable_busy_cursor()
                self.set_enabled_buttons(True)
                return code, message
            if setting('generate_report', True, bool):
                LOGGER.info(
                    'Reports are going to be generated for the multiexposure.')
                # Report for the multi exposure
                report = [standard_multi_exposure_impact_report_metadata_html]
                error_code, message = (self._multi_exposure_if.generate_report(
                    report))
                message = basestring_to_message(message)
                if error_code == ImpactReport.REPORT_GENERATION_FAILED:
                    LOGGER.warning(
                        'The impact report could not be generated.')
                    send_error_message(self, message)
                    LOGGER.exception(message.to_text())
                    disable_busy_cursor()
                    self.set_enabled_buttons(True)
                    return error_code, message
            else:
                LOGGER.warning(
                    'Reports are not generated because of your settings.')
                display_warning_message_bar(
                    tr('Reports'),
                    tr('Reports are not going to be generated because of your '
                       'InaSAFE settings.'),
                    duration=10,
                    iface_object=self.iface
                )
            # We always create the multi exposure group because we need
            # reports to be generated.
            root = QgsProject.instance().layerTreeRoot()
            if len(self.ordered_expected_layers()) == 0:
                # Default ordering: one group for the multi-exposure outputs,
                # plus a sub-group per single-exposure analysis.
                group_analysis = root.insertGroup(
                    0, self._multi_exposure_if.name)
                group_analysis.setItemVisibilityChecked(True)
                group_analysis.setCustomProperty(
                    MULTI_EXPOSURE_ANALYSIS_FLAG, True)
                for layer in self._multi_exposure_if.outputs:
                    QgsProject.instance().addMapLayer(layer, False)
                    layer_node = group_analysis.addLayer(layer)
                    layer_node.setItemVisibilityChecked(False)
                    # set layer title if any
                    try:
                        title = layer.keywords['title']
                        if qgis_version() >= 21800:
                            layer.setName(title)
                        else:
                            layer.setLayerName(title)
                    except KeyError:
                        pass
                for analysis in self._multi_exposure_if.impact_functions:
                    detailed_group = group_analysis.insertGroup(
                        0, analysis.name)
                    detailed_group.setItemVisibilityChecked(True)
                    add_impact_layers_to_canvas(analysis, group=detailed_group)
                if self.iface:
                    self.iface.setActiveLayer(
                        self._multi_exposure_if.analysis_impacted)
            else:
                # The user defined a custom layer order in the dialog;
                # honour it.
                add_layers_to_canvas_with_custom_orders(
                    self.ordered_expected_layers(),
                    self._multi_exposure_if,
                    self.iface)
            if setting('generate_report', True, bool):
                LOGGER.info(
                    'Reports are going to be generated for each single '
                    'exposure.')
                # Report for the single exposure with hazard
                for analysis in self._multi_exposure_if.impact_functions:
                    # we only want to generate non pdf/qpt report
                    html_components = [standard_impact_report_metadata_html]
                    error_code, message = (
                        analysis.generate_report(html_components))
                    message = basestring_to_message(message)
                    if error_code == (
                            ImpactReport.REPORT_GENERATION_FAILED):
                        LOGGER.info(
                            'The impact report could not be generated.')
                        send_error_message(self, message)
                        LOGGER.info(message.to_text())
                        disable_busy_cursor()
                        self.set_enabled_buttons(True)
                        return error_code, message
            else:
                LOGGER.info(
                    'Reports are not generated because of your settings.')
                display_warning_message_bar(
                    tr('Reports'),
                    tr('Reports are not going to be generated because of your '
                       'InaSAFE settings.'),
                    duration=10,
                    iface_object=self.iface
                )
            # If zoom to impact is enabled
            if setting(
                    'setZoomToImpactFlag', expected_type=bool):
                self.iface.zoomToActiveLayer()
            # If hide exposure layers
            if setting('setHideExposureFlag', expected_type=bool):
                treeroot = QgsProject.instance().layerTreeRoot()
                for combo in list(self.combos_exposures.values()):
                    layer = layer_from_combo(combo)
                    if layer is not None:
                        treelayer = treeroot.findLayer(layer.id())
                        if treelayer:
                            treelayer.setItemVisibilityChecked(False)
            # Set last analysis extent
            self._extent.set_last_analysis_extent(
                self._multi_exposure_if.analysis_extent,
                self._multi_exposure_if.crs)
            self.done(QDialog.Accepted)
        except Exception as e:
            # Catch-all boundary: surface the error in the GUI and logs
            # rather than letting the dialog crash QGIS.
            error_message = get_error_message(e)
            send_error_message(self, error_message)
            LOGGER.exception(e)
            LOGGER.debug(error_message.to_text())
        finally:
            disable_busy_cursor()
            self.set_enabled_buttons(True)

    def reject(self):
        """Redefinition of the reject method."""
        self._populate_reporting_tab()
        super(MultiExposureDialog, self).reject()

    def set_enabled_buttons(self, enabled):
        """Enable or disable all dialog navigation buttons at once.

        :param enabled: Whether the buttons should be enabled.
        :type enabled: bool
        """
        self.btn_cancel.setEnabled(enabled)
        self.btn_back.setEnabled(enabled)
        self.btn_next.setEnabled(enabled)
        self.btn_run.setEnabled(enabled)
An acquaintance of mine showed me this site. Ultimately, it's amusing and offending. The kid didn't mean it to be amusing, although the reference to the death penalty for vampires made me think it was, until my acquaintance (who wrote this kid an email) told me that his response was that my acquaintance "must be in jail, because [he] was smoking crack while writing that email." Now a friend of mine is going to write one as well, although it'll probably be more toasty-flavoured. The National Dress Code is ... interesting. And this is the kid who owns the website. Anyhow, I thought the content might provide a good conversation. That pneumonia really got into his brain... He wants to ban a lot of other things too. And yes, the site is for real. The more I read, the more I hate him. I also support a ban on interracial marriage. While I am not racist, I believe that God intends people to marry within their own race. Man... it is all Pink inside. Try another Race sometime and you will see. Never had 45 Minutes of Slow Head have you?
# -*- encoding: utf-8 -*-
from supriya.tools.ugentools.UGen import UGen


class PulseDivider(UGen):
    r'''A pulse divider: emits one trigger for every `div` input triggers.

    ::

        >>> pulse_divider = ugentools.PulseDivider.ar(
        ...     div=2,
        ...     start=0,
        ...     trigger=0,
        ...     )
        >>> pulse_divider
        PulseDivider.ar()

    '''

    ### CLASS VARIABLES ###

    __documentation_section__ = None

    __slots__ = ()

    _ordered_input_names = (
        'trigger',
        'div',
        'start',
        )

    _valid_calculation_rates = None

    ### INITIALIZER ###

    def __init__(
        self,
        calculation_rate=None,
        div=2,
        start=0,
        trigger=0,
        ):
        # Delegate all input wiring to the UGen base class.
        UGen.__init__(
            self,
            calculation_rate=calculation_rate,
            div=div,
            start=start,
            trigger=trigger,
            )

    ### PUBLIC METHODS ###

    @classmethod
    def ar(
        cls,
        div=2,
        start=0,
        trigger=0,
        ):
        r'''Constructs an audio-rate PulseDivider.

        ::

            >>> pulse_divider = ugentools.PulseDivider.ar(
            ...     div=2,
            ...     start=0,
            ...     trigger=0,
            ...     )
            >>> pulse_divider
            PulseDivider.ar()

        Returns ugen graph.
        '''
        from supriya.tools import synthdeftools
        rate = synthdeftools.CalculationRate.AUDIO
        return cls._new_expanded(
            calculation_rate=rate,
            div=div,
            start=start,
            trigger=trigger,
            )

    @classmethod
    def kr(
        cls,
        div=2,
        start=0,
        trigger=0,
        ):
        r'''Constructs a control-rate PulseDivider.

        ::

            >>> pulse_divider = ugentools.PulseDivider.kr(
            ...     div=2,
            ...     start=0,
            ...     trigger=0,
            ...     )
            >>> pulse_divider
            PulseDivider.kr()

        Returns ugen graph.
        '''
        from supriya.tools import synthdeftools
        rate = synthdeftools.CalculationRate.CONTROL
        return cls._new_expanded(
            calculation_rate=rate,
            div=div,
            start=start,
            trigger=trigger,
            )

    ### PUBLIC PROPERTIES ###

    @property
    def div(self):
        r'''Gets `div` input of PulseDivider.

        ::

            >>> pulse_divider = ugentools.PulseDivider.ar(
            ...     div=2,
            ...     start=0,
            ...     trigger=0,
            ...     )
            >>> pulse_divider.div
            2.0

        Returns ugen input.
        '''
        return self._inputs[self._ordered_input_names.index('div')]

    @property
    def start(self):
        r'''Gets `start` input of PulseDivider.

        ::

            >>> pulse_divider = ugentools.PulseDivider.ar(
            ...     div=2,
            ...     start=0,
            ...     trigger=0,
            ...     )
            >>> pulse_divider.start
            0.0

        Returns ugen input.
        '''
        return self._inputs[self._ordered_input_names.index('start')]

    @property
    def trigger(self):
        r'''Gets `trigger` input of PulseDivider.

        ::

            >>> pulse_divider = ugentools.PulseDivider.ar(
            ...     div=2,
            ...     start=0,
            ...     trigger=0,
            ...     )
            >>> pulse_divider.trigger
            0.0

        Returns ugen input.
        '''
        return self._inputs[self._ordered_input_names.index('trigger')]
Celtics, Cavs and Spurs all take losses … not a bad night for the Lakers, despite the fact that they didn’t even set foot on a basketball court. These are great templates. Kenya is a great place with much beautiful scenery, and I am sure Kenya would look beautiful in them. Will recommend them to my friends. No specific ideas, but I was struck by the difference when attending a service in a hall, and I’ve been trying to figure out what made the difference. With the smaller space, there was an intimacy to the layout. Your comment (I think it was on your more recent post) about the congregation being gathered more around the altar reminded me of it. Thanks, Donna. It’s lovely to find out that others share your reading history. The main character in the Malory Towers books is Darrell Rivers – wouldn’t have remembered that but I looked it up – and Blyton’s second husband was Kenneth Darrell Waters! I don’t remember reading the St Clare’s books, but perhaps they’ve all merged into one.