#!/usr/bin/env python2
import curses
import json
import requests
import sys
settings = {
'ip' : '127.0.0.1',
'port' : '80',
'user' : '',
'pssw' : '',
}
keymap = {
'i' : 'ip',
'p' : 'port',
'u' : 'user',
'w' : 'pssw',
}
try:
    with open('/home/osmc/cli_remote.conf', 'r') as f:
        raw_sets = json.load(f)
    settings.update(raw_sets)
except (IOError, ValueError):
    print 'USAGE: cli-remote i=your_ip_address p=your_port u=your_username w=your_password'
    print "All the settings are optional. The defaults will be used in their place if you don't specify them."
    print 'Defaults:'
    print '    ip   : 127.0.0.1'
    print '    port : 80'
    print '    user : ""'
    print '    pass : ""'
    print ''
    print "If you are using this script on the device itself (via ssh or similar) then you don't need to put in the IP address."
    print 'The default of 127.0.0.1 already points to the local host.'
    print ''
    print 'Alternatively, you can save a file called /home/osmc/cli_remote.conf containing this:'
    print '{"ip": "your_ip", "port": "your_port", "user": "your_user", "pssw": "your_pass"}'
    print 'Or just {"port": "your_port"} if that is all you would like to change.'
    print ''
for arg in sys.argv[1:]:
    try:
        k, v = arg.split('=')
    except ValueError:
        continue
    key = keymap.get(k)
    if key is not None:
        settings[key] = v
def call(settings, action, params=None):
url = 'http://%s:%s/jsonrpc' % (settings['ip'], settings['port'])
headers = {'Content-Type': 'application/json'}
    command = {"jsonrpc": "2.0", "method": action, "id": 1}
if params is not None:
command['params'] = params
data=json.dumps(command)
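    # Kodi's JSON-RPC API expects real JSON booleans; the debug-toggle entries in
    # key_map store them as the strings "true"/"false", so unquote them here.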
data = data.replace('"true"', 'true').replace('"false"', 'false')
r = requests.post( url, data=data, headers=headers, auth=(settings['user'], settings['pssw']))
def call_keyboard(settings, text):
url = 'http://%s:%s/jsonrpc' % (settings['ip'], settings['port'])
headers = {'Content-Type': 'application/json'}
command = {"jsonrpc":"2.0","method":"Input.SendText", "params": {"text": text}, "id": 1}
data=json.dumps(command)
r = requests.post( url, data=data, headers=headers, auth=(settings['user'], settings['pssw']))
def test(settings):
url = 'http://%s:%s/jsonrpc' % (settings['ip'], settings['port'])
headers = {'Content-Type': 'application/json'}
data=json.dumps({"jsonrpc":"2.0","method":"JSONRPC.Ping", "id": 1})
r = requests.post( url, data=data, headers=headers, auth=(settings['user'], settings['pssw']))
data=json.dumps({"jsonrpc":"2.0", "method":"GUI.ShowNotification", "params":{"title":"Kodi CLI Remote", "message":"Connected!"}, "id":1})
r = requests.post( url, data=data, headers=headers, auth=(settings['user'], settings['pssw']))
def redraw(stdscr):
stdscr.erase()
stdscr.refresh()
stdscr.addstr(2,0,">>> 'Arrow Keys' to navigate")
stdscr.addstr(3,0,">>> 'Enter' to select")
stdscr.addstr(4,0,">>> 'Backspace' or 'Esc' to navigate back")
stdscr.addstr(5,0,">>> 'c' for the context menu")
stdscr.addstr(6,0,">>> 'i' for info")
stdscr.addstr(7,0,">>> 'o' to toggle the OSD")
stdscr.addstr(8,0,">>> 's' to show codec info")
stdscr.addstr(9,0,">>> '[' and ']' volume up and down")
stdscr.addstr(10,0,">>> 'm' to toggle mute")
stdscr.addstr(11,0,">>> 'k' to enter keyboard mode (send text to Kodi's keyboard)")
stdscr.addstr(12,0,">>> 'd' debugger on, 'f' debugger off")
stdscr.addstr(13,0,">>> 'q' to quit")
stdscr.refresh()
key_map = {
curses.KEY_UP : {'name' : 'Up', 'action' : 'Input.Up'},
curses.KEY_DOWN : {'name' : 'Down', 'action' : 'Input.Down'},
curses.KEY_LEFT : {'name' : 'Left', 'action' : 'Input.Left'},
curses.KEY_RIGHT : {'name' : 'Right', 'action' : 'Input.Right'},
curses.KEY_BACKSPACE : {'name' : 'Back', 'action' : 'Input.Back'},
27 : {'name' : 'Back', 'action' : 'Input.Back'}, # ESC
99 : {'name' : 'ContextMenu', 'action' : 'Input.ContextMenu'}, # c
13 : {'name' : 'Select', 'action' : 'Input.Select'}, # ENTER
105 : {'name' : 'Info', 'action' : 'Input.Info'}, # i
104 : {'name' : 'Home', 'action' : 'Input.Home'}, # h
111 : {'name' : 'ShowOSD', 'action' : 'Input.ShowOSD'}, # o
115 : {'name' : 'ShowCodec', 'action' : 'Input.ShowCodec'}, #s
91 : {'name' : 'VolDown', 'action' : 'Application.SetVolume', # [
"params": { "volume": "decrement" }},
93 : {'name' : 'VolUp', 'action' : 'Application.SetVolume', # ]
"params": { "volume": "increment" }},
100 : {'name' : 'Debugger On', 'action' : 'Settings.SetSettingValue', # d
"params": {"setting":"debug.showloginfo", "value":"true"}},
102 : {'name' : 'Debugger Off', 'action' : 'Settings.SetSettingValue', # f
"params": {"setting":"debug.showloginfo", "value":"false"}},
109 : {'name' : 'Toggle Mute', 'action' : 'Application.SetMute', # m
"params": {"mute":"toggle"}},
}
try:
test(settings)
except requests.ConnectionError:
print 'Failed to connect.'
print 'Ensure that Kodi is able to be controlled via HTTP'
print 'Open the Kodi settings, Service, Web Server, and Enable HTTP remote.'
sys.exit()
stdscr = curses.initscr()
curses.cbreak()
curses.nonl()
stdscr.keypad(1)
redraw(stdscr)
curses.noecho()
key = ''
name = ''
try:
    while key != ord('q'):
        redraw(stdscr)
        if name:
            stdscr.addstr(0, 0, name)
        key = stdscr.getch()
        stdscr.refresh()
        entry = key_map.get(key, {})
        action = entry.get('action')
        params = entry.get('params')
        name = entry.get('name')
        if action is not None:
            curses.setsyx(0, 0)
            call(settings, action, params)
            continue
        if key == ord('k'):
            curses.echo()
            redraw(stdscr)
            stdscr.addstr(0, 0, "<<< KEYBOARD MODE >>>")
            text = stdscr.getstr(0, 23)
            call_keyboard(settings, text)
            curses.noecho()
            redraw(stdscr)
finally:
    # Always restore the terminal, even if a request raises.
    curses.endwin()
|
Dr. Don Gordy uses the high-tech Damon System of braces so patients experience more comfort, shorter appointments and high quality results.
Orthodontics for Families is committed to providing only the latest technologies in orthodontic care, which is why we are a provider of the Damon System of braces. This revolutionary system has a built-in sliding door that allows the wire to slide more freely, so your teeth will move more comfortably during treatment. Traditional braces use elastic bands to hold wires in place, which causes more friction and can at times be very uncomfortable for the patient.
|
"""Wrapper for UDPipe (more pythonic than ufal.udpipe)."""
import io
import sys
from ufal.udpipe import Model, Pipeline, ProcessingError, Sentence # pylint: disable=no-name-in-module
from udapi.core.resource import require_file
from udapi.block.read.conllu import Conllu as ConlluReader
from udapi.core.root import Root
class UDPipe:
"""Wrapper for UDPipe (more pythonic than ufal.udpipe)."""
def __init__(self, model):
"""Create the UDPipe tool object."""
self.model = model
path = require_file(model)
self.tool = Model.load(path)
if not self.tool:
raise IOError("Cannot load model from file '%s'" % path)
self.error = ProcessingError()
self.conllu_reader = ConlluReader()
self.tokenizer = self.tool.newTokenizer(Model.DEFAULT)
def tag_parse_tree(self, root):
"""Tag (+lemmatize, fill FEATS) and parse a tree (already tokenized)."""
descendants = root.descendants
if not descendants:
return
pipeline = Pipeline(self.tool, 'horizontal', Pipeline.DEFAULT, Pipeline.DEFAULT, 'conllu')
in_data = " ".join([n.form for n in descendants])
out_data = pipeline.process(in_data, self.error)
if self.error.occurred():
raise IOError("UDPipe error " + self.error.message)
self.conllu_reader.files.filehandle = io.StringIO(out_data)
parsed_root = self.conllu_reader.read_tree()
nodes = [root] + descendants
for parsed_node in parsed_root.descendants:
node = nodes[parsed_node.ord]
node.parent = nodes[parsed_node.parent.ord]
for attr in 'upos xpos lemma feats deprel'.split():
setattr(node, attr, getattr(parsed_node, attr))
# TODO: benchmark which solution is the fastest one. E.g. we could also do
# for node, parsed_node in zip(root.descendants, parsed_root.descendants):
# parsed_node.misc = node.misc
# pylint: disable=protected-access
#root._children, root._descendants = parsed_root._children, parsed_root._descendants
def tokenize_tag_parse_tree(self, root, resegment=False, tag=True, parse=True):
"""Tokenize, tag (+lemmatize, fill FEATS) and parse the text stored in `root.text`.
If resegment=True, the returned list of Udapi trees may contain multiple trees.
"""
if root.children:
raise ValueError('Tree already contained nodes before tokenization')
# Tokenize and segment the text (segmentation cannot be turned off in older UDPipe versions).
self.tokenizer.setText(root.text)
is_another = True
u_sentences = []
while is_another:
u_sentence = Sentence()
is_another = self.tokenizer.nextSentence(u_sentence)
if is_another:
u_sentences.append(u_sentence)
# If resegmentation was not required, we need to join the segments.
if not resegment and len(u_sentences) > 1:
first_sent = u_sentences[0]
n_words = first_sent.words.size() - 1
for other_sent in u_sentences[1:]:
other_words = other_sent.words.size() - 1
for i in range(1, other_words + 1):
u_w = other_sent.words[i]
n_words += 1
u_w.id = n_words
first_sent.words.append(u_w)
u_sentences = [first_sent]
# tagging and parsing
if tag:
for u_sentence in u_sentences:
self.tool.tag(u_sentence, Model.DEFAULT)
if parse:
self.tool.parse(u_sentence, Model.DEFAULT)
elif parse:
raise ValueError('Combination parse=True tag=False is not allowed.')
# converting UDPipe nodes to Udapi nodes
new_root = root
trees = []
for u_sentence in u_sentences:
if not new_root:
new_root = Root()
new_root.text = u_sentence.getText() if resegment else root.text
heads, nodes = [], [new_root]
u_words = u_sentence.words
for i in range(1, u_words.size()):
u_w = u_words[i]
node = new_root.create_child(
form=u_w.form, lemma=u_w.lemma, upos=u_w.upostag,
xpos=u_w.xpostag, feats=u_w.feats, deprel=u_w.deprel, misc=u_w.misc,
)
if parse:
heads.append(u_w.head)
nodes.append(node)
if parse:
for node in nodes[1:]:
head = heads.pop(0)
node.parent = nodes[head]
trees.append(new_root)
new_root = None
return trees
def segment_text(self, text):
"""Segment the provided text into sentences."""
self.tokenizer.setText(text)
is_another = True
sentences = []
while is_another:
u_sentence = Sentence()
is_another = self.tokenizer.nextSentence(u_sentence)
if is_another:
sentences.append(u_sentence.getText())
return sentences
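# A minimal usage sketch (not part of the original module). The model path below
# is a placeholder; any UDPipe model file should work the same way.
if __name__ == '__main__':
    udpipe = UDPipe(model='english-ewt-ud-2.5.udpipe')  # placeholder path
    root = Root()
    root.text = 'Hello world. How are you?'
    for tree in udpipe.tokenize_tag_parse_tree(root, resegment=True):
        print([(node.form, node.upos, node.deprel) for node in tree.descendants])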
|
Get ready for non-stop, fast-paced action as Speed, Conor and Lucy prepare for the next big race, the Redwood Rally. All seems to be going well until strange things begin to happen on the virtual track, including blinding leaves, falling acorns and a menacing Giant Conor. Clearly someone's been tampering with the virtual programming, but Speed and the gang are hot on the trail to solve the mystery. Annalise, the daughter of evil billionaire Zile Zazic, sets out to spy on Speed's investigation. And before you can say "Go, Speed Racer," they both get caught up in a freak accident that leaves them trapped inside the malfunctioning virtual world with a hungry high-tech virus closing in fast! In order to get them out alive, everyone will have to put their differences aside and work together in a fast and furious race against time. But just when it seems like the day is saved, there is one last - and very big - surprise as the real and virtual worlds collide!
One of the first examples of Japanese anime to find a significant audience in the United States, Speed Racer was an animated television series whose bold graphic style, fast-paced action, and curious English-language dubbing won a cult following in America. Despite its title, Speed Racer: The Movie is actually a short feature cobbled together from two vintage episodes of the original TV show.
anime & animation, children & family - Conor pledges to improve the Mach 6's stealth systems after an unsuccessful training exercise. Before Conor is able to make the final modifications, Zile's goons attempt to capture the Mach 6.
Maurizio Merli stars as a hot-shot police driver who has more guts than brains, often landing him in hot water with his middle-aged mentor, who was once a legendary police interceptor responsible for numerous large scale arrests.
Speed Racer: Race to the Future is a 2013 Indian-American Flash-animated film based on Tatsuo Yoshida's Speed Racer manga franchise. The movie was directed by Robert H. Fuentes III and produced by Toonz Entertainment for Imira Entertainment. Unlike Toonz's work on the second season of Speed Racer: The Next Generation, Race to the Future is set in the universe of the original animated series. Viva Pictures acquired the rights to distribute the film in North America in 2015. Through Cinedigm, the film was released direct-to-video on DVD and streaming platforms on January 12, 2016.
A portrait of Athens, Georgia singer-songwriter Vic Chesnutt.
Teenager Speed Racer aspires to be the world's best race-car champion with the help of his friends, family and his father's high-tech race-car, the Mach 5.
Speed Racer X, known in Japan as Mach Go Go Go, is a remake of the original 1967 series produced by Tatsunoko Production, the same studio that did the original. The show originally aired in Japan in 1997 on TV Tokyo and lasted only 34 episodes of a planned 52. An English adaptation was later produced by DIC Entertainment and aired in the United States on Nickelodeon's short-lived action block, Slam. This show was quickly taken off the air due to a lawsuit between DiC and the Santa Monica-based Speed Racer Enterprises, the company which owns the American rights of the franchise.
Speed Racer: The Next Generation is an American animated television series based on the classic Japanese Speed Racer franchise, in which the internal events take place decades after those in the 1967 Japanese series. It is the fourth television adaptation of the franchise, and is executive produced by Lions Gate Entertainment, Larry Schwarz, and Ken Katsumoto. It is the first Nicktoon not to be based on an original property. Animation Collective produced the series, while the Flash character animation was handled by the now-defunct Collideascope Studios as their very last project. The last episode of Season 1 features the voice of NASCAR racer Jeff Gordon, who plays Turbo McCalister. This series was partly made to promote the live-action film, and the pilot movie premiered on Nicktoons Network on May 2, 2008, a week before the feature film adaptation was released in theatres. However, both projects were produced independently from one another and featured different generations of "Speed Racers", though both featured a Mach 6. Five three-part specials aired on Nickelodeon from March 14, 2009 to April 11, 2009. A second season began airing on March 24, 2011. The animation, layout, and 3D effects were outsourced to Toonz Entertainment in India for this season. After his death, Peter Fernandez's roles were replaced by Greg Abbey.
|
# --------------------------------------------------------------------------------------------
# Image collection generation.
# I used this python script to generate a number of scaled images for Apple device slices.
#
# Run from Gimp->Filters->Python-Fu console
#
# Assume that we have a collection of related images that are scaled the same way
# to the Apple device slices.
#
# We choose one image as the 'key image' that the other images use for resizing ratios.
# 8 Jan 2017 - Barrett Davis
# --------------------------------------------------------------------------------------------
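# Note: `pdb` and constants such as `CLIP_TO_IMAGE` are provided by GIMP's
# Python-Fu environment; if run outside the console, `from gimpfu import *` is needed.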
def load_png( directory, filebase ):
filetype = '.png'
filename = filebase + filetype
filepath = directory + filename
return pdb.file_png_load(filepath, filename)
def export_png( img, width, height, directory, filebase, descriptor ):
filetype = '.png'
filename = filebase + '_' + descriptor + filetype
filepath = directory + filename
dupe = pdb.gimp_image_duplicate(img)
dupe.scale( width, height )
layer = pdb.gimp_image_merge_visible_layers(dupe, CLIP_TO_IMAGE)
# print 'saving ' + filepath
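    # file_png_save2 args after the filenames: interlace=1, compression=9, then
    # flags for the bKGD, gAMA, oFFs, pHYs, tIME, comment and transparency chunks.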
pdb.file_png_save2(dupe, layer, filepath, filename,1,9,1,1,1,1,1,0,1)
pdb.gimp_image_delete(dupe)
def generate_png( img, keySize, key1xSize, multiplier, directory, filebase, descriptor ):
ratio = (float(key1xSize) * float( multiplier )) / float(keySize)
width = int(round( float( img.width ) * ratio ))
height = int(round( float( img.height ) * ratio ))
export_png( img, width, height, directory, filebase, descriptor )
def generate_iphone( img, keySize, key1xSize, directory, filebase ):
descriptor = 'iPhone'
generate_png( img, keySize, key1xSize, 1.0, directory, filebase, descriptor + '1x')
generate_png( img, keySize, key1xSize, 2.0, directory, filebase, descriptor + '2x')
generate_png( img, keySize, key1xSize, 3.0, directory, filebase, descriptor + '3x')
def generate_ipad( img, keySize, key1xSize, directory, filebase ):
descriptor = 'iPad'
generate_png( img, keySize, key1xSize, 1.0, directory, filebase, descriptor + '1x')
generate_png( img, keySize, key1xSize, 2.0, directory, filebase, descriptor + '2x')
def generate_apple_tv( img, keySize, key1xSize, directory, filebase ):
descriptor = 'AppleTV'
generate_png( img, keySize, key1xSize, 1.0, directory, filebase, descriptor + '1x')
def generate_mac( img, keySize, key1xSize, directory, filebase ):
descriptor = 'Mac'
generate_png( img, keySize, key1xSize, 1.0, directory, filebase, descriptor + '1x')
generate_png( img, keySize, key1xSize, 2.0, directory, filebase, descriptor + '2x')
# Images
imageDir = '/Volumes/Data/Pictures/Games/tumble/master/'
# Bot - key image
botName = 'bot'
botDir = imageDir + botName + '/'
botImage = load_png( botDir, botName )
# Collar
collarName = 'collar'
collarDir = imageDir + collarName + '/'
collarImage = load_png( collarDir, collarName )
# Strut
strutName = 'strut'
strutDir = imageDir + strutName + '/'
strutImage = load_png( strutDir, strutName )
# Sizes should be float
keySize = float(botImage.height) # All resizing keys off of the bot height
iPhone1xSize = 64.0 # Bot height for iPhone 1x
iPad1xSize = 154.0 # Bot height for iPad 1x
tv1xSize = 154.0 # Bot height for Apple TV 1x
mac1xSize = 288.0 # Bot height for Mac 1x
# iPhone scale
generate_iphone( botImage, keySize, iPhone1xSize, botDir, botName )
generate_iphone( collarImage, keySize, iPhone1xSize, collarDir, collarName )
generate_iphone( strutImage, keySize, iPhone1xSize, strutDir, strutName )
# iPad scale
generate_ipad( botImage, keySize, iPad1xSize, botDir, botName )
generate_ipad( collarImage, keySize, iPad1xSize, collarDir, collarName )
generate_ipad( strutImage, keySize, iPad1xSize, strutDir, strutName )
# Apple TV scale
generate_apple_tv( botImage, keySize, tv1xSize, botDir, botName )
generate_apple_tv( collarImage, keySize, tv1xSize, collarDir, collarName )
generate_apple_tv( strutImage, keySize, tv1xSize, strutDir, strutName )
# Mac scale
generate_mac( botImage, keySize, mac1xSize, botDir, botName )
generate_mac( collarImage, keySize, mac1xSize, collarDir, collarName )
generate_mac( strutImage, keySize, mac1xSize, strutDir, strutName )
|
Here are your class management materials for this month. Our Drill for Skill theme is Power and our Life Lesson theme for your mat chats is Self-Discipline.
Here are your class management materials for this month. Our Drill for Skill theme is Accuracy and our Life Lesson theme for your mat chats is Honesty.
|
import fresh_tomatoes
import media
the_matrix = media.Movie("The Matrix",
"A computer hacker learns from mysterious rebels about the true nature of his reality and his role in the war against its controllers.",
"https://images-na.ssl-images-amazon.com/images/M/MV5BMDMyMmQ5YzgtYWMxOC00OTU0LWIwZjEtZWUwYTY5MjVkZjhhXkEyXkFqcGdeQXVyNDYyMDk5MTU@._V1_SY1000_CR0,0,723,1000_AL_.jpg",
"https://www.youtube.com/watch?v=m8e-FF8MsqU")
#print (the_matrix.storyline)
#the_matrix.show_trailer()
the_matrix_reloaded = media.Movie("The Matrix Reloaded",
"Neo and the rebel leaders estimate that they have 72 hours until 250,000 probes discover Zion and destroy it and its inhabitants. During this, Neo must decide how he can save Trinity from a dark fate in his dreams.",
"https://images-na.ssl-images-amazon.com/images/M/MV5BMjA0NDM5MDY2OF5BMl5BanBnXkFtZTcwNzg5OTEzMw@@._V1_SY1000_CR0,0,674,1000_AL_.jpg",
"https://www.youtube.com/watch?v=kYzz0FSgpSU")
#print (the_matrix_reloaded.storyline)
#the_matrix_reloaded.show_trailer()
the_matrix_revolutions = media.Movie("The Matrix Revolutions",
"The human city of Zion defends itself against the massive invasion of the machines as Neo fights to end the war at another front while also opposing the rogue Agent Smith.",
"https://images-na.ssl-images-amazon.com/images/M/MV5BMTkyNjc4NTQzOV5BMl5BanBnXkFtZTcwNDYzMTQyMQ@@._V1_.jpg",
"https://www.youtube.com/watch?v=hMbexEPAOQI")
#print (the_matrix_revolutions.storyline)
#the_matrix_revolutions.show_trailer()
movies = [the_matrix, the_matrix_reloaded, the_matrix_revolutions]
#fresh_tomatoes.open_movies_page(movies)
print (media.Movie.VALID_RATINGS)
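# For reference, a minimal sketch of the `media` module this script expects,
# modeled on the Udacity ud036 starter project (names here are assumptions):
#
#   import webbrowser
#
#   class Movie(object):
#       VALID_RATINGS = ['G', 'PG', 'PG-13', 'R']
#
#       def __init__(self, title, storyline, poster_image_url, trailer_youtube_url):
#           self.title = title
#           self.storyline = storyline
#           self.poster_image_url = poster_image_url
#           self.trailer_youtube_url = trailer_youtube_url
#
#       def show_trailer(self):
#           webbrowser.open(self.trailer_youtube_url)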
|
Katharine Vincent is interested in the human dimensions of climate change, particularly vulnerability and adaptation to climate change, from a gendered perspective. She is a Lead Author for the cross-chapter gender box in the IPCC Fifth Assessment Report. As well as supporting the integration of gender across the DECCMA consortium, Katharine is particularly involved in Work Packages 1 and 6.
|
# Copyright (c) 2011-2012 Vit Suchomel and Jan Pomikalek
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
"""Character encoding detection library."""
import os
import sys
import struct
ENCODE_REPLACEMENT_CHARACTER = '\x00'
MODEL_VERSION = '1.3'
def list_models():
"Returns a list of inbuilt models."
models = []
models_dir = os.path.join(
os.path.dirname(sys.modules['chared'].__file__), 'models')
for filename in os.listdir(models_dir):
if filename.endswith('.edm'):
models.append(filename.rsplit('.', 1)[0])
return sorted(models)
def get_model_path(model_id):
"""
Returns the full path to the model with given id or None if no model with
the ID exists.
"""
models_dir = os.path.join(
os.path.dirname(sys.modules['chared'].__file__), 'models')
filepath = os.path.join(models_dir, model_id + '.edm')
if os.path.isfile(filepath):
return filepath
else:
return None
def scalar_product(vec1, vec2):
"Returns a scalar product of the two vectors."
result = 0
    for key in vec1:
        if key in vec2:
result += vec1[key] * vec2[key]
return result
def replace_by_zero(error):
"""
Replaces unknown bytes while encoding/decoding.
The function has to be registered using codecs.register_error.
"""
if isinstance(error, UnicodeEncodeError):
return (unicode(ENCODE_REPLACEMENT_CHARACTER), error.end)
elif isinstance(error, UnicodeDecodeError):
return (u'\ufffd', error.end)
raise error
class EncodingDetector(object):
VECTOR_TUPLE_LENGTH = 3
def __init__(self, version=MODEL_VERSION, vectors={}, enc_order=()):
self._version = version
self._vectors = vectors
self._encodings_order = enc_order
def get_version(self):
return self._version
def save(self, path):
"""
Saves the model to the specified path.
File format:
        general row: <version><TAB><tuple length><TAB><encodings count>
for each encoding:
info row: <name><TAB><order><TAB><vector length>
vector row: <key><packed value>...
"""
with open(path, 'wb') as fp:
#basic attributes
fp.write('%s\t%d\t%d\n' %
(self._version, self.VECTOR_TUPLE_LENGTH, len(self._vectors)))
#vectors
for enc, vector in self._vectors.iteritems():
#encoding name, encoding order
vect_len = len(vector)
enc_order = self.get_encoding_order(enc)
fp.write('%s\t%d\t%d\n' % (enc, enc_order, vect_len))
#vector keys & values
for k, v in vector.iteritems():
fp.write('%s%s' % (k, struct.pack('=I', v)))
fp.write('\n')
@classmethod
def load(cls, path):
"""
Loads the model from the specified path.
Returns a new instance of EncodingDetector.
"""
version = ''
vectors = {}
enc_order = {}
with open(path, 'rb') as fp:
#basic attributes
version, vect_tuple_length, enc_count = fp.readline().split('\t')
if MODEL_VERSION != version:
sys.stderr.write('WARNING: Potentially incompatible model versions!\n')
sys.stderr.write('\t%s: %s\n\tthis module: %s\n' % (path, version, MODEL_VERSION))
vect_tuple_length = int(vect_tuple_length)
#vectors
for i in range(int(enc_count)):
#encoding name, encoding order
enc, order, vect_len = fp.readline().split('\t')
enc_order[int(order)] = enc
#vector keys & values
vectors[enc] = {}
for j in range(int(vect_len)):
key = fp.read(vect_tuple_length)
vectors[enc][key] = struct.unpack('=I', fp.read(4))[0]
fp.read(1)
        return EncodingDetector(version, vectors,
                                tuple(enc_order[order] for order in sorted(enc_order)))
def vectorize(self, string):
"""
Transforms the input strings into a frequency vector of n-grams of
contained characters.
Omits vector keys containing the encoding replacement character.
"""
str_len = len(string)
if self.VECTOR_TUPLE_LENGTH > str_len:
return {}
vector = {}
for i in range(str_len - self.VECTOR_TUPLE_LENGTH + 1):
key = string[i:i + self.VECTOR_TUPLE_LENGTH]
if ENCODE_REPLACEMENT_CHARACTER not in key:
vector[key] = vector.get(key, 0) + 1
return vector
def train(self, string, encoding):
"Trains the detector. The input must be a string and its encoding."
self._vectors[encoding] = self.vectorize(string)
def set_encodings_order(self, encodings):
"""
Defines the order (importance / frequency of use) of the encodings
the classifier has been trained on. The input must be a list or a
tuple of encodings. The first is the most important and the last is
the least important.
"""
if not isinstance(encodings, (tuple, list)):
raise TypeError
self._encodings_order = tuple(encodings)
def get_encoding_order(self, encoding):
"""
Returns the order of the encoding or sys.maxint if no order is
defined for it.
"""
if encoding in self._encodings_order:
return self._encodings_order.index(encoding)
return sys.maxint
def classify(self, string):
"""
Returns the predicted character encoding(s) for the input string as
a list. The list may contain more than one element if there are
multiple equally likely candidates. In this case, the candidates are
returned in the order of importance (see set_encodings_order). Empty
list may be returned if there are no valid candidates.
"""
input_vector = self.vectorize(string)
classification = []
for clas, vector in self._vectors.iteritems():
score = scalar_product(input_vector, vector)
clas_info = {'clas': clas, 'score': score,
'order': self.get_encoding_order(clas)}
classification.append(clas_info)
if not classification:
return []
#order result classes
# 1.) by vector similarity score (higher score is better)
# 2.) by the encoding order (lower index is better)
        classification.sort(key=lambda x: (-x['score'], x['order']))
        #return a list of the top classes
        # the top classes have the same score as the first one
first = classification[0]
result = []
for clas in classification:
if first['score'] == clas['score']:
result.append(clas['clas'])
return result
def reduce_vectors(self):
"""
Remove the common parts of all vectors. Should be called after all
training data has been loaded. Provided the training has been performed
on the same data for all encodings, reducing vectors increases both
efficiency and accuracy of the classification.
"""
#get frequencies of (key, value) pairs
key_value_count = {}
for vect in self._vectors.values():
for key, value in vect.iteritems():
key_value_count[(key, value)] = key_value_count.get(
(key, value), 0) + 1
#remove common parts of vectors (the (key, value) pairs with the
#frequency equal to the number of vectors)
encodings_count = len(self._vectors)
for (key, value), count in key_value_count.iteritems():
if count >= encodings_count:
for vect in self._vectors.values():
                    if key in vect:
del vect[key]
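# Minimal usage sketch (not part of the original library; the sample text is made up).
if __name__ == '__main__':
    sample = u'p\u0159\xedli\u0161 \u017elu\u0165ou\u010dk\xfd k\u016f\u0148'
    detector = EncodingDetector()
    for enc in ('utf_8', 'iso8859_2'):
        detector.train(sample.encode(enc), enc)
    detector.set_encodings_order(('utf_8', 'iso8859_2'))
    detector.reduce_vectors()
    print detector.classify(sample.encode('utf_8'))  # expected: ['utf_8']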
|
Ireland donated the former LÉ Aoife to help tackle the refugee crisis in the Mediterranean. But the gesture’s getting a mixed response from the Maltese.
AS YOU’LL NO DOUBT have read on popular news website TheJournal.ie, the Irish Government is giving the former naval vessel the LÉ Aoife to Malta to help deal with the refugee crisis in the Mediterranean.
The vessel was decommissioned from the Naval Service in Waterford at the end of last month, after 36 years in service.
We’d initially planned to sell it – but Defence Minister Simon Coveney confirmed yesterday that he had agreed to donate the ship “to address a pressing short-term shortfall in the naval capacity of Malta”.
“I kinda like the idea that the LÉ AOIFE will still be patrolling somewhere especially where it will help to save lives instead of ending up scrapped.
“Malta is a very small country with it’s own debt problem that has to deal with an ongoing humanitarian crisis on the borders of Europe.
The reaction on the website of English-language newspaper The Times of Malta was a little more mixed, however.
That’s easy the donate an obsolete ship to show solidarity. At the end we (Maltese Tax payers) have to pay for its upkeep and running (no joke for a 35 year old ship). What we need most is the relocation of immigrants, not ships.
We neither need vessels and nor money we just need push back that was what we were promised.
Thank you for giving us a 36 year old ship.. but what Malta was hoping for is Burden Sharing!!
Excellent done by the Republic of Ireland to give an offshore patrol vessel to Malta to help it cope with the migration crisis. That will help to rescue more migrants. But more European countries should do the same and give Malta patrol vessels. More and more migrants will try to cross the Mediterranean therefore Malta needs more patrol vessels.
Meanwhile, in his official response, Malta’s Home Affairs and National Security Minister Carmelo Abela said the country appreciated the Irish donation, and said it would be useful in supporting Malta’s border security work and efforts to tackle the migration crisis.
Malta routinely coordinates the rescue and takes in scores of refugees from the Middle East, N.Africa and the Sahel, often in treacherous sea conditions. This donation from the Irish Defence forces will contribute additional capability to the Maltese authorities, and especially the Armed Forces of Malta in their humanitarian work.
The UNHCR says that almost 3,500 people died trying to cross the Mediterranean Sea to Europe last year. In the latest tragedy, last week, at least 300 migrants are feared to have drowned trying to cross from north Africa.
|
#!/usr/bin/python3
# coding=utf-8
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" IDNA encoding/decoding differential fuzzer for Python idna vs libidn2
This is a differential fuzzer that compares the Python `idna` package with the
`libidn2` package. It only considers situations where both libraries consider a
domain to be valid, but produce different results. libidn2 is called via a thin
wrapper that defines libidn2 Python bindings.
This fuzzer enables UTS#46 translation (a feature that transforms certain
invalid characters into valid ones), and fuzzes against other encoding options.
To run this fuzzer, you'll need to install a thin wrapper to make libidn2
callable from Python; install libidn2, then cd to `libidn2_wrapper/` and run
`pip3 install .`.
This fuzzer found a number of domains which encode differently in Python `idna`
vs. `libidn2`. The fuzzer was designed to find mistakes in the Python idna
package, but actually found problems with libidn2.
As an example, `a.İ᷹` (codepoints `['61', '2e', '130', '1df9']`) encodes to the
Punycode `a.xn--i-9bb708r` in Python, but `a.xn--i-9bb808r` in libidn2. This
error occurs because libidn2 supports Unicode 11 and therefore accepts the
domain as valid; but it relies on `libunistring`, which only supports
Unicode 9 and therefore produces incorrect metadata about Unicode 11 characters.
"""
import atheris
import idna
import sys
import libidn2
def TestOneInput(input_bytes):
fdp = atheris.FuzzedDataProvider(input_bytes)
transitional = fdp.ConsumeBool()
std3 = fdp.ConsumeBool()
original = "a." + fdp.ConsumeUnicode(253)
try:
libidn2_encoded = libidn2.encode(
original,
uts46=True,
transitional=transitional,
nfc=True,
std3=std3)
idna_encoded = idna.encode(
original,
strict=False,
uts46=True,
transitional=transitional,
std3_rules=std3).lower()
  except Exception:
return
if idna_encoded != libidn2_encoded:
sys.stderr.write("Transitional=%s, std3=%s\n" % (transitional, std3))
sys.stderr.write("Input codepoints: %s\n" %
[hex(ord(x))[2:] for x in original])
raise RuntimeError(
"IDNA encoding disagrees with libidn2 encoding.\nInput: %s\nIDNA encoding: %s\nlibidn2 encoding: %s\n"
% (original, idna_encoded, libidn2_encoded))
idna_decoded = idna.decode(idna_encoded, uts46=True, std3_rules=std3)
libidn2_decoded = libidn2.decode(idna_encoded, uts46=True, std3=std3)
if idna_decoded != libidn2_decoded:
raise RuntimeError(
"IDNA decoding disagrees with libidn2 decoding.\nInput: %s\nEncoding: %s\nIDNA decoding: %s\nlibidn2 decoding: %s"
% (original, idna_encoded, idna_decoded, libidn2_decoded))
def main():
atheris.Setup(sys.argv, TestOneInput)
atheris.Fuzz()
if __name__ == "__main__":
main()
|
Site is offline. CREAPHP.COM has #13 947 053 rank on the internet. This rank shows site's popularity. Lower rank means more visitors that site gets. This website is estimated to get 0 unique visitors per day. These unique visitors make 0 pageviews. We estimate that this website earns at least $0.00 USD per day with advertising revenues so it can be valued at least $7.54 USD. This site has a /10 PageRank. It has backward links from domains, backward links from .edu domains and links from .gov domains. IP address of this site is . We detected that the average page load time of this website is 0.00 seconds.
CREAPHP.COM is ranked #13 947 053 on the internet. This rank shows site's popularity. Lower rank means more visitors that site gets. Most of users come to this website from n/a. This website is ranked #0 in n/a. It has 0 visitors per day, and has 0 pageviews per day. Click on the tabs below to get more info.
We found that the main page of creaphp.com has external links and internal links.
|
"""
Helper functions for loading environment settings.
"""
from __future__ import print_function
import json
import os
import sys
from time import sleep
import memcache
from lazy import lazy
from path import Path as path
from paver.easy import sh
from pavelib.utils.cmd import django_cmd
def repo_root():
"""
Get the root of the git repository (edx-platform).
This sometimes fails on Docker Devstack, so it's been broken
down with some additional error handling. It usually starts
working within 30 seconds or so; for more details, see
https://openedx.atlassian.net/browse/PLAT-1629 and
https://github.com/docker/for-mac/issues/1509
"""
file_path = path(__file__)
attempt = 1
while True:
try:
absolute_path = file_path.abspath()
break
except OSError:
print('Attempt {}/180 to get an absolute path failed'.format(attempt))
if attempt < 180:
attempt += 1
sleep(1)
else:
print('Unable to determine the absolute path of the edx-platform repo, aborting')
raise
return absolute_path.parent.parent.parent
class Env(object):
"""
Load information about the execution environment.
"""
# Root of the git repository (edx-platform)
REPO_ROOT = repo_root()
# Reports Directory
REPORT_DIR = REPO_ROOT / 'reports'
METRICS_DIR = REPORT_DIR / 'metrics'
# Generic log dir
GEN_LOG_DIR = REPO_ROOT / "test_root" / "log"
# Python unittest dirs
PYTHON_COVERAGERC = REPO_ROOT / ".coveragerc"
# Bok_choy dirs
BOK_CHOY_DIR = REPO_ROOT / "common" / "test" / "acceptance"
BOK_CHOY_LOG_DIR = GEN_LOG_DIR
BOK_CHOY_REPORT_DIR = REPORT_DIR / "bok_choy"
BOK_CHOY_A11Y_REPORT_DIR = REPORT_DIR / "a11y"
BOK_CHOY_COVERAGERC = BOK_CHOY_DIR / ".coveragerc"
BOK_CHOY_A11Y_COVERAGERC = BOK_CHOY_DIR / ".a11ycoveragerc"
BOK_CHOY_A11Y_CUSTOM_RULES_FILE = (
REPO_ROOT / "node_modules" / "edx-custom-a11y-rules" /
"lib" / "custom_a11y_rules.js"
)
PA11YCRAWLER_REPORT_DIR = REPORT_DIR / "pa11ycrawler"
PA11YCRAWLER_COVERAGERC = BOK_CHOY_DIR / ".pa11ycrawlercoveragerc"
# If set, put reports for run in "unique" directories.
# The main purpose of this is to ensure that the reports can be 'slurped'
# in the main jenkins flow job without overwriting the reports from other
# build steps. For local development/testing, this shouldn't be needed.
if os.environ.get("SHARD", None):
shard_str = "shard_{}".format(os.environ.get("SHARD"))
BOK_CHOY_REPORT_DIR = BOK_CHOY_REPORT_DIR / shard_str
BOK_CHOY_LOG_DIR = BOK_CHOY_LOG_DIR / shard_str
# For the time being, stubs are used by both the bok-choy and lettuce acceptance tests
# For this reason, the stubs package is currently located in the Django app called "terrain"
# where other lettuce configuration is stored.
BOK_CHOY_STUB_DIR = REPO_ROOT / "common" / "djangoapps" / "terrain"
# Directory that videos are served from
VIDEO_SOURCE_DIR = REPO_ROOT / "test_root" / "data" / "video"
# Detect if in a Docker container, and if so which one
SERVER_HOST = os.environ.get('BOK_CHOY_HOSTNAME', '0.0.0.0')
USING_DOCKER = SERVER_HOST != '0.0.0.0'
SETTINGS = 'bok_choy_docker' if USING_DOCKER else 'bok_choy'
DEVSTACK_SETTINGS = 'devstack_docker' if USING_DOCKER else 'devstack'
TEST_SETTINGS = 'test'
BOK_CHOY_SERVERS = {
'lms': {
'host': SERVER_HOST,
'port': os.environ.get('BOK_CHOY_LMS_PORT', '8003'),
'log': BOK_CHOY_LOG_DIR / "bok_choy_lms.log"
},
'cms': {
'host': SERVER_HOST,
'port': os.environ.get('BOK_CHOY_CMS_PORT', '8031'),
'log': BOK_CHOY_LOG_DIR / "bok_choy_studio.log"
}
}
BOK_CHOY_STUBS = {
'xqueue': {
'port': 8040,
'log': BOK_CHOY_LOG_DIR / "bok_choy_xqueue.log",
'config': 'register_submission_url=http://0.0.0.0:8041/test/register_submission',
},
'ora': {
'port': 8041,
'log': BOK_CHOY_LOG_DIR / "bok_choy_ora.log",
'config': '',
},
'comments': {
'port': 4567,
'log': BOK_CHOY_LOG_DIR / "bok_choy_comments.log",
},
'video': {
'port': 8777,
'log': BOK_CHOY_LOG_DIR / "bok_choy_video_sources.log",
'config': "root_dir={}".format(VIDEO_SOURCE_DIR),
},
'youtube': {
'port': 9080,
'log': BOK_CHOY_LOG_DIR / "bok_choy_youtube.log",
},
'edxnotes': {
'port': 8042,
'log': BOK_CHOY_LOG_DIR / "bok_choy_edxnotes.log",
},
'ecommerce': {
'port': 8043,
'log': BOK_CHOY_LOG_DIR / "bok_choy_ecommerce.log",
},
'catalog': {
'port': 8091,
'log': BOK_CHOY_LOG_DIR / "bok_choy_catalog.log",
},
}
# Mongo databases that will be dropped before/after the tests run
MONGO_HOST = 'edx.devstack.mongo' if USING_DOCKER else 'localhost'
BOK_CHOY_MONGO_DATABASE = "test"
BOK_CHOY_CACHE_HOST = 'edx.devstack.memcached' if USING_DOCKER else '0.0.0.0'
BOK_CHOY_CACHE = memcache.Client(['{}:11211'.format(BOK_CHOY_CACHE_HOST)], debug=0)
# Test Ids Directory
TEST_DIR = REPO_ROOT / ".testids"
# Configured browser to use for the js test suites
SELENIUM_BROWSER = os.environ.get('SELENIUM_BROWSER', 'firefox')
if USING_DOCKER:
KARMA_BROWSER = 'ChromeDocker' if SELENIUM_BROWSER == 'chrome' else 'FirefoxDocker'
else:
KARMA_BROWSER = 'FirefoxNoUpdates'
# Files used to run each of the js test suites
# TODO: Store this as a dict. Order seems to matter for some
# reason. See issue TE-415.
KARMA_CONFIG_FILES = [
REPO_ROOT / 'cms/static/karma_cms.conf.js',
REPO_ROOT / 'cms/static/karma_cms_squire.conf.js',
REPO_ROOT / 'lms/static/karma_lms.conf.js',
REPO_ROOT / 'lms/static/karma_lms_coffee.conf.js',
REPO_ROOT / 'common/lib/xmodule/xmodule/js/karma_xmodule.conf.js',
REPO_ROOT / 'common/static/karma_common.conf.js',
REPO_ROOT / 'common/static/karma_common_requirejs.conf.js',
]
JS_TEST_ID_KEYS = [
'cms',
'cms-squire',
'lms',
'lms-coffee',
'xmodule',
'common',
'common-requirejs'
]
JS_REPORT_DIR = REPORT_DIR / 'javascript'
# Directories used for common/lib/tests
IGNORED_TEST_DIRS = ('__pycache__', '.cache')
LIB_TEST_DIRS = []
for item in (REPO_ROOT / "common/lib").listdir():
dir_name = (REPO_ROOT / 'common/lib' / item)
if dir_name.isdir() and not dir_name.endswith(IGNORED_TEST_DIRS):
LIB_TEST_DIRS.append(path("common/lib") / item.basename())
LIB_TEST_DIRS.append(path("pavelib/paver_tests"))
# Directory for i18n test reports
I18N_REPORT_DIR = REPORT_DIR / 'i18n'
# Service variant (lms, cms, etc.) configured with an environment variable
# We use this to determine which envs.json file to load.
SERVICE_VARIANT = os.environ.get('SERVICE_VARIANT', None)
# If service variant not configured in env, then pass the correct
# environment for lms / cms
    if not SERVICE_VARIANT:  # this will intentionally catch ""
if any(i in sys.argv[1:] for i in ('cms', 'studio')):
SERVICE_VARIANT = 'cms'
else:
SERVICE_VARIANT = 'lms'
@classmethod
    def get_django_setting(cls, django_setting, system, settings=None):
"""
Interrogate Django environment for specific settings values
:param django_setting: the django setting to get
:param system: the django app to use when asking for the setting (lms | cms)
:param settings: the settings file to use when asking for the value
:return: unicode value of the django setting
"""
if not settings:
settings = os.environ.get("EDX_PLATFORM_SETTINGS", "aws")
value = sh(
django_cmd(
system,
settings,
"print_setting {django_setting} 2>/dev/null".format(
django_setting=django_setting
)
),
capture=True
)
return unicode(value).strip()
@lazy
def env_tokens(self):
"""
Return a dict of environment settings.
If we couldn't find the JSON file, issue a warning and return an empty dict.
"""
# Find the env JSON file
if self.SERVICE_VARIANT:
env_path = self.REPO_ROOT.parent / "{service}.env.json".format(service=self.SERVICE_VARIANT)
else:
env_path = path("env.json").abspath()
# If the file does not exist, here or one level up,
# issue a warning and return an empty dict
if not env_path.isfile():
env_path = env_path.parent.parent / env_path.basename()
if not env_path.isfile():
print(
"Warning: could not find environment JSON file "
"at '{path}'".format(path=env_path),
file=sys.stderr,
)
return dict()
# Otherwise, load the file as JSON and return the resulting dict
try:
with open(env_path) as env_file:
return json.load(env_file)
except ValueError:
print(
"Error: Could not parse JSON "
"in {path}".format(path=env_path),
file=sys.stderr,
)
sys.exit(1)
@lazy
def feature_flags(self):
"""
Return a dictionary of feature flags configured by the environment.
"""
return self.env_tokens.get('FEATURES', dict())
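# Usage sketch (illustrative values; not part of the original module):
#   Env.get_django_setting('STATIC_URL', 'lms')      # shells out to Django for one setting
#   Env().feature_flags.get('ENABLE_SPECIAL_EXAMS')  # FEATURES dict from <service>.env.json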
|
Since at this time I am on Fall Break, I set up my accordion and sword in a pile, and did this small (5"x7") oil painting. I have at times done oil paintings, and still life paintings, but not at the same time.
I think I will call it The Sword and Squeeze-Box. I generally don't name pictures, yet I happened to think of this nifty alliterative name, because in a certain sea-chantey, the words say, "and I'll play my old squeeze-box as we sail along..." meaning, accordion. Of course, sword means sword, as in, "see the swords of Glen Imayle/flashing over the English pale/see all the children of the Gael/beneath O'Byrne's banners."
Although I did this inside, I still used my handy-dandy 'pochade briefcase'.
Perhaps I will do still life/oil paintings more often; it's kinda fun.
|
from __future__ import absolute_import, unicode_literals
from allauth.socialaccount.models import SocialToken
from django.conf import settings as django_settings
from django.contrib.auth import get_user_model
from django.db.models import Q
from . import chinup, exceptions
class NoSuchUser(exceptions.ChinupError):
pass
exceptions.NoSuchUser = NoSuchUser
class MissingToken(exceptions.ChinupError):
pass
exceptions.MissingToken = MissingToken
class Chinup(chinup.Chinup):
def __init__(self, **kwargs):
self.user = kwargs.pop('user', None)
super(Chinup, self).__init__(**kwargs)
def __unicode__(self, extra=''):
extra = '{}user={}'.format(extra and extra + ' ', self.user)
return super(Chinup, self).__unicode__(extra=extra)
def __getstate__(self):
return dict(super(Chinup, self).__getstate__(),
user=getattr(self.user, 'pk', self.user))
@classmethod
def prepare_batch(cls, chinups):
# Populate user tokens into chinups. This also immediately "completes"
# any chinups which require a token that isn't available, by setting
# chinup.exception.
cls._fetch_users(chinups)
cls._fetch_user_tokens(chinups)
# Weed out any chinups that didn't pass token stage.
chinups = [c for c in chinups if not c.completed]
return super(Chinup, cls).prepare_batch(chinups)
@classmethod
def _fetch_users(cls, chinups):
chinups = [c for c in chinups if not c.completed and not c.token
and isinstance(c.user, (int, basestring))]
if chinups:
users = cls._users_dict(chinups)
for c in chinups:
user = users.get(c.user)
if user:
c.user = user
else:
c.exception = NoSuchUser("No user %r" % c.user)
@classmethod
def _users_dict(cls, chinups):
User = get_user_model()
db_users = User.objects.filter(
Q(pk__in=set(c.user for c in chinups if isinstance(c.user, int))) |
Q(username__in=set(c.user for c in chinups if isinstance(c.user, basestring))))
users = {u.pk: u for u in db_users}
users.update({u.username: u for u in db_users})
return users
@classmethod
def _fetch_user_tokens(cls, chinups):
chinups = [c for c in chinups if not c.completed and not c.token
and c.user]
if chinups:
social_tokens = cls._social_token_queryset(chinups)
social_tokens = social_tokens.select_related('account')
assert (len(set(st.account.user_id for st in social_tokens)) ==
len(social_tokens))
tokens = {st.account.user_id: st.token for st in social_tokens}
for c in chinups:
token = tokens.get(c.user.pk)
if token:
c.token = token
else:
c.exception = MissingToken("No token for %r" % c.user)
@classmethod
def _social_token_queryset(cls, chinups, **kwargs):
site_id = getattr(django_settings, 'SITE_ID', None)
if site_id:
kwargs.setdefault('app__sites__id', site_id)
return SocialToken.objects.filter(
account__provider='facebook',
account__user__in=set(c.user for c in chinups),
**kwargs)
class ChinupBar(chinup.ChinupBar):
chinup_class = Chinup
def __init__(self, **kwargs):
self.user = kwargs.pop('user', None)
super(ChinupBar, self).__init__(**kwargs)
def _get_chinup(self, **kwargs):
return super(ChinupBar, self)._get_chinup(
user=self.user, **kwargs)
def __getstate__(self):
return dict(super(ChinupBar, self).__getstate__(),
user=getattr(self.user, 'pk', self.user))
__all__ = ['Chinup', 'ChinupBar', 'NoSuchUser', 'MissingToken']
|
Self-care is many things. It’s the whole-wheat toasted bagel with jalapeno cream cheese I had the other day. It’s calling my best friends, it’s drinking water. It’s sleeping. It’s me actually paying attention to my body.
Paying attention to my body has been higher and higher on my list lately, especially since it’s flu season.
Over the past few days, I’ve been soooooooooooo (the extra o’s helping you catch my drift?) uncomfortable. As a matter of fact, that’s an understatement. Having the flu is THE WORSTTTTTTT. Like…. I was SICK sick. I could not breathe through my nose, then once that cleared up, I had an acute pain by my jaw and winced every time I moved my mouth, only to have that subside and my ears refuse to pop. Finally, I was BURNING and had the worst headache- every smell and sound drove me nuts. I love living in NYC. It’s a pretty awesome city but it STINKS. Like STINK stink, especially when your nose and ears pick up on everything – the trash, the people, the aroma from restaurants mingling.
After a few days, I woke up and could smell the vanilla and coconut scent that lingers in my room and my phone’s ringtone did not sound like pounding – I realized WOW my body is working again. My eyes, my nose, my ears – they don’t hurt. In all, I was grateful and mindful of feeling. I didn’t feel especially great – like a post workout can do – at the same time, I didn’t feel as awful as the previous days. I just was.
Every time I get sick I go through a similar process of discomfort and realize how I take feeling “normal” for granted.
I hope to be more mindful moving forward and really listen to my body. I’ve been seeing self-care as attending to myself more, listening to my body, and being more intentional about its upkeep and general maintenance.
I stay on top of my doctor’s appointments, my diet is pretty healthy, and I work out about an hour, 4 days a week. I started noticing which foods energize me and which ones make me slow and ugh. Working out hasn’t necessarily changed my body but I move so much better and can better identify what causes discomfort. When I don’t work out in the morning, I feel awful. Like coffee and bagels cannot get me to smile and nod at my co-workers’ rants of the day.
It’s a tasking process but always remember that HEALTH IS WEALTH!
Time constraints, cost, and asinine policies inhibit people from visiting the doctor. AND, urgent care costs are $$$$, but if you can, PLEASE GO. Apps like ZocDoc (basically Yelp for doctors) help you find doctors in your network, identify why you are visiting, and book/reschedule appointments. ZocDoc also reminds you of the appointment as well as other doctors you should be visiting, and when. It is recommended to see the following doctors at regular intervals.
The Huffington Post gives a little more detail on the doctors you should see and why. Again, if you can go, GO!
Doctor visits are annoying, costly, not to mention, time-consuming. If I’m going because something is wrong, then I’m nervous because WTF is wrong with me?! Even though finding out what is wrong is helpful in the long run, it’s still a very scary process. Then if I go to the doctor and everything is fine, then I’m tight I gave them my money!
What I’m saying is, take a minute to listen to your body. Paying attention to your body is a form of self-care that encompasses being cognizant of what you eat, being active daily and generally caring for yourself. This gives you the tools needed to know when to get help, and of course, actually seeking it.
Take a second now, make a doctors appointment. If you don’t have a doctor or know who takes your insurance or trying to find a POC doctor- download ZocDoc or ask your friends.
Have a gym membership, but don’t go? Look up classes they offer at the gym or pen some time into your calendar. No membership, no money for a membership – hit up Youtube or download Apps like Nike Training.
And of course, drink your water, and eat ColorFull food.
This post is not sponsored by any of the mentioned products.
|
# Lint as: python3
r"""Example demo loading pre-trained language models.
Currently supports the following model types:
- BERT (bert-*) as a masked language model
- GPT-2 (gpt2* or distilgpt2) as a left-to-right language model
To run locally:
python -m lit_nlp.examples.pretrained_lm_demo \
--models=bert-base-uncased --top_k 10 --port=5432
Then navigate to localhost:5432 to access the demo UI.
"""
import os
import sys
from absl import app
from absl import flags
from absl import logging
from lit_nlp import dev_server
from lit_nlp import server_flags
from lit_nlp.api import dtypes as lit_dtypes
from lit_nlp.components import word_replacer
from lit_nlp.examples.datasets import classification
from lit_nlp.examples.datasets import glue
from lit_nlp.examples.datasets import lm
from lit_nlp.examples.models import pretrained_lms
# NOTE: additional flags defined in server_flags.py
FLAGS = flags.FLAGS
flags.DEFINE_list(
"models", ["bert-base-uncased", "gpt2"],
"Models to load. Currently supports variants of BERT and GPT-2.")
flags.DEFINE_integer("top_k", 10,
"Rank to which the output distribution is pruned.")
flags.DEFINE_integer(
"max_examples", 1000,
"Maximum number of examples to load from each evaluation set. Set to None to load the full set."
)
flags.DEFINE_bool(
"load_bwb", False,
"If true, will load examples from the Billion Word Benchmark dataset. This may download a lot of data the first time you run it, so disable by default for the quick-start example."
)
# Custom frontend layout; see client/lib/types.ts
LM_LAYOUT = lit_dtypes.LitComponentLayout(
components={
"Main": [
"embeddings-module",
"data-table-module",
"datapoint-editor-module",
"lit-slice-module",
"color-module",
],
"Predictions": [
"lm-prediction-module",
"confusion-matrix-module",
],
"Counterfactuals": ["generator-module"],
},
description="Custom layout for language models.",
)
CUSTOM_LAYOUTS = {"lm": LM_LAYOUT}
# You can also change this via URL param e.g. localhost:5432/?layout=default
FLAGS.set_default("default_layout", "lm")
def get_wsgi_app():
FLAGS.set_default("server_type", "external")
FLAGS.set_default("demo_mode", True)
# Parse flags without calling app.run(main), to avoid conflict with
# gunicorn command line flags.
unused = flags.FLAGS(sys.argv, known_only=True)
return main(unused)
def main(_):
##
# Load models, according to the --models flag.
models = {}
for model_name_or_path in FLAGS.models:
# Ignore path prefix, if using /path/to/<model_name> to load from a
# specific directory rather than the default shortcut.
model_name = os.path.basename(model_name_or_path)
if model_name.startswith("bert-"):
models[model_name] = pretrained_lms.BertMLM(
model_name_or_path, top_k=FLAGS.top_k)
elif model_name.startswith("gpt2") or model_name in ["distilgpt2"]:
models[model_name] = pretrained_lms.GPT2LanguageModel(
model_name_or_path, top_k=FLAGS.top_k)
else:
raise ValueError(
f"Unsupported model name '{model_name}' from path '{model_name_or_path}'"
)
datasets = {
# Single sentences from movie reviews (SST dev set).
"sst_dev": glue.SST2Data("validation").remap({"sentence": "text"}),
# Longer passages from movie reviews (IMDB dataset, test split).
"imdb_train": classification.IMDBData("test"),
# Empty dataset, if you just want to type sentences into the UI.
"blank": lm.PlaintextSents(""),
}
# Guard this with a flag, because TFDS will download and process 1.67 GB
# of data if you haven't loaded `lm1b` before.
if FLAGS.load_bwb:
# A few sentences from the Billion Word Benchmark (Chelba et al. 2013).
datasets["bwb"] = lm.BillionWordBenchmark(
"train", max_examples=FLAGS.max_examples)
for name in datasets:
datasets[name] = datasets[name].slice[:FLAGS.max_examples]
logging.info("Dataset: '%s' with %d examples", name, len(datasets[name]))
generators = {"word_replacer": word_replacer.WordReplacer()}
lit_demo = dev_server.Server(
models,
datasets,
generators=generators,
layouts=CUSTOM_LAYOUTS,
**server_flags.get_flags())
return lit_demo.serve()
if __name__ == "__main__":
app.run(main)
|
Here at Bathroom Renovation Guys, we'll be ready to satisfy your needs regarding Bathroom Renovation in Repton, AL. You'll need the most innovative modern technology in the field, and our crew of highly skilled professionals will offer just that. We guarantee that you get the best services, the best value, and the highest quality materials. We will help you to come up with decisions for the task, respond to all your questions, and organize an appointment with our workers whenever you call us at 888-341-7776.
You will have a budget to stick to, and you need to cut costs. Still, you require superior services on Bathroom Renovation in Repton, AL, so you can rely on us to help you save money while continuing to offer the highest quality services. If you work with us, you'll receive the advantage of our practical knowledge and superior materials to be sure that your project lasts while saving time and cash. For example, we are careful to steer clear of costly mistakes, do the job promptly to save hours, and guarantee that you receive the top discounts on supplies and work. If you need to get lower rates, Bathroom Renovation Guys is the company to contact. Dial 888-341-7776 to talk to our client care staff, right now.
You will need to be informed when it comes to Bathroom Renovation in Repton, AL. We won't encourage you to come up with ill advised judgments, as we know exactly what we'll be working at, and we make sure you know very well what to expect from the project. That's why we make every effort to be sure that you understand the procedure and aren't confronted by any surprises. Begin by calling 888-341-7776 to talk about your job. We'll resolve all of your questions and schedule the initial meeting. We are going to work together with you throughout the whole project, and our company will appear on time and ready.
Lots of reasons exist to decide on Bathroom Renovation Guys for Bathroom Renovation in Repton, AL. Our supplies are of the highest quality, our cash saving solutions are practical and powerful, and our customer support ratings won't be beat. Our company has the experience that you need to fulfill your goals and objectives. When you need Bathroom Renovation in Repton, call Bathroom Renovation Guys by dialing 888-341-7776, and we will be pleased to help you.
|
"""
Flacon
-------------
Flask application manager
"""
from setuptools import setup
setup(
name='Flacon',
version='0.0.1',
url='',
license='BSD',
author='Mehdi Bayazee, Mostafa Rokooie',
author_email='[email protected], [email protected]',
description='Flask based web framework',
long_description=__doc__,
packages=['flacon', 'flacon.commands'],
include_package_data=True,
package_data={'flacon': ['flacon/actions/project_template/*']},
namespace_packages=['flacon'],
zip_safe=False,
platforms='any',
install_requires=[
'flask>=0.9'
],
# scripts=['flacon/actions/flacon.py'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
|
10 Dec 2016 - 737-400, JY-JAQ (27826/2694), FF 27/01/1995, operated by Safi Airways, suffered a collapse of the right-hand main landing gear at Kabul on runway 11 at 12:24L (08:24Z). The combination of a soft landing at high speed and high airport elevation caused the torsion link of the shimmy damper to remain in an extended vertical position and fail. The RH MLG collapsed, causing the aircraft to skid and come to rest on the #2 engine. All 155 POB evacuated down the slides with no injuries.
The operations of Safi Airways were suspended for a short time in September 2016 due to debts and taxes of AFN 1,152,774,691. According to a statement by the Afghanistan Civil Aviation Authority, the decision to suspend the operations of the airline was taken by the Ministry of Finance of Afghanistan.
Safi Airways is on the European Commission's airline blacklist, as part of a blanket ban on all Afghan carriers. However, this aircraft was sub-leased from Jordan Aviation and was Jordanian-registered.
On 10 December 2016, a JAV Boeing 737-400 aircraft, registration JY-JAQ, operating leased scheduled passenger flight SFW 502 on behalf of SAFI Airways under a wet lease contract with the latter's call sign, departed on a domestic flight from Herat Airport (OAHR) at 07:00 Z from RWY 36 to Kabul International Airport (OAKB), Afghanistan. At approximately 07:57:45 Z, the aircraft touched down on RWY 29 at Kabul. The aircraft departed Herat with 164 passengers, 7 operating crew members and 2 engineers, a total of 173 persons on board. As the flight approached OAKB, the crew received the automatic terminal information service (ATIS) from the OAKB station at 07:45 Z, indicating normal weather with visibility of 6 km, a temperature of 7 degrees Celsius and wind of 150/07. The aircraft was configured for landing with the flaps set to 30 and a selected approach speed of 152 knots (VREF + 10) indicated airspeed (IAS). The aircraft was cleared for the ILS 29 approach and was vectored by radar for RWY 29. Air traffic control cleared the flight to land, with the wind reported to be 190 degrees at 15 knots. The crew stated that a few seconds after the touchdown, they felt the aircraft vibrating, during which they applied brakes and deployed the reverse thrust. The vibration was followed by the aircraft rolling slightly to the right. It later came to a full stop left of the runway centre line, resting on its left main landing gear and the right engine, with the nose landing gear in the air. The gear collapse occurred approximately 3,806 ft / 1,160 m past the threshold. The PIC declared an emergency to ATC and the cockpit crew commanded an evacuation from the left side of the aircraft. The evacuation was successfully accomplished with no reported injuries. Kabul airport RFF reached the occurrence aircraft, observed smoke coming from the right side and immediately deployed their procedures by spraying foam on engine #2. The aircraft sustained substantial damage due to the separation of the right main gear, resulting in the aircraft skidding on the right engine cowlings. No injuries were sustained by any of the occupants during the occurrence or the evacuation sequence. The operating crew of the incident flight were called by the Afghan Civil Aviation Authority (ACAA) for interview and medical examination (alcohol and drug blood tests).
The aircraft sustained substantial damage, as can be seen from the pictures; the actual damage and status of the aircraft will be determined in the damage report. In addition to the pictures shown below, the damage can be summarized as follows: the right main landing gear was detached from its place; the right inboard trailing-edge flap was detached due to the impact of separated parts of the landing gear; and, in the absence of the right MLG, the aircraft skidded on its engine #2, so additional damage to the underside of the right engine nacelle occurred as it slid along the surface during the landing roll.
The separation of the right main landing gear caused damage to the runway surface and may have caused damage to runway lights.
The captain of the occurrence flight joined JAV on 01.10.2005 and started his type rating on the B737 on 31.10.2005; he was cleared as FO on 15.08.2007 with 250 hrs. His command upgrade course ran from 14.03.2015 to 28.03.2015, by which time he had a total of 3,592 hrs on the B737. He started line training as PIC on 03.05.2015 and was cleared on 21.05.2015 with 48:35 hrs / 26 sectors.
The point of touchdown of the aircraft on the runway was approximately 1,067 meters from the runway 29 threshold, approximately 240 meters before TWY M; the approximate remaining runway distance was measured to be 2,351 meters from the point of touchdown to the runway 11 threshold. The aircraft came to rest 326 meters from the runway 11 threshold. At touchdown, the right main landing gear suffered damage that resulted in the detachment of the assembly. The aircraft slid on the right engine until it came to a complete stop. As the right main landing gear detached from its position, the tyres and detached components caused damage to the fuselage and the right inboard flap, which also detached as a result of the impact with parts of the landing gear assembly. The right engine suffered significant damage due to impact with the ground. The aircraft was removed from the runway to one of the adjacent aprons. The tyres suffered damage in several locations, including on the sidewalls.
The shaking tire marks from the aircraft's right-hand main landing gear inboard tire ran for a distance of approximately 400 - 500 m; by this time the right main landing gear had detached and was dragged away, causing the right wing to drop. There was evidence of runway surface damage at roughly regular intervals, appearing together with the hard tire contact marks, which may also indicate that both tires were damaged after the shimmy damper failed and while the oscillations were occurring. No map diagram of the accident scene was produced by the Afghan authority to show the wreckage distribution, and all damaged parts of the aircraft were collected and stored at the civil defense hangar.
The FDR data show the airplane descending from 1000 feet radio altitude (RA) configured for a flaps 30 landing with the speedbrakes armed, the autopilot disengaged by time 3810 seconds, and the autothrottle engaged through touchdown (Figure 1). The airplane was on approach to Runway 29 at KBL (verified by the recorded magnetic heading) and landed at a gross weight of 123,100 pounds (LB) [the maximum design landing weight is 123,899 LB]. Based on the landing weight and flap position, the landing reference speed (VREF) should have been approximately 142 knots. The approach speed was not recorded; however the computed airspeed was maintained at approximately 162 knots during the final approach with the autothrottle engaged which indicates that the approach speed was most likely VREF+20. From 900 feet RA until 75 feet RA, the descent rate (negative calculated vertical speed) was maintained at above 1000 feet/minute (fpm) with an average descent rate of 1250 fpm and maximum descent rate of 1400 fpm at time 3828 seconds. The glideslope deviation indicated the airplane was below the beam during the approach and the localizer deviation indicated that the airplane was either on or slightly right of the runway centerline (Figures 1 and 2). The calculated wind data were comparable to the airplane recorded ship system winds in magnitude, with more variation in the direction (Figure 2). However, both wind data sources indicate that the airplane was in a left quartering tailwind. Beginning at about time 3740 seconds at approximately 2000 feet RA, just as the airplane descended to capture the glideslope (not shown), the atmospheric conditions became turbulent with increased perturbations in computed airspeed, vane angle of attack, normal load factor and lateral acceleration, along with increased control wheel and column inputs to maintain the desired attitudes.
During the approach, the airplane experienced an average 7-knot tailwind with an approximate 15-knot left crosswind component. At about 35 feet radio altitude, an airplane nose-up column input was commanded around time 3851 seconds initiating landing flare, and the sink rate began to decrease (Figure 3). The airplane was in a left crab (negative drift angle) which is consistent with a left crosswind, which was nearly removed at touchdown with right rudder pedal input (Figure 4). Based on a change in character of the longitudinal acceleration, the main gear contacted the runway at time 3862 seconds at a computed airspeed of approximately 158 knots (VREF+16) and a ground speed of 178 knots (Figure 3). The main gear air/ground discrete parameter changed state from AIR to GROUND just after time 3863 seconds. The descent rate at the center of gravity (CG) when the main gear transitioned to GROUND state was 3.0 feet/second (fps). Touchdown occurred at a pitch attitude of approximately 0.4 degrees nose-up and a bank angle of approximately 1 degree to the right (Figures 3 and 4). The closure rate (negative calculated vertical speed) of the right main gear was also calculated which takes into account the runway slope and Euler angle rates; however the slope of the runway was unknown and therefore was not included in the closure rate calculation. At touchdown, the Euler angle rates were negligible (not shown) which resulted in a right main gear closure rate that was very similar to the descent rate at the CG of 3.0 fps (Figure 3).
Following the initial main gear ground contact, the speedbrakes deployed at about time 3862 seconds and approximately 1 second later the throttle levers began to transition to the reverse idle detent (Figure 3). At time 3865 seconds, large spikes in all three acceleration parameters were observed (Figures 3 and 4). These large spikes corresponded with the initiation of a bank angle change from 1 degree to 6 degrees to the right, the main gear discrete momentary transition to AIR for 1 second, and the nose gear discrete transition to GROUND for 1 data point. As the bank angle increased to the right, a left control wheel input was commanded to 35 degrees (Figure 4). A second set of smaller spikes in the acceleration data occurred just after time 3866 seconds as the bank angle reached 6 degrees and nose gear discrete transitioned to AIR [Figures 3 and 4]. The airplane came to rest at approximately time 3906 seconds while closely maintaining the runway heading (not shown).
Between the initial main gear ground contact and the air/ground discrete transition to GROUND, the normal load factor, longitudinal acceleration, and lateral acceleration began to fluctuate until approximately time 3865 seconds when the large spikes in the accelerations were observed (Figures 3 and 4). In addition, the fluctuations in the lateral acceleration increased in magnitude during this time (Figure 4). Due to the report in the SR that the right main gear departed the airplane during the landing rollout, it was deduced that the first set of large acceleration spikes most likely corresponded to the loss of the right main gear. This is also the time that the bank angle began to increase to the right. The second set of acceleration spikes most likely corresponded to the airplane settling onto the right engine nacelle after banking to the right as a result of the loss of the right main gear. The airplane completed the landing rollout balanced on the left main gear and the right nacelle with a pitch attitude of approximately 2 to 3.6 degrees nose-up and bank angle of approximately 6 degrees to the right (Figures 3 and 4). Additional damage to the right wing control surfaces can be observed in the right aileron deflection. The deflection limits of the ailerons are +/-20 degrees. After the right main gear collapse at time 3865 seconds, the right aileron deflection increased to +/-40 degrees, whereas the left aileron deflection remained in the expected range (Figure 4).
A ground track was generated to show the airplane’s path during the approach and landing rollout (Figure 5). Runway 29 at KBL has a length of 11,520 feet and a width of 140 feet. Longitudinal and lateral distances were calculated using a combination of inertial data (ground speed, drift angle, heading), glideslope/localizer deviation, and airport information (runway dimensions, taxiway dimensions, etc.). The airplane’s actual final resting position was not provided. The distances shown in Figure 5 were calculated based on the analysis of the recorded FDR data without reference to a physical anchor position. If the final resting position is provided (latitude/longitude or runway distances), the calculated ground track can be adjusted. The calculated airplane path is referenced to the airplane CG.
The ground track analysis results indicate that the airplane crossed over the runway threshold at 40 feet radio altitude and flare was initiated immediately after. Initial main gear contact occurred 3500 feet beyond the threshold as evidenced by the decrease in longitudinal acceleration and the speedbrake handle deployed soon after. The main gear air/ground discrete transitioned to GROUND at 3750 feet beyond the threshold. Large spikes in all three acceleration parameters occurred at 4350 feet beyond the threshold, which most likely correlates with the loss of the right main gear, followed by the bank angle increasing to the right with control wheel commanded to the left. The main gear discrete temporarily transitioned to AIR after the first set of acceleration spikes and the nose gear discrete transitioned to GROUND for 1 data point. At 4700 feet beyond the threshold, a second set of spikes was observed in the acceleration parameters just as the bank angle neared its maximum value of 6 degrees to the right, which most likely correlates to the airplane settling onto the right engine nacelle. At this point, the nose gear discrete transitioned back to AIR and the main gear discrete transitioned back to GROUND. The remainder of the landing rollout was performed on the left main gear and right engine nacelle, with the airplane pitch attitude between 2 and 3.6 degrees nose-up and the bank angle at approximately 6 degrees to the right. The estimated final stopping location of the airplane was 10,450 feet beyond the runway threshold and 20 feet to the left of the runway centerline.
Load sheet information: MTOW: 60,000 kg; ATOW: 58,467 kg, i.e. 1,533 kg under the maximum.
This extra fuel load resulted in higher final approach (VREF) and touchdown speeds.
The ATC-reported landing wind (per the CVR) for R/W 29 of 190/15 kts on the landing clearance results in a tailwind component of 1 kt and a crosswind component of 15 kts.
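As a cross-check of those components, the wind can be projected onto the runway axis. A minimal sketch, assuming a nominal magnetic heading of 290 degrees for runway 29 (the report's 1 kt tailwind figure implies an actual runway heading a few degrees lower):
import math

def wind_components(wind_dir_deg, wind_speed_kt, runway_hdg_deg):
    # Returns (headwind, crosswind) in knots; a negative headwind is a tailwind.
    delta = math.radians(wind_dir_deg - runway_hdg_deg)
    return wind_speed_kt * math.cos(delta), wind_speed_kt * math.sin(delta)

hw, xw = wind_components(190, 15, 290)
print(round(hw, 1), round(abs(xw), 1))  # -2.6 (i.e. ~2.6 kt tailwind), 14.8 kt crosswind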
The Jeppesen ILS chart for R/W 29 indicates a 3.50-degree glideslope angle, which results in a higher sink rate than normal approaches on other runways.
All the above-mentioned factors result in a higher ground speed and can contribute to an unstabilized approach, as clearly indicated by the EGPWS "SINK RATE" warning triggered below 300 feet AGL. The commander gave the call to continue the landing, and the F/O (pilot flying) corrected for the sink rate and continued the landing, resulting in a float of approximately 4 seconds and a longer-than-normal touchdown.
Jordan Aviation carries out various operations on a wet-lease basis on behalf of foreign operators, and sometimes it operates to airports that require specific crew and pilot training, as they may have unusual and often difficult approaches. Jordan Aviation has no evidence that Kabul airport had been categorized, or that training for the crew in the SAFI operation had been provided for this airport. Jordan Aviation's operations supervision needs to create qualification requirements for certain routes and airports, where scheduling should account for the level of experience required for certain flights and airports and should specify PIC landings on certain runways and in certain conditions; the situation in hand (high elevation, higher than normal glideslope angle, tailwind close to the limit) indicates that a PIC landing was more appropriate.
When asked about the high fuel weight (5,200 kg of fuel on board), the PIC replied that Kabul is known for drastic weather changes and that his alternate was the departure airfield.
Flight Duty and Rest Limitation was not considered a factor contributing to this occurrence.
Crew Qualification and Standard Operating Procedures (SOP). a. Jordan Aviation needs to qualify and address steep approach operations in its Operations Manuals and accommodate the required training in the JAV Training Policy. b. Training was done on time with no reported deficiencies, but the training does not accommodate the irregularity of operations and does not include unstable approach recognition.
• Sink rate is no greater than 1000 fpm. Throughout the approach, there were several sink rate exceedances of 1000 fpm.
The CVR data show that a “Sink Rate” warning was triggered for 2 seconds between 159 - 115ft AAL. The average Vertical speed during the warning was -1093 ft/min.
These rate-of-descent warnings are normal on approach to a high-altitude airport with a steep (3.5-degree) glideslope, due to the higher true airspeed and consequent higher groundspeed. The rate of descent required to maintain a 3.5-degree glideslope with a groundspeed of 180 kts is 1064 ft/min.
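As a sanity check on that figure: the required rate of descent is the groundspeed, converted to feet per minute, times the tangent of the glidepath angle. A minimal sketch using plain trigonometry (nothing from the report beyond the 3.5-degree angle; the result depends on the groundspeed assumed, and the report's 1064 ft/min corresponds to roughly 172 kts):
import math

def rod_fpm(groundspeed_kt, glidepath_deg):
    # Rate of descent (ft/min) needed to hold a glidepath at a given groundspeed.
    ft_per_min = groundspeed_kt * 6076.12 / 60.0  # knots -> feet per minute
    return ft_per_min * math.tan(math.radians(glidepath_deg))

print(round(rod_fpm(180, 3.5)))  # ~1115 fpm at 180 kts groundspeed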
• The flight crew selected an approach speed of Vapp+10, while ATC reported a wind of 140/07, which indicates a tailwind component of 6 knots; the crew should have selected Vapp+5 at that stage.
Although they selected Vapp+10, the FDR data showed an average of 165 - 170 knots IAS, which is 15 - 18 knots higher than the selected approach speed; that speed deviation continued until the aircraft reached flare height, where the trend came down towards a speed of Vapp+5.
The Flight Crew Training Manual also contains the following recommendations that are applicable to this event: Initiate the flare when the main gear is approximately 20 feet above the runway by increasing pitch attitude approximately 2° - 3°. This slows the rate of descent. After the flare is initiated, smoothly retard the thrust levers to idle, and make small pitch attitude adjustments to maintain the desired descent rate to the runway. A smooth thrust reduction to idle also assists in controlling the natural nose-down pitch change associated with thrust reduction. Hold sufficient back pressure on the control column to keep the pitch attitude constant. Ideally, main gear touchdown should occur simultaneously with thrust levers reaching idle. Do not allow the airplane to float or attempt to hold it off. Fly the airplane onto the runway at the desired touchdown point and at the desired airspeed. Prolonged flare increases airplane pitch attitude 2° to 3°. When prolonged flare is coupled with a misjudged height above the runway, a tail strike is possible. Do not prolong the flare in an attempt to achieve a perfectly smooth touchdown. A smooth touchdown is not the criterion for a safe landing.
The flare, followed by the aircraft floating above the runway to a touchdown 1,067 m from the runway threshold, was justified only by the pilots' judgment to bleed off the aircraft's energy before touchdown to avoid a hard landing. The flight crew stated that this kind of technique is routinely used at high-altitude airports to avoid a high-energy touchdown, taking into account the remaining runway distance available to stop the aircraft in the landing run.
The characteristics of the landing are consistent with past landing gear shimmy events. The airplane touched down at a high ground speed and low sink rate, and the air/ground discrete transition to GROUND occurred approximately 1 to 1.5 seconds after initial main gear ground contact, indicating that the struts were extended for that period of time. As a result, the torsion links of the shimmy damper remained in an extended, vertical position, where the damper has less mechanical advantage, for a longer period of time. Despite the presence of shimmy damper hardware, which is designed to reduce the torsional vibration energy generated during landing, airplanes occasionally experience main landing gear shimmy.
“Based on operator reports, MLG shimmy is an infrequent event that is characterized by strong vibration, usually from one MLG, that begins at touchdown and continues until the airplane is fully stopped. Historically, there have been two or three shimmy events a year in the worldwide 737-200/ -300/-400/-500 fleet. However, in the last few years, the rate of shimmy events has increased sharply on these models. In a few particularly severe shimmy events, the affected main landing gear collapsed during the landing. This article discusses causes of shimmy and recommended actions operators can take to reduce the likelihood of it occurring. Boeing sometimes receives reports from operators of what is assumed to be a hard landing because of the violent nature of the landing and the observation of a torsion link fracture.
Due to the geometry of the torsion links, the shimmy damper is most effective when the landing gear strut is compressed in the ground mode. Lower touchdown descent rates increase the likelihood of a shimmy damper failure. It is important to note, however, that proper maintenance of the gear components is the best way to prevent shimmy damper failures. The possibility of landing gear shimmy events is greater at high altitude airports.
For shimmy to occur, the landing gear must have a force applied to it that excites this torsional vibration mode. The 737 has a vibration frequency of approximately 15 Hertz (Hz). Boeing engineers theorize that the force needed to initiate shimmy is probably an alternating drag force, such as if one tire touches down, causing a twisting motion of the inner cylinder in one direction and the second tire touches down a fraction of a second later, causing the inner cylinder to twist in the opposite direction. If the timing between the first tire and second tire contacting the runway is similar to the shimmy frequency, the gear can oscillate in the shimmy mode.
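To put that frequency in time terms (my arithmetic, not the report's): a 15 Hz mode has a period of 1/15 s, roughly 67 ms, so a delay of a few tens of milliseconds between the first and the second tire contacting the runway is in the right range to couple into the shimmy mode.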
Boeing also recommends that pilots strive for a landing with normal sink rates with particular emphasis on ensuring that the auto speedbrakes are armed and deploy promptly at touchdown. An overly soft landing, or a landing in which the speedbrakes do not promptly deploy, allows the landing gears to remain in the air mode longer, which makes them more vulnerable to shimmy. This is especially true when landing at airports located at higher elevations, where the touchdown speed is increased.
Boeing has stated that a high-speed soft landing can contribute to excessive main gear shimmy or vibration in the 737-400 airplanes. This is detailed in Flight Operations Tech Bulletin (FTOB) 737-15 released December 14, 2015 which states “Based on analysis of main gear shimmy events, low sink rate landings of less than 1 ft/sec (60 feet/minute) can increase the possibility of inducing main gear shimmy”.
The conditions at actual touchdown, and whether the gear can handle those conditions, are questionable. Boeing agrees that a high-speed soft landing can cause excessive shimmy with resultant failure. But nowhere does Boeing state what the actual limitations are in terms of limiting groundspeed and/or touchdown vertical forces, which are usually measured in g. This aircraft performed a flap 30 landing, while Boeing allows flap 15 landings and even flapless landings, which result in much higher landing speeds than were recorded in this case; yet nowhere in the Operations Manuals does Boeing state that pilots need to beware of shimmy conditions at high speed and with soft landings, and that this can cause a failure of the gear. This was explained by Boeing as follows: they do not provide limitations to pilots on these circumstances, since maintenance, tire wear, runway conditions, landing speed and firmness of landing can all contribute to some varying degree. Additionally, Boeing does not, as a normal course of action, provide consequences in the Operations Manual.
For flight SFW 502, the steep approach requirement for Kabul airport, which has a 3.5-degree glideslope profile, together with the high approach speed while landing at a high-altitude airport, resulted in excessive ground speed (165 - 170 knots) before touchdown. An extended flare, the result of the pilots' judgment to bleed the aircraft's energy to avoid a hard landing, led to touchdown at a low sink rate (58 feet/minute).
Excessive wear or freeplay in the joint where the shimmy damper connects to the lower torsion link (referred to as the apex joint). Wear at this location allows undamped torsional freeplay to exist in the landing gear at the apex joint, which greatly increases the likelihood of shimmy.
Wear or freeplay in the torsion link bushings (e.g., where the torsion links connect to the outer and inner cylinder). Wear at these locations also allows undamped torsional freeplay.
Landing with extremely low sink rates. This type of landing is more likely to experience shimmy than a firmer landing because the torsion links remain in an extended, vertical position, where the damper has less mechanical advantage, for longer periods of time.
Air in the damper. Several shimmy events occurred within a few flights after a new or overhauled damper was installed. In these cases, it is suspected that a thorough bleeding of air from the damper was not performed, thus preventing proper damper operation.
Damper piston fracture. In a small number of events, it is suspected that the damper piston fractured due to a preexisting fault (e.g., a fatigue crack).
Overserviced shock strut. In several events, an overserviced shock strut has been suspected to have been a contributing factor. A shock strut overserviced with nitrogen allows the torsion links to have a reduced mechanical advantage to react to the torsional motion of the inner cylinder.
Incorrect damper installation. In one event, a damper designed for a very early 737-200 had inadvertently been installed on a later airplane that required a more heavy-duty damper.
Unconnected hydraulic tube. In one event, a hydraulic tube for the damper was inadvertently left unconnected after unrelated maintenance, so there was no hydraulic fluid available to the damper.
The aircraft had 16 open deferred defects at the time of the occurrence, nothing related to the landing gear.
It was noted that a suspected hard landing was reported on 13/11/2016 at Kabul (KBL) and the aircraft was inspected IAW AMM 05-51-51, showing no damage to the aircraft (technical log sheet no. 22318).
It was noted that all landing gear shock struts were serviced IAW AMM 12-15-31 and AMM 12-15-41 with dry nitrogen on 30/06/2016, based on an open defect per inspection discrepancy sheet no. 7151. The corrective action was followed up on deferred defect sheet no. 3038, dated 30/06/2016, requiring a check of the X-dimension of all landing gear shock struts after 5 to 10 landings; this deferred defect was closed by a satisfactory check of the X-dimension of all landing gear shock struts, with no further defects, on 06/07/2016.
3.1.1. The pilots held valid licenses and medical certificates.
3.1.2. The aircraft had a valid Certificate of Airworthiness (C of A) and Certificate of Registration (C of R) and was operated within the weight and balance envelope.
3.1.3 There were no reports of aircraft system abnormalities during flight.
3.1.4. The torsion link and shimmy damper of the right Main Landing Gear (MLG) assembly were found broken.
3.1.5. Oscillating tire marks were left on the runway.
3.1.6. The aircraft gross landing weight was 123,100 lb (the maximum design landing weight is 123,899 lb).
3.1.7. Based on the landing weight and flap position, the landing reference speed (VREF) should have been approximately 142 knots.
3.1.8. During the approach the airplane experienced an average 7-knot tailwind with an approximate 15-knot left crosswind component.
3.1.9. The Main Landing Gear (MLG) contacted the runway at a computed airspeed of approximately 158 knots (VREF+16) and a ground speed of 178 knots.
3.1.10. The speedbrake deployed immediately after the airplane touched down at 3500 feet beyond the threshold.
3.1.11. The airplane touched down at a high ground speed and low sink rate.
3.1.12. The lateral acceleration started to fluctuate and grew until the gear collapsed.
3.1.13. A "Sink Rate" EGPWS warning was triggered for 2 seconds between 159 - 115 ft AAL. The average vertical speed during the warning was -1093 ft/min.
3.1.14. The flight crew selected an approach speed of Vapp+10, while ATC reported a wind of 190/15, which indicates a tailwind component of 1 knot; the crew should have selected Vapp+5 at that stage.
3.1.15. The PIC declared an emergency to ATC and the cockpit crew commanded an evacuation from the left side of the aircraft. The evacuation was successfully accomplished with no reported injuries.
3.1.17. The extra fuel load resulted in higher final approach (VREF) and touchdown speeds.
3.1.18. The ATC-reported landing wind (per the CVR) for R/W 29 of 190/15 knots on the landing clearance results in a tailwind component of 1 knot and a crosswind component of 15 knots.
3.1.19. The Jeppesen ILS chart for R/W 29 indicates a 3.50-degree glideslope angle, which results in a higher sink rate than normal approaches on other runways.
3.1.20. Nowhere in the Operations Manuals does Boeing state that pilots need to beware of shimmy conditions at high speed and with soft landings and that this can cause a failure of the gear.
3.1.21. Jordan Aviation's operation depends heavily on leasing out to foreign operators, sometimes involving operations at unfamiliar airfields. Jordan Aviation's training policy does not account for route and airfield competency.
The investigation committee determined that the airplane experienced main landing gear shimmy, the most probable cause being that the struts were extended for a long period of time. As a result, the torsion link of the shimmy damper remained in an extended vertical position, where the damper has less mechanical advantage for longer periods of time, despite the presence of shimmy damper hardware designed to reduce the torsional vibration energy generated during landing.
The CARC has published the following safety recommendation, in reference letter 31/100/508/15, on preventing MLG shimmy events, addressed to Jordanian operators of Boeing B737-300/400/500 aircraft.
The letter uses references from Boeing to describe MLG shimmy and gives both maintenance and operational recommended operator actions.
"Boeing also recommends that pilots strive for a landing with normal sink rates with particular emphasis on ensuring that the auto speedbrakes are armed and deploy promptly at touchdown. An overly soft landing, or a landing in which the speedbrakes do not promptly deploy, allows the landing gears to remain in the air mode longer, which makes them more vulnerable to shimmy. This is especially true when landing at airports located at higher elevations, where the touchdown speed is increased."
In addition, pilots are requested to report any event of MLG shimmy or aircraft vibration during landing or takeoff.
The full final report can be read here.
|
# -*- coding: utf-8 -*-
"""
abm.xypops
~~~~~~~~~~
Environments not backed by networkx whose x, y traits are used in visualization
"""
from scipy.stats.distributions import norm
from scipy.stats.distributions import uniform
from sklearn.metrics.pairwise import euclidean_distances
from abm.viz import display_network
from abm.pops import Environment
from abm.entities import XyEntity
import numpy as np
from random import choice
Y_DIST = norm(300, 10)
CLUSTER_X_DIST_MAP = {
'A': uniform(0, 50),
'B': uniform(30, 50),
'C': uniform(60, 50)
}
CLUSTER_SIZES = {
'A': 8,
'B': 10,
'C': 8
}
def make_points(cluster, size, y_dist, x_dist):
"""Creates a set of points using y_dist and x_dist to draw the location."""
ys = y_dist.rvs(size)
xs = x_dist.rvs(size)
return list(zip(xs, ys, [cluster] * size))
class XyEnvironment(Environment):
"""
A set of connected Entities. Handles message passing and displaying.
Entities are connected randomly.
"""
def __init__(self, y_pos_dist=Y_DIST, cluster_x_dists=CLUSTER_X_DIST_MAP,
cluster_sizes=CLUSTER_SIZES, single_component=True,
entity_class=XyEntity, **kwargs):
super(XyEnvironment, self).__init__(**kwargs)
self.population = []
self.connectivity_matrix = None
self.connected_components = []
self.node_component_map = {}
self.entity_class = entity_class
self._set_entities(y_pos_dist, cluster_x_dists, cluster_sizes)
self._set_connectivity_matrix()
self._set_connections()
if single_component:
self._ensure_single_component()
def _set_entities(self, y_pos_dist, cluster_x_dists, cluster_sizes):
point_args = []
for cluster, size in cluster_sizes.iteritems():
point_args += make_points(cluster, size,
y_pos_dist, cluster_x_dists[cluster])
for ix, (x, y, cluster) in enumerate(point_args):
pt = self.entity_class(environment=self, index=ix, x=x, y=y, cluster=cluster)
self.population.append(pt)
self.size = len(self.population)
def _set_connections(self, track_components=True):
"""Initializes each Entity's adjacency list.
:param track_components: Flag for tracking connected components during graph construction
"""
for index, point in enumerate(self.population):
# make set of connections to indices; np.where returns a tuple
adjacencies = set(np.where(self.connectivity_matrix[index] > 0)[0])
adjacencies.discard(index)
# pass adjacency information down to agent
point.set_adjacencies(adjacencies)
if track_components:
# track connected components as we construct edges
if index in self.node_component_map:
component = self.node_component_map[index]
else:
component = set([index])
self.node_component_map[index] = component
self.connected_components.append(component)
# update the component in place with potential new members
component.update(adjacencies)
# update the node - component map so we can fetch this object
# for adjacencies
self.node_component_map.update(
{a: component for a in adjacencies})
# resolve potential component connections
self._resolve_components(component)
n = float(len(self.population))
k = float(np.sum(self.connectivity_matrix)) / 2
self.edge_density = k / (n * (n - 1) / 2)
def _ensure_single_component(self):
"""
Iterate through disjoint component list, adding connections between sequential components
Update other datastructures to reflect the new connections
"""
for ix, component in enumerate(self.connected_components[:-1]):
start, end = (choice(list(component)), choice(
list(self.connected_components[ix + 1])))
self.population[start].adjacencies.append(end)
self.population[end].adjacencies.append(start)
self.connectivity_matrix[start][end] = True
self.connectivity_matrix[end][start] = True
self.connected_components[ix].add(end)
self.connected_components[ix + 1].add(start)
self._resolve_components(self.connected_components[0])
def _resolve_components(self, component):
"""
Find components thought to be separate that now have intersections
Condense these and set self.connected_components to be a list of disjoint sets
"""
resolved_components = [component]
for other_component in self.connected_components:
if other_component.intersection(component) or other_component is component:
component.update(other_component)
self.node_component_map.update(
{a: component for a in other_component})
else:
resolved_components.append(other_component)
self.connected_components = resolved_components
def _set_connectivity_matrix(self):
"""
Computes the connectivity matrix of this Environment. Each point is
connected to each other within a radius.
"""
if self.connectivity_matrix is not None:
return
# generate a random symmetric matrix
point_count = len(self.population)
matrix = np.random.randint(
0, 2, point_count ** 2).reshape(point_count, point_count)
matrix = (matrix + matrix.T) / 2
for i in range(point_count):
matrix[i][i] = 0
self.connectivity_matrix = matrix
def display(self, current=None, target=None):
"""
Plots the state of the task. If <show> = False, doesn't plot
anything and the simulation can run faster.
"""
if not self.show:
return
display_network(self.population, self.connectivity_matrix,
current=current, target=target)
class CappedPreferentialEnvironment(XyEnvironment):
"""
A set of connected Entities. Handles message passing and displaying. Connections are laid
out such that entities of the same cluster are more likely to be tied together,
proportionally to a parameter alpha. The overall density of the network is controlled
by a parameter beta.
"""
def __init__(self, alpha=0.8, beta=0.4, *args, **kwargs):
self.alpha = alpha
self.beta = beta
super(CappedPreferentialEnvironment, self).__init__(*args, **kwargs)
def _set_connectivity_matrix(self):
"""
Computes the connectivity matrix of this Environment. Each point is
connected to each other within a radius.
"""
if self.connectivity_matrix is not None:
return
def decide_connection(point1, point2):
# A point is connected to another point of its same cluster
# with high probability proportional to alpha, and to
# another point of a different cluster with probability
# proportional to 1 - alpha.
# Moreover, the edge density of a network is capped at a value
# beta. That's why we choose a 0 with probability 1-beta,
# and partition beta into alpha and 1-alpha.
alpha = self.alpha
beta = self.beta
if point1.cluster == point2.cluster:
tie = np.random.choice(
[0, 0, 1], p=[1 - beta, beta * (1 - alpha), beta * alpha])
else:
tie = np.random.choice(
[0, 0, 1], p=[1 - beta, beta * alpha, beta * (1 - alpha)])
return tie
matrix = np.array([[0] * len(self.population)
for _ in range(len(self.population))])
# since the graph is undirected, the matrix is symmetric,
# which in turn means we need only compute the lower triangular
# elements and then copy them into the upper triangular elements
for i, point1 in enumerate(self.population):
for j, point2 in enumerate(self.population[:i]):
matrix[i][j] = decide_connection(point1, point2)
matrix[j][i] = matrix[i][j]
self.connectivity_matrix = matrix
class NearestNeighborsEnvironment(XyEnvironment):
"""
A set of connected Entities. Handles message passing and displaying. Connections laid
out geographically: each point is connected to some of its nearest neighbors.
"""
def _set_connectivity_matrix(self):
"""
Computes the connectivity matrix of this Environment. Each point is
connected to each other within a radius.
"""
if self.connectivity_matrix is not None:
return
points_arr = np.array([[p.x, p.y] for p in self.population])
distance_mat = euclidean_distances(points_arr, points_arr)
# Every point p will be connected to each other point whose distance
# to p is less than a cut-off value. This value is computed as the
# mean of {min_nonzero(dist_mat(p)) | p is a point}, times a factor
def min_nonzero(r):
return min(r[r > 0])
# apply_along_axis(f, axis=1, arr) applies f to each row
min_neighbor_distances = np.apply_along_axis(
min_nonzero, axis=1, arr=distance_mat)
factor = 2.2
neighbor_cutoff = np.mean(min_neighbor_distances) * factor
connectivity_matrix = distance_mat < neighbor_cutoff
self.connectivity_matrix = connectivity_matrix
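A minimal usage sketch for these classes follows (assumptions: the Environment base class can be constructed without required arguments and sets a show attribute, as display() implies; adjust to the real base-class signature):
# Build a geographic network and inspect the quantities the module computes.
env = NearestNeighborsEnvironment()
print env.size          # number of entities, drawn from CLUSTER_SIZES
print env.edge_density  # computed in _set_connections()
env.display()           # no-op unless self.show is truthy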
|
A client in the midwest recently sent over some pics of his baby. He recently treated the car to some interior upgrades from Works Bell. The hub and limited edition matte black Yoshi Rapfix II allows him to easily and safely remove his rare NSX-R steering wheel. Stay tuned for future mods!
Out of the Corner of My Eye….
Driving home the other day, I spotted this exceptionally clean 928. Had an exhaust, sounded wonderful. Can't quite place the wheels, perhaps they are the Club style? Anyway, it almost sounded boosted, and I spotted a gauge on the A-pillar (boost?). Looked to be a late model with the revised front bumper and S4-style spoiler. While traveling he pulled up ahead a bit and on the back I spotted the badge: 928GT. Interesting…very interesting. The GT was a limited-run car Porsche made from 1989-1991. From what I have looked up, only 150 were brought stateside. I've heard of them before, but if genuine this would be the first I've ever seen. The GT had a 5 liter V8, 330hp, an electronic rear LSD, and even TPMS. The Germans are always so far ahead of the curve with accessories like that.
I have always appreciated these cars – they have obvious road presence, and they have aged extremely well. Particularly the later cars. They are 20-25 years old now and could easily pass for something far more modern. The engines are like cashmere – totally smooth, but pack a punch. While they are heavy cars, they are an awesome GT. I can't help but think that the modern Aston/Jaguar/Maserati/BMW 6 series had a 928 somewhere in the back of the engineers' minds when they were being designed. It's an awesome long-touring sports coupe. We had a client that had a very early car – a 1978 as I recall – which I was lucky enough to take out for a brief run years ago, after some work was completed. I would estimate around 1997-1998. I recall even today the V8 pulling solidly, even though it had all that mass to lug around, and the car being extremely tight.
Saw this over on http://www.petrolicious.com/owner-restores-king-of-ferrari-s-a-f40-lm and http://www.build-threads.com/build-threads/ferrari-f40-lm-restoration/ and it's worth sharing with those who haven't seen it.
This new forged wheel from Advan Japan combines a classic 10 spoke sports wheel shape, with a series of different lips. These are sold in 19 inch sizes only, in widths ranging from 8 inches to 10.5 inches in both 5×100 and 5×114.3 patterns. Available in Hyper Bronze, Black and Hyper Silver.
Photo by @NileshRParmar – with just an iPhone!
On this day in ’88, Hungary: Ayrton Senna & Alain Prost took McLaren’s 10th 1-2 finish. A truly awe-inspiring season.
|
'''
Copyright (c) 2013, Kenneth Langga ([email protected])
All rights reserved.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os
import subprocess
import time
# Check if run dir exists
RUNDIR = os.path.abspath('run')
if os.path.isdir(RUNDIR):
while True:
count = 0.
# List folder contents
for c in os.listdir(RUNDIR):
# Check if it's a batch file
if c.endswith('.bat'):
# Run batch file
print '\n', '#' * 40, 'Running', c, '#' * 40, '\n'
run = subprocess.Popen(os.path.join(RUNDIR, c), cwd=RUNDIR)
run.wait()
count += 1
# Sleep for a set interval; guard against a division by zero
# when no batch files were found
if count:
    dur = 60 / count * 60
else:
    dur = 60 * 60
print '\n', '#' * 40, 'Sleeping for', dur, 'secs', '#' * 40, '\n'
time.sleep(dur)
|
This article should have been called Why you have never heard of Costa Ballena? but you may know the area, so I decided on the Unknown Coastline title instead.
After living in Andalucia for many years, I am surprised that this coastline was unknown to me. On one hand, I am quite pleased that I still have some parts of Andalucia to discover on my travels. On the other, I am thinking that I need to visit Cadiz and Huelva a little more. This week's post is to share the 3 days spent on Costa Ballena last weekend.
It is south of Sevilla, on the coastline of Cadiz province, situated in the south-western corner of Spain between Chipiona and Rota. Even though it may not be familiar, it does have 10 blue flag beaches in the Rota area alone. Many Spanish families spend their summers here each year. With 16 km of beaches, the main ones are La Costilla, El Rompidillo and Punta Candor.
It is also well known for the American naval base. NAVSTA Rota is a 6,100-acre site. Of course this has some impact on the area, as US military families live here, so you hear quite a few American accents as you move around. The large ships manoeuvring are also clearly visible on the horizon.
There are 3 four star hotels in Rota. My accommodation for the weekend was at Hotel Playa Ballena. The room was a ground floor double opening out onto a terrace overlooking the gardens and pool.
After the long drive from Granada (300 km) I headed straight to the hotel spa. Kitted out with a vast array of jets, pools and bubbles, this was the perfect way to get into the holiday mood. I didn't have the chance to try any of the bookable services, beauty treatments and massages as I had to get ready for an evening in Rota; even so, the 90-minute spa session was a good start to my weekend.
What to do in Rota?
Rota is a coastal town with a typical Andalusian feel to it. Whitewashed houses, plant pots filled with geraniums on windowsills and, of course, an historic castle. On the first evening I got to visit Bodegas El Gato to find out about the local tipple, Tintilla de Rota. This sweet dark wine has been produced in Rota for over 500 years. Bodegas El Gato is actually the oldest producer and still makes this wine today.
Wandering around the casks of this winery and hearing about all the different varieties they produce is a real treat. As this is a small family-run business, I enjoyed learning about the artisan touch to the processes and could really hear the passion in their explanations as they spoke about the wine.
Other activities you can enjoy in the area are watersports, horse riding, cycling, walking and golf.
While I was there I enjoyed a morning at the Ocean Beach Golf Club. The driving range is the largest in Europe, so it was great for a beginner like me to have all that space around me to practise. This course was designed by Spanish golfer Jose Maria Olazabal; it is a 27-hole golf course, suitable for all abilities. Being located next to the sea, it's a beautiful setting to spend a day. They also have a croquet lawn too.
That evening we rented bikes to ride around the beaches at sunset. Bicicletas Valdes pick up and drop off the bikes at your hotel door, which makes it simple and fun. Riding around the beaches and parks of the area was a lovely way to end a great day discovering Rota. There are lots of green spaces in the area, especially as the pine trees actually reach the coast, so it's a beautiful place for outdoor activities.
There are several places to see around the old town. The Castillo de la Luna, a 13th-century castle, now doubles up as the tourist information centre. Inside the pretty patio there is a fountain, and along the walls original 15th-century decorations. Across the square opposite the castle is the main city gate and old lighthouse. This part is also the beginning of the seafront.
As you wander through Rota the old city walls are visible in different parts of the city and you can still see some of the gates. Some of the more well known ones are Chipiona Gate, Sanlúcar Gate and Puerta del Maro. There is also the port and fish auction house in this area opposite the lighthouse.
Also in the old town, the food market Mercado de Abastos has now been carefully restored. Inside this large hall there is a mix of food stalls and small bars. The central part is dotted with tables and chairs, so you may enjoy a drink and some delicious food. We stopped here to sample a typical local food, 'Arranque', which is like a thick tomato paste. This was delicious with breadsticks and was served with chilled white wine. We also tasted some of the local Cadiz cheese too.
Food in this area is traditional and often locally sourced. With a rich history in fishing, wine production and agriculture, some of the local delicacies will really make a lasting impression. Don't miss the sherry when you are in the area. Whether it be fino, oloroso or the sweet Pedro Ximenez, make the most of it when you are there.
Traditional fishing methods are still being used in the Corrales de Pesca on the Rota coast. These stone walls on the shoreline make a pond effect, trapping fish at low tide. These sustainable methods are historic and must be protected by locals. Damage to the walls can interrupt or ruin these corrals for good. When I visited I managed to talk to one of the locals about this method of fishing and the way of life of the Corraleros.
Many thanks to Bodegas El Gato, Andalucía Travel Bloggers, La Quesería El Bucarito, El Mercado Central de Abasto, Intervenciones por Rota, Ayuntamiento de Rota, Asociación de Corraleros de Rota, Descubre Rota, Valdes Bicicletas, Antonio Cordero Solís – Photographer and Bombastic Teatro for making the Rota Costa Ballena experience so special.
Great post, we are really glad you discovered Rota and Costa Ballena. Whenever you like, we can keep planning trips around Cadiz and Huelva.
Yes, I am keen to get to know it more; the area really surprised me.
Sounds like a wonderful and relaxing place for a beach holiday, Molly.
Rota is so special. We’re really glad you went and liked it. You had such fantastic hosts too! Nice blog too!
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from ReadWeibo.mainapp.models import Category, Weibo, Comment
from ReadWeibo.account.models import Account
from main import Config
import DataUtil as du
from scipy.sparse import *
from scipy import *
import numpy as np
import logging
import operator
import math, random, sys, csv
class MultiRank:
def __init__(self, graph, topic_words=Config._ML_WORDS,
alpha=.0, beta=0.05, mu=0.3, eta=0.15, max_iter=40):
self.max_iter = max_iter
self.alpha = alpha
self.beta = beta
self.mu = mu
self.eta = eta
self.graph = graph
self.ranks = {}
self.topic_words = set(topic_words.lower().split("-"))
def _adj_mat(self, graph, topic_words):
'''
build weibo adj matrix by user activity
build bipartite graph between weibo and keywords
'''
# label id to each node
wb_id = 0; wd_id = 0
for key, info in graph.nodes(data=True):
if info['tp'] == 'weibo':
graph.node[key]['id'] = wb_id
wb_id += 1
elif info['tp'] == 'word':
graph.node[key]['id'] = wd_id
wd_id += 1
# make adj matrix
W = lil_matrix((wb_id, wb_id))
R = lil_matrix((wb_id, wd_id))
y_wb = np.zeros((wb_id, 1))
y_wd = np.zeros((wd_id, 1))
print 'wb_id:%s\twd_id:%s' % (wb_id, wd_id)
for key, info in graph.nodes(data=True):
if info['tp'] == 'weibo':
continue
if info['tp'] == 'user':
weight = 1.0
neis = graph.neighbors(key)
for i in range(len(neis)):
for j in range(len(neis))[i+1:]:
nod1 = graph.node[neis[i]]
nod2 = graph.node[neis[j]]
if nod1['tp']=='weibo' and nod2['tp']=='weibo':
W[nod1['id'], nod2['id']] += weight
W[nod2['id'], nod1['id']] += weight
elif info['tp'] == 'word':
for nod in graph.neighbors(key):
if graph.node[nod]['tp'] == 'weibo':
id1 = graph.node[nod]['id']
id2 = info['id']
R[id1, id2] += 1.0
if key in topic_words:
y_wd[graph.node[key]['id'], 0] = 1.0
return W, R, y_wb, y_wd
def rank(self):
W, R, y_wb, y_wd = self._adj_mat(self.graph, self.topic_words)
logging.info("make adjacent matrix over, and labeled %d words" % y_wd.sum())
D = lil_matrix(W.shape)
D_d = lil_matrix((R.shape[0], R.shape[0]))
D_t = lil_matrix((R.shape[1], R.shape[1]))
_sum = W.sum(1)
for _i in range(W.shape[0]):
if _sum[_i,0] != 0:
D[_i, _i] = _sum[_i,0]**(-0.5)
_sum = R.sum(1)
for _i in range(R.shape[0]):
if _sum[_i,0] != 0:
D_d[_i, _i] = _sum[_i,0]**(-0.5)
_sum = R.sum(0)
for _i in range(R.shape[1]):
if _sum[0, _i] != 0:
D_t[_i, _i] = _sum[0,_i]**(-0.5)
Sw = D.dot(W).dot(D)
Sr = D_d.dot(R).dot(D_t)
f = np.zeros(y_wb.shape)
alpha, beta, mu, eta = self.alpha, self.beta, self.mu, self.eta
for _iter in range(self.max_iter):
logging.info('iter : %d' % _iter)
f = (1.0/(1-beta))*(mu*Sw+eta*eta/(beta+eta)*Sr.dot(Sr.T)).dot(f) \
+ alpha/(1-beta)*y_wb + beta*eta/(1-beta)/(beta+eta)*Sr.dot(y_wd)
for key, node in self.graph.nodes(data=True):
if node['tp'] == 'weibo':
self.ranks[key] = f[node['id']]
def test(self, verbose=False):
sorted_r = sorted(self.ranks.iteritems(), key=operator.itemgetter(1), reverse=True)
found=0; tot=0; cost=.0
for w_id, weight in sorted_r:
wb = Weibo.objects.get(w_id=w_id)
tot += 1
if wb.real_category==1:
found += 1
cost += math.log(tot-found+1)
if verbose:
logging.info("%s\t%s\t%s" % (wb.real_category, weight, wb.text[:30]))
return cost
if __name__ == '__main__':
if len(sys.argv)<2:
print '''Expected input format: %s graph [-t topic] [-m max_iter]
graph: graph file path
-t: specify topic words
topic: topic words separated by '-', default with ML words
-m: specify max iter count
max_iter: max iter count, default with 20
''' % sys.argv[0]
sys.exit(1)
load_path = sys.argv[1]
topic_words=Config._ML_WORDS
max_iter=20
_id = 2
while _id<len(sys.argv)-1:
if sys.argv[_id]=='-t':
topic_words = sys.argv[_id+1].decode('utf-8')
elif sys.argv[_id]=='-m':
max_iter = int(sys.argv[_id+1])
_id += 2
G = du.load_graph(load_path)
for mu, eta, beta in [(.1,.1,.8), (.1,.3,.6), (.3,.1,.6), (.2,.4,.4), (.4,.2,.4)]:
mr = MultiRank(G, topic_words=topic_words, max_iter=max_iter,
alpha=.0, beta=beta, mu=mu, eta=eta)
mr.rank()
cost = mr.test(verbose=False)
logging.info("cost=%s \t mu=%s, eta=%s, beta=%s" % (cost, mu, eta, beta))
|
Here is how to refill the Canon CL-51 cartridge; follow the steps, and if you have questions, leave a comment.
These ink cartridges offer high performance and good print quality. We have previously seen how to refill the Canon PG-30 black ink cartridge; both cartridges can take about 3 refills and still work perfectly. It is recommended to put about 6 ml of ink of each color into these cartridges; for the color layout, you can take the image of a cartridge below as a reference.
The procedure for filling the Canon CL-51 ink cartridge is simple: we just have to supply the ink slowly, as with all ink cartridges. As always, we recommend setting the cartridge on a piece of fabric to catch any spilled ink; use a cloth you will not need in the future.
Another factor to consider is ink spilling onto our hands or clothes; we need to know how to remove ink stains. Canon CL-51 cartridges are compatible with the iP1700, MP150, MP160, MP170, MP450, MP460, MX300 and MX310 printers.
We recommend using water-based ink, which you can easily get at any computer shop; if you can obtain pigment ink, even better.
If you decide to fill through the hole, do it very carefully so that the needle does not reach the cartridge's sponge. Remember to use three syringes, one for each ink; you cannot use the same syringe for all colors.
It is recommended to insert the syringe in the way we have previously outlined: slowly, firmly and steadily, for each of the holes in the cartridge.
Remember that this cartridge holds exactly 6 ml of ink per color, in the combination shown in the image above. If necessary, and you are not sure where the colors go, wipe the head of the cartridge with a napkin.
After filling the cartridge, we recommend cleaning it properly to prevent damage to the connectors and subsequent errors; we also recommend printing a test page to verify that it works.
In case the printer does not recognize the ink or said ink cartridge has not ink there is not problem this is normal. Also it may be that when printing says that there is a problem with the refilled cartridge, to fix this you have to reset the Canon printer.
|
#!/usr/bin/env ../../jazzshell
from subprocess import PIPE, Popen, STDOUT
from optparse import OptionParser
import os, csv
from jazzparser.utils.config import ConfigFile
BASE_TRAINING_OPTIONS = """
# Model type
%% ARG 0 ngram-multi
# Input data
%% ARG 2 %{PROJECT_ROOT}/input/fullseqs
# Input type specification
filetype = bulk-db-annotated
# Train for cross-evaluation
partitions = 10
# Don't use a cutoff on any backoff models
opts = backoff_cutoff=0
"""
BASE_TEST_OPTIONS = """
%% ARG 0 ngram-multi
%% ARG 2 %{PROJECT_ROOT}/input/fullseqs
partitions = 10
"""
BASE_ENTROPY_OPTIONS = BASE_TEST_OPTIONS + "+entropy\n"
BASE_ACCURACY_OPTIONS = BASE_TEST_OPTIONS + "+agreement\n"
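# The "%% ARG <n> <value>" lines in these templates are ConfigFile directives:
# once a template is parsed with ConfigFile.from_string(...), get_strings()
# turns them into positional command-line arguments for train.py / tageval.py.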
def output_proc(proc):
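    # Stream the child process's stdout line by line, echoing each line as it
    # arrives, and return the full captured output.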
output = ""
line = proc.stdout.readline()
while line:
output += line
print line.strip("\n")
line = proc.stdout.readline()
return output
def main():
usage = "%prog [options]"
description = "Trains a suite of ngram models and tests them all"
parser = OptionParser(usage=usage, description=description)
parser.add_option('-n', '--no-train', dest="no_train", action="store_true", help="don't train the models. Only do this if you've previously used this script to train all the models")
parser.add_option('--train', '--only-train', dest="only_train", action="store_true", help="only train the models, don't do the experiments")
parser.add_option('--bt', '--bigram-trigram', dest="bigram_trigram", action="store_true", help="only include bigram and trigram models")
parser.add_option('-t', '--trigram', dest="trigram", action="store_true", help="only include trigram models")
parser.add_option('--wb', '--witten-bell', dest="witten_bell", action="store_true", help="only use witten-bell smoothing (skip laplace)")
parser.add_option('--lap', '--laplace', dest="laplace", action="store_true", help="only use laplace smoothing (skip witten-bell)")
parser.add_option('-v', '--viterbi', dest="viterbi", action="store_true", help="use Viterbi decoding")
parser.add_option('-4', '--4grams', dest="fourgrams", action="store_true", help="run experiments for 4-gram models")
parser.add_option('-c', '--cutoff', dest="cutoff", action="store", type="int", help="custom cutoff to use, instead of trying several")
parser.add_option('--gt', '--good-turing', dest="good_turing", action="store_true", help="only use Good-Turing smoothing (not usually included)")
options, arguments = parser.parse_args()
cmd_dir = os.path.abspath("..")
train_cmd = "./train.py"
tageval_cmd = "./tageval.py"
if options.bigram_trigram:
orders = [2, 3]
elif options.trigram:
orders = [3]
elif options.fourgrams:
orders = [4]
else:
orders = [1, 2, 3]
if options.witten_bell:
smoothings = [("witten-bell", "wb")]
elif options.laplace:
smoothings = [("laplace", "lap")]
elif options.good_turing:
smoothings = [("simple-good-turing", "gt")]
else:
smoothings = [("witten-bell", "wb"), ("laplace", "lap")]
if options.cutoff is None:
cutoffs = [0, 2, 5]
else:
cutoffs = [options.cutoff]
# Open a CSV file to write the results to
with open("test_suite.csv", "w") as result_file:
results = csv.writer(result_file)
results.writerow(["Order", "Cutoff", "Smoothing", "Entropy", "Agreement"])
for model_order in orders:
for cutoff in cutoffs:
for smoothing,smoothing_short in smoothings:
#for chord_map in ["none", "small", "big"]:
print "\n#####################################################"
print "### Order %d, cutoff %d, smoothing %s ###" % (model_order, cutoff, smoothing)
# Build a unique name for the model
model_name = "suite_n%d_c%d_%s" % (model_order, cutoff, smoothing_short)
# Train the model
if not options.no_train:
# Prepare options to train the model
model_options = "n=%d:cutoff=%d:backoff=%d:estimator=%s" % \
(model_order, cutoff, model_order-1, smoothing)
training_opts = BASE_TRAINING_OPTIONS + \
"opts = %s\n%%%% ARG 1 %s" % (model_options, model_name)
# Turn these nice option specifications into command-line args
conf = ConfigFile.from_string(training_opts)
# Train this model
#train_output = check_output([train_cmd]+conf.get_strings(), cwd=cmd_dir)
train_proc = Popen([train_cmd]+conf.get_strings(),
cwd=cmd_dir, stdout=PIPE, stderr=STDOUT)
output_proc(train_proc)
if not options.only_train:
# Entropy doesn't tell us much for Viterbi decoding
if not options.viterbi:
# Test the model's entropy
print "### Entropy ###"
entropy_opts = BASE_ENTROPY_OPTIONS + "%%%% ARG 1 %s" % model_name
conf = ConfigFile.from_string(entropy_opts)
entropy_proc = Popen([tageval_cmd]+conf.get_strings(),
cwd=cmd_dir, stdout=PIPE, stderr=STDOUT)
# Output as we go
output = output_proc(entropy_proc)
# Get the last line and pull out the entropy value
last_line = output.strip("\n").rpartition("\n")[2]
entropy = float(last_line.split()[0])
else:
entropy = 0.0
# Test the model's top tag accuracy
print "\n### Agreement ###"
accuracy_opts = BASE_ACCURACY_OPTIONS + "%%%% ARG 1 %s" % model_name
if options.viterbi:
accuracy_opts += "\ntopt = decode=viterbi"
conf = ConfigFile.from_string(accuracy_opts)
accuracy_proc = Popen([tageval_cmd]+conf.get_strings(),
cwd=cmd_dir, stdout=PIPE, stderr=STDOUT)
# Output as we go
output = output_proc(accuracy_proc)
# Get the last line and pull out the agreement value
last_line = output.strip("\n").rpartition("\n")[2]
agreement = float(last_line.split()[-1].strip("()%"))
results.writerow(["%d" % model_order,
"%d" % cutoff,
"%s" % smoothing,
"%f" % entropy,
"%f" % agreement])
# Flush the file object so each result appears in the
# file immediately
result_file.flush()
if __name__ == "__main__":
main()
|
My dad is the non-CS guy who nonetheless has become his library department’s computer expert, simply because they discovered that when they sent him to training sessions, he came back knowing everything the session was supposed to teach, remembered it well, could build on it through judicious experimentation, and could pass it on to others in the department, which made him really weird in their experience.
I have had some similar frustrations with open-source software. At this stage in my life, I am not interested in spending time on making my computer work; I want it to be a dependable tool, not a fun, nifty, time-sucking gadget. The price tag to open-source software is appealing, but the lack of support designed for individuals and the fact that you usually have to decide among a multitude of updates and add-ons when installing is generally enough to send me for the generic commercially-available software, where I may be annoyed at the lack of options, but at least I’m not overwhelmed by too many decisions to be made.
I feel like I ought to defend open source software as useful, necessary, and morally good and therefore worth sticking with… but I can’t. So much of it is so terrible. Free and open source software developers reliably forget that the code is not the software, it is only part of the software. They spend so much of their time on polishing code (making it something that is worth using) and very little time on installation, documentation, and interface, let alone offering support as you say.
And that appealing price tag often has the appalling effect of driving other software out of the market — which may be why you are annoyed by the lack of options.
Yeah, the state of the documentation for Koha made me sad.
I don’t think, though, that I’m seeing the driving-out effect in re library software; open source is a *tiny* percent of market share (albeit conceptually very important), up against some long-established competitors with a lot of installed sites. And there seems to be high awareness that purchase price != total cost of ownership.
“not the software, only part of the software” is a nice frame.
Remind me what exactly Chris develops?
I’m curious what kinds of communication channels/strategies his employer has developed for ensuring that information gets from users to him, and from him and/or users to developers in a non-support capacity (if applicable). Any ideas there?
And yeah, your last paragraph is why I tossed Debian and got a Mac. All the command-line access, none of the requirement that I use it (unless I want to).
Chris designs and develops means for businesses to access the information they have stored in databases. Data warehousing and business intelligence are the buzzwords in his industry; I’m not sure if you would have heard either one.
Chris would also be quick to point out that he’s not actually a programmer anymore; rather than “writing code,” he uses specialized software to manipulate databases, extract data from them, and display the information in various ways that help users make decisions. To the average person, the difference is completely immaterial, and the distinction actually confusing, but it might be relevant (and understood) in this discussion.
Chris often meets with clients directly to discuss their needs, how they want things displayed, etc. At this point, he and his company have been doing this for long enough that they can often predict some of what will be useful, which is helpful in making sales presentations and starting the discussion. Discussing what the client needs/wants is the best reason for sending Chris out of town; it seems to work best face-to-face. Actually doing the work on-site is pretty pointless, and sometimes counterproductive; clients who insist on this are annoying, both because they tend to want to micromanage and because it requires developers to spend more time away from their families. But face-to-face meetings periodically throughout the process do seem to help fine-tune what the client wants, etc. The results of these various meetings get translated into specifications, designs, etc., for internal communication.
Most of the work Chris does is client-specific. Conversations with my dad lead me to believe that library software usually comes mostly pre-done, designed for the generic library system, with varying levels of personalization available depending on the vendor and how much you’re willing to pay them. I know much less about how that process works.
John, you can’t assume that all open source is good – just like not all proprietary software is good. But you can’t assume that it’s all bad either – when I teach open source for librarians I make it very clear that your evaluation of open source software should be no different from your evaluation of proprietary software.
Andromeda, we talked about the Koha documentation already on this blog. As documentation manager – and someone who works nearly full time on the manual – I find it pretty annoying to have a non-Koha user constantly bashing my work – when those using Koha are extremely grateful and appreciative of the manual and how it has helped them.
On the issue of the post – I think the fact that the library open source products out there have a smaller programmer base is a good thing. In your question above about the communication issues – how is that any different than when you use a proprietary product? Do the developers at Microsoft know how you want to use Word? Do the developers at proprietary ILSes use the software they’re developing on a day to day basis? At least in the open source arena you know the names of the primary developers and how to contact them – and you’d be surprised how few of these developers have no library experience – most of them started in the library world in one way or another and so have a feel for how libraries are run and how they work.
I think that the fact that these software products have mailing lists and chat rooms open to developers and users alike show that everyone is willing to work together to better the product – even if the developers are no longer sitting in a library setting day-to-day (although many are).
Don’t know if that answered your question, but I sure hope so. The problem as I see it is that everyone seems to think that open source proponents are saying that all open source is great – and we’re not – we’re saying that all software should be open source 🙂 but that’s another rant for another day.
I admit I didn’t want to call out the documentation lest you were reading this, but the fact is it consistently failed to have the answers to the questions I was asking. Maybe it is strong in the parts that active users find themselves using a lot (I’d be surprised if it’s not, as a participatory project), but from my perspective as a learner, and given the particular problems our install had, neither I nor my classmates found it adequate. Perhaps someday I will be in a position to improve it :).
In what way do you think a smaller programmer base is a positive?
As for the communication issues — I don’t think it’s different from when people use proprietary software (and insofar as people developing such software don’t know about its use, actual and desired, that’s a problem) — what I’m wondering about is the difference between how the open source development community works for ILSes and how it works for things like Linux (where I’m more familiar with the latter because I know a lot of software engineers, many of whom have made at least the occasional contribution to some open source project or other). Certainly in the conversation I was thinking about, my software-engineer husband assumed there would be much more overlap between users and developers of open-source ILSes than I assumed there would be, so I’m curious about both what the community is actually shaped like, and how the community and process differ from more-familiar-to-me open source communities and processes if my assumption is correct.
The smaller programmer base means a more intimate community – like I said you know the names of and the contact info for every developer and they will all help you in any way possible (in my experience).
I apologize for misunderstanding the question. I don’t actually know the exact make up of the community working on Koha but I do think your husband is right, there is a lot of overlap (more than Linux – I’m not sure because I’m not a member of that community).
See, this is interesting to me in that the received wisdom I hear about open source development, from the codemonkeys in my life, is that a huge development community is an asset, because it means more eyes on the code, more chances to catch bugs, more capacity for development.
In short – that rant means … there is no real answer to your question – as librarians we must evaluate all aspects of the software before making the decision.
|
###############################################################################
#
# file: __init__.py
#
# Purpose: refer to module documentation for details
#
# Note: This file is part of Termsaver application, and should not be used
# or executed separately.
#
###############################################################################
#
# Copyright 2012 Termsaver
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
###############################################################################
"""
This module holds base classes that are used by all screens within termsaver
application. Each individual "screen" represents a unique screensaver that can
be triggered by termsaver.
The base classes available in this package are:
* `ScreenBase`: the most basic screen class, which will handle simple
interaction with the terminal.
* `filereader.FileReaderBase`: handles screens that require recursive
directory scanning for files to be printed out
* `urlfetcher.UrlFetcherBase`: handles screens that require Internet
connectivity.
* `urlfetcher.SimpleUrlFetcherBase`: similar to `UrlFetcherBase`,
with simpler options (to avoid overhead to build your own argument
parsing, and usage documentation)
* `rssfeed.RSSFeedScreenBase`: handles RSS parsing from Internet.
* `rssfeed.SimpleRSSFeedScreenBase`: similar to `RSSFeedScreenBase`,
with simpler options (to avoid overhead to build your own argument
parsing, and usage documentation)
Build your own screen
=====================
It is very simple to inherit from these base classes and create your own
screen. See some of the examples implemented here already. Basically, you will
need to:
* define a name and description for your screen (class instantiation)
and keep them as short as possible (avoid too much typing)
* if applicable, define your command-line usage guidelines and options
  (see `cli_opts`) and override the `_parse_args` method.
Create your help/usage text by overriding the `_usage_options_example`
method.
* build your action by overriding the `_run_cycle` method, if applicable
  (the base class will be triggered by the `autorun` method, which loops
  indefinitely or until there is a keyboard interrupt (Ctrl+C)); see the
  sketch below.
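
A minimal sketch of a custom screen (the class, name, and strings below are
illustrative, not part of termsaver itself):

    from termsaverlib.screen import ScreenBase

    class HelloScreen(ScreenBase):
        def __init__(self):
            super(HelloScreen, self).__init__(
                'hello', 'displays a simple greeting',
                {'opts': 'h', 'long_opts': ['help']})

        def _run_cycle(self):
            print "hello from termsaver"
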
Before you start, though, I strongly advise you to check out the code here
thoroughly, to avoid reinventing the wheel in parts that are already covered.
Additionally, consistency is important, so try to keep the same concept of how
things are done here... Well, if you have a better idea, I am very open to
adapting (but then, instead of making a mess, we would change it all so that
it stays consistent).
"""
#
# Python built-in modules
#
import os
import getopt
import sys
#
# Internal modules
#
from termsaverlib import common, constants, exception
from termsaverlib.screen.helper import ScreenHelperBase
from termsaverlib.i18n import _
class ScreenBase(ScreenHelperBase):
"""
This is the main screen that all screens must inherit in order to be part
of the screensaver list, accessible with termsaver command-line options.
When inheriting this to your own screen, remember to override the
following methods:
* `_run_cycle`: define here the algorithm to display a text-based
look-alike screensaver. See other classes for example on how to
use this.
* `_usage_options_example`: print out here the options and examples
on how to use your screen. See other classes for examples on how
to use this.
* `_parse_args`: from a properly parsed (using getopt) argument list,
customize the configuration of your screen accordingly
That's all you need to do!
Additionally, you can also call the following helper methods:
* `screen_exit`: if by any reason you need to close the application
(remember, in most cases, you can just rely on throwing exceptions
that are understood by termsaver application, available in
`termsaverlib.exception` module)
* `log` : if you need to write anything on screen before or after a
screen cycle, you can do it in style by calling this method, which
will inform the screen as a prefix to the message being displayed
on screen.
You can also use the following optional property:
* `cleanup_per_cycle`: Defines if the screen should be cleaned up for
every rotation cycle (new file).
IMPORTANT:
All other methods are not to be tampered with!
"""
name = ''
"""
Defines the name of the screen.
"""
description = ''
"""
Defines the description (short) of the screen.
"""
cli_opts = {}
"""
Defines the getopt format command-line options of the screen. It should be
an object in the following structure:
cli_opts = {
'opts': 'h',
'long_opts': ['help',],
}
"""
cleanup_per_cycle = False
"""
Defines if the screen should be cleaned up for every rotation cycle
(new file).
"""
def __init__(self, name, description, cli_opts):
"""
The basic constructor of this class. You need to inform basic
information about your screen:
* `name`: describes the name of the screen (try to keep it short,
and/or abbreviated, as much as possible)
* `description`: a brief (very brief) description of what the screen
does (if you need to write more documentation about
it, you can rely on man docs for that)
* `cli_opts`: the command line options that will be available for
your screen (use getopt formatting)
"""
self.name = name
self.description = description
self.cli_opts = cli_opts
def autorun(self, args, loop=True):
"""
The accessible method for dynamically running a screen.
This method will basically parse the arguments, prepare them with
the method `_parse_args` that is inherited in sub-classes, and with
the property `cli_opts` that holds the formatting of the arguments.
Once all is ready to go, this will call the `_run_cycle` method, which
is filled in the sub-classes with the algorithms to display text on
screen to behave as a screensaver.
The arguments of this method are:
* args: (MANDATORY) the arguments passed when termsaver is executed
from command-line. See `termsaver` script for details.
* loop: (OPTIONAL) defines if termsaver should be executing on an
infinite looping (goes on until the keyboard interrupt
(Ctrl+C) is pressed), or not. This is up to the screen
action (or the end user, through a configurable setting) to decide.
"""
# prepare values and validate
if not args:
args = ''
        if not self.cli_opts:
            self.cli_opts = {}
        if not self.cli_opts.get('opts'):
            self.cli_opts['opts'] = ''
        if not self.cli_opts.get('long_opts'):
            self.cli_opts['long_opts'] = []
        else:
            if not isinstance(self.cli_opts['long_opts'], list) or \
                    not all(isinstance(i, str)
                            for i in self.cli_opts['long_opts']):
#
# Don't worry too much about errors here. This is supposed to
# help developers while programming screens for this app.
#
raise Exception("Value of 'long_opts' in cli_opts dict MUST "\
"be a list of strings.")
try:
self._parse_args(getopt.getopt(args, self.cli_opts['opts'],
self.cli_opts['long_opts']))
except getopt.GetoptError, e:
raise exception.InvalidOptionException("", str(e))
# execute the cycle
self.clear_screen()
while(loop):
try:
self._run_cycle()
except KeyboardInterrupt, e:
#
# do some cleanup if applicable
#
self._on_keyboard_interrupt()
raise e
# Clear screen if appropriate
if self.cleanup_per_cycle:
self.clear_screen()
def _run_cycle(self):
"""
Executes a cycle of this screen. This base class actually does not hold
any special actions to begin with, but executing it from inheriting
classes is also a good practice, to allow future implementations that
must be taken from a base class.
"""
pass
@staticmethod
def usage_header():
"""
Simply prints a header information, used with the `usage` method.
See also `usage` method for details.
"""
print """%(app_title)s v.%(app_version)s - %(app_description)s.
""" % {
'app_title': constants.App.TITLE,
'app_version': constants.App.VERSION,
'app_description': constants.App.DESCRIPTION,
}
@staticmethod
def usage_footer():
"""
Simply prints a footer information, used with the `usage` method.
See also `usage` method for details.
"""
print """--
See more information about this project at:
%(url)s
Report bugs to authors at:
%(source_url)s
""" % {
'url': constants.App.URL,
'source_url': constants.App.SOURCE_URL,
}
def _usage_options_example(self):
"""
Describe here the options and examples of your screen.
See some examples of already implemented base screens so you can
write similar stuff on your own, and keep consistency.
"""
pass
def usage(self):
"""
Defines the usage information that is presented when a user hits the
help option. You should not override this method directly; instead, just
override the protected method `_usage_options_example`, created for
this purpose. All other stuff will be defined by the `usage_header` and
`usage_footer` methods.
"""
# header
self.usage_header()
print _("""Screen: %(screen)s
Description: %(description)s
Usage: %(app_name)s %(screen)s [options]""") % {
'app_name': constants.App.NAME,
'screen': self.name,
'description': self.description,
}
# any additional info in between (see other classes for reference)
self._usage_options_example()
#footer
self.usage_footer()
def _parse_args(self, prepared_args):
"""
(protected) MUST be overridden in inheriting classes, to deal with
special arguments that will customize values for them.
"""
pass
def screen_exit(self, error=0):
"""
Exits the screen (and finishes the application) with a specific error.
If none is informed, it exits as successful (error 0).
"""
sys.exit(error)
def log(self, text):
"""
Prints a log message on screen in the format:
%(app_name)s.%(screen)s: %(message)s
"""
print "%s.%s: %s" % (constants.App.NAME, self.name, text)
def _on_keyboard_interrupt(self):
"""
Executes extra commands if the keyboard interrupt exception happened
while running a cycle.
"""
pass
|
Image Description: Fine-tip pen drawing. From a top left diagonal, one hand passes a small card to another hand. Card has the word, "Ableism" written in the center. Drawn image of hands featured on a ground of crosshatch marks.
There are multiple ways to access the Calling Card for Ableism.
Select the printable card if you would like to download and print a copy of the Calling Card.
Or, download and save the digital card as a photo on your mobile device or tablet.
Disclaimer: By downloading the "Print Card", or "Digital Card", I acknowledge that this card should be used with care and caution, and that I am responsible for the use and delivery of this card, as well as any consequences that may arise out of its use.
|
"""
Python Interchangeable Virtual Instrument Driver
Copyright (c) 2017 Coburn Wightman
derived from agilent436a.py driver by:
Copyright (c) 2012-2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .. import ivi
from .. import counter
from .. import vxi11
#import time
# Parameter Values
ChanNameMap = {0 : 'A', 1 : 'B', 2 : 'C'}
Units = set(['Sec', 'Hz', 'Volts'])
Operator = set(['none', 'difference', 'sum', 'quotient'])
RangeType = set(['in_range', 'under_range', 'over_range'])
OperationState = set(['complete', 'in_progress', 'unknown'])
MeasurementFunction = set(['frequency',
'period',
'pulse_width',
'duty_cycle',
'edge_time',
'frequency_ratio',
'time_interval',
'totalize_continuous',
'totalize_gated',
'totalize_timed',
'invalid'])
MeasurementFunctionMap = {'frequency' : 'FN', # fn1, fn2, fn3 is a, b, and c channel
'period': 'FN4',
'time_interval' : 'FN5',
'time_interval_delay' : 'FN6',
'frequency_ratio' : 'FN7',
'total_stop' : 'FN8', # non standard
'total_start' : 'FN9', # non standard
'pulse_width' : 'FN10',
'edge_time' : 'FN11',
#'dc_voltage' : 'FN12',
#'trigger_voltage' : 'FN13',
#'peak_to_peak_voltage' : 'FN14',
#'totalize_timed' : 'x',
#'totalize_gated' : 'xx',
'invalid' : 'inv'}
ErrorMessages = { 0 : 'No error', # keys are the device's code x 10 (divide by 10 for the code the device displays)
10 : 'Parameter disallowed in present mode',
11 : 'Attenuators controlled by AUTO TRIG',
12 : '50-ohm B, AC B settings preset by COM A',
13 : 'Slope B set by Slope A in Rise/Fall mode',
14 : 'Parameter disallowed in High Speed mode',
15 : 'Calibration data unaccessible in present mode',
20 : 'Invalid key entry',
21 : 'Data outside valid range',
22 : 'Data exceeds maximum resolution',
23 : 'Mantissa digit buffer full',
24 : 'Decimal point previously entered',
30 : 'Multiple key closures',
40 : 'Mnemonic not recognizable',
41 : 'Numeric syntax error',
42 : 'Alpha character expected',
43 : 'Data exceeds valid range',
44 : 'Attention (ATN) asserted in Talk-Only mode',
50 : 'Store instrument setup operation failed', #50.X where x is the register number: 0-9
51 : 'Recall instrument setup operation failed', #51.X
52 : 'HP-IB address cannot be recalled at power up; address default to 03'}
class agilentBase5334(ivi.Driver, counter.Base):
"Agilent HP5334 Series IVI Universal Counter driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', '')
super(agilentBase5334, self).__init__(*args, **kwargs)
self._identity_description = "Agilent HP5334 Universal Counter driver"
self._identity_identifier = ""
self._identity_revision = ""
self._identity_vendor = ""
self._identity_instrument_manufacturer = "Agilent Technologies"
self._identity_instrument_model = ""
self._identity_instrument_firmware_revision = ""
self._identity_specification_major_version = 1
self._identity_specification_minor_version = 0
self._identity_supported_instrument_models = ['HP5334A','HP5334B']
self._init_defaults()
self._init_channels()
def _initialize(self, resource = None, id_query = False, reset = False, **keywargs):
"Opens an I/O session to the instrument."
super(agilentBase5334, self)._initialize(resource, id_query, reset, **keywargs)
# configure interface
if self._interface is not None:
self._interface.term_char = '\n'
# interface clear
if not self._driver_operation_simulate:
self._clear()
# verify instrument model matches
if id_query and not self._driver_operation_simulate:
id = self.identity.instrument_model
id_check = self._instrument_id
id_short = id[:len(id_check)]
if id_short != id_check:
                raise Exception("Instrument ID mismatch, expecting %s, got %s" % (id_check, id_short))
# reset
if reset:
self.utility_reset()
def _load_id_string(self):
self._set_cache_valid(False, 'identity_instrument_manufacturer')
self._set_cache_valid(False, 'identity_instrument_model')
self._set_cache_valid(False, 'identity_instrument_firmware_revision')
idstr = "HP5334S"
if not self._driver_operation_simulate:
idstr = self._ask("ID")
if idstr.find('HP') == 0:
self._identity_instrument_manufacturer = 'Agilent'
self._set_cache_valid(True, 'identity_instrument_manufacturer')
self._identity_instrument_model = idstr
self._identity_instrument_firmware_revision = 'Cannot query from instrument'
self._set_cache_valid(True, 'identity_instrument_model')
self._set_cache_valid(True, 'identity_instrument_firmware_revision')
def _get_identity_instrument_manufacturer(self):
if self._get_cache_valid('identity_instrument_manufacturer'):
return self._identity_instrument_manufacturer
self._load_id_string()
return self._identity_instrument_manufacturer
def _get_identity_instrument_model(self):
if self._get_cache_valid('identity_instrument_model'):
return self._identity_instrument_model
self._load_id_string()
return self._identity_instrument_model
def _get_identity_instrument_firmware_revision(self):
if self._get_cache_valid():
return self._identity_instrument_firmware_revision
self._load_id_string()
return self._identity_instrument_firmware_revision
def _utility_disable(self):
pass
def _utility_error_query(self):
error_code = 0
error_message = "No error"
try:
error_code = self._ask("TE")
error_code = float(error_code) * 10
            if 50 <= error_code < 52:
                # Codes 50.X and 51.X encode the setup register number as X
                base_code = int(error_code)
                regnum = int((error_code - base_code) * 10.01)
                error_code = base_code
                error_message = ("Register " + str(regnum) + ' '
                                 + ErrorMessages[error_code])
            else:
                error_message = ErrorMessages[error_code]
except vxi11.vxi11.Vxi11Exception as err:
error_message = err.msg
error_code = -1
except ValueError:
error_message = "bad error code: " + str(error_code)
error_code = -1
except KeyError:
error_message = "undefined error code: " + str(error_code)
error_code = -1
return (int(error_code), error_message)
def _utility_lock_object(self):
pass
def _utility_unlock_object(self):
pass
def _utility_reset(self):
#if not self._driver_operation_simulate:
self._write("IN")
self._clear()
self.driver_operation.invalidate_all_attributes()
self._init_defaults()
def _utility_reset_with_defaults(self):
self._utility_reset()
def _utility_self_test(self):
raise ivi.OperationNotSupportedException()
def _init_defaults(self):
self._measurement_function = 'frequency'
self.driver_operation.invalidate_all_attributes()
        self._frequency_aperture_time = 0.3
        self._period_aperture_time = 0.3
        self._time_interval_resolution = 1e-9
def _init_channels(self):
try:
super(agilentBase5334, self)._init_channels()
except AttributeError:
pass
self._channel_name = list()
self._channel_impedance = list()
self._channel_coupling = list()
self._channel_attenuation = list()
self._channel_level = list()
self._channel_hysteresis = list()
self._channel_slope = list()
self._channel_filter_enabled = list()
self._channel_count = 3
for i in range(self._channel_count):
self._channel_name.append(ChanNameMap[i])
self._channel_impedance.append(1e6)
self._channel_coupling.append('dc')
self._channel_attenuation.append(1)
self._channel_level.append(-50)
self._channel_hysteresis.append(0)
self._channel_slope.append('positive')
self._channel_filter_enabled.append(False)
self.channels._set_list(self._channel_name)
# Chan C not settable, override defaults
self._channel_impedance[2] = 50
self._channel_coupling[2] = 'ac'
def _get_channel_impedance(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_impedance[index]
    def _set_channel_impedance(self, index, value):
        index = ivi.get_index(self._channel_name, index)
        if index > 1:
            raise ivi.SelectorNameException()
value = float(value)
#if not self._driver_operation_simulate:
if value > 99:
self._write(ChanNameMap[index] + "Z0") # set to 1meg
self._channel_impedance[index] = 1e6
else:
self._write(ChanNameMap[index] + "Z1") # set to 50ohm
self._channel_impedance[index] = 50
def _get_channel_coupling(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_coupling[index]
    def _set_channel_coupling(self, index, value):
        index = ivi.get_index(self._channel_name, index)
        if index > 1:
            raise ivi.SelectorNameException()
if value not in counter.Coupling:
raise ivi.ValueNotSupportedException()
if value == "ac":
self._write(ChanNameMap[index] + "A1") # ac
else:
self._write(ChanNameMap[index] + "A0") # dc
self._channel_coupling[index] = value
def _get_channel_attenuation(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_attenuation[index]
    def _set_channel_attenuation(self, index, value):
        index = ivi.get_index(self._channel_name, index)
        if index > 1:
            raise ivi.SelectorNameException()
value = float(value)
if value == 1:
self._write(ChanNameMap[index] + "X0") # x1
elif value == 10:
self._write(ChanNameMap[index] + "X1") # x10
else:
raise ivi.ValueNotSupportedException("attenuation must be '1' or '10'")
self._channel_attenuation[index] = value
def _get_channel_level(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_level[index]
    def _set_channel_level(self, index, value):
        index = ivi.get_index(self._channel_name, index)
        if index > 1:
            raise ivi.SelectorNameException()
value = float(value)
max_atten = 10
if value > 4.999 * max_atten:
# set instrument to manual trigger (front panel knobs)
self._write('AU0')
elif value < -4.999 * max_atten:
# set instrument to automatic trigger
self._write('AU1')
elif self._get_identity_instrument_model() == 'HP5334A':
# set A instrument trigger dac values
self._write(ChanNameMap[index] + "T" + value)
else:
# B instrument has no dac. ignore for now.
pass
self._channel_level[index] = value
def _get_channel_hysteresis(self, index):
index = ivi.get_index(self._channel_name, index)
        return self._channel_hysteresis[index]
def _set_channel_hysteresis(self, index, value):
index = ivi.get_index(self._channel_name, index)
value = float(value)
self._channel_hysteresis[index] = value
def _get_channel_slope(self, index):
index = ivi.get_index(self._channel_name, index)
return self._channel_slope[index]
    def _set_channel_slope(self, index, value):
        index = ivi.get_index(self._channel_name, index)
        if index > 1:
            raise ivi.SelectorNameException()
if value not in counter.Slope:
raise ivi.ValueNotSupportedException()
if value == "positive":
self._write(ChanNameMap[index] + "S0") # positive
else:
self._write(ChanNameMap[index] + "S1") # negative
self._channel_slope[index] = value
def _get_channel_filter_enabled(self, index):
index = ivi.get_index(self._channel_name, index)
if index != 0:
raise ivi.ValueNotSupportedException()
return self._channel_filter_enabled[index]
def _set_channel_filter_enabled(self, index, value):
        index = ivi.get_index(self._channel_name, index)
        if index != 0:
            raise ivi.SelectorNameException()
if value == True:
self._write("FI1") # 100khz filter on (a channel only)
else:
self._write("FI0") # filter off.
self._channel_filter_enabled[index] = value
# totalize
def _totalize_continuous_configure(self, channel):
if channel != 0:
raise ivi.SelectorNameException()
else:
self._totalize_continuous.channel = channel
def _totalize_continuous_fetch_count(self):
return self._measurement_fetch()
def _totalize_continuous_start(self):
cmd = 'RE FN9'
self._write(cmd)
def _totalize_continuous_stop(self):
cmd = 'FN8'
self._write(cmd)
# measurement
def _set_measurement_function(self, value): # override to limit functionality
if value not in MeasurementFunction:
raise ivi.ValueNotSupportedException()
self._measurement_function = value
def _measurement_is_measurement_complete(self): # counter.py version of get_state?
return True
def _measurement_abort(self):
self._write("RE")
#self._clear()
def _measurement_fetch(self):
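        # A leading 'O' in the response marks an overflow/overrange reading;
        # otherwise the value is parsed from the numeric field that follows.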
val = self._read()
if val[0] == 'O':
return float("inf")
f = float(val[1:19])
return f
def _measurement_initiate(self):
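        # Compose the device command string: a function code from
        # MeasurementFunctionMap plus a gate-time or resolution suffix,
        # e.g. 'FN1GA0.3' for a frequency measurement on channel A.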
if self._measurement_function == 'frequency' :
func = MeasurementFunctionMap[self._measurement_function] + repr(self._frequency_channel + 1)
gate = 'GA' + str(self._frequency_aperture_time)
cmd = func + gate
elif self._measurement_function == 'period' :
func = MeasurementFunctionMap[self._measurement_function]
gate = 'GA' + str(self._period_aperture_time)
cmd = func + gate
elif self._measurement_function == 'time_interval' :
func = MeasurementFunctionMap[self._measurement_function]
if self._time_interval_resolution == 1e-10:
gate = 'GV1'
else:
gate = 'GV0'
cmd = func + gate
elif self._measurement_function == 'frequency_ratio' :
cmd = MeasurementFunctionMap[self._measurement_function]
        else:
            # 'invalid' and any mapped function without special gate handling
            cmd = MeasurementFunctionMap.get(self._measurement_function,
                                             MeasurementFunctionMap['invalid'])
self._write(cmd)
def _measurement_read(self, maximum_time):
self._measurement_initiate()
return self._measurement_fetch()
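
# Minimal usage sketch (illustrative; the VISA resource string is an
# assumption, and a concrete model class built on this base would normally be
# instantiated instead of the base class itself):
#
#   counter = agilentBase5334("TCPIP::192.168.1.100::gpib0,3::INSTR")
#   counter._set_measurement_function('frequency')
#   counter._measurement_initiate()
#   print counter._measurement_fetch()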
|
Weights + Measures is located in Midtown and serves Breakfast in their Bakeshop and Brunch, Lunch and Dinner in their main dining room. Their Bakeshop offers a variety of pastries, fresh baked breads, hand cut pretzels and fantastic locally roasted coffee, lattes and teas.
Weights + Measures is a cozy Midtown restaurant with a “living room/dining room” style layout. The “living room” is where you’ll find their Bakeshop: a small space whose walls are lined with flaky crusts and soft breads, some filled with meat, some filled with sweet preserves. The “dining room” is where you have the option of sitting to enjoy your pastry and hot beverage or juice (6:00 am – 3:00 pm daily); it’s also the main seating area for brunch, lunch and dinner. This is just the spot for an early business meeting, and that’s why I was there. It was very peaceful. Their large wooden tables provide ample working space for small groups.
Common Bond is one of the only other places that I can use for a quick, early morning meeting. Though Common Bond has a tendency to be much noisier and parking can be a beast!
The Bakeshop also offers an extensive yet succinct beverage menu. I’m not a coffee drinker, and didn’t want my normal old hot cup of tea, so I had to choose what I wanted quickly. I decided on a cappuccino with almond milk. What I got was a masterpiece! I am always in awe of what I like to call “Barista Uchawi” (“magic” – Swahili). It takes a special type of person to make these lovely little designs in foam. They get all my respect!
Options like lattes, flat whites, and mochas are available, as well as hot chocolate for those who may want something hot but caffeine-free. Here is where you place your order, should you want a pastry or baked good. They also offer hot breakfast sandwiches! With so many options, it was hard to choose, especially with those huge pretzels glaring at me! Check out more below, as I’ve highlighted a little more of Weights + Measures’ sweet and savory Bakeshop!
These beauties give Easy Tiger a run for their money!
You have to get your hands on the Lemon Thyme!
Breads like sourdough make great French Open-Faced Sandwiches!
A quick breakfast or group meeting costs next to nothing at Weights + Measures. You can get a hot beverage and one or two breads for under 10 bucks. Another plus is the parking: there’s no circling or waiting in the morning, so stop here if you’re in a hurry.
|
import networkx as nx
import numpy as np
import methods.util.write_graph as write_graph
import methods.util.util as util
g = nx.DiGraph()
def prepare(line):
g.add_edge(line[1], line[2])
def do(filename_out, delimiter, mode, gephi_out):
edges_to_be_removed = remove_cycle_edges_by_mfas()
cycles_removed = util.remove_edges_from_network_graph(g, edges_to_be_removed)
write_graph.network_graph(filename_out, g, gephi_out=gephi_out, delimiter=delimiter)
return cycles_removed
def pick_from_dict(d, order="max"):
    min_k, min_v = None, float("inf")
    min_items = []
    max_k, max_v = None, float("-inf")
    max_items = []
for k, v in d.iteritems():
if v > max_v:
max_v = v
max_items = [(k, max_v)]
elif v == max_v:
max_items.append((k, v))
if v < min_v:
min_v = v
min_items = [(k, min_v)]
elif v == min_v:
min_items.append((k, v))
max_k, max_v = pick_randomly(max_items)
min_k, min_v = pick_randomly(min_items)
if order == "max":
return max_k, max_v
if order == "min":
return min_k, min_v
else:
return max_k, max_v, min_k, min_v
def pick_randomly(source):
np.random.shuffle(source)
np.random.shuffle(source)
np.random.shuffle(source)
return source[0]
def filter_big_scc(g, edges_to_be_removed):
# Given a graph g and edges to be removed
# Return a list of big scc subgraphs (# of nodes >= 2)
g.remove_edges_from(edges_to_be_removed)
sub_graphs = filter(lambda scc: scc.number_of_nodes() >= 2, nx.strongly_connected_component_subgraphs(g))
return sub_graphs
def get_big_sccs(g):
    g.remove_edges_from(list(g.selfloop_edges()))
num_big_sccs = 0
edges_to_be_removed = []
big_sccs = []
for sub in nx.strongly_connected_component_subgraphs(g):
number_of_nodes = sub.number_of_nodes()
if number_of_nodes >= 2:
# strongly connected components
num_big_sccs += 1
big_sccs.append(sub)
# print(" # big sccs: %d" % (num_big_sccs))
return big_sccs
def nodes_in_scc(sccs):
scc_nodes = []
scc_edges = []
for scc in sccs:
scc_nodes += list(scc.nodes())
scc_edges += list(scc.edges())
# print("# nodes in big sccs: %d" % len(scc_nodes))
# print("# edges in big sccs: %d" % len(scc_edges))
return scc_nodes
def scc_nodes_edges(g):
scc_nodes = set()
scc_edges = set()
num_big_sccs = 0
num_nodes_biggest_scc = 0
biggest_scc = None
for sub in nx.strongly_connected_component_subgraphs(g):
number_nodes = sub.number_of_nodes()
if number_nodes >= 2:
scc_nodes.update(sub.nodes())
scc_edges.update(sub.edges())
num_big_sccs += 1
if num_nodes_biggest_scc < number_nodes:
num_nodes_biggest_scc = number_nodes
biggest_scc = sub
nonscc_nodes = set(g.nodes()) - scc_nodes
nonscc_edges = set(g.edges()) - scc_edges
print("num nodes biggest scc: %d" % num_nodes_biggest_scc)
print("num of big sccs: %d" % num_big_sccs)
    if biggest_scc is None:
        return scc_nodes, scc_edges, nonscc_nodes, nonscc_edges
print("# nodes in biggest scc: %d, # edges in biggest scc: %d" % (
biggest_scc.number_of_nodes(), biggest_scc.number_of_edges()))
print("# nodes,edges in scc: (%d,%d), # nodes, edges in non-scc: (%d,%d) " % (
len(scc_nodes), len(scc_edges), len(nonscc_nodes), len(nonscc_edges)))
num_of_nodes = g.number_of_nodes()
num_of_edges = g.number_of_edges()
print(
"# nodes in graph: %d, # of edges in graph: %d, percentage nodes, edges in scc: (%0.4f,%0.4f), percentage nodes, edges in non-scc: (%0.4f,%0.4f)" % (
num_of_nodes, num_of_edges, len(scc_nodes) * 1.0 / num_of_nodes, len(scc_edges) * 1.0 / num_of_edges,
len(nonscc_nodes) * 1.0 / num_of_nodes, len(nonscc_edges) * 1.0 / num_of_edges))
return scc_nodes, scc_edges, nonscc_nodes, nonscc_edges
def get_nodes_degree_dict(g, nodes):
# get nodes degree dict: key = node, value = (max(d(in)/d(out),d(out)/d(in),"in" or "out")
in_degrees = g.in_degree(nodes)
out_degrees = g.out_degree(nodes)
degree_dict = {}
for node in nodes:
in_d = in_degrees[node]
out_d = out_degrees[node]
        if in_d >= out_d:
            try:
                value = in_d * 1.0 / out_d
            except ZeroDivisionError:
                value = 0
            f = "in"
        else:
            try:
                value = out_d * 1.0 / in_d
            except ZeroDivisionError:
                value = 0
            f = "out"
degree_dict[node] = (value, f)
# print("node: %d: %s" % (node,degree_dict[node]))
return degree_dict
def greedy_local_heuristic(sccs, degree_dict, edges_to_be_removed):
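    # Greedy feedback-arc-set heuristic: for each big SCC, pick the node with
    # the most skewed in/out degree ratio and remove the edges on its minority
    # side, then re-split the SCC and repeat on any big pieces that remain.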
while True:
graph = sccs.pop()
temp_nodes_degree_dict = {}
for node in graph.nodes():
temp_nodes_degree_dict[node] = degree_dict[node][0]
max_node, _ = pick_from_dict(temp_nodes_degree_dict)
max_value = degree_dict[max_node]
# degrees = [(node,degree_dict[node]) for node in list(graph.nodes())]
# max_node,max_value = max(degrees,key = lambda x: x[1][0])
if max_value[1] == "in":
# indegree > outdegree, remove out-edges
edges = [(max_node, o) for o in graph.neighbors(max_node)]
else:
# outdegree > indegree, remove in-edges
edges = [(i, max_node) for i in graph.predecessors(max_node)]
edges_to_be_removed += edges
sub_graphs = filter_big_scc(graph, edges_to_be_removed)
if sub_graphs:
for index, sub in enumerate(sub_graphs):
sccs.append(sub)
if not sccs:
return
def remove_self_loops_from_graph(g):
self_loops = list(g.selfloop_edges())
g.remove_edges_from(self_loops)
return self_loops
def remove_cycle_edges_by_mfas():
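    # Pipeline: drop self-loops, collect SCCs with >= 2 nodes, break them with
    # the greedy heuristic above, and return every removed edge (self-loops
    # included) so the caller can strip them from the graph.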
self_loops = remove_self_loops_from_graph(g)
scc_nodes, _, _, _ = scc_nodes_edges(g)
degree_dict = get_nodes_degree_dict(g, scc_nodes)
sccs = get_big_sccs(g)
if len(sccs) == 0:
print("After removal of self loop edgs: %s" % nx.is_directed_acyclic_graph(g))
return self_loops
edges_to_be_removed = []
import timeit
t1 = timeit.default_timer()
greedy_local_heuristic(sccs, degree_dict, edges_to_be_removed)
t2 = timeit.default_timer()
print("mfas time usage: %0.4f s" % (t2 - t1))
edges_to_be_removed = list(set(edges_to_be_removed))
# g.remove_edges_from(edges_to_be_removed)
edges_to_be_removed += self_loops
return edges_to_be_removed
|
Specify unique identifiers, data owners, and data standards and policies for each data set within the organization.
Policies surrounding data quality, refresh and validation frequency, and unique identifiers are essential to smooth-running data management.
This could be a useful tool if modified properly. 1) There is no place to indicate what you are assessing. 2) Data assessed can include, but doesn't always include, Email or Customer Loss; there is no way to exclude a question, such as with a "Not Applicable" option, to make its impact neutral. 3) Finally, there isn't a neutral setting for something that has no positive or negative impact, such as occasional occurrences; the choices are either positive or negative - no gray area. If the template were modified for these items, it would be a much more useful tool. Also, another section for assessing databases and reporting tools would be helpful; there is nothing directly in here for data warehouse and reporting tools.
Thanks for your comment, John. We'll definitely consider these enhancements when the parent solution set Tackle Data Quality Issues becomes scheduled for revision.
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com [email protected]
import uuid
import mimetypes
from thumbor.handlers import ImageApiHandler
from thumbor.engines import BaseEngine
##
# Handler to upload images.
# This handler supports only the POST method; images can be uploaded:
# - through multipart/form-data (designed for forms)
# - or with the image content in the request body (rest style)
##
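#
# Illustrative REST-style request (host, port, and route depend on how
# thumbor is configured):
#
#   curl -X POST --data-binary @photo.jpg \
#        -H "Content-Type: image/jpeg" -H "Slug: photo.jpg" \
#        http://localhost:8888/image
#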
class ImageUploadHandler(ImageApiHandler):
def post(self):
# Check if the image uploaded is a multipart/form-data
if self.multipart_form_data():
file_data = self.request.files['media'][0]
body = file_data['body']
# Retrieve filename from 'filename' field
filename = file_data['filename']
else:
body = self.request.body
# Retrieve filename from 'Slug' header
filename = self.request.headers.get('Slug')
# Check if the image uploaded is valid
if self.validate(body):
# Use the default filename for the uploaded images
if not filename:
content_type = self.request.headers.get('Content-Type', BaseEngine.get_mimetype(body))
extension = mimetypes.guess_extension(content_type.split(';', 1)[0], False)
if extension is None: # Content-Type is unknown, try with body
extension = mimetypes.guess_extension(BaseEngine.get_mimetype(body), False)
if extension == '.jpe':
extension = '.jpg' # Hack because mimetypes return .jpe by default
                if extension is None:  # Body type is also unknown; fall back to an empty extension
extension = ''
filename = self.context.config.UPLOAD_DEFAULT_FILENAME + extension
# Build image id based on a random uuid (32 characters)
image_id = str(uuid.uuid4().hex)
self.write_file(image_id, body)
self.set_status(201)
self.set_header('Location', self.location(image_id, filename))
def multipart_form_data(self):
if 'media' not in self.request.files or not self.request.files['media']:
return False
else:
return True
def location(self, image_id, filename):
base_uri = self.request.uri
return '%s/%s/%s' % (base_uri, image_id, filename)
|
This exhibition showcases the portfolio A Swarm, A Flock, A Host: A Compendium of Creatures, 12 color aquatints by noted artist Darren Waterston and text plates featuring a poem by award-winning writer Mark Doty. The portfolio, a contemporary version of a medieval bestiary, was commissioned by the Achenbach Graphic Arts Council (AGAC), in support of the art acquisitions program of the Achenbach Foundation for Graphic Arts, the Fine Arts Museums of San Francisco’s prints and drawings department.
The bestiary is a literary and illustrative form in which a number of known animal species, as well as mythological creatures, are catalogued encyclopedically. As a genre, the medieval bestiary constituted a natural history of creation that frequently included moralizing allegories, with the animal kingdom providing apt symbols for human behavior.
|
class GameClock(object):
"""Manages time in a game."""
def __init__(self, game_ticks_per_second=20):
"""Create a Game Clock object.
game_ticks_per_second -- The number of logic frames a second.
"""
self.game_ticks_per_second = float(game_ticks_per_second)
self.game_tick = 1. / self.game_ticks_per_second
self.speed = 1.
self.clock_time = 0.
self.virtual_time = 0.
self.game_time = 0.
self.game_frame_count = 0
self.real_time_passed = 0.
self.real_time = self.get_real_time()
self.started = False
self.paused = False
        self.between_frame = 0.0
        self.fps = 0.0
        self.fps_sample_start_time = 0.0
        self.fps_sample_count = 0
        self.average_fps = 0.0
def start(self):
"""Starts the Game Clock. Must be called once."""
if self.started:
return
self.clock_time = 0.
self.virtual_time = 0.
self.game_time = 0.
self.game_frame_count = 0
self.real_time_passed = 0.
self.real_time = self.get_real_time()
self.started = True
self.fps = 0.0
self.fps_sample_start_time = self.real_time
self.fps_sample_count = 0
def set_speed(self, speed):
"""Sets the speed of the clock.
speed -- A time factor (1 is normal speed, 2 is twice normal)
"""
assert isinstance(speed, float), "Must be a float"
if speed < 0.0:
raise ValueError("Negative speeds not supported")
self.speed = speed
    def pause(self):
        """Pauses the Game Clock."""
        self.paused = True
    def unpause(self):
        """Un-pauses the Game Clock."""
        self.paused = False
def get_real_time(self):
"""Returns the real time, as reported by the system clock.
        This method may be overridden."""
import time
return time.clock()
def get_fps(self):
"""Retrieves the current frames per second as a tuple containing
the fps and average fps over a second."""
return self.fps, self.average_fps
def get_between_frame(self):
"""Returns the interpolant between the previous game tick and the
next game tick."""
return self.between_frame
def update(self, max_updates = 0):
"""Advances time, must be called once per frame. Yields tuples of
game frame count and game time.
max_updates -- Maximum number of game time updates to issue.
"""
assert self.started, "You must call 'start' before using a GameClock."
real_time_now = self.get_real_time()
self.real_time_passed = real_time_now - self.real_time
self.real_time = real_time_now
self.clock_time += self.real_time_passed
if not self.paused:
self.virtual_time += self.real_time_passed * self.speed
        update_count = 0
        while self.game_time + self.game_tick < self.virtual_time:
            self.game_frame_count += 1
            self.game_time = self.game_frame_count * self.game_tick
            yield (self.game_frame_count, self.game_time)
            update_count += 1
            if max_updates and update_count == max_updates:
                break
self.between_frame = ( self.virtual_time - self.game_time ) / self.game_tick
if self.real_time_passed != 0:
self.fps = 1.0 / self.real_time_passed
else:
self.fps = 0.0
self.fps_sample_count += 1
if self.real_time - self.fps_sample_start_time > 1.0:
self.average_fps = self.fps_sample_count / (self.real_time - self.fps_sample_start_time)
self.fps_sample_start_time = self.real_time
self.fps_sample_count = 0
if __name__ == "__main__":
import time
t = GameClock(20) # AI is 20 frames per second
t.start()
while t.virtual_time < 2.0:
for (frame_count, game_time) in t.update():
print "Game frame #%i, %2.4f" % (frame_count, game_time)
virtual_time = t.virtual_time
print "\t%2.2f%% between game frame, time is %2.4f"%(t.between_frame*100., virtual_time)
time.sleep(0.2) # Simulate time to render frame
|
So, you might have heard that Sunday was a pretty good news day.
1) Elation. Full disclosure, this is my camp. I actually saw a video last night of people chanting “USA! USA!” in a stadium and didn’t think they looked like they were at some creepy Soviet rally circa 1950.
2) Conspiracy theory. These are people who hate good news. Mostly, the popular story seems to be that we killed Bin Laden’s double, or something. This is so bananas, I don’t even know what to do with it. How do you argue with a person who thinks that the most complicated version of history must be right? I think the only thing to do is to suggest they stay away from TV and movies for a month. Maybe all forms of narrative storytelling. Because clearly, someone wants a twist ending.
3) Weirdly displaced grief. OK, OK, I get that it’s weird to celebrate a person’s death. I understand that it’s not the spiritually evolved stance to take. But maybe let’s not pretend that we killed Bambi’s mom. This was a mass murderer of innocent men, women, and children. Maybe Jesus and the Buddha wouldn’t have been as totally psyched as I am right now, but last time I checked, I wasn’t Jesus or the Buddha, so … whee!
Also, for those of you who were wondering, Sgt. Lucky and I totally danced around the apartment to the Ewok Celebration song last night when we heard the news.
At best, it feels like VE Day on a smaller scale. At worst, it feels like a largely symbolic milestone, unimportant in the larger geopolitical scale – this was one guy, after all. As soon as I get down about that, though, I think of the flowering of freedom that I hope will come from the Arab Spring and I feel better.
|
# -*- test-case-name: buildbot.test.test_status -*-
# the email.MIMEMultipart module is only available in python-2.2.2 and later
import re
from email.Message import Message
from email.Utils import formatdate
from email.MIMEText import MIMEText
try:
from email.MIMEMultipart import MIMEMultipart
canDoAttachments = True
except ImportError:
canDoAttachments = False
import urllib
from zope.interface import implements
from twisted.internet import defer
from twisted.python import log as twlog
from buildbot import interfaces, util
from buildbot.status import base
from buildbot.status.builder import FAILURE, SUCCESS, WARNINGS, Results
VALID_EMAIL = re.compile(r"[a-zA-Z0-9\._%\-\+]+@[a-zA-Z0-9\._%\-]+\.[a-zA-Z]{2,6}")
def message(attrs):
"""Generate a buildbot mail message and return a tuple of message text
and type.
This function can be replaced using the customMesg variable in MailNotifier.
A message function will *always* get a dictionary of attributes with
the following values:
builderName - (str) Name of the builder that generated this event.
projectName - (str) Name of the project.
mode - (str) Mode set in MailNotifier. (failing, passing, problem).
result - (str) Builder result as a string. 'success', 'warnings',
'failure', 'skipped', or 'exception'
buildURL - (str) URL to build page.
buildbotURL - (str) URL to buildbot main page.
buildText - (str) Build text from build.getText().
slavename - (str) Slavename.
reason - (str) Build reason from build.getReason().
responsibleUsers - (List of str) List of responsible users.
branch - (str) Name of branch used. If no SourceStamp exists branch
is an empty string.
revision - (str) Name of revision used. If no SourceStamp exists revision
is an empty string.
patch - (str) Name of patch used. If no SourceStamp exists patch
is an empty string.
changes - (list of objs) List of change objects from SourceStamp. A change
object has the following useful information:
who - who made this change
revision - what VC revision is this change
branch - on what branch did this change occur
when - when did this change occur
files - what files were affected in this change
                     comments - comments regarding the change.
The functions asText and asHTML return a list of strings with
the above information formatted.
logs - (List of Tuples) List of tuples that contain the log name, log url
and log contents as a list of strings.
"""
text = ""
if attrs['mode'] == "all":
text += "The Buildbot has finished a build"
elif attrs['mode'] == "failing":
text += "The Buildbot has detected a failed build"
elif attrs['mode'] == "passing":
text += "The Buildbot has detected a passing build"
else:
text += "The Buildbot has detected a new failure"
text += " of %s on %s.\n" % (attrs['builderName'], attrs['projectName'])
if attrs['buildURL']:
text += "Full details are available at:\n %s\n" % attrs['buildURL']
text += "\n"
if attrs['buildbotURL']:
text += "Buildbot URL: %s\n\n" % urllib.quote(attrs['buildbotURL'], '/:')
text += "Buildslave for this Build: %s\n\n" % attrs['slavename']
text += "Build Reason: %s\n" % attrs['reason']
    #
    # Source stamp
    #
    source = ""
if attrs['branch']:
source += "[branch %s] " % attrs['branch']
if attrs['revision']:
source += attrs['revision']
else:
source += "HEAD"
if attrs['patch']:
source += " (plus patch)"
text += "Build Source Stamp: %s\n" % source
text += "Blamelist: %s\n" % ",".join(attrs['responsibleUsers'])
text += "\n"
t = attrs['buildText']
if t:
t = ": " + " ".join(t)
else:
t = ""
if attrs['result'] == 'success':
text += "Build succeeded!\n"
elif attrs['result'] == 'warnings':
text += "Build Had Warnings%s\n" % t
else:
text += "BUILD FAILED%s\n" % t
text += "\n"
text += "sincerely,\n"
text += " -The Buildbot\n"
text += "\n"
return (text, 'plain')
class Domain(util.ComparableMixin):
implements(interfaces.IEmailLookup)
compare_attrs = ["domain"]
def __init__(self, domain):
assert "@" not in domain
self.domain = domain
def getAddress(self, name):
"""If name is already an email address, pass it through."""
if '@' in name:
return name
return name + "@" + self.domain
class MailNotifier(base.StatusReceiverMultiService):
"""This is a status notifier which sends email to a list of recipients
upon the completion of each build. It can be configured to only send out
mail for certain builds, and only send messages when the build fails, or
when it transitions from success to failure. It can also be configured to
include various build logs in each message.
By default, the message will be sent to the Interested Users list, which
includes all developers who made changes in the build. You can add
additional recipients with the extraRecipients argument.
To get a simple one-message-per-build (say, for a mailing list), use
sendToInterestedUsers=False, extraRecipients=['[email protected]']
Each MailNotifier sends mail to a single set of recipients. To send
different kinds of mail to different recipients, use multiple
MailNotifiers.
"""
implements(interfaces.IEmailSender)
compare_attrs = ["extraRecipients", "lookup", "fromaddr", "mode",
"categories", "builders", "addLogs", "relayhost",
"subject", "sendToInterestedUsers", "customMesg"]
def __init__(self, fromaddr, mode="all", categories=None, builders=None,
addLogs=False, relayhost="localhost",
subject="buildbot %(result)s in %(projectName)s on %(builder)s",
lookup=None, extraRecipients=[],
sendToInterestedUsers=True, customMesg=message):
"""
@type fromaddr: string
@param fromaddr: the email address to be used in the 'From' header.
@type sendToInterestedUsers: boolean
@param sendToInterestedUsers: if True (the default), send mail to all
of the Interested Users. If False, only
send mail to the extraRecipients list.
@type extraRecipients: tuple of string
@param extraRecipients: a list of email addresses to which messages
should be sent (in addition to the
InterestedUsers list, which includes any
developers who made Changes that went into this
build). It is a good idea to create a small
mailing list and deliver to that, then let
subscribers come and go as they please.
@type subject: string
@param subject: a string to be used as the subject line of the message.
%(builder)s will be replaced with the name of the
builder which provoked the message.
@type mode: string (defaults to all)
@param mode: one of:
- 'all': send mail about all builds, passing and failing
- 'failing': only send mail about builds which fail
- 'passing': only send mail about builds which succeed
- 'problem': only send mail about a build which failed
when the previous build passed
@type builders: list of strings
@param builders: a list of builder names for which mail should be
sent. Defaults to None (send mail for all builds).
Use either builders or categories, but not both.
@type categories: list of strings
@param categories: a list of category names to serve status
information for. Defaults to None (all
categories). Use either builders or categories,
but not both.
@type addLogs: boolean.
@param addLogs: if True, include all build logs as attachments to the
messages. These can be quite large. This can also be
set to a list of log names, to send a subset of the
logs. Defaults to False.
@type relayhost: string
@param relayhost: the host to which the outbound SMTP connection
should be made. Defaults to 'localhost'
@type lookup: implementor of {IEmailLookup}
@param lookup: object which provides IEmailLookup, which is
responsible for mapping User names (which come from
the VC system) into valid email addresses. If not
provided, the notifier will only be able to send mail
to the addresses in the extraRecipients list. Most of
the time you can use a simple Domain instance. As a
                       shortcut, you can pass a string: this will be
treated as if you had provided Domain(str). For
example, lookup='twistedmatrix.com' will allow mail
to be sent to all developers whose SVN usernames
match their twistedmatrix.com account names.
@type customMesg: func
@param customMesg: A function that returns a tuple containing the text of
a custom message and its type. This function takes
the dict attrs which has the following values:
builderName - (str) Name of the builder that generated this event.
projectName - (str) Name of the project.
mode - (str) Mode set in MailNotifier. (failing, passing, problem).
result - (str) Builder result as a string. 'success', 'warnings',
'failure', 'skipped', or 'exception'
buildURL - (str) URL to build page.
buildbotURL - (str) URL to buildbot main page.
buildText - (str) Build text from build.getText().
slavename - (str) Slavename.
reason - (str) Build reason from build.getReason().
responsibleUsers - (List of str) List of responsible users.
branch - (str) Name of branch used. If no SourceStamp exists branch
is an empty string.
revision - (str) Name of revision used. If no SourceStamp exists revision
is an empty string.
patch - (str) Name of patch used. If no SourceStamp exists patch
is an empty string.
changes - (list of objs) List of change objects from SourceStamp. A change
object has the following useful information:
who - who made this change
revision - what VC revision is this change
branch - on what branch did this change occur
when - when did this change occur
files - what files were affected in this change
                                        comments - comments regarding the change.
The functions asText and asHTML return a list of strings with
the above information formatted.
logs - (List of Tuples) List of tuples that contain the log name, log url,
and log contents as a list of strings.
"""
base.StatusReceiverMultiService.__init__(self)
assert isinstance(extraRecipients, (list, tuple))
for r in extraRecipients:
assert isinstance(r, str)
assert VALID_EMAIL.search(r) # require full email addresses, not User names
self.extraRecipients = extraRecipients
self.sendToInterestedUsers = sendToInterestedUsers
self.fromaddr = fromaddr
        assert mode in ('all', 'failing', 'passing', 'problem')
self.mode = mode
self.categories = categories
self.builders = builders
self.addLogs = addLogs
self.relayhost = relayhost
self.subject = subject
if lookup is not None:
if type(lookup) is str:
lookup = Domain(lookup)
assert interfaces.IEmailLookup.providedBy(lookup)
self.lookup = lookup
self.customMesg = customMesg
self.watched = []
self.status = None
# you should either limit on builders or categories, not both
        if self.builders is not None and self.categories is not None:
            twlog.err("Please specify only builders or categories, not both")
            raise ValueError("Please specify only builders or categories, not both")
def setServiceParent(self, parent):
"""
@type parent: L{buildbot.master.BuildMaster}
"""
base.StatusReceiverMultiService.setServiceParent(self, parent)
self.setup()
def setup(self):
self.status = self.parent.getStatus()
self.status.subscribe(self)
def disownServiceParent(self):
self.status.unsubscribe(self)
for w in self.watched:
w.unsubscribe(self)
return base.StatusReceiverMultiService.disownServiceParent(self)
def builderAdded(self, name, builder):
# only subscribe to builders we are interested in
        if self.categories is not None and builder.category not in self.categories:
return None
self.watched.append(builder)
return self # subscribe to this builder
def builderRemoved(self, name):
pass
def builderChangedState(self, name, state):
pass
def buildStarted(self, name, build):
pass
def buildFinished(self, name, build, results):
# here is where we actually do something.
builder = build.getBuilder()
if self.builders is not None and name not in self.builders:
return # ignore this build
if self.categories is not None and \
builder.category not in self.categories:
return # ignore this build
if self.mode == "failing" and results != FAILURE:
return
if self.mode == "passing" and results != SUCCESS:
return
if self.mode == "problem":
if results != FAILURE:
return
prev = build.getPreviousBuild()
if prev and prev.getResults() == FAILURE:
return
# for testing purposes, buildMessage returns a Deferred that fires
# when the mail has been sent. To help unit tests, we return that
# Deferred here even though the normal IStatusReceiver.buildFinished
# signature doesn't do anything with it. If that changes (if
# .buildFinished's return value becomes significant), we need to
# rearrange this.
return self.buildMessage(name, build, results)
def buildMessage(self, name, build, results):
#
# logs is a list of tuples that contain the log
# name, log url, and the log contents as a list of strings.
#
logs = list()
for log in build.getLogs():
stepName = log.getStep().getName()
logName = log.getName()
logs.append(('%s.%s' % (stepName, logName),
'%s/steps/%s/logs/%s' % (self.status.getURLForThing(build), stepName, logName),
log.getText().splitlines()))
attrs = {'builderName': name,
'projectName': self.status.getProjectName(),
'mode': self.mode,
'result': Results[results],
'buildURL': self.status.getURLForThing(build),
'buildbotURL': self.status.getBuildbotURL(),
'buildText': build.getText(),
'slavename': build.getSlavename(),
'reason': build.getReason(),
'responsibleUsers': build.getResponsibleUsers(),
'branch': "",
'revision': "",
'patch': "",
'changes': [],
'logs': logs}
ss = build.getSourceStamp()
if ss:
attrs['branch'] = ss.branch
attrs['revision'] = ss.revision
attrs['patch'] = ss.patch
attrs['changes'] = ss.changes[:]
text, type = self.customMesg(attrs)
assert type in ('plain', 'html'), "'%s' message type must be 'plain' or 'html'." % type
haveAttachments = False
if attrs['patch'] or self.addLogs:
haveAttachments = True
if not canDoAttachments:
twlog.msg("warning: I want to send mail with attachments, "
"but this python is too old to have "
"email.MIMEMultipart . Please upgrade to python-2.3 "
"or newer to enable addLogs=True")
if haveAttachments and canDoAttachments:
m = MIMEMultipart()
m.attach(MIMEText(text, type))
else:
m = Message()
m.set_payload(text)
m.set_type("text/%s" % type)
m['Date'] = formatdate(localtime=True)
m['Subject'] = self.subject % { 'result': attrs['result'],
'projectName': attrs['projectName'],
'builder': attrs['builderName'],
}
m['From'] = self.fromaddr
# m['To'] is added later
if attrs['patch']:
a = MIMEText(attrs['patch'][1])
a.add_header('Content-Disposition', "attachment",
filename="source patch")
m.attach(a)
if self.addLogs:
for log in build.getLogs():
name = "%s.%s" % (log.getStep().getName(),
log.getName())
if self._shouldAttachLog(log.getName()) or self._shouldAttachLog(name):
a = MIMEText(log.getText())
a.add_header('Content-Disposition', "attachment",
filename=name)
m.attach(a)
# now, who is this message going to?
dl = []
recipients = []
if self.sendToInterestedUsers and self.lookup:
for u in build.getInterestedUsers():
d = defer.maybeDeferred(self.lookup.getAddress, u)
d.addCallback(recipients.append)
dl.append(d)
d = defer.DeferredList(dl)
d.addCallback(self._gotRecipients, recipients, m)
return d
def _shouldAttachLog(self, logname):
if type(self.addLogs) is bool:
return self.addLogs
return logname in self.addLogs
def _gotRecipients(self, res, rlist, m):
recipients = set()
for r in rlist:
if r is None: # getAddress didn't like this address
continue
# Git can give emails like 'User' <[email protected]>@foo.com so check
# for two @ and chop the last
if r.count('@') > 1:
r = r[:r.rindex('@')]
if VALID_EMAIL.search(r):
recipients.add(r)
else:
twlog.msg("INVALID EMAIL: %r" + r)
        # if we're sending to interested users, move the extras to the CC
# list so they can tell if they are also interested in the change
# unless there are no interested users
if self.sendToInterestedUsers and len(recipients):
m['CC'] = ", ".join(sorted(self.extraRecipients[:]))
else:
            for r in self.extraRecipients:
                recipients.add(r)
m['To'] = ", ".join(sorted(recipients))
# The extras weren't part of the TO list so add them now
if self.sendToInterestedUsers:
for r in self.extraRecipients:
recipients.add(r)
return self.sendMessage(m, list(recipients))
def sendMessage(self, m, recipients):
from twisted.mail.smtp import sendmail
s = m.as_string()
twlog.msg("sending mail (%d bytes) to" % len(s), recipients)
return sendmail(self.relayhost, self.fromaddr, recipients, s)
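# A hedged usage sketch (an addition, not from the original module): wiring a
# MailNotifier with a custom message function, e.g. inside a buildbot
# master.cfg. The addresses and the HTML body below are illustrative
# assumptions, not project configuration.
def htmlMessage(attrs):
    """Minimal customMesg example returning a (body, 'html') tuple."""
    body = "<h2>Build of %s on %s: %s</h2>" % (
        attrs['builderName'], attrs['projectName'], attrs['result'])
    if attrs['buildURL']:
        body += "<p><a href=\"%s\">Full details</a></p>" % attrs['buildURL']
    return (body, 'html')
mn = MailNotifier(fromaddr="builds@example.com",
                  mode="problem",
                  lookup=Domain("example.com"),
                  extraRecipients=["dev-list@example.com"],
                  customMesg=htmlMessage)
# In a master.cfg this notifier would then be appended to c['status'].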
|
I see your point, but what about NATO?
And right now, the one military threat Europe faces is Russia.
Yo, the EU is horrified by Nazism (but not by Communism), so I doubt that will ever happen.
This is something where I respect all opinions.
And you still want an EU Army?
Who's to say that tomorrow there won't be a new threat?
I'm saying that the parallels between the EU and the Nazis are worth noting. And Nazi stood for the National Socialist party (maybe there was another word in there I have missed). But the EU is run by corrupt socialists. The EU was great back in the early days, but eventually many great things grow big and grow dishonest. I think Brexit was good, because it's hopefully going to be the downfall of the EU, and it will be replaced with something that works for free trade. A US-EU free trade deal should have been here a long time ago; the EU has too many socialists to want that and is heading towards a protectionist stance.
Yo, there's an intermediate step between saying the EU is corrupt and that there are parallels with the Nazis.
But we're going OT (like in the good old days), so let's get back on track.
I certainly hope Ireland doesn't decide to screw over the UK and turn our backs on them like Brussels wants us to. The Dutch won't; I'd say they will be #2 to leave. Then maybe Italy or Greece.
And yeah, the British don't really have a variety of units at all.
I'm going to make it a "British Commonwealth" tree and include some South African tech, and that completes most of it. I have a draft on my PC and will post it later.
Just going to add that if the British doctrine is added, their tanks should have the highest hitpoints out of all the tanks, and the best spec ops stats. Their anti-air is, well... meh. Guess Britain is a good country for tank rushes.
MBT-80? Really, what about the Chieftain Mk.11? The MBT-80 didn't even exist. The Chieftain was replaced by the Challenger 1.
Woo woo woo, the Panavia Tornado IDS has been used 3 times in this tech tree! The Panavia Tornado IDS is the Tornado GR1 and GR4; it is literally the same plane. The RAF removed "IDS" and just used "GR" instead; it's literally the British designation for the Tornado IDS. Everyone else in Europe calls it the IDS. It's not air-to-air at all. The English Electric Lightning would be a much better tier 1 fighter. The Tornado ADV replaced it in service, and was later kind of replaced by the Typhoon (more of a cost-saving measure).
Demote the Nimrods and add the P-8 Poseidon as the Tier 3.
I know you don't like it, but the RAF and FAA are buying the F-35B variant, which Britain is partially constructing and helped to develop. Although rubbish, as we previously stated, it will be Britain's stealth strike fighter. Either that or you use the BAe Replica (stupid name), which was a stealth strike aircraft proposed to replace the Tornado IDS.
Britain is designing a stealthy air superiority fighter for Turkey. That's the closest you will get to a British stealth air superiority aircraft.
Malta class? Wtf, that's a World War 2 carrier and wasn't even built.
Castle class, once again: WTF. It's a World War 2 corvette!!! The River class are patrol ships. Britain designs and builds corvettes; we don't use them.
Get rid of the Valiant; it was put into service alongside the Victor.
Promote the Victor to tier 1 and the Vulcan to tier 2, because Britain was going to join the supersonic bomber club back in the Cold War.
Remember they're going to have to make 3-tier skin progressions for these. Don't think it's great with the Lynx being the attack chopper.
I'm just straight-out stuck here, hence I was going to make this a "Commonwealth" tech tree and add in the South African Cheetah as tier 1. I just think the Lightning is pretty damn ugly.
Yep, good call with the Phantom. Did they even draw a sketch of the P.125?
Haha, I have heard of it before. Is there even a sketch or anything?
Hehe, yeah, but sure, I didn't want to use the Centaur. They added the ski jump later and I didn't notice that, ha. That's fine. Fits perfectly!
Yeah... See, when we have the Brexit from the Euro skins, gotta keep in mind that tree too. The Tiger class is being used there, but we will move it here. The Peruvians had some sort of cruiser made by the Dutch we can add in. Just hope it isn't too out of date to be added.
Get rid of the Valiant; it was put into service alongside the Victor. Promote the Victor to tier 1 and the Vulcan to tier 2, because Britain was going to join the supersonic bomber club back in the Cold War.
Yeah, I didn't add the TSR for a reason. It isn't really a bomber compared to the Vulcan; it seemed more like a big strike aircraft. I'd say it could fit better in tier 1, but the Vulcan is much better suited for tier 3.
Hmm... good, yeah, didn't think of that. But do you really want that to be tier 3?
Well, the Krab SPG is the most advanced variant of the AS-90, hence why I put it as tier 3.
The TSR.2 can carry the same bomb load as the Vulcan and can fly faster and lower, as well as launching cruise missiles. It's more of a tactical bomber, but not a strike fighter. It has no air defence capability, unlike the F-111 Aardvark. The TSR.2 would probably have been a much better bomber than the Aardvark, but the Aardvark had some multi-role capabilities, hence why the RAF selected it over the TSR.2, and also because it was meant to be cheaper. The TSR.2 at its heart was a strategic nuclear bomber; it just wasn't as big as the B-1 Lancer, Tu-22M or Tu-160 supersonic heavy bombers.
Didn't want to add the Warthog in, but the Super AV is fine. Good get :P Well, those are only amphibious assault vehicles, and the Viking is tailored to British service much like the Warthog is.
Kind of looks like a YF-23.
Sorry, it's actually the P.1154; my mistake. Kind of looks like a Harrier and an F-4 Phantom had a baby. Yeah, they drew sketches; they even made the engines. But they scrapped it in favor of the subsonic Harrier for cost reasons.
"South African Cheetah as tier 1. I just think the Lightning is pretty damn ugly." The Lightning may be ugly, but it was our premier air superiority fighter of its time and the first British fighter to reach Mach 2. The Cheetah isn't South African; sure, it was made there, but it was based on the French Mirage III and designed by Israelis for the Apartheid government of South Africa, since they were under sanctions and couldn't buy them off the French. If you want a British tech tree, use British equipment when it is available. Besides, I've always found the MiG-23 and F-5 Tiger pretty ugly looking, so it will fit right in.
To me it's like a MiG-21 on steroids. It's ugly, but that shouldn't be a reason to keep it out of the tech tree now that I've finished it for you.
The Lynx AH models have pretty much zero anti-infantry capacity apart from machine guns used by the crew chief. They use radar and anti-tank guided missiles. The Super Lynx is the second-generation Lynx and the Wildcat the third generation. The Wildcat does look substantially different from the other two Lynx helicopters, but the Super Lynx AH7 was a significant upgrade over the Lynx AH1. The reason I put them in the attack class is simply because they are good tank hunters. The Apache models are used by Britain in an anti-infantry capacity against insurgents; their 30mm cannon and unguided rocket pods are heavily used. The Lynx doesn't have this infantry-chewing ability, hence why I put it in Tank Hunters (Attack Helicopters). Surely similar-looking aircraft make it easier on the developers? There is one major distinguishing feature that separates the earlier Lynx AH1 from the Super Lynx AH7: the AH7 served during the Gulf War, so they were given a light tan color scheme and big bulky dust protectors for their engines. They were fairly similar externally to the AH1, but internally the AH7 was a big upgrade.
How do I break up quotes?
Like I take your quote, break it up so I can type my bit. Then do the next part.
Well, it's all finished now, so he will probably like it. But I would say it will take a while until it's implemented. Some of the animation skins can be transferred over from the European skin, which would save time. I reckon they will focus on adding all the additional unit animations like they've been doing recently with every update. But I can imagine this tech tree and the Chinese one may be introduced in a future update down the line. We'll just have to be patient and wait for the developers to finish current projects relating to the game. I've also started my own skin: an East Asia doctrine, comprising South Korea, Taiwan, Japan and Singapore. Our little conversation inspired me to create one and see how far I could go before running into trouble with gaps. So far it's about 60% indigenous East Asian, but I'm running into trouble with the naval aspects of it (submarines, aircraft carriers, naval aircraft, ICBMs); it's largely filled out other than that. I have, however, been forced to use US bombers, but I figure since the British skin will share bombers with Europe it won't be such a big deal.
That's true, but it's more the case that the British bombers that are not in the British tree will be used.
Russia isn't the only threat to the EU. The USA could be a threat if the United States sees Europe challenging its foreign policy or trying to make NATO obsolete.
Turkey is constantly threatening Europe and especially Greece. The South Eastern European countries (Greece, Bulgaria, Romania) are more threatened by Turkey than Russia.
The Balkans is still a mess. Everyone hates Serbia and Serbia is still pissed at NATO and Albania over Kosovo.
The UK is actually the Biggest threat to the EU due to Brexit.
And let's not forget Ethnic national or Separatist and Islamic Terrorism which is actually killing Europeans.
Russia is more a threat to NATO. Besides, geopolitically the EU is good for Russia, since it makes Europe less dependent on Russia-phobic America. If the EU broke up, everyone would fold back into NATO and love the US again. Russians want to end NATO, not necessarily the EU, which I believe is their biggest trading partner outside the former Soviet states.
|
# Copyright 2014 CloudFounders NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
DataObject module
"""
import uuid
import copy
import re
import json
import inspect
from ovs.dal.exceptions import (ObjectNotFoundException, ConcurrencyException, LinkedObjectException,
MissingMandatoryFieldsException, SaveRaceConditionException, InvalidRelationException,
VolatileObjectException)
from ovs.dal.helpers import Descriptor, Toolbox, HybridRunner
from ovs.dal.relations import RelationMapper
from ovs.dal.dataobjectlist import DataObjectList
from ovs.dal.datalist import DataList
from ovs.extensions.generic.volatilemutex import VolatileMutex
from ovs.extensions.storage.exceptions import KeyNotFoundException
from ovs.extensions.storage.persistentfactory import PersistentFactory
from ovs.extensions.storage.volatilefactory import VolatileFactory
class MetaClass(type):
"""
This metaclass provides dynamic __doc__ generation feeding doc generators
"""
def __new__(mcs, name, bases, dct):
"""
Overrides instance creation of all DataObject instances
"""
if name != 'DataObject':
for internal in ['_properties', '_relations', '_dynamics']:
data = set()
for base in bases:
if hasattr(base, internal):
data.update(getattr(base, internal))
if '_{0}_{1}'.format(name, internal) in dct:
data.update(dct.pop('_{0}_{1}'.format(name, internal)))
dct[internal] = list(data)
for prop in dct['_properties']:
docstring = prop.docstring
if isinstance(prop.property_type, type):
itemtype = prop.property_type.__name__
extra_info = ''
else:
itemtype = 'Enum({0})'.format(prop.property_type[0].__class__.__name__)
extra_info = '(enum values: {0})'.format(', '.join(prop.property_type))
dct[prop.name] = property(
doc='[persistent] {0} {1}\n@type: {2}'.format(docstring, extra_info, itemtype)
)
for relation in dct['_relations']:
itemtype = relation.foreign_type.__name__ if relation.foreign_type is not None else name
dct[relation.name] = property(
doc='[relation] one-to-{0} relation with {1}.{2}\n@type: {3}'.format(
'one' if relation.onetoone else 'many',
itemtype,
relation.foreign_key,
itemtype
)
)
for dynamic in dct['_dynamics']:
if bases[0].__name__ == 'DataObject':
if '_{0}'.format(dynamic.name) not in dct:
raise LookupError('Dynamic property {0} in {1} could not be resolved'.format(dynamic.name, name))
method = dct['_{0}'.format(dynamic.name)]
else:
methods = [getattr(base, '_{0}'.format(dynamic.name)) for base in bases if hasattr(base, '_{0}'.format(dynamic.name))]
if len(methods) == 0:
raise LookupError('Dynamic property {0} in {1} could not be resolved'.format(dynamic.name, name))
                    method = methods[0]
docstring = method.__doc__.strip()
if isinstance(dynamic.return_type, type):
itemtype = dynamic.return_type.__name__
extra_info = ''
else:
itemtype = 'Enum({0})'.format(dynamic.return_type[0].__class__.__name__)
extra_info = '(enum values: {0})'.format(', '.join(dynamic.return_type))
dct[dynamic.name] = property(
fget=method,
doc='[dynamic] ({0}s) {1} {2}\n@rtype: {3}'.format(dynamic.timeout, docstring, extra_info, itemtype)
)
return super(MetaClass, mcs).__new__(mcs, name, bases, dct)
class DataObject(object):
"""
This base class contains all logic to support our multiple backends and the caching
- Storage backends:
- Persistent backend for persistent storage (key-value store)
- Volatile backend for volatile but fast storage (key-value store)
- Storage backends are abstracted and injected into this class, making it possible to use
fake backends
- Features:
- Hybrid property access:
- Persistent backend
- 3rd party component for "live" properties
- Individual cache settings for "live" properties
- 1-n relations with automatic property propagation
- Recursive save
"""
__metaclass__ = MetaClass
#######################
# Attributes
#######################
    # Properties that need to be overwritten by the implementation
    _properties = []  # Blueprint data of the object type
    _dynamics = []  # Timeout of readonly object properties cache
    _relations = []  # Blueprint for relations
    #######################
    # Constructor
    #######################
def __new__(cls, *args, **kwargs):
"""
Initializes the class
"""
hybrid_structure = HybridRunner.get_hybrids()
identifier = Descriptor(cls).descriptor['identifier']
if identifier in hybrid_structure and identifier != hybrid_structure[identifier]['identifier']:
new_class = Descriptor().load(hybrid_structure[identifier]).get_object()
return super(cls, new_class).__new__(new_class, *args, **kwargs)
return super(DataObject, cls).__new__(cls)
def __init__(self, guid=None, data=None, datastore_wins=False, volatile=False):
"""
Loads an object with a given guid. If no guid is given, a new object
is generated with a new guid.
* guid: The guid indicating which object should be loaded
        * datastore_wins: Optional boolean indicating save conflict resolve management.
        ** True: when saving, externally modified fields will not be saved
        ** False: when saving, all changed data will be saved, regardless of external updates
        ** None: in case changed fields were also changed externally, an error will be raised
"""
# Initialize super class
super(DataObject, self).__init__()
# Initialize internal fields
self._frozen = False
self._datastore_wins = datastore_wins
self._guid = None # Guid identifier of the object
self._original = {} # Original data copy
self._metadata = {} # Some metadata, mainly used for unit testing
self._data = {} # Internal data storage
self._objects = {} # Internal objects storage
# Initialize public fields
self.dirty = False
self.volatile = volatile
# Worker fields/objects
self._name = self.__class__.__name__.lower()
self._namespace = 'ovs_data' # Namespace of the object
self._mutex_listcache = VolatileMutex('listcache_{0}'.format(self._name))
self._mutex_reverseindex = VolatileMutex('reverseindex')
# Rebuild _relation types
hybrid_structure = HybridRunner.get_hybrids()
for relation in self._relations:
if relation.foreign_type is not None:
identifier = Descriptor(relation.foreign_type).descriptor['identifier']
if identifier in hybrid_structure and identifier != hybrid_structure[identifier]['identifier']:
relation.foreign_type = Descriptor().load(hybrid_structure[identifier]).get_object()
# Init guid
self._new = False
if guid is None:
self._guid = str(uuid.uuid4())
self._new = True
else:
guid = str(guid).lower()
if re.match('^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$', guid) is not None:
self._guid = str(guid)
else:
raise ValueError('The given guid is invalid: {0}'.format(guid))
# Build base keys
self._key = '{0}_{1}_{2}'.format(self._namespace, self._name, self._guid)
# Version mutex
self._mutex_version = VolatileMutex('ovs_dataversion_{0}_{1}'.format(self._name, self._guid))
# Load data from cache or persistent backend where appropriate
self._volatile = VolatileFactory.get_client()
self._persistent = PersistentFactory.get_client()
self._metadata['cache'] = None
if self._new:
self._data = {}
else:
self._data = self._volatile.get(self._key)
if self._data is None:
Toolbox.log_cache_hit('object_load', False)
self._metadata['cache'] = False
try:
self._data = self._persistent.get(self._key)
except KeyNotFoundException:
raise ObjectNotFoundException('{0} with guid \'{1}\' could not be found'.format(
self.__class__.__name__, self._guid
))
else:
Toolbox.log_cache_hit('object_load', True)
self._metadata['cache'] = True
# Set default values on new fields
for prop in self._properties:
if prop.name not in self._data:
self._data[prop.name] = prop.default
self._add_property(prop)
# Load relations
for relation in self._relations:
if relation.name not in self._data:
if relation.foreign_type is None:
cls = self.__class__
else:
cls = relation.foreign_type
self._data[relation.name] = Descriptor(cls).descriptor
self._add_relation_property(relation)
# Add wrapped properties
for dynamic in self._dynamics:
self._add_dynamic_property(dynamic)
# Load foreign keys
relations = RelationMapper.load_foreign_relations(self.__class__)
if relations is not None:
for key, info in relations.iteritems():
self._objects[key] = {'info': info,
'data': None}
self._add_list_property(key, info['list'])
# Store original data
self._original = copy.deepcopy(self._data)
if not self._new:
# Re-cache the object
self._volatile.set(self._key, self._data)
# Freeze property creation
self._frozen = True
# Optionally, initialize some fields
if data is not None:
for field, value in data.iteritems():
setattr(self, field, value)
#######################
# Helper methods for dynamic getting and setting
#######################
def _add_property(self, prop):
"""
Adds a simple property to the object
"""
# pylint: disable=protected-access
fget = lambda s: s._get_property(prop)
fset = lambda s, v: s._set_property(prop, v)
# pylint: enable=protected-access
setattr(self.__class__, prop.name, property(fget, fset))
def _add_relation_property(self, relation):
"""
Adds a complex property to the object (hybrids)
"""
# pylint: disable=protected-access
fget = lambda s: s._get_relation_property(relation)
fset = lambda s, v: s._set_relation_property(relation, v)
gget = lambda s: s._get_guid_property(relation)
# pylint: enable=protected-access
setattr(self.__class__, relation.name, property(fget, fset))
setattr(self.__class__, '{0}_guid'.format(relation.name), property(gget))
def _add_list_property(self, attribute, list):
"""
Adds a list (readonly) property to the object
"""
# pylint: disable=protected-access
fget = lambda s: s._get_list_property(attribute)
gget = lambda s: s._get_list_guid_property(attribute)
# pylint: enable=protected-access
setattr(self.__class__, attribute, property(fget))
setattr(self.__class__, ('{0}_guids' if list else '{0}_guid').format(attribute), property(gget))
def _add_dynamic_property(self, dynamic):
"""
Adds a dynamic property to the object
"""
# pylint: disable=protected-access
fget = lambda s: s._get_dynamic_property(dynamic)
# pylint: enable=protected-access
setattr(self.__class__, dynamic.name, property(fget))
    # Helper methods supporting property fetching
def _get_property(self, prop):
"""
Getter for a simple property
"""
return self._data[prop.name]
def _get_relation_property(self, relation):
"""
Getter for a complex property (hybrid)
It will only load the object once and caches it for the lifetime of this object
"""
attribute = relation.name
if attribute not in self._objects:
descriptor = Descriptor().load(self._data[attribute])
self._objects[attribute] = descriptor.get_object(instantiate=True)
return self._objects[attribute]
def _get_guid_property(self, relation):
"""
Getter for a foreign key property
"""
attribute = relation.name
return self._data[attribute]['guid']
def _get_list_property(self, attribute):
"""
Getter for the list property
It will execute the related query every time to return a list of hybrid objects that
refer to this object. The resulting data will be stored or merged into the cached list
preserving as much already loaded objects as possible
"""
info = self._objects[attribute]['info']
remote_class = Descriptor().load(info['class']).get_object()
remote_key = info['key']
datalist = DataList.get_relation_set(remote_class, remote_key, self.__class__, attribute, self.guid)
if self._objects[attribute]['data'] is None:
self._objects[attribute]['data'] = DataObjectList(datalist.data, remote_class)
else:
self._objects[attribute]['data'].merge(datalist.data)
if info['list'] is True:
return self._objects[attribute]['data']
else:
data = self._objects[attribute]['data']
if len(data) > 1:
raise InvalidRelationException('More than one element found in {0}'.format(attribute))
return data[0] if len(data) == 1 else None
def _get_list_guid_property(self, attribute):
"""
Getter for guid list property
"""
dataobjectlist = getattr(self, attribute)
if dataobjectlist is None:
return None
if hasattr(dataobjectlist, '_guids'):
return dataobjectlist._guids
return dataobjectlist.guid
def _get_dynamic_property(self, dynamic):
"""
Getter for dynamic property, wrapping the internal data loading property
in a caching layer
"""
data_loader = getattr(self, '_{0}'.format(dynamic.name))
return self._backend_property(data_loader, dynamic)
# Helper method supporting property setting
def _set_property(self, prop, value):
"""
Setter for a simple property that will validate the type
"""
self.dirty = True
if value is None:
self._data[prop.name] = value
else:
correct, allowed_types, given_type = Toolbox.check_type(value, prop.property_type)
if correct:
self._data[prop.name] = value
else:
raise TypeError('Property {0} allows types {1}. {2} given'.format(
prop.name, str(allowed_types), given_type
))
def _set_relation_property(self, relation, value):
"""
Setter for a complex property (hybrid) that will validate the type
"""
self.dirty = True
attribute = relation.name
if value is None:
self._objects[attribute] = None
self._data[attribute]['guid'] = None
else:
descriptor = Descriptor(value.__class__).descriptor
if descriptor['identifier'] != self._data[attribute]['identifier']:
raise TypeError('An invalid type was given: {0} instead of {1}'.format(
descriptor['type'], self._data[attribute]['type']
))
self._objects[attribute] = value
self._data[attribute]['guid'] = value.guid
def __setattr__(self, key, value):
"""
__setattr__ hook that will block creating on the fly new properties, except
the predefined ones
"""
if not hasattr(self, '_frozen') or not self._frozen:
allowed = True
else:
# If our object structure is frozen (which is after __init__), we only allow known
# property updates: items that are in __dict__ and our own blueprinting dicts
allowed = key in self.__dict__ \
or key in (prop.name for prop in self._properties) \
or key in (relation.name for relation in self._relations) \
or key in (dynamic.name for dynamic in self._dynamics)
if allowed:
super(DataObject, self).__setattr__(key, value)
else:
raise RuntimeError('Property {0} does not exist on this object.'.format(key))
#######################
# Saving data to persistent store and invalidating volatile store
#######################
def save(self, recursive=False, skip=None):
"""
Save the object to the persistent backend and clear cache, making use
of the specified conflict resolve settings.
It will also invalidate certain caches if required. For example lists pointing towards this
object
"""
if self.volatile is True:
raise VolatileObjectException()
tries = 0
successful = False
while successful is False:
invalid_fields = []
for prop in self._properties:
if prop.mandatory is True and self._data[prop.name] is None:
invalid_fields.append(prop.name)
for relation in self._relations:
if relation.mandatory is True and self._data[relation.name]['guid'] is None:
invalid_fields.append(relation.name)
if len(invalid_fields) > 0:
raise MissingMandatoryFieldsException('Missing fields on {0}: {1}'.format(self._name, ', '.join(invalid_fields)))
if recursive:
# Save objects that point to us (e.g. disk.vmachine - if this is disk)
for relation in self._relations:
if relation.name != skip: # disks will be skipped
item = getattr(self, relation.name)
if item is not None:
item.save(recursive=True, skip=relation.foreign_key)
# Save object we point at (e.g. machine.disks - if this is machine)
relations = RelationMapper.load_foreign_relations(self.__class__)
if relations is not None:
for key, info in relations.iteritems():
if key != skip: # machine will be skipped
if info['list'] is True:
for item in getattr(self, key).iterloaded():
item.save(recursive=True, skip=info['key'])
else:
item = getattr(self, key)
if item is not None:
item.save(recursive=True, skip=info['key'])
try:
data = self._persistent.get(self._key)
except KeyNotFoundException:
if self._new:
data = {'_version': 0}
else:
raise ObjectNotFoundException('{0} with guid \'{1}\' was deleted'.format(
self.__class__.__name__, self._guid
))
changed_fields = []
data_conflicts = []
for attribute in self._data.keys():
if attribute == '_version':
continue
if self._data[attribute] != self._original[attribute]:
# We changed this value
changed_fields.append(attribute)
if attribute in data and self._original[attribute] != data[attribute]:
# Some other process also wrote to the database
if self._datastore_wins is None:
# In case we didn't set a policy, we raise the conflicts
data_conflicts.append(attribute)
elif self._datastore_wins is False:
# If the datastore should not win, we just overwrite the data
data[attribute] = self._data[attribute]
# If the datastore should win, we discard/ignore our change
else:
# Normal scenario, saving data
data[attribute] = self._data[attribute]
elif attribute not in data:
data[attribute] = self._data[attribute]
if data_conflicts:
raise ConcurrencyException('Got field conflicts while saving {0}. Conflicts: {1}'.format(
self._name, ', '.join(data_conflicts)
))
# Refresh internal data structure
self._data = copy.deepcopy(data)
# First, update reverse index
try:
self._mutex_reverseindex.acquire(60)
for relation in self._relations:
key = relation.name
original_guid = self._original[key]['guid']
new_guid = self._data[key]['guid']
if original_guid != new_guid:
if relation.foreign_type is None:
classname = self.__class__.__name__.lower()
else:
classname = relation.foreign_type.__name__.lower()
if original_guid is not None:
reverse_key = 'ovs_reverseindex_{0}_{1}'.format(classname, original_guid)
reverse_index = self._volatile.get(reverse_key)
if reverse_index is not None:
if relation.foreign_key in reverse_index:
entries = reverse_index[relation.foreign_key]
if self.guid in entries:
entries.remove(self.guid)
reverse_index[relation.foreign_key] = entries
self._volatile.set(reverse_key, reverse_index)
if new_guid is not None:
reverse_key = 'ovs_reverseindex_{0}_{1}'.format(classname, new_guid)
reverse_index = self._volatile.get(reverse_key)
if reverse_index is not None:
if relation.foreign_key in reverse_index:
entries = reverse_index[relation.foreign_key]
if self.guid not in entries:
entries.append(self.guid)
reverse_index[relation.foreign_key] = entries
self._volatile.set(reverse_key, reverse_index)
else:
reverse_index[relation.foreign_key] = [self.guid]
self._volatile.set(reverse_key, reverse_index)
else:
reverse_index = {relation.foreign_key: [self.guid]}
self._volatile.set(reverse_key, reverse_index)
reverse_key = 'ovs_reverseindex_{0}_{1}'.format(self._name, self.guid)
reverse_index = self._volatile.get(reverse_key)
if reverse_index is None:
reverse_index = {}
relations = RelationMapper.load_foreign_relations(self.__class__)
if relations is not None:
for key, _ in relations.iteritems():
reverse_index[key] = []
self._volatile.set(reverse_key, reverse_index)
finally:
self._mutex_reverseindex.release()
# Second, invalidate property lists
try:
self._mutex_listcache.acquire(60)
cache_key = '{0}_{1}'.format(DataList.cachelink, self._name)
cache_list = Toolbox.try_get(cache_key, {})
change = False
for list_key in cache_list.keys():
fields = cache_list[list_key]
if ('__all' in fields and self._new) or list(set(fields) & set(changed_fields)):
change = True
self._volatile.delete(list_key)
del cache_list[list_key]
if change is True:
self._volatile.set(cache_key, cache_list)
self._persistent.set(cache_key, cache_list)
finally:
self._mutex_listcache.release()
# Save the data
try:
self._mutex_version.acquire(5)
this_version = self._data['_version']
try:
store_version = self._persistent.get(self._key)['_version']
except KeyNotFoundException:
store_version = 0
if this_version == store_version:
self._data['_version'] = this_version + 1
self._persistent.set(self._key, self._data)
self._volatile.delete(self._key)
successful = True
else:
tries += 1
finally:
self._mutex_version.release()
if tries > 5:
raise SaveRaceConditionException()
self._original = copy.deepcopy(self._data)
self.dirty = False
self._new = False
#######################
# Other CRUDs
#######################
def delete(self, abandon=False):
"""
Delete the given object. It also invalidates certain lists
"""
if self.volatile is True:
raise VolatileObjectException()
# Check foreign relations
relations = RelationMapper.load_foreign_relations(self.__class__)
if relations is not None:
for key, info in relations.iteritems():
items = getattr(self, key)
if info['list'] is True:
if len(items) > 0:
if abandon is True:
for item in items.itersafe():
setattr(item, info['key'], None)
try:
item.save()
except ObjectNotFoundException:
pass
else:
raise LinkedObjectException('There are {0} items left in self.{1}'.format(len(items), key))
elif items is not None:
# No list (so a 1-to-1 relation), so there should be an object, or None
item = items # More clear naming
if abandon is True:
setattr(item, info['key'], None)
try:
item.save()
except ObjectNotFoundException:
pass
else:
raise LinkedObjectException('There is still an item linked in self.{0}'.format(key))
# Delete the object out of the persistent store
try:
self._persistent.delete(self._key)
except KeyNotFoundException:
pass
# First, update reverse index
try:
self._mutex_reverseindex.acquire(60)
for relation in self._relations:
key = relation.name
original_guid = self._original[key]['guid']
if original_guid is not None:
if relation.foreign_type is None:
classname = self.__class__.__name__.lower()
else:
classname = relation.foreign_type.__name__.lower()
reverse_key = 'ovs_reverseindex_{0}_{1}'.format(classname, original_guid)
reverse_index = self._volatile.get(reverse_key)
if reverse_index is not None:
if relation.foreign_key in reverse_index:
entries = reverse_index[relation.foreign_key]
if self.guid in entries:
entries.remove(self.guid)
reverse_index[relation.foreign_key] = entries
self._volatile.set(reverse_key, reverse_index)
self._volatile.delete('ovs_reverseindex_{0}_{1}'.format(self._name, self.guid))
finally:
self._mutex_reverseindex.release()
# Second, invalidate property lists
try:
self._mutex_listcache.acquire(60)
cache_key = '{0}_{1}'.format(DataList.cachelink, self._name)
cache_list = Toolbox.try_get(cache_key, {})
change = False
for list_key in cache_list.keys():
fields = cache_list[list_key]
if '__all' in fields:
change = True
self._volatile.delete(list_key)
del cache_list[list_key]
if change is True:
self._volatile.set(cache_key, cache_list)
self._persistent.set(cache_key, cache_list)
finally:
self._mutex_listcache.release()
# Delete the object and its properties out of the volatile store
self.invalidate_dynamics()
self._volatile.delete(self._key)
# Discard all pending changes
def discard(self):
"""
Discard all pending changes, reloading the data from the persistent backend
"""
self.__init__(guid = self._guid,
datastore_wins = self._datastore_wins)
def invalidate_dynamics(self, properties=None):
"""
Invalidates all dynamic property caches. Use with caution, as this action can introduce
a short performance hit.
"""
for dynamic in self._dynamics:
if properties is None or dynamic.name in properties:
self._volatile.delete('{0}_{1}'.format(self._key, dynamic.name))
def export(self):
"""
Exports this object's data for import in another object
"""
data = {}
for prop in self._properties:
data[prop.name] = self._data[prop.name]
return data
def serialize(self, depth=0):
"""
Serializes the internal data, getting rid of certain metadata like descriptors
"""
data = {'guid': self.guid}
for relation in self._relations:
key = relation.name
if depth == 0:
data['{0}_guid'.format(key)] = self._data[key]['guid']
else:
instance = getattr(self, key)
if instance is not None:
data[key] = getattr(self, key).serialize(depth=(depth - 1))
else:
data[key] = None
for prop in self._properties:
data[prop.name] = self._data[prop.name]
for dynamic in self._dynamics:
data[dynamic.name] = getattr(self, dynamic.name)
return data
def copy(self, other_object, include=None, exclude=None, include_relations=False):
"""
Copies all _properties (and optionally _relations) properties over from a given hybrid to
self. One can pass in a list of properties that should be copied, or a list of properties
that should not be copied. Exclude > Include
"""
if include is not None and not isinstance(include, list):
raise TypeError('Argument include should be None or a list of strings')
if exclude is not None and not isinstance(exclude, list):
raise TypeError('Argument exclude should be None or a list of strings')
if self.__class__.__name__ != other_object.__class__.__name__:
raise TypeError('Properties can only be loaded from hybrids of the same type')
all_properties = [prop.name for prop in self._properties]
all_relations = [relation.name for relation in self._relations]
if include:
properties_to_copy = include
else:
properties_to_copy = all_properties
if include_relations:
properties_to_copy += all_relations
if exclude:
properties_to_copy = [p for p in properties_to_copy if p not in exclude]
possible_options = all_properties + (all_relations if include_relations else [])
properties_to_copy = [p for p in properties_to_copy if p in possible_options]
for key in properties_to_copy:
setattr(self, key, getattr(other_object, key))
def updated_on_datastore(self):
"""
Checks whether this object has been modified on the datastore
"""
if self.volatile is True:
return False
this_version = self._data['_version']
try:
store_version = self._persistent.get(self._key)['_version']
except KeyNotFoundException:
store_version = -1
return this_version != store_version
#######################
# Properties
#######################
@property
def guid(self):
"""
The primary key of the object
"""
return self._guid
#######################
# Helper methods
#######################
def _backend_property(self, function, dynamic):
"""
Handles the internal caching of dynamic properties
"""
caller_name = dynamic.name
cache_key = '{0}_{1}'.format(self._key, caller_name)
mutex = VolatileMutex(cache_key)
try:
cached_data = self._volatile.get(cache_key)
if cached_data is None:
if dynamic.locked:
mutex.acquire()
cached_data = self._volatile.get(cache_key)
if cached_data is None:
function_info = inspect.getargspec(function)
if 'dynamic' in function_info.args:
cached_data = function(dynamic=dynamic) # Load data from backend
else:
cached_data = function()
if cached_data is not None:
correct, allowed_types, given_type = Toolbox.check_type(cached_data, dynamic.return_type)
if not correct:
raise TypeError('Dynamic property {0} allows types {1}. {2} given'.format(
caller_name, str(allowed_types), given_type
))
if dynamic.timeout > 0:
self._volatile.set(cache_key, cached_data, dynamic.timeout)
return cached_data
finally:
mutex.release()
def __str__(self):
"""
The string representation of a DataObject is the serialized value
"""
return json.dumps(self.serialize(), indent=4)
def __hash__(self):
"""
Defines a hashing equivalent for a given object. The key (object type and guid) is considered to be identifying
"""
return hash(self._key)
def __eq__(self, other):
"""
Checks whether two objects are the same.
"""
if not isinstance(other, DataObject):
return False
return self.__hash__() == other.__hash__()
def __ne__(self, other):
"""
        Checks whether two objects are not the same.
"""
if not isinstance(other, DataObject):
return True
return not self.__eq__(other)
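# A hedged sketch (an addition, not part of this module) of how a hybrid is
# typically defined on top of DataObject. The real blueprint classes live
# elsewhere in ovs.dal; the minimal stand-ins below only carry the attributes
# that MetaClass and DataObject actually read, so treat names and signatures
# as assumptions.
class _Property(object):
    def __init__(self, name, property_type, docstring='', mandatory=False, default=None):
        self.name, self.property_type = name, property_type
        self.docstring, self.mandatory, self.default = docstring, mandatory, default
class _Dynamic(object):
    def __init__(self, name, return_type, timeout, locked=False):
        self.name, self.return_type = name, return_type
        self.timeout, self.locked = timeout, locked
class Disk(DataObject):
    # MetaClass picks these up through the '_<ClassName>_<internal>' convention
    _Disk_properties = [_Property('name', str, docstring='Name of the disk'),
                        _Property('size', int, docstring='Size in bytes')]
    _Disk_relations = []
    _Disk_dynamics = [_Dynamic('used_size', int, timeout=60)]
    def _used_size(self):
        """Loads the used size; cached for the dynamic's timeout"""
        return 0  # placeholder for a real storage backend call
# With configured persistent/volatile stores, usage would look like:
#   disk = Disk()
#   disk.name = 'disk1'
#   disk.size = 100
#   disk.save()
#   print disk.used_size  # served from the volatile cache for 60 seconds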
|
Take me out to the Phillies ball game! Batter up for the Phillies vs. Rockies game with your CampOut friends and families. You'll get the chance to tour the stadium, participate in batting practice, and watch the game from a prime location. Due to very limited space, we can only accept up to 4 participants per family. This is one you don't want to miss!
Please note that one parent or guardian must be present for the entirety of your visit. CampOut events are open to families of children 17 years and younger who have attended The Hole in the Wall Gang Camp at least once.
|
'''
Created on Mar 13, 2013
@author: dstu
'''
from sqlalchemy.schema import Column, ForeignKey
from sqlalchemy.types import Integer
import AIClass as AI
import CreatureClass as Cr
import colors
import Game as G
class Player(Cr.Creature):
color = colors.white
description = 'player'
species = 'player'
creatureType = 'player'
def __init__(self, **kwargs):
super(Player, self).__init__(symbol = u'@', name=u"player",
AIClass = AI.PlayerAI, maxHP=10, **kwargs)
id = Column(Integer, ForeignKey('creatures.id'), primary_key=True)
    def move(self, dx, dy):
        """Attempt to move by (dx, dy). Returns True if the move (or the
        resulting interaction) consumed the player's action."""
        newX = self.getX() + dx
        newY = self.getY() + dy
        level = self.getLevel()
        newTile = level.getTile(newX, newY)
        if newTile is not None and level.placeCreature(self, newTile):
            # Stepped into a free, walkable tile
            return True
        elif newTile is not None:
            # Tile exists but is blocked; interact with whatever occupies it
            return newTile.bump(self)
        else:
            # Walked off the map edge
            return level.bumpEdge(self)
def die(self):
message = self.The() + " dies!"
G.message(message)
print message
G.game.quit()
def the(self):
return self.getName()
def The(self):
return self.getName()
def giveItemToCreature(self, item, creature):
self.getInventory().removeItem(item)
creature.getInventory().addItem(item)
def getQuestItemOfType(self, itemType):
return self.getInventory().getQuestItemOfType(itemType)
|
Today several exchange students from Australia came to our school. Two of them, Olivia and Ahoy, joined our G1D group to enjoy a day of school life at Ulink. We all felt excited and communicated with them actively. In our English Basic lesson, they introduced their school life and eating habits in Australia. Then we were separated into two groups to have further communication. I found that they care more about after-school activities and hobbies, whereas we pay more attention to academic results, homework and so on. This activity provided a chance for us to learn more about the cultural and educational differences between Australia and China. Though we currently have a little difficulty understanding some of their words, we clearly realize that communicating with foreigners is the greatest way to improve our listening and speaking skills.
I hope that all of us can learn something today and be more active in communicating with foreigners and studying in foreign countries in the future.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2013,2016 The Font Bakery Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See AUTHORS.txt for the list of Authors and LICENSE.txt for the License.
# Adapted for the Hack typeface build workflow by Chris Simpkins
from __future__ import print_function, unicode_literals
import sys
import os
from fontTools import ttLib
def set_empty_dsig(ttFont):
newDSIG = ttLib.newTable("DSIG")
newDSIG.ulVersion = 1
newDSIG.usFlag = 0
newDSIG.usNumSigs = 0
newDSIG.signatureRecords = []
ttFont.tables["DSIG"] = newDSIG
def main(argv):
for path in argv:
if not os.path.exists(path):
            sys.stderr.write("[fix-dsig.py] ERROR: " + path + " is not a valid path to a font file\n")
sys.exit(1)
else:
font = ttLib.TTFont(path)
set_empty_dsig(font)
font.save(path)
print(path + " - successful DSIG table fix")
if __name__ == '__main__':
main(sys.argv[1:])
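# Usage sketch (illustrative, not part of the original script; the font
# filename below is a hypothetical example):
#
#   $ python fix-dsig.py Hack-Regular.ttf
#   Hack-Regular.ttf - successful DSIG table fix
#
# The result can be verified with fontTools:
#
#   from fontTools import ttLib
#   font = ttLib.TTFont("Hack-Regular.ttf")
#   assert font["DSIG"].usNumSigs == 0  # empty placeholder signature table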
|
On the 10th of November, the municipality of Agios Athanasios organized an incredible, colourful show featuring traditional music and dances from different parts of the world.
The mayor, Mr. Marinos Kyriakou, mentioned in his speech that events like these help with the integration of immigrants into our society, as well as with the acceptance of different cultures and nationalities in general. The Agios Athanasios municipality has been a significant part of this cultural acceptance movement, organizing events and educational trips for third-country nationals who live and work in Cyprus.
The dance show took the form of a story in which the protagonists were two children living in a village in the Caucasus until a war forced them to abandon their home, separating them: one child ended up by the Black Sea at one end of the mountain range, the other by the Caspian Sea at the opposite end. The two then set out on an incredible journey to reunite, passing through different countries and facing difficult situations, which they were able to overcome with the help of the "guardian angels" who protected them throughout their travels.
The event was part of the integration program “Limassol, one city, the whole world”.
|
# -*- coding: utf-8 -*-
# pylint: disable=too-many-lines
"""
This module provides the model element class that represent a behave model:
* :class:`Feature`
* :class:`Scenario`
* :class:`ScenarioOutline`
* :class:`Step`
* ...
"""
from __future__ import absolute_import, with_statement, unicode_literals
import copy
import difflib
import logging
import traceback
import itertools
import sys
import time
import six
from six.moves import zip # pylint: disable=redefined-builtin
from behave.model_core import \
BasicStatement, TagAndStatusStatement, TagStatement, Replayable
from behave.matchers import NoMatch
from behave.textutil import text as _text
class Feature(TagAndStatusStatement, Replayable):
"""A `feature`_ parsed from a *feature file*.
The attributes are:
.. attribute:: keyword
This is the keyword as seen in the *feature file*. In English this will
be "Feature".
.. attribute:: name
The name of the feature (the text after "Feature".)
.. attribute:: description
The description of the feature as seen in the *feature file*. This is
stored as a list of text lines.
.. attribute:: background
The :class:`~behave.model.Background` for this feature, if any.
.. attribute:: scenarios
A list of :class:`~behave.model.Scenario` making up this feature.
.. attribute:: tags
A list of @tags (as :class:`~behave.model.Tag` which are basically
glorified strings) attached to the feature.
See :ref:`controlling things with tags`.
.. attribute:: status
Read-Only. A summary status of the feature's run. If read before the
feature is fully tested it will return "untested" otherwise it will
return one of:
"untested"
         The feature has not been completely tested yet.
"skipped"
         One or more steps of this feature were passed over during testing.
"passed"
The feature was tested successfully.
"failed"
One or more steps of this feature failed.
.. attribute:: hook_failed
       Indicates if a hook failure occurred while running this feature.
.. versionadded:: 1.2.6
.. attribute:: duration
The time, in seconds, that it took to test this feature. If read before
the feature is tested it will return 0.0.
.. attribute:: filename
The file name (or "<string>") of the *feature file* where the feature
was found.
.. attribute:: line
The line number of the *feature file* where the feature was found.
.. attribute:: language
Indicates which spoken language (English, French, German, ..) was used
for parsing the feature file and its keywords. The I18N language code
indicates which language is used. This corresponds to the language tag
at the beginning of the feature file.
.. versionadded:: 1.2.6
.. _`feature`: gherkin.html#features
"""
type = "feature"
def __init__(self, filename, line, keyword, name, tags=None,
description=None, scenarios=None, background=None,
language=None):
tags = tags or []
super(Feature, self).__init__(filename, line, keyword, name, tags)
self.description = description or []
self.scenarios = []
self.background = background
self.language = language
self.parser = None
self.hook_failed = False
if scenarios:
for scenario in scenarios:
self.add_scenario(scenario)
def reset(self):
"""Reset to clean state before a test run."""
super(Feature, self).reset()
self.hook_failed = False
for scenario in self.scenarios:
scenario.reset()
def __repr__(self):
return '<Feature "%s": %d scenario(s)>' % \
(self.name, len(self.scenarios))
def __iter__(self):
return iter(self.scenarios)
def add_scenario(self, scenario):
scenario.feature = self
scenario.background = self.background
self.scenarios.append(scenario)
def compute_status(self):
"""Compute the status of this feature based on its:
* scenarios
* scenario outlines
* hook failures
:return: Computed status (as string-enum).
"""
skipped = True
passed_count = 0
for scenario in self.scenarios:
scenario_status = scenario.status
if scenario_status == "failed":
return "failed"
elif scenario_status == "untested":
if passed_count > 0:
return "failed" # ABORTED: Some passed, now untested.
return "untested"
if scenario_status != "skipped":
skipped = False
if scenario_status == "passed":
passed_count += 1
if skipped:
return "skipped"
elif self.hook_failed:
return "failed"
else:
return "passed"
@property
def duration(self):
# -- NEW: Background is executed N times, now part of scenarios.
feature_duration = 0.0
for scenario in self.scenarios:
feature_duration += scenario.duration
return feature_duration
def walk_scenarios(self, with_outlines=False):
"""
Provides a flat list of all scenarios of this feature.
A ScenarioOutline element adds its scenarios to this list.
But the ScenarioOutline element itself is only added when specified.
        A flat scenario list is useful when all scenarios of a feature
should be processed.
:param with_outlines: If ScenarioOutline items should be added, too.
:return: List of all scenarios of this feature.
"""
all_scenarios = []
for scenario in self.scenarios:
if isinstance(scenario, ScenarioOutline):
scenario_outline = scenario
if with_outlines:
all_scenarios.append(scenario_outline)
all_scenarios.extend(scenario_outline.scenarios)
else:
all_scenarios.append(scenario)
return all_scenarios
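    # Example (illustrative, not part of the original source): for a feature
    # holding one plain Scenario followed by one ScenarioOutline that expands
    # into two example rows,
    #   feature.walk_scenarios()                   # -> [scenario, row1, row2]
    #   feature.walk_scenarios(with_outlines=True) # -> [scenario, outline, row1, row2]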
def should_run(self, config=None):
"""
Determines if this Feature (and its scenarios) should run.
Implements the run decision logic for a feature.
The decision depends on:
* if the Feature is marked as skipped
* if the config.tags (tag expression) enable/disable this feature
:param config: Runner configuration to use (optional).
:return: True, if scenario should run. False, otherwise.
"""
answer = not self.should_skip
if answer and config:
answer = self.should_run_with_tags(config.tags)
return answer
def should_run_with_tags(self, tag_expression):
"""Determines if this feature should run when the tag expression is used.
A feature should run if:
* it should run according to its tags
* any of its scenarios should run according to its tags
:param tag_expression: Runner/config environment tags to use.
:return: True, if feature should run. False, otherwise (skip it).
"""
run_feature = tag_expression.check(self.tags)
if not run_feature:
for scenario in self:
if scenario.should_run_with_tags(tag_expression):
run_feature = True
break
return run_feature
def mark_skipped(self):
"""Marks this feature (and all its scenarios and steps) as skipped.
Note this function may be called before the feature is executed.
"""
self.skip(require_not_executed=True)
assert self.status == "skipped"
def skip(self, reason=None, require_not_executed=False):
"""Skip executing this feature or the remaining parts of it.
Note that this feature may be already partly executed
when this function is called.
:param reason: Optional reason why feature should be skipped (as string).
:param require_not_executed: Optional, requires that feature is not
executed yet (default: false).
"""
if reason:
logger = logging.getLogger("behave")
logger.warning(u"SKIP FEATURE %s: %s", self.name, reason)
self._cached_status = None
self.should_skip = True
self.skip_reason = reason
for scenario in self.scenarios:
scenario.skip(reason, require_not_executed)
if not self.scenarios:
# -- SPECIAL CASE: Feature without scenarios
self._cached_status = "skipped"
assert self.status in self.final_status #< skipped, failed or passed.
def run(self, runner):
# pylint: disable=too-many-branches
self._cached_status = None
self.hook_failed = False
runner.context._push() # pylint: disable=protected-access
runner.context.feature = self
# run this feature if the tags say so or any one of its scenarios
run_feature = self.should_run(runner.config)
if run_feature or runner.config.show_skipped:
for formatter in runner.formatters:
formatter.feature(self)
# current tags as a set
runner.context.tags = set(self.tags)
hooks_called = False
if not runner.config.dry_run and run_feature:
hooks_called = True
for tag in self.tags:
runner.run_hook("before_tag", runner.context, tag)
runner.run_hook("before_feature", runner.context, self)
# -- RE-EVALUATE SHOULD-RUN STATE:
# Hook may call feature.mark_skipped() to exclude it.
run_feature = self.should_run()
if self.background and (run_feature or runner.config.show_skipped):
for formatter in runner.formatters:
formatter.background(self.background)
failed_count = 0
for scenario in self.scenarios:
# -- OPTIONAL: Select scenario by name (regular expressions).
if (runner.config.name and
not scenario.should_run_with_name_select(runner.config)):
scenario.mark_skipped()
continue
failed = scenario.run(runner)
if failed:
failed_count += 1
if runner.config.stop or runner.aborted:
# -- FAIL-EARLY: Stop after first failure.
break
self._cached_status = None # -- ENFORCE: compute_status() after run.
if not self.scenarios and not run_feature:
# -- SPECIAL CASE: Feature without scenarios
self._cached_status = "skipped"
if hooks_called:
runner.run_hook("after_feature", runner.context, self)
if self.hook_failed and failed_count == 0:
failed_count = 1
for tag in self.tags:
runner.run_hook("after_tag", runner.context, tag)
runner.context._pop() # pylint: disable=protected-access
if run_feature or runner.config.show_skipped:
for formatter in runner.formatters:
formatter.eof()
failed = (failed_count > 0)
return failed
class Background(BasicStatement, Replayable):
"""A `background`_ parsed from a *feature file*.
The attributes are:
.. attribute:: keyword
This is the keyword as seen in the *feature file*. In English this will
typically be "Background".
.. attribute:: name
The name of the background (the text after "Background:".)
.. attribute:: steps
A list of :class:`~behave.model.Step` making up this background.
.. attribute:: duration
The time, in seconds, that it took to run this background. If read
before the background is run it will return 0.0.
.. attribute:: filename
The file name (or "<string>") of the *feature file* where the background
was found.
.. attribute:: line
The line number of the *feature file* where the background was found.
.. _`background`: gherkin.html#backgrounds
"""
type = "background"
def __init__(self, filename, line, keyword, name, steps=None):
super(Background, self).__init__(filename, line, keyword, name)
self.steps = steps or []
def __repr__(self):
return '<Background "%s">' % self.name
def __iter__(self):
return iter(self.steps)
@property
def duration(self):
duration = 0
for step in self.steps:
duration += step.duration
return duration
class Scenario(TagAndStatusStatement, Replayable):
"""A `scenario`_ parsed from a *feature file*.
The attributes are:
.. attribute:: keyword
This is the keyword as seen in the *feature file*. In English this will
typically be "Scenario".
.. attribute:: name
The name of the scenario (the text after "Scenario:".)
.. attribute:: description
The description of the scenario as seen in the *feature file*.
This is stored as a list of text lines.
.. attribute:: feature
The :class:`~behave.model.Feature` this scenario belongs to.
.. attribute:: steps
A list of :class:`~behave.model.Step` making up this scenario.
.. attribute:: tags
A list of @tags (as :class:`~behave.model.Tag` which are basically
glorified strings) attached to the scenario.
See :ref:`controlling things with tags`.
.. attribute:: status
Read-Only. A summary status of the scenario's run. If read before the
scenario is fully tested it will return "untested" otherwise it will
return one of:
"untested"
         The scenario has not been completely tested yet.
"skipped"
         One or more steps of this scenario were passed over during testing.
"passed"
The scenario was tested successfully.
"failed"
One or more steps of this scenario failed.
.. attribute:: hook_failed
       Indicates if a hook failure occurred while running this scenario.
.. versionadded:: 1.2.6
.. attribute:: duration
The time, in seconds, that it took to test this scenario. If read before
the scenario is tested it will return 0.0.
.. attribute:: filename
The file name (or "<string>") of the *feature file* where the scenario
was found.
.. attribute:: line
The line number of the *feature file* where the scenario was found.
.. _`scenario`: gherkin.html#scenarios
"""
# pylint: disable=too-many-instance-attributes
type = "scenario"
continue_after_failed_step = False
def __init__(self, filename, line, keyword, name, tags=None, steps=None,
description=None):
tags = tags or []
super(Scenario, self).__init__(filename, line, keyword, name, tags)
self.description = description or []
self.steps = steps or []
self.background = None
self.feature = None # REFER-TO: owner=Feature
self.hook_failed = False
self._background_steps = None
self._row = None
self.was_dry_run = False
self.stderr = None
self.stdout = None
def reset(self):
"""Reset the internal data to reintroduce new-born state just after the
ctor was called.
"""
super(Scenario, self).reset()
self._row = None
self.was_dry_run = False
self.stderr = None
self.stdout = None
for step in self.all_steps:
step.reset()
@property
def background_steps(self):
"""Provide background steps if feature has a background.
Lazy init that copies the background steps.
Note that a copy of the background steps is needed to ensure
that the background step status is specific to the scenario.
:return: List of background steps or empty list
"""
if self._background_steps is None:
# -- LAZY-INIT (need copy of background.steps):
# Each scenario needs own background.steps status.
# Otherwise, background step status of the last scenario is used.
steps = []
if self.background:
steps = [copy.copy(step) for step in self.background.steps]
self._background_steps = steps
return self._background_steps
@property
def all_steps(self):
"""Returns iterator to all steps, including background steps if any."""
if self.background is not None:
return itertools.chain(self.background_steps, self.steps)
else:
return iter(self.steps)
def __repr__(self):
return '<Scenario "%s">' % self.name
def __iter__(self):
return self.all_steps
def compute_status(self):
"""Compute the status of the scenario from its steps
(and hook failures).
:return: Computed status (as string).
"""
for step in self.all_steps:
if step.status == "undefined":
if self.was_dry_run:
# -- SPECIAL CASE: In dry-run with undefined-step discovery
# Undefined steps should not cause failed scenario.
return "untested"
else:
# -- NORMALLY: Undefined steps cause failed scenario.
return "failed"
elif step.status != "passed":
assert step.status in ("failed", "skipped", "untested")
return step.status
#elif step.status == "failed":
# return "failed"
#elif step.status == "skipped":
# return "skipped"
#elif step.status == "untested":
# return "untested"
if self.hook_failed:
return "failed"
return "passed"
@property
def duration(self):
# -- ORIG: for step in self.steps: Background steps were excluded.
scenario_duration = 0
for step in self.all_steps:
scenario_duration += step.duration
return scenario_duration
@property
def effective_tags(self):
"""
Effective tags for this scenario:
* own tags
* tags inherited from its feature
"""
tags = self.tags
if self.feature:
tags = self.feature.tags + self.tags
return tags
def should_run(self, config=None):
"""
Determines if this Scenario (or ScenarioOutline) should run.
Implements the run decision logic for a scenario.
The decision depends on:
* if the Scenario is marked as skipped
* if the config.tags (tag expression) enable/disable this scenario
* if the scenario is selected by name
:param config: Runner configuration to use (optional).
:return: True, if scenario should run. False, otherwise.
"""
answer = not self.should_skip
if answer and config:
answer = (self.should_run_with_tags(config.tags) and
self.should_run_with_name_select(config))
return answer
def should_run_with_tags(self, tag_expression):
"""
Determines if this scenario should run when the tag expression is used.
:param tag_expression: Runner/config environment tags to use.
:return: True, if scenario should run. False, otherwise (skip it).
"""
return tag_expression.check(self.effective_tags)
def should_run_with_name_select(self, config):
"""Determines if this scenario should run when it is selected by name.
:param config: Runner/config environment name regexp (if any).
:return: True, if scenario should run. False, otherwise (skip it).
"""
# -- SELECT-ANY: If select by name is not specified (not config.name).
return not config.name or config.name_re.search(self.name)
def mark_skipped(self):
"""Marks this scenario (and all its steps) as skipped.
Note that this method can be called before the scenario is executed.
"""
self.skip(require_not_executed=True)
assert self.status == "skipped", "OOPS: scenario.status=%s" % self.status
def skip(self, reason=None, require_not_executed=False):
"""Skip from executing this scenario or the remaining parts of it.
Note that the scenario may be already partly executed
when this method is called.
:param reason: Optional reason why it should be skipped (as string).
"""
if reason:
scenario_type = self.__class__.__name__
logger = logging.getLogger("behave")
logger.warning(u"SKIP %s %s: %s", scenario_type, self.name, reason)
self._cached_status = None
self.should_skip = True
self.skip_reason = reason
for step in self.all_steps:
not_executed = step.status in ("untested", "skipped")
if not_executed:
step.status = "skipped"
else:
assert not require_not_executed, \
"REQUIRE NOT-EXECUTED, but step is %s" % step.status
if not self.all_steps:
# -- SPECIAL CASE: Scenario without steps
self._cached_status = "skipped"
assert self.status in self.final_status #< skipped, failed or passed
def run(self, runner):
# pylint: disable=too-many-branches, too-many-statements
self._cached_status = None
failed = False
run_scenario = self.should_run(runner.config)
run_steps = run_scenario and not runner.config.dry_run
dry_run_scenario = run_scenario and runner.config.dry_run
self.was_dry_run = dry_run_scenario
if run_scenario or runner.config.show_skipped:
for formatter in runner.formatters:
formatter.scenario(self)
runner.context._push() # pylint: disable=protected-access
runner.context.scenario = self
runner.context.tags = set(self.effective_tags)
hooks_called = False
if not runner.config.dry_run and run_scenario:
hooks_called = True
for tag in self.tags:
runner.run_hook("before_tag", runner.context, tag)
runner.run_hook("before_scenario", runner.context, self)
# -- RE-EVALUATE SHOULD-RUN STATE:
# Hook may call scenario.mark_skipped() to exclude it.
run_scenario = run_steps = self.should_run()
runner.setup_capture()
if run_scenario or runner.config.show_skipped:
for step in self:
for formatter in runner.formatters:
formatter.step(step)
for step in self.all_steps:
if run_steps:
if not step.run(runner):
# -- CASE: Failed or undefined step
# Optionally continue_after_failed_step if enabled.
# But disable run_steps after undefined-step.
run_steps = (self.continue_after_failed_step and
step.status == "failed")
failed = True
# pylint: disable=protected-access
runner.context._set_root_attribute("failed", True)
self._cached_status = "failed"
elif self.should_skip:
# -- CASE: Step skipped remaining scenario.
# assert self.status == "skipped", "Status: %s" % self.status
run_steps = False
elif failed or dry_run_scenario:
# -- SKIP STEPS: After failure/undefined-step occurred.
# BUT: Detect all remaining undefined steps.
step.status = "skipped"
if dry_run_scenario:
step.status = "untested"
found_step = runner.step_registry.find_match(step)
if not found_step:
step.status = "undefined"
runner.undefined_steps.append(step)
else:
# -- SKIP STEPS: For disabled scenario.
# CASES:
# * Undefined steps are not detected (by intention).
# * Step skipped remaining scenario.
step.status = "skipped"
self._cached_status = None # -- ENFORCE: compute_status() after run.
if not run_scenario:
# -- SPECIAL CASE: Scenario without steps.
self._cached_status = "skipped"
# Attach the stdout and stderr if generate Junit report
if runner.config.junit:
self.stdout = runner.context.stdout_capture.getvalue()
self.stderr = runner.context.stderr_capture.getvalue()
runner.teardown_capture()
if hooks_called:
runner.run_hook("after_scenario", runner.context, self)
if self.hook_failed:
failed = True
for tag in self.tags:
runner.run_hook("after_tag", runner.context, tag)
runner.context._pop() # pylint: disable=protected-access
return failed
class ScenarioOutlineBuilder(object):
"""Helper class to use a ScenarioOutline as a template and
build its scenarios (as template instances).
"""
def __init__(self, annotation_schema):
self.annotation_schema = annotation_schema
@staticmethod
def render_template(text, row=None, params=None):
"""Render a text template with placeholders, ala "Hello <name>".
:param row: As placeholder provider (dict-like).
:param params: As additional placeholder provider (as dict).
:return: Rendered text, known placeholders are substituted w/ values.
"""
if not ("<" in text and ">" in text):
return text
safe_values = False
for placeholders in (row, params):
if not placeholders:
continue
for name, value in placeholders.items():
if safe_values and ("<" in value and ">" in value):
continue # -- OOPS, value looks like placeholder.
text = text.replace("<%s>" % name, value)
return text
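    # Example (illustrative): with row = {"name": u"Alice"} and
    # params = {"examples.index": u"1"},
    #   render_template(u"Hello <name> (run <examples.index>)", row, params)
    # returns u"Hello Alice (run 1)"; text without "<" and ">" is returned
    # unchanged.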
def make_scenario_name(self, outline_name, example, row, params=None):
"""Build a scenario name for an example row of this scenario outline.
Placeholders for row data are replaced by values.
SCHEMA: "{outline_name} -*- {examples.name}@{row.id}"
:param outline_name: ScenarioOutline's name (as template).
:param example: Examples object.
:param row: Row of this example.
:param params: Additional placeholders for example/row.
:return: Computed name for the scenario representing example/row.
"""
if params is None:
params = {}
params["examples.name"] = example.name or ""
params.setdefault("examples.index", example.index)
params.setdefault("row.index", row.index)
params.setdefault("row.id", row.id)
# -- STEP: Replace placeholders in scenario/example name (if any).
examples_name = self.render_template(example.name, row, params)
params["examples.name"] = examples_name
scenario_name = self.render_template(outline_name, row, params)
class Data(object):
def __init__(self, name, index):
self.name = name
self.index = index
self.id = name # pylint: disable=invalid-name
example_data = Data(examples_name, example.index)
row_data = Data(row.id, row.index)
return self.annotation_schema.format(name=scenario_name,
examples=example_data, row=row_data)
@classmethod
def make_row_tags(cls, outline_tags, row, params=None):
if not outline_tags:
return []
tags = []
for tag in outline_tags:
if "<" in tag and ">" in tag:
tag = cls.render_template(tag, row, params)
if "<" in tag or ">" in tag:
# -- OOPS: Unknown placeholder, drop tag.
continue
new_tag = Tag.make_name(tag, unescape=True)
tags.append(new_tag)
return tags
@classmethod
def make_step_for_row(cls, outline_step, row, params=None):
# -- BASED-ON: new_step = outline_step.set_values(row)
new_step = copy.deepcopy(outline_step)
new_step.name = cls.render_template(new_step.name, row, params)
if new_step.text:
new_step.text = cls.render_template(new_step.text, row)
        if new_step.table:
            for name, value in row.items():
                for table_row in new_step.table:
                    for i, cell in enumerate(table_row.cells):
                        table_row.cells[i] = cell.replace("<%s>" % name, value)
return new_step
def build_scenarios(self, scenario_outline):
"""Build scenarios for a ScenarioOutline from its examples."""
# -- BUILD SCENARIOS (once): For this ScenarioOutline from examples.
params = {
"examples.name": None,
"examples.index": None,
"row.index": None,
"row.id": None,
}
scenarios = []
for example_index, example in enumerate(scenario_outline.examples):
example.index = example_index+1
params["examples.name"] = example.name
params["examples.index"] = _text(example.index)
for row_index, row in enumerate(example.table):
row.index = row_index+1
row.id = "%d.%d" % (example.index, row.index)
params["row.id"] = row.id
params["row.index"] = _text(row.index)
scenario_name = self.make_scenario_name(scenario_outline.name,
example, row, params)
row_tags = self.make_row_tags(scenario_outline.tags, row, params)
row_tags.extend(example.tags)
new_steps = []
for outline_step in scenario_outline.steps:
new_step = self.make_step_for_row(outline_step, row, params)
new_steps.append(new_step)
# -- STEP: Make Scenario name for this row.
# scenario_line = example.line + 2 + row_index
scenario_line = row.line
scenario = Scenario(scenario_outline.filename, scenario_line,
scenario_outline.keyword,
scenario_name, row_tags, new_steps)
scenario.feature = scenario_outline.feature
scenario.background = scenario_outline.background
scenario._row = row # pylint: disable=protected-access
scenarios.append(scenario)
return scenarios
class ScenarioOutline(Scenario):
"""A `scenario outline`_ parsed from a *feature file*.
A scenario outline extends the existing :class:`~behave.model.Scenario`
class with the addition of the :class:`~behave.model.Examples` tables of
data from the *feature file*.
The attributes are:
.. attribute:: keyword
This is the keyword as seen in the *feature file*. In English this will
typically be "Scenario Outline".
.. attribute:: name
The name of the scenario (the text after "Scenario Outline:".)
.. attribute:: description
The description of the `scenario outline`_ as seen in the *feature file*.
This is stored as a list of text lines.
.. attribute:: feature
The :class:`~behave.model.Feature` this scenario outline belongs to.
.. attribute:: steps
A list of :class:`~behave.model.Step` making up this scenario outline.
.. attribute:: examples
A list of :class:`~behave.model.Examples` used by this scenario outline.
.. attribute:: tags
A list of @tags (as :class:`~behave.model.Tag` which are basically
glorified strings) attached to the scenario.
See :ref:`controlling things with tags`.
.. attribute:: status
Read-Only. A summary status of the scenario outlines's run. If read
before the scenario is fully tested it will return "untested" otherwise
it will return one of:
"untested"
         The scenario has not been completely tested yet.
"skipped"
         One or more scenarios of this outline were passed over during testing.
"passed"
The scenario was tested successfully.
"failed"
One or more scenarios of this outline failed.
.. attribute:: duration
The time, in seconds, that it took to test the scenarios of this
outline. If read before the scenarios are tested it will return 0.0.
.. attribute:: filename
The file name (or "<string>") of the *feature file* where the scenario
was found.
.. attribute:: line
The line number of the *feature file* where the scenario was found.
.. _`scenario outline`: gherkin.html#scenario-outlines
"""
type = "scenario_outline"
annotation_schema = u"{name} -- @{row.id} {examples.name}"
def __init__(self, filename, line, keyword, name, tags=None,
steps=None, examples=None, description=None):
super(ScenarioOutline, self).__init__(filename, line, keyword, name,
tags, steps, description)
self.examples = examples or []
self._scenarios = []
def reset(self):
"""Reset runtime temporary data like before a test run."""
super(ScenarioOutline, self).reset()
for scenario in self._scenarios: # -- AVOID: BUILD-SCENARIOS
scenario.reset()
@property
def scenarios(self):
"""Return the scenarios with the steps altered to take the values from
the examples.
"""
if self._scenarios:
return self._scenarios
# -- BUILD SCENARIOS (once): For this ScenarioOutline from examples.
builder = ScenarioOutlineBuilder(self.annotation_schema)
self._scenarios = builder.build_scenarios(self)
return self._scenarios
def __repr__(self):
return '<ScenarioOutline "%s">' % self.name
def __iter__(self):
return iter(self.scenarios) # -- REQUIRE: BUILD-SCENARIOS
def compute_status(self):
skipped_count = 0
for scenario in self._scenarios: # -- AVOID: BUILD-SCENARIOS
scenario_status = scenario.status
if scenario_status in ("failed", "untested"):
return scenario_status
elif scenario_status == "skipped":
skipped_count += 1
if skipped_count > 0 and skipped_count == len(self._scenarios):
# -- ALL SKIPPED:
return "skipped"
# -- OTHERWISE: ALL PASSED
return "passed"
@property
def duration(self):
outline_duration = 0
for scenario in self._scenarios: # -- AVOID: BUILD-SCENARIOS
outline_duration += scenario.duration
return outline_duration
def should_run_with_tags(self, tag_expression):
"""Determines if this scenario outline (or one of its scenarios)
should run when the tag expression is used.
:param tag_expression: Runner/config environment tags to use.
:return: True, if scenario should run. False, otherwise (skip it).
"""
if tag_expression.check(self.effective_tags):
return True
for scenario in self.scenarios: # -- REQUIRE: BUILD-SCENARIOS
if scenario.should_run_with_tags(tag_expression):
return True
# -- NOTHING SELECTED:
return False
def should_run_with_name_select(self, config):
"""Determines if this scenario should run when it is selected by name.
:param config: Runner/config environment name regexp (if any).
:return: True, if scenario should run. False, otherwise (skip it).
"""
if not config.name:
return True # -- SELECT-ALL: Select by name is not specified.
for scenario in self.scenarios: # -- REQUIRE: BUILD-SCENARIOS
if scenario.should_run_with_name_select(config):
return True
# -- NOTHING SELECTED:
return False
def mark_skipped(self):
"""Marks this scenario outline (and all its scenarios/steps) as skipped.
Note that this method may be called before the scenario outline
is executed.
"""
self.skip(require_not_executed=True)
assert self.status == "skipped"
def skip(self, reason=None, require_not_executed=False):
"""Skip from executing this scenario outline or its remaining parts.
Note that the scenario outline may be already partly executed
when this method is called.
:param reason: Optional reason why it should be skipped (as string).
"""
if reason:
logger = logging.getLogger("behave")
logger.warning(u"SKIP ScenarioOutline %s: %s", self.name, reason)
self._cached_status = None
self.should_skip = True
for scenario in self.scenarios:
scenario.skip(reason, require_not_executed)
if not self.scenarios:
# -- SPECIAL CASE: ScenarioOutline without scenarios/examples
self._cached_status = "skipped"
assert self.status in self.final_status #< skipped, failed or passed
def run(self, runner):
# pylint: disable=protected-access
# REASON: context._set_root_attribute(), scenario._row
self._cached_status = None
failed_count = 0
for scenario in self.scenarios: # -- REQUIRE: BUILD-SCENARIOS
runner.context._set_root_attribute("active_outline", scenario._row)
failed = scenario.run(runner)
if failed:
failed_count += 1
if runner.config.stop or runner.aborted:
# -- FAIL-EARLY: Stop after first failure.
break
runner.context._set_root_attribute("active_outline", None)
return failed_count > 0
class Examples(TagStatement, Replayable):
"""A table parsed from a `scenario outline`_ in a *feature file*.
The attributes are:
.. attribute:: keyword
This is the keyword as seen in the *feature file*. In English this will
typically be "Example".
.. attribute:: name
       The name of the examples table (the text after "Examples:".)
.. attribute:: table
An instance of :class:`~behave.model.Table` that came with the example
in the *feature file*.
.. attribute:: filename
The file name (or "<string>") of the *feature file* where the example
was found.
.. attribute:: line
The line number of the *feature file* where the example was found.
.. _`examples`: gherkin.html#examples
"""
type = "examples"
def __init__(self, filename, line, keyword, name, tags=None, table=None):
super(Examples, self).__init__(filename, line, keyword, name, tags)
self.table = table
self.index = None
class Step(BasicStatement, Replayable):
"""A single `step`_ parsed from a *feature file*.
The attributes are:
.. attribute:: keyword
This is the keyword as seen in the *feature file*. In English this will
typically be "Given", "When", "Then" or a number of other words.
.. attribute:: name
The name of the step (the text after "Given" etc.)
.. attribute:: step_type
The type of step as determined by the keyword. If the keyword is "and"
then the previous keyword in the *feature file* will determine this
step's step_type.
.. attribute:: text
An instance of :class:`~behave.model.Text` that came with the step
in the *feature file*.
.. attribute:: table
An instance of :class:`~behave.model.Table` that came with the step
in the *feature file*.
.. attribute:: status
Read-Only. A summary status of the step's run. If read before the
step is tested it will return "untested" otherwise it will
return one of:
"skipped"
This step was passed over during testing.
"passed"
The step was tested successfully.
"failed"
The step failed.
.. attribute:: hook_failed
       Indicates if a hook failure occurred while running this step.
.. versionadded:: 1.2.6
.. attribute:: duration
The time, in seconds, that it took to test this step. If read before the
step is tested it will return 0.0.
.. attribute:: error_message
If the step failed then this will hold any error information, as a
single string. It will otherwise be None.
.. attribute:: filename
The file name (or "<string>") of the *feature file* where the step was
found.
.. attribute:: line
The line number of the *feature file* where the step was found.
.. _`step`: gherkin.html#steps
"""
type = "step"
def __init__(self, filename, line, keyword, step_type, name, text=None,
table=None):
super(Step, self).__init__(filename, line, keyword, name)
self.step_type = step_type
self.text = text
self.table = table
self.status = "untested"
self.hook_failed = False
self.duration = 0
self.exception = None
self.exc_traceback = None
self.error_message = None
def reset(self):
"""Reset temporary runtime data to reach clean state again."""
self.status = "untested"
self.hook_failed = False
self.duration = 0
self.exception = None
self.exc_traceback = None
self.error_message = None
def store_exception_context(self, exception):
self.exception = exception
self.exc_traceback = sys.exc_info()[2]
def __repr__(self):
return '<%s "%s">' % (self.step_type, self.name)
def __eq__(self, other):
return (self.step_type, self.name) == (other.step_type, other.name)
def __hash__(self):
return hash(self.step_type) + hash(self.name)
def set_values(self, table_row):
"""Clone a new step from this one, used for ScenarioOutline.
Replace ScenarioOutline placeholders w/ values.
:param table_row: Placeholder data for example row.
:return: Cloned, adapted step object.
.. note:: Deprecating
Use 'ScenarioOutlineBuilder.make_step_for_row()' instead.
"""
import warnings
warnings.warn("Use 'ScenarioOutline.make_step_for_row()' instead",
PendingDeprecationWarning, stacklevel=2)
outline_step = self
return ScenarioOutlineBuilder.make_step_for_row(outline_step, table_row)
def run(self, runner, quiet=False, capture=True):
# pylint: disable=too-many-branches, too-many-statements
# -- RESET: Run-time information.
self.exception = self.exc_traceback = self.error_message = None
self.status = "untested"
self.hook_failed = False
match = runner.step_registry.find_match(self)
if match is None:
runner.undefined_steps.append(self)
if not quiet:
for formatter in runner.formatters:
formatter.match(NoMatch())
self.status = "undefined"
if not quiet:
for formatter in runner.formatters:
formatter.result(self)
return False
keep_going = True
error = u""
if not quiet:
for formatter in runner.formatters:
formatter.match(match)
runner.run_hook("before_step", runner.context, self)
if capture:
runner.start_capture()
try:
start = time.time()
# -- ENSURE:
# * runner.context.text/.table attributes are reset (#66).
# * Even EMPTY multiline text is available in context.
runner.context.text = self.text
runner.context.table = self.table
match.run(runner.context)
if self.status == "untested":
# -- NOTE: Executed step may have skipped scenario and itself.
self.status = "passed"
except KeyboardInterrupt as e:
runner.aborted = True
error = u"ABORTED: By user (KeyboardInterrupt)."
self.status = "failed"
self.store_exception_context(e)
except AssertionError as e:
self.status = "failed"
self.store_exception_context(e)
if e.args:
message = _text(e)
error = u"Assertion Failed: "+ message
else:
# no assertion text; format the exception
error = _text(traceback.format_exc())
except Exception as e: # pylint: disable=broad-except
self.status = "failed"
error = _text(traceback.format_exc())
self.store_exception_context(e)
self.duration = time.time() - start
if capture:
runner.stop_capture()
runner.run_hook("after_step", runner.context, self)
if self.hook_failed:
self.status = "failed"
# flesh out the failure with details
if self.status == "failed":
assert isinstance(error, six.text_type)
if capture:
# -- CAPTURE-ONLY: Non-nested step failures.
if runner.config.stdout_capture:
output = runner.stdout_capture.getvalue()
if output:
output = _text(output)
error += u"\nCaptured stdout:\n" + output
if runner.config.stderr_capture:
output = runner.stderr_capture.getvalue()
if output:
output = _text(output)
error += u"\nCaptured stderr:\n" + output
if runner.config.log_capture:
output = runner.log_capture.getvalue()
if output:
output = _text(output)
error += u"\nCaptured logging:\n" + output
self.error_message = error
keep_going = False
if not quiet:
for formatter in runner.formatters:
formatter.result(self)
return keep_going
class Table(Replayable):
"""A `table`_ extracted from a *feature file*.
Table instance data is accessible using a number of methods:
**iteration**
Iterating over the Table will yield the :class:`~behave.model.Row`
instances from the .rows attribute.
**indexed access**
Individual rows may be accessed directly by index on the Table instance;
table[0] gives the first non-heading row and table[-1] gives the last
row.
The attributes are:
.. attribute:: headings
The headings of the table as a list of strings.
.. attribute:: rows
       A list of instances of :class:`~behave.model.Row` that make up the body
of the table in the *feature file*.
Tables are also comparable, for what that's worth. Headings and row data
are compared.
.. _`table`: gherkin.html#table
"""
type = "table"
def __init__(self, headings, line=None, rows=None):
Replayable.__init__(self)
self.headings = headings
self.line = line
self.rows = []
if rows:
for row in rows:
self.add_row(row, line)
def add_row(self, row, line=None):
self.rows.append(Row(self.headings, row, line))
def add_column(self, column_name, values=None, default_value=u""):
"""Adds a new column to this table.
Uses :param:`default_value` for new cells (if :param:`values` are
not provided). param:`values` are extended with :param:`default_value`
if values list is smaller than the number of table rows.
:param column_name: Name of new column (as string).
:param values: Optional list of cell values in new column.
:param default_value: Default value for cell (if values not provided).
:returns: Index of new column (as number).
"""
# assert isinstance(column_name, unicode)
assert not self.has_column(column_name)
if values is None:
values = [default_value] * len(self.rows)
elif not isinstance(values, list):
values = list(values)
if len(values) < len(self.rows):
more_size = len(self.rows) - len(values)
more_values = [default_value] * more_size
values.extend(more_values)
new_column_index = len(self.headings)
self.headings.append(column_name)
for row, value in zip(self.rows, values):
assert len(row.cells) == new_column_index
row.cells.append(value)
return new_column_index
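    # Example (illustrative): for a table with headings [u"name"] and two rows,
    #   table.add_column(u"email", values=[u"alice@example.org"])
    # appends u"email" to the headings, fills the first row with the given
    # value, pads the second row with default_value (u""), and returns the new
    # column index (1).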
def remove_column(self, column_name):
if not isinstance(column_name, int):
try:
column_index = self.get_column_index(column_name)
except ValueError:
raise KeyError("column=%s is unknown" % column_name)
assert isinstance(column_index, int)
assert column_index < len(self.headings)
del self.headings[column_index]
for row in self.rows:
assert column_index < len(row.cells)
del row.cells[column_index]
def remove_columns(self, column_names):
for column_name in column_names:
self.remove_column(column_name)
def has_column(self, column_name):
return column_name in self.headings
def get_column_index(self, column_name):
return self.headings.index(column_name)
def require_column(self, column_name):
"""Require that a column exists in the table.
Raise an AssertionError if the column does not exist.
:param column_name: Name of new column (as string).
:return: Index of column (as number) if it exists.
"""
if not self.has_column(column_name):
columns = ", ".join(self.headings)
msg = "REQUIRE COLUMN: %s (columns: %s)" % (column_name, columns)
raise AssertionError(msg)
return self.get_column_index(column_name)
def require_columns(self, column_names):
for column_name in column_names:
self.require_column(column_name)
def ensure_column_exists(self, column_name):
"""Ensures that a column with the given name exists.
If the column does not exist, the column is added.
:param column_name: Name of column (as string).
:return: Index of column (as number).
"""
if self.has_column(column_name):
return self.get_column_index(column_name)
else:
return self.add_column(column_name)
def __repr__(self):
return "<Table: %dx%d>" % (len(self.headings), len(self.rows))
def __eq__(self, other):
if isinstance(other, Table):
if self.headings != other.headings:
return False
for my_row, their_row in zip(self.rows, other.rows):
if my_row != their_row:
return False
else:
# -- ASSUME: table <=> raw data comparison
other_rows = other
for my_row, their_row in zip(self.rows, other_rows):
if my_row != their_row:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __iter__(self):
return iter(self.rows)
def __getitem__(self, index):
return self.rows[index]
def assert_equals(self, data):
"""Assert that this table's cells are the same as the supplied "data".
The data passed in must be a list of lists giving:
[
[row 1],
[row 2],
[row 3],
]
If the cells do not match then a useful AssertionError will be raised.
"""
        assert self == data
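# Example usage of Table (illustrative, not part of the original source):
#   table = Table([u"name", u"email"])
#   table.add_row([u"alice", u"alice@example.org"])
#   table[0][u"name"]                 # -> u"alice" (indexed row, named cell)
#   [row[u"email"] for row in table]  # iteration yields Row instances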
class Row(object):
"""One row of a `table`_ parsed from a *feature file*.
Row data is accessible using a number of methods:
**iteration**
Iterating over the Row will yield the individual cells as strings.
**named access**
Individual cells may be accessed by heading name; row["name"] would give
the cell value for the column with heading "name".
**indexed access**
Individual cells may be accessed directly by index on the Row instance;
row[0] gives the first cell and row[-1] gives the last cell.
The attributes are:
.. attribute:: cells
The list of strings that form the cells of this row.
.. attribute:: headings
The headings of the table as a list of strings.
Rows are also comparable, for what that's worth. Only the cells are
compared.
.. _`table`: gherkin.html#table
"""
def __init__(self, headings, cells, line=None, comments=None):
self.headings = headings
self.comments = comments
for c in cells:
assert isinstance(c, six.text_type)
self.cells = cells
self.line = line
def __getitem__(self, name):
try:
index = self.headings.index(name)
except ValueError:
if isinstance(name, int):
index = name
else:
raise KeyError('"%s" is not a row heading' % name)
return self.cells[index]
def __repr__(self):
return "<Row %r>" % (self.cells,)
def __eq__(self, other):
return self.cells == other.cells
def __ne__(self, other):
return not self.__eq__(other)
def __len__(self):
return len(self.cells)
def __iter__(self):
return iter(self.cells)
def items(self):
return zip(self.headings, self.cells)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def as_dict(self):
"""Converts the row and its cell data into a dictionary.
:return: Row data as dictionary (without comments, line info).
"""
from behave.compat.collections import OrderedDict
return OrderedDict(self.items())
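# Example usage of Row (illustrative, not part of the original source):
#   row = Row([u"name", u"email"], [u"alice", u"alice@example.org"])
#   row[u"name"]               # -> u"alice" (named access)
#   row[0]                     # -> u"alice" (indexed access)
#   row.get(u"phone", u"n/a")  # -> u"n/a"
#   row.as_dict()              # -> OrderedDict([(u"name", u"alice"), ...])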
class Tag(six.text_type):
"""Tags appear may be associated with Features or Scenarios.
They're a subclass of regular strings (unicode pre-Python 3) with an
additional ``line`` number attribute (where the tag was seen in the source
feature file.
See :ref:`controlling things with tags`.
"""
    allowed_chars = u"._-=:" # In addition to alphanumeric chars.
quoting_chars = ("'", '"', "<", ">")
def __new__(cls, name, line):
o = six.text_type.__new__(cls, name)
o.line = line
return o
@classmethod
def make_name(cls, text, unescape=False, allowed_chars=None):
"""Translate text into a "valid tag" without whitespace, etc.
Translation rules are:
* alnum chars => same, kept
* space chars => "_"
* other chars => deleted
Preserve following characters (in addition to alnums, like: A-z, 0-9):
* dots => "." (support: dotted-names, active-tag name schema)
* minus => "-" (support: dashed-names)
* underscore => "_"
* equal => "=" (support: active-tag name schema)
* colon => ":" (support: active-tag name schema or similar)
:param text: Unicode text as input for name.
:param unescape: Optional flag to unescape some chars (default: false)
:param allowed_chars: Optional string with additional preserved chars.
:return: Unicode name that can be used as tag.
"""
assert isinstance(text, six.text_type)
if allowed_chars is None:
allowed_chars = cls.allowed_chars
if unescape:
# -- UNESCAPE: Some escaped sequences
text = text.replace("\\t", "\t").replace("\\n", "\n")
chars = []
for char in text:
if char.isalnum() or (allowed_chars and char in allowed_chars):
chars.append(char)
elif char.isspace():
chars.append(u"_")
elif char in cls.quoting_chars:
pass # -- NORMALIZE: Remove any quoting chars.
# -- MAYBE:
# else:
# # -- OTHERWISE: Accept gracefully any other character.
# chars.append(char)
return u"".join(chars)
class Text(six.text_type):
"""Store multiline text from a Step definition.
The attributes are:
.. attribute:: value
The actual text parsed from the *feature file*.
.. attribute:: content_type
Currently only "text/plain".
"""
def __new__(cls, value, content_type=u"text/plain", line=0):
assert isinstance(value, six.text_type)
assert isinstance(content_type, six.text_type)
o = six.text_type.__new__(cls, value)
o.content_type = content_type
o.line = line
return o
def line_range(self):
line_count = len(self.splitlines())
return (self.line, self.line + line_count + 1)
def replace(self, old, new, count=-1):
return Text(super(Text, self).replace(old, new, count), self.content_type,
self.line)
def assert_equals(self, expected):
"""Assert that my text is identical to the "expected" text.
A nice context diff will be displayed if they do not match.
"""
if self == expected:
return True
diff = []
for line in difflib.unified_diff(self.splitlines(),
expected.splitlines()):
diff.append(line)
# strip unnecessary diff prefix
diff = ["Text does not match:"] + diff[3:]
raise AssertionError("\n".join(diff))
# -----------------------------------------------------------------------------
# UTILITY FUNCTIONS:
# -----------------------------------------------------------------------------
def reset_model(model_elements):
"""Reset the test run information stored in model elements.
:param model_elements: List of model elements (Feature, Scenario, ...)
"""
for model_element in model_elements:
model_element.reset()
|
If you are reading this, it means you have not yet registered.
Please take a second to click the Register button and, in a few simple steps, you will be able to enjoy all the many features our D&D Old Comrades site has to offer.
Please note that nicknames are not allowed but can be placed in brackets when registering.
Once you have registered with your full name I will activate you, so you can start enjoying meeting up with old buddies.
|
#Written for the 135-102DAG-J01 Thermistor
import Adafruit_BBIO.ADC as ADC
import time
import math as mt
import goprohero as gp
ADC.setup()
camera = gp.GoProHero('10.5.5.9', 'goprohero316')
cameraOn = False
recording = False
#See June 4 comment on http://ealmberg.blogspot.com/2015/06/4-june-15.html
Bvalue = 3348 #Beta
Ro = 1000 #Resistance at 25 C
To = 298.15 #Room temperature Kelvin
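#Sanity check (illustrative, not part of the original script): at
#R = Ro = 1000 ohms, log(R/Ro) = 0, so 1/T = 1/To and T = To = 298.15 K
#(25 C), the thermistor's nominal rating.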
while True : #The GoProHero object is always truthy, so loop forever polling the sensor
    adcValue = ADC.read("P9_35")
    R = 1000/((1/adcValue) - 1) #Get measured resistance from the 1k voltage divider
    T = 1.0/To + (1.0/Bvalue)*mt.log(R/Ro) #Beta equation: 1/T = 1/To + (1/B)*ln(R/Ro)
    T_K = 1.0/T #Temperature in Kelvin
if T_K > 301 and not cameraOn :
cameraOn = camera.command('power', 'on')
time.sleep(5)
print str(T_K) + ' K'
    t_c = T_K - 273.15 #Convert to Celsius
print str(t_c) + ' C'
t_f = t_c*(9/5.0) + 32.0 #Convert to Fahrenheit
print str(t_f)
if T_K > 301 and cameraOn and not recording :
recording = camera.command('record','on')
time.sleep(5)
    elif T_K <= 301 and cameraOn and recording :
        recording = not camera.command('record', 'off')
time.sleep(5)
cameraOn = not camera.command('power', 'sleep')
time.sleep(5)
time.sleep(1)
|
So, you are running a travel blog. Obviously, you need travel photos for creating posts and content. Most established bloggers use their own photographs, which means they are good photographers: they travel a lot and they shoot. They own their photographs and can use them in any way they want. But if you don't usually take photos, or you simply need free travel photos for your blog, you will surely look online. If you use someone else's photographs for your blog without permission, that will be a great deal of trouble for violating copyright. You just can't use images available online for your own purposes; either you need permission from the owner or you need a licence to use the photographs. You may not be a great photographer, which may be why you are searching for the best free travel photos for your travel blog.
Well, there are hundreds of websites which provide stock photographs, and you can buy those photographs for your own use. But there are some websites where you don't have to pay for the photographs available there, which means you are free to use them. If you are looking for the best free travel photos for travel blogs, then this post is for you. You can visit these websites and find photographs of your choice for your blog. There are several other websites where you can find free photographs for a travel blog; these are my personal picks.
Pexels.com has a large collection of photographs. You can visit the website and search for any sort of photograph you want. It offers thousands of photographs across many categories, such as travel, food, life, abstract, vehicles, and corporate. The website is totally free and a great place to find the best free travel photos for a travel blog. I use it myself; the photos are of great quality and high resolution.
Just like Pexels.com, this website is also a wonderful place to find the best free travel photos for a travel blog. It has a great collection of photographs from different categories. Using the website and its photographs is totally free, and you don't need to register: simply go there, search for the photograph you need, and download it.
These are the two websites where you can find the best free travel photos for a travel blog; they are my personal recommendations. However, using your own photographs is good practice, so you should carry a camera whenever you travel. These days smartphones have good cameras and give good results, so you don't even need to spend on buying a camera; later on you can. I too started with just a phone, and at first I bought a Sony point-and-shoot. Shooting your own photos also improves your photography skills, and there will be a day when you won't need photographs from other sources.
Please also check out https://www.goodfreephotos.com . I specialize in travel photos and I have categorized the photos by location, like country, city, national park, etc. I'd appreciate it if you could add my site to the list.
|
import re
import os
import numpy as np
import numexpr as ne
from urllib import unquote
from pyon.util.log import log
from email.utils import formatdate
from stat import ST_MTIME
from coverage_model.coverage import AbstractCoverage
from coverage_model.parameter_types import QuantityType,ConstantRangeType,ArrayType, ConstantType, RecordType
from coverage_model.parameter_types import CategoryType, BooleanType, ParameterFunctionType, SparseConstantType
from coverage_model.parameter_functions import ParameterFunctionException
from pyon.container.cc import Container
from ion.services.dm.inventory.dataset_management_service import DatasetManagementService
from pydap.model import DatasetType,BaseType, GridType, SequenceType
from pydap.handlers.lib import BaseHandler
from pyon.public import CFG, PRED
import time
import simplejson as json
import collections
import functools
numpy_boolean = '?'
numpy_integer_types = 'bhilqp'
numpy_uinteger_types = 'BHILQP'
numpy_floats = 'efdg'
numpy_complex = 'FDG'
numpy_object = 'O'
numpy_str = 'SUV'
def exception_wrapper(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except:
log.exception("Failed handling PyDAP request")
raise
return wrapper
def request_profile(enabled=False):
def profile(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
init = time.time()
retval = func(*args, **kwargs)
finished = time.time()
log.info('Request took %ss', finished-init)
return retval
return wrapper
return profile
class Handler(BaseHandler):
CACHE_LIMIT = CFG.get_safe('server.pydap.cache_limit', 5)
CACHE_EXPIRATION = CFG.get_safe('server.pydap.cache_expiration', 5)
REQUEST_LIMIT = CFG.get_safe('server.pydap.request_limit', 200) # MB
_coverages = collections.OrderedDict() # Cache has to be a class var because each handler is initialized per request
extensions = re.compile(r'^.*[0-9A-Za-z\-]{32}',re.IGNORECASE)
def __init__(self, filepath):
self.filepath = filepath
def calculate_bytes(self, bitmask, parameter_num):
timesteps = np.sum(bitmask)
# Assume 8 bytes per variable per timestep
count = 8 * parameter_num * timesteps
return count
def is_too_large(self, bitmask, parameter_num):
requested = self.calculate_bytes(bitmask, parameter_num)
return requested > (self.REQUEST_LIMIT * 1024**2)
def get_numpy_type(self, data):
data = self.none_to_str(data)
result = data.dtype.char
if self.is_basestring(data):
result = 'S'
elif self.is_float(data):
result = 'd'
elif self.is_int(data):
result = 'i'
elif result == 'O':
self.json_dump(data)
result = 'O'
elif result == '?':
result = '?'
elif result not in ('d','f','h','i','b','H','I','B','S'):
raise TypeNotSupportedError('Type: %s (%s)' %(result, repr(data)))
return result
def json_dump(self, data):
try:
return json.dumps([i for i in data])
except TypeError as e:
raise TypeNotSupportedError(e)
@classmethod
def get_coverage(cls, data_product_id):
'''
Memoization (LRU) of _get_coverage
'''
if not data_product_id:
return
        try:
            result, ts = cls._coverages.pop(data_product_id)
            if (time.time() - ts) > cls.CACHE_EXPIRATION:
                result.close()
                raise KeyError(data_product_id)
            # Cache hit: re-insert so the entry keeps its place in the LRU
            cls._coverages[data_product_id] = result, ts
        except KeyError:
            resource_registry = Container.instance.resource_registry
            dataset_ids, _ = resource_registry.find_objects(data_product_id, PRED.hasDataset, id_only=True)
            if not dataset_ids:
                return None
            dataset_id = dataset_ids[0]
            result = DatasetManagementService._get_coverage(dataset_id, mode='r')
            if result is None:
                return None
            result.value_caching = False
            ts = time.time()
            if len(cls._coverages) >= cls.CACHE_LIMIT:
                key, value = cls._coverages.popitem(0)
                coverage, old_ts = value
                coverage.close(timeout=5)
            # Key the cache by data_product_id -- the same key used for lookups
            cls._coverages[data_product_id] = result, ts
        return result
def get_attrs(self, cov, name):
pc = cov.get_parameter_context(name)
attrs = {}
if hasattr(pc,'uom'):
attrs['units'] = pc.uom
if hasattr(pc,'display_name'):
attrs['long_name'] = pc.display_name
return attrs
def get_data(self,cov, name, bitmask):
#pc = cov.get_parameter_context(name)
try:
data = self.get_values(cov, name)
            data = data[bitmask]
except ParameterFunctionException:
data = np.empty(cov.num_timesteps(), dtype='object')
data = np.asanyarray(data)
if not data.shape:
data.shape = (1,)
return data
def get_time_data(self, cov, slice_):
return self.get_data(cov, cov.temporal_parameter_name, slice_)
def make_series(self, response, name, data, attrs, ttype):
base_type = BaseType(name=name, data=data, type=ttype, attributes=attrs)
#grid[dims[0]] = BaseType(name=dims[0], data=time_data, type=time_data.dtype.char, attributes=time_attrs, dimensions=dims, shape=time_data.shape)
return base_type
def filter_data(self, data):
if len(data.shape) > 1:
return self.ndim_stringify(data), 'S'
if data.dtype.char in numpy_integer_types + numpy_uinteger_types:
return data, data.dtype.char
if data.dtype.char in numpy_floats:
return data, data.dtype.char
if data.dtype.char in numpy_boolean:
return np.asanyarray(data, dtype='int32') ,'i'
if data.dtype.char in numpy_complex:
return self.stringify(data), 'S'
if data.dtype.char in numpy_object:
return self.stringify_inplace(data), 'S'
if data.dtype.char in numpy_str:
return data, 'S'
return np.asanyarray(['Unsupported Type' for i in data]), 'S'
def ndim_stringify(self, data):
retval = np.empty(data.shape[0], dtype='O')
try:
if len(data.shape)>1:
for i in xrange(data.shape[0]):
retval[i] = ','.join(map(lambda x : str(x), data[i].tolist()))
return retval
except:
retval = np.asanyarray(['None' for d in data])
return retval
def stringify(self, data):
retval = np.empty(data.shape, dtype='O')
try:
for i,obj in enumerate(data):
retval[i] = str(obj)
except:
retval = np.asanyarray(['None' for d in data])
return retval
def stringify_inplace(self, data):
try:
for i,obj in enumerate(data):
data[i] = str(obj)
except:
data = np.asanyarray(['None' for d in data])
return data
def get_bitmask(self, cov, fields, slices, selectors):
'''
returns a bitmask appropriate to the values
'''
bitmask = np.ones(cov.num_timesteps(), dtype=np.bool)
for selector in selectors:
field, operator, value = self.parse_selectors(selector)
if operator is None:
continue
values = self.get_values(cov, field)
expression = ' '.join(['values', operator, value])
bitmask = bitmask & ne.evaluate(expression)
return bitmask
def get_values(self, cov, field):
data_dict = cov.get_parameter_values(param_names=[field], fill_empty_params=True, as_record_array=False).get_data()
data = data_dict[field]
return data
def get_dataset(self, cov, fields, slices, selectors, dataset, response):
seq = SequenceType('data')
bitmask = self.get_bitmask(cov, fields, slices, selectors)
if self.is_too_large(bitmask, len(fields)):
log.error('Client request too large. \nFields: %s\nSelectors: %s', fields, selectors)
return
for name in fields:
# Strip the data. from the field
if name.startswith('data.'):
name = name[5:]
pc = cov.get_parameter_context(name)
if re.match(r'.*_[a-z0-9]{32}', name):
continue # Let's not do this
try:
data = self.get_data(cov, name, bitmask)
attrs = self.get_attrs(cov, name)
if isinstance(pc.param_type, QuantityType):
data, dtype = self.filter_data(data)
seq[name] = self.make_series(response, name, data, attrs, dtype)
elif isinstance(pc.param_type, ConstantType):
data, dtype = self.filter_data(data)
seq[name] = self.make_series(response, name, data, attrs, dtype)
elif isinstance(pc.param_type, ConstantRangeType):
#start = time.time()
#convert to string
try:
#scalar case
if data.shape == (2,):
data = np.atleast_1d('_'.join([str(data[0]), str(data[1])]))
else:
for i,d in enumerate(data):
f = [str(d[0]),str(d[1])]
data[i] = '_'.join(f)
except Exception, e:
data = np.asanyarray(['None' for d in data])
seq[name] = self.make_series(response, name, data, attrs, 'S')
elif isinstance(pc.param_type,BooleanType):
data, dtype = self.filter_data(data)
seq[name] = self.make_series(response, name, data, attrs, dtype)
elif isinstance(pc.param_type,CategoryType):
data, dtype = self.filter_data(data)
#start = time.time()
seq[name] = self.make_series(response, name, data, attrs, dtype)
elif isinstance(pc.param_type,ArrayType):
data, dtype = self.filter_data(data)
seq[name] = self.make_series(response, name, data, attrs, dtype)
elif isinstance(pc.param_type,RecordType):
data, dtype = self.filter_data(data)
seq[name] = self.make_series(response, name, data, attrs, dtype)
elif isinstance(pc.param_type, ParameterFunctionType):
data, dtype = self.filter_data(data)
seq[name] = self.make_series(response, name, data, attrs, dtype)
elif isinstance(pc.param_type, SparseConstantType):
data, dtype = self.filter_data(data)
seq[name] = self.make_series(response, name, data, attrs, dtype)
#dataset[name] = self.make_series(response, name, data, attrs, dtype)
# elif param.is_coordinate and cov.temporal_parameter_name == name:
# dataset[name] = BaseType(name=name, data=data, type=data.dtype.char, attributes=attrs, shape=data.shape)
# else:
# log.error("Unhandled parameter for parameter (%s) type: %s", name, pc.param_type.__class__.__name__)
except Exception, e:
log.exception('Problem reading cov %s %s', cov.name, e.__class__.__name__)
continue
dataset['data'] = seq
return dataset
def value_encoding_to_dap_type(self, value_encoding):
if value_encoding is None:
return 'S'
dt = np.dtype(value_encoding).char
if dt =='O':
return 'S'
return dt
def dap_type(self, context):
if isinstance(context.param_type, (ArrayType, ConstantRangeType, CategoryType, RecordType)):
return 'S'
return self.value_encoding_to_dap_type(context.param_type.value_encoding)
def handle_dds(self, coverage, dataset, fields):
cov = coverage
seq = SequenceType('data')
for name in fields:
# Strip the data. from the field
if name.startswith('data.'):
name = name[5:]
if re.match(r'.*_[a-z0-9]{32}', name):
continue # Let's not do this
try:
context = coverage.get_parameter_context(name)
attrs = self.get_attrs(cov, name)
#grid[name] = BaseType(name=name, type=self.dap_type(context), attributes=attrs, dimensions=(time_name,), shape=(coverage.num_timesteps,))
seq[name] = BaseType(name=name, type=self.dap_type(context), attributes=attrs, shape=(coverage.num_timesteps(),))
#grid[cov.temporal_parameter_name] = time_base
except Exception:
log.exception('Problem reading cov %s', str(cov))
continue
dataset['data'] = seq
return dataset
def parse_query_string(self,query_string):
tokens = query_string.split('&')
fields = []
selectors = []
slices = []
dap_selection_operators = ['<', '<=', '>', '>=', '=', '!=', '=~']
slice_operators = ['[', ':', ']']
for token in tokens:
token = unquote(token)
if not token: # ignore the case where the url ends in nothing or a &
continue
token_identified = False
for selector in dap_selection_operators:
if selector in token:
selectors.append(token)
token_identified = True
break
for operator in slice_operators:
if operator in token:
slices.append(token)
token_identified = True
break
if not token_identified:
fields = token.split(',')
return fields, slices, selectors
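    # Example (hypothetical query string, for illustration only):
    #   parse_query_string('data.temp,data.time&data.temp>10&data.time[0:1:5]')
    # returns
    #   fields    -> ['data.temp', 'data.time']
    #   slices    -> ['data.time[0:1:5]']
    #   selectors -> ['data.temp>10']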
def parse_slices(self,slice_operator):
pivot = slice_operator.find('[')
field = slice_operator[:pivot]
slicer = slice_operator[pivot:]
# Strip away the outer []s
slicer = slicer[1:-1]
        # Separate the slice into tokens separated by :
        start, stride, stop = slicer.split(':')
        start = int(start)
        stride = int(stride)
        stop = int(stop) + 1
        # Python's slice() signature is (start, stop, step); the DAP
        # hyperslab syntax is [start:stride:stop]
        slice_ = slice(start, stop, stride)
return field, slice_
def parse_selectors(self, selector):
matches = re.match(r'([a-zA-Z0-9-_\.]+)(<=|<|>=|>|=~|!=|=)(.*)', selector)
field, operator, value = matches.groups()
value = value.replace('"','')
value = value.replace("'",'')
if 'data.' in field: # strip away the data prefix
field = field[5:]
        if operator == '=':
            operator = '=='
        elif operator == '=~':
            return None, None, None
return field, operator, value
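    # Example: parse_selectors('data.temp>=5') -> ('temp', '>=', '5');
    # the 'data.' prefix is stripped and quotes are removed from the value.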
@request_profile(CFG.get_safe('server.pydap.profile_enabled', True))
@exception_wrapper
def parse_constraints(self, environ):
base, data_product_id = os.path.split(self.filepath)
coverage = self.get_coverage(data_product_id)
last_modified = formatdate(time.mktime(time.localtime(os.stat(self.filepath)[ST_MTIME])))
environ['pydap.headers'].append(('Last-modified', last_modified))
atts = {}
atts['title'] = coverage.name
dataset = DatasetType(coverage.name) #, attributes=atts)
response = environ['pydap.response']
if response == 'dods':
query_string = environ['QUERY_STRING']
fields, slices, selectors = self.parse_query_string(query_string)
elif response in ('dds', 'das'):
fields = [] # All fields
slices = []
selectors = []
all_vars = coverage.list_parameters()
if not fields:
fields = all_vars
if response == "dods":
dataset = self.get_dataset(coverage, fields, slices, selectors, dataset, response)
elif response in ('dds', 'das'):
self.handle_dds(coverage, dataset, fields)
return dataset
def none_to_str(self, data):
for i,d in enumerate(data):
if d is None:
data[i] = 'None'
return data
def is_basestring(self, data):
for d in data:
if not isinstance(d, basestring):
return False
return True
def is_float(self, data):
for d in data:
if not isinstance(d, float):
return False
return True
def is_int(self, data):
for d in data:
if not isinstance(d, int):
return False
return True
def is_collection(self, data):
for d in data:
if not isinstance(d, (list,tuple,np.ndarray)):
return False
return True
def update_slice_object(self, slice_, fill_index):
slice_ = slice_[0] if slice_ else slice(None)
#need to truncate slice here in case time has fill values
if slice_.start is None and slice_.stop is None and fill_index >=0:
slice_ = slice(0, fill_index, 1)
if slice_.start and slice_.stop is None:
if fill_index > slice_.start:
return None
        if slice_.stop is not None and fill_index > slice_.stop:
            # slice objects are immutable; build a new one instead of mutating
            slice_ = slice(slice_.start, fill_index, slice_.step)
if slice_.start is not None and slice_.start == slice_.stop:
slice_ = slice(slice_.start, slice_.stop+1, slice_.step)
return slice_
class TypeNotSupportedError(Exception):
pass
|
A look at Jon David "JD" Harrington around the time of the crime and when he was arrested. Additional images and a video below.
In November 2012, we published a post entitled UNSOLVED MURDERS IN COLORADO: READ TEN MORE PERSONAL STORIES OF VICTIMS, featuring cold cases highlighted by the invaluable Families of Homicide Victims and Missing Persons website.
Among the tragedies we spotlighted was the murder of Carolyn Jansen, who died in 2003, around the time she was reported missing.
Now, fortunately, the Jansen story can officially be retired from this list.
A jury has found Jon David "JD" Harrington, Jansen's former roommate, guilty of killing her and then stuffing her body in a Rubbermaid container, where it remained for two years. And the combination of DNA evidence and an investigation by a TV true-crime show may have helped bring him to justice.
Carolyn Colleen Jansen was born May 21st, 1958. She was an only child. Her parents divorced early in her life. She worked as a waitress and at many truck stops. She also sold Avon for years. She loved sandcastles, the color purple, the outdoors and spending time with her children and grandchildren, whom she loved very much. Carolyn Baker gave birth to four girls and two boys. An infant, David Yount, died soon after birth. Carolyn Baker's decomposed body was found in a Rubbermaid container in June 2005, two years after she was last seen. The container had been left behind in an Aurora home by a man now serving time for theft and forgery. The Arapahoe County Coroner determined Carolyn Jansen was murdered.
Carolyn Jansen seen during the celebration of her wedding to Robert Yount.
Not much had been happening with the Jansen investigation around the time our piece was published — but in early 2014, the inquiry revved up again.
That year, the TNT program Cold Justice, starring ex-Texas prosecutor Kelly Siegler and veteran CSI investigator Yolanda McClary, took on the case for an episode eventually given the vivid title "Lady in the Box." The entire program is on view below, but here's the synopsis, which prominently mentions Harrington.
Carolyn Jansen, 43, was living in Aurora, Colo., trying to make a fresh start after spending years in an unhappy marriage. In 2001, she got a job working at a local diner while she built her direct-sale beauty products business. Then, in the early months of 2003, Carolyn suddenly went missing. More than two years later, on June 28, 2005, in a suburban neighborhood outside Denver, Richard Johnson had been noticing a foul odor coming from the back of his house, which he assumed was caused by some neighborhood cats. He started to clean out his storage shed, and after moving a few boxes around, he came upon a plastic container that had a horrible smell. He opened the container, and inside was a quilt covered with insects, a skeletonized foot and long brown hair. The body inside was later identified as being that of Carolyn Jansen. The medical examiner found a fracture over Jansen's left eye, and her death was ruled a homicide by blunt impact to the head. Richard told the Aurora Police Department that the box belonged to his friend, Jon ''JD'' Harrington, who had asked him to store some of his things at his house. JD denied having anything to do with Carolyn's death and told the police that while he and Carolyn were roommates for a short time, he hadn't seen her in years.
The Cold Justice episode aired on March 14, 2014 — but four days earlier, Harrington was arrested on suspicion of killing Jansen.
There's no telling if the interest of the Cold Justice team reignited the investigation, or if new activity got the attention of the show's producers. But either way, the case was finally broken, more than a decade after Jansen's death.
A photo of JD Harrington from around the time he knew Carolyn Jansen.
The key piece of evidence leading to the bust: Harrington's DNA was located on the duct tape that sealed the Rubbermaid container.
The DNA and the other evidence prosecutors assembled against Harrington convinced a jury in Arapahoe County.
Yesterday, Harrington was convicted of second-degree murder in Jansen's death.
Our condolences to the friends, family and loved ones of Carolyn Jansen.
Look below to see a larger version of Harrington's booking photo, followed by the Cold Justice episode.
|
#!/usr/bin/python
""" Defines an interface to allow users to build their queries on the fly. """
# define authorship information
__authors__ = ['Eric Hulser']
__author__ = ','.join(__authors__)
__credits__ = []
__copyright__ = 'Copyright (c) 2011, Projex Software'
__license__ = 'LGPL'
# maintenance information
__maintainer__ = 'Projex Software'
__email__ = '[email protected]'
#------------------------------------------------------------------------------
from projex.text import nativestring
from projexui.qt import Signal
from projexui.qt.QtCore import Qt
from projexui.qt.QtGui import QWidget,\
QVBoxLayout
import projexui
from projexui.widgets.xquerybuilderwidget.xqueryrule \
import XQueryRule
from projexui.widgets.xquerybuilderwidget.xquerylinewidget \
import XQueryLineWidget
class XQueryBuilderWidget(QWidget):
""" """
saveRequested = Signal()
resetRequested = Signal()
cancelRequested = Signal()
def __init__( self, parent = None ):
super(XQueryBuilderWidget, self).__init__( parent )
# load the user interface
projexui.loadUi(__file__, self)
self.setMinimumWidth(470)
# define custom properties
self._rules = {}
self._defaultQuery = []
self._completionTerms = []
self._minimumCount = 1
# set default properties
self._container = QWidget(self)
layout = QVBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(2)
layout.addStretch(1)
self._container.setLayout(layout)
self.uiQueryAREA.setWidget(self._container)
# create connections
self.uiResetBTN.clicked.connect( self.emitResetRequested )
self.uiSaveBTN.clicked.connect( self.emitSaveRequested )
self.uiCancelBTN.clicked.connect( self.emitCancelRequested )
self.resetRequested.connect( self.reset )
def addLineWidget( self, query = None ):
"""
Adds a new line widget to the system with the given values.
        :param query | (<str> term, <str> operator, <str> value) || None
"""
widget = XQueryLineWidget(self)
widget.setTerms(sorted(self._rules.keys()))
widget.setQuery(query)
index = self._container.layout().count() - 1
self._container.layout().insertWidget(index, widget)
widget.addRequested.connect( self.addLineWidget )
widget.removeRequested.connect( self.removeLineWidget )
# update the remove enabled options for these widgets
self.updateRemoveEnabled()
def addRule( self, rule ):
"""
Adds a rule to the system.
:param rule | <XQueryRule>
"""
self._rules[rule.term()] = rule
self.updateRules()
def clear( self ):
"""
Clears out all the widgets from the system.
"""
for lineWidget in self.lineWidgets():
lineWidget.setParent(None)
lineWidget.deleteLater()
def completionTerms( self ):
"""
Returns the list of terms that will be used as a global override
for completion terms when the query rule generates a QLineEdit instance.
:return [<str>, ..]
"""
return self._completionTerms
def count( self ):
"""
Returns the count of the line widgets in the system.
:return <int>
"""
return len(self.lineWidgets())
def currentQuery( self ):
"""
Returns the current query string for this widget.
:return [(<str> term, <str> operator, <str> value), ..]
"""
widgets = self.lineWidgets()
output = []
for widget in widgets:
output.append(widget.query())
return output
def defaultQuery( self ):
"""
Returns the default query for the system.
:return [(<str> term, <str> operator, <str> value), ..]
"""
return self._defaultQuery
def keyPressEvent( self, event ):
"""
Emits the save requested signal for this builder for when the enter
or return press is clicked.
:param event | <QKeyEvent>
"""
if ( event.key() in (Qt.Key_Enter, Qt.Key_Return) ):
self.emitSaveRequested()
super(XQueryBuilderWidget, self).keyPressEvent(event)
def emitCancelRequested( self ):
"""
Emits the cancel requested signal.
"""
if ( not self.signalsBlocked() ):
self.cancelRequested.emit()
def emitResetRequested( self ):
"""
        Emits the reset requested signal.
"""
if ( not self.signalsBlocked() ):
self.resetRequested.emit()
def emitSaveRequested( self ):
"""
Emits the save requested signal.
"""
if ( not self.signalsBlocked() ):
self.saveRequested.emit()
def findRule( self, term ):
"""
Looks up a rule by the inputed term.
:param term | <str>
:return <XQueryRule> || None
"""
return self._rules.get(nativestring(term))
def removeLineWidget( self, widget ):
"""
Removes the line widget from the query.
:param widget | <XQueryLineWidget>
"""
widget.setParent(None)
widget.deleteLater()
self.updateRemoveEnabled()
def minimumCount( self ):
"""
Defines the minimum number of query widgets that are allowed.
:return <int>
"""
return self._minimumCount
def lineWidgets( self ):
"""
Returns a list of line widgets for this system.
:return [<XQueryLineWidget>, ..]
"""
return self.findChildren(XQueryLineWidget)
def reset( self ):
"""
Resets the system to the default query.
"""
self.setCurrentQuery(self.defaultQuery())
def setCompletionTerms( self, terms ):
"""
Sets the list of terms that will be used as a global override
for completion terms when the query rule generates a QLineEdit instance.
:param terms | [<str>, ..]
"""
self._completionTerms = terms
def setCurrentQuery( self, query ):
"""
Sets the query for this system to the inputed query.
:param query | [(<str> term, <str> operator, <str> value), ..]
"""
self.clear()
for entry in query:
self.addLineWidget(entry)
# make sure we have the minimum number of widgets
for i in range(self.minimumCount() - len(query)):
self.addLineWidget()
def setDefaultQuery( self, query ):
"""
Sets the default query that will be used when the user clicks on the \
reset button or the reset method is called.
:param query | [(<str> term, <str> operator, <str> value), ..]
"""
self._defaultQuery = query[:]
def setMinimumCount( self, count ):
"""
Sets the minimum number of line widgets that are allowed at any \
given time.
:param count | <int>
"""
self._minimumCount = count
def setRules( self, rules ):
"""
Sets all the rules for this builder.
:param rules | [<XQueryRule>, ..]
"""
if ( type(rules) in (list, tuple) ):
self._rules = dict([(x.term(), x) for x in rules])
self.updateRules()
return True
elif ( type(rules) == dict ):
self._rules = rules.copy()
self.updateRules()
return True
else:
return False
def setTerms( self, terms ):
"""
Sets a simple rule list by accepting a list of strings for terms. \
This is a convenience method for the setRules method.
:param rules | [<str> term, ..]
"""
return self.setRules([XQueryRule(term = term) for term in terms])
def updateRemoveEnabled( self ):
"""
        Updates the remove enabled based on the current number of line widgets.
"""
lineWidgets = self.lineWidgets()
count = len(lineWidgets)
state = self.minimumCount() < count
for widget in lineWidgets:
widget.setRemoveEnabled(state)
def updateRules( self ):
"""
Updates the query line items to match the latest rule options.
"""
terms = sorted(self._rules.keys())
for child in self.lineWidgets():
child.setTerms(terms)
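# --- Hypothetical usage sketch (not part of the original module) ---
# Assumes a running QApplication and that the companion .ui file loads;
# the terms and operator below are made up for illustration.
#
#   widget = XQueryBuilderWidget()
#   widget.setTerms(['name', 'status', 'owner'])
#   widget.setDefaultQuery([('status', 'is', 'open')])
#   widget.reset()  # populates line widgets from the default query
#
#   def on_save():
#       print widget.currentQuery()
#   widget.saveRequested.connect(on_save)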
|
As I’ve gotten along in years, I’ve been forced to face two unpleasant truths — I don’t appear to be getting any smarter or better looking. And it seemed like the minute those first strands of gray started creeping around my temples, the old “senior moments” started flaring up.
Some mornings, I have a better chance of finding the lost ark than where I placed my keys — and if you offered me a penny for my thoughts, you’d have a right to expect change.
I don’t just want you to live long, friend — I want you to live well. And keeping your brain healthy is essential to maintaining the quality of life we all deserve as we age.
Now, thanks to an amazing new breakthrough from Oxford University, a simple vitamin cocktail may be all you need to keep your brain razor sharp as you age and ward off the ravages of dementia and Alzheimer’s disease FOREVER!
In fact, three simple vitamins could work together like a well-oiled machine to offer your brain up to 90% protection against the most common cause of dementia!
Here’s the scoop. Researchers tracked 156 people over the age of 70 who were suffering from mild memory loss and high levels of homocysteine — a protein that shrinks your brain and can lead to devastating cognitive decline, such as dementia.
The research volunteers ate balanced diets, exercised regularly, and consumed a daily cocktail of vitamins B6, B12, and folic acid.
The results weren’t just impressive. The head of the Imaging Genetics Center at UCLA’s School of Medicine said they were the most amazing brain scans he had ever seen! People who took the daily combo of B6, B12, and folic acid had a whopping 90% less brain shrinkage than folks who took a placebo.
Let’s put that into some context: previous research had shown that other lifestyle changes, like reducing alcohol consumption or dropping excess weight, resulted in an already impressive 25% less brain shrinkage. At 90%, this vitamin cocktail of B6, B12, and folic acid appears to be 3.6 times as effective, a 260% improvement!
If you’re anxious to give this vitamin cocktail a go, a quick word of caution on folic acid. You can find it cheaply in any store that sells supplements, but it still may be no bargain. That’s because, as you age, folic acid might not do your brain a darn bit of good. That’s why Dr. Wright has been recommending folate over folic acid for years.
You see, human cells can’t use folic acid, and we depend on our livers to convert it to all the folate our brains need to stay sharp. But as we age, our bodies become less efficient at converting folic acid to folate — and some folks can’t convert folic acid to folate at all. So you’re better off incorporating folate into your vitamin regimen, instead of folic acid.
Our Creator may have given us two eyes, two ears, two lungs, and two kidneys, friend, but he only gave us one brain — and you need to do everything you can to keep your noggin in tip-top shape. Talk with your doctor about giving this safe, natural vitamin remedy a try, and see if you just might be able to leave dementia in the dust for good!
|
# Copyright 2020 kubeflow.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import kfp.dsl as dsl
# import kfp.gcp as gcp
message_param = dsl.PipelineParam(name='message', value='When flies fly behind flies')
output_path_param = dsl.PipelineParam(name='outputpath', value='default_output')
class GetFrequentWordOp(dsl.ContainerOp):
"""A get frequent word class representing a component in ML Pipelines.
The class provides a nice interface to users by hiding details such as container,
command, arguments.
"""
def __init__(self, name, message="When flies fly behind flies,"
" then flies are following flies."):
"""__init__
Args:
name: An identifier of the step which needs to be unique within a pipeline.
message: a dsl.PipelineParam object representing an input message.
"""
super(GetFrequentWordOp, self).__init__(
name=name,
image='python:3.6-jessie',
command=['sh', '-c'],
arguments=['python -c "from collections import Counter; '
'words = Counter(\'%s\'.split()); print(max(words, key=words.get))" '
'| tee /tmp/message.txt' % message],
file_outputs={'word': '/tmp/message.txt'})
class SaveMessageOp(dsl.ContainerOp):
"""A class representing a component in ML Pipelines.
It saves a message to a given output_path.
"""
def __init__(self, name, message, output_path):
"""Args:
name: An identifier of the step which needs to be unique within a pipeline.
message: a dsl.PipelineParam object representing the message to be saved.
output_path: a dsl.PipelineParam object representing the GCS path for output file.
"""
super(SaveMessageOp, self).__init__(
name=name,
image='google/cloud-sdk',
command=['sh', '-c'],
arguments=['echo "%s" | tee /tmp/results.txt | gsutil cp /tmp/results.txt %s'
% (message, output_path)])
class ExitHandlerOp(dsl.ContainerOp):
"""A class representing a component in ML Pipelines."""
def __init__(self, name):
super(ExitHandlerOp, self).__init__(
name=name,
image='python:3.6-jessie',
command=['sh', '-c'],
arguments=['echo exit!'])
def save_most_frequent_word():
exit_op = ExitHandlerOp('exiting')
with dsl.ExitHandler(exit_op):
counter = GetFrequentWordOp(
name='get-Frequent',
message=message_param)
counter.container.set_memory_request('200M')
saver = SaveMessageOp(
name='save',
message=counter.output,
output_path=output_path_param)
saver.container.set_cpu_limit('0.5')
# saver.container.set_gpu_limit('2')
saver.add_node_selector_constraint('kubernetes.io/os', 'linux')
# saver.apply(gcp.use_tpu(tpu_cores=2, tpu_resource='v2', tf_version='1.12'))
if __name__ == '__main__':
from kfp_tekton.compiler import TektonCompiler
tkc = TektonCompiler()
compiled_workflow = tkc._create_workflow(
save_most_frequent_word,
'Save Most Frequent Word',
'Get Most Frequent Word and Save to GCS',
[message_param, output_path_param],
None)
tkc._write_workflow(compiled_workflow, __file__.replace('.py', '.yaml'))
|
... your online one stop shop for all your packaging needs!
UK Packaging Suppliers - a leader in the online sale / distribution of packaging products.
Our commitment is simple, to provide 100% satisfaction to our customers, 100% of the time.
It’s why buyers return to us each and every time.
Our customers include large corporations / multi-nationals such as the BBC, Channel 5, Children In Need, Pinewood Studios, Manchester United Football Club, Gunn JCB, T K Maxx, B&Q, Swarovski, Royal Bank Of Scotland PLC, House Of Fraser, Lidl UK and even Buckingham Palace!
To the many thousands of residential and small commercial business customers we continue to serve, NO ORDER is too big or too small.
We also stock polythene lay flat tubing, pallet strapping supplies and polystyrene foam products, and we are on hand to answer any questions you may have, or provide advice on the packaging solutions you require.
|
import time
from django.utils.decorators import classonlymethod
from django.conf import settings
from django.http import HttpResponse, Http404, HttpResponseForbidden, HttpResponseServerError
from django.utils import simplejson as json
from django.utils.cache import patch_response_headers
from django.utils.decorators import method_decorator
from django.views.decorators.vary import vary_on_headers as dj_vary_on_headers
from django.views.decorators.cache import never_cache
from django.views.generic import View, RedirectView, TemplateView
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from django.core.cache import cache
from django.views.decorators.csrf import requires_csrf_token
from django.template import (Context, loader, TemplateDoesNotExist)
from django.utils.cache import get_max_age, has_vary_header
from django.http import HttpRequest
from django.template.loader import render_to_string
from django.template.context import RequestContext
from versionutils.versioning.views import RevertView, DeleteView
from . import take_n_from
# 29 days, effectively infinite in cache years
# XXX NOTE: For some reason, the memcached client we're using
# gives a client error when sending timestamp-style expiration
# dates -- e.g. > 30 days timestamps. So, for now we must make
# sure and always use <= 30 day timeouts, which should be fine.
DEFAULT_MEMCACHED_TIMEOUT = 60 * 60 * 24 * 29
class ForbiddenException:
pass
class NeverCacheMixin(object):
@method_decorator(never_cache)
def dispatch(self, *args, **kwargs):
return super(NeverCacheMixin, self).dispatch(*args, **kwargs)
class CacheMixin(object):
cache_timeout = DEFAULT_MEMCACHED_TIMEOUT
cache_keep_forever = False
@staticmethod
def get_cache_key(request=None, **kwargs):
raise NotImplementedError
def _should_cache(self, request, response):
if response.streaming or response.status_code != 200:
return False
# Don't cache responses that set a user-specific (and maybe security
# sensitive) cookie in response to a cookie-less request.
if not request.COOKIES and response.cookies and has_vary_header(response, 'Cookie'):
return False
if get_max_age(response) == 0:
return False
return True
def _get_from_cache(self, method, request, *args, **kwargs):
key = self.get_cache_key(request=request, **kwargs)
response = cache.get(key)
if response is None:
response = getattr(super(CacheMixin, self), method)(request, *args, **kwargs)
if hasattr(response, 'render') and callable(response.render):
response.add_post_render_callback(
lambda r: cache.set(key, r, self.cache_timeout)
)
else:
cache.set(key, response, self.cache_timeout)
if self._should_cache(request, response):
# Mark to keep around in Varnish and other cache layers
if self.cache_keep_forever:
response['X-KEEPME'] = True
patch_response_headers(response, self.cache_timeout)
return response
@staticmethod
def invalidate(request, **kwargs):
key = CacheMixin.get_cache_key(request=request, **kwargs)
cache.delete(key)
def get(self, request, *args, **kwargs):
return self._get_from_cache('get', request, *args, **kwargs)
def head(self, request, *args, **kwargs):
return self._get_from_cache('head', request, *args, **kwargs)
    @classmethod
    def get_region_slug_param(cls, *args, **kwargs):
from regions.models import RegionSettings
if kwargs.get('region'):
return kwargs.get('region')
if not kwargs.get('request'):
raise KeyError("Need either `request` or a `region` parameter.")
request = kwargs.get('request')
return RegionSettings.objects.get(domain=request.META['HTTP_HOST']).region.slug
class Custom404Mixin(object):
@classonlymethod
def as_view(cls, **initargs):
default_view = super(Custom404Mixin, cls).as_view(**initargs)
def view_or_handler404(request, *args, **kwargs):
self = cls(**initargs)
try:
return default_view(request, *args, **kwargs)
except Http404 as e:
if hasattr(self, 'handler404'):
return self.handler404(request, *args, **kwargs)
raise e
return view_or_handler404
class CreateObjectMixin(object):
    def create_object(self):
        return self.form_class._meta.model()
def get_object(self, queryset=None):
try:
return super(CreateObjectMixin, self).get_object(queryset)
except Http404:
return self.create_object()
class JSONResponseMixin(object):
def render_to_response(self, context):
"Returns a JSON response containing 'context' as payload"
return self.get_json_response(self.convert_context_to_json(context))
def get_json_response(self, content, **httpresponse_kwargs):
"Construct an `HttpResponse` object."
return HttpResponse(content, content_type='application/json',
**httpresponse_kwargs)
def convert_context_to_json(self, context):
"""
Convert the context dictionary into a JSON object.
Note: Make sure that the entire context dictionary is serializable
"""
return json.dumps(context)
class JSONView(View, JSONResponseMixin):
"""
A JSONView returns, on GET, a json dictionary containing the values of
get_context_data().
"""
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
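# Hypothetical subclass (illustration only): responds to GET with
# {"status": "ok"} serialized as JSON.
#
#   class StatusJSONView(JSONView):
#       def get_context_data(self, **kwargs):
#           return {'status': 'ok'}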
class PermissionRequiredMixin(object):
"""
View mixin for verifying permissions before updating an existing object
Attrs:
permission: A string representing the permission that's required
on the object. E.g. 'page.change_page'. Override
permission_for_object() to allow more complex permission
relationships.
forbidden_message: A string to display when the permisson is not
allowed.
"""
permission = None
forbidden_message = _('Sorry, you are not allowed to perform this action.')
forbidden_message_anon = _('Anonymous users may not perform this action. '
'Please <a href="/Users/login/">log in</a>.')
def get_protected_object(self):
"""
Returns the object that should be used to check permissions.
Override this to use a different object as the "guard".
"""
return self.object
def get_protected_objects(self):
"""
Returns the objects that should be used to check permissions.
"""
return [self.get_protected_object()]
def permission_for_object(self, obj):
"""
Gets the permission that's required for `obj`.
Override this to allow more complex permission relationships.
"""
return self.permission
def get_object_idempotent(self):
return self.object
def patch_get_object(self):
# Since get_object will get called again, we want it to be idempotent
self.get_object = self.get_object_idempotent
def dispatch(self, request, *args, **kwargs):
self.request = request
self.args = args
self.kwargs = kwargs
if hasattr(self, 'get_object'):
self.object = self.get_object()
self.patch_get_object()
protected_objects = self.get_protected_objects()
for obj in protected_objects:
if not request.user.has_perm(self.permission_for_object(obj), obj):
if request.user.is_authenticated():
msg = self.forbidden_message
else:
msg = self.forbidden_message_anon
html = render_to_string('403.html', {'message': msg},
RequestContext(request))
return HttpResponseForbidden(html)
return super(PermissionRequiredMixin, self).dispatch(request, *args,
**kwargs)
class NamedRedirectView(RedirectView):
name = None
def get_redirect_url(self, **kwargs):
return reverse(self.name, kwargs=kwargs)
class AuthenticationRequired(object):
"""
Mixin to make a view only usable to authenticated users.
"""
forbidden_message = _('Sorry, you are not allowed to perform this action.')
forbidden_template_name = '403.html'
def get_forbidden_message(self):
return self.forbidden_message
def dispatch(self, request, *args, **kwargs):
self.request = request
self.args = args
self.kwargs = kwargs
if self.request.user.is_authenticated():
return super(AuthenticationRequired, self).dispatch(request, *args, **kwargs)
msg = self.get_forbidden_message()
html = render_to_string(self.forbidden_template_name, {'message': msg}, RequestContext(request))
return HttpResponseForbidden(html)
class GetCSRFCookieView(TemplateView):
template_name = 'utils/get_csrf_cookie.html'
class MultipleTypesPaginatedView(TemplateView):
items_per_page = 50
context_object_name = 'objects'
def get_object_lists(self):
raise NotImplementedError
def get_pagination_key(self, qs):
"""
Args:
qs: The queryset or iterable we want to get the querystring lookup key for.
Returns:
The querystring lookup. By default, this is `qs.model.__name__.lower()`
"""
return qs.model.__name__.lower()
def get_pagination_merge_key(self):
"""
Returns:
A callable that, when called, returns the value to use for the merge +
sort. Default: no further sorting (stay in place).
"""
return None
def get_pagination_objects(self):
items_with_indexes = []
id_to_page_key = {}
for (_id, qs) in enumerate(self.get_object_lists()):
pagination_key = self.get_pagination_key(qs)
page = int(self.request.GET.get(pagination_key, 0))
items_with_indexes.append((qs, page))
id_to_page_key[_id] = pagination_key
items, indexes, has_more_left = take_n_from(
items_with_indexes,
self.items_per_page,
merge_key=self.get_pagination_merge_key()
)
self.has_more_left = has_more_left
self.current_indexes = {}
for (num, index) in enumerate(indexes):
self.current_indexes[id_to_page_key[num]] = index
return items
def get_context_data(self, *args, **kwargs):
c = super(MultipleTypesPaginatedView, self).get_context_data(*args, **kwargs)
c[self.context_object_name] = self.get_pagination_objects()
c['pagination_has_more_left'] = self.has_more_left
c['pagination_next'] = ''
if self.has_more_left:
qitems = []
for pagelabel, index in self.current_indexes.items():
qitems.append('%s=%s' % (pagelabel, index))
c['pagination_next'] = '?' + '&'.join(qitems)
return c
class RevertView(RevertView):
def allow_admin_actions(self):
return self.request.user.is_staff
class DeleteView(DeleteView):
def allow_admin_actions(self):
return self.request.user.is_staff
@requires_csrf_token
def server_error(request, template_name='500.html'):
"""
500 error handler.
Templates: :template:`500.html`
Context: Contains {{ STATIC_URL }} and {{ LANGUAGE_CODE }}
"""
try:
template = loader.get_template(template_name)
except TemplateDoesNotExist:
return HttpResponseServerError('<h1>Server Error (500)</h1>')
return HttpResponseServerError(template.render(Context({
'STATIC_URL': settings.STATIC_URL,
'LANGUAGE_CODE': settings.LANGUAGE_CODE,
})))
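# --- Hypothetical CacheMixin subclass (not part of the original module) ---
# get_cache_key is the abstract hook every CacheMixin user must provide;
# the view name and URL kwarg below are made up for illustration.
#
#   class RegionDetailView(CacheMixin, TemplateView):
#       template_name = 'regions/detail.html'
#
#       @staticmethod
#       def get_cache_key(request=None, **kwargs):
#           return 'region-detail:%s' % kwargs['slug']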
|
By AhmedElyamani, May 10, 2015 in Plugins - Publishing ONLY!
-It seems using '2 angles' produces a result that is virtually identical to using '1 angle'. If that is the intended (mathematically correct) result, perhaps get rid of "1" and just make the minimum option value "2".
-The "Fill Color" color wheel should be placed directly under the "Filling" checkbox. Also don't use both "Fill" and "Filling"; use one or the other.
-The "Brush Size" slider and "Outline Color" color wheel could be disabled when the "Outline" checkbox is unchecked. Likewise with the "Fill Color" color wheel and "Filling" checkbox.
-An anti-aliasing option wouldn't hurt either.
The source code would be appreciated for the sake of software freedom, if nothing else.
Good to see you back Ahmed!
Good effect, Ahmed. Thank You.
I love fractals. Any code with recursive loops gets my vote!
My suggestion; add more commands than DrawArc - go for the polygons.
@AhmedElyamani! Thanks for the effort and plugin.
Nice to see you back too Ahmed, an interesting plugin too.
I do not speak English. I also wanted to say the same thing. I use google translator.
Thanks for the great feedback , everyone!
@toe_head2001 I've applied all of your suggestions, except for disabling the UI items, as I have no idea how to do it.
It's supported by IndirectUI, but CodeLab doesn't expose the functionality; you'd have to use VisualStudio.
Don't forget to Dispose() unmanaged resources like pens and brushes.
This looks really cool. Thanks for making it! I will try it out later.
When rendering, I still see empty stripes that can be seen with or without using the anti-aliasing feature.
Does this effect only work in 4.05 ? I'm still using the old version and cannot find this effect after downloading. It's such a cool effect and I'd really like to use it in a current project I'm working on.
Here I'm posting a version compatible with PdN 3.5.11 that I've been able to compile. You can find the effect under the Render menu (I hope that's correct because the source code doesn't specify otherwise).
Thanks so much Maximilian. You have no idea how much I appreciate your kindness !
Same here, using the latest version.
Try commenting out the last set of loops - lines 110 to 119 and rebuild. (the dst surface is already copied in line 96). Seems to work for me anyway.
The graphics objects should really be disposed as Midora suggested too.
Here is a little something for all you fractal lovers. I like the way it provides an ability to test the blending of different colors. Thank you Ahmed. Hope you come back soon.
@AndrewDavid! I just love how colorful this is! Thank you so much.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# http://peak.telecommunity.com/DevCenter/setuptools#developer-s-guide
# from distutils.core import setup
from setuptools import setup, find_packages
def read_text(filename, dir=None):
import codecs
import os
if dir is None:
dir = os.path.abspath(os.path.dirname(__file__))
filename = os.path.join(dir, filename)
with codecs.open(filename, 'r', encoding='utf-8') as f:
return f.read()
setup(
url='https://github.com/nandoflorestan/python-dropbox-backup',
name="poorbox",
version='0.1',
author='Nando Florestan',
author_email="[email protected]",
license='BSD',
description="Downloads a dropbox directory via the dropbox REST API. "
"Downloads only the changed files. Useful for limited environments.",
long_description=read_text('README.rst'),
packages=find_packages(),
include_package_data=True,
zip_safe=False,
test_suite='tests',
install_requires=['six', 'dropbox>=1.5.1'],
keywords=['dropbox', "python", 'REST', 'API', 'download', 'console'],
classifiers=[ # http://pypi.python.org/pypi?:action=list_classifiers
"Development Status :: 4 - Beta",
# "Development Status :: 5 - Production/Stable",
'Environment :: Console',
"Environment :: No Input/Output (Daemon)",
"Intended Audience :: Developers",
"Intended Audience :: End Users/Desktop",
"Intended Audience :: System Administrators",
'License :: OSI Approved :: BSD License',
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Topic :: Communications :: File Sharing",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: System :: Archiving :: Mirroring",
"Topic :: System :: Software Distribution",
"Topic :: System :: Systems Administration",
"Topic :: Utilities",
],
entry_points='''
[console_scripts]
poorbox = poorbox:main
''',
)
|
The Cadillac radio code generator is available for unlocking any locked Cadillac radio device worldwide. It is the best Cadillac radio code decoder for sure! More importantly, you will get it for free. Download it below and enjoy constant music in your car.
Just briefly, because you have reason to hurry up with the Cadillac radio code unlock process, a few words on how our online generator works. You will like this generator because it is very simple to use. There is no need to be a computer expert to operate this kind of service. It's as easy as one, two, three.
This very simple process requires only one piece of information about your locked car radio device: the serial number that every Cadillac radio has.
Then write down the code you receive and enter it in your Cadillac radio. Once this procedure is over you can enjoy your favorite music again. Your Cadillac car radio is unlocked!
There are many positive sides, but the most important must be highlighted: the unlock you perform here on our website is permanent. It works on any Cadillac car radio model, from the oldest to the newest!
No matter the reason you need the code again for your Cadillac radio device, you can solve the problem with the same four-digit code that you already got in this unlocking process!
|
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2013 Michal Čihař <[email protected]>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.core.management.base import BaseCommand, CommandError
from weblate.trans.models import SubProject, Project
from glob import glob
import tempfile
import git
import logging
import os
import re
import fnmatch
logger = logging.getLogger('weblate')
class Command(BaseCommand):
    help = 'imports a project with multiple subprojects'
args = '<project> <gitrepo> <branch> <filemask>'
def get_name(self, maskre, path):
matches = maskre.match(path)
return matches.group(1)
def get_match_regexp(self, filemask):
'''
Prepare regexp for file matching
'''
match = fnmatch.translate(filemask)
match = match.replace('.*.*', '(.*.*)')
return re.compile(match)
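    # Example (illustrative): for a filemask like 'locale/**.po',
    # fnmatch.translate() turns the '**' into '.*.*'; wrapping that run in
    # parentheses lets get_name() capture the subproject name from each
    # matched path.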
def handle(self, *args, **options):
'''
Automatic import of project.
'''
if len(args) != 4:
raise CommandError('Not enough parameters!')
# Read params
prjname, repo, branch, filemask = args
maskre = self.get_match_regexp(filemask)
# Try to get project
try:
project = Project.objects.get(slug=prjname)
except Project.DoesNotExist:
raise CommandError(
'Project %s does not exist, you need to create it first!' % prjname
)
# Do we have correct mask?
        if '**' not in filemask:
raise CommandError(
'You need to specify double wildcard for subproject part of the match!'
)
# Create temporary working dir
workdir = tempfile.mkdtemp(dir=project.get_path())
os.chmod(workdir, 0755)
# Initialize git repository
logger.info('Initializing git repository...')
gitrepo = git.Repo.init(workdir)
gitrepo.git.remote('add', 'origin', repo)
logger.info('Fetching remote git repository...')
gitrepo.git.remote('update', 'origin')
gitrepo.git.branch('--track', branch, 'origin/%s' % branch)
logger.info('Updating working copy in git repository...')
gitrepo.git.checkout(branch)
# Find matching files
matches = glob(os.path.join(workdir, filemask))
matches = [f.replace(workdir, '').strip('/') for f in matches]
logger.info('Found %d matching files', len(matches))
# Parse subproject names out of them
names = set()
for match in matches:
names.add(self.get_name(maskre, match))
logger.info('Found %d subprojects', len(names))
# Create first subproject (this one will get full git repo)
name = names.pop()
logger.info('Creating subproject %s as main subproject', name)
# Rename gitrepository to new name
os.rename(
workdir,
os.path.join(project.get_path(), name)
)
SubProject.objects.create(
name=name,
slug=name,
project=project,
repo=repo,
branch=branch,
filemask=filemask.replace('**', name)
)
sharedrepo = 'weblate://%s/%s' % (project.slug, name)
# Create remaining subprojects sharing git repository
for name in names:
logger.info('Creating subproject %s', name)
SubProject.objects.create(
name=name,
slug=name,
project=project,
repo=sharedrepo,
branch=branch,
filemask=filemask.replace('**', name)
)
|
#!/usr/bin/python
import re, sys, json
import xml.etree.ElementTree as et
import numpy
def parseColumnNames(csv):
columnNames = re.split(',', csv)
result = []
for columnName in columnNames:
result.append(columnName.replace(' ', '').replace('"', '').strip())
return result
def parseRowData(csv):
values = re.split(',', csv)
result = []
    for value in values:
        value = value.strip()
        # Guard against empty fields before inspecting the quote characters
        if len(value) >= 2 and value[0] == '"' and value[-1] == '"':
            value = value[1:-1]
        result.append(value.strip())
return result
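# Example (illustrative): parseRowData('"2013-01-07", 123, "ABC", 4.0')
# -> ['2013-01-07', '123', 'ABC', '4.0']
# Note that this naive splitter does not handle commas inside quoted values.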
# "SALES_PERIOD_TILL_DAY","SALES_YEAR_WEEK","OUTLET_ID","PERIODICAL_PRODUCT_ID","PRODUCT_ID","DELIVERED","RETURNED","SOLD"
periodId = sys.argv[1]
filename = sys.argv[2]
outputFilename = sys.argv[3]
outputFilename2 = sys.argv[4]
outputFilenameOutletJs = sys.argv[5]
outputFilename3 = sys.argv[6]
f = open(filename, "r")
out = open(outputFilename, "w")
out2 = open(outputFilename2, "w")
outOutletJs = open(outputFilenameOutletJs, "w")
out3 = open(outputFilename3, "w")
print "Period id: " + periodId
print "Input file: " + filename
print "Output file: " + outputFilename
print "JS output file: " + outputFilenameOutletJs
first = True
outletIdSet = set()
productIdSet = set()
i = 0
maxLines = -1 #100
#DATE_INDEX=0
#YEAR_WEEK_INDEX=1
OUTLET_ID_INDEX=0
#PERIODICAL_PRODUCT_ID_INDEX = 1
#PRODUCT_ID_INDEX = 1
PERIODICAL_PRODUCT_ID_INDEX = 1
#DELIVERED_INDEX = 5
#RETURNED_INDEX = 6
SOLD_INDEX = 2
for line in f:
if first:
columnNames = parseColumnNames(line)
for columnName in columnNames:
print "col: " + columnName
first = False
else:
if i == maxLines:
break
i = i + 1
rowData = parseRowData(line)
outletId = rowData[OUTLET_ID_INDEX]
productId = rowData[PERIODICAL_PRODUCT_ID_INDEX]
sold = float(rowData[SOLD_INDEX])
if outletId not in outletIdSet:
outletIdSet.add(outletId)
if productId not in productIdSet:
productIdSet.add(productId)
if i % 1000 == 0:
sys.stdout.write(".")
sys.stdout.flush()
numOutlets = len(outletIdSet)
numProducts = len(productIdSet)
print
print "outlets: " + str(numOutlets)
print "products: " + str(numProducts)
outletIds = list(outletIdSet)
productIds = list(productIdSet)
m = numpy.zeros( (numProducts, numOutlets), dtype=numpy.float)
for i in range(0, numProducts):
for j in range(0, numOutlets):
m[i, j] = -1
f = open(filename, "r")
first = True
i = 0
for line in f:
if first:
first = False
else:
if i == maxLines:
break
i = i + 1
rowData = parseRowData(line)
outletId = rowData[OUTLET_ID_INDEX]
productId = rowData[PERIODICAL_PRODUCT_ID_INDEX]
sold = float(rowData[SOLD_INDEX])
row = productIds.index(productId)
col = outletIds.index(outletId)
        prev = float(m[row, col])
        if prev == -1:
            prev = 0
        # Accumulate sales for repeated (product, outlet) rows
        m[row, col] = prev + float(sold)
if i % 1000 == 0:
sys.stdout.write(".")
sys.stdout.flush()
print
print numOutlets
out.write(str(numOutlets) + '\n')
out.write("# ")
for outletId in outletIds:
out.write(str(outletId) + " ")
out.write('\n')
for i in range(0, numProducts):
for j in range(0, numOutlets):
val = m[i,j]
if val == -1:
val = ""
else:
val = str(val)
out.write(val + " ")
out.write(productIds[i])
out.write('\n')
sys.stdout.write(".")
sys.stdout.flush()
print
print numProducts
out2.write(str(numProducts) + '\n')
out2.write("# ")
isFirstProductId = True
out3.write(";")
for productId in productIds:
if(not isFirstProductId):
out3.write(";")
isFirstProductId = False
out2.write(str(productId) + " ")
out3.write(str(productId))
out2.write('\n')
out3.write('\n')
for i in range(0, numOutlets):
out3.write(outletIds[i])
out3.write(";")
isFirstProductId = True
for j in range(0, numProducts):
val = m[j,i]
if val == -1:
val = ""
else:
val = str(val)
out2.write(val + " ")
if(not isFirstProductId):
out3.write(";")
isFirstProductId = False
out3.write(val)
out2.write(outletIds[i])
out2.write('\n')
out3.write('\n')
sys.stdout.write(".")
sys.stdout.flush()
print
print "Outlet JS"
varName = "outletSalesByPeriod";
outOutletJs.write("var "+varName+" = "+varName+" || {};\n\n");
outOutletJs.write(varName+"[\"" + periodId + "\"] = {};\n");
outOutletJs.write(varName+"[\"" + periodId + "\"].outletIds = ");
json.dump(outletIds, outOutletJs)
outOutletJs.write(";\n");
outOutletJs.write(varName+"[\"" + periodId + "\"].productIds = ");
json.dump(productIds, outOutletJs)
outOutletJs.write(";\n");
outletSales = []
for i in range(0, numOutlets):
outletId = outletIds[i]
productSales = []
outletSales.append(productSales)
for j in range(0, numProducts):
val = m[j,i]
if val == -1:
val = 0
productId = productIds[j]
productSales.append(val)
outOutletJs.write(varName+"[\"" + periodId + "\"].sold = ");
json.dump(outletSales, outOutletJs)
outOutletJs.write(";\n");
out.close()
out2.close()
outOutletJs.close()
out3.close()
print
print "Finished."
|
Singing on this summer night, yet hidden out of sight.
In the forest where they dwell.
But it was nowhere to be found.
The most beautiful sound from a bird.
|
from django.utils.functional import cached_property
from django.utils.html import escape
from wagtail.core.models import Page
from wagtail.core.rich_text import features as feature_registry
from wagtail.core.rich_text.rewriters import EmbedRewriter, LinkRewriter, MultiRuleRewriter
from wagtail.core.whitelist import Whitelister, allow_without_attributes
class WhitelistRule:
def __init__(self, element, handler):
self.element = element
self.handler = handler
class EmbedTypeRule:
def __init__(self, embed_type, handler):
self.embed_type = embed_type
self.handler = handler
class LinkTypeRule:
def __init__(self, link_type, handler):
self.link_type = link_type
self.handler = handler
# Whitelist rules which are always active regardless of the rich text features that are enabled
BASE_WHITELIST_RULES = {
'[document]': allow_without_attributes,
'p': allow_without_attributes,
'div': allow_without_attributes,
'br': allow_without_attributes,
}
class DbWhitelister(Whitelister):
"""
A custom whitelisting engine to convert the HTML as returned by the rich text editor
into the pseudo-HTML format stored in the database (in which images, documents and other
linked objects are identified by ID rather than URL):
* accepts a list of WhitelistRules to extend the initial set in BASE_WHITELIST_RULES;
* replaces any element with a 'data-embedtype' attribute with an <embed> element, with
attributes supplied by the handler for that type as defined in embed_handlers;
* rewrites the attributes of any <a> element with a 'data-linktype' attribute, as
determined by the handler for that type defined in link_handlers, while keeping the
element content intact.
"""
def __init__(self, converter_rules):
self.converter_rules = converter_rules
self.element_rules = BASE_WHITELIST_RULES.copy()
for rule in self.converter_rules:
if isinstance(rule, WhitelistRule):
self.element_rules[rule.element] = rule.handler
@cached_property
def embed_handlers(self):
return {
rule.embed_type: rule.handler for rule in self.converter_rules
if isinstance(rule, EmbedTypeRule)
}
@cached_property
def link_handlers(self):
return {
rule.link_type: rule.handler for rule in self.converter_rules
if isinstance(rule, LinkTypeRule)
}
def clean_tag_node(self, doc, tag):
if 'data-embedtype' in tag.attrs:
embed_type = tag['data-embedtype']
# fetch the appropriate embed handler for this embedtype
try:
embed_handler = self.embed_handlers[embed_type]
except KeyError:
# discard embeds with unrecognised embedtypes
tag.decompose()
return
embed_attrs = embed_handler.get_db_attributes(tag)
embed_attrs['embedtype'] = embed_type
embed_tag = doc.new_tag('embed', **embed_attrs)
embed_tag.can_be_empty_element = True
tag.replace_with(embed_tag)
elif tag.name == 'a' and 'data-linktype' in tag.attrs:
# first, whitelist the contents of this tag
for child in tag.contents:
self.clean_node(doc, child)
link_type = tag['data-linktype']
try:
link_handler = self.link_handlers[link_type]
except KeyError:
# discard links with unrecognised linktypes
tag.unwrap()
return
link_attrs = link_handler.get_db_attributes(tag)
link_attrs['linktype'] = link_type
tag.attrs.clear()
tag.attrs.update(**link_attrs)
else:
if tag.name == 'div':
tag.name = 'p'
super(DbWhitelister, self).clean_tag_node(doc, tag)
class EditorHTMLConverter:
def __init__(self, features=None):
if features is None:
features = feature_registry.get_default_features()
self.converter_rules = []
for feature in features:
rule = feature_registry.get_converter_rule('editorhtml', feature)
if rule is not None:
# rule should be a list of WhitelistRule() instances - append this to
# the master converter_rules list
self.converter_rules.extend(rule)
@cached_property
def whitelister(self):
return DbWhitelister(self.converter_rules)
def to_database_format(self, html):
return self.whitelister.clean(html)
@cached_property
def html_rewriter(self):
embed_rules = {}
link_rules = {}
for rule in self.converter_rules:
if isinstance(rule, EmbedTypeRule):
embed_rules[rule.embed_type] = rule.handler.expand_db_attributes
elif isinstance(rule, LinkTypeRule):
link_rules[rule.link_type] = rule.handler.expand_db_attributes
return MultiRuleRewriter([
LinkRewriter(link_rules), EmbedRewriter(embed_rules)
])
def from_database_format(self, html):
return self.html_rewriter(html)
class PageLinkHandler:
"""
PageLinkHandler will be invoked whenever we encounter an <a> element in HTML content
with an attribute of data-linktype="page". The resulting element in the database
representation will be:
<a linktype="page" id="42">hello world</a>
"""
@staticmethod
def get_db_attributes(tag):
"""
Given an <a> tag that we've identified as a page link embed (because it has a
data-linktype="page" attribute), return a dict of the attributes we should
have on the resulting <a linktype="page"> element.
"""
return {'id': tag['data-id']}
@staticmethod
def expand_db_attributes(attrs):
try:
page = Page.objects.get(id=attrs['id'])
attrs = 'data-linktype="page" data-id="%d" ' % page.id
parent_page = page.get_parent()
if parent_page:
attrs += 'data-parent-id="%d" ' % parent_page.id
return '<a %shref="%s">' % (attrs, escape(page.localized.specific.url))
except Page.DoesNotExist:
return "<a>"
|
The strange thing is that when I use Chrome to print it, it looks normal.
How do I print the image? Thank you!
|
"""
This code is part of the Arc-flow Vector Packing Solver (VPSolver).
Copyright (C) 2013-2015, Filipe Brandao
Faculdade de Ciencias, Universidade do Porto
Porto, Portugal. All rights reserved. E-mail: <[email protected]>.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import signal
import atexit
import shutil
import tempfile
import subprocess
from . import *
class VBP:
def __init__(self, W, w, b, verbose=None):
self.vbp_file = VPSolver.new_tmp_file(".vbp")
f = open(self.vbp_file,"w")
if type(W)==int:
W=[W]
else:
W = list(W)
print >>f, len(W)
print >>f, " ".join(map(str,W))
print >>f, len(w)
for i in xrange(len(w)):
if type(w[i])==int:
row = [w[i],b[i]]
else:
row = list(w[i])+[b[i]]
assert len(row) == len(W)+1
print >>f, " ".join(map(str,row))
f.close()
if verbose:
f = open(self.vbp_file,"r")
print f.read()
f.close()
self.m = len(b)
self.ndims = len(W)
self.W, self.w, self.b = W, w, b
@classmethod
def fromFile(cls, vbp_file, verbose=None):
f = open(vbp_file, "r")
lst = map(int,f.read().split())
ndims = lst.pop(0)
W = lst[:ndims]
lst = lst[ndims:]
m = lst.pop(0)
w, b = [], []
for i in xrange(m):
w.append(lst[:ndims])
lst = lst[ndims:]
b.append(lst.pop(0))
return cls(W, w, b, verbose)
def __del__(self):
try:
os.remove(self.vbp_file)
except:
pass
class AFG:
def __init__(self, instance, compress=-2, binary=False, vtype="I", verbose=None):
assert isinstance(instance, VBP)
VPSolver.set_verbose(verbose)
self.instance = instance
self.afg_file = VPSolver.new_tmp_file(".afg")
self.output = VPSolver.vbp2afg(instance.vbp_file, self.afg_file, compress, binary, vtype)
self.V, self.A, self.S, self.T = None, None, None, None
def graph(self):
return AFGraph.fromFile(self.afg_file)
def __del__(self):
try:
os.remove(self.afg_file)
except:
pass
class MPS:
def __init__(self, graph, verbose=None):
assert isinstance(graph, AFG)
VPSolver.set_verbose(verbose)
self.afg_graph = graph
self.mps_file = VPSolver.new_tmp_file(".mps")
self.output = VPSolver.afg2mps(graph.afg_file, self.mps_file, verbose=verbose)
def __del__(self):
try:
os.remove(self.mps_file)
except:
pass
class LP:
def __init__(self, graph, verbose=None):
assert isinstance(graph, AFG)
VPSolver.set_verbose(verbose)
self.afg_graph = graph
self.lp_file = VPSolver.new_tmp_file(".lp")
self.output = VPSolver.afg2lp(graph.afg_file, self.lp_file, verbose=verbose)
def __del__(self):
try:
os.remove(self.lp_file)
except:
pass
class VPSolver:
VPSOLVER = "vpsolver"
VBP2AFG = "vbp2afg"
AFG2MPS = "afg2mps"
AFG2LP = "afg2lp"
VBPSOL = "vbpsol"
TMP_DIR = tempfile.mkdtemp()
TMP_CNT = 0
REDIRECT = "2>&1"
PLIST = []
@staticmethod
def set_verbose(verbose):
        if verbose is not None:
if verbose:
VPSolver.REDIRECT = "2>&1"
else:
VPSolver.REDIRECT = "> /dev/null 2>&1"
@staticmethod
def new_tmp_file(ext = "tmp"):
if not ext.startswith("."): ext = "."+ext
fname = "%s/%d%s" % (VPSolver.TMP_DIR, VPSolver.TMP_CNT, ext)
VPSolver.TMP_CNT += 1
return fname
@staticmethod
@atexit.register
def clear():
for p in VPSolver.PLIST:
try:
os.killpg(p.pid, signal.SIGTERM)
except:
pass
try:
shutil.rmtree(VPSolver.TMP_DIR)
except:
pass
@staticmethod
def run(cmd):
p = subprocess.Popen(cmd, shell=True, preexec_fn=os.setsid)
VPSolver.PLIST.append(p)
p.wait()
@staticmethod
def parse_vbpsol(vpsol_output):
try:
s = vpsol_output.strip()
lst = s[s.rfind("Objective:"):].split("\n")
lst[0] = lst[0].replace("Objective: ", "")
obj = int(lst[0])
lst = lst[2:]
lst = map(lambda x: x.split("x"), lst)
sol = []
for mult, pat in lst:
mult = int(mult)
pat = pat.replace("i=","")
pat = pat.replace("[","").replace("]","")
pat = map(lambda x: int(x)-1,pat.split(","))
sol.append((mult,pat))
except:
return None
return obj, sol
@staticmethod
def vbpsol(afg_file, sol_file, opts="", verbose=None):
VPSolver.set_verbose(verbose)
if isinstance(afg_file, AFG):
afg_file = afg_file.afg_file
out_file = VPSolver.new_tmp_file()
VPSolver.run("%s %s %s %s | tee %s %s" % (VPSolver.VBPSOL, afg_file, sol_file, opts, out_file, VPSolver.REDIRECT))
f = open(out_file)
output = f.read()
f.close()
os.remove(out_file)
return output
@staticmethod
def vpsolver(vbp_file, compress=-2, binary=False, vtype="I", verbose=None):
VPSolver.set_verbose(verbose)
if isinstance(vbp_file, VBP):
vbp_file = vbp_file.vbp_file
out_file = VPSolver.new_tmp_file()
opts = "%d %d %s" % (compress, binary, vtype)
VPSolver.run("%s %s %s | tee %s %s" % (VPSolver.VPSOLVER, vbp_file, opts, out_file, VPSolver.REDIRECT))
f = open(out_file)
output = f.read()
f.close()
os.remove(out_file)
return output, VPSolver.parse_vbpsol(output)
@staticmethod
def vbp2afg(vbp_file, afg_file, compress=-2, binary=False, vtype="I", verbose=None):
VPSolver.set_verbose(verbose)
if isinstance(vbp_file, VBP):
vbp_file = vbp_file.vbp_file
out_file = VPSolver.new_tmp_file()
opts = "%d %d %s" % (compress, binary, vtype)
VPSolver.run("%s %s %s %s | tee %s %s" % (VPSolver.VBP2AFG, vbp_file, afg_file, opts, out_file, VPSolver.REDIRECT))
f = open(out_file)
output = f.read()
f.close()
os.remove(out_file)
return output
@staticmethod
def afg2mps(afg_file, mps_file, opts="", verbose=None):
VPSolver.set_verbose(verbose)
if isinstance(afg_file, AFG):
afg_file = afg_file.afg_file
out_file = VPSolver.new_tmp_file()
VPSolver.run("%s %s %s %s | tee %s %s" % (VPSolver.AFG2MPS, afg_file, mps_file, opts, out_file, VPSolver.REDIRECT))
f = open(out_file)
output = f.read()
f.close()
os.remove(out_file)
return output
@staticmethod
def afg2lp(afg_file, lp_file, opts="", verbose=None):
VPSolver.set_verbose(verbose)
if isinstance(afg_file, AFG):
afg_file = afg_file.afg_file
out_file = VPSolver.new_tmp_file()
VPSolver.run("%s %s %s %s | tee %s %s" % (VPSolver.AFG2LP, afg_file, lp_file, opts, out_file, VPSolver.REDIRECT))
f = open(out_file)
output = f.read()
f.close()
os.remove(out_file)
return output
@staticmethod
def script(script_name, arg1=None, arg2=None, verbose=None):
VPSolver.set_verbose(verbose)
cmd = script_name
for arg in [arg1, arg2]:
if isinstance(arg, MPS):
cmd += " --mps " + arg.mps_file
elif isinstance(arg, LP):
cmd += " --lp " + arg.lp_file
elif isinstance(arg, AFG):
cmd += " --afg " + arg.afg_file
elif isinstance(arg, VBP):
cmd += " --vbp " + arg.vbp_file
elif isinstance(arg, str):
if arg.endswith(".mps"):
cmd += " --mps " + arg
elif arg.endswith(".lp"):
cmd += " --lp " + arg
elif arg.endswith(".afg"):
cmd += " --afg " + arg
elif arg.endswith(".vbp"):
cmd += " --vbp " + arg
else:
raise Exception("Invalid file extension!")
out_file = VPSolver.new_tmp_file()
VPSolver.run("%s | tee %s %s" % (cmd, out_file, VPSolver.REDIRECT))
f = open(out_file)
output = f.read()
f.close()
os.remove(out_file)
return output, VPSolver.parse_vbpsol(output)
@staticmethod
def script_wsol(script_name, model, verbose=None):
VPSolver.set_verbose(verbose)
cmd = script_name
if isinstance(model, MPS):
cmd += " --mps " + model.mps_file
elif isinstance(model, LP):
cmd += " --lp " + model.lp_file
elif isinstance(model,str):
if model.endswith(".mps"):
cmd += " --mps " + model
elif model.endswith(".lp"):
cmd += " --lp " + model
else:
raise Exception("Invalid file extension!")
out_file = VPSolver.new_tmp_file()
sol_file = VPSolver.new_tmp_file(".sol")
VPSolver.run("%s --wsol %s | tee %s %s" % (cmd, sol_file, out_file, VPSolver.REDIRECT))
f = open(out_file)
output = f.read()
f.close()
os.remove(out_file)
try:
f = open(sol_file)
sol = f.read().split()
vals = {}
assert len(sol)%2 == 0
for i in xrange(0,len(sol),2):
var, value = sol[i], int(round(float(sol[i+1])))
if value != 0:
vals[var] = value
f.close()
os.remove(sol_file)
except:
vals = None
return output, vals
def signal_handler(sig, frame):
    # renamed parameter so it no longer shadows the `signal` module
    print "signal received: %d" % sig
    VPSolver.clear()
    sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGHUP, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
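# --- A hedged end-to-end sketch (not part of the original module) ---
# The instance data is made up; "vpsolver_glpk.sh" is one of the solver
# scripts shipped with VPSolver and is assumed to be on the PATH.
if __name__ == "__main__":
    # 1-D instance: bins of capacity 100, items of size 50 (demand 2)
    # and size 30 (demand 3)
    instance = VBP(W=100, w=[50, 30], b=[2, 3])
    afg = AFG(instance)      # build the arc-flow graph
    mps = MPS(afg)           # export an MPS model
    output, solution = VPSolver.script("vpsolver_glpk.sh", mps, afg)
    print solution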
|
Pop Bits: The last time Lynn was on the Pop chart was in 1981 with the #70 "Shake It Up Tonight" (#5 R&B, #5 Dance). A couple of follow-up singles just barely missed getting on the Pop chart, but this song from her 1983 album Preppie finally did the trick. The track would be highly successful at R&B becoming her second #1 while getting to #6 at Dance. Unfortunately, it would be Lynn's final single to reach the Pop chart and her last album to get into the R&B Top 10 (#8). She would continue to record throughout the balance of the decade, but her only success would be 1989's "Whatever It Takes," which got to #7 at R&B.
ReduxReview: I was excited to hear this as I love Jam & Lewis' work (see below). Plus, it hit #1 at R&B, so it should be pretty great. However, I was a bit disappointed. The groove is fine, but there is not much of the Jam/Lewis stamp on it, and the song doesn't really go anywhere. Plus, Lynn is a much better vocalist than what is heard here. It worked out well for everyone, but they have all done better work.
Trivia: For her Preppie album, Lynn co-wrote a few of the songs and produced all of the tracks except for this single. The song was written and produced by the up-and-coming team of Jimmy Jam and Terry Lewis. Jam and Lewis were beginning to have success working with The S.O.S. Band (two R&B Top 10's) and others, but it was this song that finally got them a #1 hit at R&B. It would lead to more work and in a few years their songs would start topping the Pop chart. Over time, they would become the most successful songwriting and production team in chart history getting sixteen Pop #1's and twenty-six R&B #1's. Their streak began with this hit.
|
#
# Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
#
import os
import psutil
from vrouter.cpuinfo.ttypes import *
class CpuInfoData(object):
def __init__(self):
self._process = psutil.Process(os.getpid())
self._num_cpu = 0
#end __init__
def _get_num_cpu(self):
return psutil.NUM_CPUS
#end _get_num_cpu
def _get_sys_mem_info(self):
phymem_info = psutil.phymem_usage()
sys_mem_info = SysMemInfo()
sys_mem_info.total = phymem_info[0]/1024
sys_mem_info.used = phymem_info[1]/1024
sys_mem_info.free = phymem_info[2]/1024
return sys_mem_info
#end _get_sys_mem_info
def _get_mem_info(self):
mem_info = MemInfo()
mem_info.virt = self._process.get_memory_info().vms/1024
mem_info.peakvirt = mem_info.virt
mem_info.res = self._process.get_memory_info().rss/1024
return mem_info
#end _get_mem_info
def _get_cpu_load_avg(self):
load_avg = os.getloadavg()
cpu_load_avg = CpuLoadAvg()
cpu_load_avg.one_min_avg = load_avg[0]
cpu_load_avg.five_min_avg = load_avg[1]
cpu_load_avg.fifteen_min_avg = load_avg[2]
return cpu_load_avg
#end _get_cpu_load_avg
def _get_cpu_share(self):
cpu_percent = self._process.get_cpu_percent(interval=0.1)
return cpu_percent/self._get_num_cpu()
#end _get_cpu_share
def get_cpu_info(self, system=True):
cpu_info = CpuLoadInfo()
num_cpu = self._get_num_cpu()
if self._num_cpu != num_cpu:
self._num_cpu = num_cpu
cpu_info.num_cpu = num_cpu
if system:
cpu_info.sys_mem_info = self._get_sys_mem_info()
cpu_info.cpuload = self._get_cpu_load_avg()
cpu_info.meminfo = self._get_mem_info()
cpu_info.cpu_share = self._get_cpu_share()
return cpu_info
#end get_cpu_info
#end class CpuInfoData
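# --- A minimal usage sketch (not part of the original module) ---
# Assumes the old psutil API used above: NUM_CPUS, phymem_usage and
# Process.get_cpu_percent.
if __name__ == '__main__':
    cpu_info_data = CpuInfoData()
    cpu_info = cpu_info_data.get_cpu_info(system=True)
    print 'cpus:', cpu_info.num_cpu, 'cpu share:', cpu_info.cpu_share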
|
If you want to visit Tbilisi for a weekend, make sure to choose the last Sunday of October. That is when the Tbilisoba festival takes place, during which locals traditionally feast and give thanks for the summer harvest.
One of the most distinctive elements of the architecture of the Georgian capital is the Bridge of Peace, a pedestrian bridge. Despite its futuristic style, it fits perfectly into its surroundings.
On the summit of Sololaki hill, in the western part of the city, stands the twenty-metre-high monument of Mother Georgia (Kartlis Deda). For a small fee you can get there by cable car and admire the beauty of the whole of Tbilisi.
What’s worth seeing in Tbilisi?
Tbilisi is called the Pearl of the Caucasus, and there is no exaggeration in that statement. The city, situated at the junction of Europe and Asia, offers a genuinely oriental vibe composed of lovely views, unique monuments and, above all, hospitable hosts.
What should you pay attention to while walking around the Georgian capital? The balconies, definitely. The ones you will see here are real masterpieces: hand-made carvings and wooden balustrades, and the older they are, the more beautiful and creative they get. They are mostly found on the second floors of buildings.
Make sure to visit the oldest district of the city, Abanotubani, known mainly for its baths. The name Tbilisi reportedly comes from the sulphur springs, which the citizens put to good use, as tbili means something warm in the local language. The baths are topped with cupolas which, after dark, when steam lingers above them, make a huge impression!
In the last few years Tbilisi has been changing, especially in terms of infrastructure. New restaurants, bars and clubs are opening, and old buildings and roads are being modernized. So where should you go when you feel hungry? You can try exquisite Georgian cuisine at Barbarestan, D. Aghmashenebeli 132. You can also expect a very pleasant vibe and great regional cuisine at Old City Wall, Baratashvili 1. For Asian dishes, stop by Umami Asian Fusion at 1 Rose Revolution Square.
If, while visiting the town, you would also like to see what a traditional trading spot looks like, go to the Dry Bridge bazaar, named after a nearby bridge. You can buy all sorts of things there, but above all it is worth stopping by to hear the chatter of the locals doing all kinds of business.
|
#!/usr/bin/env python
# encoding: utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2013-2015 CNRS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# AUTHORS
# Hervé BREDIN -- http://herve.niderb.fr/
# Camille GUINAUDEAU
#
from __future__ import unicode_literals
from __future__ import print_function
import re
from pkg_resources import resource_filename
from bs4 import BeautifulSoup
from tvd import Plugin
from tvd import T, TStart, TEnd, Transcription
from tvd import Segment, Annotation
from pyannote.parser.transcription.ctm import CTMParser, IterLinesMixin
class GameOfThrones(Plugin, IterLinesMixin):
def speaker(self, url=None, episode=None, **kwargs):
# absolute path to resource file
path = resource_filename(self.__class__.__name__, url)
annotation = Annotation()
with open(path, 'r') as fp:
for line in fp:
tokens = line.strip().split()
start_time = float(tokens[0])
duration = float(tokens[1])
segment = Segment(start_time, start_time + duration)
speaker = tokens[2]
annotation[segment, speaker] = speaker
return annotation
def outline_www(self, url=None, episode=None, **kwargs):
"""
Parameters
----------
url : str, optional
URL where resource is available
episode : Episode, optional
Episode for which resource should be downloaded
        Useful in case the same URL contains resources for multiple episodes.
Returns
-------
G : Transcription
"""
r = self.download_as_utf8(url)
soup = BeautifulSoup(r)
h2 = soup.find_all('h2')
sp = ""
i = 0
outline = {}
for element in h2[0].next_elements:
if element.name == 'p':
if outline.get(i) == "----":
sp = element.text
else:
sp = outline.get(i) + " " + element.text
outline.update({i: sp})
if element.name == 'h2':
i = i + 1
sp = "----"
outline.update({i: sp})
G = Transcription(episode=episode)
t2 = TStart
i = 1
while outline.get(i):
# add /empty/ edge between previous and next annotations
t1 = t2
t2 = T()
G.add_edge(t1, t2)
# add next annotation
t1 = t2
t2 = T()
G.add_edge(t1, t2, scene=outline.get(i))
i = i + 1
# add /empty/ edge between previous annotation and episode end
t1 = t2
t2 = TEnd
G.add_edge(t1, t2)
return G
def _scenes(self, url=None, episode=None):
"""Load file at `url` as Annotation
File must follow the following format:
# start_time end_time label
0.000 10.234 beginning
10.234 56.000 scene_1
"""
# initialize empty annotation
# uri is set to episode when provided
annotation = Annotation(uri=episode)
# absolute path to resource file
path = resource_filename(self.__class__.__name__, url)
# open file and parse it
with open(path, 'r') as f:
for line in f:
start, end, label = line.strip().split()
start = float(start)
end = float(end)
annotation[Segment(start, end)] = label
return annotation
def scenes_outline(self, url=None, episode=None, **kwargs):
return self._scenes(url=url, episode=episode)
def scenes(self, url=None, episode=None, **kwargs):
return self._scenes(url=url, episode=episode)
# load name mapping
def _get_mapping(self):
path = resource_filename(self.__class__.__name__, 'data/mapping.txt')
with open(path, 'r') as _:
mapping = dict(line.split() for line in _.readlines())
return mapping
def transcript_www(self, url=None, episode=None, **kwargs):
# load name mapping
mapping = self._get_mapping()
r = self.download_as_utf8(url)
soup = BeautifulSoup(r)
G = Transcription(episode=episode)
t2 = TStart
div = soup.find_all('div')
transcript = ""
for i in range(0, len(div)):
if re.match("{'class': \['postbody'\]}", unicode(div[i].attrs)):
transcript = div[i]
for i in range(0, len(transcript.contents)):
string = unicode(transcript.contents[i])
if not re.match("\[(.*)\]", string):
if re.match("(.*) : (.*)", string) and \
not re.match("(.*) by : (.*)", string):
ligne = re.split(' : ', transcript.contents[i])
# add /empty/ edge between previous and next annotations
t1 = t2
t2 = T()
G.add_edge(t1, t2)
# add next annotation
t1 = t2
t2 = T()
spk = ligne[0].lower().replace(' ', '_')
if re.match("(.*)_\(|\[(.*)\)|\]", spk):
match = re.match("(.*)_\(|\[(.*)\)|\]", spk)
spk = match.group(1)
spk = mapping.get(spk, spk)
if re.match("(.*)/(.*)", spk):
spks = spk.split('/')
if spks[0] in mapping:
spk = mapping.get(spks[0])
G.add_edge(t1, t2, speaker=spk, speech=ligne[1])
if spks[1] in mapping:
spk = mapping.get(spks[1])
G.add_edge(t1, t2, speaker=spk, speech=ligne[1])
else:
G.add_edge(t1, t2, speaker=spk, speech=ligne[1])
elif (
re.match("(.*): (.*)", string)
and not re.match("Credit: (.*)", string)
and not re.match("(.*) by: (.*)", string)
):
ligne = re.split(': ', transcript.contents[i])
# add /empty/ edge between previous and next annotations
t1 = t2
t2 = T()
G.add_edge(t1, t2)
# add next annotation
t1 = t2
t2 = T()
spk = ligne[0].lower().replace(' ', '_')
if re.match("(.*)_\(|\[(.*)\)|\]", spk):
match = re.match("(.*)_\(|\[(.*)\)|\]", spk)
spk = match.group(1)
spk = mapping.get(spk, spk)
if re.match("(.*)/(.*)", spk):
spks = spk.split('/')
if spks[0] in mapping:
spk = mapping.get(spks[0])
G.add_edge(t1, t2, speaker=spk, speech=ligne[1])
if spks[1] in mapping:
spk = mapping.get(spks[1])
G.add_edge(t1, t2, speaker=spk, speech=ligne[1])
else:
G.add_edge(t1, t2, speaker=spk, speech=ligne[1])
# add /empty/ edge between previous annotation and episode end
t1 = t2
t2 = TEnd
G.add_edge(t1, t2)
return G
def transcript(self, url=None, episode=None, **kwargs):
path = resource_filename(self.__class__.__name__, url)
transcription = Transcription(episode=episode)
# previous dialogue end time
e_dialogue = None
for line in self.iterlines(path):
# ARYA_STARK I'm not a boy!
# speaker = ARYA_STARK
# speech = I'm not a boy!
tokens = line.split()
speaker = tokens[0].strip()
speech = ' '.join(tokens[1:]).strip()
# new dialogue
_s_dialogue, _e_dialogue = T(), T()
# connect dialogue with previous dialogue
if e_dialogue is not None:
transcription.add_edge(e_dialogue, _s_dialogue)
transcription.add_edge(_s_dialogue, _e_dialogue,
speaker=speaker, speech=speech)
# keep track of previous dialogue end time
e_dialogue = _e_dialogue
return transcription
def transcript_aligned(self, url=None, episode=None, **kwargs):
path = resource_filename(self.__class__.__name__, url)
return CTMParser().read(path)()
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
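# --- A hedged usage sketch (not part of the original module) ---
# The resource path below is hypothetical; real paths come from the
# plugin's packaged resource files, and instantiation details may vary.
if __name__ == '__main__':
    plugin = GameOfThrones()
    annotation = plugin.speaker(url='data/speaker/S01E01.txt')  # assumed path
    print(annotation)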
|
In previous years, you have learned that questions or problems are investigated and solved in an orderly manner called the scientific method. This process involves working with variables. But what are variables? That is what we are going to investigate in this microsite.
The lesson on variables is divided into five sections: the guide section, the activity sections, the enrichment section, the assessment section, and the reference section.
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import requests
import json
from ast import literal_eval as make_tuple
from celery import shared_task
from celery.exceptions import MaxRetriesExceededError, Retry
from requests.exceptions import ConnectionError
from django_statsd.clients import statsd
from django.core.cache import cache
from django.conf import settings
@shared_task(bind=True, default_retry_delay=1, max_retries=42)
def start(self, opname, input_data, wkaoi=None):
"""
    Start a geoprocessing operation.
Given an operation name and a dictionary of input data, looks up the
operation from the list of supported operations in settings.GEOP['json'],
combines it with input data, and submits it to Spark JobServer.
This task must always be succeeded by `finish` below.
All errors are passed along and not raised here, so that error handling can
be attached to the final task in the chain, without needing to be attached
to every task.
If a well-known area of interest id is specified in wkaoi, checks to see
if there is a cached result for that wkaoi and operation. If so, returns
that immediately in the 'cached' key. If not, starts the geoprocessing
operation while also passing along the cache key to the next step, so that
the results of geoprocessing may be cached.
:param opname: Name of operation. Must exist in settings.GEOP['json']
:param input_data: Dictionary of values to extend base operation JSON with
:param wkaoi: String id of well-known area of interest. "{table}__{id}"
:return: Dictionary containing either job_id if successful, error if not
"""
if opname not in settings.GEOP['json']:
return {
'error': 'Unsupported operation {}'.format(opname)
}
if not input_data:
return {
'error': 'Input data cannot be empty'
}
outgoing = {}
if wkaoi and settings.GEOP['cache']:
key = 'geop_{}__{}'.format(wkaoi, opname)
outgoing['key'] = key
cached = cache.get(key)
if cached:
outgoing['cached'] = cached
return outgoing
data = settings.GEOP['json'][opname].copy()
data['input'].update(input_data)
try:
outgoing['job_id'] = sjs_submit(data, self.retry)
return outgoing
except Retry as r:
raise r
except Exception as x:
return {
'error': x.message
}
@shared_task(bind=True, default_retry_delay=1, max_retries=42)
def finish(self, incoming):
"""
Retrieve results of geoprocessing.
To be used immediately after the `start` task, this takes the incoming
data and inspects it to see if there are any reported errors. If found,
the errors are passed through to the next task. Otherwise, the incoming
parameters are used to retrieve the job from Spark JobServer, and those
results are returned.
    This task must always be preceded by `start` above. The succeeding task
must take the raw JSON values and process them into information. The JSON
output will look like:
{
'List(1,2)': 3,
'List(4,5)': 6
}
where the values and number of items depend on the input.
All errors are passed along and not raised here, so that error handling can
be attached to the final task in the chain, without needing to be attached
to every task.
If the incoming set of values contains a 'cached' key, then its contents
are returned immediately. If there is a 'key' key, then the results of
geoprocessing will be saved to the cache with that key before returning.
:param incoming: Dictionary containing job_id or error
:return: Dictionary of Spark JobServer results, or error
"""
if 'error' in incoming:
return incoming
if 'cached' in incoming:
return incoming['cached']
try:
result = sjs_retrieve(incoming['job_id'], self.retry)
if 'key' in incoming:
cache.set(incoming['key'], result, None)
return result
except Retry as r:
# Celery throws a Retry exception when self.retry is called to stop
# the execution of any further code, and to indicate to the worker
# that the same task is going to be retried.
# We capture and re-raise Retry to continue this behavior, and ensure
# that it doesn't get passed to the next task like every other error.
raise r
except Exception as x:
return {
'error': x.message
}
@statsd.timer(__name__ + '.sjs_submit')
def sjs_submit(data, retry=None):
"""
Submits a job to Spark Job Server. Returns its Job ID, which
can be used with sjs_retrieve to get the final result.
"""
host = settings.GEOP['host']
port = settings.GEOP['port']
args = settings.GEOP['args']
base_url = 'http://{}:{}'.format(host, port)
jobs_url = '{}/jobs?{}'.format(base_url, args)
    try:
        response = requests.post(jobs_url, data=json.dumps(data))
    except ConnectionError as exc:
        if retry is not None:
            retry(exc=exc)
        # without a retry hook, re-raise rather than fall through to an
        # unbound `response` below
        raise
if response.ok:
job = response.json()
else:
error = response.json()
if error['status'] == 'NO SLOTS AVAILABLE' and retry is not None:
retry(exc=Exception('No slots available in Spark JobServer.\n'
'Details = {}'.format(response.text)))
elif error['result'] == 'context geoprocessing not found':
reboot_sjs_url = '{}/contexts?reset=reboot'.format(base_url)
context_response = requests.put(reboot_sjs_url)
if context_response.ok:
if retry is not None:
retry(exc=Exception('Geoprocessing context missing in '
'Spark JobServer\nDetails = {}'.format(
context_response.text)))
else:
raise Exception('Geoprocessing context missing in '
'Spark JobServer, but no retry was set.\n'
'Details = {}'.format(
context_response.text))
else:
raise Exception('Unable to create missing geoprocessing '
'context in Spark JobServer.\n'
'Details = {}'.format(context_response.text))
else:
raise Exception('Unable to submit job to Spark JobServer.\n'
'Details = {}'.format(response.text))
if job['status'] == 'STARTED':
return job['result']['jobId']
else:
raise Exception('Submitted job did not start in Spark JobServer.\n'
'Details = {}'.format(response.text))
@statsd.timer(__name__ + '.sjs_retrieve')
def sjs_retrieve(job_id, retry=None):
"""
Given a job ID, will try to retrieve its value. If the job is
still running, will call the optional retry function before
proceeding.
"""
host = settings.GEOP['host']
port = settings.GEOP['port']
url = 'http://{}:{}/jobs/{}'.format(host, port, job_id)
    try:
        response = requests.get(url)
    except ConnectionError as exc:
        if retry is not None:
            retry(exc=exc)
        # without a retry hook, re-raise rather than fall through to an
        # unbound `response` below
        raise
if response.ok:
job = response.json()
else:
raise Exception('Unable to retrieve job {} from Spark JobServer.\n'
'Details = {}'.format(job_id, response.text))
if job['status'] == 'FINISHED':
return job['result']
elif job['status'] == 'RUNNING':
if retry is not None:
try:
retry()
except MaxRetriesExceededError:
delete = requests.delete(url) # Job took too long, terminate
if delete.ok:
raise Exception('Job {} timed out, '
'deleted.'.format(job_id))
else:
raise Exception('Job {} timed out, unable to delete.\n'
'Details: {}'.format(job_id, delete.text))
else:
if job['status'] == 'ERROR':
status = 'ERROR ({}: {})'.format(job['result']['errorClass'],
job['result']['message'])
else:
status = job['status']
delete = requests.delete(url) # Job in unusual state, terminate
if delete.ok:
raise Exception('Job {} was {}, deleted'.format(job_id, status))
else:
raise Exception('Job {} was {}, could not delete.\n'
'Details = {}'.format(job_id, status, delete.text))
def parse(sjs_result):
"""
Converts raw JSON results from Spark JobServer to dictionary of tuples
If the input is this:
{
'List(1,2)': 3,
'List(4,5)': 6
}
The output will be:
{
(1, 2): 3,
(4, 5): 6
}
:param sjs_result: Dictionary mapping strings like 'List(a,b,c)' to ints
:return: Dictionary mapping tuples of ints to ints
"""
return {make_tuple(key[4:]): val for key, val in sjs_result.items()}
def to_one_ring_multipolygon(area_of_interest):
"""
Given a multipolygon comprising just a single ring structured in a
five-dimensional array, remove one level of nesting and make the AOI's
coordinates a four-dimensional array. Otherwise, no op.
"""
if type(area_of_interest['coordinates'][0][0][0][0]) is list:
multipolygon_shapes = area_of_interest['coordinates'][0]
if len(multipolygon_shapes) > 1:
raise Exception('Unable to parse multi-ring RWD multipolygon')
else:
area_of_interest['coordinates'] = multipolygon_shapes
return area_of_interest
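# --- A hedged sketch of chaining these tasks (not part of the original
# module). The operation name 'nlcd' and the input payload shape are
# assumptions; the real names live in settings.GEOP['json'].
if __name__ == '__main__':
    from celery import chain

    aoi = {'type': 'MultiPolygon',
           'coordinates': [[[[0, 0], [0, 1], [1, 1], [0, 0]]]]}  # placeholder
    result = chain(
        start.s('nlcd', {'polygon': [json.dumps(aoi)]}, wkaoi='huc12__1234'),
        finish.s(),
    ).apply_async().get()

    if 'error' not in result:
        print(parse(result))  # e.g. {(11, 1): 3, ...}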
|
Bud XL has the exclusive ability to extract sugars from plant leaves and transfer them to the roots. This improves the taste of the fruits by making them sweeter, and it enhances the robustness and size of the flowers, which leads to greater turnover and higher fruit production.
It is basically a late-bloom flowering enhancer that extracts sugar from the bracts and carries it to the fruits to achieve sweeter and bigger yields. Its additives and ingredients are manufactured from food-grade and/or pharmaceutical ingredients in small batches to ensure consistency in experience and quality.
House and Garden makes use of enzyme processes to extract sugars from the large bracts so that they can be stored in the flowers and fruits of the crop. Start applying Bud XL from the fourth week, the middle of the crop’s flowering period. You can begin to apply this nutrient solution after the formation of the first flowers, the period when the big bracts normally lose their function. It makes sure that the plant no longer uses its energy to maintain the bracts; instead, the energy is directed to the production of fruits and flowers.
Mix 1 ml of Bud XL into one litre of the nutrient solution. Apply it from the beginning of the sixth week up to the end of the growing cycle. The dilution rate of this fertilizer is 1 ml per litre. First, add the base nutrient (e.g. Soil, Hydro, or Cocos) to the nutrient container. Adjust your nutrient’s EC before adding the Bud XL, and also adjust the pH value so that it harmonizes with the nutrient solution.
Its ingredients include potassium hydroxide, nitric acid, and ammonium nitrate.
|
# -*- coding: utf-8 -*-
# Copyright 2018 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Defines a setup to compile to qubits placed in a linear chain or a circle.
It provides the `engine_list` for the `MainEngine`. This engine list contains an AutoReplacer with most of the gate
decompositions of ProjectQ, which are used to decompose a circuit into only two qubit gates and arbitrary single qubit
gates. ProjectQ's LinearMapper is then used to introduce the necessary Swap operations to route interacting qubits
next to each other. This setup allows one to choose the final gate set (with some limitations).
"""
from projectq.cengines import LinearMapper
from projectq.ops import CNOT, Swap
from ._utils import get_engine_list_linear_grid_base
def get_engine_list(num_qubits, cyclic=False, one_qubit_gates="any", two_qubit_gates=(CNOT, Swap)):
"""
Returns an engine list to compile to a linear chain of qubits.
Note:
        If you choose a new gate set for which the compiler does not yet have standard rules, it raises a
        `NoGateDecompositionError` or a `RuntimeError: maximum recursion depth exceeded...`. Also note that even the
        gate sets which work might not yet be optimized. So make sure to double check and potentially extend the
        decomposition rules. This implementation currently requires that the one qubit gates contain Rz and at
        least one of {Ry (best), Rx, H}, and that the two qubit gates contain CNOT (recommended) or CZ.
Note:
Classical instructions gates such as e.g. Flush and Measure are automatically allowed.
Example:
get_engine_list(num_qubits=10, cyclic=False,
one_qubit_gates=(Rz, Ry, Rx, H),
two_qubit_gates=(CNOT,))
Args:
num_qubits(int): Number of qubits in the chain
cyclic(bool): If a circle or not. Default is False
one_qubit_gates: "any" allows any one qubit gate, otherwise provide a tuple of the allowed gates. If the gates
are instances of a class (e.g. X), it allows all gates which are equal to it. If the gate is
a class (Rz), it allows all instances of this class. Default is "any"
two_qubit_gates: "any" allows any two qubit gate, otherwise provide a tuple of the allowed gates. If the gates
are instances of a class (e.g. CNOT), it allows all gates which are equal to it. If the gate
is a class, it allows all instances of this class. Default is (CNOT, Swap).
Raises:
TypeError: If input is for the gates is not "any" or a tuple.
Returns:
A list of suitable compiler engines.
"""
return get_engine_list_linear_grid_base(
LinearMapper(num_qubits=num_qubits, cyclic=cyclic), one_qubit_gates, two_qubit_gates
)
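# --- A hedged usage sketch (not part of the original module) ---
# The gate choices below are just one valid combination; any supported
# set works, per the docstring above.
if __name__ == '__main__':
    from projectq import MainEngine
    from projectq.ops import All, H, Measure, Rx, Ry, Rz

    engine_list = get_engine_list(num_qubits=4, cyclic=False,
                                  one_qubit_gates=(Rz, Ry, Rx, H),
                                  two_qubit_gates=(CNOT, Swap))
    eng = MainEngine(engine_list=engine_list)
    qureg = eng.allocate_qureg(4)
    H | qureg[0]
    CNOT | (qureg[0], qureg[1])
    All(Measure) | qureg
    eng.flush()
    print([int(q) for q in qureg])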
|
TRENTON, N.J. (AP) - Independent groups far outspent New Jersey’s political parties in last year’s election, state officials said.
The Election Law Enforcement Commission said in a report released Thursday that Democratic and Republican fundraising committees spent a combined $3.8 million in 2016. That’s down from $6.4 million in 2012, and $5.2 million in 2008.
But independent groups spent a combined $28 million on ballot questions including casino expansion and transportation funding in the 2016 election.
Commission executive director Jeff Brindle says the parties are losing ground to the independent groups who do not have to disclose donors or limit expenditures.
He predicts this year’s governor’s race will surpass the $40 million spent during the 2013 gubernatorial contest.
Most of the spending in the 2016 contest centered on the proposed amendment to allow casino gambling in two northern New Jersey locations. Most of that money went to defeat the question, which voters rejected. Other spending went to support an amendment to require gas tax revenue be spent only on transportation. That question succeeded.
New Jersey’s 12 House members were on the ballot in 2016, but no statewide offices were before voters.
The state’s Democratic state, Assembly and Senate committees raised $2.5 million and spent $2.2 million, while their Republican counterparts brought in $2 million and spent $1.6 million.
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 16 20:23:01 2020
@author: Salomé
"""
# Standard library imports
import numpy as np
# Mosqito functions import
from mosqito.functions.tonality_tnr_pr.critical_band import critical_band
def find_highest_tone(freqs, spec_db, index, nb_tones, ind):
"""
Method to find the two highest tones in a given spectrum from a given index
according to their critical band
    Parameters
    ----------
    freqs : numpy.array
        frequency axis
    spec_db : numpy.array
        signal spectrum in dB
    index : numpy.array
        list of candidate tones index
    nb_tones : integer
        number of candidate tones not yet examined
    ind : integer
        index of the tone under examination

    Returns
    -------
    ind_p : integer
        index of the highest tone in the critical band
    ind_s : integer
        index of the second highest tone in the critical band
    index : numpy.array
        updated list of candidate tones index
    nb_tones : integer
        updated number of candidate tones not yet examined
    """
f = freqs[ind]
# critical band centered on f
f1, f2 = critical_band(f)
low_limit_idx = np.argmin(np.abs(freqs - f1))
high_limit_idx = np.argmin(np.abs(freqs - f2))
# Other tones in the critical band centered on f tones
multiple_idx = index[index > low_limit_idx]
multiple_idx = multiple_idx[multiple_idx < high_limit_idx]
if len(multiple_idx) > 1:
sort_spec = np.argsort(-1 * spec_db[multiple_idx])
# highest tones in the critical band
ind_p = multiple_idx[sort_spec[0]]
ind_s = multiple_idx[sort_spec[1]]
# suppression of the lower values
for s in sort_spec[2:]:
sup = np.where(index == multiple_idx[s])[0]
index = np.delete(index, sup)
nb_tones -= 1
if ind_p != ind:
# screening to find the highest value in the critical band centered on fp
ind_p, ind_s, index, nb_tones = find_highest_tone(
freqs, spec_db, index, nb_tones, ind_p
)
else:
ind_p = ind
ind_s = None
return ind_p, ind_s, index, nb_tones
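# --- A minimal synthetic sketch (not part of the original module) ---
# Values are illustrative only; the exact outcome depends on the
# bandwidth returned by critical_band.
if __name__ == "__main__":
    freqs = np.arange(0.0, 5000.0, 10.0)          # 10 Hz resolution axis
    spec_db = np.zeros(freqs.size)
    spec_db[[48, 50, 52]] = [40.0, 60.0, 50.0]    # three nearby tones
    index = np.array([48, 50, 52])
    ind_p, ind_s, index, nb_tones = find_highest_tone(
        freqs, spec_db, index, nb_tones=3, ind=48)
    print(ind_p, ind_s)                           # expected: 50 52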
|
A4 (297 mm x 210 mm) white. Available in standard office 80gsm and superior 140g weights. Both options suitable for laser and inkjet printing, as well as photocopier use. The 140g version is more suited to double sided use and where you want to present a good company image such as reception, hotel booking and hospitality industries.
Each sheet is micro-perforated at 75mm from the short edge - bottom or top depending on which way you load your printer, that's about 1/4 of a page tear off slip, leaving 3/4 A4 for the main information.
Micro-perforation allows the perforated tear off strip to be easily detached without leaving a ragged edge, and tends to separate cleanly rather than tearing elsewhere on the page. Being precision micro-perforated, this also means no hanging bumps and tags of paper on the sheet before printing - so less dust and wear and tear on your printers, and much less chance of paper jams and mis-feeds.
A4 perforated at 75mm. 1000 sheets per box.
Perforated A4 140g stationery with a horizontal microperforation at 75mm, ideal for when you need superior quality and robustness, such as in hotel reception, customer booking forms and the hospitality industry.
140g A4 perforated at 75mm. 250 sheets per box.
Flat rate £10 carriage on all orders to UK mainland destinations. All prices subject to VAT at 20% for UK orders. Please contact us for other destinations. All prices correct at 15/03/2018.
|
import pymongo
from bson.objectid import ObjectId
def formatObjectIDs(collectionName, results):
    for result in results:  # for each result passed in, convert the _id to the proper mID, cID, etc.
result[collectionName[0]+'ID'] = str(result.pop('_id')) # Note the .pop removes the _id from the dict
return results
class mongoInstance(object):
def getConfig(self, key):
result = MongoInstance.client['NodeMapper'].config.find_one({'key': key})
config = result['config']
return { 'Config': config }
def postConfig(self, key, config):
doc = {
'config': config
}
print MongoInstance.client['NodeMapper'].config.find_and_modify({'key': key}, {'$set': doc}, upsert=True, new=True)
return { 'result': 'inserted' }
def getData(self, key):
result = MongoInstance.client['NodeMapper'].data.find_one({'key': key})
nodes = result['nodes']
connections = result['connections']
return { 'Nodes': nodes, 'Connections': connections }
def postData(self, key, nodes, connections):
doc = {
'nodes': nodes,
'connections': connections
}
print MongoInstance.client['NodeMapper'].data.find_and_modify({'key': key}, {'$set': doc}, upsert=True, new=True)
return { 'result': 'inserted' }
# Client corresponding to a single connection
@property
def client(self):
if not hasattr(self, '_client'):
self._client = pymongo.MongoClient(host='localhost:27017')
return self._client
# A Singleton Object
MongoInstance = mongoInstance()
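# --- A minimal usage sketch (not part of the original module) ---
# Assumes a MongoDB server on localhost:27017; the key and payload
# below are made up.
if __name__ == '__main__':
    MongoInstance.postData('example-key', nodes=[{'x': 0}], connections=[])
    data = MongoInstance.getData('example-key')
    print data['Nodes'], data['Connections']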
|
Kaffestugan Annorlunda is beautifully situated in Stenshuvud National Park and offers a lovely cake buffet at a fixed price.
We are opening again for next season on April 13th. Welcome!
Welcome to Österlen's cosiest coffee house, beautifully situated at Stenshuvud National Park. It serves the traditional cookie buffet, with 8 kinds of cakes on the buffet table and fresh coffee in the pot, everything baked according to old recipes.
For those who want something different, there are more options. How about a classic apple tart or a juicy carrot cake? We also offer lunch options, such as a classic shrimp sandwich, salads or savoury pies.
If you prefer to cool off with a soft-serve ice cream, you are welcome to order it at our ice cream kiosk.
We take orders at the table, so please find yourself a nice table and our waitress will be right with you. Welcome to visit us! You will find us at Stenshuvud National Park.
All of our cookies are made by hand in our little bakery downstairs. Enjoy!
Kaffestugan Annorlunda was originally Gråmanstorp's school, built in a place called Klippan in 1846. It was later demolished, and two years later it was to be re-erected at Vårhallarna in Simrishamn, but planning permission was not granted.
In 1956 the founder leased the land here at Stenshuvud, where "Långkatekesen" was built. This time it became a coffee house, not a school, opening on August 16 with food and refreshments.
|
import argparse
import os
import sys
import tools
import capture
DEFAULT_OUTPUT_DIRECTORY = os.path.join(os.getcwd(), 'dljc-out')
# token that identifies the end of the options for do-like-javac and the beginning
# of the compilation command
CMD_MARKER = '--'
class AbsolutePathAction(argparse.Action):
"""Convert a path from relative to absolute in the arg parser"""
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, os.path.abspath(values))
base_parser = argparse.ArgumentParser(add_help=False)
base_group = base_parser.add_argument_group('global arguments')
base_group.add_argument('-o', '--out', metavar='<directory>',
default=DEFAULT_OUTPUT_DIRECTORY, dest='output_directory',
action=AbsolutePathAction,
help='The directory to log results.')
base_group.add_argument('--log_to_stderr', action='store_true',
help='''Redirect log messages to stderr instead of log file''')
base_group.add_argument('-t', '--tool', metavar='<tool>',
action='store',default=None,
help='A comma separated list of tools to run. Valid tools: ' + ', '.join(tools.TOOLS))
# base_group.add_argument('-c', '--checker', metavar='<checker>',
# action='store',default='NullnessChecker',
# help='A checker to check (for checker/inference tools)')
def split_args_to_parse():
split_index = len(sys.argv)
if CMD_MARKER in sys.argv:
split_index = sys.argv.index(CMD_MARKER)
args, cmd = sys.argv[1:split_index], sys.argv[split_index + 1:]
command_name = os.path.basename(cmd[0]) if len(cmd) > 0 else None
capturer = capture.get_capturer(command_name)
return args, cmd, capturer
def create_argparser():
parser = argparse.ArgumentParser(
parents=[base_parser] + tools.parsers(),
add_help=False,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
group = parser.add_argument_group(
'supported compiler/build-system commands')
supported_commands = ', '.join(capture.supported_commands())
group.add_argument(
CMD_MARKER,
metavar='<cmd>',
dest='nullarg',
default=None,
help=('Command to run the compiler/build-system. '
'Supported build commands: ' + supported_commands),
)
return parser
def parse_args():
to_parse, cmd, capturer = split_args_to_parse()
global_argparser = create_argparser()
args = global_argparser.parse_args(to_parse)
if capturer:
return args, cmd, capturer
else:
global_argparser.print_help()
sys.exit(os.EX_OK)
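# --- A hedged sketch of the intended command line (not part of the
# original module; the executable name is an assumption):
#   dljc -o my-out -t checker -- javac Foo.java
if __name__ == '__main__':
    args, cmd, capturer = parse_args()
    print('output dir: %s, build cmd: %s' % (args.output_directory, cmd))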
|
You can use this wallpaper on your device. This nature, iPhone, high resolution, pictures, butterflies, Windows wallpapers HD wallpaper is published in the Other category, and the original resolution of the wallpaper is 1920x1200 px. There are many resolution options in the resolution section above; choose whatever you need from the list, and the system will prepare your choice and start the download immediately. If you need only the original size of the image, you can click the "Download" button below the preview image. This free image was published on January 12, 2018.
The image of nature, iPhone, high resolution, pictures, butterflies, Windows wallpapers was published by users. If you want to report any violation concerning this image, you can reach us by clicking here.
|
# ############ yield and generators #############
# Create by Michael Kennedy (@mkennedy)
# Fibonacci numbers:
# 1, 1, 2, 3, 5, 8, 13, 21, ...
def classic_fibonacci(limit):
nums = []
current, nxt = 0, 1
while current < limit:
current, nxt = nxt, nxt + current
nums.append(current)
return nums
# can we do better?
def generator_fibonacci():
current, nxt = 0, 1
while True:
current, nxt = nxt, nxt + current
yield current
# generator are composible:
def even_generator(numbers):
for n in numbers:
if n % 2 == 0:
yield n
# consume both generators as a pipeline here
def even_fib():
for n in even_generator(generator_fibonacci()):
yield n
if __name__ == '__main__':
print("Classic")
for m in classic_fibonacci(100):
print(m, end=', ')
print()
print("generator")
for m in generator_fibonacci():
print(m, end=', ')
if m > 100:
break
print()
print("composed")
for m in even_fib():
print(m, end=', ')
if m > 1000000:
break
print()
|
Eloisa McLoughlin, the daughter of Fort Vancouver's Chief Factor, will be the topic of an upcoming lecture in March.
Over nearly 200 years of history, many strong women have made their mark on Fort Vancouver National Historic Site. In the month of March, the national park will honor their achievements and contributions to local history through exhibits and special programs.
Women important in the history of this area will be honored in a special exhibit by local artist Hilarie Couture, called Founding Mothers: Portraits of Progress. The exhibit will be on display at the Fort Vancouver Visitor Center for the entire month. Couture is an accomplished portrait artist, and the exhibit will feature several portraits of local women, including the debut of new works honoring women of Fort Vancouver. Couture will give a short presentation to officially open the exhibit on Saturday, March 4, at 1 pm in the Visitor Center.
At two upcoming lectures, the public is invited to explore the history of two women of Fort Vancouver: Marguerite McLoughlin, who was the wife of the fort's Chief Factor, and her daughter, Eloisa. Assistant Curator Meagan Huff will discuss the fascinating lives of these women, who had front-row seats for the rise and fall of a fur trading empire. This program will take place on Wednesday, March 15, at 7 pm, at the Visitor Center. A second engagement will take place on Friday, March 31, at 1 pm, at the McLoughlin House in Oregon City.
The McLoughlin House in Oregon City is one of the oldest homes in Oregon, and is a unit of Fort Vancouver National Historic Site. This unit of the national park also includes the Barclay House, which was owned by the family of Forbes Barclay, who served as Fort Vancouver's doctor for several years. For the month of March, a mid-19th-century dress owned by his wife, Maria Pambrun Barclay, will be on display. The dress was recently acquired for the national park's museum collection.
"The historical record can be lacking when it comes to the lives of women at Fort Vancouver," said Huff, "but with the research that we have done, and through our exhibits and public programs, we are working to highlight the very important roles they played here. We want their stories to be a part of the overall history we share."
|
from __future__ import absolute_import
import logging
import six
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
from django.template.loader import render_to_string
from django.core.urlresolvers import reverse
from django.template import RequestContext
from django.http import HttpResponseRedirect
from django.contrib import messages
from django.http import Http404
from requests.exceptions import HTTPError
from sentry import options
from sentry.api import client
from sentry.api.serializers import serialize
from sentry.models import ProjectOption
from sentry.utils import json
def react_plugin_config(plugin, project, request):
response = client.get('/projects/{}/{}/plugins/{}/'.format(
project.organization.slug,
project.slug,
plugin.slug,
), request=request)
return mark_safe("""
<div id="ref-plugin-config"></div>
<script>
$(function(){
ReactDOM.render(React.createFactory(Sentry.PluginConfig)({
project: %s,
organization: %s,
data: %s
}), document.getElementById('ref-plugin-config'));
});
</script>
""" % (
json.dumps_htmlsafe(serialize(project, request.user)),
json.dumps_htmlsafe(serialize(project.organization, request.user)),
json.dumps_htmlsafe(response.data)
))
def default_plugin_config(plugin, project, request):
if plugin.can_enable_for_projects() and \
not plugin.can_configure_for_project(project):
raise Http404()
plugin_key = plugin.get_conf_key()
form_class = plugin.get_conf_form(project)
template = plugin.get_conf_template(project)
if form_class is None:
return HttpResponseRedirect(reverse(
'sentry-manage-project', args=[project.organization.slug, project.slug]))
test_results = None
form = form_class(
request.POST if request.POST.get('plugin') == plugin.slug else None,
initial=plugin.get_conf_options(project),
prefix=plugin_key,
)
if form.is_valid():
if 'action_test' in request.POST and plugin.is_testable():
try:
test_results = plugin.test_configuration(project)
except Exception as exc:
if isinstance(exc, HTTPError):
test_results = '%s\n%s' % (exc, exc.response.text[:256])
elif hasattr(exc, 'read') and callable(exc.read):
test_results = '%s\n%s' % (exc, exc.read()[:256])
else:
logging.exception('Plugin(%s) raised an error during test',
plugin_key)
test_results = 'There was an internal error with the Plugin'
if not test_results:
test_results = 'No errors returned'
else:
for field, value in six.iteritems(form.cleaned_data):
key = '%s:%s' % (plugin_key, field)
if project:
ProjectOption.objects.set_value(project, key, value)
else:
options.set(key, value)
messages.add_message(
request, messages.SUCCESS,
_('Your settings were saved successfully.'))
return HttpResponseRedirect(request.path)
# TODO(mattrobenolt): Reliably determine if a plugin is configured
# if hasattr(plugin, 'is_configured'):
# is_configured = plugin.is_configured(project)
# else:
# is_configured = True
is_configured = True
return mark_safe(render_to_string(template, {
'form': form,
'request': request,
'plugin': plugin,
'plugin_description': plugin.get_description() or '',
'plugin_test_results': test_results,
'plugin_is_configured': is_configured,
}, context_instance=RequestContext(request)))
def default_issue_plugin_config(plugin, project, form_data):
plugin_key = plugin.get_conf_key()
for field, value in six.iteritems(form_data):
key = '%s:%s' % (plugin_key, field)
if project:
ProjectOption.objects.set_value(project, key, value)
else:
options.set(key, value)
def default_plugin_options(plugin, project):
form_class = plugin.get_conf_form(project)
if form_class is None:
return {}
NOTSET = object()
plugin_key = plugin.get_conf_key()
initials = plugin.get_form_initial(project)
for field in form_class.base_fields:
key = '%s:%s' % (plugin_key, field)
if project is not None:
value = ProjectOption.objects.get_value(project, key, NOTSET)
else:
value = options.get(key)
if value is not NOTSET:
initials[field] = value
return initials
|
You’re looking for a property in Fiddletown and you’re not sure of its state? It’s a huge amount of coin and you don’t want to make a mistake.
Now that’s settled, how do you find a good building inspection business in Fiddletown 2159?
That's how you find a good building inspection business in Fiddletown 2159.
|
# Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import conductor
from nova import exception
from nova.i18n import _LI
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class SchedulerReportClient(object):
"""Client class for updating the scheduler."""
def __init__(self):
self.conductor_api = conductor.API()
def update_resource_stats(self, context, name, stats):
"""Creates or updates stats for the desired service.
:param context: local context
:param name: name of resource to update
:type name: immutable (str or tuple)
:param stats: updated stats to send to scheduler
:type stats: dict
"""
if 'id' in stats:
compute_node_id = stats['id']
updates = stats.copy()
del updates['id']
else:
raise exception.ComputeHostNotCreated(name=str(name))
self.conductor_api.compute_node_update(context,
{'id': compute_node_id},
updates)
LOG.info(_LI('Compute_service record updated for '
'%s') % str(name))
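# --- A hedged usage sketch (not part of the original module) ---
# 'id' must reference an existing compute node row, otherwise
# ComputeHostNotCreated is raised; the stats payload below is made up.
if __name__ == '__main__':
    from nova import context

    ctxt = context.get_admin_context()
    client = SchedulerReportClient()
    client.update_resource_stats(ctxt, ('myhost', 'mynode'),
                                 {'id': 42, 'vcpus_used': 3})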
|
Renewed our website | Lifelong Kindergarten, Inc.
On our new website, we introduce our projects and systematically explain our company's business scheme, starting from education.
Please feel free to contact us if you have any questions or collaborative ideas and so on.
|
from app import db
from app.models.base import BaseMixin
class Roll(db.Model, BaseMixin):
"""
The number of pins on a turn in bowling
.. py:attribute:: pins
The number of pins knocked down in a roll.
:type: int
"""
id = db.Column(db.Integer, primary_key=True)
pins = db.Column(db.Integer)
# for back ref
frame_id = db.Column(db.Integer, db.ForeignKey('frame.id'))
def __init__(self, frame, pins):
self.pins = pins
self.frame_id = frame.id
db.session.add(self)
class Frame(db.Model, BaseMixin):
"""
A frame in bowling.
.. py:attribute:: number
    The frame number.
:type: int
.. py:attribute:: score
The total score for the frame (this is a running total calculated from previous frames)
:type: int
.. py:attribute:: rolls
A list of rolls in this frame.
:type: A list of :py:class:`app.models.frame.Roll`
"""
id = db.Column(db.Integer, primary_key=True)
number = db.Column(db.Integer)
score = db.Column(db.Integer)
# for back ref
player_id = db.Column(db.Integer, db.ForeignKey('player.id'))
rolls = db.relationship(
'Roll', backref=db.backref('frame', lazy='joined'), lazy='dynamic')
def __init__(self, player, number):
self.number = number
self.player_id = player.id
db.session.add(self)
def total_pins(self):
"""
Helper method to get the total number of pins in this frame.
:return: The total number of pins dropped in this frame.
:rtype: int
"""
return sum([roll.pins for roll in list(self.rolls.all())])
def get_rolls(self):
"""
Helper method to get the rolls in this frame.
:return: The rolls for this frame.
:rtype: A list of :py:class:`app.models.frame.Roll`
"""
return list(self.rolls.all())
def roll(self, pins):
"""
Add a roll to this frame.
:param int pins: The number of pins knocked over for this roll.
:return: The roll that was added.
:rtype: :py:class:`app.models.frame.Roll`
:raises Exception: If the allowed number of rolls has been exceeded.
"""
rolls = self.get_rolls()
        rolls_allowed = 2
        # Frame 10 earns a third roll after a strike or a spare.
        if (self.number == 10 and len(rolls) >= 2 and
                (rolls[0].pins == 10 or
                 rolls[0].pins + rolls[1].pins == 10)):
            rolls_allowed = 3
if len(rolls) >= rolls_allowed:
raise Exception("Exceeded maximum rolls")
roll = Roll(self, pins)
roll.save()
self.rolls.append(roll)
return roll
def is_strike(self):
"""
Helper method to determine if this frame is a strike.
:return: Truth
:rtype: bool
"""
if len(self.rolls.all()) == 1 and self.total_pins() == 10:
return True
return False
def is_spare(self):
"""
Helper method to determine if this frame is a spare.
:return: Truth
:rtype: bool
"""
if len(self.rolls.all()) == 2 and self.total_pins() == 10:
return True
return False
def is_complete(self):
"""
Checks if this frame is complete.
:return: Truth
:rtype: bool
"""
rolls = self.rolls.all()
if self.number == 10:
return self.is_complete10(rolls)
return sum([roll.pins for roll in rolls]) == 10 or len(rolls) == 2
def is_complete10(self, rolls):
"""
Takes frame 10 into account when it checks if this frame is complete.
:return: Truth
:rtype: bool
"""
        n = len(rolls)
        if n < 2:
            return False
        # A strike or spare in frame 10 earns (and requires) a third roll.
        if rolls[0].pins == 10 or rolls[0].pins + rolls[1].pins == 10:
            return n == 3
        return n == 2
def __repr__(self):
return '<Frame %d %d>' % (self.id, self.score)
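# Usage sketch (assumes a Flask-SQLAlchemy app context and a saved
# `player`; BaseMixin is assumed to provide save()):
#
#     frame = Frame(player, number=1)
#     frame.save()
#     frame.roll(7)
#     frame.roll(3)
#     frame.is_spare()   # True: 7 + 3 == 10 in two rolls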
|
Stitch can replicate data from all your sources (including PostgreSQL) to a central warehouse. From there, it's easy to use Grafana to perform the in-depth analysis you need.
Integrate PostgreSQL and Grafana to turn your data into actionable insights.
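As a rough sketch of the kind of analysis this enables, the snippet below queries a Stitch-fed PostgreSQL warehouse with psycopg2; the host, credentials, and the orders table are illustrative assumptions, not part of Stitch or Grafana.
import psycopg2

# Placeholder connection details for a Stitch-fed warehouse.
conn = psycopg2.connect(host='warehouse.example.com', dbname='analytics',
                        user='analyst', password='secret')
cur = conn.cursor()
# An aggregate a Grafana panel might chart: daily order volume.
cur.execute("""
    SELECT date_trunc('day', created_at) AS day, count(*)
    FROM orders
    GROUP BY day
    ORDER BY day
""")
for day, n in cur.fetchall():
    print(day, n)
conn.close()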
|
from django.conf import settings
import six
from django.contrib import messages
from django.shortcuts import redirect
from django.utils.http import urlquote
from social.exceptions import SocialAuthBaseException
class SocialAuthExceptionMiddleware(object):
"""Middleware that handles Social Auth AuthExceptions by providing the user
with a message, logging an error, and redirecting to some next location.
By default, the exception message itself is sent to the user and they are
redirected to the location specified in the SOCIAL_AUTH_LOGIN_ERROR_URL
setting.
This middleware can be extended by overriding the get_message or
get_redirect_uri methods, which each accept request and exception.
"""
def process_exception(self, request, exception):
self.strategy = getattr(request, 'social_strategy', None)
if self.strategy is None or self.raise_exception(request, exception):
return
if isinstance(exception, SocialAuthBaseException):
backend_name = self.strategy.backend.name
message = self.get_message(request, exception)
url = self.get_redirect_uri(request, exception)
message = message.replace('google-oauth2','Google')
if request.user.is_authenticated():
# Ensure that messages are added to authenticated users only,
# otherwise this fails
messages.error(request, message,
extra_tags='' + backend_name)
else:
url += ('?' in url and '&' or '?') + \
'message={0}&backend={1}'.format(urlquote(message),
backend_name)
return redirect(url)
def raise_exception(self, request, exception):
return self.strategy.setting('RAISE_EXCEPTIONS', settings.DEBUG)
def get_message(self, request, exception):
return six.text_type(exception)
def get_redirect_uri(self, request, exception):
return self.strategy.setting('LOGIN_ERROR_URL')
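class FriendlySocialAuthExceptionMiddleware(SocialAuthExceptionMiddleware):
    """A sketch of the extension point described in the docstring above
    (not wired up by default): hide raw exception text behind a generic,
    user-friendly message."""
    def get_message(self, request, exception):
        return 'Sign-in failed. Please try again or use another account.'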
|
What To Put In An Instagram Bio: Your Instagram bio is the first thing people see when they click on your feed, the pick-up line of the digital world. When you've only got a couple of lines to convince potential new friends to join in the fun and click follow, you should make the most of them. No pressure, right? But really, here's how to write your Instagram bio in 2017 and make an A+ first impression.
To begin with, explain what it is that you do, for your career and/or your passion. If you had to describe your business or brand in three words, what would they be? They could belong in your Instagram bio. Try to be consistent with the way you talk about yourself across platforms, and use concise, jargon-free language.
Make sure you use your name or business name in the 'name' field when setting up your account, because this and your username are the only searchable terms on Instagram, as social media marketer Jenn Herman writes. So make sure the name you use is the one customers or community members will be searching for. You might also use the 'name' field to call out the specific service you provide, your specialty or niche, so that people have another way of finding you.
Give people a sense of your personality when writing your Instagram bio. Use keywords and emojis to show what you're passionate about, and have some fun with it! Use humour, get creative, tell a (very brief) story in your authentic tone of voice, and try to make yourself stand out from the crowd.
If you have a branded hashtag, feel free to include it to encourage your community to get involved. Likewise, include your location if it is relevant to your business or brand (i.e. if you're a wedding photographer who only shoots in Byron Bay, Australia). If you're running a service-based business and trying to position yourself as a worldwide operation, then there's no need to worry about it.
Having a personalised profile photo (i.e. one of you, not your logo) can also really help establish that human connection. Logo = spammy feelings. Real human face = real connection.
With just one spot in your whole profile for a link, you want to make sure you a) choose the right one and optimise it and b) get people clicking. If you're a service-based business, we'd also suggest popping your email address in your bio, to make it easy for people to reach out to you. And if you have a brick-and-mortar store, make sure to include your address and opening hours.
Your Instagram bio doesn't need to be static. Play around and see what works for you, and try to gauge what resonates with people. We also recommend trialling different CTAs, particularly if you're changing the link in your bio regularly, to see what gets followers clicking.
|
__author__ = 'mark greenwood'
import wx
import time
import pygame
import pygame.camera
import os
# initialises connected devices
pygame.camera.init()
camList = pygame.camera.list_cameras()
class MyApp(wx.App):
"""Builds the main GUI application"""
def OnInit(self):
self.frame = MyFrame()
self.SetTopWindow(self.frame)
self.frame.CenterOnScreen()
self.frame.Show()
return True
class StreamWindow(wx.Frame):
"""Builds a window for displaying a camera test stream upon selection of test button"""
def __init__(self, parent, id):
wx.Frame.__init__(self, parent, id, title="Test", size=(1280, 720), style=wx.DEFAULT_FRAME_STYLE)
wx.Frame.CenterOnScreen(self)
self.panel = wx.Panel(self)
def draw(self, selection):
"""Blits images to the window and draws grid lines on top of each one. Grid lines correspond to area for
cropping in tracking so plants must fit within."""
cam = pygame.camera.Camera(selection, (1280, 720)) # gets selected camera
self.Bind(wx.EVT_CLOSE, self.close_stream) # binds close event to X button
try:
cam.start()
self.run = True
            while self.run:
img = cam.get_image()
pygame.draw.lines(img, (0, 0, 0), False, [[130, 20], [1150, 20], [1150, 700], [130, 700], [130, 20]], 2)
pygame.draw.lines(img, (0, 0, 0), False, [[334, 20], [334, 700], [538, 700], [538, 20], [742, 20],
[742, 700], [946, 700], [946, 20]], 2)
pygame.draw.lines(img, (0, 0, 0), False, [[130, 247], [1150, 247], [1150, 474], [130, 474]], 2)
img = pygame.image.tostring(img, "RGB", False) #converts to cross package format
bitmap = wx.BitmapFromBuffer(1280, 720, img) #convert to bitmap for display
                if not hasattr(self, 'bitmap'):
                    self.bitmap = wx.StaticBitmap(self.panel, bitmap=bitmap)
                else:
                    # reuse the widget instead of creating a new one per frame
                    self.bitmap.SetBitmap(bitmap)
self.Update()
self.Show()
wx.Yield()
cam.stop()
self.Destroy() # stop cam and then close window
except SystemError:
print "Please select a camera"
self.Destroy()
def close_stream(self, event):
"""Close stream event- breaks the loop on click of X button"""
self.run = False
class MyFrame(wx.Frame):
"""Builds the main GUI frame containing all the input selections and events"""
def __init__(self):
super(MyFrame, self).__init__(None, id=wx.ID_ANY, title="Image Capture", size=(1000, 600),
name="MyFrame")
#Creates the panel to sit inside the main window
self.panel = wx.Panel(self)
self.panel.SetBackgroundColour(wx.WHITE)
        #Camera selection rows: 16 plates laid out in two columns of eight
        self.combo_boxes = {}
        for i in range(16):
            col, row = i // 8, i % 8
            x, y = 5 + col * 495, 30 + row * 35
            wx.StaticText(self.panel, label="Plate %d" % (i + 1), pos=(x, y))
            combo = wx.ComboBox(self.panel, value='Select camera',
                                choices=['Select camera'] + camList,
                                style=wx.CB_DROPDOWN, pos=(x + 75, y - 5))
            self.combo_boxes[i + 1] = combo
            test_button = wx.Button(self.panel, label="Test camera",
                                    pos=(x + 280, y - 5), size=(150, 28))
            self.Bind(wx.EVT_BUTTON, self.make_stream_handler(combo), test_button)
#Time data, run button and save path
save_dialog = wx.StaticText(self.panel, label='Save directory', pos=(400, 325))
save_button = wx.Button(self.panel, label='....', pos=(510, 320), size=(80,28))
self.Bind(wx.EVT_BUTTON, self.on_click18, save_button)
time_text = wx.StaticText(self.panel, label="Time Interval (s)", pos=(400, 352))
self.time_input = wx.TextCtrl(self.panel, pos=(510, 350))
cycle_text = wx.StaticText(self.panel, label="Cycles", pos=(400, 382))
self.cycle_input = wx.TextCtrl(self.panel, pos=(510, 380))
run_button = wx.Button(self.panel, -1, size=(190, 30), pos=(400, 415), label='Run Program')
self.Bind(wx.EVT_BUTTON, self.on_click17, run_button)
#error/progress text box
self.error_box_text = wx.TextCtrl(self.panel, value='', size=(990, 120), pos=(5, 475),
style = wx.TE_READONLY + wx.TE_MULTILINE)
self.gauge = wx.Gauge(self.panel, size=(990, 20), pos=(5, 450))
    #events - converts the drop-down camera selection to a string which is passed to the stream window
    def make_stream_handler(self, combo_box):
        """Returns a click handler that opens a test stream for combo_box."""
        def handler(event):
            selection = combo_box.GetStringSelection()
            stream_w = StreamWindow(parent=None, id=-1)
            stream_w.draw(selection)
        return handler
def on_click18(self, event):
self.save_dlg = wx.DirDialog(self, 'Choose or create a directory for your test', defaultPath=os.getcwd(),
style=wx.DD_CHANGE_DIR)
self.save_dlg.ShowModal()
def camerasnp(self, cam, save_as):
"""This capture an image and saves it, cam is the name of the device, saveas is the name to save document as"""
pygame.camera.init()
cam = pygame.camera.Camera(cam, (1920, 1080)) # change to cam resolution
cam.start()
img = cam.get_image()
pygame.image.save(img, save_as) # saves image
cam.stop()
def timelapser(self, time_sec, loops, matrix, save_dlg):
"""Runs time-lapse with measured time between each cycle"""
print 'Time interval = %s' % time_sec
print 'Number of intervals = %s' % loops
counter = 0
keys = matrix.keys()
keys.sort()
self.error_box_text.WriteText('Running %s loops at %s seconds intervals' % (str(loops), time_sec))
wx.Yield() # pauses the process momentarily to update text box
while counter < loops:
old_time = time.time()
text = 'Running loop %s' % counter
print text
self.error_box_text.WriteText('\n%s' % text) # updates with loop number
self.gauge.SetValue(counter) # updates progress bar
wx.Yield()
counter += 1
for cam_shoot in keys:
if matrix[cam_shoot] == "":
continue
else:
for snap in range(0, 3): # takes 3 images at each time point
self.camerasnp(matrix[cam_shoot], '%s/Plate%s-%s-c%s.png' % (save_dlg, cam_shoot, counter, snap))
            new_time = time.time()
            # avoid a negative sleep if capturing took longer than the interval
            time.sleep(max(0, time_sec - (new_time - old_time)))
def on_click17(self, event):
"""Main run event - gets selected inputs and then return error messages if invalid else runs program"""
try:
            all_selected_cameras = {number: combo.GetStringSelection()
                                    for number, combo in self.combo_boxes.items()}
            run_time = float(self.time_input.GetValue())
            loops = int(self.cycle_input.GetValue())
            save_dlg = self.save_dlg.GetPath()
            self.gauge.SetRange(loops - 1)  # sets progress gauge max
#error handling - checks if inputs are valid
for key, value in all_selected_cameras.items(): # if somebody clicks 'sel camera' it changes to empty string
if value == 'Select camera':
all_selected_cameras[key] = ''
            if all(val == '' for val in all_selected_cameras.values()):  # error if no camera is selected
                self.error_box_text.WriteText('Please select a camera\n')
            elif run_time < 60:  # prevents errors where capturing takes longer than the interval
                self.error_box_text.WriteText('Error - please select a longer interval')
            #if inputs are valid the program is run
            else:
self.error_box_text.WriteText('Saving to %s\n' % save_dlg)
self.timelapser(run_time, loops, all_selected_cameras, save_dlg)
                complete_dialog = wx.MessageDialog(None, message="Image capture complete",
                                                   style=wx.OK | wx.CENTRE | wx.ICON_INFORMATION)
complete_dialog.ShowModal()
except(ValueError, AttributeError):
self.error_box_text.WriteText('Error: please select all parameters\n')
if __name__ == '__main__':
app = MyApp(False)
app.MainLoop()
|
The Courtyards offers an unbeatable location across the street from the University of Florida campus, with amenities including newly renovated fully-furnished apartments, the fastest internet service in the area, a pool and rooftop sundeck, and a fitness center.
When looking for housing near the University of Florida, look no further than The Courtyards. Here, you will discover the best that student living has to offer. You can enjoy the benefits of living near campus without actually having to live in a dorm. The Courtyards is conveniently located within walking distance of campus, which allows you to retreat to your apartment for a break when you need to without worrying about the hassle of a long commute. At the same time, you won’t have to worry about floor meetings, dorm restrictions, curfews or cramped dorms.
If you choose to live at The Courtyards, it won’t just be your student apartment, but your new home. We offer the special extras that students are looking for during their search such as a refreshing swimming pool, free access to a strength and cardio studio, and a cozy fire pit. Students can remain up-to-date on the latest shows and news with 100mbps of High-Speed Fiber Optic Internet and cable with HBO. Your apartment is move-in ready because it comes with kitchen appliances and a fully-furnished furniture package for a stress-free transition.
Students can rest easy knowing they are secure with a gated entrance, a night patrol and emergency maintenance staff. At The Courtyards, you can enjoy all of these features without exorbitant prices. All of our spacious floorplans and accompanying features are available at an affordable rate because we believe students should never have to compromise.
We're located across the street from the heart of University of Florida’s Tigert Hall. Living at The Courtyards puts you at the center of everything: Campus, Midtown, Downtown and Archer Road.
When seeking the right Gainesville student apartments for rent, most people ask their friends. Here is what residents are saying about life at The Courtyards as University of Florida students.
Please call for availability prior to completing your application online.
|
import json
from django import template
from ..engine import standard
from ..engine.utils import subregion_display
register = template.Library()
colors = {'austria-hungary': '#a41a10',
'england': '#1010a3',
'france': '#126dc0',
'germany': '#5d5d5d',
'italy': '#30a310',
'russia': '#7110a2',
'turkey': '#e6e617'}
@register.inclusion_tag('diplomacy/map_card.html', takes_context=True)
def map(context, width, height):
game = context['game']
turn = context.get('turn', game.current_turn())
data = {'width': width, 'height': height}
data['colors'] = json.dumps(colors)
if turn:
units = turn.get_units()
owns = turn.get_ownership()
data['owns'] = json.dumps(
[(o['territory'], o['government']) for o in owns]
)
data['units'] = json.dumps(
[(subregion_display(u['subregion']), u['u_type'], u['government'])
for u in units
if not u['dislodged']]
)
else:
data['owns'] = json.dumps(
[(T, P)
for P in standard.powers
for T, (p, sc, unit) in standard.starting_state.items()
if p == P])
data['units'] = json.dumps([])
return data
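# Template usage sketch (the load name below is hypothetical; the tag
# renders diplomacy/map_card.html with the data computed above):
#
#     {% load diplomacy_tags %}
#     {% map 600 400 %}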
|
Jane Crosswalk R + Matrix Light 2 "Squared"
Equipped with the same DNA as the EPIC pushchair, the new Crosswalk R is a luxury high performance all-terrain pushchair with a patented compact folding system and a larger and fully reclinable seat unit.
Lightweight, compact, strong and adaptable: these are just some of the Crosswalk R's characteristics, which make it the perfect pushchair for all lifestyles.
Complete versatility, complete strength, complete comfort.
Its advanced features make the Crosswalk R a nimble, easy to manoeuvre pushchair. Its exclusive large PU tyres, 10 times more resistant than traditional tyres, have great shock absorbing properties.
A tubular aluminium chassis, independent suspension with shock absorbers on the rear wheels and a more spacious seat with reclining multi-position backrest (including a unique horizontal new-born position), offers baby greater comfort. Crosswalk has everything you could want in a pushchair.
Complete with the award-winning Matrix Light 2 car seat that converts into a carrycot. It offers a lie-flat position both in the car and on the pram, which is the healthiest way for babies to travel, especially on longer journeys, as there is no time limit. The Matrix can also be used for overnight sleeping and is an ideal environment for your newborn, as it supports natural, healthy sleep.
An all-terrain pushchair that's chic in the city and strong in the country.
Large seat with multiple reclining backrest positions.
Unique fully horizontal 180° Lie-flat seat position for new-born babies, supports natural, stress-free sleep.
A reversible seat lets baby face you or the world.
Large all-terrain PU tyres that are 10 times more hard-wearing than traditional tyres.
|
import numpy as np
from menpo.image import Image
from menpo.shape import ColouredTriMesh
from menpo.transform import AlignmentSimilarity
from menpo3d.rasterize import rasterize_mesh
from scipy.stats import chi2
from .camera import perspective_camera_for_template
from .data import load_template
from .shading import lambertian_shading
from matplotlib import pyplot as plt
def rasterize_mesh_at_template(mesh, img_shape=(640, 480),
pose_angle_deg=0, shaded=False):
camera = perspective_camera_for_template(img_shape,
pose_angle_deg=pose_angle_deg)
mesh_aligned = AlignmentSimilarity(mesh, load_template()).apply(mesh)
if shaded:
mesh_aligned = lambertian_shading(mesh_aligned)
return rasterize_mesh(camera.apply(mesh_aligned), img_shape)
def visualize_nicp_weighting(template, weighting):
colours = ((weighting[:, None] * np.array([1, 0, 0])) +
((1 - weighting[:, None]) * np.array([1, 1, 1])))
print('min: {}, max: {}'.format(weighting.min(), weighting.max()))
ColouredTriMesh(template.points, trilist=template.trilist,
colours=colours).view()
def visualize_pruning(w_norm, n_retained,
title='Initial model weights vs theoretical for pruning'):
fig, ax1 = plt.subplots()
ax1.set_title(title)
ax1.hist(w_norm, normed=True, bins=200, alpha=0.6, histtype='stepfilled',
range=[0, n_retained * 5])
ax1.axvline(x=n_retained, linewidth=1, color='r')
ax1.set_ylabel('PDF', color='b')
ax2 = ax1.twinx()
ax2.set_ylabel('Survival Function', color='r')
ax1.set_xlabel('w_norm')
x = np.linspace(chi2.ppf(0.001, n_retained),
chi2.ppf(0.999, n_retained), 100)
    ax2.plot(x, chi2.sf(x, n_retained),
             'g-', lw=1, alpha=0.6, label='chi2 sf')
ax1.plot(x, chi2.pdf(x, n_retained),
'r-', lw=1, alpha=0.6, label='chi2 pdf')
def visualize_nicp_result(mesh):
l = rasterize_mesh_at_template(mesh, pose_angle_deg=+20, shaded=True)
r = rasterize_mesh_at_template(mesh, pose_angle_deg=-20, shaded=True)
return Image(np.concatenate([l.pixels, r.pixels], axis=-1))
|
When I hover over a subject title or a 'last post' title, I see a blank pop-up box. I assume it should show some text; perhaps the text color is also white, like the background color? See the attached screenshot (on the right).
I can give you access to the test site where I have this problem if needed.
Last edit: 1 year 5 months ago by mvvfans.
1 year 5 months ago #684 by Eugene S.
1 year 5 months ago - 1 year 5 months ago #685 by Eugene S.
Comment out or remove this style and you will get a black background with white text.
Last edit: 1 year 5 months ago by Eugene S..
Thank you, that did the trick!! Is that pop-up a feature of the template or of the new Kunena version?
I think I'd rather turn it off but I can't find the option.
Actually when I look at my 'live' forum www.mvvfans.nl (Kunena 4 + RND Kazure) I see the popup too but it's much smaller, with a white background and black letters. There it doesn't look bad. I wonder why my Joomla template overrides these settings in Kunena 5 + Kvivid?
1 year 5 months ago #688 by Eugene S.
Actually, we found a little bug there. It will be fixed in the next template update.
|
# Invader
# https://github.com/cgio/invader
def fs(path, pattern, start_offset=0, chunk_size=-1, chunk_limit=-1,
find_all=True):
"""
Yields offset of found byte pattern within a file.
Supports wildcard bytes, starting offset, reading in chunks, and read
limits.
Args:
path (str): The file path.
pattern (str): The sequence of bytes, e.g. 'FF??E82623D7'.
'??' represents a single byte wildcard.
Spaces between bytes are supported.
start_offset (int): The offset to start searching from.
chunk_size (int): The read length per chunk (-1 == entire file).
chunk_limit (int): The max # of chunks to read (-1 == all chunks).
find_all (bool): True == find all instances. False == only first
instance.
Returns:
int: On success, yield found offset.
bool: On error, yield False.
Example:
import invader
for found_offset in invader.fs(
r'C:\target.exe',
'?? 01 55 ?? ?? 4B 20 1E 1D ?? 15',
start_offset=0x1000,
chunk_size=1024,
chunk_limit=10,
find_all=False
):
if found_offset is not False:
print(hex(found_offset))
"""
# Only -1 or > 0 is allowed
    if chunk_size == 0 or chunk_limit == 0:
        yield False
        return
pattern = pattern.replace(' ', '')
# If no path, invalid pattern, or pattern is all wildcards
    if len(path) == 0 or len(pattern) < 2 or len(
            pattern) % 2 != 0 or pattern.count('?') == len(pattern):
        yield False
        return
# Correct invalid values
if start_offset < 0:
start_offset = 0
# If chunk_size == entire file, chunk_limit becomes irrelevant
if chunk_size == -1:
chunk_limit = -1
# Get largest segment bytes
pattern_largest_segment = list(filter(None, pattern.split('??')))
pattern_largest_segment.sort(key=len, reverse=True)
pattern_largest_segment = pattern_largest_segment[0]
pattern_largest_segment_position = pattern.index(
pattern_largest_segment) // 2
pattern_largest_segment = bytes.fromhex(pattern_largest_segment)
# Search method 1 (no wildcards)
if pattern.count('?') == 0:
pattern_bytes = bytes.fromhex(pattern)
chunk_position = 0
with open(path, 'rb') as f:
if start_offset > 0:
f.seek(start_offset)
while True:
if chunk_limit > 0:
if chunk_position / chunk_size >= chunk_limit:
return
            try:
                data = f.read(chunk_size)
            except MemoryError:
                yield False
                return
if not data:
return
i = 0
found_position = 0
while True:
try:
found_position = data.index(pattern_bytes,
found_position + i)
if chunk_size > 0:
yield chunk_position + found_position + \
start_offset
else:
yield found_position + start_offset
if find_all is False:
return
except ValueError:
break
i += 1
chunk_position += chunk_size
continue
return
# Create a list of wildcard positions
pattern_wildcard_positions = []
for i in range(0, len(pattern), 2):
pattern_byte = pattern[i:i + 2]
if pattern_byte == '??':
pattern_wildcard_positions.append(i // 2)
# Remove wildcards from pattern string and convert to bytes
pattern_len = len(pattern) // 2
pattern_bytes = pattern.replace('?', '')
pattern_bytes = bytes.fromhex(pattern_bytes)
# Search method 2 (wildcards)
possible_positions = []
end_of_file = False
first_result = True
chunk_position = 0
with open(path, 'rb') as f:
if start_offset > 0:
f.seek(start_offset)
while not end_of_file:
if chunk_limit > 0:
if chunk_position / chunk_size >= chunk_limit:
return
            try:
                data = f.read(chunk_size)
            except MemoryError:
                yield False
                return
if not data:
end_of_file = True
chunk_search = True
while chunk_search:
try:
if first_result is True:
possible_positions.append(
data.index(pattern_largest_segment))
first_result = False
else:
possible_positions.append(
data.index(pattern_largest_segment,
possible_positions[-1] + 1))
except ValueError:
if chunk_size > 0:
chunk_position += chunk_size
chunk_search = False
for possible_position in possible_positions:
possible_position -= pattern_largest_segment_position
match_count = 0
pattern_bytes_pos = 0
data_offset_pos = 0
i = 0
while i < pattern_len:
if i in pattern_wildcard_positions:
match_count += 1
data_offset_pos += 1
i += 1
continue
elif pattern_bytes[pattern_bytes_pos] == data[
possible_position + data_offset_pos]:
match_count += 1
data_offset_pos += 1
pattern_bytes_pos += 1
i += 1
continue
i += 1
if match_count == pattern_len:
if find_all is True:
if chunk_size > 0:
yield chunk_position + possible_position + \
start_offset - chunk_size
else:
yield possible_position + start_offset
else:
yield possible_position + chunk_position + \
start_offset - chunk_size
return
possible_positions = []
first_result = True
return
|
I am disappointed in the winners of the FQXi essay contest. Maybe I am just a sore loser, because my essay is not one of the 20 winners, out of 271 submissions. But I do not think that the winners followed the contest objectives very well.
Most essays just promoted some crackpot theory without really explaining how or why the textbook theories are wrong.
Unfortunately, FQXi has taken down the page with the contest rules. You can find most of them here, or maybe in your browser cache.
Most of the winning essays promote some completely mainstream and accepted idea, but try to make it sound original by attacking some silly straw man. Other essays presented some vague and speculative ideas about quantum gravity or some similar field where ideas cannot be tested.
In 2nd place, Ellis gives examples of causation being more easily understood with a top-down view, such as entropy increasing. Weinstein suggests that action-at-a-distance might explain some quantum mechanics and cosmology.
In 3rd place, Barbour doubts that reductionism will explain entanglement. Dribus rejects spacetime in favor of a "causal metric hypothesis". Hossenfelder speculates about quantum gravity. Wharton says the universe is not a computer.
My essay got a lot of attention, and many favorable comments. I made some much more provocative statements than the winning essays, but I thought that was the point. Most of the essays don't actually say Which of Our Basic Physical Assumptions Are Wrong.
So perhaps the judges thought that my essay was not "Technically correct and rigorously argued". If so, then I would have preferred them to say so in the online comments, so I could defend myself. As it is, I do not know what they disliked. If I had submitted it to a journal, at least I would have gotten a rejection report. I guess I could still submit it somewhere else, but it is not really a technical physics advance. It is an essay suited for this contest.
|