# Copyright (C) 2016 Linaro Limited
#
# Author: Matthew Hart <[email protected]>
#
# This file is part of LAVA Dispatcher.
#
# LAVA Dispatcher is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# LAVA Dispatcher is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses>.
import re
def add_to_kickstart(preseedfile, extra_command):
    '''Append a %post section running extra_command to a kickstart file.'''
    with open(preseedfile, 'a') as pf:
        pf.write('\n')
        pf.write('%post\n')
        pf.write('exec < /dev/console > /dev/console\n')
        pf.write(extra_command + '\n')
        pf.write('%end\n')


def add_late_command(preseedfile, extra_command):
    '''Append extra_command to the late_command entry of a preseed file,
    creating the entry if it does not exist yet.'''
    added = False
    with open(preseedfile, "r") as pf:
        lines = pf.readlines()
    endstring = '\\\n'
    # Merge continuation lines (ending in a backslash) with the line that
    # follows them, one join per pass, until none are left. The last line is
    # excluded: even if it ends in a backslash there is nothing to join.
    while endsin(lines[:-1], endstring):
        for linenum, data in enumerate(lines[:-1]):
            if endsin(data, endstring):
                lines[linenum] = data.replace(endstring, '') + lines[linenum + 1]
                del lines[linenum + 1]
                break
    for linenum, data in enumerate(lines):
        if re.match("d-i preseed/late_command string(.*)", data):
            # late_command already exists, append to it
            append_line = "; " + extra_command + "\n"
            lines[linenum] = lines[linenum].rstrip(' ;\n') + append_line
            added = True
    if not added:
        append_line = extra_command + "\n"
        lines.append("d-i preseed/late_command string " + append_line)
    with open(preseedfile, "w") as pf:
        for line in lines:
            pf.write(line)


def endsin(lines, endstring):
    '''Return True if the string, or any string in the list, ends with endstring.'''
    if isinstance(lines, list):
        return any(line.endswith(endstring) for line in lines)
    elif isinstance(lines, str):
        return lines.endswith(endstring)
    return False
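

if __name__ == '__main__':
    # Illustrative sketch, not part of the original module: exercise
    # add_late_command on a throwaway preseed file (the file name is
    # hypothetical).
    with open('example.preseed', 'w') as pf:
        pf.write('d-i preseed/late_command string echo one\n')
    add_late_command('example.preseed', 'echo two')
    with open('example.preseed') as pf:
        print(pf.read())  # -> d-i preseed/late_command string echo one; echo two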
|
• For openings of 4 metres and 8 metres respectively.
• Integrated photoelectric cells and command selectors.
• An optional illuminated strip can be fed down the entire length.
• The control panel is placed away from the mechanical parts, allowing easy intervention whatever the circumstances.
• Tubular bar ideal for reducing the sail effect.
• Available in zinc-plated steel and stainless steel finishes.
|
from pycuda import gpuarray
from scipy.signal import convolve2d
import cudaconv2
import numpy as np

cudaconv2.init()

def test_convolution():
    imgSize = 32
    filterSize = 5
    padding = 2
    color = 1
    imgNum = 1
    filterNum = 64
    stride = 1
    # Number of filter applications along each image dimension.
    modulesX = 1 + int((2 * padding + imgSize - filterSize) / float(stride))
    print 'Modules X', modulesX
    img = gpuarray.to_gpu(np.ones((imgSize * imgSize * color, imgNum)).astype(np.float32))
    filters = gpuarray.to_gpu(np.ones((filterSize * filterSize * color, filterNum)).astype(np.float32))
    target = gpuarray.to_gpu(np.ones((modulesX * modulesX * filterNum, imgNum)).astype(np.float32))
    print 'standard output for convolution'
    print convolve2d(np.ones((imgSize, imgSize)).astype(np.float32), np.ones((filterSize, filterSize)).astype(np.float32), 'valid')
    cudaconv2.convFilterActs(img, filters, target, imgSize, modulesX, modulesX, -padding, stride, color, 1, 0.0, 1.0)
    print 'pycuda output for convolution'
    atarget = target.get()
    print atarget
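

if __name__ == '__main__':
    # The module defines test_convolution but never invokes it; run it when
    # executed directly (assumes a CUDA-capable device and the cudaconv2
    # extension are available).
    test_convolution()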
|
Decking can be used for virtually anything, from social areas for the family to a play area for the children. This gives you the choice of how you wish to use the decking, and it generally expands your living space. It is perfect for parties, family events and many other forms of entertainment in the garden. Softwood timber is known to be easy to work with, so you can lay your own decking yourself, saving you money. All the timber we supply can be cut to your specific requirements, meaning no material is wasted and you only purchase what you need.
Decking can add huge aesthetic appeal to your home, which will entice future buyers. Decking areas are popular with homeowners and will give your home an edge over competitors. It also provides an opportunity for further development work and is, all round, an excellent investment. Not only is decking a great investment, it also provides you with that perfect outdoor living space, extending your home into your garden to create an elegant environment for all outdoor activities.
As long as the decking is maintained, it will last a very long time. We provide customers with strong, long-lasting materials that will ensure your decking stays at its best. Decking also doesn't require much maintenance: just give it a clean every now and then to keep it looking new. The timber we supply for your decking is of very high quality. Here at Stan Dawson, we take pride in offering our customers top-of-the-range timber. Many customers come to us for all their timber needs because they know the timber we supply will always be of the highest standard.
Decking is particularly popular in the summer. When you want to spend time with your family and friends in the garden, decking is the perfect solution and a great way to get more use out of your garden. It creates an open, outdoor living space perfect for barbecues and generally enjoying the summer weather. At Stan Dawson, we have a huge variety of timber decking that can be used in your garden. If you are unsure what you need, our team will be happy to advise you and offer more information to help you choose the perfect materials for your decking. Our team is highly knowledgeable about the timber we stock, so when you enquire with us here at Stan Dawson you can be assured that you are dealing with one of our expert team members.
We not only offer top-quality decking but also a range of other timber products. If you are interested in sheet material, click here.
Mon - Fri 8 am to 5 pm.
|
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2012-2020 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Hervé BREDIN - http://herve.niderb.fr
# Marvin LAVECHIN
from .base import BaseMetric, f_measure
from .utils import UEMSupportMixin
DER_NAME = 'detection error rate'
DER_TOTAL = 'total'
DER_FALSE_ALARM = 'false alarm'
DER_MISS = 'miss'
class DetectionErrorRate(UEMSupportMixin, BaseMetric):
"""Detection error rate
This metric can be used to evaluate binary classification tasks such as
speech activity detection, for instance. Inputs are expected to only
contain segments corresponding to the positive class (e.g. speech regions).
    Gaps in the inputs are considered as the negative class (e.g. non-speech
regions).
It is computed as (fa + miss) / total, where fa is the duration of false
alarm (e.g. non-speech classified as speech), miss is the duration of
missed detection (e.g. speech classified as non-speech), and total is the
total duration of the positive class in the reference.
Parameters
----------
collar : float, optional
Duration (in seconds) of collars removed from evaluation around
boundaries of reference segments (one half before, one half after).
skip_overlap : bool, optional
Set to True to not evaluate overlap regions.
Defaults to False (i.e. keep overlap regions).
"""
@classmethod
def metric_name(cls):
return DER_NAME
@classmethod
def metric_components(cls):
return [DER_TOTAL, DER_FALSE_ALARM, DER_MISS]
def __init__(self, collar=0.0, skip_overlap=False, **kwargs):
super(DetectionErrorRate, self).__init__(**kwargs)
self.collar = collar
self.skip_overlap = skip_overlap
def compute_components(self, reference, hypothesis, uem=None, **kwargs):
reference, hypothesis, uem = self.uemify(
reference, hypothesis, uem=uem,
collar=self.collar, skip_overlap=self.skip_overlap,
returns_uem=True)
reference = reference.get_timeline(copy=False).support()
hypothesis = hypothesis.get_timeline(copy=False).support()
reference_ = reference.gaps(support=uem)
hypothesis_ = hypothesis.gaps(support=uem)
false_positive = 0.
for r_, h in reference_.co_iter(hypothesis):
false_positive += (r_ & h).duration
false_negative = 0.
for r, h_ in reference.co_iter(hypothesis_):
false_negative += (r & h_).duration
detail = {}
detail[DER_MISS] = false_negative
detail[DER_FALSE_ALARM] = false_positive
detail[DER_TOTAL] = reference.duration()
return detail
def compute_metric(self, detail):
error = 1. * (detail[DER_FALSE_ALARM] + detail[DER_MISS])
total = 1. * detail[DER_TOTAL]
if total == 0.:
if error == 0:
return 0.
else:
return 1.
else:
return error / total
ACCURACY_NAME = 'detection accuracy'
ACCURACY_TRUE_POSITIVE = 'true positive'
ACCURACY_TRUE_NEGATIVE = 'true negative'
ACCURACY_FALSE_POSITIVE = 'false positive'
ACCURACY_FALSE_NEGATIVE = 'false negative'
class DetectionAccuracy(DetectionErrorRate):
"""Detection accuracy
This metric can be used to evaluate binary classification tasks such as
speech activity detection, for instance. Inputs are expected to only
contain segments corresponding to the positive class (e.g. speech regions).
    Gaps in the inputs are considered as the negative class (e.g. non-speech
regions).
It is computed as (tp + tn) / total, where tp is the duration of true
positive (e.g. speech classified as speech), tn is the duration of true
negative (e.g. non-speech classified as non-speech), and total is the total
duration of the input signal.
Parameters
----------
collar : float, optional
Duration (in seconds) of collars removed from evaluation around
boundaries of reference segments (one half before, one half after).
skip_overlap : bool, optional
Set to True to not evaluate overlap regions.
Defaults to False (i.e. keep overlap regions).
"""
@classmethod
def metric_name(cls):
return ACCURACY_NAME
@classmethod
def metric_components(cls):
return [ACCURACY_TRUE_POSITIVE, ACCURACY_TRUE_NEGATIVE,
ACCURACY_FALSE_POSITIVE, ACCURACY_FALSE_NEGATIVE]
def compute_components(self, reference, hypothesis, uem=None, **kwargs):
reference, hypothesis, uem = self.uemify(
reference, hypothesis, uem=uem,
collar=self.collar, skip_overlap=self.skip_overlap,
returns_uem=True)
reference = reference.get_timeline(copy=False).support()
hypothesis = hypothesis.get_timeline(copy=False).support()
reference_ = reference.gaps(support=uem)
hypothesis_ = hypothesis.gaps(support=uem)
true_positive = 0.
for r, h in reference.co_iter(hypothesis):
true_positive += (r & h).duration
true_negative = 0.
for r_, h_ in reference_.co_iter(hypothesis_):
true_negative += (r_ & h_).duration
false_positive = 0.
for r_, h in reference_.co_iter(hypothesis):
false_positive += (r_ & h).duration
false_negative = 0.
for r, h_ in reference.co_iter(hypothesis_):
false_negative += (r & h_).duration
detail = {}
detail[ACCURACY_TRUE_NEGATIVE] = true_negative
detail[ACCURACY_TRUE_POSITIVE] = true_positive
detail[ACCURACY_FALSE_NEGATIVE] = false_negative
detail[ACCURACY_FALSE_POSITIVE] = false_positive
return detail
def compute_metric(self, detail):
numerator = 1. * (detail[ACCURACY_TRUE_NEGATIVE] +
detail[ACCURACY_TRUE_POSITIVE])
denominator = 1. * (detail[ACCURACY_TRUE_NEGATIVE] +
detail[ACCURACY_TRUE_POSITIVE] +
detail[ACCURACY_FALSE_NEGATIVE] +
detail[ACCURACY_FALSE_POSITIVE])
if denominator == 0.:
return 1.
else:
return numerator / denominator
PRECISION_NAME = 'detection precision'
PRECISION_RETRIEVED = 'retrieved'
PRECISION_RELEVANT_RETRIEVED = 'relevant retrieved'
class DetectionPrecision(DetectionErrorRate):
"""Detection precision
This metric can be used to evaluate binary classification tasks such as
speech activity detection, for instance. Inputs are expected to only
contain segments corresponding to the positive class (e.g. speech regions).
    Gaps in the inputs are considered as the negative class (e.g. non-speech
regions).
It is computed as tp / (tp + fp), where tp is the duration of true positive
(e.g. speech classified as speech), and fp is the duration of false
positive (e.g. non-speech classified as speech).
Parameters
----------
collar : float, optional
Duration (in seconds) of collars removed from evaluation around
boundaries of reference segments (one half before, one half after).
skip_overlap : bool, optional
Set to True to not evaluate overlap regions.
Defaults to False (i.e. keep overlap regions).
"""
@classmethod
def metric_name(cls):
return PRECISION_NAME
@classmethod
def metric_components(cls):
return [PRECISION_RETRIEVED, PRECISION_RELEVANT_RETRIEVED]
def compute_components(self, reference, hypothesis, uem=None, **kwargs):
reference, hypothesis, uem = self.uemify(
reference, hypothesis, uem=uem,
collar=self.collar, skip_overlap=self.skip_overlap,
returns_uem=True)
reference = reference.get_timeline(copy=False).support()
hypothesis = hypothesis.get_timeline(copy=False).support()
reference_ = reference.gaps(support=uem)
true_positive = 0.
for r, h in reference.co_iter(hypothesis):
true_positive += (r & h).duration
false_positive = 0.
for r_, h in reference_.co_iter(hypothesis):
false_positive += (r_ & h).duration
detail = {}
detail[PRECISION_RETRIEVED] = true_positive + false_positive
detail[PRECISION_RELEVANT_RETRIEVED] = true_positive
return detail
def compute_metric(self, detail):
relevant_retrieved = 1. * detail[PRECISION_RELEVANT_RETRIEVED]
retrieved = 1. * detail[PRECISION_RETRIEVED]
if retrieved == 0.:
return 1.
else:
return relevant_retrieved / retrieved
RECALL_NAME = 'detection recall'
RECALL_RELEVANT = 'relevant'
RECALL_RELEVANT_RETRIEVED = 'relevant retrieved'
class DetectionRecall(DetectionErrorRate):
"""Detection recall
This metric can be used to evaluate binary classification tasks such as
speech activity detection, for instance. Inputs are expected to only
contain segments corresponding to the positive class (e.g. speech regions).
    Gaps in the inputs are considered as the negative class (e.g. non-speech
regions).
It is computed as tp / (tp + fn), where tp is the duration of true positive
(e.g. speech classified as speech), and fn is the duration of false
negative (e.g. speech classified as non-speech).
Parameters
----------
collar : float, optional
Duration (in seconds) of collars removed from evaluation around
boundaries of reference segments (one half before, one half after).
skip_overlap : bool, optional
Set to True to not evaluate overlap regions.
Defaults to False (i.e. keep overlap regions).
"""
@classmethod
def metric_name(cls):
return RECALL_NAME
@classmethod
def metric_components(cls):
return [RECALL_RELEVANT, RECALL_RELEVANT_RETRIEVED]
def compute_components(self, reference, hypothesis, uem=None, **kwargs):
reference, hypothesis, uem = self.uemify(
reference, hypothesis, uem=uem,
collar=self.collar, skip_overlap=self.skip_overlap,
returns_uem=True)
reference = reference.get_timeline(copy=False).support()
hypothesis = hypothesis.get_timeline(copy=False).support()
hypothesis_ = hypothesis.gaps(support=uem)
true_positive = 0.
for r, h in reference.co_iter(hypothesis):
true_positive += (r & h).duration
false_negative = 0.
for r, h_ in reference.co_iter(hypothesis_):
false_negative += (r & h_).duration
detail = {}
detail[RECALL_RELEVANT] = true_positive + false_negative
detail[RECALL_RELEVANT_RETRIEVED] = true_positive
return detail
def compute_metric(self, detail):
relevant_retrieved = 1. * detail[RECALL_RELEVANT_RETRIEVED]
relevant = 1. * detail[RECALL_RELEVANT]
if relevant == 0.:
if relevant_retrieved == 0:
return 1.
else:
return 0.
else:
return relevant_retrieved / relevant
DFS_NAME = 'F[precision|recall]'
DFS_PRECISION_RETRIEVED = 'retrieved'
DFS_RECALL_RELEVANT = 'relevant'
DFS_RELEVANT_RETRIEVED = 'relevant retrieved'
class DetectionPrecisionRecallFMeasure(UEMSupportMixin, BaseMetric):
"""Compute detection precision and recall, and return their F-score
Parameters
----------
collar : float, optional
Duration (in seconds) of collars removed from evaluation around
boundaries of reference segments (one half before, one half after).
skip_overlap : bool, optional
Set to True to not evaluate overlap regions.
Defaults to False (i.e. keep overlap regions).
beta : float, optional
When beta > 1, greater importance is given to recall.
When beta < 1, greater importance is given to precision.
Defaults to 1.
See also
--------
pyannote.metrics.detection.DetectionPrecision
pyannote.metrics.detection.DetectionRecall
pyannote.metrics.base.f_measure
"""
@classmethod
def metric_name(cls):
return DFS_NAME
@classmethod
def metric_components(cls):
return [DFS_PRECISION_RETRIEVED, DFS_RECALL_RELEVANT, DFS_RELEVANT_RETRIEVED]
def __init__(self, collar=0.0, skip_overlap=False,
beta=1., **kwargs):
super(DetectionPrecisionRecallFMeasure, self).__init__(**kwargs)
self.collar = collar
self.skip_overlap = skip_overlap
self.beta = beta
def compute_components(self, reference, hypothesis, uem=None, **kwargs):
reference, hypothesis, uem = self.uemify(
reference, hypothesis, uem=uem,
collar=self.collar, skip_overlap=self.skip_overlap,
returns_uem=True)
reference = reference.get_timeline(copy=False).support()
hypothesis = hypothesis.get_timeline(copy=False).support()
reference_ = reference.gaps(support=uem)
hypothesis_ = hypothesis.gaps(support=uem)
        # Better to recompute everything from scratch instead of calling the
        # DetectionPrecision & DetectionRecall classes (this skips one of the
        # loops that compute the amount of true positives).
true_positive = 0.
for r, h in reference.co_iter(hypothesis):
true_positive += (r & h).duration
false_positive = 0.
for r_, h in reference_.co_iter(hypothesis):
false_positive += (r_ & h).duration
false_negative = 0.
for r, h_ in reference.co_iter(hypothesis_):
false_negative += (r & h_).duration
detail = {DFS_PRECISION_RETRIEVED: true_positive + false_positive,
DFS_RECALL_RELEVANT: true_positive + false_negative,
DFS_RELEVANT_RETRIEVED: true_positive}
return detail
def compute_metric(self, detail):
_, _, value = self.compute_metrics(detail=detail)
return value
def compute_metrics(self, detail=None):
detail = self.accumulated_ if detail is None else detail
precision_retrieved = detail[DFS_PRECISION_RETRIEVED]
recall_relevant = detail[DFS_RECALL_RELEVANT]
relevant_retrieved = detail[DFS_RELEVANT_RETRIEVED]
# Special cases : precision
if precision_retrieved == 0.:
            precision = 1.
else:
precision = relevant_retrieved / precision_retrieved
# Special cases : recall
if recall_relevant == 0.:
if relevant_retrieved == 0:
recall = 1.
else:
recall = 0.
else:
recall = relevant_retrieved / recall_relevant
return precision, recall, f_measure(precision, recall, beta=self.beta)
DCF_NAME = 'detection cost function'
DCF_POS_TOTAL = 'positive class total' # Total duration of positive class.
DCF_NEG_TOTAL = 'negative class total' # Total duration of negative class.
DCF_FALSE_ALARM = 'false alarm' # Total duration of false alarms.
DCF_MISS = 'miss' # Total duration of misses.
class DetectionCostFunction(UEMSupportMixin, BaseMetric):
"""Detection cost function.
This metric can be used to evaluate binary classification tasks such as
speech activity detection. Inputs are expected to only contain segments
corresponding to the positive class (e.g. speech regions). Gaps in the
    inputs are considered as the negative class (e.g. non-speech regions).
Detection cost function (DCF), as defined by NIST for OpenSAT 2019, is
0.25*far + 0.75*missr, where far is the false alarm rate
(i.e., the proportion of non-speech incorrectly classified as speech)
and missr is the miss rate (i.e., the proportion of speech incorrectly
    classified as non-speech).
Parameters
----------
collar : float, optional
Duration (in seconds) of collars removed from evaluation around
boundaries of reference segments (one half before, one half after).
Defaults to 0.0.
skip_overlap : bool, optional
Set to True to not evaluate overlap regions.
Defaults to False (i.e. keep overlap regions).
fa_weight : float, optional
Weight for false alarm rate.
Defaults to 0.25.
miss_weight : float, optional
Weight for miss rate.
Defaults to 0.75.
kwargs
Keyword arguments passed to :class:`pyannote.metrics.base.BaseMetric`.
References
----------
"OpenSAT19 Evaluation Plan v2." https://www.nist.gov/system/files/documents/2018/11/05/opensat19_evaluation_plan_v2_11-5-18.pdf
"""
def __init__(self, collar=0.0, skip_overlap=False, fa_weight=0.25,
miss_weight=0.75, **kwargs):
super(DetectionCostFunction, self).__init__(**kwargs)
self.collar = collar
self.skip_overlap = skip_overlap
self.fa_weight = fa_weight
self.miss_weight = miss_weight
@classmethod
def metric_name(cls):
return DCF_NAME
@classmethod
def metric_components(cls):
return [DCF_POS_TOTAL, DCF_NEG_TOTAL, DCF_FALSE_ALARM, DCF_MISS]
def compute_components(self, reference, hypothesis, uem=None, **kwargs):
reference, hypothesis, uem = self.uemify(
reference, hypothesis, uem=uem,
collar=self.collar, skip_overlap=self.skip_overlap,
returns_uem=True)
# Obtain timelines corresponding to positive class.
reference = reference.get_timeline(copy=False).support()
hypothesis = hypothesis.get_timeline(copy=False).support()
# Obtain timelines corresponding to negative class.
reference_ = reference.gaps(support=uem)
hypothesis_ = hypothesis.gaps(support=uem)
# Compute total positive/negative durations.
pos_dur = reference.duration()
neg_dur = reference_.duration()
# Compute total miss duration.
miss_dur = 0.0
for r, h_ in reference.co_iter(hypothesis_):
miss_dur += (r & h_).duration
# Compute total false alarm duration.
fa_dur = 0.0
for r_, h in reference_.co_iter(hypothesis):
fa_dur += (r_ & h).duration
components = {
DCF_POS_TOTAL : pos_dur,
DCF_NEG_TOTAL : neg_dur,
DCF_MISS : miss_dur,
DCF_FALSE_ALARM : fa_dur}
return components
def compute_metric(self, components):
def _compute_rate(num, denom):
if denom == 0.0:
if num == 0.0:
return 0.0
return 1.0
return num/denom
# Compute false alarm rate.
neg_dur = components[DCF_NEG_TOTAL]
fa_dur = components[DCF_FALSE_ALARM]
fa_rate = _compute_rate(fa_dur, neg_dur)
# Compute miss rate.
pos_dur = components[DCF_POS_TOTAL]
miss_dur = components[DCF_MISS]
miss_rate = _compute_rate(miss_dur, pos_dur)
return self.fa_weight*fa_rate + self.miss_weight*miss_rate
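

if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module (run it as a
    # module, e.g. "python -m pyannote.metrics.detection", so the relative
    # imports resolve); assumes pyannote.core is installed. Inputs only
    # contain the positive class (e.g. speech); gaps are the negative class.
    from pyannote.core import Annotation, Segment
    reference = Annotation()
    reference[Segment(0, 10)] = 'speech'
    hypothesis = Annotation()
    hypothesis[Segment(2, 11)] = 'speech'
    metric = DetectionErrorRate()
    # (false alarm + miss) / total: 1s false alarm + 2s miss over 10s here.
    print(metric(reference, hypothesis))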
|
The Classical Kid for January is 9-year-old Jackson Mu. Jackson attends Aldrich Elementary and has played the piano for four years, and the violin for three. He's performed annually in "Christmas with the Omaha Symphony" and played solo piano in front of thousands at a Chinese New Year gala when he was six! He takes tennis lessons, plays Minecraft and loves riding his bike. The Classical Kid for February is 10-year-old BingYi Wang. He's played the piano for four years and studies at the Omaha Conservatory of Music. He loves making music because he can express his emotions and it provides endless opportunities. He loves reading and playing tennis and baseball. His favorite musical memory was receiving praise from the UNO staff at the Nebraska Summer Music Olympics.
The Classical Kid for March is 13-year-old Laura Li. Laura studies piano at the Omaha Conservatory and also performs with her middle school jazz bands. She attends Elkhorn Valley Middle School and says she loves making music because of the finished product. Her other favorite subjects include reading, biology and engineering. The Classical Kid for April is 9-year-old Rayna Kong. Rayna studies piano at the Omaha Conservatory and has been playing for 5 years. She goes to West Bay Elementary and loves learning new pieces and performing in district and state competitions and the Summer Music Olympics, where she most recently received honorable mentions. She also plays tennis and speaks Chinese.
The KVNO Classical Kid for May is 10-year-old pianist Mia Simon. Mia attends Willa Cather Elementary and has been playing the piano since she was five years old. She also plays the cello and participates with the Omaha Area Youth Orchestras and the Omaha Conservatory. Her favorite thing about playing these instruments is getting together to enjoy them with friends while learning new music. She also loves reading, coloring and playing tennis. 14-year-old pianist Olivia Zink is KVNO's Classical Kid for the month of June. She is homeschooled and has been playing the piano for nine years. She plays with the Omaha Homeschool Band and Orchestra, participates at the advanced level and also performs at UNO's Omaha Music Festival every year. She also plays flute, loves to read, and her favorite subjects are history and debate.
The KVNO Classical Kid for July is 13-year-old pianist Immanuel Soh. He is homeschooled and has been studying the piano since the age of 6. His most recent chance to perform was in Melbourne, Australia, and he loves visiting family in Malaysia. He enjoys listening to the music he makes to be reminded of his hard work. He also enjoys chess and reading, and participated in this year's Scripps National Spelling Bee. The KVNO Classical Kid for August is 9-year-old pianist Alleyah Flint. She attends Avery Elementary and studies at the Omaha Conservatory of Music, where she was the youngest student when she started at the age of three. She also participates in a local ensemble called Musical Kids and provides entertainment for the Distinguished Young Women Scholarship Program. She also enjoys dance lessons and puzzles.
The KVNO Classical Kid for September is 14-year-old pianist Hannah Zink. Hannah is in 9th grade and is homeschooled along with her sisters. She loves John Williams and Hans Zimmer and enjoys learning film scores by ear at the piano while making her own arrangements. She's been playing for 9 years and just started taking percussion lessons. She loves history and participates in the Omaha Homeschool Band and Orchestra. The KVNO Classical Kid for the month of October is Elizabeth Ford. Elizabeth plays trumpet, attends Papillion Middle School, and performs in concert and jazz band as first chair. She also plays piano and is first chair in the Omaha Area Youth Orchestra Honor Orchestra. Her favorite thing about making music is the challenge, setting new goals, and creating it with friends. She is a competitive swimmer and enjoys drawing and crocheting.
The Classical Kid for November is 12-year-old pianist Jonas Duda. He attends Bernadette Middle School and has been playing piano for six years. He also plays clarinet in his middle school band through Gross High School. His favorite composers are Beethoven and Chopin, and he loves setting goals through his practice. He is a black belt in Tae Kwon Do and loves to read. In his free time, he enjoys shooting hoops or playing cards with his family.
|
# -*- coding: utf-8 -*-
''' Diagram to display a property defined over linear elements. '''
__author__= "Luis C. Pérez Tato (LCPT) , Ana Ortega (AO_O) "
__copyright__= "Copyright 2016, LCPT, AO_O"
__license__= "GPL"
__version__= "3.0"
__email__= "[email protected], [email protected] "
from miscUtils import LogMessages as lmsg
import vtk
from postprocess.xcVtk import colored_diagram as cd
from postprocess import extrapolate_elem_attr
class ElementPropertyDiagram(cd.ColoredDiagram):
    '''Diagram of element properties.'''
    envelopes = set(['N+', 'N-', 'My+', 'My-', 'Mz+', 'Mz-', 'Vy+', 'Vy-', 'Vz+', 'Vz-', 'T+', 'T-'])

    def __init__(self, scaleFactor, fUnitConv, sets, propertyName):
        '''Diagram that represents a property value over several sets of elements.

        :param scaleFactor: scale factor (size of the diagram).
        :param fUnitConv: unit conversion factor (i.e. N->kN => fUnitConv= 1e-3).
        :param sets: represent the field over those element sets.
        :param propertyName: name of the element's property to represent.
        '''
        super(ElementPropertyDiagram, self).__init__(scaleFactor, fUnitConv)
        self.lstSets = sets
        self.propertyName = propertyName

    def appendDataSetToDiagram(self, eSet, indxDiagrama, defFScale=0.0):
        '''Append property values to diagram.

        :param eSet: element set.
        :param indxDiagrama: current index in the diagram data structure.
        :param defFScale: factor to apply to current displacement of nodes
                    so that the display position of each node equals to
                    the initial position plus its displacement multiplied
                    by this factor. (Defaults to 0.0, i.e. display of
                    initial/undeformed shape)
        '''
        elems = eSet.getElements
        # Properties drawn along the element local K vector; the remaining
        # envelope-style properties are drawn along the local J vector.
        kVectorProperties = set(['My+', 'My-', 'Vz+', 'Vz-'])
        jVectorProperties = set(['FCTNCP', 'FCVCP', 'N+', 'N-', 'Mz+', 'Mz-', 'Vy+', 'Vy-'])
        if self.propertyName in kVectorProperties or self.propertyName in jVectorProperties:
            for e in elems:
                if self.propertyName in kVectorProperties:
                    self.vDir = e.getKVector3d(True)  # initialGeometry= True
                else:
                    self.vDir = e.getJVector3d(True)  # initialGeometry= True
                values = e.getProp(self.propertyName)  # [back node value, front node value]
                indxDiagrama = self.appendDataToDiagram(e, indxDiagrama, values[0], values[1], defFScale)
        else:
            extrapolate_elem_attr.extrapolate_elem_function_attr(elems, self.propertyName, "getProp", self.propertyName)
            for e in elems:
                self.vDir = e.getJVector3d(True)  # initialGeometry= True
                v0 = e.getNodes[0].getProp(self.propertyName)
                v1 = e.getNodes[1].getProp(self.propertyName)
                indxDiagrama = self.appendDataToDiagram(e, indxDiagrama, v0, v1, defFScale)
        return indxDiagrama

    def addDiagram(self):
        self.creaEstrucDatosDiagrama()
        self.creaLookUpTable()
        self.creaActorDiagrama()
        indxDiagrama = 0
        for s in self.lstSets:
            indxDiagrama = self.appendDataSetToDiagram(s, indxDiagrama)
        self.updateLookUpTable()
        self.updateActorDiagrama()
|
I have watery eyes. Yeah, I do. At any sign of sun my eyes start to stream and it looks like I’m crying. I know, too good. The problem is that I also love wearing makeup that defines my eyes, particularly on my lower lash line. I do not love having makeup all under my eyes, because… I spent too long on my concealer. I’ve found it though… the product that adds definition and doesn’t budge.
It’s the By Terry Ombre Blackstar in Brown Perfection, a deep brown without too much shimmer, and for the last couple of weeks I have been drawing a little of it on my lower lash line every day. I love how it makes my eyes look generally nice, and the shade is warm enough not to wash me out, yet not so dark that it looks harsh or makes my eyes look smaller. I’ve spoken about my love for the Ombre Blackstar pencils before, and you’ll be happy to know I am still obsessing over them. Misty Rocky is amazing, as is Frozen Quartz all over the lid, but Brown Perfection has been strictly reserved for lower lash line action.
I apply it in the morning, and it’s still there when I take off my makeup 12+ hours later. Amazing stuff, and far better than any eyeliner I’ve ever tried.
|
#----------------------------------------------------------------------
# Copyright (c) 2010-2016 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
'''
Credential creation and verification utilities.
'''
from __future__ import absolute_import
import os
import logging
import xmlrpclib
import sys
import datetime
import dateutil.tz
from ...sfa.trust import credential as cred
from ...sfa.trust import gid
from ...sfa.trust import rights
from ...sfa.util.xrn import hrn_authfor_hrn
from ...sfa.trust.credential_factory import CredentialFactory
from ...sfa.trust.abac_credential import ABACCredential
from ...sfa.trust.certificate import Certificate
from .speaksfor_util import determine_speaks_for
def naiveUTC(dt):
"""Converts dt to a naive datetime in UTC.
if 'dt' has a timezone then
convert to UTC
strip off timezone (make it "naive" in Python parlance)
"""
if dt.tzinfo:
tz_utc = dateutil.tz.tzutc()
dt = dt.astimezone(tz_utc)
dt = dt.replace(tzinfo=None)
return dt
class CredentialVerifier(object):
"""Utilities to verify signed credentials from a given set of
root certificates. Will compare target and source URNs, and privileges.
See verify and verify_from_strings methods in particular."""
CATEDCERTSFNAME = 'CATedCACerts.pem'
# root_cert_fileordir is a trusted root cert file or directory of
# trusted roots for verifying credentials
def __init__(self, root_cert_fileordir):
self.logger = logging.getLogger('cred-verifier')
if root_cert_fileordir is None:
raise Exception("Missing Root certs argument")
elif os.path.isdir(root_cert_fileordir):
files = os.listdir(root_cert_fileordir)
self.root_cert_files = []
for file in files:
# FIXME: exclude files that aren't cert files?
if file == CredentialVerifier.CATEDCERTSFNAME:
continue
self.root_cert_files.append(os.path.expanduser(os.path.join(root_cert_fileordir, file)))
self.logger.info('Will accept credentials signed by any of %d root certs found in %s: %r' % (len(self.root_cert_files), root_cert_fileordir, self.root_cert_files))
elif os.path.isfile(root_cert_fileordir):
self.logger.info('Will accept credentials signed by the single root cert %s' % root_cert_fileordir)
self.root_cert_files = [root_cert_fileordir]
else:
raise Exception("Couldn't find Root certs in %s" % root_cert_fileordir)
@classmethod
def getCAsFileFromDir(cls, caCerts):
'''Take a directory of CA certificates and concatenate them into a single
file suitable for use by the Python SSL library to validate client
credentials. Existing file is replaced.'''
if caCerts is None:
raise Exception ('Missing caCerts argument')
if os.path.isfile(os.path.expanduser(caCerts)):
return caCerts
if not os.path.isdir(os.path.expanduser(caCerts)):
raise Exception ('caCerts arg Not a file or a dir: %s' % caCerts)
logger = logging.getLogger('cred-verifier')
# Now we have a dir of caCerts files
# For each file in the dir (isfile), concatenate them into a new file
comboFullPath = os.path.join(caCerts, CredentialVerifier.CATEDCERTSFNAME)
caFiles = os.listdir(caCerts)
#logger.debug('Got %d potential caCert files in the dir', len(caFiles))
outfile = open(comboFullPath, "w")
okFileCount = 0
for filename in caFiles:
filepath = os.path.join(caCerts, filename)
# Confirm it's a CA file?
# if not file.endswith('.pem'):
# continue
if not os.path.isfile(os.path.expanduser(filepath)):
logger.debug('Skipping non file %s', filepath)
continue
if filename == CredentialVerifier.CATEDCERTSFNAME:
# logger.debug('Skipping previous cated certs file')
continue
okFileCount += 1
logger.info("Adding trusted cert file %s", filename)
certfile = open(filepath)
for line in certfile:
outfile.write(line)
certfile.close()
outfile.close()
if okFileCount == 0:
sys.exit('Found NO trusted certs in %s!' % caCerts)
else:
logger.info('Combined dir of %d trusted certs %s into file %s for Python SSL support', okFileCount, caCerts, comboFullPath)
return comboFullPath
# Get the GID of the caller, substituting the real user if this is a 'speaks-for' invocation
def get_caller_gid(self, gid_string, cred_strings, options=None):
root_certs = \
[Certificate(filename=root_cert_file) \
for root_cert_file in self.root_cert_files]
caller_gid = gid.GID(string=gid_string)
# Potentially, change gid_string to be the cert of the actual user
# if this is a 'speaks-for' invocation
speaksfor_gid = \
determine_speaks_for(self.logger, \
cred_strings, # May include ABAC speaks_for credential
caller_gid, # Caller cert (may be the tool 'speaking for' user)
options, # May include 'geni_speaking_for' option with user URN
root_certs
)
if caller_gid.get_subject() != speaksfor_gid.get_subject():
speaksfor_urn = speaksfor_gid.get_urn()
self.logger.info("Speaks-for Invocation: %s speaking for %s" % (caller_gid.get_urn(), speaksfor_urn))
caller_gid = speaksfor_gid
return caller_gid
def verify_from_strings(self, gid_string, cred_strings, target_urn,
privileges, options=None):
'''Create Credential and GID objects from the given strings,
and then verify the GID has the right privileges according
to the given credentials on the given target.'''
def make_cred(cred_string):
credO = None
try:
credO = CredentialFactory.createCred(credString=cred_string)
except Exception, e:
self.logger.warn("Skipping unparsable credential. Error: %s. Credential begins: %s...", e, cred_string[:60])
return credO
# Get the GID of the caller, substituting the real user if this is a 'speaks-for' invocation
caller_gid = self.get_caller_gid(gid_string, cred_strings, options)
# Remove the abac credentials
cred_strings = [cred_string for cred_string in cred_strings \
if CredentialFactory.getType(cred_string) == cred.Credential.SFA_CREDENTIAL_TYPE]
return self.verify(caller_gid,
map(make_cred, cred_strings),
target_urn,
privileges)
def verify_source(self, source_gid, credential):
'''Ensure the credential is giving privileges to the caller/client.
Return True iff the given source (client) GID's URN
is == the given credential's Caller (Owner) URN'''
source_urn = source_gid.get_urn()
cred_source_urn = credential.get_gid_caller().get_urn()
#self.logger.debug('Verifying source %r against credential source %r (cred target %s)',
# source_urn, cred_source_urn, credential.get_gid_object().get_urn())
result = (cred_source_urn == source_urn)
if result:
# self.logger.debug('Source URNs match')
pass
else:
self.logger.debug('Source URNs do not match. Source URN %r != credential source URN %r', source_urn, cred_source_urn)
return result
def verify_target(self, target_urn, credential):
'''Ensure the credential is giving privileges on the right subject/target.
Return True if no target is specified, or the target URN
matches the credential's Object's (target's) URN, else return False.
No target is required, for example, to ListResources.'''
if not target_urn:
# self.logger.debug('No target specified, considering it a match.')
return True
else:
cred_target_urn = credential.get_gid_object().get_urn()
# self.logger.debug('Verifying target %r against credential target %r',
# target_urn, cred_target_urn)
result = target_urn == cred_target_urn
if result:
# self.logger.debug('Target URNs match.')
pass
else:
self.logger.debug('Target URNs do NOT match. Target URN %r != Credential URN %r', target_urn, cred_target_urn)
return result
def verify_privileges(self, privileges, credential):
''' Return True iff the given credential gives the privilege
to perform ALL of the privileges (actions) in the given list.
In particular, the given list of 'privileges' is really a list
of names of operations. The privileges in credentials are
each turned in to Rights objects (see sfa/trust/rights.py).
And the SFA rights table is used to map from names of privileges
as specified in credentials, to names of operations.'''
result = True
privs = credential.get_privileges()
for priv in privileges:
if not privs.can_perform(priv):
self.logger.debug('Privilege %s not found on credential %s of %s', priv, credential.get_gid_object().get_urn(), credential.get_gid_caller().get_urn())
result = False
return result
def verify(self, gid, credentials, target_urn, privileges):
'''Verify that the given Source GID supplied at least one credential
in the given list of credentials that has all the privileges required
in the privileges list on the given target.
IE if any of the supplied credentials has a caller that matches gid
and a target that matches target_urn, and has all the privileges in
the given list, then return the list of credentials that were ok.
If no target_urn is supplied, then no credential is required, but any supplied
credential must be valid.
Throw an Exception if we fail to verify any credential.'''
        # Note that here we treat a list of credentials as options:
        # alternatively we could, for example, accumulate privileges.
        # The semantics of the list of credentials is underspecified.
self.logger.debug('Verifying privileges')
result = list()
failure = ""
tried_creds = ""
if len(credentials) == 0:
if (target_urn is None):
self.logger.debug("No credentials, but also no target, so OK")
return result
else:
# EG a slice_urn was supplied but no credentials
failure = "No credentials found"
for cred in credentials:
if cred is None:
failure = "Credential was unparseable"
continue
if cred.get_cred_type() == cred.SFA_CREDENTIAL_TYPE:
cS = cred.get_gid_caller().get_urn()
elif cred.get_cred_type() == ABACCredential.ABAC_CREDENTIAL_TYPE:
cS = cred.get_summary_tostring()
else:
cS = "Unknown credential type %s" % cred.get_cred_type()
if tried_creds != "":
tried_creds = "%s, %s" % (tried_creds, cS)
else:
tried_creds = cS
if cred.get_cred_type() != cred.SFA_CREDENTIAL_TYPE:
failure = "Not an SFA credential: " + cS
continue
if not self.verify_source(gid, cred):
failure = "Cred %s fails: Credential doesn't grant rights to you (%s), but to %s (over object %s)" % (cred.get_gid_caller().get_urn(), gid.get_urn(), cred.get_gid_caller().get_urn(), cred.get_gid_object().get_urn())
continue
if not self.verify_target(target_urn, cred):
failure = "Cred granting rights to %s on %s fails: It grants permissions over a different target, not %s (URNs dont match)" % (cred.get_gid_caller().get_urn(), cred.get_gid_object().get_urn(), target_urn)
continue
if not self.verify_privileges(privileges, cred):
failure = "Cred for %s over %s doesn't provide sufficient privileges" % (cred.get_gid_caller().get_urn(), cred.get_gid_object().get_urn())
continue
try:
if not cred.verify(self.root_cert_files):
failure = "Couldn't validate credential for caller %s with target %s with any of %d known root certs" % (cred.get_gid_caller().get_urn(), cred.get_gid_object().get_urn(), len(self.root_cert_files))
continue
except Exception, exc:
failure = "Couldn't validate credential for caller %s with target %s with any of %d known root certs: %s: %s" % (cred.get_gid_caller().get_urn(), cred.get_gid_object().get_urn(), len(self.root_cert_files), exc.__class__.__name__, exc)
self.logger.info(failure)
continue
# If got here it verified
result.append(cred)
        if result:
# At least one credential verified ok and was added to the list
# return that list
return result
else:
# We did not find any credential with sufficient privileges
# Raise an exception.
fault_code = 'Insufficient privileges'
fault_string = 'No credential was found with appropriate privileges. Tried %s. Last failure: %s' % (tried_creds, failure)
self.logger.error(fault_string)
# GCF ticket #120 - do not raise an xmlrpclib Fault here -
# just an Exception. But the caller may want to turn this
# into one
# raise xmlrpclib.Fault(fault_code, fault_string)
raise Exception(fault_string)
def create_credential(caller_gid, object_gid, expiration, typename, issuer_keyfile, issuer_certfile, trusted_roots, delegatable=False):
'''Create and Return a Credential object issued by given key/cert for the given caller
and object GID objects, given life in seconds, and given type.
Privileges are determined by type per sfa/trust/rights.py
Privileges are delegatable if requested.'''
# FIXME: Validate args: my gids, >0 life,
# type of cred one I can issue
# and readable key and cert files
if caller_gid is None:
raise ValueError("Missing Caller GID")
if object_gid is None:
raise ValueError("Missing Object GID")
if expiration is None:
raise ValueError("Missing expiration")
naive_expiration = naiveUTC(expiration)
duration = naive_expiration - datetime.datetime.utcnow()
life_secs = duration.seconds + duration.days * 24 * 3600
if life_secs < 1:
raise ValueError("Credential expiration is in the past")
if trusted_roots is None:
raise ValueError("Missing list of trusted roots")
if typename is None or typename.strip() == '':
raise ValueError("Missing credential type")
typename = typename.strip().lower()
if typename not in ("user", "sa", "ma", "authority", "slice", "component"):
raise ValueError("Unknown credential type %s" % typename)
    if not os.path.isfile(issuer_keyfile):
        raise ValueError("Can't read issuer key file %s" % issuer_keyfile)
    if not os.path.isfile(issuer_certfile):
        raise ValueError("Can't read issuer cert file %s" % issuer_certfile)
issuer_gid = gid.GID(filename=issuer_certfile)
if not (object_gid.get_urn() == issuer_gid.get_urn() or
(issuer_gid.get_type().find('authority') == 0 and
hrn_authfor_hrn(issuer_gid.get_hrn(), object_gid.get_hrn()))):
raise ValueError("Issuer not authorized to issue credential: Issuer=%s Target=%s" % (issuer_gid.get_urn(), object_gid.get_urn()))
ucred = cred.Credential()
# FIXME: Validate the caller_gid and object_gid
# are my user and slice
# Do get_issuer and compare to the issuer cert?
# Or do gid.is_signed_by_cert(issuer_certfile)?
ucred.set_gid_caller(caller_gid)
ucred.set_gid_object(object_gid)
ucred.set_expiration(expiration)
# Use sfa/trust/rights.py to figure out what privileges
# the credential should have.
# user means refresh, resolve, info
# per the privilege_table that lets users do
# remove, update, resolve, list, getcredential,
# listslices, listnodes, getpolicy
# Note that it does not allow manipulating slivers
# And every right is delegatable if any are delegatable (default False)
privileges = rights.determine_rights(typename, None)
privileges.delegate_all_privileges(delegatable)
ucred.set_privileges(privileges)
ucred.encode()
ucred.set_issuer_keys(issuer_keyfile, issuer_certfile)
ucred.sign()
try:
ucred.verify(trusted_roots)
except Exception, exc:
raise Exception("Create Credential failed to verify new credential from trusted roots: %s" % exc)
return ucred
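
# Illustrative usage sketch (not part of the original module; the paths and
# variables below are hypothetical):
#
#   verifier = CredentialVerifier('/path/to/trusted_roots')
#   creds = verifier.verify_from_strings(caller_gid_pem, [slice_cred_xml],
#                                        slice_urn, ['listslices'])
#
# verify_from_strings raises an Exception if no supplied credential grants
# all of the requested privileges on the target URN.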
|
This page contains the solution of level 4-3 in episode 4 (Flight in the Night) from the popular iOS and Android game Bad Piggies. Below you can find a walkthrough for building the right contraptions needed to earn all 3 stars.
Hey, I was looking around the course and I found the Angry Birds. If you go all the way right, does that mean anything? And great tip, but I can't get the star thing.
|
import json
import mock
from django.test import Client
from chroma_core.models import ManagedHost, ServerProfile, Nid
from chroma_core.models.registration_token import RegistrationToken
from chroma_core.services.crypto import Crypto
from chroma_core.services.job_scheduler.job_scheduler_client import JobSchedulerClient
from chroma_agent_comms.views import ValidatedClientView
from tests.unit.chroma_core.helpers import generate_csr, synthetic_host, load_default_profile
from tests.unit.lib.iml_unit_test_case import IMLUnitTestCase
from tests.utils import patch, timed
import settings
class TestRegistration(IMLUnitTestCase):
"""API unit tests for functionality used only by the agent"""
mock_servers = {
"mynewhost": {
"fqdn": "mynewhost.mycompany.com",
"nodename": "test01.mynewhost.mycompany.com",
"nids": [Nid.Nid("192.168.0.1", "tcp", 0)],
}
}
def setUp(self):
super(TestRegistration, self).setUp()
load_default_profile()
self.old_create_host = JobSchedulerClient.create_host
JobSchedulerClient.create_host = mock.Mock(
side_effect=lambda *args, **kwargs: (
synthetic_host("mynewhost", **self.mock_servers["mynewhost"]),
mock.Mock(id="bar"),
)
)
ValidatedClientView.valid_certs = {}
def tearDown(self):
JobSchedulerClient.create_host = self.old_create_host
def test_version(self):
host_info = self.mock_servers["mynewhost"]
with timed("csr", 10):
data = {
"fqdn": host_info["fqdn"],
"nodename": host_info["nodename"],
"version": "1.0",
"capabilities": ["manage_targets"],
"address": "mynewhost",
"csr": generate_csr(host_info["fqdn"]),
}
with patch(settings, VERSION="2.0"):
# Try with a mis-matched version
token = RegistrationToken.objects.create(profile=ServerProfile.objects.get())
with timed("register fail", 10):
response = Client().post(
"/agent/register/%s/" % token.secret, data=json.dumps(data), content_type="application/json"
)
self.assertEqual(response.status_code, 400)
# Try with a matching version
token = RegistrationToken.objects.create(profile=ServerProfile.objects.get())
settings.VERSION = "1.1"
with timed("register pass", 10):
response = Client().post(
"/agent/register/%s/" % token.secret, data=json.dumps(data), content_type="application/json"
)
self.assertEqual(response.status_code, 201)
content = json.loads(response.content)
# reregistration should fail with unknown serial
data = {"address": "mynewhost", "fqdn": "mynewhost.newcompany.com"}
headers = {"HTTP_X_SSL_CLIENT_NAME": host_info["fqdn"], "HTTP_X_SSL_CLIENT_SERIAL": ""}
response = Client().post(
"/agent/reregister/", data=json.dumps(data), content_type="application/json", **headers
)
self.assertEqual(response.status_code, 403)
# reregistration should update host's domain name
headers["HTTP_X_SSL_CLIENT_SERIAL"] = Crypto().get_serial(content["certificate"])
response = Client().post(
"/agent/reregister/", data=json.dumps(data), content_type="application/json", **headers
)
self.assertEqual(response.status_code, 200)
host = ManagedHost.objects.get(id=content["host_id"])
self.assertEqual(host.fqdn, data["fqdn"])
    # TODO: reinstate selinux check, probably within the agent itself (it should fail
# its own registration step without even talking to the manager)
# def test_selinux_detection(self):
# """Test that a host with SELinux enabled fails setup."""
# MockAgentRpc.selinux_enabled = True
# try:
# import time
# host = self._create_host('myaddress')
# self.assertTrue(Command.objects.all().order_by("-id")[0].errored)
# self.assertState(host, 'unconfigured')
# finally:
# MockAgentRpc.selinux_enabled = False
|
Mortgage securities allow lenders to recover much-needed capital to lend to new borrowers.
Mortgage securitization developed in the 1970s, when lenders began creating pools of mortgages and selling them to government-backed agencies such as Ginnie Mae, Freddie Mac and Fannie Mae. These agencies guaranteed the projected income from the pooled mortgages and sold them as mortgage-backed securities to investors interested in stable long-term income. These securities became so popular that private companies began to buy up mortgages and package them as securities as well. By the mid-2000s, originating lenders kept very few mortgages; most were packaged and sold within the first few months after origination.
One danger of the widespread prevalence of mortgage-backed securities is that, since lenders did not hold onto the mortgages, they were not responsible for the long-term stability of the loans. As credit standards lowered during the 2000s, lenders were able to spread these high-risk loans among higher-quality loans and remove their liability, which prompted them to make even more high-risk loans. Eventually, securities purchased by insurance companies, pension funds and mutual funds contained a significant amount of these high-risk loans. When the housing market collapsed, homes lost large portions of their value and homeowners began defaulting on their mortgages, causing these securities to lose a great deal of their value and creating huge investor losses.
Lenders also saw the disadvantages of mortgage-backed securities, since they had to get rid of low-risk, profitable loans in order to balance out the high-risk loans they included in these pools. As the losses mounted, the credit ratings of these lenders decreased as well, and they had few quality loans left in their portfolio. This drop in credit rating made it harder for them to get funds, and they had to slow down or stop lending, causing some lenders to go out of business.
Ginnie Mae, Freddie Mac and Fannie Mae, who guaranteed many of these securities, paid out large sums of money to cover the losses on these securities. The losses became so bad that a government bailout was required. This bailout cost taxpayers billions of dollars.
Homeowners also lose out from mortgage-backed securities, since the buying and selling of their loan is a continual process. A mortgage loan may change holders several times over the course of its term. Each time the loan is sold and purchased, there is the potential for problems with title, escrow and account information. Your property taxes and homeowner’s insurance may go unpaid because the bill was sent to the wrong mortgage servicing company. Your escrow account may not transfer properly, and you could lose funds. Your loan terms or account number could be recorded improperly, which could lead to billing problems. It could also create difficulties with a smooth chain of title and affect your ability to sell your home.
Bramble, Laura. "The Disadvantages of Mortgage Securitization." Home Guides | SF Gate, http://homeguides.sfgate.com/disadvantages-mortgage-securitization-6613.html. Accessed 19 April 2019.
|
'''
Created on May 5, 2015
@author: julian
'''
import math
import random
import matplotlib.pyplot as plt
from Chromosome import *
REGION_SIZE = 100.0
NUMBER_OF_CITIES = 10
NUMBER_OF_CHROMOSOMES = 100
NUMBER_OF_EPOCHS = 100
MUTATION_PROBABILITY = 0.25
RANDOM_SEED = 9876
cities = {}
def plot_cities(chromosome=None, epoch=None):
plt.xlim((-REGION_SIZE*0.1,REGION_SIZE*1.1))
plt.ylim((-REGION_SIZE*0.1,REGION_SIZE*1.1))
plt.scatter([p[0] for p in cities.itervalues()], [p[1] for p in cities.itervalues()])
for name,xy in cities.iteritems():
plt.annotate(name,xy=xy,xytext=(xy[0]+1,xy[1]-1))
if chromosome:
plt.plot([cities[c][0] for c in chromosome], [cities[c][1] for c in chromosome])
plt.text(cities[chromosome[0]][0] + 2.0, cities[chromosome[0]][1] + 2.0, 'Start')
plt.text(cities[chromosome[-1]][0] + 2.0, cities[chromosome[-1]][1] + 2.0, 'Finish')
if epoch:
plt.title('EPOCH '+str(epoch))
plt.show()
def main():
# create a set of cities in a spiral in a square region
# choose an angle for the start of the spiral
phi = 0
# set the spiral twist loop number
loops = 1.5
# calculate the change in angle for each city
dphi = math.pi * loops / float(NUMBER_OF_CITIES)
for i in range(NUMBER_OF_CITIES):
# get radius of city centre
r = 0.5*REGION_SIZE*float(i+1)/float(NUMBER_OF_CITIES)
phi += dphi
xcity = 0.5*REGION_SIZE + r*math.cos(phi);
ycity = 0.5*REGION_SIZE + r*math.sin(phi);
city_name = chr(i+65)
# add this city to the dictionary
cities[city_name] = (xcity, ycity)
#plot_cities()
# create a population of chromosomes
# each chromosome will get a random ordering of cities to visit
chromosomes = []
random.seed(RANDOM_SEED)
for i in range(NUMBER_OF_CHROMOSOMES):
city_list = list(cities.keys())
random.shuffle(city_list)
chromosomes.append(Chromosome(city_list))
# we define a function which computes the path length for a given order of cities
def path_length(city_list, cities):
sum = 0.0
for i in range(1,len(city_list)):
(x1,y1) = cities[city_list[i-1]]
(x2,y2) = cities[city_list[i]]
sum += math.sqrt((x1-x2)**2+(y1-y2)**2)
return sum
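    # sanity check (hypothetical coordinates): with cities {'A': (0.0, 0.0), 'B': (3.0, 4.0)},
    # path_length(['A', 'B'], cities) returns 5.0, the 3-4-5 right triangle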
epoch = 1
while True:
# find the path length for each chromosome
path_lengths = {}
for c in chromosomes:
path_lengths[c] = path_length(c.chromosome, cities)
sorted_chromosomes = sorted(path_lengths, key=path_lengths.get, reverse=False)
print 'Epoch',epoch,'Best chromosome',sorted_chromosomes[0].chromosome_string(), \
path_lengths[sorted_chromosomes[0]]
epoch += 1
if epoch > NUMBER_OF_EPOCHS:
break
# select the mating population
mating_population = sorted_chromosomes[:NUMBER_OF_CHROMOSOMES/2]
# have the population mate in pairs, to produce offspring
offspring_population = []
while len(offspring_population) < NUMBER_OF_CHROMOSOMES/2:
mother = random.choice(mating_population)
father = random.choice(mating_population)
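            # mate_no_duplicates is defined in Chromosome (imported above); the
            # name suggests a crossover that keeps each city exactly once per
            # offspring, which is required for a valid travelling-salesman tour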
(offspring1, offspring2) = mother.mate_no_duplicates(father)
# mutate the offspring with some probability
if random.random() < MUTATION_PROBABILITY:
offspring1.mutate_swap()
if random.random() < MUTATION_PROBABILITY:
offspring2.mutate_swap()
offspring_population.append(offspring1)
offspring_population.append(offspring2)
# the new population is the mating population plus the offspring
chromosomes = mating_population + offspring_population
    # we plot the best solution found at the stopping condition
    plot_cities(sorted_chromosomes[0].chromosome, str(epoch-1) + ' Best ' + sorted_chromosomes[0].chromosome_string())
if __name__ == '__main__':
main()
|
Don’t you agree that the phrase “I will wear what I like” is the perfect mantra for personal style aficionadas (and aficionados!) dear readers?!
After taking exception to a journalist’s insistence that women over a certain age should no longer wear things like leopard print and oversized sunglasses (huh?), my friend Catherine of Not Dressed As Lamb responded with an #iwillwearwhatilike campaign on Instagram…and a weekly blog linkup devoted to exactly that theme!!
We might not all agree about what’s considered attractive when it comes to our wardrobe choices – I bet I get some eye rolls when I’m wearing this shaggy Free People cardi! – but “pretty” isn’t always what it’s about; real personal style is a means of creative self-expression through fashion choices – and it should never be hampered by age or trends…or even gender, for that matter!!
This is such a beautiful look! I really love the layering 🙂 You always look amazing 🙂 I hope you and your family are having a wonderful Christmas season!
|
import common.constant as c
import time
import numpy as np
import pandas as pd
from common.base import Base
from functools import reduce
from itertools import combinations
from itertools import groupby
class DependencyGraph(Base):
# The dependency graph
dep_graph = None
def __init__(
self,
data = None,
edges = None,
noise_flag = True,
white_list = [],
eps1_val = c.EPSILON_1,
cramer = 0.2):
"""
__init__
Input:
1. DataUtils.Data
Procedure
1. Convert the given data frame to dataframe in R
2. Convert the given Domain(in python dict) to ListVector
        3. Instantiate the attribute dependencies.
"""
self.LOG = Base.get_logger("DepGraph")
self.noise_flag = noise_flag
self.eps1_val = eps1_val
self.cramer = cramer
self.data = data
if data is None:
self.edges = edges
else:
self.edges = self._run()
self.white_list = white_list
def get_dep_edges(self, display = True):
pairwise_white_list = reduce(lambda acc, curr: acc+curr
,[list(combinations(cluster, 2)) for cluster in self.white_list]
,[])
if display is False:
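            # _get_edges_in_r is not defined in this file; it is assumed to be
            # an R-interop helper provided elsewhere in the project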
return _get_edges_in_r(self.edges + pairwise_white_list)
return self.edges + pairwise_white_list
def set_white_list(self, white_list):
self.white_list = white_list
return self
def _run(self):
# get pandas df
if self.data is None:
raise Exception("The data is not specified.")
pandas_df = self.data.get_df()
# get domain
domains = self.data.get_domains()
self.LOG.info("Starting to compute Dep-Graph with eps1: %.2f..." % self.eps1_val)
start = time.time()
# attributes' name
attr_names = domains.keys()
# combinations of 2
comb = combinations(attr_names, 2)
mi_scale = self.compute_mi_scale()
noise_thresh_cv2 = np.random.laplace(0, mi_scale, 1)
filtered_pairs = []
for attrs_pair in comb:
col1_val = pandas_df[attrs_pair[0]]
col2_val = pandas_df[attrs_pair[1]]
if self.g_test(col1_val, col2_val, mi_scale, noise_thresh_cv2):
filtered_pairs += [attrs_pair]
end = time.time()
self.LOG.info("Compute Dep-Graph complete in %d seconds." % (end-start))
return filtered_pairs
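    # g_test below is a noisy independence test: a Laplace-perturbed mutual
    # information estimate (left-hand side) is compared against a threshold
    # derived from the Cramer's V cutoff (right-hand side); attribute pairs
    # that pass are kept as edges of the dependency graph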
def g_test(self, col1, col2, mi_scale, noise_thresh_cv2):
xmat = self.find_crosstab(col1, col2)
mi = self.get_mi(xmat)
attr1_lvs = sorted(set(col1))
attr2_lvs = sorted(set(col2))
min_length = min(len(attr1_lvs), len(attr2_lvs)) - 1
cv2_lh = mi + np.random.laplace(0, mi_scale, 1)
cv2_rh = (self.cramer ** 2) * min_length/2. + noise_thresh_cv2
return cv2_lh >= cv2_rh
def find_crosstab(self, col1, col2):
xtab = pd.crosstab(col1, col2)
return np.asarray(xtab)
def get_expected_sum(self, xmat):
rsums = np.sum(xmat, axis = 0).reshape(-1,1)
csums = np.sum(xmat, axis = 1).reshape(1,-1)
expected_sum = rsums * csums / float(np.sum(csums))
return expected_sum
def get_mi(self, xmat):
xmat = xmat / float(np.sum(xmat))
expected_sum = self.get_expected_sum(xmat)
summand = xmat/expected_sum.T
zeros = np.where(summand == 0)
summand[zeros] = 1
return np.sum(xmat * np.log(summand))
def compute_mi_scale(self):
eps_alpha_1 = self.amplify_epsilon_under_sampling(self.eps1_val)
sensitivity_scale_mi = self.compute_mi_sensitivity_scale(self.data.get_nrows(), False)
scale_mi = 2 * sensitivity_scale_mi / eps_alpha_1
return scale_mi
def amplify_epsilon_under_sampling(self, eps1, beta = 1):
eps_alpha = np.log(np.exp(1) ** (eps1) -1 + beta) - np.log(beta)
return eps_alpha
def compute_mi_sensitivity_scale(self, N, all_binary):
N = float(N)
if all_binary is True:
sen_scale = (1/N) * np.log(N) + (( N-1 )/N) * np.log(N/(N-1))
else:
sen_scale = (2/N) * np.log((N + 1)/2) + ((N-1)/N) * np.log((N+1)/(N-1))
return sen_scale
|
This wrapper is designed to provide warmth and comfort to your baby when they sleep. Made from soft fabric, it will help keep your baby at ease. Wrapper features cute embroidery on the hood.
|
from django import template
from django.conf import settings
from django.template.defaultfilters import stringfilter
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from opaque_keys.edx.keys import CourseKey
from slugify import slugify
register = template.Library()
@register.simple_tag
def settings_value(name):
"""
Retrieve a value from settings.
If setting is not found, None is returned.
"""
    return getattr(settings, name, None)
@register.filter
def metric_percentage(value):
# Translators: Simply move the percent symbol (%) to the correct location. Do NOT translate the word statistic.
percent_stat = _('{statistic}%')
percent = '0'
if value:
if value < 0.01:
percent = '< 1'
else:
percent = '{0}'.format(round(value, 3) * 100)
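    # e.g. a value of 0.5 renders as '50.0%'; values below 0.01 render as '< 1%'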
# pylint: disable=no-member
return percent_stat.format(statistic=percent)
@register.tag(name='captureas')
def do_captureas(parser, token):
"""
Capture contents of block into context.
Source:
https://djangosnippets.org/snippets/545/
Example:
{% captureas foo %}{{ foo.value }}-suffix{% endcaptureas %}
{% if foo in bar %}{% endif %}
"""
try:
__, args = token.contents.split(None, 1)
except ValueError:
raise template.TemplateSyntaxError("'captureas' node requires a variable name.")
nodelist = parser.parse(('endcaptureas',))
parser.delete_first_token()
return CaptureasNode(nodelist, args)
class CaptureasNode(template.Node):
def __init__(self, nodelist, varname):
self.nodelist = nodelist
self.varname = varname
def render(self, context):
output = mark_safe(self.nodelist.render(context).strip())
context[self.varname] = output
return ''
@register.inclusion_tag('summary_point.html')
def summary_point(value, label, subheading=None, tooltip=None):
return {
'value': value,
'label': label,
'subheading': subheading,
'tooltip': tooltip
}
@register.inclusion_tag('section_error.html')
def show_chart_error(background_class=''):
"""
Returns the error section with default context.
Arguments
background_class -- CSS class to add to the background style
(e.g. 'white-background'). Default background is gray.
"""
return _get_base_error_context('chart', background_class)
@register.inclusion_tag('section_error.html')
def show_table_error():
return _get_base_error_context('table')
@register.inclusion_tag('section_error.html')
def show_metrics_error():
return _get_base_error_context('metrics')
def _get_base_error_context(content_type, background_class=''):
return {
'content_type': content_type,
'load_error_message': settings.DOCUMENTATION_LOAD_ERROR_MESSAGE,
'background_class': background_class
}
@register.filter
def format_course_key(course_key, separator=u'/'):
if isinstance(course_key, basestring):
course_key = CourseKey.from_string(course_key)
return separator.join([course_key.org, course_key.course, course_key.run])
@register.filter(is_safe=True)
@stringfilter
def unicode_slugify(value):
return slugify(value)
|
After successfully redoing the login and registration, I had to add the UE to registration.php and re-upload the entire webassist folder to get it to work. Upon testing, the email receipt for the confirm page no longer worked, so I went to confirm.php to try to apply UE to create the email receipt. I ended up creating a new email form.
SpecialInstruction is also in bindings as a Form Variable and also WA Validated entries.
The dynamic text is placed properly into the page, but upon testing it will not work - it will not display the text box on the email receipt.
What is going to make this work?
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-29 03:15
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comment_message', models.CharField(blank=True, default='', max_length=140, null=True)),
('comment_user_name', models.CharField(blank=True, default='', max_length=70, null=True)),
('comment_date', models.DateTimeField(default=django.utils.timezone.now)),
('comment_mark', models.CharField(blank=True, default='comment', max_length=10, null=True)),
('comment_user_avatarurl', models.URLField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='Spot',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('city', models.CharField(max_length=70)),
('name', models.CharField(max_length=70)),
('longitude', models.FloatField()),
('latitude', models.FloatField()),
('download_speed', models.CharField(blank=True, default='', max_length=70, null=True)),
('upload_speed', models.CharField(blank=True, default='', max_length=70, null=True)),
('speed_test_link', models.URLField(blank=True, default='', max_length=100, null=True)),
('price_indication', models.CharField(blank=True, default='', max_length=70, null=True)),
('bathroom', models.BooleanField(default=False)),
('commit_user_name', models.CharField(blank=True, default='', max_length=70, null=True)),
('commit_message', models.CharField(blank=True, max_length=140, null=True)),
('commit_date', models.DateTimeField(default=django.utils.timezone.now)),
],
),
]
|
Scroll down to see "Anamorphic illusions drawn in a special distortion in order to create an impression of 3 dimensions when seen from one particular viewpoint." Insanely great.
An optical illusion; a mind hack, if you will.
|
# -*- coding: utf-8 -*-
from django.views.generic.list import ListView
from django.views.generic import TemplateView
from posts.models import Post, PostTag
from posts.views import PostListView
# Home page
class IndexView(PostListView):
template_name = "website/index.html"
# FAQ
class FAQView(TemplateView):
template_name = "website/faq.html"
# For XML serving
class XmlView(ListView):
queryset = Post.objects.all()
    def get_context_data(self, *args, **kwargs):
        context = super(XmlView, self).get_context_data(*args, **kwargs)
        context['posts'] = Post.objects.order_by('-create_date')
        return context
# Sitemap (xml)
class SiteMapXML(XmlView):
template_name = "sitemap.xml"
content_type = "application/xml"
def get_context_data(self, *args, **kwargs):
context = super(SiteMapXML, self).get_context_data(*args, **kwargs)
context['tags'] = PostTag.objects.all()
return context
# Atom Feed (xml)
class AtomFeedXML(XmlView):
template_name = "atom.xml"
content_type = "application/atom+xml"
# RSS Feed (xml)
class RssFeedXML(XmlView):
template_name = "rss.xml"
content_type = "application/rss+xml"
|
The wait for the ICC Cricket World Cup 2011 is finally over: today, Saturday 19th February, the first match of the biggest cricket tournament takes place between India and Bangladesh. The World Cup will continue for 45 days. Cricket is a very popular game in more than 20 countries and has billions of fans. People of the subcontinent, especially in Pakistan and India, are crazy about cricket. If you are one of those people who don't have access to a TV, or you are at your office where a TV is not present, there is a solution for you: watch World Cup 2011 on the Internet for free.
Live streaming of many TV channels showing all the matches of the World Cup is freely available online. You do not need to pay any money or buy any device to watch these matches. Below I am sharing some links where you can watch World Cup matches for free.
|
#!/usr/bin/env python
# Copyright (c) 2012, Madefire Inc.
# All rights reserved.
from __future__ import absolute_import
from threading import Event, Thread
from time import sleep
import SocketServer
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
PREPARE_DELAY = 2.5
IDLE_DELAY = 2.5
class Library:
def __init__(self, videos):
self._videos = videos
self._index = len(videos) - 1
def current(self):
return self._videos[self._index]
def next(self):
self._index += 1
if self._index == len(self._videos):
self._index = 0
return self.current()
event = Event()
# fill in the following array with the names of your videos and their
# durations, rounding up is better than down.
library = Library([('<video-name>', <video-duration-in-seconds>), ...])
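# for example (hypothetical names and durations, in seconds):
# library = Library([('intro', 120), ('feature', 5400)])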
class PlayRequestHandler(SocketServer.BaseRequestHandler):
def setup(self):
logger.info('gained connection to %s', self.client_address)
def handle(self):
while True:
event.wait()
video = library.current()
try:
logger.debug('sending %s to %s', video[0], self.client_address)
self.request.send('prepare %s' % video[0])
sleep(PREPARE_DELAY)
self.request.send('play')
except:
return
def finish(self):
logger.info('lost connection to %s', self.client_address)
class Director(Thread):
def __init__(self):
self._running = True
Thread.__init__(self)
def run(self):
sleep(1)
while self._running:
video = library.next()
logger.info('playing %s for %d seconds', video[0], video[1])
event.set()
event.clear()
sleep(video[1] + PREPARE_DELAY + IDLE_DELAY)
logger.info('director finished')
def shutdown(self):
self._running = False
director = Director()
director.start()
class Server(SocketServer.ThreadingTCPServer):
    # must be a class attribute: TCPServer reads allow_reuse_address during
    # __init__, before binding the socket, so setting it afterwards is too late
    allow_reuse_address = True
server = Server(('', 3333), PlayRequestHandler)
try:
logger.info('serving')
server.serve_forever()
except KeyboardInterrupt:
logger.info('shutting down')
director.shutdown()
server.shutdown()
director.join()
logger.info('done')
|
A new cell phone app will show illegal aliens the best route for entering the United States illegally. It will exploit weaknesses in our border security, telling them where the sensors, agents, and cameras are located.
It’s called Bienvenidos — welcome. Sure, come on in, anyone, for any reason. We have welfare galore! It is what the Democrats want for this nation.
It will make it easier for drug cartels and terrorists to come across the border.
It’s actually a great idea. ICE can set up the paddy wagons at these locations.
This app violates our immigration laws!
So Sessions does not need much of an investigation; he can get indictments fast.
|
# Copyright 2015-2016, Tresys Technology, LLC
#
# This file is part of SETools.
#
# SETools is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1 of
# the License, or (at your option) any later version.
#
# SETools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SETools. If not, see
# <http://www.gnu.org/licenses/>.
#
import logging
from PyQt5.QtCore import Qt, QSortFilterProxyModel, QStringListModel, QThread
from PyQt5.QtGui import QPalette, QTextCursor
from PyQt5.QtWidgets import QCompleter, QHeaderView, QMessageBox, QProgressDialog
from setools import TERuleQuery
from ..logtosignal import LogHandlerToSignal
from ..models import PermListModel, SEToolsListModel, invert_list_selection
from ..terulemodel import TERuleTableModel
from .analysistab import AnalysisTab
from .exception import TabFieldError
from .queryupdater import QueryResultsUpdater
from .workspace import load_checkboxes, load_lineedits, load_listviews, load_textedits, \
save_checkboxes, save_lineedits, save_listviews, save_textedits
class TERuleQueryTab(AnalysisTab):
"""A Type Enforcement rule query."""
def __init__(self, parent, policy, perm_map):
super(TERuleQueryTab, self).__init__(parent)
self.log = logging.getLogger(__name__)
self.policy = policy
self.query = TERuleQuery(policy)
self.setupUi()
def __del__(self):
self.thread.quit()
self.thread.wait(5000)
logging.getLogger("setools.terulequery").removeHandler(self.handler)
def setupUi(self):
self.load_ui("apol/terulequery.ui")
# set up source/target autocompletion
typeattr_completion_list = [str(t) for t in self.policy.types()]
typeattr_completion_list.extend(str(a) for a in self.policy.typeattributes())
typeattr_completer_model = QStringListModel(self)
typeattr_completer_model.setStringList(sorted(typeattr_completion_list))
self.typeattr_completion = QCompleter()
self.typeattr_completion.setModel(typeattr_completer_model)
self.source.setCompleter(self.typeattr_completion)
self.target.setCompleter(self.typeattr_completion)
# set up default autocompletion
type_completion_list = [str(t) for t in self.policy.types()]
type_completer_model = QStringListModel(self)
type_completer_model.setStringList(sorted(type_completion_list))
self.type_completion = QCompleter()
self.type_completion.setModel(type_completer_model)
self.default_type.setCompleter(self.type_completion)
# setup indications of errors on source/target/default
self.errors = set()
self.orig_palette = self.source.palette()
self.error_palette = self.source.palette()
self.error_palette.setColor(QPalette.Base, Qt.red)
self.clear_source_error()
self.clear_target_error()
self.clear_default_error()
self.clear_xperm_error()
# populate class list
self.class_model = SEToolsListModel(self)
self.class_model.item_list = sorted(self.policy.classes())
self.tclass.setModel(self.class_model)
# populate perm list
self.perms_model = PermListModel(self, self.policy)
self.perms.setModel(self.perms_model)
# populate bool list
self.bool_model = SEToolsListModel(self)
self.bool_model.item_list = sorted(self.policy.bools())
self.bool_criteria.setModel(self.bool_model)
# set up results
self.table_results_model = TERuleTableModel(self)
self.sort_proxy = QSortFilterProxyModel(self)
self.sort_proxy.setSourceModel(self.table_results_model)
self.table_results.setModel(self.sort_proxy)
self.table_results.sortByColumn(0, Qt.AscendingOrder)
# set up processing thread
self.thread = QThread()
self.worker = QueryResultsUpdater(self.query, self.table_results_model)
self.worker.moveToThread(self.thread)
self.worker.raw_line.connect(self.raw_results.appendPlainText)
self.worker.finished.connect(self.update_complete)
self.worker.finished.connect(self.thread.quit)
self.thread.started.connect(self.worker.update)
# create a "busy, please wait" dialog
self.busy = QProgressDialog(self)
self.busy.setModal(True)
self.busy.setRange(0, 0)
self.busy.setMinimumDuration(0)
self.busy.canceled.connect(self.thread.requestInterruption)
self.busy.reset()
# update busy dialog from query INFO logs
self.handler = LogHandlerToSignal()
self.handler.message.connect(self.busy.setLabelText)
logging.getLogger("setools.terulequery").addHandler(self.handler)
# Ensure settings are consistent with the initial .ui state
self.set_source_regex(self.source_regex.isChecked())
self.set_target_regex(self.target_regex.isChecked())
self.set_default_regex(self.default_regex.isChecked())
self.toggle_xperm_criteria()
self.criteria_frame.setHidden(not self.criteria_expander.isChecked())
self.notes.setHidden(not self.notes_expander.isChecked())
# connect signals
self.buttonBox.clicked.connect(self.run)
self.allowxperm.toggled.connect(self.toggle_xperm_criteria)
self.auditallowxperm.toggled.connect(self.toggle_xperm_criteria)
self.neverallowxperm.toggled.connect(self.toggle_xperm_criteria)
self.dontauditxperm.toggled.connect(self.toggle_xperm_criteria)
self.clear_ruletypes.clicked.connect(self.clear_all_ruletypes)
self.all_ruletypes.clicked.connect(self.set_all_ruletypes)
self.source.textEdited.connect(self.clear_source_error)
self.source.editingFinished.connect(self.set_source)
self.source_regex.toggled.connect(self.set_source_regex)
self.target.textEdited.connect(self.clear_target_error)
self.target.editingFinished.connect(self.set_target)
self.target_regex.toggled.connect(self.set_target_regex)
self.tclass.selectionModel().selectionChanged.connect(self.set_tclass)
self.invert_class.clicked.connect(self.invert_tclass_selection)
self.perms.selectionModel().selectionChanged.connect(self.set_perms)
self.invert_perms.clicked.connect(self.invert_perms_selection)
self.xperms.textEdited.connect(self.clear_xperm_error)
self.xperms.editingFinished.connect(self.set_xperm)
self.default_type.textEdited.connect(self.clear_default_error)
self.default_type.editingFinished.connect(self.set_default_type)
self.default_regex.toggled.connect(self.set_default_regex)
self.bool_criteria.selectionModel().selectionChanged.connect(self.set_bools)
#
# Ruletype criteria
#
def _set_ruletypes(self, value):
self.allow.setChecked(value)
self.allowxperm.setChecked(value)
self.auditallow.setChecked(value)
self.auditallowxperm.setChecked(value)
self.neverallow.setChecked(value)
self.neverallowxperm.setChecked(value)
self.dontaudit.setChecked(value)
self.dontauditxperm.setChecked(value)
self.type_transition.setChecked(value)
self.type_member.setChecked(value)
self.type_change.setChecked(value)
def set_all_ruletypes(self):
self._set_ruletypes(True)
def clear_all_ruletypes(self):
self._set_ruletypes(False)
#
# Source criteria
#
def clear_source_error(self):
self.clear_criteria_error(self.source, "Match the source type/attribute of the rule.")
def set_source(self):
try:
self.query.source = self.source.text()
except Exception as ex:
self.log.error("Source type/attribute error: {0}".format(ex))
self.set_criteria_error(self.source, ex)
def set_source_regex(self, state):
self.log.debug("Setting source_regex {0}".format(state))
self.query.source_regex = state
self.clear_source_error()
self.set_source()
#
# Target criteria
#
def clear_target_error(self):
self.clear_criteria_error(self.target, "Match the target type/attribute of the rule.")
def set_target(self):
try:
self.query.target = self.target.text()
except Exception as ex:
self.log.error("Target type/attribute error: {0}".format(ex))
self.set_criteria_error(self.target, ex)
def set_target_regex(self, state):
self.log.debug("Setting target_regex {0}".format(state))
self.query.target_regex = state
self.clear_target_error()
self.set_target()
#
# Class criteria
#
def set_tclass(self):
selected_classes = []
for index in self.tclass.selectionModel().selectedIndexes():
selected_classes.append(self.class_model.data(index, Qt.UserRole))
self.query.tclass = selected_classes
self.perms_model.set_classes(selected_classes)
def invert_tclass_selection(self):
invert_list_selection(self.tclass.selectionModel())
#
# Permissions criteria
#
def set_perms(self):
selected_perms = []
for index in self.perms.selectionModel().selectedIndexes():
selected_perms.append(self.perms_model.data(index, Qt.UserRole))
self.query.perms = selected_perms
def invert_perms_selection(self):
invert_list_selection(self.perms.selectionModel())
#
# Extended permission criteria
#
def toggle_xperm_criteria(self):
mode = any((self.allowxperm.isChecked(),
self.auditallowxperm.isChecked(),
self.neverallowxperm.isChecked(),
self.dontauditxperm.isChecked()))
self.xperms.setEnabled(mode)
self.xperms_equal.setEnabled(mode)
def clear_xperm_error(self):
self.clear_criteria_error(self.xperms,
"Match the extended permissions of the rule. "
"Comma-separated permissions or ranges of permissions.")
def set_xperm(self):
xperms = []
try:
text = self.xperms.text()
if text:
for item in self.xperms.text().split(","):
rng = item.split("-")
if len(rng) == 2:
xperms.append((int(rng[0], base=16), int(rng[1], base=16)))
elif len(rng) == 1:
xperms.append((int(rng[0], base=16), int(rng[0], base=16)))
else:
raise ValueError("Enter an extended permission or extended permission "
"range, e.g. 0x5411 or 0x8800-0x88ff.")
self.query.xperms = xperms
else:
self.query.xperms = None
except Exception as ex:
self.log.error("Extended permissions error: {0}".format(ex))
self.set_criteria_error(self.xperms, ex)
#
# Default criteria
#
def clear_default_error(self):
        self.clear_criteria_error(self.default_type, "Match the default type of the rule.")
def set_default_type(self):
self.query.default_regex = self.default_regex.isChecked()
try:
self.query.default = self.default_type.text()
except Exception as ex:
self.log.error("Default type error: {0}".format(ex))
self.set_criteria_error(self.default_type, ex)
def set_default_regex(self, state):
self.log.debug("Setting default_regex {0}".format(state))
self.query.default_regex = state
self.clear_default_error()
self.set_default_type()
#
# Boolean criteria
#
def set_bools(self):
selected_bools = []
for index in self.bool_criteria.selectionModel().selectedIndexes():
selected_bools.append(self.bool_model.data(index, Qt.UserRole))
self.query.boolean = selected_bools
#
# Save/Load tab
#
def save(self):
"""Return a dictionary of settings."""
if self.errors:
raise TabFieldError("Field(s) are in error: {0}".
format(" ".join(o.objectName() for o in self.errors)))
settings = {}
save_checkboxes(self, settings, ["criteria_expander", "notes_expander",
"allow", "allowxperm",
"auditallow", "auditallowxperm",
"neverallow", "neverallowxperm",
"dontaudit", "dontauditxperm",
"type_transition", "type_change", "type_member",
"source_indirect", "source_regex",
"target_indirect", "target_regex",
"perms_subset",
"xperms_equal",
"default_regex",
"bools_equal"])
save_lineedits(self, settings, ["source", "target", "xperms", "default_type"])
save_listviews(self, settings, ["tclass", "perms", "bool_criteria"])
save_textedits(self, settings, ["notes"])
return settings
def load(self, settings):
load_checkboxes(self, settings, ["allow", "allowxperm",
"auditallow", "auditallowxperm",
"neverallow", "neverallowxperm",
"dontaudit", "dontauditxperm",
"type_transition", "type_change", "type_member",
"criteria_expander", "notes_expander",
"source_indirect", "source_regex",
"target_indirect", "target_regex",
"perms_subset",
"xperms_equal",
"default_regex",
"bools_equal"])
load_lineedits(self, settings, ["source", "target", "xperms", "default_type"])
load_listviews(self, settings, ["tclass", "perms", "bool_criteria"])
load_textedits(self, settings, ["notes"])
#
# Results runner
#
def run(self, button):
# right now there is only one button.
rule_types = []
max_results = 0
if self.allow.isChecked():
rule_types.append("allow")
max_results += self.policy.allow_count
if self.allowxperm.isChecked():
rule_types.append("allowxperm")
max_results += self.policy.allowxperm_count
if self.auditallow.isChecked():
rule_types.append("auditallow")
max_results += self.policy.auditallow_count
if self.auditallowxperm.isChecked():
rule_types.append("auditallowxperm")
max_results += self.policy.auditallowxperm_count
if self.neverallow.isChecked():
rule_types.append("neverallow")
max_results += self.policy.neverallow_count
if self.neverallowxperm.isChecked():
rule_types.append("neverallowxperm")
max_results += self.policy.neverallowxperm_count
if self.dontaudit.isChecked():
rule_types.append("dontaudit")
max_results += self.policy.dontaudit_count
if self.dontauditxperm.isChecked():
rule_types.append("dontauditxperm")
max_results += self.policy.dontauditxperm_count
if self.type_transition.isChecked():
rule_types.append("type_transition")
max_results += self.policy.type_transition_count
if self.type_member.isChecked():
rule_types.append("type_member")
max_results += self.policy.type_member_count
if self.type_change.isChecked():
rule_types.append("type_change")
max_results += self.policy.type_change_count
self.query.ruletype = rule_types
self.query.source_indirect = self.source_indirect.isChecked()
self.query.target_indirect = self.target_indirect.isChecked()
self.query.perms_subset = self.perms_subset.isChecked()
self.query.boolean_equal = self.bools_equal.isChecked()
# if query is broad, show warning.
if not any((self.query.source, self.query.target, self.query.tclass, self.query.perms,
self.query.xperms, self.query.default, self.query.boolean)) \
and max_results > 1000:
reply = QMessageBox.question(
self, "Continue?",
"This is a broad query, estimated to return {0} results. Continue?".
format(max_results), QMessageBox.Yes | QMessageBox.No)
if reply == QMessageBox.No:
return
# start processing
self.busy.setLabelText("Processing query...")
self.busy.show()
self.raw_results.clear()
self.thread.start()
def update_complete(self, count):
self.log.info("{0} type enforcement rule(s) found.".format(count))
# update sizes/location of result displays
if not self.busy.wasCanceled():
self.busy.setLabelText("Resizing the result table's columns; GUI may be unresponsive")
self.busy.repaint()
self.table_results.resizeColumnsToContents()
# If the permissions column width is too long, pull back
# to a reasonable size
header = self.table_results.horizontalHeader()
if header.sectionSize(4) > 400:
header.resizeSection(4, 400)
if not self.busy.wasCanceled():
self.busy.setLabelText("Resizing the result table's rows; GUI may be unresponsive")
self.busy.repaint()
self.table_results.resizeRowsToContents()
if not self.busy.wasCanceled():
self.busy.setLabelText("Moving the raw result to top; GUI may be unresponsive")
self.busy.repaint()
self.raw_results.moveCursor(QTextCursor.Start)
self.busy.reset()
|
Mission Statement: HMHD is a healthcare district serving the people of Calexico, Calif. Its mission is to be the area’s leading healthcare organization by being a good steward of its finances, a catalyst for the delivery of new and enhanced healthcare services, a consistent supporter of health and wellness, and a reliable healthcare resource for District residents.
Advancing Health and Wellness: HMHD is focusing on implementing and sponsoring programs that increase access and awareness to healthcare services and education for the underserved in the surrounding community by practicing the following tenets:
1) partnering successfully with other healthcare providers to enhance the quality and breadth of healthcare services available to District residents and nearby communities;
2) promoting, supporting, and providing healthcare services related primarily to disease prevention, health education, and wellness;
3) selectively providing financial support for healthcare initiatives that are consistent with the District’s vision and mission;
4) maximizing the value derived from each taxpayer dollar spent through the careful planning and implementation of all Board-approved initiatives; and
5) conservatively managing its assets and resources to ensure the long-term financial viability of the organization.
Expanding urgent care services at one of the local rural health clinics to 24 hours allows City residents to avoid driving 15-25 miles to one of the acute care hospitals’ emergency departments for after-hours care. This was done in coordination with Pioneers Memorial Hospital’s Calexico rural health clinic.
HMHD brought a telemedicine program to the community, giving the population much-needed access to specialty care, in this case endocrinology (diabetes care). This program is being done in coordination with Clinicas de Salud’s Calexico FQHC, as well as UC San Diego Medical Center’s telemedicine department.
HMHD also recently worked with the Partners in Care Foundation to bring the Chronic Disease Self-Management Program to the area. The program allows locally trained participants to lead workshops teaching local chronic disease sufferers and their family members to better care for their (or their family member’s) condition. The program is now offered in both English and Spanish.
HMHD has been diligently working to bring a Program of All-Inclusive Care for the Elderly (PACE) to Calexico. Discussions are continuing with St. Paul’s PACE from San Diego to have them bring the state’s first rural PACE to Calexico.
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
from novaclient.tests import fakes
from novaclient.tests.fixture_data import base
class V1(base.Fixture):
base_url = 'images'
def setUp(self):
super(V1, self).setUp()
get_images = {
'images': [
{'id': 1, 'name': 'CentOS 5.2'},
{'id': 2, 'name': 'My Server Backup'}
]
}
headers = {'Content-Type': 'application/json'}
self.requests.register_uri('GET', self.url(),
json=get_images,
headers=headers)
image_1 = {
'id': 1,
'name': 'CentOS 5.2',
"updated": "2010-10-10T12:00:00Z",
"created": "2010-08-10T12:00:00Z",
"status": "ACTIVE",
"metadata": {
"test_key": "test_value",
},
"links": {},
}
image_2 = {
"id": 2,
"name": "My Server Backup",
"serverId": 1234,
"updated": "2010-10-10T12:00:00Z",
"created": "2010-08-10T12:00:00Z",
"status": "SAVING",
"progress": 80,
"links": {},
}
self.requests.register_uri('GET', self.url('detail'),
json={'images': [image_1, image_2]},
headers=headers)
self.requests.register_uri('GET', self.url(1),
json={'image': image_1},
headers=headers)
self.requests.register_uri('GET', self.url(2),
json={'image': image_2},
headers=headers)
self.requests.register_uri('GET', self.url(456),
json={'image': image_2},
headers=headers)
def post_images(request, context):
body = jsonutils.loads(request.body)
assert list(body) == ['image']
fakes.assert_has_keys(body['image'], required=['serverId', 'name'])
            return {'image': image_1}
self.requests.register_uri('POST', self.url(),
json=post_images,
headers=headers,
status_code=202)
def post_images_1_metadata(request, context):
body = jsonutils.loads(request.body)
assert list(body) == ['metadata']
fakes.assert_has_keys(body['metadata'], required=['test_key'])
return {'metadata': image_1['metadata']}
self.requests.register_uri('POST', self.url(1, 'metadata'),
json=post_images_1_metadata,
headers=headers)
for u in (1, 2, '1/metadata/test_key'):
self.requests.register_uri('DELETE', self.url(u), status_code=204)
image_headers = {'x-image-meta-id': '1',
'x-image-meta-name': 'CentOS 5.2',
'x-image-meta-updated': '2010-10-10T12:00:00Z',
'x-image-meta-created': '2010-10-10T12:00:00Z',
'x-image-meta-status': 'ACTIVE',
'x-image-meta-property-test-key': 'test_value'}
self.requests.register_uri('HEAD', self.url(1), headers=image_headers)
class V3(V1):
base_url = 'v1/images'
|
The character of Hans Beckert is one of the most singular in movie history: a Grieg-whistling, psychopathic child-murderer haunting the back streets of Weimar Berlin. And yet, through his brilliant portrayal by Peter Lorre, he manages to evoke a strange sympathy. Fritz Lang’s early talkie is recognised as a masterpiece and still has the power to shock 85 years after its original release.
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
__author__ = 'cooper'
import traceback
import logging
from threading import Thread
from twisted.internet import reactor
from scrapy.crawler import Crawler
from scrapy import log, signals
from scrapy.xlib.pydispatch import dispatcher
from scrapy.exceptions import DontCloseSpider
from scrapy.utils.project import get_project_settings
from stockspider.spiders.hq_spider import HqSpider
from common import inject, depends_on
@depends_on('sql_engine')
class ScrapySpider:
def __init__(self):
self.spider = HqSpider()
self.crawler = crawler = Crawler(get_project_settings())
crawler.signals.connect(reactor.stop, signal=signals.spider_closed)
crawler.configure()
crawler.crawl(self.spider)
dispatcher.connect(self._dont_close_me, signals.spider_idle)
self.thread = None
self._started = False
self._stopped = False
def start(self):
def run():
try:
logging.info('Start spider')
reactor.run(installSignalHandlers=False)
except Exception, e:
print traceback.format_exc()
if not self._started:
self._started = True
self.crawler.start()
log.start_from_settings(get_project_settings())
self.thread = Thread(target=run)
log.msg('Start')
self.thread.start()
else:
raise Exception('spider has already started.')
def stop(self):
if not self._started:
raise Exception('spider not started.')
elif self._stopped:
raise Exception('spider has already stopped')
else:
log.msg('Stop')
self._stopped = True
self.crawler.stop()
def _dont_close_me(self, spider):
raise DontCloseSpider("..I prefer live spiders.")
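# A minimal usage sketch (assuming the @depends_on machinery supplies the
# declared 'sql_engine' dependency):
#
# spider = ScrapySpider()
# spider.start() # runs the twisted reactor in a background thread
# ... let it crawl ...
# spider.stop() # stops the crawler; the reactor exits via spider_closed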
|
If you have a complaint about the accessibility of our transit system or service, or believe you have been discriminated against because of your disability, you can file a complaint. Please provide all facts and circumstances surrounding your issue or complaint so we can fully investigate the incident.
You can call us, download and use our ADA complaint form here, or request a copy of the form by writing or phoning Fairbanks North Star Borough, Transportation Department, 3175 Peger Rd, Fairbanks, AK 99709, (907) 459-1196.
Do you need complaint assistance?
If you are unable to complete a written complaint due to a disability or if information is needed in another language we can assist you. Please contact us at (907) 459-1196 or [email protected].
How will your complaint be handled?
MACS Transportation investigates complaints received no more than 180 days after the alleged incident. MACS Transportation will process complaints that are complete. Once a completed complaint is received, MACS Transportation will review it to determine if MACS Transportation has jurisdiction.
MACS Transportation will generally complete an investigation within 90 days from receipt of a complaint. If more information is needed to resolve the case, MACS Transportation may contact you. Unless a longer period is specified by MACS Transportation, you will have ten (10) days from the date of the request to send the requested information. If the requested information is not received, MACS Transportation may administratively close the case. A case may also be administratively closed if you no longer wish to pursue it.
After an investigation is complete, MACS Transportation will send you a letter summarizing the results of the investigation, stating the findings and advising of any corrective action to be taken as a result of the investigation. If you disagree with MACS Transportation's determination, you may request reconsideration by submitting a request in writing to the Fairbanks North Star Borough HR Director within seven (7) days after the date of MACS Transportation's letter, stating with specificity the basis for the reconsideration. The director will notify you of the decision either to accept or reject the request for reconsideration within ten (10) days. In cases where reconsideration is granted, the director will issue a determination letter to the complainant upon completion of the reconsideration review.
Do I have other options for filing a complaint?
We encourage that you file the complaint with us. However, you may file a complaint with Alaska Community Transit or the Federal Transit Administration.
|
#!/usr/bin/env python3.4
from model.energy import Energy
from model.matter import Matter
from model.position import Position
class Cell:
def __init__(self, energy: Energy, matter: Matter, position: Position, max_age,
min_energy: Energy, max_energy: Energy, min_matter: Matter, max_matter: Matter,
life_function, absorb_energy_function, absorb_matter_function):
self.energy = energy
self.matter = matter
self.position = position
self.life_function = life_function
self.absorb_energy_function = absorb_energy_function
self.absorb_matter_function = absorb_matter_function
self.min_energy = min_energy
self.max_energy = max_energy
self.min_matter = min_matter
self.max_matter = max_matter
self.max_age = max_age
self.age = 0
self.life = True
def next_life_step(self):
self.age += 1
self.process_energy()
self.process_matter()
self.life_function()
return self.life
def process_energy(self):
self.energy += self.absorb_energy_function()
def process_matter(self):
self.matter += self.absorb_matter_function()
def is_too_old(self):
return self.age > self.max_age
    def has_not_enough_energy(self):
        return self.energy < self.min_energy
    def has_not_enough_matter(self):
        return self.matter < self.min_matter
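# A minimal usage sketch (hypothetical values and callbacks; Energy, Matter and
# Position are assumed to be constructible from simple values and to support +=):
#
# cell = Cell(energy=Energy(1.0), matter=Matter(1.0), position=Position(0, 0),
#             max_age=100,
#             min_energy=Energy(0.1), max_energy=Energy(10.0),
#             min_matter=Matter(0.1), max_matter=Matter(10.0),
#             life_function=lambda: None,
#             absorb_energy_function=lambda: Energy(0.05),
#             absorb_matter_function=lambda: Matter(0.01))
# alive = cell.next_life_step()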
|
One of the best road trips Australia has to offer is the coastal drive between Sydney and Melbourne. Not only do you take in Australia’s two largest cities, but the stretch of coast between the two is a diverse and stunning mosaic of forests, inlets, river systems, crystal clear lakes, charismatic wildlife, charming coastal towns and white unspoiled beaches. It’s easy to fall in love with the region’s natural beauty and to slip into the gentle rhythm of its lifestyle.
Below I’ve mapped every campsite.
Read the full list of beach camping in New South Wales.
Read the full list of beach camping in Victoria.
If you need to hire a camper van in Australia, check my price comparison of the major campervan hire companies in Australia. By the time sneaky fees are added, the cheapest turns out to be Camperman Australia with their all-inclusive pricing.
|
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Testing utilities for the webapp libraries.
GetDefaultEnvironment: Method for easily setting up CGI environment.
RequestHandlerTestBase: Base class for setting up handler tests.
"""
__author__ = '[email protected] (Rafe Kaplan)'
import cStringIO
import threading
import urllib2
from wsgiref import simple_server
from wsgiref import validate
from . import protojson
from . import remote
from . import test_util
from . import transport
from .webapp import service_handlers
from .webapp.google_imports import webapp
class TestService(remote.Service):
"""Service used to do end to end tests with."""
@remote.method(test_util.OptionalMessage,
test_util.OptionalMessage)
def optional_message(self, request):
if request.string_value:
request.string_value = '+%s' % request.string_value
return request
def GetDefaultEnvironment():
"""Function for creating a default CGI environment."""
return {
'LC_NUMERIC': 'C',
'wsgi.multiprocess': True,
'SERVER_PROTOCOL': 'HTTP/1.0',
'SERVER_SOFTWARE': 'Dev AppServer 0.1',
'SCRIPT_NAME': '',
'LOGNAME': 'nickjohnson',
'USER': 'nickjohnson',
'QUERY_STRING': 'foo=bar&foo=baz&foo2=123',
'PATH': '/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/bin/X11',
'LANG': 'en_US',
'LANGUAGE': 'en',
'REMOTE_ADDR': '127.0.0.1',
'LC_MONETARY': 'C',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'wsgi.url_scheme': 'http',
'SERVER_PORT': '8080',
'HOME': '/home/mruser',
'USERNAME': 'mruser',
'CONTENT_LENGTH': '',
'USER_IS_ADMIN': '1',
'PYTHONPATH': '/tmp/setup',
'LC_TIME': 'C',
'HTTP_USER_AGENT': 'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; '
'rv:1.8.1.6) Gecko/20070725 Firefox/2.0.0.6',
'wsgi.multithread': False,
'wsgi.version': (1, 0),
'USER_EMAIL': '[email protected]',
      'USER_ID': '112',
'wsgi.input': cStringIO.StringIO(),
'PATH_TRANSLATED': '/tmp/request.py',
'SERVER_NAME': 'localhost',
'GATEWAY_INTERFACE': 'CGI/1.1',
'wsgi.run_once': True,
'LC_COLLATE': 'C',
'HOSTNAME': 'myhost',
'wsgi.errors': cStringIO.StringIO(),
'PWD': '/tmp',
'REQUEST_METHOD': 'GET',
'MAIL': '/dev/null',
'MAILCHECK': '0',
'USER_NICKNAME': 'test',
'HTTP_COOKIE': 'dev_appserver_login="test:[email protected]:True"',
'PATH_INFO': '/tmp/myhandler'
}
class RequestHandlerTestBase(test_util.TestCase):
"""Base class for writing RequestHandler tests.
To test a specific request handler override CreateRequestHandler.
To change the environment for that handler override GetEnvironment.
"""
def setUp(self):
"""Set up test for request handler."""
self.ResetHandler()
def GetEnvironment(self):
"""Get environment.
Override for more specific configurations.
Returns:
dict of CGI environment.
"""
return GetDefaultEnvironment()
def CreateRequestHandler(self):
"""Create RequestHandler instances.
Override to create more specific kinds of RequestHandler instances.
Returns:
RequestHandler instance used in test.
"""
return webapp.RequestHandler()
def CheckResponse(self,
expected_status,
expected_headers,
expected_content):
"""Check that the web response is as expected.
Args:
expected_status: Expected status message.
expected_headers: Dictionary of expected headers. Will ignore unexpected
headers and only check the value of those expected.
expected_content: Expected body.
"""
def check_content(content):
self.assertEquals(expected_content, content)
def start_response(status, headers):
self.assertEquals(expected_status, status)
found_keys = set()
for name, value in headers:
name = name.lower()
try:
expected_value = expected_headers[name]
except KeyError:
pass
else:
found_keys.add(name)
self.assertEquals(expected_value, value)
missing_headers = set(expected_headers.keys()) - found_keys
if missing_headers:
self.fail('Expected keys %r not found' % (list(missing_headers),))
return check_content
self.handler.response.wsgi_write(start_response)
def ResetHandler(self, change_environ=None):
"""Reset this tests environment with environment changes.
Resets the entire test with a new handler which includes some changes to
the default request environment.
Args:
change_environ: Dictionary of values that are added to default
environment.
"""
environment = self.GetEnvironment()
environment.update(change_environ or {})
self.request = webapp.Request(environment)
self.response = webapp.Response()
self.handler = self.CreateRequestHandler()
self.handler.initialize(self.request, self.response)
class SyncedWSGIServer(simple_server.WSGIServer):
pass
class ServerThread(threading.Thread):
"""Thread responsible for managing wsgi server.
This server does not just attach to the socket and listen for requests. This
is because the server classes in Python 2.5 or less have no way to shut them
down. Instead, the thread must be notified of how many requests it will
receive so that it listens for each one individually. Tests should tell how
many requests to listen for using the handle_request method.
"""
def __init__(self, server, *args, **kwargs):
"""Constructor.
Args:
server: The WSGI server that is served by this thread.
As per threading.Thread base class.
State:
__serving: Server is still expected to be serving. When False server
knows to shut itself down.
"""
self.server = server
# This timeout is for the socket when a connection is made.
self.server.socket.settimeout(None)
# This timeout is for when waiting for a connection. The allows
# server.handle_request() to listen for a short time, then timeout,
# allowing the server to check for shutdown.
self.server.timeout = 0.05
self.__serving = True
super(ServerThread, self).__init__(*args, **kwargs)
def shutdown(self):
"""Notify server that it must shutdown gracefully."""
self.__serving = False
def run(self):
"""Handle incoming requests until shutdown."""
while self.__serving:
self.server.handle_request()
self.server = None
class TestService(remote.Service):
"""Service used to do end to end tests with."""
def __init__(self, message='uninitialized'):
self.__message = message
@remote.method(test_util.OptionalMessage, test_util.OptionalMessage)
def optional_message(self, request):
if request.string_value:
request.string_value = '+%s' % request.string_value
return request
@remote.method(response_type=test_util.OptionalMessage)
def init_parameter(self, request):
return test_util.OptionalMessage(string_value=self.__message)
@remote.method(test_util.NestedMessage, test_util.NestedMessage)
def nested_message(self, request):
request.string_value = '+%s' % request.string_value
return request
@remote.method()
def raise_application_error(self, request):
raise remote.ApplicationError('This is an application error', 'ERROR_NAME')
@remote.method()
def raise_unexpected_error(self, request):
raise TypeError('Unexpected error')
@remote.method()
def raise_rpc_error(self, request):
raise remote.NetworkError('Uncaught network error')
@remote.method(response_type=test_util.NestedMessage)
def return_bad_message(self, request):
return test_util.NestedMessage()
class AlternateService(remote.Service):
"""Service used to requesting non-existant methods."""
@remote.method()
def does_not_exist(self, request):
raise NotImplementedError('Not implemented')
class WebServerTestBase(test_util.TestCase):
SERVICE_PATH = '/my/service'
def setUp(self):
self.server = None
self.schema = 'http'
self.ResetServer()
self.bad_path_connection = self.CreateTransport(self.service_url + '_x')
self.bad_path_stub = TestService.Stub(self.bad_path_connection)
super(WebServerTestBase, self).setUp()
def tearDown(self):
self.server.shutdown()
super(WebServerTestBase, self).tearDown()
def ResetServer(self, application=None):
"""Reset web server.
Shuts down existing server if necessary and starts a new one.
Args:
application: Optional WSGI function. If none provided will use
tests CreateWsgiApplication method.
"""
if self.server:
self.server.shutdown()
self.port = test_util.pick_unused_port()
self.server, self.application = self.StartWebServer(self.port, application)
self.connection = self.CreateTransport(self.service_url)
def CreateTransport(self, service_url, protocol=protojson):
"""Create a new transportation object."""
return transport.HttpTransport(service_url, protocol=protocol)
def StartWebServer(self, port, application=None):
"""Start web server.
Args:
port: Port to start application on.
application: Optional WSGI function. If none provided will use
tests CreateWsgiApplication method.
Returns:
A tuple (server, application):
server: An instance of ServerThread.
application: Application that web server responds with.
"""
if not application:
application = self.CreateWsgiApplication()
validated_application = validate.validator(application)
server = simple_server.make_server('localhost', port, validated_application)
server = ServerThread(server)
server.start()
return server, application
def make_service_url(self, path):
"""Make service URL using current schema and port."""
return '%s://localhost:%d%s' % (self.schema, self.port, path)
@property
def service_url(self):
return self.make_service_url(self.SERVICE_PATH)
class EndToEndTestBase(WebServerTestBase):
# Sub-classes may override to create alternate configurations.
DEFAULT_MAPPING = service_handlers.service_mapping(
[('/my/service', TestService),
('/my/other_service', TestService.new_factory('initialized')),
])
def setUp(self):
super(EndToEndTestBase, self).setUp()
self.stub = TestService.Stub(self.connection)
self.other_connection = self.CreateTransport(self.other_service_url)
self.other_stub = TestService.Stub(self.other_connection)
self.mismatched_stub = AlternateService.Stub(self.connection)
@property
def other_service_url(self):
return 'http://localhost:%d/my/other_service' % self.port
def CreateWsgiApplication(self):
"""Create WSGI application used on the server side for testing."""
return webapp.WSGIApplication(self.DEFAULT_MAPPING, True)
def DoRawRequest(self,
method,
content='',
content_type='application/json',
headers=None):
headers = headers or {}
headers.update({'content-length': len(content or ''),
'content-type': content_type,
})
request = urllib2.Request('%s.%s' % (self.service_url, method),
content,
headers)
return urllib2.urlopen(request)
def RawRequestError(self,
method,
content=None,
content_type='application/json',
headers=None):
try:
self.DoRawRequest(method, content, content_type, headers)
self.fail('Expected HTTP error')
except urllib2.HTTPError as err:
return err.code, err.read(), err.headers
|
import numpy as np
import sv_data
import sv_diagram as sv_d
def map_kmers(f, k):
""" Takes a list function f and returns a function that applies
f to k-mers of a list, returning the results as a list with
None values discarded.
"""
def g(input_list, *args, **kwargs):
outputs = [f(input_list[i:i+k], *args, **kwargs)
for i in range(len(input_list) + 1 - k)]
        return [x for x in outputs if x is not None]
return g
def map_kbins(f, k):
    """ Like map_kmers, but applies f to consecutive non-overlapping
    bins of size k instead of to every overlapping k-mer.
    """
    def g(input_list, *args, **kwargs):
        # Length of the input list must be a multiple of k.
        assert len(input_list) % k == 0
        outputs = [f(input_list[i:i+k], *args, **kwargs)
                   for i in range(0, len(input_list), k)]
        return [x for x in outputs if x is not None]
    return g
# Sequence of letters to rearranged positions
def pair_to_letters(pair):
letter, possible_tick = pair
if not letter.isalpha():
return None
elif possible_tick != "'":
return letter
else:
return letter + possible_tick
def letters_to_letterlist(letters):
letters += "A"
to_letterlist = map_kmers(pair_to_letters, 2)
return to_letterlist(letters)
def pair_to_positions(pair, length = 10):
letter, possible_tick = pair
positions = list(np.arange(length) + (ord(letter) * length))
inverted = list(positions[::-1])
if not letter.isalpha():
return None
elif possible_tick != "'":
return positions
else:
return inverted
def letters_to_positions(letters):
letters += "A" # Not read.
pairs_to_positions = map_kmers(pair_to_positions, 2)
positions = [x for letter_sequence in pairs_to_positions(letters)
for x in letter_sequence]
return positions
# Rearranged positions to sequence of letters
def positions_to_letter(positions, length = 10):
assert len(positions) == length
base_position = min(positions[0], positions[-1])
if base_position == positions[0]:
inverted = False
elif base_position == positions[-1]:
inverted = True
    else:
        # Unreachable for well-formed segments, but fail loudly just in case.
        raise ValueError("ambiguous segment: %r" % (positions,))
    letter = chr(base_position / len(positions))
if not inverted:
return letter
else:
return letter + "'"
def positions_to_letters(positions):
positions_to_list = map_kbins(positions_to_letter, 10)
list_of_letters = positions_to_list(positions)
return "".join(list_of_letters)
positions_to_ticks = map_kbins(np.mean, 10)
# Fusions from rearranged chromosome.
def detect_fusions(sites):
""" Takes a list of four sites, and returns either None, or a
fusion-tuple based on a length-four paired-end read, e.g.
01[2398]7
-><-
T T
"""
assert len(sites) == 4
breakdiff = abs(sites[1] - sites[2])
diff1 = sites[0] - sites[1]
diff2 = sites[2] - sites[3]
if breakdiff == 1:
return None
else:
# Differences should be 1 or -1 normally.
strand1 = {-1:"+", 1:"-"}.get(diff1, "?")
strand2 = {1:"+", -1:"-"}.get(diff2, "?")
bp1 = sv_data.Breakpoint(chrom = "",
pos = sites[1] * 1e6,
strand = strand1)
bp2 = sv_data.Breakpoint(chrom = "",
pos = sites[2] * 1e6,
strand = strand2)
return sv_data.Fusion(bp1, bp2)
get_fusions = map_kmers(detect_fusions, 4)
# Copy number from rearranged chromosome
def get_x_cn(positions):
counts = [(p, positions.count(p)) for p in positions]
x_tuple, cn_tuple = zip(*counts)
x, cn = list(x_tuple), list(cn_tuple)
return x, cn
## Campbellgrams ##
def simulate_sv_diagram(
letters, outfile = None,
**kwargs):
if outfile == None:
outfile = "../output/simulation/simulation_%s.pdf" % letters
### Simulation-specific stuff
positions = letters_to_positions(letters)
fusions = get_fusions(positions)
x, cn = get_x_cn(positions)
kwargs['yticks'] = range(max(cn) + 2)
kwargs['ymax'] = max(cn) + 1
kwargs['ymin'] = 0
kwargs['xlabel'] = letters
###
fig = sv_d.setup_figure()
cn_axes, fusion_axes = sv_d.sv_diagram_axes()
# Copy number
sv_d.plot_cn(cn_axes, x, cn)
sv_d.set_cn_axes_options(cn_axes, x, cn, kwargs)
sv_d.set_cn_axes_aesthetics(cn_axes)
sv_d.plt.minorticks_off()
### Simulation-specific stuff
x_range = range(min(x), max(x) + 1)
x_letters = letters_to_letterlist(positions_to_letters(x_range))
x_ticks = positions_to_ticks(x_range)
cn_axes.set_xticks(x_ticks, minor = True)
cn_axes.set_xticklabels(x_letters, minor = True)
sv_d.plt.setp(cn_axes.get_xticklabels(), visible=False)
###
# Fusions
sv_d.setup_fusion_axes(fusion_axes, min(x), max(x))
for fusion in fusions:
sv_d.plot_fusion(cn_axes, fusion_axes, fusion)
# Ensure everything fits
sv_d.plt.tight_layout()
# Output
fig.savefig(outfile)
sv_d.plt.close(fig)
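# Example usage (a sketch, not part of the original module): each letter is a
# 10-position segment, and a trailing "'" marks an inverted segment.
#
# simulate_sv_diagram("ABC'B")  # writes ../output/simulation/simulation_ABC'B.pdf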
|
Almost everyone who has traveled along Interstate 80 or on other New Jersey roads, has, at one time or another, seen a driver who is speeding, distracted, under the influence or driving in a reckless manner. When negligence leads to a motor vehicle accident, injury victims have the right to pursue compensation for their losses.
Our knowledgeable lawyers have diverse experience, which broadens the scope of legal representation our firm can offer. This ensures we can handle your accident injury case comprehensively, regardless of what issues are involved. We have the skill and resources to handle complex motor vehicle accident claims involving serious injuries and wrongful death. These resources also include experienced private investigators, accident reconstruction experts, engineers and other forensic experts.
What was the cause of your motor vehicle accident? In handling accident claims, we employ aggressive investigative tactics to determine how the accident was caused and to identify liable parties. Common causes of motor vehicle accidents include drunk driving, distracted driving, speeding, reckless driving, intersection violations and other types of negligence.
To arrange an appointment to discuss your car accident injury claim, please contact us today online or by telephone at 973-668-4435 or toll free at 888-382-3974. Our goal is to listen to the details of your accident and your concerns and then explain your legal rights and your options, so you can make fully informed decisions about important next steps.
|
# -*- coding: utf-8 -*-
'''
Video Uav Tracker v 2.0
Replay a video in sync with a gps track displayed on the map.
-------------------
copyright : (C) 2017 by Salvatore Agosta
email : [email protected]
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
INSTRUCTION:
Synching:
- Create new project
- Select video and gps track (1 trkpt per second)
- Identify the first matching Frame/GpsTime pair and select it.
- Push Synchronize
- Push Start
Replay:
- Move on map
- Create associated DB shapefile
- Add POI with associated video frame saved
- Extract frames with associated coordinates for rapid photogrammetry use
'''
from PyQt5 import QtCore, QtGui
from qgis.core import *
from qgis.gui import *
class PositionMarker(QgsMapCanvasItem):
""" marker for current GPS position """
def __init__(self, canvas, alpha=255):
QgsMapCanvasItem.__init__(self, canvas)
self.pos = None
self.hasPosition = False
self.d = 20
self.angle = 0
self.setZValue(100) # must be on top
self.alpha=alpha
def newCoords(self, pos):
if self.pos != pos:
self.pos = QgsPointXY(pos) # copy
self.updatePosition()
def setHasPosition(self, has):
if self.hasPosition != has:
self.hasPosition = has
self.update()
def updatePosition(self):
if self.pos:
self.setPos(self.toCanvasCoordinates(self.pos))
self.update()
    def paint(self, p, option, widget):  # option/widget unused
if not self.pos:
return
path = QtGui.QPainterPath()
path.moveTo(0,-15)
path.lineTo(15,15)
path.lineTo(0,7)
path.lineTo(-15,15)
path.lineTo(0,-15)
# render position with angle
p.save()
p.setRenderHint(QtGui.QPainter.Antialiasing)
if self.hasPosition:
p.setBrush(QtGui.QBrush(QtGui.QColor(0,0,0, self.alpha)))
else:
p.setBrush(QtGui.QBrush(QtGui.QColor(200,200,200, self.alpha)))
p.setPen(QtGui.QColor(255,255,0, self.alpha))
p.rotate(self.angle)
p.drawPath(path)
p.restore()
def boundingRect(self):
return QtCore.QRectF(-self.d,-self.d, self.d*2, self.d*2)
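# Minimal usage sketch (an assumption, not part of the plugin): attach the
# marker to the QGIS map canvas and feed it GPS fixes.
#
# marker = PositionMarker(iface.mapCanvas())   # iface: the QGIS interface object
# marker.newCoords(QgsPointXY(12.49, 41.89))
# marker.setHasPosition(True)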
class ReplayPositionMarker(PositionMarker):
def __init__(self, canvas):
PositionMarker.__init__(self, canvas)
    def paint(self, p, option, widget):  # option/widget unused
if not self.pos:
return
path = QtGui.QPainterPath()
path.moveTo(-10,1)
path.lineTo(10,1)
path.lineTo(10,0)
path.lineTo(1,0)
path.lineTo(1,-5)
path.lineTo(4,-5)
path.lineTo(0,-9)
path.lineTo(-4,-5)
path.lineTo(-1,-5)
path.lineTo(-1,0)
path.lineTo(-10,0)
path.lineTo(-10,1)
# render position with angle
p.save()
p.setRenderHint(QtGui.QPainter.Antialiasing)
p.setBrush(QtGui.QBrush(QtGui.QColor(255,0,0)))
p.setPen(QtGui.QColor(255,255,0))
p.rotate(self.angle)
p.drawPath(path)
p.restore()
|
Join us for Lunch with the CEO on Wednesday, February 22 from 12:00 p.m. to 1:00 p.m. to learn more about how to be involved with Equest. Lunch will be provided.
Please RSVP to Christine at [email protected].
|
import os
def load_data(path):
input_file = os.path.join(path)
with open(input_file, "r", encoding='utf-8', errors='ignore') as f:
data = f.read()
return data
def extract_vocab(data):
    # '\\s' keeps the literal backslash and avoids an invalid escape warning.
    special_words = ['<pad>', '<unk>', '<s>', '<\\s>']
set_words = set([word for line in data.split('\n') for word in line.split()])
int_to_vocab = {word_i: word for word_i, word in enumerate(special_words + list(set_words))}
vocab_to_int = {word: word_i for word_i, word in int_to_vocab.items()}
return int_to_vocab, vocab_to_int
def pad_id_sequences(source_ids, source_vocab_to_int, target_ids, target_vocab_to_int, sequence_length):
    # Sources are padded, then reversed: feeding the source sequence
    # backwards is a common seq2seq trick to shorten early dependencies.
    new_source_ids = [list(reversed(sentence + [source_vocab_to_int['<pad>']] * (sequence_length - len(sentence)))) \
                      for sentence in source_ids]
new_target_ids = [sentence + [target_vocab_to_int['<pad>']] * (sequence_length - len(sentence)) \
for sentence in target_ids]
return new_source_ids, new_target_ids
def batch_data(source, target, batch_size):
"""
Batch source and target together
"""
for batch_i in range(0, len(source)//batch_size):
start_i = batch_i * batch_size
source_batch = source[start_i:start_i + batch_size]
target_batch = target[start_i:start_i + batch_size]
yield source_batch, target_batch
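# Minimal usage sketch (an assumption, not part of the original helpers):
#
# src, tgt = pad_id_sequences([[5, 6], [7]], {'<pad>': 0},
#                             [[8], [9, 10]], {'<pad>': 0},
#                             sequence_length=3)
# # src == [[0, 6, 5], [0, 0, 7]]   (padded, then reversed)
# # tgt == [[8, 0, 0], [9, 10, 0]]  (padded only)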
|
Now this is what you call backstage fun! My dress by Tim Watson is a bit ruched up from all of the excitement Haha. on Day 4 of PFF at The National Showcase with the boys. Swoon!
The beauty look at Breathless as part of Designer Capsule #3, was just that, breath-taking!
The make-up at Nikki Loueza as part of Designer Capsule #3 was my favourite of the week.
|
# -*- coding: utf-8 -*-
"""
A context manager for handling sys.displayhook.
Qudi is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Qudi is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Qudi. If not, see <http://www.gnu.org/licenses/>.
Copyright (c) the Qudi Developers. See the COPYRIGHT.txt file at the
top-level directory of this distribution and at <https://github.com/Ulm-IQO/qudi/>
"""
#-----------------------------------------------------------------------------
# Authors:
#
# * Robert Kern
# * Brian Granger
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file documentation/BSDLicense_IPython.md, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class DisplayTrap:
"""Object to manage sys.displayhook.
This came from IPython.core.kernel.display_hook, but is simplified
(no callbacks or formatters) until more of the core is refactored.
"""
def __init__(self, hook=None):
self.old_hook = None
self.hook = hook
# We define this to track if a single BuiltinTrap is nested.
# Only turn off the trap when the outermost call to __exit__ is made.
self._nested_level = 0
def __enter__(self):
""" Enter a code segment where displayhook is set.
"""
if self._nested_level == 0:
self.set()
self._nested_level += 1
return self
def __exit__(self, type, value, traceback):
""" Leave a code segmen swhere displayhook is unset.
@param type:
@param value:
@param traceback:
"""
if self._nested_level == 1:
self.unset()
self._nested_level -= 1
# Returning False will cause exceptions to propagate
return False
def set(self):
"""Set the hook."""
if self.hook is not None and sys.displayhook is not self.hook:
self.old_hook = sys.displayhook
sys.displayhook = self.hook
def unset(self):
"""Unset the hook."""
if self.hook is not None and sys.displayhook is not self.old_hook:
sys.displayhook = self.old_hook
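# A minimal usage sketch (not part of the original file): inside the context,
# sys.displayhook is replaced; on exit, the previous hook is restored.
#
# trap = DisplayTrap(hook=lambda value: print('trapped:', value))
# with trap:
#     sys.displayhook('hello')   # -> trapped: hello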
|
This entry was posted on 25th January 2019 by Jakk.
UK-based Sylatech specialise in investment casting, waveguide systems and CNC machining. They design and manufacture precision-made custom parts through a range of processes, including lost wax. For investment casting, they manufacture prototype sample parts for customers, but using traditional tooling methods to do so is very expensive. Customers would typically incur a cost of between £2,000 to £4,000 per tool. With 30% of tools needing alterations, some customers would be more than £5,000 deep before receiving their end-use parts.
Now, Sylatech use Ultimaker 3D printers to offer customers a more cost-effective and useful prototyping solution. Their Ultimaker 3 Extended has the build volume to manufacture large parts and the print speed to fabricate those parts in a matter of hours. The process has proven very popular with customers because it enables them to see how their casted parts will really perform. The Ultimaker 3 Extended prints parts with pinpoint accuracy, so customers can always expect the prototypes to perform in the same way as a casted part, minus the weight difference.
This has brought huge cost savings to projects and sped up the prototyping process considerably. The Sylatech team can now 3D print several new iterations of a part in days instead of weeks and have the flexibility to manufacture prototypes to virtually any specification. A CAD drawing can be finished and sent to print in one day.
"With the Ultimaker, we can use a 3D printed model for the creation of sample parts directly in our foundry process, without having to invest in tooling to create wax patterns," says Gordon Gunn, Director of Marketing at Sylatech. "Through 3D printing we can significantly speed up our sampling process - clients can now get a prototype metal part in just seven days!"
The cost and time savings are enormous for Sylatech. They have reduced typical product development costs from £17,100 to £600, a saving of roughly 96.5%, which is passed on to customers (most appreciatively). Project development time has decreased from 4 weeks to 5 days. This allows them to take on more work and deliver products faster.
3D printing has also reduced the level of tooling modifications required. Traditionally, 30% of tools required alteration due to customer design modifications. Thanks to the accuracy and design flexibility delivered by their Ultimaker, this has been reduced considerably to below 5%. This means customers get their parts faster and experience a smoother manufacturing process. A doubly good combination for high customer satisfaction.
Perhaps the biggest benefit to the 3D printing process though comes in increased design flexibility. Because 3D printing is an additive manufacturing process and not a subtractive one, you can actually make parts with a greater level of intricacy and detail than you can with wax patterns. Models drawn in CAD are printed true-to-design, and the Ultimaker 3 Extended allows for complex geometries and intricate designs to be made up quickly.
|
#!/usr/bin/env python
__author__ = "bt3"
"""
Given an integer x and an unsorted array of integers, describe an
algorithm to determine whether two of the numbers add up to x.
1. Using hash tables.
2. Sorting the array and keeping two pointers in the array, one in
the beginning and one in the end. Whenever the sum of the current
two integers is less than x, move the first pointer forwards, and
whenever the sum is greater than x, move the second pointer
backwards. O(n log n).
3. Create a BST with x minus each element in the array.
Check whether any element of the array appears in the BST.
It takes O(n log n) time for each of the two passes.
"""
from collections import defaultdict, Counter
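# A minimal sketch (not from the original file) of approach 2 described in the
# module docstring: sort a copy of the array, then walk two pointers inward
# from both ends. Returns the first matching pair, or None.
def check_sum_sorted(array, k):
    arr = sorted(array)
    lo, hi = 0, len(arr) - 1
    while lo < hi:
        s = arr[lo] + arr[hi]
        if s == k:
            return (arr[lo], arr[hi])
        elif s < k:
            lo += 1
        else:
            hi -= 1
    return None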
def check_sum(array, k):
'''
>>> check_sum([3, 2, 6, 7, 9, 1], 8)
[(6, 2), (1, 7)]
>>> check_sum([5, 2, 6, 7, 9, 1], 4)
[]
>>>
'''
    seen = defaultdict()
    res = []
    for i in array:
        if k-i in seen:
            res.append((i, k-i))
            del seen[k-i]
        else:
            seen[i] = 1
    return res
def check_sum2(array, k):
'''
>>> check_sum2([1, 4, 2, 7, 1, 3, 10, 15, 3, 1], 6)
set([(3, 3)])
>>> check_sum2([1, 4, 2, 7, 1, 3, 10, 15, 3, 1], 0)
set([])
'''
    counts = Counter()
    res = set()
    for i in array:
        counts[i] += 1
    # NOTE: as written (and as the doctests above confirm), this variant
    # only reports pairs made of two equal halves (i == k-i).
    for i in array:
        if counts[k-i] > 0:
            if i == k-i and counts[k-i] > 1:
                res.add((i, k-i))
                counts[k-i] -= 2
            elif i == k-i:
                res.add((i, k-i))
                counts[k-i] -= 1
    return res
if __name__ == '__main__':
import doctest
doctest.testmod()
|
Copyright © 2008, John Thawley ~ Creative Communications Group All rights reserved.
Penske Racing, Porsche. Timo Bernhard, Romain Dumas and Emmanuel Collard (No. 7 Penske Racing DHL Porsche Spyder) drove to an historic overall win at the 56th Mobil 1 Twelve Hours of Sebring presented by Fresh from Florida last Saturday, the season opener for the American Le Mans Series. The win was the first overall at Sebring by an LMP2 car, and America's classic endurance contest saw 27 overall lead changes, a race record, and three lead-lap finishers, tying a race record. Dumas took the checkered flag by 1:02.084 over Dyson Racing’s trio of Butch Leitzinger, Marino Franchitti and Andy Wallace in their Porsche RS Spyder. (Adrian Fernandez and Luis Diaz had finished second overall and in class but failed a post-race stall test in technical inspections. Their Lowe’s Fernandez Racing Acura ARX-01b was excluded as a result.) The Audi Sport North America trio of Dindo Capello, Allan McNish and Tom Kristensen finished third overall and first in LMP1. Third in P2 class and fourth overall was the second Dyson Porsche of Chris Dyson and Guy Smith as the P2 class took three of the top four spots. The overall win for Porsche was its first since 1988 when Hans Stuck and Klaus Ludwig took victory in a Porsche 962. Roger Penske took his first win at Sebring in 40 years and became the first team owner to win overall at Sebring, the Indianapolis 500 and the Daytona 500. Audi’s streak of consecutive overall wins at Sebring ended at eight, although it extended its Series record of consecutive class wins to 23. The much-anticipated battle between diesel rivals Audi and Peugeot materialized in the race’s opening half. The two traded the lead back and forth for 131 of the first 133 laps before the Peugeot 908 began to falter with hydraulic problems and related issues. It eventually finished fourth in P1 and 11th overall in its only scheduled appearance in North America for 2008.
Marco Andretti. Some people who should know better actually questioned this kid's talent, the implication being that he was handed things in racing because of his last name. But anyone who witnessed the way he grabbed his AGR Acura ARX-01B LMP2 car by the scruff of the neck and flat willed it around Sebring at blistering speeds had to be impressed. It was one of the most awesome displays of pure race driving talent and ability that we have ever seen. This just in, the kid is good.
Corvette Racing, Pratt & Miller. Johnny O’Connell became the winningest driver in Sebring history with his seventh class victory as he broke a tie with Sebring legend Phil Hill. O’Connell teamed with Jan Magnussen and Ron Fellows for a trouble-free run in Corvette Racing’s No. 3 Corvette C6R. The trio started from the head of the GT1 class and never trailed, completing 328 laps and finishing eight overall. It was the sixth GT1 class victory in seven years for Corvette Racing. “We had great pit stops. We made time on in laps and out laps,” O’Connell said. “Anytime you beat your own team, you have to be pretty good. Everyone is really feeling positive about things. Last year there were a lot of races where I thought we were the dominant car and had a lot of bad luck.” O’Connell won overall in 1994 and posted class victories in 1993, 1995 and 2002-2004. “We had some unfinished business,” Fellows said. “We had a really strong car at Road Atlanta and had some bad luck with Jan. We had a great race at Le Mans. All three of us feel that this is a great way to start. Le Mans is the next big prize we’re going to shoot for. This is a great confidence builder for all of us and we definitely want another Le Mans.” The victory gave Fellows four Sebring wins - he teamed with O’Connell for three straight from 2002-04 - and was Magnussen’s second in four years. “If you’ve ever had the chance to meet Phil Hill, he is everything good and proper about motorsports,” O’Connell said. “He was what was wonderful about that era. You run across some of your heroes that you run across that are so full of themselves. But you meet someone like Phil Hill, he’s one of the coolest cats around! You start to have a real understanding about what this race is about.” The No. 4 Corvette C6R of Oliver Gavin, Olivier Beretta and Max Papis ran into trouble early. The car lost time early in the race when the crew had to change a right halfshaft in the second hour. "We lost the inner tripod joint on the right side," explained team manager Gary Pratt. "All of the lube was still there, but it snapped one of the three drives at the spline. We've never seen a failure like that before." The trio was trying for a second straight class win together, but came home second in GT1, eight laps behind their teammates. The Sebring race marked the competition debut of two new Corvette C6R race cars, chassis numbers 007 and 008. "We introduced two new cars at this event, and again demonstrated the hallmarks of Corvette Racing: meticulous preparation, great teamwork, and flawless pit stops," said Corvette Racing program manager Doug Fehan. "We've shown that we're ready for an exciting season in ALMS and we're looking forward to Le Mans."
Johnny O'Connell, Corvette Racing program manager Doug Fehan, GM Racing road racing group manager Steve Wesoloski, Ron Fellows and Jan Magnussen celebrate another big GT1 class win for Corvette Racing at the Mobil 1 12 Hours of Sebring last Saturday night.
Flying Lizard Motorsports, Porsche. The Lizards finally broke through in the 12 Hours after finishing second the last two years. Jörg Bergmeister, Wolf Henzler and Marc Lieb drove their No. 45 Porsche 911 GT3 RSR to a two-lap victory over the No. 44 team car of Darren Law, Seth Neiman and Alex Davison. It was the first 1-2 class finish for the Flying Lizard Motorsports team, which placed third in 2005 and runner-up in 2006 and 2007. “The Lizards really deserve this victory because they have worked so hard over the years,” Lieb said. “To win this race after the last two disappointing seasons, it makes me very happy. The Lizards really deserve this victory because they have worked so hard over the years. This team puts a lot of effort into racing with Porsche. It goes down through the people who care for the tires to engineers. Everything is just running really well at the moment. Flying Lizard is one of the top teams in the world and deserves a win like this.” The Risi Competizione/Krohn Racing Ferrari F430GT finished third in class, driven by Nic Jonsson, Eric van de Poele and Tracy Krohn. The next race for the American Le Mans Series is the Acura Sports Car Challenge of St. Petersburg on Saturday, April 5, from the streets of St. Petersburg, Fla. The race will be broadcast on ABC from 1:30-3:30 p.m. ET on April 5. Radio coverage will be available on XM Satellite Radio and on americanlemans.com, which also will feature IMSA’s Live Timing & Scoring.
Randy Pobst, K-PAX Racing, Porsche. Former Floridian Randy Pobst’s (No. 1 K-PAX Racing Porsche 911 GT3) local weather pattern knowledge helped make the pivotal decision to pit early for rain tires, as he came back through the field - from last to first - to win the SCCA Pro Racing SPEED World Challenge GT Championship season opener, part of the Mobil 1 12 Hours of Sebring Presented by Fresh From Florida at Sebring International Raceway. Andy Pilgrim (No. 8 Remington Shaving Cadillac CTS-V), of Boca Raton, Fla., and Michael Galati (No. 23 K-PAX Racing Porsche 911 GT3), of Olmsted, Ohio, completed the top three. “We started with the rain early on and it was quite livable, but then somebody crashed which didn’t surprise me one bit,” Pobst continued. “So then, we’re riding along under yellow, watching the skies, the windshield, and the road. I looked at the weather and decided to go with the wet tires. I felt that the rain tires were a kind of intermediate tire that would be okay in the dry too. So we changed the tires and went to the back, and to our great fortune it started raining harder and harder. As I watched that water build up I knew that everybody on dries was in trouble. When we changed, it still wasn’t a sure thing. We took a gamble. It could have quit raining right there, but it didn’t.” Brandon Davis (No. 10 ACS/Sun Microsystems Ford Mustang Cobra) recovered from a late-race brush with the wall on a restart to record a fourth-place finish. This race will be broadcast on SPEED Friday, March 21 at 1 p.m. (EDT). The series next visits the Toyota Grand Prix of Long Beach for the Long Beach SPEED GT Presented by ACS, April 20.
Pierre Kleinubing, RealTime Racing, Acura. Pierre Kleinubing (No. 43 Acura/RealTime/Eibach/Red Line Acura TSX), of Coconut Creek, Fla., led from start to finish to capture his 27th-career SCCA Pro Racing SPEED World Challenge Touring Car win, his third at Sebring International Raceway, to start the 2008 season at the Mobil 1 12 Hours of Sebring Presented by Fresh From Florida weekend. Teammates Kuno Wittmer (No. 44 Acura/RealTime/Eibach/Red Line Acura TSX), of Montreal, Quebec, and Peter Cunningham (No. 42 Acura/RealTime/Eibach/Red Line Acura TSX), of Milwaukee, Wis., delivered a podium sweep for RealTime Racing. Starting from his record 31st pole, Kleinubing averaged 92.963 mph in beating Wittmer to the checkered flag by 0.645-second. The series next travels to VIRginia International Raceway, in Alton, Va., for Round Two, April 25-27. This race will be broadcast on SPEED Channel Friday, March 21 at 12 noon (EDT).
Michelin. The French tire manufacturer won an unprecedented 10th consecutive overall victory in the 56th running of the 12 Hours of Sebring, America’s oldest and most prestigious sports car race. The tenth consecutive overall win in the 12 Hours of Sebring matches the record tenth consecutive win for Michelin at the 24 Hours of Le Mans last June. The furious race pace produced new race lap records in the LMP1, LMP2 and GT2 classes, as both Audis and the race leading Peugeot all broke the former track record on just the third lap of the race. Risi Ferrari’s Jaime Melo set a new GT2 mark on the next lap. When the No. 2 Audi led lap 115 of the race, it marked the 3,000th lap of Sebring led by Michelin in the last 10 years. With the exception of three laps under caution early in the 2005 race, Michelin has led every green flag lap at Sebring since lap 296 of the 1999 Sebring race. The Michelin total is now 3,236 of the last 3,239 laps at Sebring. The Sebring victory was the 86th for Michelin in the 89 races held since the inception of the ALMS in 1999. With Saturday’s sweep of all four class victories, Michelin now has 245 ALMS class wins, including victories in all four ALMS classes in each of the past 13 events.
|
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth.backends import ModelBackend
from apiclient.discovery import build
import httplib2
import json
from uuid import uuid4
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
from django.template.defaultfilters import slugify
from django_auth_addon.models import GooglePlusCredentialsModel
SERVICE = build('plus', 'v1')
class GooglePlusBackend(ModelBackend):
def authenticate(self, access_code=None):
if access_code is None:
return None
try:
oauth_flow = flow_from_clientsecrets(settings.CLIENT_SECRETS, scope='')
oauth_flow.redirect_uri = 'postmessage'
self.credentials = oauth_flow.step2_exchange(access_code)
except FlowExchangeError:
return None
# Check that the access token is valid.
access_token = self.credentials.access_token
url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'
% access_token)
h = httplib2.Http()
result = json.loads(h.request(url, 'GET')[1])
# If there was an error in the access token info, abort.
if result.get('error') is not None:
return None
        # The token info was already fetched and validated above; reuse it
        # instead of issuing a second identical request.
        token_info = result
# http = httplib2.Http()
# http = self.credentials.authorize(http)
# # Get a list of people that this user has shared with this app.
# google_request = SERVICE.people().get(userId='me')
# people_document = google_request.execute(http=http)
# context['given_name'] = self.people_document['name']['givenName']
# context['family_name'] = self.people_document['name']['familyName']
# Check to see if there is a google plus credential object with the provided user id from google
google_plus_credentials = GooglePlusCredentialsModel.objects.filter(gplus_id=token_info['user_id'])
if len(google_plus_credentials) == 0:
credentials = GooglePlusCredentialsModel()
credentials.gplus_id = token_info['user_id']
# Need to create a whole new user object and move on.
user = User.objects.create_user(get_username(), token_info['email'])
credentials.user = user
user.save()
credentials.save()
else:
# Check to see if the credentials object has a user and then return it.
user = google_plus_credentials[0].user
return user
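# To enable this backend (a sketch; the dotted path below assumes this module
# is importable as django_auth_addon.backends), add it to settings.py:
#
# AUTHENTICATION_BACKENDS = (
#     'django_auth_addon.backends.GooglePlusBackend',
#     'django.contrib.auth.backends.ModelBackend',
# )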
def get_username():
max_length = 30
    username = slugify(uuid4().hex[:max_length])
while not is_valid_username(username):
        username = slugify(uuid4().hex[:max_length])
return username
def is_valid_username(username):
if username is None:
return False
user_list = User.objects.filter(username=username)
return len(user_list) == 0
|
AM Foam Products is poised with over 50 years experience, a committed staff, extensive capabilities, and quality vendors to satisfy all of your foam needs.
With over thirty grades of Polyurethane Foam in stock, we're ready to satisfy virtually any need you have for cushioning or packaging foam.
Our status as an Authorized Fabricator for Sealed Air Corporation ensures you access to the most innovative and highest-quality Polyethylene products on the market today.
Lastly, our F&K Skiver and skilled technicians allow us to guarantee the quality of a wide range of Crosslinked Polyethylene and Sponge Rubber materials (sheets and rolls) to serve the gasket industry.
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Module utility.
Usage:
module_handler.py print_descendant [<module_name>...]
module_handler.py create_png [<module_name>...]
module_handler.py clean [ --all | <module_name>...]
module_handler.py create_git_ignore [<module_name>...]
Options:
print_descendant Print the genealogy of the NEEDED_CHILDREN_MODULES
aka (children, subchildren, etc)
create_png Create a png of the file
NEEDED_CHILDREN_MODULES The path of NEEDED_CHILDREN_MODULES
by default try to open the file in the current path
"""
import os
import sys
import os.path
import shutil
try:
from docopt import docopt
from qp_path import QP_SRC, QP_ROOT, QP_PLUGINS
except ImportError:
print "source .quantum_package.rc"
raise
def is_module(path_module_rel):
return os.path.isfile(os.path.join(QP_SRC, path_module_rel,
"NEEDED_CHILDREN_MODULES"))
def is_plugin(path_module_rel):
return os.path.isfile(os.path.join(QP_PLUGINS, path_module_rel,
"NEEDED_CHILDREN_MODULES"))
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK) and not fpath.endswith(".py")
def get_dict_child(l_root_abs=None):
"""Loop over MODULE in QP_ROOT/src, open all the NEEDED_CHILDREN_MODULES
and create a dict[MODULE] = [sub module needed, ...]
"""
d_ref = dict()
if not l_root_abs:
l_root_abs = [QP_SRC]
for root_abs in l_root_abs:
for module_rel in os.listdir(root_abs):
module_abs = os.path.join(root_abs, module_rel)
try:
path_file = os.path.join(module_abs, "NEEDED_CHILDREN_MODULES")
with open(path_file, "r") as f:
l_children = f.read().split()
except IOError:
pass
else:
if module_rel not in d_ref:
d_ref[module_rel] = l_children
else:
print "Module {0} alredy defined"
print "Abort"
sys.exit(1)
return d_ref
def get_l_module_descendant(d_child, l_module):
"""
From a list of module return the module and descendant
"""
l = []
for module in l_module:
if module not in l:
l.append(module)
try:
l.extend(get_l_module_descendant(d_child, d_child[module]))
except KeyError:
print >> sys.stderr, "Error: "
print >> sys.stderr, "`{0}` is not a submodule".format(module)
print >> sys.stderr, "Check the typo (spelling, case, '/', etc.) "
sys.exit(1)
return list(set(l))
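# Example (a sketch): with d_child = {'a': ['b'], 'b': ['c'], 'c': []},
# get_l_module_descendant(d_child, ['a']) returns ['a', 'b', 'c']
# (order is arbitrary because the result is de-duplicated with set()).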
class ModuleHandler():
def __init__(self, l_root_abs=None):
self.dict_child = get_dict_child(l_root_abs)
@property
def l_module(self):
return self.dict_child.keys()
@property
def dict_parent(self):
"""
Get a dic of the first parent
"""
d_child = self.dict_child
d = {}
for module_name in d_child:
d[module_name] = [i for i in d_child.keys()
if module_name in d_child[i]]
return d
@property
def dict_descendant(self):
"""
Get a dic of all the genealogy desc (children and all_children)
"""
d = {}
d_child = self.dict_child
for module_name in d_child:
try:
d[module_name] = get_l_module_descendant(d_child,
d_child[module_name])
except KeyError:
print "Check NEEDED_CHILDREN_MODULES for {0}".format(
module_name)
sys.exit(1)
return d
@property
def dict_root(self):
"""
Return a dict(module_name) = module_boss
The top node in a tree.
"""
d_asc = self.dict_parent
d_desc = self.dict_descendant
l_all_module = self.l_module
dict_root = {}
for module in l_all_module:
dict_root[module] = [p for p in l_all_module
if module in [p] + d_desc[p] and not d_asc[p]
][0]
return dict_root
def l_descendant_unique(self, l_module):
d_desc = self.dict_descendant
d = {}
for module in l_module:
for e in d_desc[module]:
d[e] = 1
return d.keys()
def l_reduce_tree(self, l_module):
"""For a list of module in input return only the root"""
l_d_u = self.l_descendant_unique(l_module)
l_module_reduce = []
for module in l_module:
if module not in l_d_u:
l_module_reduce.append(module)
return l_module_reduce
def create_png(self, l_module):
"""Create the png of the dependency tree for a l_module"""
# Don't update if we are not in the main repository
from is_master_repository import is_master_repository
if not is_master_repository:
return
basename = "tree_dependency"
path = '{0}.png'.format(basename)
from graphviz import Digraph
        already_done = []
        def draw_module_edge(module, l_children):
            "Draw all the modules recursively"
            if module not in already_done:
                for children in l_children:
                    # Add edge
                    graph.edge(module, children)
                    # Recurse into the children's own children
                    draw_module_edge(children, d_ref[children])
                already_done.append(module)
graph = Digraph(comment=l_module, format="png", filename=basename)
d_ref = self.dict_child
# Create all the edge
for module in l_module:
graph.node(module, fontcolor="red")
draw_module_edge(module, d_ref[module])
# Try to render the png
# If not just touch it
try:
graph.render(cleanup=True)
except:
with open(path, 'a'):
os.utime(path, None)
return
if __name__ == '__main__':
arguments = docopt(__doc__)
if arguments['--all']:
l_module = [f for f in os.listdir(QP_SRC)
if os.path.isdir(os.path.join(QP_SRC, f))]
elif not arguments['<module_name>']:
dir_ = os.getcwd()
l_module = [os.path.basename(dir_)]
else:
l_module = arguments['<module_name>']
for module in l_module:
if not is_module(module):
print "{0} is not a valide module. Abort".format(module)
print "No NEEDED_CHILDREN_MODULES in it"
sys.exit(1)
m = ModuleHandler()
if arguments['print_descendant']:
for module in l_module:
print " ".join(sorted(m.l_descendant_unique([module])))
if arguments["create_png"]:
try:
m.create_png(l_module)
except RuntimeError:
pass
except SyntaxError:
print "Warning: The graphviz API dropped support for python 2.6."
pass
if arguments["clean"] or arguments["create_git_ignore"]:
l_dir = ['IRPF90_temp', 'IRPF90_man']
l_file = ["irpf90_entities", "tags", "irpf90.make", "Makefile",
"Makefile.depend", ".ninja_log", ".ninja_deps",
"ezfio_interface.irp.f"]
for module in l_module:
module_abs = os.path.realpath(os.path.join(QP_SRC, module))
l_symlink = m.l_descendant_unique([module])
l_exe = [f for f in os.listdir(module_abs)
if is_exe(os.path.join(module_abs, f))]
if arguments["clean"]:
for f in l_dir:
try:
shutil.rmtree(os.path.join(module_abs, f))
except:
pass
for symlink in l_symlink:
try:
os.unlink(os.path.join(module_abs, symlink))
except:
pass
for f in l_file:
try:
os.remove(os.path.join(module_abs, f))
except:
pass
for f in l_exe:
try:
os.remove(os.path.join(module_abs, f))
except:
pass
if arguments["create_git_ignore"]:
# Don't update if we are not in the main repository
from is_master_repository import is_master_repository
if not is_master_repository:
print >> sys.stderr, 'Not in the master repo'
sys.exit(0)
path = os.path.join(module_abs, ".gitignore")
with open(path, "w+") as f:
f.write("# Automatically created by {0} \n".format(__file__).replace(QP_ROOT,"$QP_ROOT"))
l_text = l_dir + l_file + l_symlink + l_exe
l_text.sort()
f.write("\n".join(l_text))
|
The Florida overtime minimum wage is $12.69 per hour, one and a half times the regular minimum wage of $8.46. If you earn more than the Florida minimum wage rate of $8.46, you are entitled to at least 1.5 times your regular hourly wage for all overtime worked.
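As a quick illustration of the calculation (a sketch in Python using only the figures above; not legal advice):

def overtime_rate(regular_hourly_wage):
    # Overtime under the FLSA is paid at 1.5 times the regular hourly rate.
    return regular_hourly_wage * 1.5

overtime_rate(8.46)  # -> 12.69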
The Fair Labor Standards Act (FLSA) explicitly protects Florida workers who file an overtime complaint from being penalized by their employer for filing the complaint with threats, suspension, or firing. The statute of limitations for filing an overtime claim is two years, although this is increased to three years in the case that your employer willfully and knowingly broke the law in failing to compensate you for overtime worked.
After checking to ensure that your job is not exempt from overtime under Florida law, the next step to take in filing an unpaid overtime claim is submitting an official complaint including all pertinent information to your local Department of Labor office. If the Florida Department of Labor cannot solve your overtime case, you can file a civil claim with the Florida state court system by levying an unpaid overtime lawsuit against your employer.
If you believe you are owed unpaid overtime, or think your employer is not complying with other labor laws, your first point of contact is your local field office of the Federal Department of Labor. There are seven field offices located in Florida.
Contact the office nearest to you, and they will be able to advise you of your legal rights and your next steps. You may want to have your employer's information and any relevant paystubs available. Remember, it is illegal for your employer to punish you for speaking to the Department of Labor! You can find contact information for the Florida Department of Labor here.
|
from base import db
class Experiment(db.Document):
exp_uid = db.StringField()
exp_key = db.StringField()
perm_key = db.StringField()
app_id = db.StringField()
name = db.StringField()
description = db.StringField()
instructions = db.StringField()
debrief = db.StringField()
params = db.DictField()
status = db.StringField(default="staging")
target_set = db.ReferenceField('TargetSet')
query_tries = db.IntField()
query_duration = db.IntField()
info = db.DictField()
    # Use setters for any parameters that can be changed outside of the constructor.
def set_status(self,status):
self.status = status
self.save()
def set_exp_uid(self,exp_uid):
self.exp_uid = exp_uid
self.save()
def set_exp_key(self,exp_key):
self.exp_key = exp_key
self.save()
def set_perm_key(self,perm_key):
self.perm_key = perm_key
self.save()
def set_info(self,info):
self.info = info
self.save()
|
My partner Maria Rosa Sharrow of Willow Street Shops received the ingredients that I sent out for her Bead Soup so I thought that I would show you what I stirred up for her.
I LOVE the copper clasp you sent her! Can't wait to see what she will do with the ingredients of her soup.
|
import os
import hashlib
import asyncio
import json
import logging
import psycopg2.extras
import aiopg
from . import nwdb
from . import core
from . import mailqueue
from . import room
@core.function
def authenticate(client, email, password):
"""
Authenticate the client by matching email and password.
Note, the password must not be sent in cleartext, it is sent as a
sha356(uid + sha256(password)), where uid is sent with the initial
welcome message.
"""
hash = client.uid
with (yield from nwdb.connection(readonly=True)) as conn:
cursor = yield from conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
yield from cursor.execute("""
select A.id, A.handle, A.email, A.password, M.clan_id, B.alliance_id, B.name as clan_name, C.name as alliance_name, M.id as membership_id
from member A
left outer join membership M on M.member_id = A.id
left outer join clan B on B.id = M.clan_id
left outer join alliance C on C.id = B.alliance_id
where lower(A.email) = lower(%s)
""", [email])
rs = yield from cursor.fetchone()
authenticated = False
if rs is None:
print("rsIsNone")
authenticated = False
else:
h = (hash + rs[3]).encode("utf8")
if hashlib.sha256(h).hexdigest() == password:
client.member_id = client.session["member_id"] = rs["id"]
client.clan_id = rs["clan_id"]
client.alliance_id = rs["alliance_id"]
client.handle = rs["handle"]
client.clan_name = rs["clan_name"]
client.alliance_name = rs["alliance_name"]
cursor.execute("select name from role A inner join role_owner B on B.membership_id = %s", rs["membership_id"])
client.roles = roles = [i.name for i in cursor.fetchall()]
client.member_info = dict(
id=client.member_id,
clan_id=client.clan_id,
alliance_id=client.alliance_id,
handle=client.handle,
clan_name=client.clan_name,
alliance_name=client.alliance_name,
roles=client.roles
)
authenticated = True
if 'Banned' in client.roles:
yield from client.send("member.banned")
authenticated = False
else:
authenticated = False
        if not authenticated:
yield from asyncio.sleep(3)
client.authenticated = authenticated
if authenticated:
yield from client.on_authenticated()
yield from client.send("member.info", client.member_info)
if client.clan_id is not None:
clan_room = room.Room.get("Clan " + str(client.clan_id))
yield from client.join(clan_room)
return authenticated
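# A minimal sketch (an assumption, not part of the original module) of how a
# client derives the `password` argument described in authenticate():
#
# import hashlib
# stored = hashlib.sha256(cleartext_password.encode("utf8")).hexdigest()
# password = hashlib.sha256((uid + stored).encode("utf8")).hexdigest()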
@core.function
def register(client, handle, email, password):
"""
Register a new user. Handle and email must be unique, and password
must be sha256(password), not cleartext.
"""
with (yield from nwdb.connection()) as conn:
cursor = yield from conn.cursor()
try:
yield from cursor.execute("""
insert into member(handle, email, password)
select %s, %s, %s
returning id
""", [handle, email, password])
except Exception as e:
return False
else:
rs = yield from cursor.fetchone()
client.session["member_id"] = rs[0]
yield from mailqueue.send(client, email, "Welcome.", "Thanks for registering.")
return True
@core.handler
def password_reset_request(client, email):
"""
Request a password reset for an email address. A code is sent to the
email address which must be passed in via th password_reset message.
"""
with (yield from nwdb.connection()) as conn:
cursor = yield from conn.cursor()
token = hashlib.md5(os.urandom(8)).hexdigest()[:8]
try:
yield from cursor.execute("""
insert into password_reset_request(member_id, token)
select id, %s from member where lower(email) = lower(%s)
returning id
""", [token, email])
rs = yield from cursor.fetchone()
except Exception as e:
yield from client.send("member.password_reset_request", False)
else:
yield from mailqueue.send(client, email, "Password Reset Request", "Code: " + token)
yield from client.send("member.password_reset_request", True)
@core.function
def password_reset(client, email, token, password):
"""
Change the password by using the provided token. The password must be
sha256(password), not cleartext.
"""
with (yield from nwdb.connection()) as conn:
cursor = yield from conn.cursor()
success = False
try:
yield from cursor.execute("""
update member A
set password = %s
where lower(A.email) = lower(%s)
and exists (select token from password_reset_request where member_id = A.id and lower(token) = lower(%s))
returning A.id
""", [password, email, token])
except Exception as e:
logging.warning(str(type(e)) + " " + str(e))
success = False
else:
rs = yield from cursor.fetchone()
if rs is None:
                success = False
else:
success = True
member_id = rs[0]
yield from cursor.execute("delete from password_reset_request where member_id = %s", [member_id])
yield from mailqueue.send(client, email, "Password Reset", "Success")
return success
@core.handler
def ban(client, member_id):
client.require_role('Operator')
with (yield from nwdb.connection()) as conn:
cursor = yield from conn.cursor()
success = False
yield from cursor.execute("""
select add_role(%s, 'Banned');
""", member_id)
@core.handler
def unban(client, member_id):
client.require_role('Operator')
with (yield from nwdb.connection()) as conn:
cursor = yield from conn.cursor()
yield from cursor.execute("""
select remove_role(%s, 'Banned');
""", member_id)
@core.handler
def add_role(client, member_id, role):
client.require_role('Operator')
with (yield from nwdb.connection()) as conn:
cursor = yield from conn.cursor()
success = False
yield from cursor.execute("""
select add_role(%s, %s);
""", member_id, role)
@core.handler
def remove_role(client, member_id, role):
client.require_role('Operator')
with (yield from nwdb.connection()) as conn:
cursor = yield from conn.cursor()
yield from cursor.execute("""
select remove_role(%s, %s);
""", member_id, role)
@core.handler
def set_object(client, key, value):
"""
Save an arbitrary object for a member under a key.
"""
client.require_auth()
value = json.dumps(value)
with (yield from nwdb.connection()) as conn:
cursor = yield from conn.cursor()
yield from cursor.execute("""
update member_store set value = %s
where key = %s and member_id = %s
returning id
""", [value, key, client.member_id])
rs = yield from cursor.fetchone()
if rs is None:
yield from cursor.execute("""
insert into member_store(member_id, key, value)
select %s, %s, %s
""", [client.member_id, key, value])
@core.function
def get_object(client, key):
"""
Retrieves an arbitrary object previously stored by the member under a key.
"""
client.require_auth()
with (yield from nwdb.connection()) as conn:
cursor = yield from conn.cursor()
yield from cursor.execute("""
select value from member_store
where member_id = %s and key = %s
""", [client.member_id, key])
rs = yield from cursor.fetchone()
if rs is not None:
rs = json.loads(rs[0])
return rs
@core.function
def get_object_keys(client):
"""
Retrieves all keys stored by the member.
"""
client.require_auth()
with (yield from nwdb.connection()) as conn:
cursor = yield from conn.cursor()
yield from cursor.execute("""
select key from member_store
where member_id = %s
""", [client.member_id])
rs = yield from cursor.fetchall()
return list(i[0] for i in rs)
|
Franconia Sculpture Park's Arts Administration Internship provides 'hands-on' immersion in the daily operations of a progressive arts organization located in the scenic St. Croix River Valley, 45 miles NE of Minneapolis/St. Paul, MN, USA. Franconia provides residence and workspace to emerging and established artists in a 43-acre sculpture park.
There are two different types of administrative interns. Program interns will support Franconia’s education & public programming. The Development intern will work directly with the development manager to support fundraising campaigns. Both interns work closely with Franconia’s staff so interns have a rare opportunity to be immersed in many types of projects and work experiences.
|
# -*- coding: utf-8 -*-
"""
Copyright 2013-2014 Olivier Cortès <[email protected]>
This file is part of the 1flow project.
1flow is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
1flow is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public
License along with 1flow. If not, see http://www.gnu.org/licenses/
"""
import logging
from statsd import statsd
# from constance import config
from celery import chain as tasks_chain
from django.conf import settings
from django.db import models, IntegrityError, transaction
from django.db.models.signals import post_save, pre_save, pre_delete
from django.utils.translation import ugettext_lazy as _
from django.utils.text import slugify
from simple_history.models import HistoricalRecords
from sparks.foundations.utils import combine_dicts
from oneflow.base.utils import register_task_method
from oneflow.base.utils.http import clean_url
from oneflow.base.utils.dateutils import now, datetime, benchmark
from ..common import (
DjangoUser as User,
CONTENT_TYPES,
ARTICLE_ORPHANED_BASE,
)
from ..processor import get_default_processing_chain_for
from common import generate_orphaned_hash
from base import (
BaseItemQuerySet,
BaseItemManager,
BaseItem,
baseitem_process_task,
baseitem_create_reads_task,
)
from original_data import baseitem_postprocess_original_data_task
from abstract import (
UrlItem,
ContentItem,
baseitem_absolutize_url_task,
)
LOGGER = logging.getLogger(__name__)
MIGRATION_DATETIME = datetime(2014, 11, 1)
__all__ = [
'Article',
'create_article_from_url',
# Tasks will be added below.
]
def create_article_from_url(url, feeds, origin):
""" Create an article from a web url, in feeds, with an origin. """
# TODO: find article publication date while fetching content…
# TODO: set Title during fetch…
try:
new_article, created = Article.create_article(
url=url.replace(' ', '%20'),
title=_(u'Imported item from {0}').format(clean_url(url)),
feeds=feeds, origin=origin)
except:
# NOTE: duplication handling is already
# taken care of in Article.create_article().
LOGGER.exception(u'Article creation from URL %s failed.', url)
return None, False
mutualized = created is None
if created or mutualized:
for feed in feeds:
feed.recent_items_count += 1
feed.all_items_count += 1
for feed in feeds:
if new_article.date_published:
if new_article.date_published > feed.latest_item_date_published:
feed.latest_item_date_published = new_article.date_published
# Even if the article wasn't created, we need to create reads.
# In the case of a mutualized article, it will be fetched only
# once, but all subscribers of all feeds must be connected to
# it to be able to read it.
for subscription in feed.subscriptions.all():
subscription.create_read(new_article, verbose=created)
    # Don't forget the parentheses, else we return ``False`` every time.
return new_article, created or (None if mutualized else False)
def _format_feeds(feeds):
""" Return feeds in a compact string form for displaying in logs. """
return u', '.join(u'{0} ({1})'.format(f.name, f.id) for f in feeds)
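# Minimal usage sketch (an assumption, not part of the original module):
#
# article, created = create_article_from_url(
#     u'http://example.com/some-post',
#     feeds=[my_feed],           # one or more Feed instances
#     origin=ORIGINS.WEBIMPORT)  # hypothetical origin constant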
# —————————————————————————————————————————————————————————— Manager / QuerySet
def BaseItemQuerySet_article_method(self):
""" Patch BaseItemQuerySet to know how to return articles. """
return self.instance_of(Article)
BaseItemQuerySet.article = BaseItemQuerySet_article_method
# ——————————————————————————————————————————————————————————————————————— Model
# BIG FAT WARNING: inheritance order matters. BaseItem must come first,
# else `create_post_task()` is not found by register_task_method().
class Article(BaseItem, UrlItem, ContentItem):
""" Some kind of news article, or web page. """
class Meta:
app_label = 'core'
verbose_name = _(u'Article')
verbose_name_plural = _(u'Articles')
objects = BaseItemManager()
# Django simple history.
history = HistoricalRecords()
version_description = models.CharField(
max_length=128, null=True, blank=True,
verbose_name=_(u'Version description'),
help_text=_(u'Set by content processors or author to know with which '
u'processor chain this version was produced. Can be a '
u'code or a processor chain ID/slug to help querying.')
)
publishers = models.ManyToManyField(
User, null=True, blank=True, related_name='publications')
# —————————————————————————————————————————————————————————————— Django
def __unicode__(self):
return _(u'{0} (#{1}) from {2}').format(
self.name[:40] + (self.name[40:] and u'…'), self.id, self.url)
# —————————————————————————————————————————————————————————————— Properties
@property
def is_good(self):
""" Return True if all our base classes don't return False. """
if not BaseItem.is_good.fget(self) \
or not UrlItem.is_good.fget(self) \
or not ContentItem.is_good.fget(self):
return False
return True
@property
def is_processed(self):
""" See if all relevant processors have run on the current instance. """
if not BaseItem.is_processed.fget(self) \
or not UrlItem.is_processed.fget(self) \
or not ContentItem.is_processed.fget(self):
return False
return True
@property
def processing_parameters(self):
""" Return a merge of all inherited classes processing parameters.
.. todo:: get and merge feeds parameters, if any.
.. todo:: cache the result via `cacheops` if possible and relevant.
"""
return combine_dicts(
BaseItem.processing_parameters.fget(self),
combine_dicts(
UrlItem.processing_parameters.fget(self),
ContentItem.processing_parameters.fget(self)
)
)
# ————————————————————————————————————————————————————————————————— Methods
def get_processing_chain(self):
""" Return a processor chain suitable for current article.
If our website has one, it will be returned.
Else, the default processor chain for articles will be returned.
"""
website = self.website
if website.processing_chain is None:
return get_default_processing_chain_for(self._meta.model)
else:
return website.processing_chain
def processing_must_abort(self, verbose=True, force=False, commit=True):
""" Return True if processing of current instance must be aborted.
.. versionadded:: 0.90.x. This is the new method, used by the 2015
processing infrastructure.
"""
# HEADS UP: we do not test self.is_processed, it's up to every
# base class to do it in their processing_must_abort()
# method.
# NOTE: we use all() and not any(). This is intentional. In the
# current processors implementation this is needed.
#
# Example: When an article URL is absolutized,
# UrlItem.processing_must_abort() will return True.
# But we must not abort the whole processing: we still
# need to continue processing to handle the `content`
# downloading and conversion to markdown (and soon
# {pre,post}_processing content enhancements.
#
# As every processor will be protected by its accepts()
# method, there will never be no double-processing. Only
# a little too much testing, at worst.
#
# Even if we manage to forward the current processing
# category to the processing_must_abort() method, there
# will always be the accepts() tests. Bypassing them is
# a design error for me. In this context, we would only
# gain the all(True) → any(False) transformation.
#
# And that would imply much more code. Thus, I consider
# the current implementation an acceptable tradeoff.
#
# As a final addition, we have exactly the same logic in
# Article.is_processed, and there it feels perfectly fine:
# an article is not considered processed if any of its part
# is not. Perhaps it's just the name of the current method
# that is a little misleading…
return all(
klass.processing_must_abort(self, verbose=verbose,
force=force, commit=commit)
for klass in (BaseItem, UrlItem, ContentItem)
)
def reset(self, force=False, commit=True):
""" clear the article content & content type.
This method exists for testing / debugging purposes.
"""
if settings.DEBUG:
force = True
if not force:
LOGGER.warning(u'Cannot reset article without `force` argument.')
return
for klass in (BaseItem, UrlItem, ContentItem):
try:
klass.reset(self, force=force, commit=False)
except:
LOGGER.exception('%s %s: could not reset %s class.',
self._meta.verbose_name, self.id, klass)
if commit:
            # We are resetting, don't waste a version.
self.save_without_historical_record()
def reprocess(self, verbose=True):
""" A shortcut to reset()/process() without the need to absolutize. """
url_absolute = self.url_absolute
is_orphaned = self.is_orphaned
redo = not url_absolute
self.reset(force=True)
if redo:
self.absolutize_url()
else:
self.url_absolute = url_absolute
self.is_orphaned = is_orphaned
self.process(verbose=verbose)
@classmethod
def create_article(cls, title, url, feeds, **kwargs):
""" Returns ``True`` if article created, ``False`` if a pure duplicate
(already exists in the same feed), ``None`` if exists but not in
the same feed. If more than one feed given, only returns ``True``
or ``False`` (mutualized state is not checked). """
tags = kwargs.pop('tags', [])
if url is None:
# We have to build a reliable orphaned URL, because orphaned
# articles are often duplicates. RSS feeds serve us many times
# the same article, without any URL, and we keep recording it
# as new (but orphaned) content… Seen 20141111 on Chuck Norris
# facts, where the content is in the title, and there is no URL.
# We have 860k+ items, out of 1k real facts… Doomed.
url = ARTICLE_ORPHANED_BASE + generate_orphaned_hash(title, feeds)
defaults = {
'name': title,
'is_orphaned': True,
# Skip absolutization, it's useless.
'url_absolute': True
}
defaults.update(kwargs)
article, created = cls.objects.get_or_create(url=url,
defaults=defaults)
# HEADS UP: no statsd here, it's handled by post_save().
else:
url = clean_url(url)
defaults = {'name': title}
defaults.update(kwargs)
article, created = cls.objects.get_or_create(url=url,
defaults=defaults)
if created:
created_retval = True
LOGGER.info(u'Created %sarticle %s %s.', u'orphaned '
if article.is_orphaned else u'', article.id,
u'in feed(s) {0}'.format(_format_feeds(feeds))
if feeds else u'without any feed')
else:
created_retval = False
if article.duplicate_of_id:
                LOGGER.info(u'Swapping duplicate %s %s for master %s on '
u'the fly.', article._meta.verbose_name,
article.id, article.duplicate_of_id)
article = article.duplicate_of
if len(feeds) == 1 and feeds[0] not in article.feeds.all():
# This article is already there, but has not yet been
# fetched for this feed. It's mutualized, and as such
                # it is considered as partly new. At least, it's not
# as bad as being a true duplicate.
created_retval = None
LOGGER.info(u'Mutualized article %s in feed(s) %s.',
article.id, _format_feeds(feeds))
article.create_reads(feeds=feeds)
else:
# No statsd, because we didn't create any record in database.
LOGGER.info(u'Duplicate article %s in feed(s) %s.',
article.id, _format_feeds(feeds))
# Special case where a mutualized article arrives from RSS
# (with date/author) while it was already here from Twitter
# (no date/author). Post-processing of original data will
            # handle the authors, but at least we update the date now for
# users to have sorted articles until original data is
# post-processed (this can take time, given the server load).
if article.date_published is None:
date_published = kwargs.get('date_published', None)
if date_published is not None:
article.date_published = date_published
article.save()
# Tags & feeds are ManyToMany, they
# need the article to be saved before.
if tags:
try:
with transaction.atomic():
article.tags.add(*tags)
except IntegrityError:
LOGGER.exception(u'Could not add tags %s to article %s',
tags, article.id)
if feeds:
try:
with transaction.atomic():
article.feeds.add(*feeds)
except:
LOGGER.exception(u'Could not add feeds to article %s',
article.id)
# Get a chance to catch the duplicate if workers were fast.
# At the cost of another DB read, this will save some work
# in repair scripts, and avoid some writes when creating reads.
article = cls.objects.get(id=article.id)
if article.duplicate_of_id:
if settings.DEBUG:
                LOGGER.debug(u'Caught on-the-fly duplicate %s, returning '
u'master %s instead.', article.id,
article.duplicate_of_id)
return article.duplicate_of, False
return article, created_retval
def post_create_task(self, apply_now=False):
""" Method meant to be run from a celery task. """
if apply_now:
try:
result = baseitem_absolutize_url_task.apply((self.id, ))
if result is not False:
baseitem_create_reads_task.apply((self.id, ))
baseitem_process_task.apply((self.id, ))
baseitem_postprocess_original_data_task.apply((self.id, ))
except:
LOGGER.exception(u'Applying Article.post_create_task(%s) '
u'failed.', self)
return
post_absolutize_chain = tasks_chain(
# HEADS UP: both subtasks are immutable, we just
# want the group to run *after* the absolutization.
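            # Celery's si() builds immutable signatures, so the result
            # of the absolutization task is not injected into their args.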
baseitem_create_reads_task.si(self.id),
baseitem_process_task.si(self.id),
baseitem_postprocess_original_data_task.si(self.id),
)
# OLD NOTES: randomize the absolutization a little, to avoid
# http://dev.1flow.net/development/1flow-dev-alternate/group/1243/
# as much as possible. This is not yet a full-featured solution,
# but it's completed by the `fetch_limit` thing.
#
# Absolutization is the condition of everything else. If it
# doesn't succeed:
# - no bother trying to post-process author data for example,
# because we need the absolutized website domain to make
# authors unique and worthful.
# - no bother fetching content: it uses the same mechanisms as
# absolutize_url(), and will probably fail the same way.
#
# Thus, we link the post_absolutize_chain as a callback. It will
# be run only if absolutization succeeds. Thanks, celery :-)
baseitem_absolutize_url_task.apply_async(
args=(self.id, ),
kwargs={'stop_chain_on_false': True},
link=post_absolutize_chain
)
#
# TODO: create short_url
#
# TODO: remove_useless_blocks, eg:
# <p><a href="http://addthis.com/bookmark.php?v=250">
# <img src="http://cache.addthis.com/cachefly/static/btn/
# v2/lg-share-en.gif" alt="Bookmark and Share" /></a></p>
#
# (in 51d6a1594adc895fd21c3475, see Notebook)
#
# TODO: link_replace (by our short_url_link for click statistics)
# TODO: images_fetch
# eg. handle <img alt="2013-05-17_0009.jpg"
# data-lazyload-src="http://www.vcsphoto.com/blog/wp-content/uploads/2013/05/2013-05-17_0009.jpg" # NOQA
# src="http://www.vcsphoto.com/blog/wp-content/themes/prophoto4/images/blank.gif" # NOQA
# height="1198" sidth="900"/>
#
# TODO: authors_fetch
# TODO: publishers_fetch
# TODO: duplicates_find (content wise, not URL wise)
#
return
@classmethod
def repair_missing_authors_migration_201411(cls):
# from oneflow.core.tasks.migration import vacuum_analyze
articles = Article.objects.filter(
authors=None,
date_created__gt=datetime(2014, 10, 31))
count = articles.count()
done = 0
LOGGER.info(u'Starting repairing %s missing authors @%s', count, now())
with benchmark(u'Fix missing authors on rel-DB fetched content…'):
for article in articles:
article.postprocess_original_data(force=True)
# if done % 25000 == 0:
# vacuum_analyze()
done += 1
# ———————————————————————————————————————————————————————————————— Celery Tasks
register_task_method(Article, Article.post_create_task,
globals(), queue=u'create')
# register_task_method(Article, Article.find_image,
# globals(), queue=u'fetch', default_retry_delay=3600)
# ————————————————————————————————————————————————————————————————————— Signals
def article_pre_save(instance, **kwargs):
""" Make a slug if none. """
article = instance
if not article.slug:
article.slug = slugify(article.name)
# if settings.DEBUG:
# if getattr(instance, 'skip_history_when_saving', False):
# LOGGER.info(u'%s %s: SAVE without history.',
# instance._meta.verbose_name,
# instance.id)
# else:
# LOGGER.info(u'%s %s: SAVE WITH HISTORY.',
# instance._meta.verbose_name,
# instance.id)
def article_post_save(instance, **kwargs):
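    """ Update statsd counters; for new articles, queue the post-create task. """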
article = instance
if kwargs.get('created', False):
with statsd.pipeline() as spipe:
spipe.gauge('articles.counts.total', 1, delta=True)
spipe.gauge('articles.counts.empty', 1, delta=True)
if article.is_orphaned:
spipe.gauge('articles.counts.orphaned', 1, delta=True)
if article.duplicate_of:
spipe.gauge('articles.counts.duplicates', 1, delta=True)
if article.url_error:
spipe.gauge('articles.counts.url_error', 1, delta=True)
if article.content_error:
spipe.gauge('articles.counts.content_error', 1, delta=True)
# Some articles are created "already orphaned" or duplicates.
# In the archive database this is more immediate than looking
# up the database name.
if not (article.is_orphaned or article.duplicate_of):
# MIGRATION: remove this "if".
if article.date_created >= MIGRATION_DATETIME:
# HEADS UP: this task name will be registered later
# by the register_task_method() call.
article_post_create_task.delay(article.id) # NOQA
def article_pre_delete(instance, **kwargs):
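    """ Decrement statsd counters and clear processing errors before deletion. """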
article = instance
with statsd.pipeline() as spipe:
spipe.gauge('articles.counts.total', -1, delta=True)
if article.is_orphaned:
spipe.gauge('articles.counts.orphaned', -1, delta=True)
if article.duplicate_of_id:
spipe.gauge('articles.counts.duplicates', -1, delta=True)
if article.url_error:
spipe.gauge('articles.counts.url_error', -1, delta=True)
if article.content_error:
spipe.gauge('articles.counts.content_error', -1, delta=True)
if article.content_type == CONTENT_TYPES.HTML:
spipe.gauge('articles.counts.html', -1, delta=True)
elif article.content_type in (CONTENT_TYPES.MARKDOWN, ):
spipe.gauge('articles.counts.markdown', -1, delta=True)
elif article.content_type in (None, CONTENT_TYPES.NONE, ):
spipe.gauge('articles.counts.empty', -1, delta=True)
if instance.processing_errors.exists():
try:
instance.processing_errors.clear()
except:
LOGGER.exception(u'%s %s: could not clear processing errors',
instance._meta.verbose_name, instance.id)
pre_delete.connect(article_pre_delete, sender=Article)
pre_save.connect(article_pre_save, sender=Article)
post_save.connect(article_post_save, sender=Article)
|
“Who Wrote the Bible” by Richard Elliot Friedman is a brilliant survey of modern views and concepts concerning the creation of the central book of Christianity. The author manages to gather a huge body of factual material into a relatively small volume and to explain it in simple, readable terms. He discusses the authorship of the Torah, that is, of the first five books of the Bible: Genesis, Exodus, Leviticus, Numbers, and Deuteronomy. Despite claims that such investigations undermine faith, Friedman uses a purely scientific approach without too much political correctness. Still, he cannot be called an atheist or an anti-religious scholar; he simply gathers the available factual evidence and presents it to the reader in the form of an exciting book. Friedman does not discover new facts or theories; he summarizes those found before him.
"“Who Wrote the Bible” by Richard Elliot Friedman" topic?
Friedman does not attempt to make his book a detective story or a mystery. He pays some attention to the undiscovered secrets of antiquity, such as the lost Ark of the Covenant, but he concentrates on his key subject: the authorship of the Holy Book. He also pays much attention to the emotional conditions and motives of those who created the modern variant of the Bible. He argues that there are actually two competing texts in the Old Testament, one probably authored by the people of Israel and the other by the people of Judah. Accordingly, there are two different views of God: one God is called Jahveh and marked “J”, the other is called Elohim and marked “E”. Jahveh supports Judah and oppresses Israel, while Elohim does the opposite. So, concludes Friedman, this is a reflection of two competing kingdoms, each claiming to worship the “real” God [Friedman 1997, 19].
According to Friedman, the two texts were combined at about 722 BC, after Judah accepted refugees from Israel following its fall to Assyria. Perhaps the Israelite lobby was very influential at the time, and this led Judah to include some fragments of the “E” text in the renewed variant of the Torah.
Another competition inside the Biblical text is a dispute between groups of priests who, in their striving for power, were ready to put words into the mouth of God Himself. The most influential of these groups were the Aaronids and the Mushites. The Aaronids had their centre in Jerusalem and were believed to descend from Aaron; the Mushites were most numerous in Shiloh and were said to descend from Moses. After the Aaronids supported Solomon as successor of David, they managed to establish their power firmly, and the Mushites started losing their influence. The Aaronids created a text called “P” (“priestly”, because it concentrates on clerical subjects and underestimates law), in which they argued that, as descendants of Aaron, they alone had the right to be priests, leaving a secondary role to the other Levites. The Mushites responded by creating the “D” variant of the text (from “Deuteronomist”, because D includes Deuteronomy), in which they told how Moses ordered the Mushites to rule the people, and even accused the Aaronids of deliberate lies [Friedman 1997, 73].
The last “layer” of the text is called “R”, for redactor. Obvious contradictions in the text of the Holy Book had to be removed by later priests and rulers, so they corrected the text as they saw fit, beginning from about 500 BC and continuing up to the Bible we know today. The “R” cannot be attributed to a single author or group of authors: corrections were made during the great Councils of the early Christian Church, as well as by translators of the Bible from Hebrew and Aramaic into Greek and Latin. The last of these editions, the King James Bible, dates back to 1611.
Despite his skeptical approach and his account of the controversial history of the Biblical text, Friedman never disputes that the authors were inspired by God; he only speaks of the humans who wrote it. In his opinion, a profound investigation of the Biblical text provides a deeper theological understanding of what God was and what God is, as well as of how God was seen in different times and by different peoples. For example, the “J” God embodies absolute and impartial justice, in contrast to the combined “EJ” God, who is merciful and full of compassion [Friedman 1997, 85]. Another useful application of his studies is the opportunity to reconstruct the historic events described in the Old Testament.
Those events were greatly modified in the “R” version in order to make them moralistic and hortatory. In Friedman’s view, both those who see the Bible as a historic document and those who believe it to be sacred scripture would benefit from knowledge of its composition and structure. As he puts it, in whichever way the Bible was created, it remains the Book of Books, and the reader will admire it regardless, because no one can know the true intention of God for sure, and no one can know whether it was a divine plan to create the Book of Books in exactly the way it was created.
In his book Friedman is quite successful in summarizing facts and giving his own evaluation of them. His language is simple, perhaps even too simple for such a complicated subject, as it invites the reader to take the book as an amusing story and in a way prevents a deeper understanding of the concept. Compared with other related research, Friedman is one of the most objective and truth-seeking authors.
He does not impose a particular theory upon the reader, but merely provides information for thought and analysis. Some critics have noted that the book is too simple, as it does not discover new facts or theories and is purely a personal synthesis. One may respond that there are enough complicated Biblical investigations that are hard to read and understand; Friedman manages to satisfy the demand of general readers, who know little of the subject, for a qualified yet understandable explanation of Biblical history.
On the other hand, the book lacks references to sources and is poorly supported by fundamental studies. Friedman gives facts but does not tie them to evidence that can be checked by the reader. In many ways the book is too simplified and does not examine the deep, complicated causes behind the facts. It is also to a great extent uncritical: many of the facts are explained from a personal standpoint, without arguments pro and contra.
Considering the above, the book can be recommended to beginners in Biblical studies. It raises many of the complicated questions and draws the reader’s attention to alternative theories; however, it may not satisfy the demand for deeper investigation. It may therefore be viewed as a useful introduction and an incitement to further research.
|
import wx
import generic_class
from .constants import control, dtype, substitution_map
import os
import yaml
import modelDesign_window
ID_RUN = 11
class ModelConfig(wx.Frame):
# this creates the wx.Frame mentioned above in the class declaration
def __init__(self, parent, gpa_settings=None):
wx.Frame.__init__(
self, parent=parent, title="CPAC - Create New FSL Model", size=(900, 650))
        if gpa_settings is None:
self.gpa_settings = {}
self.gpa_settings['subject_list'] = ''
self.gpa_settings['pheno_file'] = ''
self.gpa_settings['subject_id_label'] = ''
self.gpa_settings['design_formula'] = ''
self.gpa_settings['mean_mask'] = ''
self.gpa_settings['custom_roi_mask'] = 'None'
self.gpa_settings['coding_scheme'] = ''
self.gpa_settings['use_zscore'] = True
self.gpa_settings['derivative_list'] = ''
self.gpa_settings['repeated_measures'] = ''
self.gpa_settings['group_sep'] = ''
self.gpa_settings['grouping_var'] = 'None'
self.gpa_settings['z_threshold'] = ''
self.gpa_settings['p_threshold'] = ''
else:
self.gpa_settings = gpa_settings
self.parent = parent
mainSizer = wx.BoxSizer(wx.VERTICAL)
vertSizer = wx.BoxSizer(wx.VERTICAL)
self.panel = wx.Panel(self)
self.window = wx.ScrolledWindow(self.panel, size=(-1,300))
self.page = generic_class.GenericClass(self.window, " FSL Model Setup")
self.page.add(label="Subject List ",
control=control.COMBO_BOX,
name="subject_list",
type=dtype.STR,
comment="Full path to a list of subjects to be included in the model.\n\nThis should be a text file with one subject per line.\n\nTip 1: A list in this format contaning all subjects run through CPAC was generated along with the main CPAC subject list (see subject_list_group_analysis.txt).\n\nTIp 2: An easy way to manually create this file is to copy the subjects column from your Regressor/EV spreadsheet.",
values=self.gpa_settings['subject_list'])
self.page.add(label="Phenotype/EV File ",
control=control.COMBO_BOX,
name="pheno_file",
type=dtype.STR,
comment="Full path to a .csv file containing EV information for each subject.\n\nTip: A file in this format (containing a single column listing all subjects run through CPAC) was generated along with the main CPAC subject list (see template_phenotypic.csv).",
values=self.gpa_settings['pheno_file'])
self.page.add(label="Subjects Column Name ",
control=control.TEXT_BOX,
name="subject_id_label",
type=dtype.STR,
comment="Name of the subjects column in your EV file.",
values=self.gpa_settings['subject_id_label'],
style=wx.EXPAND | wx.ALL,
size=(160, -1))
load_panel_sizer = wx.BoxSizer(wx.HORIZONTAL)
load_pheno_btn = wx.Button(self.window, 2, 'Load Phenotype File', (220,10), wx.DefaultSize, 0)
load_panel_sizer.Add(load_pheno_btn)
self.Bind(wx.EVT_BUTTON, self.populateEVs, id=2)
self.page.add_pheno_load_panel(load_panel_sizer)
# experimental checkbox row stuff
self.page.add(label = "Model Setup ",
control = control.CHECKBOX_GRID,
name = "model_setup",
                      type = 9,  # dtype.LBOOL
values = '',
comment="A list of EVs from your phenotype file will populate in this window. From here, you can select whether the EVs should be treated as categorical or if they should be demeaned (continuous/non-categorical EVs only). 'MeanFD', 'MeanFD_Jenkinson', 'Measure Mean', and 'Custom_ROI_Mean' will also appear in this window automatically as options to be used as regressors that can be included in your model design. Note that the MeanFD and mean of measure values are automatically calculated and supplied by C-PAC via individual-level analysis.",
size = (450, -1))
self.page.add(label="Design Matrix Formula ",
control=control.TEXT_BOX,
name="design_formula",
type=dtype.STR,
comment="Specify the formula to describe your model design. Essentially, including EVs in this formula inserts them into the model. The most basic format to include each EV you select would be 'EV + EV + EV + ..', etc. You can also select to include MeanFD, MeanFD_Jenkinson, Measure_Mean, and Custom_ROI_Mean here. See the C-PAC User Guide for more detailed information regarding formatting your design formula.",
values= self.gpa_settings['design_formula'],
size=(450, -1))
self.page.add(label="Measure Mean Generation ",
control=control.CHOICE_BOX,
name='mean_mask',
type=dtype.LSTR,
comment = "Choose whether to use a group mask or individual-specific mask when calculating the output means to be used as a regressor.\n\nThis only takes effect if you include the 'Measure_Mean' regressor in your Design Matrix Formula.",
values=["Group Mask","Individual Mask"])
self.page.add(label="Custom ROI Mean Mask ",
control=control.COMBO_BOX,
name="custom_roi_mask",
type=dtype.STR,
comment="Optional: Full path to a NIFTI file containing one or more ROI masks. The means of the masked regions will then be computed for each subject's output and will be included in the model as regressors (one for each ROI in the mask file) if you include 'Custom_ROI_Mean' in the Design Matrix Formula.",
values=self.gpa_settings['custom_roi_mask'])
self.page.add(label="Use z-score Standardized Derivatives ",
control=control.CHOICE_BOX,
name='use_zscore',
type=dtype.BOOL,
comment="Run the group analysis model on the z-score " \
"standardized version of the derivatives you " \
"choose in the list below.",
values=["True","False"])
self.page.add(label = "Select Derivatives ",
control = control.CHECKLIST_BOX,
name = "derivative_list",
type = dtype.LSTR,
values = ['ALFF',
'ALFF (smoothed)',
'f/ALFF',
'f/ALFF (smoothed)',
'ReHo',
'ReHo (smoothed)',
'ROI Average SCA',
'ROI Average SCA (smoothed)',
'Voxelwise SCA',
'Voxelwise SCA (smoothed)',
'Dual Regression',
'Dual Regression (smoothed)',
'Multiple Regression SCA',
'Multiple Regression SCA (smoothed)',
'Network Centrality',
'Network Centrality (smoothed)',
'VMHC (z-score std only)',
'VMHC z-stat (z-score std only)'],
comment = "Select which derivatives you would like to include when running group analysis.\n\nWhen including Dual Regression, make sure to correct your P-value for the number of maps you are comparing.\n\nWhen including Multiple Regression SCA, you must have more degrees of freedom (subjects) than there were time series.",
size = (350,160))
self.page.add(label="Coding Scheme ",
control=control.CHOICE_BOX,
name="coding_scheme",
type=dtype.LSTR,
comment="Choose the coding scheme to use when generating your model. 'Treatment' encoding is generally considered the typical scheme. Consult the User Guide for more information.",
values=["Treatment", "Sum"])
self.page.add(label="Model Group Variances Separately ",
control=control.CHOICE_BOX,
name='group_sep',
type=dtype.NUM,
comment="Specify whether FSL should model the variance for each group separately.\n\nIf this option is enabled, you must specify a grouping variable below.",
values=['Off', 'On'])
self.page.add(label="Grouping Variable ",
control=control.TEXT_BOX,
name="grouping_var",
type=dtype.STR,
comment="The name of the EV that should be used to group subjects when modeling variances.\n\nIf you do not wish to model group variances separately, set this value to None.",
values=self.gpa_settings['grouping_var'],
size=(160, -1))
self.page.add(label="Run Repeated Measures ",
control=control.CHOICE_BOX,
name='repeated_measures',
type=dtype.BOOL,
comment="Run repeated measures to compare different " \
"scans (must use the group analysis subject " \
"list and phenotypic file formatted for " \
"repeated measures.",
values=["False","True"])
self.page.add(label="Z threshold ",
control=control.FLOAT_CTRL,
name='z_threshold',
type=dtype.NUM,
comment="Only voxels with a Z-score higher than this value will be considered significant.",
values=2.3)
self.page.add(label="Cluster Significance Threshold ",
control=control.FLOAT_CTRL,
name='p_threshold',
type=dtype.NUM,
comment="Significance threshold (P-value) to use when doing cluster correction for multiple comparisons.",
values=0.05)
self.page.set_sizer()
if 'group_sep' in self.gpa_settings.keys():
for ctrl in self.page.get_ctrl_list():
name = ctrl.get_name()
if name == 'group_sep':
if self.gpa_settings['group_sep'] == True:
ctrl.set_value('On')
elif self.gpa_settings['group_sep'] == False:
ctrl.set_value('Off')
mainSizer.Add(self.window, 1, wx.EXPAND)
btnPanel = wx.Panel(self.panel, -1)
hbox = wx.BoxSizer(wx.HORIZONTAL)
buffer = wx.StaticText(btnPanel, label="\t\t\t\t\t\t")
hbox.Add(buffer)
cancel = wx.Button(btnPanel, wx.ID_CANCEL, "Cancel", (
220, 10), wx.DefaultSize, 0)
self.Bind(wx.EVT_BUTTON, self.cancel, id=wx.ID_CANCEL)
hbox.Add(cancel, 0, flag=wx.LEFT | wx.BOTTOM, border=5)
load = wx.Button(btnPanel, wx.ID_ADD, "Load Settings", (
200, -1), wx.DefaultSize, 0)
self.Bind(wx.EVT_BUTTON, self.load, id=wx.ID_ADD)
hbox.Add(load, 0.6, flag=wx.LEFT | wx.BOTTOM, border=5)
next = wx.Button(btnPanel, 3, "Next >", (200, -1), wx.DefaultSize, 0)
self.Bind(wx.EVT_BUTTON, self.load_next_stage, id=3)
hbox.Add(next, 0.6, flag=wx.LEFT | wx.BOTTOM, border=5)
# reminder: functions bound to buttons require arguments
# (self, event)
btnPanel.SetSizer(hbox)
#text_sizer = wx.BoxSizer(wx.HORIZONTAL)
#measure_text = wx.StaticText(self.window, label='Note: Regressor options \'MeanFD\' and \'Measure_Mean\' are automatically demeaned prior to being inserted into the model.')
#text_sizer.Add(measure_text)
#mainSizer.Add(text_sizer)
mainSizer.Add(
btnPanel, 0.5, flag=wx.ALIGN_RIGHT | wx.RIGHT, border=20)
self.panel.SetSizer(mainSizer)
self.Show()
# this fires only if we're coming BACK to this page from the second
# page, and these parameters are already pre-loaded. this is to
# automatically repopulate the 'Model Setup' checkbox grid and other
# settings under it
if self.gpa_settings['pheno_file'] != '':
phenoFile = open(os.path.abspath(self.gpa_settings['pheno_file']))
phenoHeaderString = phenoFile.readline().rstrip('\r\n')
phenoHeaderItems = phenoHeaderString.split(',')
phenoHeaderItems.remove(self.gpa_settings['subject_id_label'])
# update the 'Model Setup' box and populate it with the EVs and
# their associated checkboxes for categorical and demean
for ctrl in self.page.get_ctrl_list():
name = ctrl.get_name()
if name == 'model_setup':
ctrl.set_value(phenoHeaderItems)
ctrl.set_selection(self.gpa_settings['ev_selections'])
if name == 'coding_scheme':
ctrl.set_value(self.gpa_settings['coding_scheme'])
if name == 'mean_mask':
ctrl.set_value(self.gpa_settings['mean_mask'])
if name == 'repeated_measures':
ctrl.set_value(self.gpa_settings['repeated_measures'])
if name == 'z_threshold':
ctrl.set_value(self.gpa_settings['z_threshold'][0])
if name == 'p_threshold':
ctrl.set_value(self.gpa_settings['p_threshold'])
if name == 'use_zscore':
ctrl.set_value(self.gpa_settings['use_zscore'])
if name == 'group_sep':
ctrl.set_value(self.gpa_settings['group_sep'])
if name == 'grouping_var':
ctrl.set_value(self.gpa_settings['grouping_var'])
if name == 'derivative_list':
value = self.gpa_settings['derivative_list']
if isinstance(value, str):
value = value.replace("['","").replace("']","").split("', '")
new_derlist = []
# remove the _z if they are there, just so it can
# repopulate the listbox through the substitution map
for val in value:
if "_z" in val:
val = val.replace("_z","")
new_derlist.append(val)
else:
new_derlist.append(val)
ctrl.set_value(new_derlist)
def cancel(self, event):
self.Close()
def display(self, win, msg):
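        """ Show an error dialog, highlight the offending control and abort. """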
wx.MessageBox(msg, "Error")
win.SetBackgroundColour("pink")
win.SetFocus()
win.Refresh()
raise ValueError
def load_pheno(self,event):
pass
''' button: LOAD SETTINGS '''
def load(self, event):
# when the user clicks 'Load Settings', which loads the
# self.gpa_settings dictionary - it populates the values for both
# windows, so when they hit Next, the next window is also populated
dlg = wx.FileDialog(
self, message="Choose the config fsl yaml file",
defaultDir=os.getcwd(),
defaultFile="",
wildcard="YAML files(*.yaml, *.yml)|*.yaml;*.yml",
style=wx.OPEN | wx.CHANGE_DIR)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
config_map = yaml.load(open(path, 'r'))
s_map = dict((v, k) for k, v in substitution_map.iteritems())
# load the group analysis .yml config file (in dictionary form)
# into the self.gpa_settings dictionary which holds all settings
self.gpa_settings = config_map
if self.gpa_settings is None:
errDlgFileTest = wx.MessageDialog(
self, "Error reading file - group analysis " \
"configuration file appears to be blank.",
"File Read Error",
wx.OK | wx.ICON_ERROR)
errDlgFileTest.ShowModal()
errDlgFileTest.Destroy()
raise Exception
# repopulate the model setup checkbox grid, since this has to be
# done specially
if 'pheno_file' in self.gpa_settings.keys():
phenoFile = open(os.path.abspath(self.gpa_settings['pheno_file']))
phenoHeaderString = phenoFile.readline().rstrip('\r\n')
phenoHeaderItems = phenoHeaderString.split(',')
phenoHeaderItems.remove(self.gpa_settings['subject_id_label'])
# update the 'Model Setup' box and populate it with the EVs and
# their associated checkboxes for categorical and demean
for ctrl in self.page.get_ctrl_list():
if ctrl.get_name() == 'model_setup':
ctrl.set_value(phenoHeaderItems)
ctrl.set_selection(self.gpa_settings['ev_selections'])
# populate the rest of the controls
for ctrl in self.page.get_ctrl_list():
name = ctrl.get_name()
value = config_map.get(name)
dtype = ctrl.get_datatype()
# the model setup checkbox grid is the only one that doesn't
# get repopulated the standard way. instead it is repopulated
# by the code directly above
if name == 'derivative_list':
value = [s_map.get(item)
for item in value if s_map.get(item) != None]
                    if not value:
                        value = [str(item) for item in config_map.get(name)]
new_derlist = []
for val in value:
if "_z" in val:
val = val.replace("_z","")
new_derlist.append(val)
else:
new_derlist.append(val)
ctrl.set_value(new_derlist)
elif name == 'repeated_measures' or name == 'use_zscore':
ctrl.set_value(str(value))
elif name == 'z_threshold' or name == 'p_threshold':
value = value[0]
ctrl.set_value(value)
elif name == 'group_sep':
value = s_map.get(value)
ctrl.set_value(value)
elif name != 'model_setup' and name != 'derivative_list':
ctrl.set_value(value)
dlg.Destroy()
def read_phenotypic(self, pheno_file, ev_selections):
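        """ Parse the phenotype CSV into a dict mapping each header to the
        list of its values, in the form Patsy expects. """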
import csv
ph = pheno_file
# Read in the phenotypic CSV file into a dictionary named pheno_dict
# while preserving the header fields as they correspond to the data
p_reader = csv.DictReader(open(os.path.abspath(ph), 'rU'), skipinitialspace=True)
#pheno_dict_list = []
# dictionary to store the data in a format Patsy can use
# i.e. a dictionary where each header is a key, and the value is a
# list of all of that header's values
pheno_data_dict = {}
for line in p_reader:
for key in line.keys():
if key not in pheno_data_dict.keys():
pheno_data_dict[key] = []
# create a list within one of the dictionary values for that
# EV if it is categorical; formats this list into a form
# Patsy can understand regarding categoricals:
# example: { ADHD: ['adhd1', 'adhd1', 'adhd2', 'adhd1'] }
# instead of just [1, 1, 2, 1], etc.
if 'categorical' in ev_selections.keys():
if key in ev_selections['categorical']:
pheno_data_dict[key].append(key + str(line[key]))
else:
pheno_data_dict[key].append(line[key])
else:
pheno_data_dict[key].append(line[key])
#pheno_dict_list.append(line)
# pheno_dict_list is a list of dictionaries of phenotype header items
# matched to their values, which also includes subject IDs
# i.e. [{'header1': 'value', 'header2': 'value'}, {'header1': 'value', 'header2': 'value'}, ..]
# these dictionaries are UNORDERED, i.e. header items ARE NOT ORDERED
return pheno_data_dict
''' button: LOAD PHENOTYPE FILE '''
def populateEVs(self, event):
# this runs when the user clicks 'Load Phenotype File'
if self.gpa_settings is None:
self.gpa_settings = {}
for ctrl in self.page.get_ctrl_list():
name = ctrl.get_name()
self.gpa_settings[name] = str(ctrl.get_selection())
### CHECK PHENOFILE if can open etc.
# function for file path checking
def testFile(filepath, paramName):
try:
fileTest = open(filepath)
fileTest.close()
except:
errDlgFileTest = wx.MessageDialog(
self, 'Error reading file - either it does not exist or you' \
' do not have read access. \n\n' \
'Parameter: %s' % paramName,
'File Access Error',
wx.OK | wx.ICON_ERROR)
errDlgFileTest.ShowModal()
errDlgFileTest.Destroy()
raise Exception
testFile(self.gpa_settings['subject_list'], 'Subject List')
testFile(self.gpa_settings['pheno_file'], 'Phenotype/EV File')
subFile = open(os.path.abspath(self.gpa_settings['subject_list']))
phenoFile = open(os.path.abspath(self.gpa_settings['pheno_file']),"rU")
phenoHeaderString = phenoFile.readline().rstrip('\r\n')
self.phenoHeaderItems = phenoHeaderString.split(',')
if self.gpa_settings['subject_id_label'] in self.phenoHeaderItems:
self.phenoHeaderItems.remove(self.gpa_settings['subject_id_label'])
else:
errSubID = wx.MessageDialog(
self, 'Please enter the name of the subject ID column' \
' as it is labeled in the phenotype file.',
'Blank/Incorrect Subject Header Input',
wx.OK | wx.ICON_ERROR)
errSubID.ShowModal()
errSubID.Destroy()
raise Exception
# some more checks
sub_IDs = subFile.readlines()
self.subs = []
for sub in sub_IDs:
self.subs.append(sub.rstrip("\n"))
pheno_rows = phenoFile.readlines()
for row in pheno_rows:
# check if the pheno file produces any rows such as ",,,,," due
# to odd file formatting issues. if so, ignore this row. if there
# are values present in the row, continue as normal
if ",," not in row:
# if it finds a sub from the subject list in the current row
# taken from the pheno, move on. if it goes through the entire
# subject list and never finds a match, kick off the "else"
# clause below containing the error message
for sub in self.subs:
# for repeated measures-formatted files
if "," in sub:
# make the comma separator an underscore to match the
# repeated measures-formatted pheno file
if sub.replace(",","_") in row:
break
# for normal
else:
if sub in row:
break
else:
errSubID = wx.MessageDialog(
self, "Your phenotype file contains a subject ID " \
"that is not present in your group analysis " \
"subject list.\n\nPhenotype file row with subject " \
"ID not in subject list:\n%s" \
% row,
"Subject Not In List",
wx.OK | wx.ICON_ERROR)
errSubID.ShowModal()
errSubID.Destroy()
raise Exception
for ctrl in self.page.get_ctrl_list():
# update the 'Model Setup' box and populate it with the EVs and
# their associated checkboxes for categorical and demean
if ctrl.get_name() == 'model_setup':
ctrl.set_value(self.phenoHeaderItems)
# populate the design formula text box with a formula which
# includes all of the EVs, and two of the measures (MeanFD and
# the measure/derivative mean) - the user can edit this if they
# need to, obviously
if ctrl.get_name() == 'design_formula':
formula_string = ''
for EV in self.phenoHeaderItems:
if formula_string == '':
formula_string = EV
else:
formula_string = formula_string + ' + ' + EV
formula_string = formula_string + ' + MeanFD_Jenkinson'
ctrl.set_value(formula_string)
''' button: NEXT '''
def load_next_stage(self, event):
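        """ Validate the current model settings, build a trial Patsy design
        matrix and open the model design window with the contrast labels. """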
import patsy
for ctrl in self.page.get_ctrl_list():
name = ctrl.get_name()
self.gpa_settings[name] = str(ctrl.get_selection())
### CHECK PHENOFILE if can open etc.
# function for file path checking
def testFile(filepath, paramName):
try:
fileTest = open(filepath)
fileTest.close()
except:
errDlgFileTest = wx.MessageDialog(
self, 'Error reading file - either it does not exist ' \
'or you do not have read access. \n\n' \
'Parameter: %s' % paramName,
'File Access Error',
wx.OK | wx.ICON_ERROR)
errDlgFileTest.ShowModal()
errDlgFileTest.Destroy()
raise Exception
testFile(self.gpa_settings['subject_list'], 'Subject List')
testFile(self.gpa_settings['pheno_file'], 'Phenotype/EV File')
phenoFile = open(os.path.abspath(self.gpa_settings['pheno_file']),"rU")
phenoHeaderString = phenoFile.readline().rstrip('\r\n')
self.phenoHeaderItems = phenoHeaderString.split(',')
if self.gpa_settings['subject_id_label'] in self.phenoHeaderItems:
self.phenoHeaderItems.remove(self.gpa_settings['subject_id_label'])
else:
errSubID = wx.MessageDialog(
self, 'Please enter the name of the subject ID column' \
' as it is labeled in the phenotype file.',
'Blank/Incorrect Subject Header Input',
wx.OK | wx.ICON_ERROR)
errSubID.ShowModal()
errSubID.Destroy()
raise Exception
for ctrl in self.page.get_ctrl_list():
name = ctrl.get_name()
# get the design matrix formula
if name == 'design_formula':
self.gpa_settings['design_formula'] = str(ctrl.get_selection())
# get the EV categorical + demean grid selections
elif name == 'model_setup':
# basically, ctrl is checkbox_grid in this case, and
# get_selection goes to generic_class.py first, which links
# it to the custom GetGridSelection() function in the
# checkbox_grid class in custom_control.py
self.gpa_settings['ev_selections'] = ctrl.get_selection()
elif name == 'group_sep':
self.gpa_settings['group_sep'] = ctrl.get_selection()
elif name == 'grouping_var':
self.gpa_settings['grouping_var'] = ctrl.get_selection()
if name == 'derivative_list':
# grab this for below
derlist_ctrl = ctrl
else:
self.gpa_settings[name] = str(ctrl.get_selection())
self.gpa_settings['derivative_list'] = []
for derivative in list(derlist_ctrl.get_selection()):
if self.gpa_settings['use_zscore'] == "True":
self.gpa_settings['derivative_list'].append(derivative + "_z")
else:
self.gpa_settings['derivative_list'].append(derivative)
self.pheno_data_dict = self.read_phenotypic(self.gpa_settings['pheno_file'], self.gpa_settings['ev_selections'])
try:
phenoFile = open(os.path.abspath(self.gpa_settings['pheno_file']))
except:
print '\n\n[!] CPAC says: The phenotype file path provided ' \
'couldn\'t be opened - either it does not exist or ' \
'there are access restrictions.\n'
print 'Phenotype file provided: '
print self.gpa_settings['pheno_file'], '\n\n'
raise IOError
# validate design formula and build Available Contrasts list
var_list_for_contrasts = []
EVs_to_test = []
EVs_to_include = []
# take the user-provided design formula and break down the included
# terms into a list, and use this to create the list of available
# contrasts
formula = self.gpa_settings['design_formula']
# need to cycle through the EVs inside parentheses just to make
# sure they are valid
# THEN you have to treat the entire parentheses thing as one EV when
# it comes to including it in the list for contrasts
formula_strip = formula.replace('+',' ')
formula_strip = formula_strip.replace('-',' ')
formula_strip = formula_strip.replace('**(', '**')
formula_strip = formula_strip.replace(')**', '**')
formula_strip = formula_strip.replace('(',' ')
formula_strip = formula_strip.replace(')',' ')
EVs_to_test = formula_strip.split()
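        # e.g. "age + (EV1 + EV2)**2" yields ['age', 'EV1', 'EV2**2']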
# ensure the design formula only has valid EVs in it
for EV in EVs_to_test:
# ensure ** interactions have a valid EV on one side and a number
# on the other
if '**' in EV:
both_sides = EV.split('**')
int_check = 0
for side in both_sides:
if side.isdigit():
int_check = 1
else:
if (side not in self.pheno_data_dict.keys()) and \
side != 'MeanFD' and side != 'MeanFD_Jenkinson' \
and side != 'Measure_Mean' and \
side != 'Custom_ROI_Mean':
errmsg = 'CPAC says: The regressor \'%s\' you ' \
'entered within the design formula as ' \
'part of the interaction \'%s\' is not ' \
'a valid EV option.\n\nPlease enter ' \
'only the EVs in your phenotype file ' \
'or the MeanFD, MeanFD_Jenkinson, ' \
'Custom_ROI_Mean, or Measure_Mean ' \
'options.' \
% (side,EV)
errSubID = wx.MessageDialog(self, errmsg,
'Invalid EV', wx.OK | wx.ICON_ERROR)
errSubID.ShowModal()
errSubID.Destroy()
raise Exception
if int_check != 1:
errmsg = 'CPAC says: The interaction \'%s\' you ' \
'entered within the design formula requires ' \
'a number on one side.\n\nExample: ' \
'(EV1 + EV2 + EV3)**3\n\nNote: This would be ' \
'equivalent to\n(EV1 + EV2 + EV3) * ' \
'(EV1 + EV2 + EV3) * (EV1 + EV2 + EV3)' % EV
errSubID = wx.MessageDialog(self, errmsg,
'Invalid EV', wx.OK | wx.ICON_ERROR)
errSubID.ShowModal()
errSubID.Destroy()
raise Exception
# ensure these interactions are input correctly
elif (':' in EV) or ('/' in EV) or ('*' in EV):
if ':' in EV:
both_EVs_in_interaction = EV.split(':')
if '/' in EV:
both_EVs_in_interaction = EV.split('/')
if '*' in EV:
both_EVs_in_interaction = EV.split('*')
for interaction_EV in both_EVs_in_interaction:
if (interaction_EV not in self.pheno_data_dict.keys()) and \
interaction_EV != 'MeanFD' and \
interaction_EV != 'MeanFD_Jenkinson' and \
interaction_EV != 'Measure_Mean' and \
interaction_EV != 'Custom_ROI_Mean':
errmsg = 'CPAC says: The regressor \'%s\' you ' \
'entered within the design formula as ' \
'part of the interaction \'%s\' is not a ' \
'valid EV option.\n\nPlease enter only ' \
'the EVs in your phenotype file or the ' \
'MeanFD, MeanFD_Jenkinson, Custom_ROI_' \
'Mean, or Measure_Mean options.' \
% (interaction_EV,EV)
errSubID = wx.MessageDialog(self, errmsg,
'Invalid EV', wx.OK | wx.ICON_ERROR)
errSubID.ShowModal()
errSubID.Destroy()
raise Exception
else:
if (EV not in self.pheno_data_dict.keys()) and EV != 'MeanFD' \
and EV != 'MeanFD_Jenkinson' and EV != 'Measure_Mean' \
and EV != 'Custom_ROI_Mean':
errmsg = 'CPAC says: The regressor \'%s\' you ' \
'entered within the design formula is not ' \
'a valid EV option.' \
'\n\nPlease enter only the EVs in your phenotype ' \
'file or the MeanFD, MeanFD_Jenkinson, ' \
'Custom_ROI_Mean, or Measure_Mean options.' \
% EV
errSubID = wx.MessageDialog(self, errmsg,
'Invalid EV', wx.OK | wx.ICON_ERROR)
errSubID.ShowModal()
errSubID.Destroy()
raise Exception
''' design formula/input parameters checks '''
if "Custom_ROI_Mean" in formula and \
(self.gpa_settings['custom_roi_mask'] == None or \
self.gpa_settings['custom_roi_mask'] == ""):
err_string = "You included 'Custom_ROI_Mean' as a regressor " \
"in your Design Matrix Formula, but you did not " \
"specify a Custom ROI Mean Mask file.\n\nPlease " \
"either specify a mask file, or remove " \
"'Custom_ROI_Mean' from your model."
errSubID = wx.MessageDialog(self, err_string,
'No Custom ROI Mean Mask File', wx.OK | wx.ICON_ERROR)
errSubID.ShowModal()
errSubID.Destroy()
raise Exception
if "Custom_ROI_Mean" not in formula and \
(self.gpa_settings['custom_roi_mask'] != None and \
self.gpa_settings['custom_roi_mask'] != "" and \
self.gpa_settings['custom_roi_mask'] != "None" and \
self.gpa_settings['custom_roi_mask'] != "none"):
warn_string = "Note: You specified a Custom ROI Mean Mask file, " \
"but you did not include 'Custom_ROI_Mean' as a " \
"regressor in your Design Matrix Formula.\n\nThe " \
"means of the ROIs specified in the file will not " \
"be included as regressors unless you include " \
"'Custom_ROI_Mean' in your model."
errSubID = wx.MessageDialog(self, warn_string,
'No Custom_ROI_Mean Regressor', wx.OK | wx.ICON_ERROR)
errSubID.ShowModal()
errSubID.Destroy()
raise Exception
if str(self.gpa_settings["use_zscore"]) == "True":
if "Measure_Mean" in formula:
warn_string = "Note: You have included Measure_Mean as a " \
"regressor in your model, but you have selected to run " \
"the group-level analysis with the z-score standardized "\
"version of the outputs.\n\nThe mean of any z-score " \
"standardized output will always be zero."
errSubID = wx.MessageDialog(self, warn_string,
'Measure_Mean Included With z-scored Outputs', wx.OK | wx.ICON_ERROR)
errSubID.ShowModal()
errSubID.Destroy()
raise Exception
else:
for deriv in self.gpa_settings["derivative_list"]:
if "VMHC" in deriv:
warn_string = "Note: You have selected to run group-" \
"level analysis using raw outputs (non-z-score " \
"standardized), but you have also included VMHC " \
"as one of the outputs to include in your model."
errSubID = wx.MessageDialog(self, warn_string,
'VMHC Cannot Be Included As Raw Output', wx.OK | wx.ICON_ERROR)
errSubID.ShowModal()
errSubID.Destroy()
raise Exception
# if there is a custom ROI mean mask file provided, and the user
# includes it as a regressor in their design matrix formula, calculate
# the number of ROIs in the file and generate the column names so that
# they can be passed as possible contrast labels
if "Custom_ROI_Mean" in formula and \
(self.gpa_settings['custom_roi_mask'] != None and \
self.gpa_settings['custom_roi_mask'] != "" and \
self.gpa_settings['custom_roi_mask'] != "None" and \
self.gpa_settings['custom_roi_mask'] != "none"):
import commands
try:
ROIstats_output = commands.getoutput("3dROIstats -mask %s %s" \
% (self.gpa_settings['custom_roi_mask'], \
self.gpa_settings['custom_roi_mask']))
except Exception as e:
print "[!] CPAC says: AFNI 3dROIstats failed for custom ROI" \
"Mean Mask file validation. Please ensure you either " \
"have AFNI installed and that you created the mask " \
"file properly. Consult the User Guide for more " \
"information.\n\n"
print "Error details: %s\n\n" % e
raise
ROIstats_list = ROIstats_output.split("\t")
# calculate the number of ROIs - 3dROIstats output can be split
# into a list, and the actual ROI means begin at a certain point
num_rois = (len(ROIstats_list)-3)/2
custom_roi_labels = []
for num in range(0,num_rois):
custom_roi_labels.append("Custom_ROI_Mean_%d" % int(num+1))
if str(self.gpa_settings["group_sep"]) == "On":
if (self.gpa_settings["grouping_var"] == "None") or \
(self.gpa_settings["grouping_var"] is None) or \
(self.gpa_settings["grouping_var"] == "none"):
warn_string = "Note: You have selected to model group " \
"variances separately, but you have not specified a " \
"grouping variable."
errSubID = wx.MessageDialog(self, warn_string,
'No Grouping Variable Specified', wx.OK | wx.ICON_ERROR)
errSubID.ShowModal()
errSubID.Destroy()
raise Exception
if self.gpa_settings["grouping_var"] not in formula:
warn_string = "Note: You have specified '%s' as your " \
"grouping variable for modeling the group variances " \
"separately, but you have not included this variable " \
"in your design formula.\n\nPlease include this " \
"variable in your design, or choose a different " \
"grouping variable." % self.gpa_settings["grouping_var"]
errSubID = wx.MessageDialog(self, warn_string,
'Grouping Variable not in Design', wx.OK | wx.ICON_ERROR)
errSubID.ShowModal()
errSubID.Destroy()
raise Exception
def read_phenotypic(pheno_file, ev_selections, subject_id_label):
import csv
import numpy as np
ph = pheno_file
# Read in the phenotypic CSV file into a dictionary named pheno_dict
# while preserving the header fields as they correspond to the data
p_reader = csv.DictReader(open(os.path.abspath(ph), 'rU'), skipinitialspace=True)
# dictionary to store the data in a format Patsy can use
# i.e. a dictionary where each header is a key, and the value is a
# list of all of that header's values
pheno_data_dict = {}
for line in p_reader:
# here, each instance of 'line' is really a dictionary where the
# keys are the pheno headers, and their values are the values of
# each EV for that one subject - each iteration of this loop is
# one subject
for key in line.keys():
if key not in pheno_data_dict.keys():
pheno_data_dict[key] = []
# create a list within one of the dictionary values for that
# EV if it is categorical; formats this list into a form
# Patsy can understand regarding categoricals:
# example: { ADHD: ['adhd1', 'adhd1', 'adhd0', 'adhd1'] }
# instead of just [1, 1, 0, 1], etc.
if 'categorical' in ev_selections.keys():
if key in ev_selections['categorical']:
pheno_data_dict[key].append(key + str(line[key]))
elif key == subject_id_label:
pheno_data_dict[key].append(line[key])
else:
pheno_data_dict[key].append(float(line[key]))
elif key == subject_id_label:
pheno_data_dict[key].append(line[key])
else:
pheno_data_dict[key].append(float(line[key]))
# this needs to run after each list in each key has been fully
# populated above
for key in pheno_data_dict.keys():
# demean the EVs marked for demeaning
if 'demean' in ev_selections.keys():
if key in ev_selections['demean']:
new_demeaned_evs = []
mean_evs = 0.0
# populate a dictionary, a key for each demeanable EV, with
# the value being the sum of all the values (which need to be
# converted to float first)
for val in pheno_data_dict[key]:
mean_evs += float(val)
# calculate the mean of the current EV in this loop
mean_evs = mean_evs / len(pheno_data_dict[key])
# remove the EV's mean from each value of this EV
# (demean it!)
for val in pheno_data_dict[key]:
new_demeaned_evs.append(float(val) - mean_evs)
# replace
pheno_data_dict[key] = new_demeaned_evs
# converts non-categorical EV lists into NumPy arrays
# so that Patsy may read them in properly
if 'categorical' in ev_selections.keys():
if key not in ev_selections['categorical']:
pheno_data_dict[key] = np.array(pheno_data_dict[key])
return pheno_data_dict
patsy_formatted_pheno = read_phenotypic(self.gpa_settings['pheno_file'], self.gpa_settings['ev_selections'], self.gpa_settings['subject_id_label'])
# let's create dummy columns for MeanFD, Measure_Mean, and
# Custom_ROI_Mask (if included in the Design Matrix Formula) just so we
# can get an accurate list of EVs Patsy will generate
def create_regressor_column(regressor):
# regressor should be a string of the name of the regressor
import numpy as np
regressor_list = []
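            # append one 0.0 per subject row; iterating a single pheno
            # column is enough to match the row count, hence the break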
for key in patsy_formatted_pheno.keys():
for val in patsy_formatted_pheno[key]:
regressor_list.append(0.0)
break
regressor_list = np.array(regressor_list)
patsy_formatted_pheno[regressor] = regressor_list
if 'MeanFD' in formula:
create_regressor_column('MeanFD')
if 'MeanFD_Jenkinson' in formula:
create_regressor_column('MeanFD_Jenkinson')
if 'Measure_Mean' in formula:
create_regressor_column('Measure_Mean')
if 'Custom_ROI_Mean' in formula:
add_formula_string = ""
for col_label in custom_roi_labels:
create_regressor_column(col_label)
# create a string of all the new custom ROI regressor column
# names to be inserted into the design formula, so that Patsy
# will accept the phenotypic data dictionary that now has these
# columns
if add_formula_string == "":
add_formula_string = add_formula_string + col_label
else:
add_formula_string = add_formula_string + " + " + col_label
formula = formula.replace("Custom_ROI_Mean",add_formula_string)
if 'categorical' in self.gpa_settings['ev_selections']:
for EV_name in self.gpa_settings['ev_selections']['categorical']:
if self.gpa_settings['coding_scheme'] == 'Treatment':
formula = formula.replace(EV_name, 'C(' + EV_name + ')')
elif self.gpa_settings['coding_scheme'] == 'Sum':
formula = formula.replace(EV_name, 'C(' + EV_name + ', Sum)')
# create the dmatrix in Patsy just to see what the design matrix
# columns are going to be
try:
dmatrix = patsy.dmatrix(formula, patsy_formatted_pheno)
except:
print '\n\n[!] CPAC says: Design matrix creation wasn\'t ' \
'successful - do the terms in your formula correctly ' \
'correspond to the EVs listed in your phenotype file?\n'
print 'Phenotype file provided: '
print self.gpa_settings['pheno_file'], '\n\n'
raise Exception
column_names = dmatrix.design_info.column_names
subFile = open(os.path.abspath(self.gpa_settings['subject_list']))
sub_IDs = subFile.readlines()
self.subs = []
for sub in sub_IDs:
self.subs.append(sub.rstrip("\n"))
# check to make sure there are more subjects than EVs!!
if len(column_names) >= len(self.subs):
err = "There are more (or an equal amount of) EVs currently " \
"included in the model than there are subjects in the " \
"group analysis subject list. There must be more " \
"subjects than EVs in the design.\n\nNumber of subjects: " \
"%d\nNumber of EVs: %d\n\nNote: An 'Intercept' " \
"column gets added to the design as an EV, so there will " \
"be one more EV than you may have specified in your " \
"design." % (len(self.subs),len(column_names))
errSubID = wx.MessageDialog(self, err,
"Too Many EVs or Too Few Subjects",
wx.OK | wx.ICON_ERROR)
errSubID.ShowModal()
errSubID.Destroy()
raise Exception
raw_column_strings = []
# remove the header formatting Patsy creates for categorical variables
# because we are going to use var_list_for_contrasts as a label for
# users to know what contrasts are available to them
for column in column_names:
# if using Sum encoding, a column name may look like this:
# C(adhd, Sum)[S.adhd0]
# this loop leaves it with only "adhd0" in this case, for the
# contrasts list for the next GUI page
column_string = column
string_for_removal = ''
for char in column_string:
string_for_removal = string_for_removal + char
if char == '.':
column_string = column_string.replace(string_for_removal, '')
string_for_removal = ''
column_string = column_string.replace(']', '')
if ":" in column_string:
try:
column_string = column_string.split("[")[1]
except:
pass
raw_column_strings.append(column_string)
if str(self.gpa_settings["group_sep"]) == "On":
grouping_options = []
idx = 0
for column_string in raw_column_strings:
if self.gpa_settings["grouping_var"] in column_string:
grouping_variable_info = []
grouping_variable_info.append(column_string)
grouping_variable_info.append(idx)
grouping_options.append(grouping_variable_info)
# grouping_var_idx is the column numbers in the design matrix
# which holds the grouping variable (and its possible levels)
idx += 1
# all the categorical values/levels of the grouping variable
grouping_var_levels = []
for gv_idx in grouping_options:
for subject in dmatrix:
if self.gpa_settings["grouping_var"] in self.gpa_settings["ev_selections"]["categorical"]:
level_num = str(int(subject[gv_idx[1]]))
else:
level_num = str(subject[gv_idx[1]])
level_label = '__' + self.gpa_settings["grouping_var"] + level_num
if level_label not in grouping_var_levels:
grouping_var_levels.append(level_label)
# make the new header for the reorganized data
for column_string in raw_column_strings:
if column_string != "Intercept":
if self.gpa_settings["grouping_var"] not in column_string:
for level in grouping_var_levels:
var_list_for_contrasts.append(column_string + level)
elif self.gpa_settings["grouping_var"] in column_string:
var_list_for_contrasts.append(column_string)
else:
for column_string in raw_column_strings:
if column_string != 'Intercept':
var_list_for_contrasts.append(column_string)
# check for repeated measures file formatting!
group_sublist_file = open(self.gpa_settings['subject_list'], 'r')
group_sublist_items = group_sublist_file.readlines()
group_sublist = [line.rstrip('\n') for line in group_sublist_items \
if not (line == '\n') and not line.startswith('#')]
for ga_sub in group_sublist:
# ga_sub = subject ID taken off the group analysis subject list
# let's check to make sure the subject list is formatted for
# repeated measures properly if repeated measures is enabled
# and vice versa
if (self.gpa_settings['repeated_measures'] == "True") and \
(',' not in ga_sub):
errmsg = "The group analysis subject list is not in the " \
"appropriate format for repeated measures. Please " \
"use the appropriate format as described in the " \
"CPAC User Guide, or turn off Repeated Measures." \
"\n\nNote: CPAC generates a properly-formatted " \
"group analysis subject list meant for running " \
"repeated measures when you create your original " \
"subject list. Look for 'subject_list_group_" \
"analysis_repeated_measures.txt' in the directory " \
"where you created your subject list."
errSubID = wx.MessageDialog(self, errmsg,
'Subject List Format', wx.OK | wx.ICON_ERROR)
errSubID.ShowModal()
errSubID.Destroy()
raise Exception
elif (self.gpa_settings['repeated_measures'] == "False") and \
(',' in ga_sub):
errmsg = "It looks like your group analysis subject list is " \
"formatted for running repeated measures, but " \
"'Run Repeated Measures' is not enabled."
errSubID = wx.MessageDialog(self, errmsg,
'Subject List Format', wx.OK | wx.ICON_ERROR)
errSubID.ShowModal()
errSubID.Destroy()
raise Exception
# make sure the sub IDs in the sublist and pheno files match!
group_pheno_file = open(self.gpa_settings['pheno_file'], 'r')
group_pheno_lines = group_pheno_file.readlines()
# gather the subject IDs from the phenotype file
def get_pheno_subjects(delimiter):
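            # locate the subject ID column by its position in the header
            # row, then collect that column from every following data row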
            header_items = group_pheno_lines[0].rstrip('\r\n').split(delimiter)
            for index, item in enumerate(header_items):
                if item == self.gpa_settings['subject_id_label']:
                    group_pheno_subs = group_pheno_lines[1:len(group_pheno_lines)]
                    pheno_subs = []
                    for pheno_sub_line in group_pheno_subs:
                        pheno_subs.append(
                            pheno_sub_line.rstrip('\r\n').split(delimiter)[index])
                    return pheno_subs
pheno_subs = []
if "," in group_pheno_lines[0]:
pheno_subs = get_pheno_subjects(",")
        # now make sure the group sublist and pheno subject IDs match, at least
        # for the ones that exist (there may be fewer sub IDs in the sublist)
for sublist_subID, pheno_subID in zip(group_sublist, pheno_subs):
# if group sublist is formatted for repeated measures
if "," in sublist_subID:
sublist_subID = sublist_subID.replace(",","_")
if sublist_subID != pheno_subID:
if self.gpa_settings['repeated_measures'] == "False":
errmsg = "The subject IDs in your group subject list " \
"and your phenotype file do not match. Please " \
"make sure these have been set up correctly."
else:
errmsg = "The subject IDs in your group subject list " \
"and your phenotype file do not match. Please " \
"make sure these have been set up correctly." \
"\n\nNote: Repeated measures is enabled - does "\
"your phenotype file have properly-formatted " \
"subject IDs matching your repeated measures " \
"group analysis subject list?"
errSubID = wx.MessageDialog(self, errmsg,
'Subject ID Mismatch', wx.OK | wx.ICON_ERROR)
errSubID.ShowModal()
errSubID.Destroy()
raise Exception
# open the next window!
modelDesign_window.ModelDesign(self.parent, self.gpa_settings, var_list_for_contrasts) # !!! may need to pass the actual dmatrix as well
self.Close()
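        # A minimal sketch (an assumption inferred from the error text above,
        # not taken from CPAC documentation) of how a repeated-measures group
        # analysis subject list line might look, with the session label
        # appended after a comma:
        #
        #     sub001,session_1
        #     sub001,session_2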
|
TomTom is focused on producing innovative technological products related to navigation. This focus has enabled TomTom to concentrate on the innovation side of its business, as most of its projects originate from employee innovation. Building on this interest in innovation, TomTom has acquired numerous companies that specialized in innovative technologies. With these acquisitions, TomTom holds the largest market share in the navigation industry, as it is able to produce technological products demanded by consumers on a worldwide scale. The size of the business has grown as new acquisitions are made from time to time (TomTom, n.d.). This focus on innovation and growing business size marks the distinct phases of the TomTom Company.
The organizational culture at TomTom is work hard, play hard. The company takes moderate risks, as it focuses on innovating around the products and services it is familiar with; this high degree of specialization reduces risk (Kuratko et al., 2011). Because of the company's emphasis on innovation, its structure is flexible, allowing individuals to make some decisions on their own. The flexible structure also enables high creativity, as employees are at liberty to try new ideas (Scholz, 1987). The competitive nature of the industry also drives a race toward quick production and delivery to consumers. However, as the number of employees has grown, the organizational culture has shifted toward Type Z, where each department is controlled independently as a clan. Individuals are held responsible for their ideas and innovations, but the whole team has to agree on which decisions to take.
|
import os
from juliabox.jbox_util import ensure_delete, make_sure_path_exists, unique_sessname, JBoxCfg
from juliabox.vol import JBoxVol
class JBoxDefaultConfigVol(JBoxVol):
provides = [JBoxVol.JBP_CONFIG]
FS_LOC = None
@staticmethod
def configure():
cfg_location = os.path.expanduser(JBoxCfg.get('cfg_location'))
make_sure_path_exists(cfg_location)
JBoxDefaultConfigVol.FS_LOC = cfg_location
@staticmethod
def _get_config_mounts_used(cid):
used = []
props = JBoxDefaultConfigVol.dckr().inspect_container(cid)
try:
for _cpath, hpath in JBoxVol.extract_mounts(props):
if hpath.startswith(JBoxDefaultConfigVol.FS_LOC):
used.append(hpath.split('/')[-1])
except:
JBoxDefaultConfigVol.log_error("error finding config mount points used in " + cid)
return []
return used
@staticmethod
def refresh_disk_use_status(container_id_list=None):
pass
@staticmethod
def get_disk_for_user(user_email):
JBoxDefaultConfigVol.log_debug("creating configs disk for %s", user_email)
if JBoxDefaultConfigVol.FS_LOC is None:
JBoxDefaultConfigVol.configure()
disk_path = os.path.join(JBoxDefaultConfigVol.FS_LOC, unique_sessname(user_email))
cfgvol = JBoxDefaultConfigVol(disk_path, user_email=user_email)
cfgvol._unpack_config()
return cfgvol
@staticmethod
def is_mount_path(fs_path):
return fs_path.startswith(JBoxDefaultConfigVol.FS_LOC)
@staticmethod
def get_disk_from_container(cid):
mounts_used = JBoxDefaultConfigVol._get_config_mounts_used(cid)
if len(mounts_used) == 0:
return None
mount_used = mounts_used[0]
disk_path = os.path.join(JBoxDefaultConfigVol.FS_LOC, str(mount_used))
container_name = JBoxVol.get_cname(cid)
sessname = container_name[1:]
return JBoxDefaultConfigVol(disk_path, sessname=sessname)
@staticmethod
def refresh_user_home_image():
pass
def release(self, backup=False):
ensure_delete(self.disk_path, include_itself=True)
@staticmethod
def disk_ids_used_pct():
return 0
def _unpack_config(self):
if os.path.exists(self.disk_path):
JBoxDefaultConfigVol.log_debug("Config folder exists %s. Deleting...", self.disk_path)
ensure_delete(self.disk_path, include_itself=True)
JBoxDefaultConfigVol.log_debug("Config folder deleted %s", self.disk_path)
JBoxDefaultConfigVol.log_debug("Will unpack config to %s", self.disk_path)
os.mkdir(self.disk_path)
JBoxDefaultConfigVol.log_debug("Created config folder %s", self.disk_path)
self.restore_user_home(True)
JBoxDefaultConfigVol.log_debug("Restored config files to %s", self.disk_path)
self.setup_instance_config()
JBoxDefaultConfigVol.log_debug("Setup instance config at %s", self.disk_path)
|
So. You're in a show-hole, or are just cruising for a new show to start binge watching... Say no more! This is going to be part 1 (a part 2 will come later) with just some basic classics that *hopefully* have graced your screen in the past.
I'm a wimp when it comes to all things scary, but Supernatural is easy to handle. Two brothers fighting ghosts, monsters, and demons -What's not to love? There is always some sort of plot twist to break up the episodes, and the show never seems repetitive (which is hard to accomplish after 13 seasons...). This is one of those shows where you will probably finish the first five seasons in two days.
Oh Veronica Mars. When this show ended after the third season... Well let me tell you, I was devastated. It is such a witty, interesting, edge-of-your-seat kind of show, and holy love triangles! Veronica is a teen who takes after her dad, and has a knack for detective work. It's like a witty Nancy Drew with lots of relationships. Needless to say, plenty of people love the show, considering the fan-funded movie that ended up coming out after the show wrapped up! Woohoo!
When PLL first aired, I refused to watch it. I had read the books in middle school (wow I'm old), and was angry that the characters didn't look how they were supposed to (needless to say, I got over it). This show is addicting. As in, stay-up-until-4-am-watching-episodes addicting. While the story line doesn't completely follow the books, the show is fantastic. Four friends are being watched, followed, and blackmailed by a mysterious person who goes by the name A. Perhaps their dead friend Ali? This show is geared more towards young adults, but don't let that stop you! Be warned, I did struggle to keep going as the show got to the later seasons.
I kind of jumped around with this show. I had seen a few episodes randomly while sick at home, and eventually decided to watch all of the seasons on Netflix. Once you get going, it's quite addicting. It's like the feel good version of Supernatural. Melinda Gordon has an ability to see and talk to ghosts, and runs an antique shop on the side. If that isn't enough to get you watching, her cute hubby might be more up your alley!
Dexter is just your average blood splatter analyst...who kills bad guys in his spare time. I really didn't want to like Dexter because the concept of a good-guy murderer was just a little odd to me...But after the first episode I was hooked. There's just something so calming about it (weird, I know). It definitely can be an emotional roller-coaster (Rita!), but it is well worth the watch.
If you haven't seen Buffy, then stop right now and go watch it. It's basically a rite of passage if you're a 90's kid. There's always something new going on for this young vampire slayer, not to mention relationships! Oh and please, Twilight? Vampire Diaries? We all know it was Buffy & Angel who set the stage.
This show didn't really get the attention it deserved. It's fun to watch, and doesn't throw you into a deep depression when it ends (it's okay, we've all been there). I found that some of Dr. Tom's words of wisdom in this show stuck with me, and watching an episode is always a nice pick-me-up. Erica stumbles into an alternative kind of therapy, where she gets to go back in time to fix her regrets. It's a really neat concept, and I promise it's hard to stop watching.
Ahhh OTH. This was one of the first series I watched religiously. There is just something about all the different relationships that hooks you. Not to mention the story lines for each character, and seeing everyone go through high school and on to adult life. There is so much to be said about this show...It's basically high school woes, relationships, and plot twists, but you will never lose interest!
Okay this one is weird, I know. It has definitely given me some insight into what I will be looking for in a house though! A Realtor shows couples three homes, and they choose the one that fits them best.
Okay, this one is at the bottom of my list because I almost didn't put it in here. It's a great show, better than the books in my opinion. But as of now, I don't watch it. Elena (the main character) is constantly in a love triangle with two brothers, Damon and Stefan. Oh and they just happen to be vampires. I'm nothing if not stubborn, and while I don't want to give anything away, I am not a fan of Elena's current love interest. I strongly believe she should be with one brother, and I will resume watching it religiously when she gets back together with him. **Update: this NEVER HAPPENS** Ugh.
I watch AHS, but I'm not sure how I feel about it anymore. The first season was phenomenal (with a hint of cheesy). The second season... Well I loved the idea of it, and once you start watching, you have to go to the end, but I was a little thrown off overall. The third season though... Well I just don't know how to feel. I can't decide if I love it or hate it. It's just so weird. If a million storylines and even more plot twists are your cup of tea, then definitely check this show out.
So based on this list I think we are long lost besties! I've never met anyone who even knew what Veronica Mars was before the Kickstarter campaign, much less watched it!
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#----------------------------------------------------------------------------
#
#----------------------------------------------------------------------------
# Copyright 2008, authors:
# * Oliver White
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#---------------------------------------------------------------------------
from render_cairo_base import OsmRenderBase
from tilenames import *
class RenderClass(OsmRenderBase):
# Specify the background for new tiles
def imageBackgroundColour(self, mapLayer=None):
return (0, 0, 0, 0)
def requireDataTile(self):
return (False)
# Draw a tile
def draw(self):
file = open("places.txt", "r")
ctx = self.getCtx("mainlayer")
print(self.proj.S, self.proj.dLat)
#pLon = (lon - self.W) / self.dLon
for line in file:
line = line.strip()
(lat, lon, id, type, name) = line.split("\t")
if (type in ('c', 't')):
(px, py) = latlon2relativeXY(float(lat), float(lon))
(x, y) = self.proj.project(py, px)
ctx.set_source_rgb(0.0, 0.0, 0.0)
ctx.set_font_size(12)
ctx.move_to(x, y)
ctx.show_text(name)
ctx.stroke()
#-----------------------------------------------------------------
# Test suite - call this file from the command-line to generate a
# sample image
if (__name__ == '__main__'):
a = RenderClass()
filename = "sample_" + __file__ + ".png"
    a.RenderTile(8, 128, 84, 'default', filename)  # Norwich
print("------------------------------------")
print("Saved image to " + filename)
|
Just curious, how much total time did you have in the G1000 before being signed off in it?
I checked out with 3.6 hours of recent time in the G1000. I also had 1.1 hours a year or so ago. I expected it to take longer to get familiar but since the Cessna 172 SP I had been flying recently had a fairly nice GPS it was fairly intuitive.
|
#!/usr/bin/env python
import plotly.offline as offline
import plotly.plotly as py
import plotly.graph_objs as go
import csv
import string
import argparse
parser = argparse.ArgumentParser(description='Process lockfree log')
parser.add_argument (
'--input',
help='input file')
parser.add_argument(
'--output',
help='output of graph picture'
)
args = parser.parse_args()
# parse header
headerDefined = False
xLabels = None
def filterRow(row):
return row[1:2] + [ s.translate(
{ord(c): None for c in string.ascii_letters}
) for s in row[3:]]
data = []
for row in csv.reader(open(args.input, 'r'), delimiter=' ', skipinitialspace=True):
row = filterRow(row)
if not headerDefined:
xLabels = row[1:]
headerDefined = True
continue
trace = go.Scatter(
x = xLabels,
y = row[1:],
name = row[0],
line = dict(
width = 4
)
)
data.append(trace)
offline.init_notebook_mode()
layout = dict(title = 'Queue Performance',
xaxis = dict(title = "Log statements (volume)"),
yaxis = dict(title = 'Time (seconds)')
)
# Plot and embed in ipython notebook!
fig = dict(data=data, layout=layout)
offline.plot(fig, auto_open=True, filename=args.output)
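# A minimal usage sketch (the script name and file names here are hypothetical;
# the input must be the space-delimited log format the parser above expects):
#
#     python plot_queue_perf.py --input lockfree.log --output queue_perf.html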
|
Elizabeth Streb, the founder of the Streb Lab for Action Mechanics, has spent her career testing the limits of the human body through performance and choreography that combines dance, circus arts, athletics and rodeo.
In her company’s production of “Ascension,” nine people scale a 22-foot-long ladder modelled after a New York City fire escape while it rotates at varying speeds. With grace and courage, her dancers accept the challenges she presents despite the risks to life and limb.
Follow BI Video: On YouTube.
|
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, LSTM, Flatten, Embedding, Merge
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
import h5py
def Word2VecModel(embedding_matrix, num_words, embedding_dim, seq_length, dropout_rate):
print "Creating text model..."
model = Sequential()
model.add(Embedding(num_words, embedding_dim,
weights=[embedding_matrix], input_length=seq_length, trainable=False))
model.add(LSTM(units=512, return_sequences=True, input_shape=(seq_length, embedding_dim)))
model.add(Dropout(dropout_rate))
model.add(LSTM(units=512, return_sequences=False))
model.add(Dropout(dropout_rate))
model.add(Dense(1024, activation='tanh'))
return model
def img_model(dropout_rate):
print "Creating image model..."
model = Sequential()
model.add(Dense(1024, input_dim=4096, activation='tanh'))
return model
def vqa_model(embedding_matrix, num_words, embedding_dim, seq_length, dropout_rate, num_classes):
vgg_model = img_model(dropout_rate)
lstm_model = Word2VecModel(embedding_matrix, num_words, embedding_dim, seq_length, dropout_rate)
print "Merging final model..."
fc_model = Sequential()
fc_model.add(Merge([vgg_model, lstm_model], mode='mul'))
fc_model.add(Dropout(dropout_rate))
fc_model.add(Dense(1000, activation='tanh'))
fc_model.add(Dropout(dropout_rate))
fc_model.add(Dense(num_classes, activation='softmax'))
fc_model.compile(optimizer='rmsprop', loss='categorical_crossentropy',
metrics=['accuracy'])
return fc_model
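# A minimal usage sketch (all sizes below are illustrative assumptions, not
# values from the original project):
#
#     import numpy as np
#     num_words, embedding_dim, seq_length = 10000, 300, 26
#     embedding_matrix = np.zeros((num_words, embedding_dim))
#     model = vqa_model(embedding_matrix, num_words, embedding_dim,
#                       seq_length, dropout_rate=0.5, num_classes=1000)
#     # inputs at train time: [image_features (N, 4096), question_tokens (N, seq_length)]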
|
Mold design and production is carried out by our trusted partners. We have built a large network with considerable specialized expertise. This way we always get a mold that meets our specifications.
We always test new molds before commencing production to ensure that the product meets all of its requirements. We also provide test drive services to mold manufacturers.
We perform mold maintenance mainly at our own premises. Through regular mold maintenance, we make sure that molds remain in good, operational condition for a long time. We store molds in line with proper professional standards, and they are always ready to go if we need to start production.
|
"""
This module provide the attack method for Iterator FGSM's implement.
"""
from __future__ import division
import logging
from collections import Iterable
from functools import reduce

import numpy as np
from .base import Attack
__all__ = [
'GradientMethodAttack', 'FastGradientSignMethodAttack', 'FGSM',
'FastGradientSignMethodTargetedAttack', 'FGSMT',
'BasicIterativeMethodAttack', 'BIM',
'IterativeLeastLikelyClassMethodAttack', 'ILCM', 'MomentumIteratorAttack',
'MIFGSM'
]
class GradientMethodAttack(Attack):
"""
This class implements gradient attack method, and is the base of FGSM, BIM,
ILCM, etc.
"""
def __init__(self, model, support_targeted=True):
"""
:param model(model): The model to be attacked.
:param support_targeted(bool): Does this attack method support targeted.
"""
super(GradientMethodAttack, self).__init__(model)
self.support_targeted = support_targeted
def _apply(self,
adversary,
norm_ord=np.inf,
epsilons=0.01,
steps=1,
epsilon_steps=100):
"""
Apply the gradient attack method.
:param adversary(Adversary):
The Adversary object.
:param norm_ord(int):
Order of the norm, such as np.inf, 1, 2, etc. It can't be 0.
:param epsilons(list|tuple|int):
Attack step size (input variation).
Largest step size if epsilons is not iterable.
:param steps:
The number of attack iteration.
:param epsilon_steps:
The number of Epsilons' iteration for each attack iteration.
:return:
adversary(Adversary): The Adversary object.
"""
if norm_ord == 0:
raise ValueError("L0 norm is not supported!")
if not self.support_targeted:
if adversary.is_targeted_attack:
raise ValueError(
"This attack method doesn't support targeted attack!")
if not isinstance(epsilons, Iterable):
epsilons = np.linspace(0, epsilons, num=epsilon_steps)
pre_label = adversary.original_label
min_, max_ = self.model.bounds()
assert self.model.channel_axis() == adversary.original.ndim
assert (self.model.channel_axis() == 1 or
self.model.channel_axis() == adversary.original.shape[0] or
self.model.channel_axis() == adversary.original.shape[-1])
for epsilon in epsilons[:]:
step = 1
adv_img = adversary.original
if epsilon == 0.0:
continue
for i in range(steps):
if adversary.is_targeted_attack:
gradient = -self.model.gradient(adv_img,
adversary.target_label)
else:
gradient = self.model.gradient(adv_img,
adversary.original_label)
if norm_ord == np.inf:
gradient_norm = np.sign(gradient)
else:
gradient_norm = gradient / self._norm(
gradient, ord=norm_ord)
adv_img = adv_img + epsilon * gradient_norm * (max_ - min_)
adv_img = np.clip(adv_img, min_, max_)
adv_label = np.argmax(self.model.predict(adv_img))
logging.info('step={}, epsilon = {:.5f}, pre_label = {}, '
'adv_label={}'.format(step, epsilon, pre_label,
adv_label))
if adversary.try_accept_the_example(adv_img, adv_label):
return adversary
step += 1
return adversary
@staticmethod
def _norm(a, ord):
if a.ndim == 1:
return np.linalg.norm(a, ord=ord)
if a.ndim == a.shape[0]:
norm_shape = (a.ndim, reduce(np.dot, a.shape[1:]))
norm_axis = 1
else:
norm_shape = (reduce(np.dot, a.shape[:-1]), a.ndim)
norm_axis = 0
return np.linalg.norm(a.reshape(norm_shape), ord=ord, axis=norm_axis)
class FastGradientSignMethodTargetedAttack(GradientMethodAttack):
"""
"Fast Gradient Sign Method" is extended to support targeted attack.
"Fast Gradient Sign Method" was originally implemented by Goodfellow et
al. (2015) with the infinity norm.
Paper link: https://arxiv.org/abs/1412.6572
"""
def _apply(self, adversary, epsilons=0.01):
return GradientMethodAttack._apply(
self,
adversary=adversary,
norm_ord=np.inf,
epsilons=epsilons,
steps=1)
class FastGradientSignMethodAttack(FastGradientSignMethodTargetedAttack):
"""
This attack was originally implemented by Goodfellow et al. (2015) with the
infinity norm, and is known as the "Fast Gradient Sign Method".
Paper link: https://arxiv.org/abs/1412.6572
"""
def __init__(self, model):
super(FastGradientSignMethodAttack, self).__init__(model, False)
class IterativeLeastLikelyClassMethodAttack(GradientMethodAttack):
"""
"Iterative Least-likely Class Method (ILCM)" extends "BIM" to support
targeted attack.
"The Basic Iterative Method (BIM)" is to extend "FSGM". "BIM" iteratively
take multiple small steps while adjusting the direction after each step.
Paper link: https://arxiv.org/abs/1607.02533
"""
def _apply(self, adversary, epsilons=0.01, steps=1000):
return GradientMethodAttack._apply(
self,
adversary=adversary,
norm_ord=np.inf,
epsilons=epsilons,
steps=steps)
class BasicIterativeMethodAttack(IterativeLeastLikelyClassMethodAttack):
"""
FGSM is a one-step method. "The Basic Iterative Method (BIM)" iteratively
take multiple small steps while adjusting the direction after each step.
Paper link: https://arxiv.org/abs/1607.02533
"""
def __init__(self, model):
super(BasicIterativeMethodAttack, self).__init__(model, False)
class MomentumIteratorAttack(GradientMethodAttack):
"""
The Momentum Iterative Fast Gradient Sign Method (Dong et al. 2017).
This method won the first places in NIPS 2017 Non-targeted Adversarial
Attacks and Targeted Adversarial Attacks. The original paper used
hard labels for this attack; no label smoothing. inf norm.
Paper link: https://arxiv.org/pdf/1710.06081.pdf
"""
def __init__(self, model, support_targeted=True):
"""
:param model(model): The model to be attacked.
:param support_targeted(bool): Does this attack method support targeted.
"""
super(MomentumIteratorAttack, self).__init__(model)
self.support_targeted = support_targeted
def _apply(self,
adversary,
norm_ord=np.inf,
epsilons=0.1,
steps=100,
epsilon_steps=100,
decay_factor=1):
"""
Apply the momentum iterative gradient attack method.
:param adversary(Adversary):
The Adversary object.
:param norm_ord(int):
Order of the norm, such as np.inf, 1, 2, etc. It can't be 0.
:param epsilons(list|tuple|float):
Attack step size (input variation).
Largest step size if epsilons is not iterable.
:param epsilon_steps:
The number of Epsilons' iteration for each attack iteration.
:param steps:
The number of attack iteration.
:param decay_factor:
The decay factor for the momentum term.
:return:
adversary(Adversary): The Adversary object.
"""
if norm_ord == 0:
raise ValueError("L0 norm is not supported!")
if not self.support_targeted:
if adversary.is_targeted_attack:
raise ValueError(
"This attack method doesn't support targeted attack!")
assert self.model.channel_axis() == adversary.original.ndim
assert (self.model.channel_axis() == 1 or
self.model.channel_axis() == adversary.original.shape[0] or
self.model.channel_axis() == adversary.original.shape[-1])
if not isinstance(epsilons, Iterable):
epsilons = np.linspace(0, epsilons, num=epsilon_steps)
min_, max_ = self.model.bounds()
pre_label = adversary.original_label
for epsilon in epsilons[:]:
if epsilon == 0.0:
continue
step = 1
adv_img = adversary.original
momentum = 0
for i in range(steps):
if adversary.is_targeted_attack:
gradient = -self.model.gradient(adv_img,
adversary.target_label)
else:
gradient = self.model.gradient(adv_img, pre_label)
# normalize gradient
velocity = gradient / self._norm(gradient, ord=1)
momentum = decay_factor * momentum + velocity
if norm_ord == np.inf:
normalized_grad = np.sign(momentum)
else:
normalized_grad = self._norm(momentum, ord=norm_ord)
perturbation = epsilon * normalized_grad
adv_img = adv_img + perturbation
adv_img = np.clip(adv_img, min_, max_)
adv_label = np.argmax(self.model.predict(adv_img))
logging.info(
'step={}, epsilon = {:.5f}, pre_label = {}, adv_label={}'
.format(step, epsilon, pre_label, adv_label))
if adversary.try_accept_the_example(adv_img, adv_label):
return adversary
step += 1
return adversary
FGSM = FastGradientSignMethodAttack
FGSMT = FastGradientSignMethodTargetedAttack
BIM = BasicIterativeMethodAttack
ILCM = IterativeLeastLikelyClassMethodAttack
MIFGSM = MomentumIteratorAttack
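# A minimal usage sketch (not part of this module): `model` stands for an
# AdvBox-style model wrapper and `adversary` for an Adversary instance created
# elsewhere; both names and the success check are assumptions for illustration.
#
#     attack = FGSM(model)
#     adversary = attack(adversary, epsilons=0.03)
#     if adversary.is_successful():
#         adv_example = adversary.adversarial_example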
|
If you fancy building your own dessert station or lolly buffet for your next function, but would love to save the hassle of designing and running around shopping, this is for you. A full kit of everything you need to set up your own buffet delivered to your door in Sydney. Perfect for children’s birthday parties and designed to suit your theme!
Complete the contact form below and chat to Lisa about getting your cupcake, lolly, bunting or monster themed DIY lolly buffet. This will then be customized and delivered to you for an easy set-up sure to impress your little party goers!
|
#!/usr/bin/env python
__author__ = "Khalid Alnajjar"
'''
A class for accessing Thesaurus Rex (v2) API
Requirements: requests xmltodict (installable through pip)
'''
import requests, urllib.parse, time, xmltodict, json
class TheRex:
def __init__(self):
self.base_url = 'http://ngrams.ucd.ie/therex2/common-nouns/'
self.throttle = 2 # seconds
self.last_query = None
def map_item(self, r):
return tuple([r['#text'], int(r['@weight'])])
def member(self, concept):
'''To obtain properties and categories of a given concept'''
concept = urllib.parse.quote_plus(concept)
        # base_url already ends with 'common-nouns/', so don't repeat it here
        url = '{0}member.action?member={concept}&kw={concept}&needDisamb=true&xml=true'.format(self.base_url, concept=concept)
result = self._query_and_parse(url)
return self._result_to_dict(result, 'MemberData')
def modifier(self, modi, concept):
'''To find cateogires of the input concept that share the input modifier(property)'''
modi = urllib.parse.quote_plus(modi)
concept = urllib.parse.quote_plus(concept)
url = '{0}modifier.action?modi={modi}&ref={ref}&xml=true'.format(self.base_url, modi=modi, ref=concept)
result = self._query_and_parse(url)
return self._result_to_dict(result, 'ModifierData')
def head(self, head, concept):
'''To find properties of the input concept that are shared with the input head(category)'''
modi = urllib.parse.quote_plus(head)
concept = urllib.parse.quote_plus(concept)
url = '{0}head.action?head={head}&ref={ref}&xml=true'.format(self.base_url, head=head, ref=concept)
result = self._query_and_parse(url)
return self._result_to_dict(result, 'HeadData')
def category(self, modi, cat):
'''To find concepts that have a given modi(property) and also fall under a given category'''
url = '{0}category.action?cate={1}&kw={2}&search=true&xml=true'.format(self.base_url, modi + ':' + cat, modi + '+' + cat)
result = self._query_and_parse(url)
return self._result_to_dict(result, 'CategoryData')
def _query_and_parse(self, url):
t = time.time()
response = requests.get(url)
time.sleep(max(self.throttle-(time.time()-t), 0)) # simple throttling
return xmltodict.parse(response.content)
def _result_to_dict(self, query_result, root_name):
_root_content = query_result[root_name]
result = {}
if 'Categories' in _root_content and 'Category' in _root_content['Categories']:
categories = map(lambda r: self.map_item(r), _root_content['Categories']['Category'])
result['categories'] = dict(categories)
if 'Members' in _root_content and 'Member' in _root_content['Members']:
members = map(lambda r: self.map_item(r), _root_content['Members']['Member'])
result['members'] = dict(members)
if 'Modifiers' in _root_content and 'Modifier' in _root_content['Modifiers']:
modifiers = map(lambda r: self.map_item(r), _root_content['Modifiers']['Modifier'])
result['modifiers'] = dict(modifiers)
if 'CategoryHeads' in _root_content and 'CategoryHead' in _root_content['CategoryHeads']:
category_heads = map(lambda r: self.map_item(r), _root_content['CategoryHeads']['CategoryHead'])
result['category_heads'] = dict(category_heads)
return result
if __name__ == '__main__':
tr = TheRex()
target_concept = 'cat'
print(json.dumps(tr.member(target_concept), indent=4))
print()
print(json.dumps(tr.modifier(modi='furry', concept=target_concept), indent=4))
print()
print(json.dumps(tr.head(head='mammal', concept=target_concept), indent=4))
print()
print(json.dumps(tr.category('furry', 'animal'), indent=4))
|
If you struggle with Wrist Pain, The Shephard Health Team offers various treatment options that effectively help ease the pain from this ailment.
At Shephard Health, we use a number of different treatment modalities to effectively treat pain involving the wrist. Active Release and Massage Therapy are used to treat the tissues and tendons that are causing pain. Cold Laser Therapy treats wrist pain on a cellular level by prompting cells to produce more energy, in turn promoting healing. As light energy is transformed into biochemical energy in the body, the blood supply to damaged cells is increased and the healing process is stimulated. Shock Wave Therapy is another one of the many treatments we use for wrist pain here at Shephard Health!
To learn more about our Calgary Wrist Pain Treatment, please contact Shephard Health at (403) 543-7499 Today!
|
from functools import update_wrapper
import inspect
class DefFacts:
def __new__(cls, nonexpected=None, order=0):
obj = super(DefFacts, cls).__new__(cls)
if nonexpected is not None:
raise SyntaxError("DefFacts must be instanced to allow decoration")
obj.__wrapped = None
obj._wrapped_self = None
obj.order = order
return obj
@property
def _wrapped(self):
return self.__wrapped
@_wrapped.setter
def _wrapped(self, value):
if inspect.isgeneratorfunction(value):
self.__wrapped = value
return update_wrapper(self, self.__wrapped)
else:
raise TypeError("DefFact can only decorate generators.")
def __repr__(self): # pragma: no cover
return "DefFacts(%r)" % (self._wrapped)
def __call__(self, *args, **kwargs):
if self._wrapped is not None:
if self._wrapped_self is None:
gen = self._wrapped(*args, **kwargs)
else:
gen = self._wrapped(self._wrapped_self, *args, **kwargs)
return (x.copy() for x in gen)
elif not args:
raise RuntimeError("Usage error.")
else:
self._wrapped = args[0]
return self
def __get__(self, instance, owner):
self._wrapped_self = instance
return self
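# A minimal usage sketch (assuming a pyknow/experta-style KnowledgeEngine;
# `KnowledgeEngine` and `Fact` are imported elsewhere and are assumptions here):
#
#     class MyEngine(KnowledgeEngine):
#         @DefFacts(order=0)
#         def startup_facts(self):
#             yield Fact(status='init')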
|
“Journal” of the R-fMRI Network (JRN) is a preprint, open-access, free-submission, “peer viewed”, community funded “Journal” of R-fMRI related studies. The goal of JRN is to supplement the current slow and inefficient “peer reviewed” journal publication system.
All the submissions at the JRN are preprint submissions, thus the authors can feel free to revise and submit to other formal “peer reviewed” traditional journals. The JRN only checks the format of the manuscript, and contacts the corresponding author to confirm his/her approval of submission.
All the JRN articles are freely available online after submission. Readers can freely read, download and comment on the articles.
Unlike other open-access journals, submission to the JRN is totally free of charge.
Unlike other preprint services, the articles at the JRN will be peer viewed. The JRN has a panel of consultants – each of them is obligated to comment on one article each month. In addition, the readers are going to comment as well. Each month, the JRN will name monthly “consultants’ choice” and “readers’ choice” articles. Furthermore, the JRN is going to rate the top active articles – those which demonstrate the most active comments and revisions – a model to spur the feedback and revision of the articles.
The JRN is a community funded effort. We encourage all researchers to make a small contribution each month at http://rfmri.org/HelpUs. Your contributions are extremely crucial to make the JRN effort happen!!!
Like the other posts at the R-fMRI Network, all submissions are dated, citable with a permanent URL and indexed by Google. However, each JRN submission has a unique URL such as http://rfmri.org/JRN_140828001. More importantly, we are going to have the articles indexed by Google Scholar and even by PubMed.
This is a planning effort, targeting the first issue in January, 2015. We are trying to recruit a part-time content editor (compensation at $1000 per journal, Mr. Qingyang Li, M.S.) and a part-time art editor (compensation at $100 per journal); please stay tuned at http://rfmri.org/LookForTalent if interested. Thus, your contributions are extremely crucial for the JRN effort. If we fail to raise enough funds (>$1200) by January 2015, the JRN effort will be aborted. We believe that if each of us makes a small contribution (more is welcome) on a regular basis (e.g., monthly), the JRN effort shall not perish from the earth!
Please prepare your manuscript for the “peer-viewed” preprint JRN (first issue targeting January, 2015), and be prepared to receive comments, revise your manuscript, and submit it to a traditional “peer-reviewed” journal!
Editor in Chief (design the architect, fund raising, communicate with consultants, oversee the Content Editor and Art Editor, and make the JRN effort happen): Chao-Gan Yan, Ph.D.
Content Editor: Qingyang Li, M.S.
Art Editor: Open for application at http://rfmri.org/LookForTalent.
|
''' Copyright 2013 Michael Gallagher
Email: mikesligo at gmail dot com
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. '''
#!/usr/bin/python
import re
import socket # Import socket module
from threading import Thread
from struct import *
import time
class Packet:
def __init__(self, packet):
packet = str(packet)
print packet
print "length: "+str(len(packet))
self.full = packet + "\r\n"
try:
host = re.search('\nHost: (\S+)',packet)
self.host = host.group(1)
except:
print "Error finding host"
print packet
raise
def printpacket(self):
print 'All: \n' + self.full
class Server:
def __init__(self, host, port):
self.port = port;
self.host = host;
def start(self):
print "Starting PieProxy..."
self.mainsocket = socket.socket()
self.mainsocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # don't bind address
        self.mainsocket.bind((self.host, self.port))
self.mainsocket.listen(5)
print "Starting listen..."
self.listen_for_incoming_client()
def listen_for_incoming_client(self): # To be run in a thread
while True:
self.conn, addr = self.mainsocket.accept() # Establish connection with client.
packet = self.conn.recv(8192)
if len(packet) != 0:
print "\nListening for incoming client..."
packet = Packet(packet)
self.forward_packet_to_server(packet)
# raw_input("\nHit enter to continue")
def forward_packet_to_server(self, packet):
print "Forwarding packet to server..."
s = socket.socket()
s.settimeout(1)
try:
if packet is not None:
print 'Connecting to '+packet.host
else:
print "Host is none"
print packet.full
s.connect((packet.host,80))
s.sendall(packet.full)
#receive = Thread(target=self.listen_for_incoming_server,args=(s,))
#receive.start()
self.listen_for_incoming_server(s)
except:
print "\nERROR ATTEMPTING TO CONNECT OR SEND PACKETS"
print "==========================================="
print packet.full
raise
    def listen_for_incoming_server(self, sock):
        # NB: the parameter is named `sock` so it doesn't shadow the socket module
        print "Listening for incoming packets from the server"
        print "Receiving data..."
        response = sock.recv(8192)
        data = response
        try:
            while len(data) > 0:
                sock.settimeout(sock.gettimeout() + 0.1)
                data = sock.recv(8192)
                response = response + data
                print "Receiving more data..."
                print "Length: " + str(len(data))
        except socket.timeout:
            # the server went quiet; treat this as the end of the response
            pass
        finally:
            print "Response Length: " + str(len(response))
            self.return_response_to_client(response)
            sock.close()
            print "Killing thread..."
            return
def return_response_to_client(self, response):
print "Returning response to client..."
self.conn.sendall(response)
def close(self):
self.mainsocket.close()
if __name__ == '__main__':
print
print "Pie Proxy\n=============================\n"
host = socket.gethostname() # Get local machine name
port = 8000 # Reserve port
server = Server(host,port)
server.start()
|
At Get That Car Loan we can help you with your No Doc Car Loan or Low Doc Car Loan application.
This can be the solution for those who are self-employed, or who have started a new business that may have limited financials or be unable to verify their income.
If you haven’t completed your tax return, or are waiting on your accountant to finalise your accounts we can assist in this situation.
Alternatively, you may have completed your financials but just don’t want the hassle of having to provide all the paperwork associated with verifying your income. In that case, a No Doc Car Loan could be the answer you have been looking for.
|
# This file is part of Sigviewr.
# This project is licensed under the GNU GPL (version 3 or higher).
# Copyright 2014 by Clemens Brunner and Thomas Brunner.
from collections import OrderedDict
import time
from .streamreader import StreamReader
# begin HACK
try:
import external.pylsl.pylsl as pylsl
except ImportError:
pass
# end HACK
class LSLReader(StreamReader):
def __init__(self):
super(LSLReader, self).__init__()
def open(self, streams):
self.inlet = pylsl.stream_inlet(streams)
self.metainfo.general["Type"] = "LSL"
self.metainfo.general["Version"] = str(pylsl.protocol_version())
self.metainfo.general["Recording time"] = time.asctime(time.gmtime())
for stream in [streams]:
stream_info = OrderedDict()
stream_info["Type"] = stream.type()
stream_info["Sampling rate"] = str(stream.nominal_srate())
stream_info["Channels"] = str(stream.channel_count())
self.metainfo.streams.append(stream_info)
def close(self):
pass
def numStreams(self):
return len(self.metainfo.streams)
def sampleRate(self, stream):
return float(self.metainfo.streams[stream]["Sampling rate"])
def numChannels(self, stream):
return int(self.metainfo.streams[stream]["Channels"])
def channelLabel(self, stream, channel):
return "Test"
def resolve_streams(self):
self.streams = pylsl.resolve_streams()
return self.streams
def get_data(self):
sample = pylsl.vectorf()
self.inlet.pull_sample(sample)
return list(sample)
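# A minimal usage sketch (assumes at least one LSL outlet is live on the
# network, otherwise resolve_streams() will find nothing):
#
#     reader = LSLReader()
#     streams = reader.resolve_streams()
#     reader.open(streams[0])
#     sample = reader.get_data()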
|
Luxury long-staple Combed Cotton Sateen, 310 thread count, flange finish with white satin stitch. Available in 3 colors: Pierre (Stone), Silver (Light Grey), & Platine (Medium Grey).
The selection of solid colors in the Yves Delorme timeless collection expresses a sophistication framed in white and underlined with a satin stitch. This design blends beautifully with Triomphe solids and Lutece in cotton sateen, uniting a classical decor with a touch of novelty.
|
"""
This file includes commonly used utilities for this app.
"""
from datetime import datetime
today = datetime.now()
year = today.year
month = today.month
day = today.day
# Following are for images upload helper functions. The first two are used for product upload for the front and back.
# The last two are used for design product upload for the front and back.
def front_product_image(instance, filename):
# file will be uploaded to MEDIA_ROOT/product_imgs/maker_<id>/product_<id>/Y/m/d/front/<filename>
return 'product_imgs/maker_{0}/product_{1}/{2}/{3}/{4}/front/{5}'.format(instance.maker.id, instance.id, year, month, day, filename)
def back_product_image(instance, filename):
# file will be uploaded to MEDIA_ROOT/product_imgs/maker_<id>/product_<id>/Y/m/d/back/<filename>
return 'product_imgs/maker_{0}/product_{1}/{2}/{3}/{4}/back/{5}'.format(instance.maker.id, instance.id, year, month, day, filename)
def front_design_image(instance, filename):
# file will be uploaded to MEDIA_ROOT/product_imgs/designer_<id>/design_product_<id>/Y/m/d/front/<filename>
return 'product_imgs/designer_{0}/design_product_{1}/{2}/{3}/{4}/front/{5}'.format(instance.designer.id, instance.id, year, month, day, filename)
def back_design_image(instance, filename):
# file will be uploaded to MEDIA_ROOT/product_imgs/designer_<id>/design_product_<id>/Y/m/d/back/<filename>
return 'product_imgs/designer_{0}/design_product_{1}/{2}/{3}/{4}/back/{5}'.format(instance.designer.id, instance.id, year, month, day, filename)
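# A minimal usage sketch (assuming a `Product` model with a `maker` foreign
# key; the model and field names are assumptions for illustration). Django's
# ImageField accepts a callable for upload_to with this (instance, filename)
# signature:
#
#     class Product(models.Model):
#         maker = models.ForeignKey(Maker, on_delete=models.CASCADE)
#         front_image = models.ImageField(upload_to=front_product_image)
#         back_image = models.ImageField(upload_to=back_product_image)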
def fill_category_tree(model, deep=0, parent_id=0, tree=[]):
    '''
    NAME::
        fill_category_tree
    DESCRIPTION::
        Traverses a product category model that has a `parent` field and
        builds a tree-shaped structure from it.
    PARAMETERS::
        :param model: the model to traverse; must have a `parent` attribute
        :param deep: in this example, parent/child nesting is made explicit
            by prefixing names with this many dashes (---)
        :param parent_id: which parent category to start from; 0 means start
            from the top level
        :param tree: the tree of tuples to build
    RETURN::
        No return value is strictly needed here, but one is provided in case
        some caller finds it useful.
    USAGE::
        Call it like this:
            choices = [()]
            fill_category_tree(Category, tree=choices)
        A list [] is used rather than a tuple () because only a list is
        passed by reference and can be mutated in place.
    '''
    if parent_id == 0:
        ts = model.objects.filter(parent=None)
        # tree[0] += ((None, 'Select a product category'),)
        for t in ts:
            tmp = [()]
            fill_category_tree(model, 4, t.id, tmp)
            tree[0] += ((t.id, '-'*deep + t.name,),)
            for tt in tmp[0]:
                tree[0] += (tt,)
    else:
        ts = model.objects.filter(parent_id=parent_id)
        for t in ts:
            tree[0] += ((t.id, '-'*deep + t.name,),)
            fill_category_tree(model, deep + 4, t.id, tree)
    return tree
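# A minimal usage sketch (assuming a `Category` model with a nullable
# `parent` foreign key; the form field name is an assumption):
#
#     choices = [()]
#     fill_category_tree(Category, tree=choices)
#     category = forms.ChoiceField(choices=choices[0])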
|
LG G3 User Guide Manual Tips Tricks Download. In this post I am sharing a link to a PDF file that will help you use the LG G3. All tips and tricks are covered in this PDF guide so that a user can easily use the LG G3 smartphone. You will find guidelines on how to operate and use the LG G3 phone.
The LG G3 32GB is a high-end device that includes the best of every feature. Although it comes with an expensive price tag, premium features such as the QHD screen, wireless charging, mass storage, and a superb camera make it a device that is worth investing in.
The LG G3 32GB has a 5.5-inch IPS LCD screen, which offers an impressive resolution of 1,440 x 2,560 pixels and an incredibly crisp pixel density of 534 pixels per inch. The screen is protected with Gorilla Glass 3. The device runs on a Qualcomm Snapdragon quad-core 2.5 GHz processor, along with 3 GB of RAM, and ships with the latest Android 4.4.2 KitKat OS out of the box.
The Android smartphone has an impressive 13 megapixel camera on the back with features such as face detection, geotagging, HDR, etc. A secondary 2.1-megapixel camera is also available. The camera, with its dual-LED flash, is impressive and also supports full HD video recording. The device offers a large internal storage of 32 GB and comes with a memory slot that accepts microSD cards of up to 128 GB.
On the connectivity front, the smartphone supports 3G for high-speed Internet access. The device has other options such as Wi-Fi, Bluetooth, GPS, a USB port, an HDMI port and NFC, which make your wired and wireless communications faster and more efficient. The device draws power from a 3,000 mAh battery, which provides more than one day of use per charge.
|
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
# WeCase -- This file implemented AboutWindow.
# Copyright (C) 2013, 2014 The WeCase Developers.
# License: GPL v3 or later.
from PyQt4 import QtGui
import version
import wecasepath as path
from AboutWindow_ui import Ui_About_Dialog
class AboutWindow(QtGui.QDialog, Ui_About_Dialog):
def __init__(self, parent=None):
super(AboutWindow, self).__init__(parent)
self.setupUi(self)
def _setPkgProvider(self):
if version.pkgprovider == version.default_provider:
vanilla = self.tr("Vanilla Version")
self.distLabel.setText(vanilla)
else:
disttext = version.pkgprovider
self.distLabel.setText(self.distLabel.text() % disttext)
def _setVersionLabel(self):
self.versionLabel.setText(self.versionLabel.text() % version.pkgversion)
def _setDescriptionLabel(self):
self.descriptionLabel.setText(self.descriptionLabel.text() % version.bug_report_url)
    def _setContributorBrowser(self):
        with open(path.myself_path + "AUTHORS", "r") as f:
            contributors = f.read()
        contributors = contributors.replace("\n", "<br />")
        # NB: the widget name (with its typo) comes from the generated UI file
        self.contirbutorBrowser.setHtml("<center>%s</center>" % contributors)
def setupUi(self, widget):
super().setupUi(widget)
self._setPkgProvider()
self._setVersionLabel()
self._setDescriptionLabel()
        self._setContributorBrowser()
|
It's been nearly 8 years since the exclusive Chicago Athletic Association closed its doors for the promise of a new hotel. In 1890 a group of Chicago's most prominent businessmen formed the Chicago Athletic Association, and in 1893 erected a clubhouse along the Michigan Avenue Streetwall at 12 S Michigan. The Venetian Gothic inspired building has been a mysterious treasure, for over 120 years, to those without the means to visit. This past May, however, that all changed.
Now open to the public the building houses a boutique hotel, named The Chicago Athletic Association Hotel, after its predecessor. Easily the most exciting parts of the hotel are the myriad of places to eat, drink, and be merry. And each of them has retained the sophisticated feel of an exclusive club catering to the ivy league and prep school crowd.
Shake Shack - Located on the ground level this burger joint also serves wine, beer, and, of course, frozen custard shakes!
Drawing Room - Serving as the lobby of the hotel this space brings me right back to study hall in boarding school, if only they served cocktails then. A great spot to meet before dinner, you can relax with a glass of wine in front of the fireplace, browse through a book, or enjoy the views of Millennium Park below.
Milk Room - From 7:00 a.m.-3:00 p.m. this space serves as a coffee shop, but after 5:00 the 8-seat bar is expected to serve extremely rare spirits and a few select cocktails. I have not heard that the bar concept is open yet, but it should be coming soon.
Game Room - All games are free! Kick back and enjoy casual food along with cocktails and beer while challenging your friends to a variety of games from bocce ball, to pool, to foosball, or shuffleboard.
Cherry Circle Room - The main restaurant is a circular room with an old wood bar running along the perimeter. Have a casual meal at the bar or cozy up to a booth and have your cocktail made tableside!
Cindy's - Because Chicago is best in the summertime a rooftop bar with lake views was in need! Cindy's has the feel of a lakehouse cottage with a million dollar view. Relax inside for dinner or enjoy cocktails on a summer night by the outdoor firepits. Don't forget to enjoy the Navy Pier fireworks on Wednesday and Saturday in the summer!
|
from flask_restful import marshal_with, fields
from flask import Blueprint as FlaskBlueprint
import logging
from pebbles.models import Blueprint, Instance
from pebbles.server import restful
from pebbles.views.commons import auth
from pebbles.utils import requires_admin, memoize
from collections import defaultdict
stats = FlaskBlueprint('stats', __name__)
def query_blueprint(blueprint_id):
return Blueprint.query.filter_by(id=blueprint_id).first()
blueprint_fields = {
'name': fields.String,
'users': fields.Integer,
'launched_instances': fields.Integer,
'running_instances': fields.Integer,
}
result_fields = {
'blueprints': fields.List(fields.Nested(blueprint_fields)),
'overall_running_instances': fields.Integer
}
class StatsList(restful.Resource):
@auth.login_required
@requires_admin
@marshal_with(result_fields)
def get(self):
instances = Instance.query.all()
overall_running_instances = Instance.query.filter(Instance.state != Instance.STATE_DELETED).count()
get_blueprint = memoize(query_blueprint)
per_blueprint_results = defaultdict(lambda: {'users': 0, 'launched_instances': 0, 'running_instances': 0})
unique_users = defaultdict(set)
for instance in instances:
user_id = instance.user_id
blueprint = get_blueprint(instance.blueprint_id)
if not blueprint:
logging.warn("instance %s has a reference to non-existing blueprint" % instance.id)
continue
if 'name' not in per_blueprint_results[blueprint.id]:
per_blueprint_results[blueprint.id]['name'] = blueprint.name
if user_id not in unique_users[blueprint.id]:
unique_users[blueprint.id].add(user_id)
per_blueprint_results[blueprint.id]['users'] += 1
            if instance.state != Instance.STATE_DELETED:
per_blueprint_results[blueprint.id]['running_instances'] += 1
per_blueprint_results[blueprint.id]['launched_instances'] += 1
# per_blueprint_results[blueprint.id]['overall_running_instances'] = overall_running_instances
results = []
for blueprint_id in per_blueprint_results:
results.append(per_blueprint_results[blueprint_id])
results.sort(key=lambda results_entry: (results_entry["launched_instances"], results_entry["users"]), reverse=True)
final = {"blueprints": results, "overall_running_instances": overall_running_instances}
return final
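# A minimal usage sketch (not part of this module): the route below is an
# assumption for illustration, not the actual pebbles API path.
#
#     api.add_resource(StatsList, '/api/v1/stats')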
|
A metal fabricator and electrical supplier was sued under the Clean Water Act for elevated zinc concentrations in stormwater just weeks before a Level 3 Corrective Action deadline. Aspect quickly determined roofing was the primary source and negotiated installation of a roof downspout filter, meeting the deadline and greatly aiding settlement. After settlement, we assisted the client in coating the roof to avoid ongoing maintenance costs. The site now routinely meets all benchmarks.
|
# -*- encoding: utf-8 -*-
'''
Created on: 2015
Author: Mizael Martinez
'''
from pyfann import libfann
from login import Login
from escribirArchivo import EscribirArchivo
import inspect, sys, os
sys.path.append("../model")
from baseDatos import BaseDatos
auxiliar=[]
np=EscribirArchivo()
np.setUrl("prueba_normalizado.data")
np.setNumeroEntradas(8)
np.setNumeroSalidas(5)
np.escribirEnArchivoParaProbarRedNeuronalNormalizados()
bd=BaseDatos()
# first element: number of neurons
# second element: error
# third element: URL of the file for that training run
print bd.obtenerErroresMenoresDeEntrenamientoNormalizado()[0][2]
errores=bd.obtenerErroresMenoresDeEntrenamientoNormalizado()
for k in range(len(errores)):
ann = libfann.neural_net()
ann.create_from_file("../files/"+errores[k][2])
ann.reset_MSE()
test_data = libfann.training_data()
test_data.read_train_from_file("../files/prueba_normalizado.data")
entradas=test_data.get_input()
salidas=test_data.get_output()
for i in range(0,len(entradas)):
ann.test(entradas[i], salidas[i])
auxiliar.append(ann.get_MSE())
print auxiliar
print "%s - %s - %s - %s"%("Neuronas".rjust(8),"Archivo".rjust(15),"Error Entrenamiento".rjust(16),"Error Prueba")
for z in range(len(errores)):
print "%s - %s - %s - %s"%(str(errores[z][0]).rjust(8),str(errores[z][2]).rjust(15),str(errores[z][1]).rjust(16),str(auxiliar[z]))
|
Is your team ready to challenge darkness?
There’s never been a better time to put your skills to the test.
Take advantage of our End of Financial Year Special to receive 25% off all exclusive workshops booked by 30th June 2018.
By removing individuals and teams from their familiar surroundings, our workshop participants discover skills that they can then transfer into their professional and personal lives. In fact, 74% of participants report that they have applied what they learned in the workshop to their real work environment. This is something we are very proud of.
|
###############################################################################
# findGenesAnnotExpr.py
# Copyright (c) 2017, Joshua J Hamilton and Katherine D McMahon
# Affiliation: Department of Bacteriology
# University of Wisconsin-Madison, Madison, Wisconsin, USA
# URL: http://http://mcmahonlab.wisc.edu/
# All rights reserved.
################################################################################
# Identify transport reactions and their corresponding genes. Map genes to COGs
# and extract their expression profiles.
################################################################################
#%%#############################################################################
### Import packages
################################################################################
from collections import Counter
import cobra
import copy
import os
import pandas as pd
import re
#%%#############################################################################
### Define folder structure
################################################################################
modelDir = '../../models/rawModels'
genomeDir = '../../data/transporters/modelGenes'
cladeDir = '../../data/transporters/cladeCOGs'
cogDir = '../../data/orthoMCL/genomeCOGs'
exprDir = '../../results/expression'
resultsDir = '../../results/transporters'
taxonFile = '../../data/externalData/taxonomy.csv'
annotTable = '../../data/orthoMCL/annotTable.csv'
#%%#############################################################################
### Model pre-processing
################################################################################
# Check that the output directory exists. If not, create it.
if not os.path.exists(genomeDir):
os.makedirs(genomeDir)
if not os.path.exists(cladeDir):
os.makedirs(cladeDir)
if not os.path.exists(resultsDir):
os.makedirs(resultsDir)
# Import the list of models
modelList = []
for model in os.listdir(modelDir):
if not model.startswith('.'):
modelList.append(model)
#%%#############################################################################
### Identify transporter genes
################################################################################
for curModel in modelList:
# Read in model from SBML and create dict to store stuff
model = cobra.io.read_sbml_model(modelDir+'/'+curModel+'/'+curModel+'.xml')
transDict = {}
for curRxn in model.reactions:
# Transport reactions, based on keywords
if re.search('transport', curRxn.name) or re.search('permease', curRxn.name) or re.search('symport', curRxn.name) or re.search('diffusion', curRxn.name) or re.search('excretion', curRxn.name) or re.search('export', curRxn.name) or re.search('secretion', curRxn.name) or re.search('uptake', curRxn.name) or re.search('antiport', curRxn.name):
cdsList = []
for gene in curRxn.genes:
if gene.id != 'Unknown':
cdsList = cdsList + [gene.id]
transDict[curRxn.id] = cdsList
# Transport reactions which don't get picked up based on keywords
elif curRxn.id == 'rxn05226_c0' or curRxn.id == 'rxn05292_c0' or curRxn.id == 'rxn05305_c0' or curRxn.id == 'rxn05312_c0' or curRxn.id == 'rxn05315_c0' or curRxn.id == 'rxn10945_c0' or curRxn.id == 'rxn10116_c0':
cdsList = []
for gene in curRxn.genes:
if gene.id != 'Unknown':
cdsList = cdsList + [gene.id]
transDict[curRxn.id] = cdsList
with open(genomeDir+'/'+curModel+'.txt', 'w') as outFile:
for key in transDict.keys():
outFile.write(key+';')
for cds in transDict[key]:
outFile.write(cds+',')
outFile.write('\n')
#%%#############################################################################
### For each clade, identify the COGs associated with each reaction
### For each (rxn, cog) pairing, identify the expression data
################################################################################
# Read in the taxonomy table and create a list of genomes for each clade
cladeToGenomeDict = {}
cladeList = []
taxonClass = pd.DataFrame.from_csv(taxonFile, sep=',')
taxonClass = taxonClass.dropna()
# Extract the unique clades
cladeList = pd.unique(taxonClass['Clade'].values)
for clade in cladeList:
genomeList = taxonClass[taxonClass['Clade'] == clade].index.tolist()
cladeToGenomeDict[clade] = genomeList
# Read in the annotation table
annotDF = pd.read_csv(annotTable, index_col=0)
# Identity all the unique reactions within the clade and their associated COGs.
for clade in cladeList:
geneDict = {}
cogDict = {}
modelList = cladeToGenomeDict[clade]
for model in modelList:
# Create a dictionary associating CDS with each reaction
with open(genomeDir+'/'+model+'.txt', 'r') as inFile:
for line in inFile:
[gene, cdsArray] = line.strip().split(';')
cdsArray = cdsArray.split(',')
                cdsArray = list(filter(None, cdsArray))
if len(cdsArray) > 0:
if gene in geneDict.keys():
geneDict[gene] = geneDict[gene] + cdsArray
else:
geneDict[gene] = cdsArray
for cds in cdsArray:
cogDict[cds] = None
# Create a dictionary associating with COGs with each CDS
# Temporary dict to store all associations for that genome
tempDict = {}
with open(cogDir+'/'+model+'COGs.txt', 'r') as inFile:
for line in inFile:
[cds, cog] = line.strip().split(',')
tempDict[cds] = cog
# Populate the cogDict using this info
for cds in cogDict.keys():
if cds.replace('_CDS_', '.genome.CDS.') in tempDict.keys():
cogDict[cds] = tempDict[cds.replace('_CDS_', '.genome.CDS.')]
with open(cladeDir+'/'+clade+'.CDS.txt', 'w') as outFile:
for key in geneDict.keys():
outFile.write(key+';')
for cds in geneDict[key]:
outFile.write(cds+',')
outFile.write('\n')
# Now, we need to map the CDS for each reaction to its COG.
rxnCogDict = copy.deepcopy(geneDict)
for rxn in rxnCogDict.keys():
for pos, cds in enumerate(rxnCogDict[rxn]):
rxnCogDict[rxn][pos] = cogDict[cds]
# Some CDS map to the same COG, so update the lists to only include
# unique entries
for rxn in rxnCogDict.keys():
rxnCogDict[rxn] = list(set(rxnCogDict[rxn]))
with open(cladeDir+'/'+clade+'.COG.txt', 'w') as outFile:
for key in sorted(rxnCogDict.keys()):
for cds in sorted(rxnCogDict[key]):
outFile.write(key+','+str(cds)+'\n')
# Now, read in the expression data for that clade
exprDataFrame = pd.read_csv(exprDir+'/'+clade+'.norm', index_col=1)
exprDataFrame = exprDataFrame.drop('Clade', axis=1)
# Create an empty dataframe
    rxnCogExprMultiIndex = pd.MultiIndex(levels=[[],[]],
                                         codes=[[],[]],
                                         names=['Reaction', 'COG'])
rxnCogExprDataFrame = pd.DataFrame(index=rxnCogExprMultiIndex, columns=exprDataFrame.columns)
# Iterate over the rxnCogDict and look up expression values in the exprDataFrame
# Use these to populate the rxnCogExprDataFrame
    for rxn in sorted(rxnCogDict.keys()):
        for cog in sorted(rxnCogDict[rxn]):
            # If the COG IS in the genome AND expressed
            if cog in exprDataFrame.index:
                rxnCogExprDataFrame.loc[(rxn, cog),:] = exprDataFrame.loc[cog]
            # If the COG IS in the genome AND NOT expressed
            elif cog in cogDict.values():
                rxnCogExprDataFrame.loc[(rxn, cog),:] = 0
            # If the COG IS NOT in the genome
            else:
                rxnCogExprDataFrame.loc[(rxn, cog),:] = None
# The genes which are not expressed will not have consensus annotations
# Rerun that piece of code
# Compute majority annotation
    # First subset the dataframe, keeping only the genomes for that clade
tempDF = annotDF[modelList]
for curIndex in rxnCogExprDataFrame.index:
cog = curIndex[1]
annotList = []
for genome in tempDF.columns:
if not pd.isnull(tempDF.loc[cog][genome]):
innerString = tempDF.loc[cog][genome]
# Dataframe element is a string enclosed in brackets with a comma separating elements
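                # (A hedged aside: if these elements are valid Python list
                # literals, ast.literal_eval(innerString) would parse them more
                # robustly than the regex stripping below.)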
innerString = re.sub('[\[\]]' , '', innerString)
innerList = re.split('\', \'|\", \"', innerString)
innerList = [re.sub('\"|\'', '', string) for string in innerList]
annotList = annotList + innerList
# Find the most common
annotCounter = Counter(annotList)
majorityAnnot = annotCounter.most_common(1)[0][0]
# Assign the Annotation
rxnCogExprDataFrame.loc[curIndex,'Annotation'] = majorityAnnot
# Write the results to file
rxnCogExprDataFrame.to_csv(cladeDir+'/'+clade+'.COG.norm')
#%%#############################################################################
### Aggregate it all into a single dataframe
################################################################################
# Create a master dataframe
masterMultiIndex = pd.MultiIndex(levels=[[],[]],
                                 codes=[[],[]],
                                 names=['Reaction', 'COG'])
masterDataFrame = pd.DataFrame(index=masterMultiIndex)
for clade in cladeList:
# Read in the expression data for that clade
rxnCogExprDataFrame = pd.read_csv(cladeDir+'/'+clade+'.COG.norm', index_col=[0,1])
# Rename the columns
for column in rxnCogExprDataFrame.columns:
rxnCogExprDataFrame.rename(columns={column:column+' ('+clade+')'}, inplace=True)
# Merge into the masterDataFrame
masterDataFrame = pd.concat([masterDataFrame, rxnCogExprDataFrame], axis=1, join='outer')
# Write to file
masterDataFrame.to_csv(resultsDir+'/transAnnotExpr.csv')
|
Have you ever been playing Pokemon and wished for a bit more adventure? Maybe you’ve been playing one of the Castlevania games and hoped for a few monster companions to help you along your journey? Well, Moi Rai Games’ Kickstarter for Monster Sanctuary might be just the thing you’re looking for!
As the youngest heir of an ancient bloodline of proud Monster Keepers, you step out into the world to follow in your ancestors’ footsteps. You will gather a party of monsters to grow and train. Meanwhile, a series of unsettling events worries the experienced Keepers of the Monster Sanctuary. This is the beginning of a journey to unravel the cause of a mystery that threatens the peace between humans and monsters.
To start, you will choose one of four elemental creatures to begin your journey, but this is just the tip of the iceberg as there are 50-100 monsters in the game right now. However, one of the rapidly approaching stretch goals will bump that number up to 100-200 with light and dark variations of the different monsters. To add new monsters to your party, your Monster Keeper will be awarded eggs after successful monster encounters.
The monsters within your party will have their own unique abilities and skill branches that will allow you to cultivate the perfect team. You’ll need a well-rounded party to fully explore the world and open up new paths.
As of right now, the two-brother team at Moi Rai Games has surpassed its Kickstarter goal by $60,000. Even in the short time it took me to write this article, I saw a handful of pledges pour in to support Monster Sanctuary. Denis Sinner, a developer for the Might and Magic and Tropico franchises, was truly inspired by ConcernedApe’s Stardew Valley, and he began work on Monster Sanctuary over three years ago. His brother Anton assists with the story and writing, but Denis has handled the programming and design of the game himself.
If the pixelated goodness in the photos, GIFs, and videos above isn’t enough to make you want to jump at the chance to support this project, the demo surely will! As of right now, a demo for Monster Sanctuary is available on Steam and GameJolt, and it is over 2 hours long and showcases over 50 monsters!
At the time of this writing, their Kickstarter still has ten days left! So, don’t miss your chance to be a part of the monster hunting community!
|
try:
from . import generic as g
except BaseException:
import generic as g
try:
import triangle # NOQA
has_triangle = True
except ImportError:
g.log.warning('No triangle! Not testing extrude primitives!')
has_triangle = False
class ExtrudeTest(g.unittest.TestCase):
def test_extrusion(self):
if not has_triangle:
return
transform = g.trimesh.transformations.random_rotation_matrix()
polygon = g.Point([0, 0]).buffer(.5)
e = g.trimesh.primitives.Extrusion(
polygon=polygon,
transform=transform)
# will create an inflated version of the extrusion
b = e.buffer(.1)
assert b.to_mesh().volume > e.to_mesh().volume
assert b.contains(e.vertices).all()
# try making it smaller
b = e.buffer(-.1)
assert b.to_mesh().volume < e.to_mesh().volume
assert e.contains(b.vertices).all()
# try with negative height
e = g.trimesh.primitives.Extrusion(
polygon=polygon,
height=-1.0,
transform=transform)
assert e.to_mesh().volume > 0.0
# will create an inflated version of the extrusion
b = e.buffer(.1)
assert b.to_mesh().volume > e.to_mesh().volume
assert b.contains(e.vertices).all()
# try making it smaller
b = e.buffer(-.1)
assert b.to_mesh().volume < e.to_mesh().volume
assert e.contains(b.vertices).all()
# try with negative height and transform
transform = [[1., 0., 0., -0.],
[0., 1., 0., 0.],
[-0., -0., -1., -0.],
[0., 0., 0., 1.]]
e = g.trimesh.primitives.Extrusion(
polygon=polygon,
height=-1.0,
transform=transform)
assert e.to_mesh().volume > 0.0
for T in g.transforms:
current = e.copy().apply_transform(T)
# get the special case OBB calculation for extrusions
obb = current.bounding_box_oriented
# check to make sure shortcutted OBB is the right size
assert g.np.isclose(
obb.volume,
current.to_mesh().bounding_box_oriented.volume)
# use OBB transform to project vertices of extrusion to plane
points = g.trimesh.transform_points(
current.vertices, g.np.linalg.inv(obb.primitive.transform))
# half extents of calculated oriented bounding box
half = (g.np.abs(obb.primitive.extents) / 2.0) + 1e-3
# every vertex should be inside OBB
assert (points > -half).all()
assert (points < half).all()
assert current.direction.shape == (3,)
assert current.origin.shape == (3,)
if __name__ == '__main__':
g.trimesh.util.attach_to_log()
g.unittest.main()
|
These Chromium Plated Cable Drag Chains are highly appreciated for their high tensile strength, durability and long service life. Further, these Chromium Plated Cable Drag Chains are used in lifts and various types of heavy-lift structures. We manufacture these using the best quality stainless and galvanized steel, procured from reliable vendors in the industry.
|
#===========================================================================
#License
#===========================================================================
#Copyright (C) 2016 Alexander Blaessle
#This software is distributed under the terms of the GNU General Public License.
#This file is part of PyRW.
#PyRW is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
#===========================================================================
#Importing necessary modules
#===========================================================================
#Numpy
import numpy as np
import RWsuperposition
#===========================================================================
#Module description
#===========================================================================
"""
Step module of pyrw containing classes describing random walk steps for walkers,
including the following classes:
(1) step
(2) MRWstep
(3) CRWstep
(4) CorRWstep
(5) CCRWstep
"""
#===========================================================================
#Module classes
#===========================================================================
class step(object):
#Init
def __init__(self,w,typ):
self.walker=w
self.typ=typ
self.superpositions=[]
def updateGammaDist(self):
self.gammaVec=[0.]
for s in self.superpositions:
self.gammaVec.append(self.gammaVec[-1]+s.gamma)
return self.gammaVec
def checkGammas(self,debug=False):
sumGammas=0
for s in self.superpositions:
sumGammas=sumGammas+s.gamma
        if abs(sumGammas-1)<1e-9:
            return True
        else:
            if debug:
                print("Warning, gammas do not sum up to 1!")
return False
def performStep(self):
#Pick random number to choose which superposition
rand_mode=np.random.random()
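        #gammaVec is the cumulative sum of the superposition weights, so
        #finding the bracket that rand_mode falls into samples each
        #superposition with probability equal to its gamma (inverse-CDF sampling)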
#Check which superposition to perform
for i in range(len(self.gammaVec)):
#print self.gamaVec[i], " <= ", rand_mode , " < = "
if self.gammaVec[i]<=rand_mode and rand_mode<=self.gammaVec[i+1]:
self.superpositions[i].doStep()
break
def addSuperposition(self,r,gamma,kappa):
        try:
            newId=max(self.getSuperpositionIds())+1
        except ValueError:
            newId=0
s=RWsuperposition.superposition(self.walker,r,gamma,kappa,newId)
self.superpositions.append(s)
self.updateGammaDist()
return s
def getSuperpositionIds(self):
ids=[]
for s in self.superpositions:
ids.append(s.Id)
return ids
class MRWstep(step):
#Init
def __init__(self,w,r):
super(MRWstep, self).__init__(w,0)
#Add simple Brownian step
self.addSuperposition(r,1,0)
class CRWstep(step):
#Init
def __init__(self,w,r1,r2,gamma):
super(CRWstep, self).__init__(w,1)
        #Add two superpositions; gammas must sum to 1, as in CCRWstep
        self.addSuperposition(r1,gamma,0)
        self.addSuperposition(r2,1-gamma,0)
class CorRWstep(step):
#Init
def __init__(self,w,r,kappa):
super(CorRWstep, self).__init__(w,2)
#Add simple correlated random walk
self.addSuperposition(r,1,kappa)
class CCRWstep(step):
#Init
def __init__(self,w,r1,r2,gamma,kappa):
super(CCRWstep, self).__init__(w,3)
#Add two superpositions, one MRW and one CorRW
self.addSuperposition(r1,gamma,0)
self.addSuperposition(r2,1-gamma,kappa)
def setParms(self,parms):
self.scaleR(parms[0])
self.setGamma(parms[1])
self.setKappa(parms[2])
def scaleR(self,rScale):
r1=self.superpositions[0].getR()
self.setR2(rScale*r1)
return rScale*r1
def setGamma(self,gamma):
self.superpositions[0].setGamma(gamma)
self.superpositions[1].setGamma(1-gamma)
return gamma
def setKappa(self,kappa):
self.superpositions[1].setKappa(kappa)
return kappa
def setR1(self,r):
self.superpositions[0].setR(r)
return r
def setR2(self,r):
self.superpositions[1].setR(r)
return r
class SCCRWstep(CCRWstep):
def __init__(self,w,r1,r2,gamma,kappa,gammaSup=1,gammaStep=0.1,gammaMin=0.2):
super(SCCRWstep, self).__init__(w,r1,r2,gamma,kappa)
self.origGamma=gamma
self.gammaStep=gammaStep
self.gammaMin=gammaMin
self.gammaSup=gammaSup
def performStep(self):
"""Overwrite performStep."""
#Pick random number to choose which superposition
rand_mode=np.random.random()
#Check which superposition to perform
for i in range(len(self.gammaVec)):
#print self.gamaVec[i], " <= ", rand_mode , " < = "
if self.gammaVec[i]<=rand_mode and rand_mode<=self.gammaVec[i+1]:
self.superpositions[i].doStep()
# Special clause to adjust gamma
if i==self.gammaSup:
idxs=[0,1]
idxs.remove(self.gammaSup)
idx=idxs[0]
gammaNew=max(self.superpositions[idx].gamma-self.gammaStep,self.gammaMin)
self.updateGamma(gammaNew)
else:
self.setBackGamma()
break
def setBackGamma(self):
self.updateGamma(self.origGamma)
def updateGamma(self,gamma):
self.setGamma(gamma)
self.updateGammaDist()
def setOrigGamma(self,gamma):
self.origGamma=gamma
self.setGamma(gamma)
self.updateGammaDist()
def setGammaStep(self,gammaStep):
self.gammaStep=gammaStep
def setGammaMin(self,gammaMin):
self.gammaMin=gammaMin
|
Orchids bloom again after cutting the stem.
Orchids (Orchidaceae) enjoy different climates depending on the variety, but many types thrive outdoors in U.S. Department of Agriculture plant hardiness zones 10 and 11. Most gardeners grow their orchids indoors to control conditions such as light, temperature and humidity. In the right environment, your orchid puts on a dazzling display of colorful, exotic-looking blooms. After a few months, though, the flowers fade and the orchid seems like it's gone dormant. Moth orchids (Phalaenopsis blume) are the only type that will bloom again on the same flower spike if you cut it back. You can also cut the stem of any orchid variety at the base to prompt strong root development.
Sterilize your cutting tool by coating it in rubbing alcohol if you're not using a new blade. This will kill bacteria that can infect and harm your orchid.
Allow all the orchid's blooms to fade and drop off the plant.
Make a cut between the lowest bloom on the stem and the node just below it. The node looks like a small bump protruding from the stem. This node should put out flowers in eight to 12 weeks.
Dab powdered cinnamon over the cut area. Cinnamon's antifungal properties will help protect your plant as the cut heals. Alternatively, use a bit of melted candle wax to seal the cut.
Wait for the orchid's flowers to fade and fall off the plant.
Make a cut about 1 or 2 inches above the base of the orchid's stem using a sterile knife or scissors.
Seal the cut with powdered cinnamon to prevent fungal disease. In several months, the orchid will send out a new stem and blooms.
You should cut off the entire stem if it ever turns brown or yellow.
According to the American Orchid Society, Phalaenopsis orchids are the only kind that will rebloom from an old stem.
American Orchid Society: Where Do I Cut the Flower Spike When It is Finished?
|
#!/usr/bin/python
#-*- encoding: utf-8 -*-
"""
Copyright (c) 2015 @myuuuuun
Released under the MIT license.
"""
import math
import numpy as np
import functools
import matplotlib.pyplot as plt
import matplotlib.cm as cm
EPSIRON = 1.0e-8
# Return an array of the values at x of the Legendre polynomials P0 through P_(length-1)
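# Uses Bonnet's recurrence: n*P_n(x) = (2n-1)*x*P_{n-1}(x) - (n-1)*P_{n-2}(x),
# starting from P_0(x) = 1 and P_1(x) = x.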
def legendre(x, length):
values = [1, x]
for i in range(2, length):
v = ((2*i-1)*x*values[i-1] - (i-1) * values[i-2]) / i
values.append(v)
return values
# Return an array of the values at x of the Chebyshev polynomials P0 through P_(length-1)
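# Uses the closed form T_n(x) = cos(n*arccos(x)), valid for x in [-1, 1].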
def chebyshev(x, length):
values = []
for i in range(length):
v = np.cos(i * np.arccos(x))
values.append(v)
return values
if __name__ == '__main__':
    # Common settings
length = 6
x_list = np.arange(-0.99, 1.00, 0.01)
f_matrix = np.zeros((length, 199), dtype=float)
# legendre
"""
for i, x in enumerate(x_list):
values = legendre(x, length)
for j in range(length):
f_matrix[j][i] = values[j]
fig, ax = plt.subplots()
plt.title("Legendre多項式")
plt.xlabel("x")
plt.ylabel("f")
plt.xlim(-1, 1)
for j in range(length):
plt.plot(x_list, f_matrix[j], color=cm.gist_earth(j/length), label='P{0}'.format(j))
plt.legend()
plt.show()
"""
"""
# chebyshev
for i, x in enumerate(x_list):
values = chebyshev(x, length)
for j in range(length):
f_matrix[j][i] = values[j]
fig, ax = plt.subplots()
plt.title("Chebyshev多項式")
plt.xlabel("x")
plt.ylabel("f")
plt.xlim(-1, 1)
for j in range(length):
plt.plot(x_list, f_matrix[j], color=cm.gist_earth(j/length), label='P{0}'.format(j))
plt.legend()
plt.show()
"""
|
Tunisian Fouta Towels are hand woven from 100% Egyptian cotton in the traditional style; these towels become even softer and more absorbent with use. Each Fouta Towel is hand fringed and, measuring 2m x 1m, they are perfect to use as beach towels, spa towels, picnic and travel rugs, scarves, wraps or throws.
Classic Stripe Fouta Towel.
The flat weave design makes for an excellent tablecloth.
Classic Stripe: Solid colour throughout with simple bold stripes at either end.
|
# from PicartoClientAPI import Language, ApiClient
def Lang(string): # fallback
return string
L = Lang
def generateTagline(details):
ret_string = L("viewers") + ": "
ret_string = "%s %d (%s: %d)" % (ret_string, details.viewers, L("total"), details.viewers_total)
return ret_string
def generateSummary(details):
ret_string = ""
for panel in details.description_panels:
ret_string += panel.title + "\r\n" + panel.body + "\r\n" + "\r\n"
return ret_string
def generateCountries(languages):
oc = ObjectContainer(
title2=u'%s' % "multistreams and records",
art=None,
content=ContainerContent.GenericVideos
)
ret_string = ""
# for lang_dict in languages:
# lang = ApiClient().deserialize_model(lang_dict, Language)
# ret_string += lang.name + "\r\n"
return oc
def generateExtras(details):
oc = ObjectContainer(
title2=u'%s' % "multistreams and records",
art=None,
content=ContainerContent.GenericVideos
)
    ret_string = ""
    for lang in details.languages:
        ret_string += lang.name + "\r\n"
return oc
|
Everyone loves a bargain but they don’t want one that seems like a bargain. Quality products at great savings are what the latest IT Cosmetics TSV is all about. It’s time to tune into QVC to order your very own Customer Favorites Collection. 4 favorite products make up this collection with one new shade of an old favorite.
We can promise that this is going to be a collection that you’ll regret not owning. What you’ll be purchasing is a Supersize CC+ Illumination with SPF 50+ Cream, Heavenly Skin CC+ Skin Perfecting Brush, Superhero Mascara, and Je Ne Sais Quoi hydrating color lip treatment in the new shade of Rose.
Nothing should surprise you when I say that we at Beauty Info Zone love IT Cosmetics CC+ Cream. In today’s TSV you’ll be receiving the Supersize CC+ Cream, a color correcting illuminating full coverage cream, with SPF 50+. This is a natural appearing coverage that brings you a radiance without shimmer or glimmer. It brightens my face making me feel better about myself. The fact that it’s an anti-aging formula makes it even better. A regular size of the foundation is $38.50. The TSV sells for $59.94 for all 4 items!
IT Cosmetics is known for their brushes so it’s not a surprise that there’s a new one in the TSV and it’s amazing. The Heavenly Skin CC+ Skin-Perfecting Brush is a perfect tool to smooth on the CC Cream making it appear to be airbrushed. It hugs the curves of your face while you work with it in a circular motion. You’ll love this brush for all your foundations so it’s a winner all around.
SUPERHERO Elastic Stretch Volumizing Mascara has been on my love list since I was first introduced to it. It’s a conditioning mascara that is extra black. It’s in charge of 3 things: super volume, super lift, and super length. Alone this mascara sells for $24 so the TSV price of $59.94 makes it basically free.
Last you’ll find that IT Cosmetics has extended their Je Ne Sais Quoi hydrating lip treatment with the shade Rose. Previously it was only available in the original Je Ne Sais Quoi pink that adjusts to your body temperature. Rose is a darker shade so you’ll notice it more. Remember this is a lip TREATMENT but it gives you enough color so that this can be your color of the day. It is a peptide-infused treatment with essential butters and oils, hyaluronic filling spheres and anti-oxidants so you’ll end up with soft, healthy lips. It’s not a long lasting lip color but it is moisturizing so that rosy pink is going to be a joy to reapply.
QVC.com or the QVC channel will be happy to take your order for A287725 any time while the TSV is live on Saturday, March 18th. Showtimes are midnight – 2 a.m., 3 a.m., 3 – 5 p.m., 7 – 9 p.m., and 11 p.m. EDT.
We’ve been fans of IT Cosmetics since the very beginning. The first product that Marcia and Lisa fell in love with was Bye Bye Under Eyes and our love has grown as the company has grown. Jamie Kern Lima from IT Cosmetics started small and now is known as one of the best cosmetic gurus around. We’ve always loved their brushes and today we have two beautiful holiday brush sets that are sold exclusively at ULTA. You don’t want to miss out on these whether you are treating yourself or a loved one. They are stunners besides being some of the best brushes available. Beauty Info Zone was sent the City Chic brush set and All That Glitters brush set. We had trouble deciding who gets the pleasure of each one but we’re both very pleased.
City Chic is a limited edition 5 piece Ultra Luxe Skin Loving Brush Set ($68) that is a perfect travel set or for everyday use. It’s one that you’ll receive accolades on if you gift it. Even better, put it on your wishlist and make sure everyone sees that you crave it.
Inside the beautiful rose gold cosmetic bag you’ll find 5 super soft brushes that your skin will love. The brushes are a rose gold ombre with duo fibers and of course are cruelty free.
The City Chic Powder brush is the softest brush you can imagine. I want to lay still and have someone just brush my face all day with this. It’s made for your favorite loose and pressed powders (which should be IT of course). It’s large, round, and fluffy.
City Chic Blush Brush is great for blush and for contouring. The angled head makes it easy to hug the contours of your face. It’s also a great cream blush brush as it diffuses the cream softly on your face.
City Chic Foundation Brush is a flat tapered paddle brush. Unlike the foundation brushes of yore, this one is soft and flexible so I find it easy to use with my liquid foundations, getting each nook and cranny covered. If you have a cream foundation that you like then this brush will come in very handy.
How I love eyeshadow brushes and this City Chic Eyeshadow Brush is terrific. Whether it’s powder eyeshadows or cream eyeshadows this medium size brush blends the colors with delight.
Last is a brush that gets a lot of love from me, City Chic Crease Brush. The size of this crease brush fits my crease perfectly plus I love it for blending all my shadows after I’m done to soften any harsh lines. With this narrow tapered brush I start with a dark shadow along the outer corner and blend it into my crease.
I think City Chic would be a hit for the women on your list. I’m sure the All That Glitters set is just as wonderful.
All That Glitters is not gold – nope, it’s silver, and this limited edition 5 piece Must-Have Luxe Skin Loving Brush Set ($58) is a truly beautiful set. All the brushes have sparkling silver handles and are housed in a silver clutch that my daughter immediately declared ‘elegant’!
If you have any IT Cosmetics brushes, you know that they are fantastic. Soft, thick, and luxurious the same quality IT brushes in a great travel set. The set includes brushes for loose and pressed powder, foundation, concealer, eyeshadow, and eye lining – pretty much all you need. The silver clutch has a magnetic closure, a satin pouch for brushes, and a zippered pocket.
The Brilliant Powder Brush is big, fluffy, and so soft. I honestly don’t think I own a softer brush, although this is thick enough to hold onto loose or pressed powder.
The Radiant Foundation Brush has a flat edge that makes blending cream and liquid foundations a dream. This has that awesome softness as well…these brushes are just divine.
I love the Stunning Eyeshadow Brush. It grabs just the right amount of pigment and blends shadows beautifully. It’s just the right size too – not too big or too small.
The Luminous Concealer Brush blends out cream concealers like a champ. It works great with my favorite concealer, the IT Cosmetics Bye Bye Undereye, which I use every day.
The Dazzling Liner/Brow Brush is a great multi-tasker. It is great for lining eyes with dark eyeshadow, and then does double duty as a brow liner for powder, wax, or gel brow products.
Whatever your pleasure – City Chic or All That Glitters – either brush set doesn’t disappoint! Make someone happy and gift them a new brush set, then make yourself happy and get yourself a set too.
Who needs to do a duck face when you can contour instead?
Contouring takes some great cosmetics and some great brushes. Marcia and I have reviewed quite a few Beauty Junkee brushes over the past few months – read the reviews HERE – and today I am very excited to tell you about the latest set we sent to review, the Contour Kit.
I’m going to jump right to the price – right now it’s on sale for 50% off – just $25 right now on Amazon. That is one heck of a sale for a really terrific set.
This set comes in a sleek brush case 9.25″ W X 4.5″ H X 0.5″ D (expandable) that will fit up to 15 makeup brushes. It’s perfect for storing your brushes as well as for travel.
All of the brushes work with both cream and powder contour products. I am impressed with the quality of the brushes, particularly at that price! The brushes have copper ferrules with dual crimps, and the handles are wood. They feel substantial but not too heavy.
pro Sculptor Brush: This brush has narrow, dense bristles with a dome shape for perfect application and blending; flat sculpting brushes apply a harsh line and do not blend. I love this brush for powder blush and bronzer. It’s soft but firm and it blends like a dream.
pro Duo Fiber Brush: This brush has short and long bristles for feather-like application that allows for buildable color. Where this brush excels is with powder highlighters. It’s great for creating a soft glow.
pro Highlighter Brush: This brush has soft bristles with a small tapered point for applying highlighter to narrow facial features. I find I use this most on my nose – lighter highlighter down the bridge of my nose and darker contour on the sides.
pro Detailer Brush: This is a small precision eye shadow brush with narrow dome-shaped bristles for applying to small features, such as the nose, under the lower lip, and the inner corner of the eye. Yup, it works great, and of course it’s also terrific for applying eyeshadow.
pro Contour Sponge: Ok, so this isn’t a brush – it’s a latex-free, flat-angled blending sponge that creates an airbrushed finish with cream contours. This sponge is nice and soft, and the flat edge is very handy, particularly for contouring cheeks.
|
# Citrocan
# Copyright (c) 2016 sisoftrg
# The MIT License (MIT)
import sys
PY3 = sys.version_info >= (3, 0)
class Decoder(object):
connected = False
cb = {}
mfs = {}
lamps = {}
economy = False
enabled = False
lighting = False
brightness = 0x0c
ignition = 2
rpm = 0
speed = 0
power = 0
odometer = 0
out_temp = 0
message_id = 0
show_message = False
vin1 = ""
vin2 = ""
vin3 = ""
funcs = 0
silence = False
source = ""
srcs = ['---', 'Tuner', 'CD', 'CD Changer', 'Input AUX 1', 'Input AUX 2', 'USB', 'Bluetooth']
have_changer = False
cd_disk = 0
volume = 0
vol_change = False
track_intro = False
random = False
repeat = False
rds_alt = False
want_rdtxt = False
balance_lr = 0
show_balance_lr = False
balance_rf = 0
show_balance_rf = False
bass = 0
show_bass = False
treble = 0
show_treble = False
loudness = False
show_loudness = False
autovol = 0
show_autovol = 0
ambience = ""
ambs = {0x03: 'None', 0x07: 'Classical', 0x0b: 'Jazz-Blues', 0x0f: 'Pop-Rock', 0x13: 'Vocal', 0x17: 'Techno'}
ambience_show = False
radio_mem = 0
radio_band = ""
bands = ['---', ' FM1', ' FM2', 'DAB', 'FMAST', 'AM', 'AMLW', '---']
radio_freq = ""
ast_scan = False
pty_scan = False
radio_scan = False
rds_scan = False
show_radios = False
want_rds = False
have_rds = False
want_ta = False
have_ta = False
traffic = False
want_reg = False
want_pty = False
show_pty = False
pty_mode = 0
pty_sel = 0
pty_cur = ""
ptys = {0x00: 'Deactivate', 0x01: 'News', 0x02: 'Affairs', 0x03: 'Info', 0x04: 'Sport', 0x05: 'Educate', 0x06: 'Drama', 0x07: 'Culture',
0x08: 'Science', 0x09: 'Varied', 0x0A: 'Pop M', 0x0B: 'Rock M', 0x0C: 'Easy M', 0x0D: 'Light M', 0x0E: 'Classics', 0x0F: 'Other M',
0x10: 'Weather', 0x11: 'Finance', 0x12: 'Children', 0x13: 'Social', 0x14: 'Religion', 0x15: 'Phone In', 0x16: 'Travel',
0x17: 'Leisure', 0x18: 'Jazz', 0x19: 'Country', 0x1A: 'Nation M', 0x1B: 'Oldies', 0x1C: 'Folk M', 0x1D: 'Document'}
rds_name = ""
cd_tracks = 0
cd_len = ""
cd_mp3 = 0
cd_pause = False
track_num = 0
track_time = ""
track_len = ""
track_name = ""
track_author = ""
rdtxt = ""
rkeys = {}
msgs = {0x00: 'Diagnosis ok', 0x01: 'Engine temperature too high', 0x03: 'Coolant circuit level too low', 0x04: 'Check engine oil level', 0x05: 'Engine oil pressure too low',
0x08: 'Braking system faulty', 0x0A: 'Air suspension ok (picture)', 0x0B: 'Door, boot, bonnet and fuel tank open', 0x0D: 'Tyre puncture(s) detected',
0x0F: 'Risk of particle filter blocking', 0x11: 'Suspension faulty: max.speed 90 km/h', 0x12: 'Suspension faulty', 0x13: 'Power steering faulty', 0x14: '10km/h!',
0x61: 'Handbrake on', 0x62: 'Handbrake off', 0x64: 'Handbrake control faulty: auto handbrake activated', 0x67: 'Brake pads worn', 0x68: 'Handbrake faulty',
0x69: 'Mobile deflector faulty', 0x6A: 'ABS braking system faulty', 0x6B: 'ESP / ASR system faulty', 0x6C: 'Suspension faulty', 0x6D: 'Power steering faulty',
0x6E: 'Gearbox faulty', 0x6F: 'Cruise control system faulty', 0x73: 'Ambient brightness sensor faulty', 0x74: 'Sidelamp bulb(s) faulty',
0x75: 'Automatic headlamp adjustment faulty', 0x76: 'Directional headlamps faulty', 0x78: 'Airbag faulty', 0x79: 'Active bonnet faulty', 0x7A: 'Gearbox faulty',
0x7B: 'Apply foot on brake and lever in position "N"', 0x7D: 'Presence of water in diesel fuel filter', 0x7E: 'Engine management system faulty',
0x7F: 'Depollution system faulty', 0x81: 'Particle filter additive level too low', 0x83: 'Electronic anti-theft faulty', 0x86: 'Right hand side door faulty',
0x87: 'Left hand side door faulty', 0x89: 'Space measuring system faulty', 0x8A: 'Battery charge or electrical supply faulty', 0x8D: 'Tyre pressure(s) too low',
0x92: 'Warning!', 0x95: 'Info!', 0x96: 'Info!', 0x97: 'Anti-wander system lane-crossing warning device faulty', 0x9D: 'Foglamp bulb(s) faulty',
0x9E: 'Direction indicator(s) faulty', 0xA0: 'Sidelamp bulb(s) faulty', 0xA1: 'Parking lamps active', 0xCD: 'Cruise control not possible: speed too low',
0xCE: 'Control activation not possible: enter the speed', 0xD1: 'Active bonnet deployed', 0xD2: 'Front seat belts not fastened',
0xD3: 'Rear right hand passenger seat belts fastened', 0xD7: 'Place automatic gearbox in position "P"', 0xD8: 'Risk of ice', 0xD9: 'Handbrake!',
0xDE: 'Door, boot, bonnet and fuel tank open', 0xDF: 'Screen wash fluid level too low', 0xE0: 'Fuel level too low', 0xE1: 'Fuel circuit deactivated',
0xE3: 'Remote control battery flat', 0xE4: 'Check and re-initialise tyre pressure', 0xE5: 'Tyre pressure(s) not monitored',
0xE7: 'High speed, check tyre pressures correct', 0xE8: 'Tyre pressure(s) too low', 0xEA: 'Hands-free starting system faulty',
0xEB: 'Starting phase has failed (consult handbook)', 0xEC: 'Prolonged starting in progress', 0xED: 'Starting impossible: unlock the steering',
0xEF: 'Remote control detected', 0xF0: 'Diagnosis in progress...', 0xF1: 'Diagnosis completed', 0xF7: 'Rear LH passenger seatbelt unfastened',
0xF8: 'Rear center passenger seatbelt unfastened', 0xF9: 'Rear RH passenger seatbelt unfastened'}
def __init__(self, ss):
self.ss = ss
@staticmethod
def get_str(b):
if PY3:
ba = bytes(b).strip(b'\0')
else:
ba = bytes(b''.join([chr(x) for x in b if x]))
try:
s = ba.decode('utf8')
except UnicodeDecodeError:
try:
s = ba.decode('cp1251', errors='replace')
except UnicodeDecodeError:
s = "<bad name>"
except LookupError: # kivy's p4a blacklists nonstandrad encodings by default, see blacklist.txt
s = "<wrong program build>"
return s.strip()
def parse_mf(self, ci, cl, cd):
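        # Reassembles multi-frame payloads. The upper nibble of the first
        # byte is the frame type, following the ISO 15765-2 (ISO-TP) scheme:
        # 0 = single frame, 1 = first frame, 2 = consecutive frame,
        # 3 = flow control.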
typ = (cd[0] & 0xf0) >> 4
arg = cd[0] & 0x0f
if typ == 0: # single
# print("got mf:", hex(ci), ''.join('{:02x}'.format(x) for x in cd[1:min(1 + arg, cl)]))
return (arg, cd[1:min(1 + arg, cl)])
elif typ == 1: # first
fl = arg * 256 + cd[1]
el = fl - (cl - 2)
self.mfs[ci] = [fl, el, cd[2:cl]]
elif typ == 2: # consecutive. TODO: check frame order!
if ci not in self.mfs:
return None
el = self.mfs[ci][1]
if el > cl - 1:
self.mfs[ci][1] -= cl - 1
self.mfs[ci][2] += cd[1:cl]
else:
fl = self.mfs[ci][0]
d = self.mfs[ci][2] + cd[1:min(cl, el + 2)]
del self.mfs[ci]
# print("got mf:", hex(ci), ''.join('{:02x}'.format(x) for x in d))
return (fl, d)
elif typ == 3: # flow, packets not for us
pass
return None
def decode(self, ci, cl, cd):
if ci in self.cb and cd == self.cb[ci]:
return False
self.cb[ci] = cd
if ci == 0x036: # bsi: ignition
self.economy = bool(cd[2] & 0x80)
self.lighting = bool(cd[3] & 0x20)
self.brightness = cd[3] & 0x0f
self.ignition = cd[4] & 0x07
elif ci == 0x0a4: # current cd track, multiframe
dd = self.parse_mf(ci, cl, cd)
if not dd:
return False
cd = dd[1]
# cd track info
#got mf: 0xa4 20009801546865204372616e626572726965730000000000416e696d616c20496e7374696e63740000000000
#got mf: 0xa4 2000000000
# radiotext
#got mf: 0xa4 10000000544154415220524144494f53202020202020202020202020414c4c49202d2052454b4c414d41202838353532292039322d30302d383220202020202020202020
#got mf: 0xa4 10000000544154415220524144494f53492038372e3520464d204348414c4c49202d2052454b4c414d41202838353532292039322d30302d383220202020202020202020
#got mf: 0xa4 1000000000
page = (cd[0] >> 4) & 0x0f
if page == 1:
self.rdtxt = self.get_str(cd[4:])
elif page == 2:
ha = bool(cd[2] & 0x10)
self.track_author = ha and self.get_str(cd[4:24]) or ""
self.track_name = self.get_str(ha and cd[24:44] or cd[4:24])
elif ci == 0x0b6: # bsi: speed info
self.rpm = cd[0] * 256 + (cd[1] >> 3)
self.speed = cd[2] * 256 + cd[3]
elif ci == 0x0e6: # bsi: voltage
self.power = cd[5] / 20 + 7.2
elif ci == 0x0f6: # bsi: info
self.odometer = cd[2] * 65536 + cd[3] * 256 + cd[4]
self.out_temp = cd[6] / 2 - 39.5
self.lamps['reverse'] = bool(cd[7] & 0x80)
self.lamps['right'] = bool(cd[7] & 0x02)
self.lamps['left'] = bool(cd[7] & 0x01)
#elif ci == 0x120: # bsi: warning log
# pass
elif ci == 0x125: # track list, multiframe
dd = self.parse_mf(ci, cl, cd)
if not dd:
return False
cd = dd[1]
# cd list
#got mf: 0x125 900100108111524f4f5400000000000000000000000000000000
#got mf: 0x125 986f5d41f120696c6c5f6e696e6f5f2d5f686f775f63616e5f696b6f726e2d6b6973732d6d73742e6d70330000006d797a756b612e72755f332e5f42756c6c65745f6d797a756b612e72755f372e5f5374617469632d
#got mf: 0x125 00
# radio list, band
#got mf: 0x125 100100103130332e3230000000
#got mf: 0x125 20500000353331000000000000353331000000000000353331000000000000363330000000000000353331000000000000353331000000000000
#got mf: 0x125 201000004543484f204d534b90464d2039302e3920903130312e354d485ab0504c555320202020903130362e393000002035392d33342d3233b0
#got mf: 0x125 20200000464d2039302e39209031363a31363a333790343634375f31335fb03130332e363000000039302e36300000000044414e434520202090
#got mf: 0x125 40200000464d2039302e39209031363a31363a333790343634375f31335fb03130332e363000000039302e36300000000044414e434520202090
#got mf: 0x125 00
page = (cd[0] >> 4) & 0x0f
elif ci == 0x128: # bsi: lamps
self.lamps['belt_fl'] = bool(cd[0] & 0x40)
self.lamps['doors'] = bool(cd[1] & 0x10)
self.lamps['sidelight'] = bool(cd[4] & 0x80)
self.lamps['beam_l'] = bool(cd[4] & 0x40)
self.lamps['beam_h'] = bool(cd[4] & 0x20)
self.lamps['fog_f'] = bool(cd[4] & 0x10)
self.lamps['fog_r'] = bool(cd[4] & 0x08)
self.lamps['lefti'] = bool(cd[4] & 0x04)
self.lamps['righti'] = bool(cd[4] & 0x02)
#elif ci == 0x131: # cmd to cd changer
# pass
elif ci == 0x165: # radio status
self.enabled = bool(cd[0] & 0x80)
self.silence = bool(cd[0] & 0x20)
self.source = self.srcs[(cd[2] >> 4) & 7]
self.have_changer = bool(cd[1] & 0x10)
#self.cd_disk = ((cd[1] >> 5) & 3) ^ 1 # for b7?
#elif ci == 0x167: # display: settings?
# pass
elif ci == 0x1a1: # bsi: info messages
self.show_message = bool(cd[2] & 0x80)
if cd[0] == 0x80:
self.message_id = cd[1]
elif ci == 0x1a5: # volume
self.volume = cd[0] & 0x1f
self.vol_change = bool(cd[0] & 0x80)
#elif ci == 0x1d0: # climate: control info
# pass
elif ci == 0x1e0: # radio settings
self.track_intro = bool(cd[0] & 0x20)
self.random = bool(cd[0] & 0x04)
self.repeat = bool(cd[1] & 0x80)
self.rds_alt = bool(cd[2] & 0x20)
self.want_rdtxt = bool(cd[4] & 0x20)
elif ci == 0x1e5: # audio settings
self.balance_lr = ((cd[0] + 1) & 0x0f) - (cd[0] ^ 0x40 & 0x40) >> 2
self.show_balance_lr = bool(cd[0] & 0x80)
self.balance_rf = ((cd[1] + 1) & 0x0f) - (cd[1] ^ 0x40 & 0x40) >> 2
self.show_balance_rf = bool(cd[1] & 0x80)
self.bass = ((cd[2] + 1) & 0x0f) - (cd[2] ^ 0x40 & 0x40) >> 2
self.show_bass = bool(cd[2] & 0x80)
self.treble = ((cd[4] + 1) & 0x0f) - (cd[4] ^ 0x40 & 0x40) >> 2
self.show_treble = bool(cd[4] & 0x80)
self.loudness = bool(cd[5] & 0x40)
self.show_loudness = bool(cd[5] & 0x80)
self.autovol = cd[5] & 7
self.show_autovol = bool(cd[5] & 0x10)
self.ambience = self.ambs.get(cd[6] & 0x1f, "Unk:" + hex(cd[6] & 0x1f))
self.ambience_show = bool(cd[6] & 0x40)
elif ci == 0x21f: # remote keys under wheel
self.rkeys['fwd'] = bool(cd[0] & 0x80)
self.rkeys['rew'] = bool(cd[0] & 0x40)
self.rkeys['volup'] = bool(cd[0] & 0x08)
self.rkeys['voldn'] = bool(cd[0] & 0x04)
self.rkeys['src'] = bool(cd[0] & 0x02)
self.rkeys['scroll'] = cd[1]
#elif ci == 0x221: # trip computer
# pass
elif ci == 0x225: # radio freq
if cl == 6: # b7, from autowp docs
self.radio_mem = cd[0] & 7
self.radio_band = self.bands[(cd[1] >> 5) & 7]
freq = (cd[1] & 0x0f) * 256 + cd[2]
elif cl == 5: # b3/b5
self.pty_scan = bool(cd[0] & 0x01)
self.radio_scan = bool(cd[0] & 0x02)
self.rds_scan = bool(cd[0] & 0x04)
self.ast_scan = bool(cd[0] & 0x08)
self.show_radios = bool(cd[0] & 0x80)
self.radio_mem = (cd[1] >> 4) & 7
self.radio_band = self.bands[(cd[2] >> 4) & 7]
freq = (cd[3] & 0x0f) * 256 + cd[4]
if self.radio_band in ('AMMW', 'AMLW'):
self.radio_freq = "%d KHz" % freq
else:
self.radio_freq = "%.2f MHz" % (freq * 0.05 + 50)
elif ci == 0x265: # rds
self.want_rds = bool(cd[0] & 0x80)
self.have_rds = bool(cd[0] & 0x20)
self.want_ta = bool(cd[0] & 0x10)
self.have_ta = bool(cd[0] & 0x04)
self.traffic = bool(cd[0] & 0x02)
self.want_reg = bool(cd[0] & 0x01)
self.want_pty = bool(cd[1] & 0x80)
self.show_pty = bool(cd[1] & 0x40)
self.pty_mode = (cd[1] >> 4) & 3
self.pty_sel = cd[2] & 0x1f
pc = cd[3] & 0x1f
self.pty_cur = self.pty_mode in (1, 2) and pc and self.ptys.get(pc, "Unk:" + hex(pc)) or ""
#elif ci == 0x276: # bsi: date and time
# pass
elif ci == 0x2a5: # rds title
self.rds_name = self.get_str(cd) if cd[0] != 0 else None
elif ci == 0x2b6: # bsi: last 8 vin digits
self.vin3 = bytes(cd[:8]).decode()
elif ci == 0x2e1: # bsi: status of functions
self.funcs = (cd[0] << 16) + (cd[1] << 8) + cd[2]
#elif ci == 0x2e5: # hz
# pass
elif ci == 0x325: # cd tray info
self.cd_disk = cd[1] & 0x83
elif ci == 0x336: # bsi: first 3 vin letters
self.vin1 = bytes(cd[:3]).decode()
#elif ci == 0x361: # bsi: car settings
# pass
elif ci == 0x365: # cd disk info
self.cd_tracks = cd[0]
self.cd_len = "%02d:%02d" % (cd[1], cd[2]) if cd[1] != 0xff else "--:--"
self.cd_mp3 = bool(cd[3] & 0x01)
elif ci == 0x3a5: # cd track info
self.track_num = cd[0]
self.track_len = "%02d:%02d" % (cd[1], cd[2]) if cd[1] != 0xff else "--:--"
self.track_time = "%02d:%02d" % (cd[3], cd[4]) if cd[3] != 0xff else "--:--"
elif ci == 0x3b6: # bsi: middle 6 vin digits
self.vin2 = bytes(cd[:6]).decode()
elif ci == 0x3e5: # keypad
self.rkeys['menu'] = bool(cd[0] & 0x40)
self.rkeys['tel'] = bool(cd[0] & 0x10)
self.rkeys['clim'] = bool(cd[0] & 0x01)
self.rkeys['trip'] = bool(cd[1] & 0x40)
self.rkeys['mode'] = bool(cd[1] & 0x10)
self.rkeys['audio'] = bool(cd[1] & 0x01)
self.rkeys['ok'] = bool(cd[2] & 0x40)
self.rkeys['esc'] = bool(cd[2] & 0x10)
self.rkeys['dark'] = bool(cd[2] & 0x04)
self.rkeys['up'] = bool(cd[5] & 0x40)
self.rkeys['down'] = bool(cd[5] & 0x10)
self.rkeys['right'] = bool(cd[5] & 0x04)
self.rkeys['left'] = bool(cd[5] & 0x01)
#elif ci == 0x520: # hz
# pass
#elif ci == 0x5e0: # hw/sw radio info
# pass
else:
return False
return True
def visualize(self):
tuner = self.source == 'Tuner' and self.enabled
cd = self.source in ('CD', 'CD Changer') and self.enabled
aux = 'AUX' in self.source and self.enabled
if not self.enabled:
self.ss('icon', 'icon')
self.ss('name', 'Disabled')
self.ss('title', '')
elif aux:
self.ss('icon', 'linein')
self.ss('name', self.source)
self.ss('title', '')
elif tuner:
self.ss('icon', 'radio')
self.ss('name', (self.rds_scan or self.ast_scan or self.pty_scan) and "Wait..." or (self.traffic and "Traffic" or self.rds_name or self.radio_freq))
self.ss('title', self.pty_scan and self.pty_sel and ("PTY: " + self.ptys.get(self.pty_sel, "")) or (self.rds_scan and "RDS search.." or
(self.ast_scan and (self.radio_scan and "Autostore stations.." or "List in progress..")) or self.rdtxt))
elif cd:
self.ss('icon', self.cd_mp3 and 'cdmp3' or 'cdaudio')
self.ss('name', self.source == 'CD' and (self.cd_disk in (1, 3) and ('Track %d/%d' % (self.track_num, self.cd_tracks)) or "Wait...") or "CD Changer")
self.ss('title', self.track_name + (self.track_author and (" / %s" % self.track_author) or ""))
else:
self.ss('icon', 'icon')
self.ss('name', self.source)
self.ss('title', '')
self.ss('band', tuner and self.radio_band or "")
self.ss('info', tuner and self.rds_name and self.radio_freq or
(cd and ("%s %s%s" % (self.cd_pause and "×" or "»", self.track_time, self.track_len != "--:--" and " / " + self.track_len or "")) or ""))
self.ss('memch', tuner and not self.radio_scan and self.radio_mem and str(self.radio_mem) or "")
self.ss('dx', tuner and self.radio_scan and "DX" or "")
self.ss('ta', self.enabled and self.want_ta and "TA" or "")
self.ss('ta_ok', tuner and self.have_ta)
self.ss('pty', self.enabled and self.want_pty and "PTY" or "")
self.ss('pty_ok', tuner and self.pty_cur == self.ptys.get(self.pty_sel, ""))
self.ss('ptyname', tuner and self.enabled and self.rdtxt == "" and self.pty_cur or "")
self.ss('reg', tuner and self.want_reg and "REG" or "")
self.ss('rds', tuner and self.want_rds and "RDS" or "")
self.ss('rds_ok', tuner and self.have_rds)
self.ss('rdtxt_rnd', tuner and self.want_rdtxt and "RDTXT" or (cd and (self.random and "RDM" or (self.track_intro and "INT" or (self.repeat and "RPT")))) or "")
self.ss('loud', self.enabled and self.loudness and "LOUD" or "")
self.ss('vol', self.enabled and ("Vol: [b]%d[/b]" % self.volume) or "")
self.ss('volbar', self.enabled and self.volume or 0)
self.ss('temp', self.out_temp and ("[b]%.0f[/b]°C" % self.out_temp) or "[b]——[/b]°F")
self.ss('alert', not self.connected and "No connection" or (self.show_message and self.msgs.get(self.message_id, "") or ""))
self.ss('debug', "rpm=%d speed=%d power=%dV odometer=%d\neconomy=%d lighting=%d bright=%d ignition=%d funcs=%06x\n\nlamps=%s\n\nkeys=%s" % (
self.rpm, self.speed, self.power, self.odometer, self.economy, self.lighting, self.brightness, self.ignition, self.funcs, str(self.lamps), str(self.rkeys)))
def visualize_test(self):
self.ss('icon', "icon")
self.ss('name', "Name")
self.ss('title', "Title")
self.ss('band', "Band")
self.ss('info', "Info")
self.ss('memch', "0")
self.ss('dx', "DX")
self.ss('ta', "TA")
self.ss('ta_ok', True)
self.ss('pty', "PTY")
self.ss('pty_ok', True)
self.ss('ptyname', "PtyName")
self.ss('reg', "REG")
self.ss('rds', "RDS")
self.ss('rds_ok', True)
self.ss('rdtxt_rnd', "RDTXT")
self.ss('loud', "LOUD")
self.ss('vol', "Vol: [b]15[/b]")
self.ss('volbar', 15)
self.ss('temp', "[b]33[/b]°C")
self.ss('alert', "")
self.ss('debug', "some debug info")
|
B&S Starter Pawl Kit, B&S 281503, 281505, 692299, 492333. Fits our 26-12368 & 12329 Recoil Assembly. Includes 2-Pawls, Friction Plate 691696 Screw & 2-Springs. 263073 Spring Ring Retainer.
|
#!/usr/bin/env python2
"""
graphlab
========
Assists with registering, loading, and configuring GraphLab by Dato.
Note the python 2 shebang at the top of this file. At the time of this writing,
Graphlab does not support Python 3. - jdb, 2016May09
"""
import argparse
import os
import sys
import time
class VersionError(Exception):
    def __init__(self, *args, **kwargs):
        # Python 2 requires the explicit super(Class, self) form
        super(VersionError, self).__init__(*args, **kwargs)
def load_graphlab():
if sys.version_info >= (3, 0):
raise VersionError("Graphlab is only available in Python 2")
start = time.clock() # noqa
import graphlab
gl_product_key = os.getenv('GLCREATE_PRODUCT_KEY', False)
if not gl_product_key:
print("Please set GLCREATE_PRODUCT_KEY")
return
graphlab.product_key.set_product_key(gl_product_key)
# Display graphlab canvas in notebook
graphlab.canvas.set_target('ipynb')
# Number of workers
graphlab.set_runtime_config('GRAPHLAB_DEFAULT_NUM_PYLAMBDA_WORKERS', 16)
since = time.clock() - start
print("Graphlab loaded in {:.3f} seconds".format(since))
return graphlab
def convert_to_csv(filename):
gl = load_graphlab()
sframe = gl.SFrame(filename)
noext_filename, _ = os.path.splitext(os.path.abspath(filename))
new_filename = noext_filename + '.csv'
df = sframe.to_dataframe()
df.to_csv(new_filename)
assert os.path.exists(new_filename)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('file',
help='GraphLab file to convert to Pandas .csv')
args = parser.parse_args()
convert_to_csv(args.file)
|
Conservation officers shot and killed a black bear in northern Michigan and plan to conduct tests on the animal to determine whether it's the one that mauled a 12-year-old as she jogged on her grandfather's property.
The Detroit Free Press reported that the bear was killed over the weekend. But the girl's grandfather doesn't believe the animal killed by the Michigan Department of Natural Resources was responsible for the attack, according to the newspaper.
"It was a real large bear," Dave Wetherell, 66, told the newspaper Sunday. "But I don't believe it was the same bear. This one here was between 400-500 pounds, the one they killed last night. They feel like the one who attacked Abby was 150 pounds."
Abby Wetherell, 12, headed toward home from an evening jog on her grandfather's woodsy property in northern Michigan last week when a terrifying sight caught her eye: a black bear was chasing her.
The bear knocked the girl down twice and lashed one of her thighs with its powerful paws as she screamed for help, then coolly played dead. Her father and a neighbor scared the animal away, and Abby was flown to a hospital, where she was doing well after surgery.
"We're very proud of the way she handled herself," Wetherell told The Associated Press at the time. "She's kind of amazed us."
The Michigan Department of Natural Resources set traps in the area of the Thursday night attack in Wexford County's Haring Township, just north of Cadillac, and asked the public to be on the lookout. The county has a well-established bear population, but the animals generally avoid humans, DNR wildlife biologist Adam Bump said.
The agency gets occasional reports of females challenging people who get too close to their cubs, he said. But in those cases, the sow usually snaps at the person once or twice and leaves. The bear that went after Abby apparently wasn't protecting cubs, which made the attack highly unusual.
The bear killed early Sunday will be tested for rabies and other communicable diseases, the Detroit Free Press reported.
Wetherell said he owns about 180 acres with a hunting cabin and a two-track dirt road where Abby often runs. The seventh-grader at Cadillac Junior High School lives nearby with her parents and sister.
Around 9 p.m. on Thursday, she noticed the bear out of the corner of her eye on an intersecting two-track. She picked up speed, and the bear pursued and caught her.
"He mauled her pretty bad," Wetherell said. "She fought and got away. He continued after her and knocked her down again. According to Abby, at this point she just laid there and played dead."
A neighbor heard the girl's cries and yelled for Abby's dad, Chris Wetherell, who was in the area. Together, they chased the bear into the woods.
Abby was taken by helicopter to Munson Medical Center in Traverse City, where more than two hours of surgery were needed to treat deep scratches on her leg. "She's in good shape," her grandfather said.
Despite the fright, the spunky girl said she'd continue visiting the property and was more concerned about missing a soccer game this weekend, David Wetherell said.
"She didn't want to let the team down," he said.
Michigan's black bear population is estimated at 8,000 to 10,000, but about 90 percent are in the Upper Peninsula, the DNR said. Wexford County is in the northwestern Lower Peninsula. Bears have been ranging farther south in recent years as growth of forested areas has provided more habitat and travel corridors, Bump said.
People who venture into likely bear territory should carry pepper spray, travel in small groups and make noise to avoid startling them, the DNR said.
"If you encounter a bear, stand your ground and then slowly back away," the agency said in a news release. "Do not turn away."
It advised people not to show fear, run or play dead, but instead to make themselves look as big as possible and talk to the animal in a stern voice. "Fight back if actually attacked with a backpack, stick or bare hands."
|
from lingpy import Model
from lingpy.sequence.sound_classes import token2class
from pyburmish import burmish_path, load_burmish
from unicodedata import normalize
sca = Model('sca')
color = Model('color')
color.converter['⁵⁵'] = 'Crimson'
color.converter['³⁵'] = 'LightBlue'
color.converter['⁴'] = 'LightYellow'
color.converter['²'] = 'LightGreen'
color.converter['³'] = 'ForestGreen'
color.converter['³¹'] = 'Brown'
color.converter['¹'] = 'White'
color.converter['²¹'] = 'DarkOrange'
color.converter['³³'] = 'CornflowerBlue'
color.converter['⁵³'] = '#c86496'
color.converter['⁵¹'] = 'cyan'
_conv = {}
_conv['A'] = 'LightBlue'
_conv['E'] = 'Orange'
_conv['I'] = 'LightGreen'
_conv['O'] = 'white'
_conv['U'] = 'Crimson'
_conv['Y'] = 'LightYellow'
for sound in color.converter:
cls = token2class(sound, 'sca')
if cls in 'AEIOUY':
color.converter[sound] = _conv[cls]
def contains(syllable, sound):
_s = normalize('NFD', ''.join(syllable))
if sound in _s:
return True
return False
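# The predicates below test for phonation marks in the NFD-decomposed string:
# 'ʰ' (U+02B0) marks aspiration, U+0330 (combining tilde below) marks creaky
# voice, and U+0325 (combining ring below) marks voicelessness.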
def is_aspirated(syllable):
return contains(syllable, 'ʰ')
def is_creaky(syllable):
return contains(syllable, '\u0330')
def is_aspirated_or_unvoiced(syllable):
if is_aspirated(syllable):
return True
return contains(syllable, '\u0325')
|
Searching For Internet Plans? If That Is The Case, Then Check Out This
There are lots of individuals who have no idea how they would survive without the web. Access to the internet can determine a great deal of things. In some cases, it may be the factor that decides whether you get a job. It can also be a source of entertainment. It isn’t unheard of for an employer to ask a prospective employee during a job interview whether they can get online on a consistent basis. The reason is that there are diverse tasks an employee can complete comfortably at home and simply send in the finished work over the web.
There are presently two forms of internet plans offered by providers: broadband and dial-up. Dial-up is the older technology of the two. It connects to the internet via a phone cable to a central service. These connections had a number of advantages, the foremost being that they were obtainable just about anywhere, since nearly everyone has access to a phone line. Unfortunately, existing telephone services were, and still are, not designed for high-speed data transmission, so the speeds of these connections tended to be very slow.
Every day more and more websites serve up rich content in the form of videos and interactive pages. The issue is that dial-up connections are too slow, so the pages keep loading for a long time or do not load at all. These websites are aimed at people with fast internet access, and as time goes on, more and more people abandon dial-up.
High speed broadband internet access is a term that refers to any high speed web access. We recommend heading to exascale.co.uk if you’re in search of the best web deals. You won’t go wrong by choosing a Leased Line provider. You may check the Leased Line quote at the site.
|
from a10sdk.common.A10BaseClass import A10BaseClass
class VeCfg(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param ve_end: {"type": "number", "description": "VE port", "format": "number"}
:param ve_start: {"type": "number", "description": "VE port (VE Interface number)", "format": "number"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "ve-cfg"
self.DeviceProxy = ""
self.ve_end = ""
self.ve_start = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class EthCfg(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param ethernet_start: {"type": "number", "description": "Ethernet port (Ethernet Interface number)", "format": "interface"}
:param ethernet_end: {"type": "number", "description": "Ethernet port", "format": "interface"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "eth-cfg"
self.DeviceProxy = ""
self.ethernet_start = ""
self.ethernet_end = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class AclV6(A10BaseClass):
"""Class Description::
IPv6 ACL for HTTPS service.
Class acl-v6 supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param acl_name: {"description": "ACL name", "format": "string", "minLength": 1, "optional": false, "maxLength": 16, "type": "string"}
:param ve_cfg: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"ve-end": {"type": "number", "description": "VE port", "format": "number"}, "ve-start": {"type": "number", "description": "VE port (VE Interface number)", "format": "number"}, "optional": true}}]}
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param eth_cfg: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"ethernet-start": {"type": "number", "description": "Ethernet port (Ethernet Interface number)", "format": "interface"}, "ethernet-end": {"type": "number", "description": "Ethernet port", "format": "interface"}, "optional": true}}]}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/enable-management/service/https/acl-v6/{acl_name}`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required = [ "acl_name"]
self.b_key = "acl-v6"
self.a10_url="/axapi/v3/enable-management/service/https/acl-v6/{acl_name}"
self.DeviceProxy = ""
self.acl_name = ""
self.ve_cfg = []
self.uuid = ""
self.eth_cfg = []
for keys, value in kwargs.items():
setattr(self,keys, value)
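# A minimal usage sketch (hypothetical values; assumes a DeviceProxy has been
# configured as described in common/device_proxy.py):
#
#   acl = AclV6(acl_name="mgmt-v6", DeviceProxy=proxy,
#               eth_cfg=[{"ethernet-start": 1, "ethernet-end": 2}])
#
# CRUD helpers inherited from A10BaseClass would then target
# /axapi/v3/enable-management/service/https/acl-v6/mgmt-v6.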
|
JENNER, Calif. (AP) - Officials say a 63-year-old man has been killed while kayaking off the Sonoma County coast.
Sgt. Cecile Focha of the Sonoma County Sheriff’s Office said Joseph Patrick Woods of San Rafael was paddling with a friend off Goat Rock Beach near Jenner on Wednesday when both of their kayaks capsized in rough waters.
Focha says Woods’ companion made it back to the beach, where he told rescue crews he had seen his friend swimming toward shore and then lost sight of him.
A California Highway Patrol helicopter spotted a person in the ocean a little more than an hour later. The Coast Guard pulled Woods from the water and he was pronounced dead at a nearby Coast Guard station.
|