# -*- coding: utf-8 -*-
from nose.tools import *
from pykt import KyotoTycoon, KTException
import time
def clear():
    db = KyotoTycoon()
    db = db.open()
    db.clear()
    db.close()

@raises(IOError)
def test_err_cas():
    db = KyotoTycoon()
    db.cas("A")

@with_setup(setup=clear)
def test_cas():
    db = KyotoTycoon()
    db = db.open()
    db.set("A", "B")
    ret = db.cas("A", oval="B", nval="C")
    ok_(ret == True)
    ret = db.get("A")
    ok_(ret == "C")
    db.close()

@raises(KTException)
@with_setup(setup=clear)
def test_cas_with_db():
    db = KyotoTycoon("test")
    db = db.open()
    db.set("A", "B")
    db.cas("A", oval="B", nval="C")
    ok_(False)

@with_setup(setup=clear)
@raises(KTException)
def test_cas_fail():
    db = KyotoTycoon()
    db = db.open()
    db.set("A", "B")
    ret = db.cas("A", oval="C", nval="C")

@with_setup(setup=clear)
@raises(KTException)
def test_cas_few_param1():
    db = KyotoTycoon()
    db = db.open()
    db.set("A", "B")
    db.cas("A", nval="C")

@with_setup(setup=clear)
def test_cas_few_param2():
    db = KyotoTycoon()
    db = db.open()
    db.set("A", "B")
    ret = db.cas("A", oval="B")
    ok_(ret == True)
    ret = db.get("A")
    ok_(ret == None)
    db.close()

@with_setup(setup=clear)
def test_cas_utf8():
    db = KyotoTycoon()
    db = db.open()
    db.set("あいうえお", "かきくけこ")
    ret = db.cas("あいうえお", oval="かきくけこ", nval="さしすせそ")
    ok_(ret == True)
    ret = db.get("あいうえお")
    ok_(ret == "さしすせそ")
    db.close()

@with_setup(setup=clear)
def test_cas_loop():
    db = KyotoTycoon()
    db = db.open()
    db.set("A", "0")
    for i in xrange(100):
        a = str(i)
        b = str(i+1)
        ret = db.cas("A", oval=a, nval=b)
        ok_(ret == True)
        ret = db.get("A")
        ok_(ret == b)
    db.close()

@with_setup(setup=clear)
def test_cas_expire():
    db = KyotoTycoon()
    db = db.open()
    db.set("A", "B")
    ret = db.cas("A", oval="B", nval="C", expire=2)
    ok_(ret)
    time.sleep(3)
    ret = db.get("A")
    ok_(ret == None)
    db.close()

@with_setup(setup=clear)
def test_cas_expire_not_expire():
    db = KyotoTycoon()
    db = db.open()
    db.set("A", "B")
    ret = db.cas("A", oval="B", nval="C", expire=2)
    ok_(ret)
    time.sleep(2)
    ret = db.get("A")
    ok_(ret == "C")
    time.sleep(2)
    ret = db.get("A")
    ok_(ret == None)
    db.close()
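
# Illustrative sketch (not one of the tests above): the bare compare-and-swap
# flow that these tests exercise, collected into a plain helper. It assumes a
# Kyoto Tycoon server is reachable with pykt's default connection settings,
# which is the same assumption the tests make.
def example_cas_usage():
    db = KyotoTycoon()
    db = db.open()
    db.set("counter", "0")
    # cas() swaps the value only if the stored value still equals oval;
    # it returns True on success and raises KTException on a mismatch.
    swapped = db.cas("counter", oval="0", nval="1")
    value = db.get("counter")  # "1" if the swap succeeded
    db.close()
    return swapped, value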
|
Top 5 stock holdings are MSFT, AAPL, JNJ, VZ, and PG, and they represent 10.56% of Mn Services Vermogensbeheer B.V.'s stock portfolio.
Added to shares of these 10 stocks: VZ (+$78.77M), PG (+$78.40M), MSFT (+$77.10M), JNJ (+$73.84M), V (+$70.46M), MRK (+$69.67M), CSCO (+$69.06M), KO (+$65.40M), HD (+$62.98M), AAPL (+$62.11M).
Started 19 new stock positions in Imperial Oil Ltd New, WCG, DXCM, Burlington Stores, Stars Group Inc Com, Grubhub, Wayfair, Linde Plc, Suncor Energy Inc New, PTC, NRG, Lamb Weston Hldgs, Dell Technologies Inc, Aurora Cannabis Inc, Axa Equitable Hldgs Inc, Canopy Growth, Wp Carey, Cenovus Energy Inc Com, L3 Technologies.
Reduced shares in 10 stocks, including CVX (-$37.49M), EOG (-$11.85M), Canadian Natural Resources (-$8.32M), HAL (-$5.70M), and APC (-$5.57M).
Sold out of its positions in AGCO, AET, APC, Andeavor, Antero Res, ATO, AVT, Barrick Gold Corp, CA, COG.
Mn Services Vermogensbeheer B.V. was a net buyer of stock by $3.38B.
Mn Services Vermogensbeheer B.V. has $6.59B in assets under management (AUM), growing by 54.43%.
Past 13F-HR SEC Filings by Mn Services Vermogensbeheer B.V.
|
# Local file checksum cache implementation
#
# Copyright (C) 2012 Intel Corporation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import glob
import operator
import os
import stat
import pickle
import bb.utils
import logging
from bb.cache import MultiProcessCache
logger = logging.getLogger("BitBake.Cache")
# mtime cache (non-persistent)
# based upon the assumption that files do not change during bitbake run
class FileMtimeCache(object):
    cache = {}

    def cached_mtime(self, f):
        if f not in self.cache:
            self.cache[f] = os.stat(f)[stat.ST_MTIME]
        return self.cache[f]

    def cached_mtime_noerror(self, f):
        if f not in self.cache:
            try:
                self.cache[f] = os.stat(f)[stat.ST_MTIME]
            except OSError:
                return 0
        return self.cache[f]

    def update_mtime(self, f):
        self.cache[f] = os.stat(f)[stat.ST_MTIME]
        return self.cache[f]

    def clear(self):
        self.cache.clear()

# Checksum + mtime cache (persistent)
class FileChecksumCache(MultiProcessCache):
    cache_file_name = "local_file_checksum_cache.dat"
    CACHE_VERSION = 1

    def __init__(self):
        self.mtime_cache = FileMtimeCache()
        MultiProcessCache.__init__(self)

    def get_checksum(self, f):
        entry = self.cachedata[0].get(f)
        cmtime = self.mtime_cache.cached_mtime(f)
        if entry:
            (mtime, hashval) = entry
            if cmtime == mtime:
                return hashval
            else:
                bb.debug(2, "file %s changed mtime, recompute checksum" % f)

        hashval = bb.utils.md5_file(f)
        self.cachedata_extras[0][f] = (cmtime, hashval)
        return hashval

    def merge_data(self, source, dest):
        for h in source[0]:
            if h in dest:
                (smtime, _) = source[0][h]
                (dmtime, _) = dest[0][h]
                if smtime > dmtime:
                    dest[0][h] = source[0][h]
            else:
                dest[0][h] = source[0][h]

    def get_checksums(self, filelist, pn):
        """Get checksums for a list of files"""
        def checksum_file(f):
            try:
                checksum = self.get_checksum(f)
            except OSError as e:
                bb.warn("Unable to get checksum for %s SRC_URI entry %s: %s" % (pn, os.path.basename(f), e))
                return None
            return checksum

        def checksum_dir(pth):
            # Handle directories recursively
            dirchecksums = []
            for root, dirs, files in os.walk(pth):
                for name in files:
                    fullpth = os.path.join(root, name)
                    checksum = checksum_file(fullpth)
                    if checksum:
                        dirchecksums.append((fullpth, checksum))
            return dirchecksums

        checksums = []
        for pth in filelist.split():
            exist = pth.split(":")[1]
            if exist == "False":
                continue
            pth = pth.split(":")[0]
            if '*' in pth:
                # Handle globs
                for f in glob.glob(pth):
                    if os.path.isdir(f):
                        if not os.path.islink(f):
                            checksums.extend(checksum_dir(f))
                    else:
                        checksum = checksum_file(f)
                        if checksum:
                            checksums.append((f, checksum))
            elif os.path.isdir(pth):
                if not os.path.islink(pth):
                    checksums.extend(checksum_dir(pth))
            else:
                checksum = checksum_file(pth)
                if checksum:
                    checksums.append((pth, checksum))

        checksums.sort(key=operator.itemgetter(1))
        return checksums
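
# Illustrative sketch (not part of the module above): how get_checksums()
# expects its input, based on the parsing code in that method. Each entry in
# `filelist` is a space-separated "path:exists" pair, and entries whose flag
# is the string "False" are skipped before any stat or hashing happens. The
# file path and recipe name below are made-up placeholders; running this for
# real requires BitBake's `bb` modules on the Python path.
if __name__ == "__main__":
    cache = FileChecksumCache()
    filelist = "/tmp/example-file.txt:True /tmp/missing-file.txt:False"
    # Returns a list of (path, md5) tuples sorted by checksum; missing files
    # only produce a warning and are left out of the result.
    print(cache.get_checksums(filelist, "example-recipe"))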
|
New York regulator and superintendent of financial services Benjamin M. Lawsky took to Reddit to introduce himself to the bitcoin community.
He thanked those who requested that he do an AMA about his agency. An AMA ("Ask Me Anything") is a Reddit question-and-answer session, typically about something uncommon that plays a central role in one's life, and it requires proof to be included within the post's text.
Lawsky says that the New York Department of Financial Services is reviewing how digital currencies should be regulated.
He says that the NYDFS is still formulating how to approach the regulatory process. However, Lawsky says he welcomes all questions and will try to answer them as best he can, even though he cannot give any definitive answers on the subject.
The agency is taking bitcoin's regulation process seriously and without any prejudgments.
The NYDFS conducts inquiries to decide which regulations and guidelines digital currencies will abide by. Hearings were conducted this past January, and over 14,000 people from 117 different countries tuned into the 2-day hearings online. Lawsky says that the turnout from the online bitcoin community was more than the agency ever could have expected.
Just last week, the agency released an outline of thoughts and ideas for regulations.
Lawsky then says that the agency is taking its time on decisions, as it does not want to overlook any pressing issues that could cause a backlash in the future.
In addition to the NYDFS’s cooperation with the bitcoin community, Lawsky says that getting feedback from users and regulators alike will allow for a better decision overall.
He concluded his post by asking the Reddit bitcoin community some questions such as, what users personally found as the most important application for virtual currency, and where they thought the market was headed in the long term.
One Reddit user asked Lawsky why he felt banks were closing accounts associated with bitcoin, and if his proposed regulation would address the issue. Lawsky replied, saying that it could be likely due to the recent money laundering cases.
Lawsky also said that the coin's volatility seems to be a factor in banks' hesitation to accept a new form of currency, as bitcoin has yet to establish a stable value.
As a thank-you, Lawsky also released a video response to the Reddit community's questions and comments, and he will return to his Reddit post to answer further questions and concerns.
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import re
from base64 import standard_b64decode
import unittest
from cStringIO import StringIO
import numbers
import time
from PIL import Image
import requests
import pytest
from splash.exceptions import ScriptError
lupa = pytest.importorskip("lupa")
from splash import __version__ as splash_version
from splash.har_builder import HarBuilder
from . import test_render
from .test_jsonpost import JsonPostRequestHandler
from .utils import NON_EXISTING_RESOLVABLE, SplashServer
from .mockserver import JsRender
from .. import defaults
class BaseLuaRenderTest(test_render.BaseRenderTest):
endpoint = 'execute'
def request_lua(self, code, query=None):
q = {"lua_source": code}
q.update(query or {})
return self.request(q)
def assertScriptError(self, resp, subtype, message=None):
err = self.assertJsonError(resp, 400, 'ScriptError')
self.assertEqual(err['info']['type'], subtype)
if message is not None:
self.assertRegexpMatches(err['info']['message'], message)
return err
def assertErrorLineNumber(self, resp, line_number):
self.assertEqual(resp.json()['info']['line_number'], line_number)
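# Illustrative sketch (not one of the tests below): what request_lua() boils
# down to when talking to a standalone Splash instance, as the nosandbox tests
# later in this file do with SplashServer. The host and port are assumptions
# (8050 is only a common default); the tests themselves go through the test
# harness instead of a hard-coded URL.
def example_execute_request(splash_url="http://localhost:8050"):
    script = "function main(splash) return {ok=true} end"
    # The execute endpoint runs the Lua script; a table result comes back
    # JSON-encoded, so resp.json() yields {"ok": True} here.
    resp = requests.get(splash_url + "/execute", params={"lua_source": script})
    return resp.json()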
class MainFunctionTest(BaseLuaRenderTest):
def test_return_json(self):
resp = self.request_lua("""
function main(splash)
local obj = {key="value"}
return {
mystatus="ok",
number=5,
float=-0.5,
obj=obj,
bool=true,
bool2=false,
missing=nil
}
end
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.headers['content-type'], 'application/json')
self.assertEqual(resp.json(), {
"mystatus": "ok",
"number": 5,
"float": -0.5,
"obj": {"key": "value"},
"bool": True,
"bool2": False,
})
def test_unicode(self):
resp = self.request_lua(u"""
function main(splash) return {key="значение"} end
""".encode('utf8'))
self.assertStatusCode(resp, 200)
self.assertEqual(resp.headers['content-type'], 'application/json')
self.assertEqual(resp.json(), {"key": u"значение"})
def test_unicode_direct(self):
resp = self.request_lua(u"""
function main(splash)
return 'привет'
end
""".encode('utf8'))
self.assertStatusCode(resp, 200)
self.assertEqual(resp.text, u"привет")
self.assertEqual(resp.headers['content-type'], 'text/plain; charset=utf-8')
def test_number(self):
resp = self.request_lua("function main(splash) return 1 end")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.text, "1")
self.assertEqual(resp.headers['content-type'], 'text/plain; charset=utf-8')
def test_number_float(self):
resp = self.request_lua("function main(splash) return 1.5 end")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.text, "1.5")
self.assertEqual(resp.headers['content-type'], 'text/plain; charset=utf-8')
def test_bool(self):
resp = self.request_lua("function main(splash) return true end")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.text, "True")
self.assertEqual(resp.headers['content-type'], 'text/plain; charset=utf-8')
def test_empty(self):
resp = self.request_lua("function main(splash) end")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.text, "")
resp = self.request_lua("function main() end")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.text, "")
def test_no_main(self):
resp = self.request_lua("x=1")
self.assertScriptError(resp, ScriptError.MAIN_NOT_FOUND_ERROR,
message="function is not found")
def test_bad_main(self):
resp = self.request_lua("main=1")
self.assertScriptError(resp, ScriptError.BAD_MAIN_ERROR,
message="is not a function")
def test_ugly_main(self):
resp = self.request_lua("main={coroutine=123}")
self.assertScriptError(resp, ScriptError.BAD_MAIN_ERROR,
message="is not a function")
def test_nasty_main(self):
resp = self.request_lua("""
main = {coroutine=function()
return {
send=function() end,
next=function() end
}
end}
""")
self.assertScriptError(resp, ScriptError.BAD_MAIN_ERROR,
message="is not a function")
class ResultContentTypeTest(BaseLuaRenderTest):
def test_content_type(self):
resp = self.request_lua("""
function main(splash)
splash:set_result_content_type('text/plain')
return "hi!"
end
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.headers['content-type'], 'text/plain')
self.assertEqual(resp.text, 'hi!')
def test_content_type_ignored_for_tables(self):
resp = self.request_lua("""
function main(splash)
splash:set_result_content_type('text/plain')
return {hi="hi!"}
end
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.headers['content-type'], 'application/json')
self.assertEqual(resp.text, '{"hi": "hi!"}')
def test_bad_content_type(self):
resp = self.request_lua("""
function main(splash)
splash:set_result_content_type(55)
return "hi!"
end
""")
err = self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR,
message='argument must be a string')
self.assertEqual(err['info']['splash_method'], 'set_result_content_type')
resp = self.request_lua("""
function main(splash)
splash:set_result_content_type()
return "hi!"
end
""")
self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR)
def test_bad_content_type_func(self):
resp = self.request_lua("""
function main(splash)
splash:set_result_content_type(function () end)
return "hi!"
end
""")
err = self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR,
message='argument must be a string')
self.assertEqual(err['info']['splash_method'], 'set_result_content_type')
class ResultHeaderTest(BaseLuaRenderTest):
def test_result_header_set(self):
resp = self.request_lua("""
function main(splash)
splash:set_result_header("foo", "bar")
return "hi!"
end
""")
self.assertStatusCode(resp, 200)
self.assertIn("foo", resp.headers)
self.assertEqual(resp.headers.get("foo"), "bar")
def test_bad_result_header_set(self):
resp = self.request_lua("""
function main(splash)
splash:set_result_header({}, {})
return "hi!"
end
""")
err = self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR,
message='arguments must be strings')
self.assertEqual(err['info']['splash_method'], 'set_result_header')
self.assertErrorLineNumber(resp, 3)
def test_unicode_headers_raise_bad_request(self):
resp = self.request_lua("""
function main(splash)
splash:set_result_header("paweł", "kiść")
return "hi!"
end
""")
err = self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR,
message='must be ascii')
self.assertEqual(err['info']['splash_method'], 'set_result_header')
self.assertErrorLineNumber(resp, 3)
class ErrorsTest(BaseLuaRenderTest):
def test_syntax_error(self):
resp = self.request_lua("function main(splash) sdhgfsajhdgfjsahgd end")
# XXX: message='syntax error' is not checked because older Lua 5.2
# versions have problems with error messages.
self.assertScriptError(resp, ScriptError.LUA_INIT_ERROR)
def test_syntax_error_toplevel(self):
resp = self.request_lua("sdg; function main(splash) sdhgfsajhdgfjsahgd end")
self.assertScriptError(resp, ScriptError.LUA_INIT_ERROR)
# XXX: message='syntax error' is not checked because older Lua 5.2
# versions have problems with error messages.
def test_unicode_error(self):
resp = self.request_lua(u"function main(splash) 'привет' end".encode('utf8'))
self.assertScriptError(resp, ScriptError.LUA_INIT_ERROR,
message="unexpected symbol")
def test_user_error(self):
resp = self.request_lua(""" -- 1
function main(splash) -- 2
error("User Error Happened") -- 3 <-
end
""")
self.assertScriptError(resp, ScriptError.LUA_ERROR,
message="User Error Happened")
self.assertErrorLineNumber(resp, 3)
@pytest.mark.xfail(reason="not implemented, nice to have")
def test_user_error_table(self):
resp = self.request_lua(""" -- 1
function main(splash) -- 2
error({tp="user error", msg=123}) -- 3 <-
end
""")
err = self.assertScriptError(resp, ScriptError.LUA_ERROR)
self.assertEqual(err['info']['error'],
{'tp': 'user error', 'msg': 123})
self.assertErrorLineNumber(resp, 3)
def test_bad_splash_attribute(self):
resp = self.request_lua("""
function main(splash)
local x = splash.foo
return x == nil
end
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.text, "True")
def test_return_multiple(self):
resp = self.request_lua("function main(splash) return 'foo', 'bar' end")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), ["foo", "bar"])
def test_return_splash(self):
resp = self.request_lua("function main(splash) return splash end")
self.assertScriptError(resp, ScriptError.BAD_MAIN_ERROR)
def test_return_function(self):
resp = self.request_lua("function main(s) return function() end end")
self.assertScriptError(resp, ScriptError.BAD_MAIN_ERROR,
message="function objects are not allowed")
def test_return_coroutine(self):
resp = self.request_lua("""
function main(splash)
return coroutine.create(function() end)
end
""")
self.assertScriptError(resp, ScriptError.LUA_ERROR,
message="(a nil value)")
def test_return_coroutine_nosandbox(self):
with SplashServer(extra_args=['--disable-lua-sandbox']) as splash:
resp = requests.get(
url=splash.url("execute"),
params={
'lua_source': """
function main(splash)
return coroutine.create(function() end)
end
"""
},
)
self.assertScriptError(resp, ScriptError.BAD_MAIN_ERROR,
message="function objects are not allowed")
def test_return_started_coroutine(self):
resp = self.request_lua(""" -- 1
function main(splash) -- 2
local co = coroutine.create(function() -- 3 <-
coroutine.yield() -- 4
end)
coroutine.resume(co)
return co
end
""")
self.assertScriptError(resp, ScriptError.LUA_ERROR,
message="(a nil value)")
self.assertErrorLineNumber(resp, 3)
def test_return_started_coroutine_nosandbox(self):
with SplashServer(extra_args=['--disable-lua-sandbox']) as splash:
resp = requests.get(
url=splash.url("execute"),
params={
'lua_source': """ -- 1
function main(splash) -- 2
local co = coroutine.create(function() -- 3
coroutine.yield() -- 4
end) -- 5
coroutine.resume(co) -- 6
return co -- 7
end -- 8
"""
},
)
self.assertScriptError(resp, ScriptError.BAD_MAIN_ERROR,
message="thread objects are not allowed")
def test_error_line_number_attribute_access(self):
resp = self.request_lua(""" -- 1
function main(splash) -- 2
local x = 5 -- 3
splash.set_result_content_type("hello") -- 4
end -- 5
""")
self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR)
self.assertErrorLineNumber(resp, 4)
def test_error_line_number_bad_argument(self):
resp = self.request_lua("""
function main(splash)
local x = 5
splash:set_result_content_type(48)
end
""")
self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR)
self.assertErrorLineNumber(resp, 4)
def test_error_line_number_wrong_keyword_argument(self):
resp = self.request_lua(""" -- 1
function main(splash) -- 2
splash:set_result_content_type{content_type=48} -- 3 <--
end -- 4
""")
self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR)
self.assertErrorLineNumber(resp, 3)
def test_pcall_wrong_keyword_arguments(self):
resp = self.request_lua("""
function main(splash)
local x = function()
return splash:wait{timeout=0.7}
end
local ok, res = pcall(x)
return {ok=ok, res=res}
end
""")
self.assertStatusCode(resp, 200)
data = resp.json()
self.assertEqual(data["ok"], False)
class EnableDisableJSTest(BaseLuaRenderTest):
def test_disablejs(self):
resp = self.request_lua("""
function main(splash)
assert(splash.js_enabled==true)
splash.js_enabled = false
splash:go(splash.args.url)
local html = splash:html()
return html
end
""", {
'url': self.mockurl('jsrender'),
})
self.assertStatusCode(resp, 200)
self.assertIn(u'Before', resp.text)
def test_enablejs(self):
resp = self.request_lua("""
function main(splash)
splash.js_enabled = true
splash:go(splash.args.url)
local html = splash:html()
return html
end
""", {
'url': self.mockurl('jsrender'),
})
self.assertStatusCode(resp, 200)
self.assertNotIn(u'Before', resp.text)
def test_disablejs_after_splash_go(self):
resp = self.request_lua("""
function main(splash)
splash:go(splash.args.url)
splash.js_enabled = false
local html = splash:html()
return html
end
""", {
'url': self.mockurl('jsrender'),
})
self.assertStatusCode(resp, 200)
self.assertNotIn(u'Before', resp.text)
def test_multiple(self):
resp = self.request_lua("""
function main(splash)
splash:go(splash.args.url)
splash.js_enabled = false
local html_1 = splash:html()
splash:go(splash.args.url)
return {html_1=html_1, html_2=splash:html()}
end
""", {
'url': self.mockurl('jsrender')
})
self.assertStatusCode(resp, 200)
data = resp.json()
self.assertNotIn(u'Before', data['html_1'])
self.assertIn(u'Before', data['html_2'])
class ImageRenderTest(BaseLuaRenderTest):
def test_disable_images_attr(self):
resp = self.request_lua("""
function main(splash)
splash.images_enabled = false
splash:go(splash.args.url)
local res = splash:evaljs("document.getElementById('foo').clientHeight")
return {res=res}
end
""", {'url': self.mockurl("show-image")})
self.assertEqual(resp.json()['res'], 0)
def test_disable_images_method(self):
resp = self.request_lua("""
function main(splash)
splash:set_images_enabled(false)
splash:go(splash.args.url)
local res = splash:evaljs("document.getElementById('foo').clientHeight")
return {res=res}
end
""", {'url': self.mockurl("show-image")})
self.assertEqual(resp.json()['res'], 0)
def test_enable_images_attr(self):
resp = self.request_lua("""
function main(splash)
splash.images_enabled = false
splash.images_enabled = true
splash:go(splash.args.url)
local res = splash:evaljs("document.getElementById('foo').clientHeight")
return {res=res}
end
""", {'url': self.mockurl("show-image")})
self.assertEqual(resp.json()['res'], 50)
def test_enable_images_method(self):
resp = self.request_lua("""
function main(splash)
splash:set_images_enabled(false)
splash:set_images_enabled(true)
splash:go(splash.args.url)
local res = splash:evaljs("document.getElementById('foo').clientHeight")
return {res=res}
end
""", {'url': self.mockurl("show-image")})
self.assertEqual(resp.json()['res'], 50)
class EvaljsTest(BaseLuaRenderTest):
def _evaljs_request(self, js):
return self.request_lua("""
function main(splash)
local res = splash:evaljs([[%s]])
return {res=res, tp=type(res)}
end
""" % js)
def assertEvaljsResult(self, js, result, type):
resp = self._evaljs_request(js)
self.assertStatusCode(resp, 200)
expected = {'tp': type}
if result is not None:
expected['res'] = result
self.assertEqual(resp.json(), expected)
def assertEvaljsError(self, js, subtype=ScriptError.JS_ERROR, message=None):
resp = self._evaljs_request(js)
err = self.assertScriptError(resp, subtype, message)
self.assertEqual(err['info']['splash_method'], 'evaljs')
return err
def test_numbers(self):
self.assertEvaljsResult("1.0", 1.0, "number")
self.assertEvaljsResult("1", 1, "number")
self.assertEvaljsResult("1+2", 3, "number")
def test_inf(self):
self.assertEvaljsResult("1/0", float('inf'), "number")
self.assertEvaljsResult("-1/0", float('-inf'), "number")
def test_string(self):
self.assertEvaljsResult("'foo'", u'foo', 'string')
def test_bool(self):
self.assertEvaljsResult("true", True, 'boolean')
def test_undefined(self):
self.assertEvaljsResult("undefined", None, 'nil')
def test_null(self):
# XXX: null is converted to an empty string by QT,
# we can't distinguish it from a "real" empty string.
self.assertEvaljsResult("null", "", 'string')
def test_unicode_string(self):
self.assertEvaljsResult("'привет'", u'привет', 'string')
def test_unicode_string_in_object(self):
self.assertEvaljsResult(
'var o={}; o["ключ"] = "значение"; o',
{u'ключ': u'значение'},
'table'
)
def test_nested_object(self):
self.assertEvaljsResult(
'var o={}; o["x"] = {}; o["x"]["y"] = 5; o["z"] = "foo"; o',
{"x": {"y": 5}, "z": "foo"},
'table'
)
def test_array(self):
self.assertEvaljsResult(
'x = [3, 2, 1, "foo", ["foo", [], "bar"], {}]; x',
[3, 2, 1, "foo", ["foo", [], "bar"], {}],
'table',
)
def test_self_referencing(self):
self.assertEvaljsResult(
'var o={}; o["x"] = "5"; o["y"] = o; o',
{"x": "5"}, # self reference is discarded
'table'
)
def test_function(self):
# XXX: functions are not returned by QT
self.assertEvaljsResult(
"x = function(){return 5}; x",
{},
"table"
)
def test_function_direct_unwrapped(self):
# XXX: this is invalid syntax
self.assertEvaljsError("function(){return 5}", message='SyntaxError')
def test_function_direct(self):
# XXX: functions are returned as empty tables by QT
self.assertEvaljsResult("(function(){return 5})", {}, "table")
def test_object_with_function(self):
# XXX: complex objects like function values are unsupported
self.assertEvaljsError('{"x":2, "y": function(){}}')
def test_function_call(self):
self.assertEvaljsResult(
"function x(){return 5}; x();",
5,
"number"
)
def test_dateobj(self):
# XXX: Date objects are converted to ISO 8601 strings.
# Does it make sense to do anything else with them?
# E.g. make them available to Lua as tables?
self.assertEvaljsResult(
'x = new Date("21 May 1958 10:12 UTC"); x',
"1958-05-21T10:12:00Z",
"string"
)
def test_regexp(self):
self.assertEvaljsResult(
'/my-regexp/i',
{
u'_jstype': u'RegExp',
'caseSensitive': False,
'pattern': u'my-regexp'
},
'table'
)
self.assertEvaljsResult(
'/my-regexp/',
{
u'_jstype': u'RegExp',
'caseSensitive': True,
'pattern': u'my-regexp'
},
'table'
)
def test_syntax_error(self):
err = self.assertEvaljsError("x--4")
self.assertEqual(err['info']['js_error_type'], 'SyntaxError')
def test_throw_string(self):
err = self.assertEvaljsError("(function(){throw 'ABC'})();")
self.assertEqual(err['info']['js_error_type'], '<custom JS error>')
self.assertEqual(err['info']['js_error_message'], 'ABC')
err = self.assertEvaljsError("throw 'ABC'")
self.assertEqual(err['info']['js_error_type'], '<custom JS error>')
self.assertEqual(err['info']['js_error_message'], 'ABC')
def test_throw_error(self):
err = self.assertEvaljsError("(function(){throw new Error('ABC')})();")
self.assertEqual(err['info']['js_error_type'], 'Error')
self.assertEqual(err['info']['js_error_message'], 'ABC')
class WaitForResumeTest(BaseLuaRenderTest):
def _wait_for_resume_request(self, js, timeout=1.0):
return self.request_lua("""
function main(splash)
local result, error = splash:wait_for_resume([[%s]], %.1f)
local response = {}
if result ~= nil then
response["value"] = result["value"]
response["value_type"] = type(result["value"])
else
response["error"] = error
end
return response
end
""" % (js, timeout))
def test_return_undefined(self):
resp = self._wait_for_resume_request("""
function main(splash) {
splash.resume();
}
""")
self.assertStatusCode(resp, 200)
# A Lua table with a nil value is equivalent to not setting that
# key/value pair at all, so there is no "value" key in the response.
self.assertEqual(resp.json(), {"value_type": "nil"})
def test_return_null(self):
resp = self._wait_for_resume_request("""
function main(splash) {
splash.resume(null);
}
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"value": "", "value_type": "string"})
def test_return_string(self):
resp = self._wait_for_resume_request("""
function main(splash) {
splash.resume("ok");
}
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"value": "ok", "value_type": "string"})
def test_return_non_ascii_string(self):
resp = self._wait_for_resume_request("""
function main(splash) {
splash.resume("你好");
}
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"value": u"你好", "value_type": "string"})
def test_return_int(self):
resp = self._wait_for_resume_request("""
function main(splash) {
splash.resume(42);
}
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"value": 42, "value_type": "number"})
def test_return_float(self):
resp = self._wait_for_resume_request("""
function main(splash) {
splash.resume(1234.5);
}
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"value": 1234.5, "value_type": "number"})
def test_return_boolean(self):
resp = self._wait_for_resume_request("""
function main(splash) {
splash.resume(true);
}
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"value": True, "value_type": "boolean"})
def test_return_list(self):
resp = self._wait_for_resume_request("""
function main(splash) {
splash.resume([1,2,'red','blue']);
}
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {
"value": [1, 2, 'red', 'blue'],
"value_type": "table"}
)
def test_return_dict(self):
resp = self._wait_for_resume_request("""
function main(splash) {
splash.resume({'stomach':'empty','brain':'crazy'});
}
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {
"value": {'stomach': 'empty', 'brain': 'crazy'},
"value_type": "table"}
)
def test_return_additional_keys(self):
resp = self.request_lua("""
function main(splash)
local result, error = splash:wait_for_resume([[
function main(splash) {
splash.set("foo", "bar");
splash.resume("ok");
}
]])
return result
end""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {'foo': 'bar', 'value': 'ok'})
def test_delayed_return(self):
resp = self._wait_for_resume_request("""
function main(splash) {
setTimeout(function () {
splash.resume("ok");
}, 100);
}
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"value": "ok", "value_type": "string"})
def test_error_string(self):
resp = self._wait_for_resume_request("""
function main(splash) {
splash.error("not ok");
}
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"error": "JavaScript error: not ok"})
def test_timed_out(self):
resp = self._wait_for_resume_request("""
function main(splash) {
setTimeout(function () {
splash.resume("ok");
}, 2500);
}
""", timeout=0.1)
expected_error = 'JavaScript error: One shot callback timed out' \
' while waiting for resume() or error().'
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"error": expected_error})
def test_missing_main_function(self):
resp = self._wait_for_resume_request("""
function foo(splash) {
setTimeout(function () {
splash.resume("ok");
}, 500);
}
""")
self.assertScriptError(resp, ScriptError.LUA_ERROR,
message=r"no main\(\) function defined")
def test_js_syntax_error(self):
resp = self._wait_for_resume_request("""
function main(splash) {
)
setTimeout(function () {
splash.resume("ok");
}, 500);
}
""")
# XXX: why is it LUA_ERROR, not JS_ERROR? Should we change that?
self.assertScriptError(resp, ScriptError.LUA_ERROR,
message="SyntaxError")
def test_navigation_cancels_resume(self):
resp = self._wait_for_resume_request("""
function main(splash) {
location.href = '%s';
}
""" % self.mockurl('/'))
json = resp.json()
self.assertStatusCode(resp, 200)
self.assertIn('error', json)
self.assertIn('canceled', json['error'])
def test_cannot_resume_twice(self):
"""
We can't easily test that resuming twice throws an exception,
because that exception is thrown in Python code after Lua has already
resumed. The server log (if set to verbose) will show the stack trace,
but Lua will have no idea that it happened; indeed, that's the
_whole purpose_ of the one shot callback.
We can at least verify that if resume is called multiple times,
then the first value is returned and subsequent values are ignored.
"""
resp = self._wait_for_resume_request("""
function main(splash) {
splash.resume('ok');
setTimeout(function () {
splash.resume('not ok');
}, 500);
}
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"value": "ok", "value_type": "string"})
class RunjsTest(BaseLuaRenderTest):
def test_define_variable(self):
resp = self.request_lua("""
function main(splash)
assert(splash:runjs("x=5"))
return {x=splash:evaljs("x")}
end
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"x": 5})
def test_runjs_undefined(self):
resp = self.request_lua("""
function main(splash)
assert(splash:runjs("undefined"))
return {ok=true}
end
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"ok": True})
def test_define_function(self):
resp = self.request_lua("""
function main(splash)
assert(splash:runjs("egg = function(){return 'spam'};"))
local egg = splash:jsfunc("window.egg")
return {egg=egg()}
end
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"egg": "spam"})
def test_runjs_syntax_error(self):
resp = self.request_lua("""
function main(splash)
local res, err = splash:runjs("function()")
return {res=res, err=err}
end
""")
self.assertStatusCode(resp, 200)
err = resp.json()['err']
self.assertEqual(err['type'], ScriptError.JS_ERROR)
self.assertEqual(err['js_error_type'], 'SyntaxError')
self.assertEqual(err['splash_method'], 'runjs')
def test_runjs_exception(self):
resp = self.request_lua("""
function main(splash)
local res, err = splash:runjs("var x = y;")
return {res=res, err=err}
end
""")
self.assertStatusCode(resp, 200)
err = resp.json()['err']
self.assertEqual(err['type'], ScriptError.JS_ERROR)
self.assertEqual(err['js_error_type'], 'ReferenceError')
self.assertRegexpMatches(err['message'], "Can't find variable")
self.assertEqual(err['splash_method'], 'runjs')
class JsfuncTest(BaseLuaRenderTest):
def assertJsfuncResult(self, source, arguments, result):
resp = self.request_lua("""
function main(splash)
local func = splash:jsfunc([[%s]])
return func(%s)
end
""" % (source, arguments))
self.assertStatusCode(resp, 200)
if isinstance(result, (dict, list)):
self.assertEqual(resp.json(), result)
else:
self.assertEqual(resp.text, result)
def test_Math(self):
self.assertJsfuncResult("Math.pow", "5, 2", "25")
def test_helloworld(self):
self.assertJsfuncResult(
"function(s) {return 'Hello, ' + s;}",
"'world!'",
"Hello, world!"
)
def test_object_argument(self):
self.assertJsfuncResult(
"function(obj) {return obj.foo;}",
"{foo='bar'}",
"bar",
)
def test_object_result(self):
self.assertJsfuncResult(
"function(obj) {return obj.foo;}",
"{foo={x=5, y=10}}",
{"x": 5, "y": 10},
)
def test_object_result_pass(self):
resp = self.request_lua("""
function main(splash)
local func1 = splash:jsfunc("function(){return {foo:{x:5}}}")
local func2 = splash:jsfunc("function(obj){return obj.foo}")
local obj = func1()
return func2(obj)
end
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"x": 5})
def test_bool(self):
is5 = "function(num){return num==5}"
self.assertJsfuncResult(is5, "5", "True")
self.assertJsfuncResult(is5, "6", "False")
def test_undefined_result(self):
self.assertJsfuncResult("function(){}", "", "None")
def test_undefined_argument(self):
self.assertJsfuncResult("function(foo){return foo}", "", "None")
def test_throw_string(self):
resp = self.request_lua("""
function main(splash)
local func = splash:jsfunc("function(){throw 'ABC'}")
return func()
end
""")
err = self.assertScriptError(resp, ScriptError.JS_ERROR)
self.assertEqual(err['info']['js_error_message'], 'ABC')
self.assertEqual(err['info']['js_error_type'], '<custom JS error>')
def test_throw_pcall(self):
resp = self.request_lua("""
function main(splash)
local func = splash:jsfunc("function(){throw 'ABC'}")
local ok, res = pcall(func)
return {ok=ok, res=res}
end
""")
self.assertStatusCode(resp, 200)
data = resp.json()
self.assertEqual(data["ok"], False)
self.assertIn("error during JS function call: u'ABC'", data["res"])
def test_throw_error(self):
resp = self.request_lua("""
function main(splash)
local func = splash:jsfunc("function(){throw new Error('ABC')}")
return func()
end
""")
err = self.assertScriptError(resp, ScriptError.JS_ERROR)
self.assertEqual(err['info']['js_error_message'], 'ABC')
self.assertEqual(err['info']['js_error_type'], 'Error')
def test_throw_error_empty(self):
resp = self.request_lua("""
function main(splash)
local func = splash:jsfunc("function(){throw new Error()}")
return func()
end
""")
err = self.assertScriptError(resp, ScriptError.JS_ERROR)
self.assertEqual(err['info']['js_error_message'], '')
self.assertEqual(err['info']['js_error_type'], 'Error')
def test_throw_error_pcall(self):
resp = self.request_lua("""
function main(splash)
local func = splash:jsfunc("function(){throw new Error('ABC')}")
local ok, res = pcall(func)
return {ok=ok, res=res}
end
""")
self.assertStatusCode(resp, 200)
data = resp.json()
self.assertEqual(data["ok"], False)
self.assertIn("error during JS function call: u'Error: ABC'", data["res"])
def test_js_syntax_error(self):
resp = self.request_lua("""
function main(splash)
local func = splash:jsfunc("function(){")
return func()
end
""")
err = self.assertScriptError(resp, ScriptError.JS_ERROR)
self.assertEqual(err['info']['js_error_type'], 'SyntaxError')
def test_js_syntax_error_brace(self):
resp = self.request_lua("""
function main(splash)
local func = splash:jsfunc('); window.alert("hello")')
return func()
end
""")
err = self.assertScriptError(resp, ScriptError.JS_ERROR)
self.assertEqual(err['info']['js_error_type'], 'SyntaxError')
def test_array_result(self):
self.assertJsfuncResult(
"function(){return [1, 2, 'foo']}",
"",
[1, 2, "foo"]
)
def test_array_result_processed(self):
# XXX: note that Lua array indexing starts at 1
resp = self.request_lua("""
function main(splash)
local func = splash:jsfunc("function(){return [1, 2, 'foo']}")
local arr = func()
local first = arr[1]
return {arr=arr, first=1, tp=type(arr)}
end
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"arr": [1, 2, "foo"], "first": 1, "tp": "table"})
def test_array_argument(self):
# XXX: note that Lua array indexing starts at 1
self.assertJsfuncResult(
"function(arr){return arr[1]}",
"{5, 6, 'foo'}",
"5",
)
# this doesn't work because table is passed as an object
@pytest.mark.xfail
def test_array_length(self):
self.assertJsfuncResult(
"function(arr){return arr.length}",
"{5, 6, 'foo'}",
"3",
)
def test_jsfunc_attributes(self):
resp = self.request_lua(""" -- 1
function main(splash) -- 2
local func = splash:jsfunc("function(){return 123}") -- 3
return func.source -- 4 <-
end
""")
err = self.assertScriptError(resp, ScriptError.LUA_ERROR,
message="attempt to index")
self.assertEqual(err['info']['line_number'], 4)
def test_private_jsfunc_not_available(self):
resp = self.request_lua("""
function main(splash)
return {ok = splash.private_jsfunc == nil}
end
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json()['ok'], True)
def test_private_jsfunc_attributes(self):
resp = self.request_lua(""" -- 1
function main(splash) -- 2
local func = splash:private_jsfunc("function(){return 123}") -- 3 <-
return func.source -- 4
end
""")
err = self.assertScriptError(resp, ScriptError.LUA_ERROR)
self.assertEqual(err['info']['line_number'], 3)
class WaitTest(BaseLuaRenderTest):
def wait(self, wait_args, request_args=None):
code = """
function main(splash)
local ok, reason = splash:wait%s
return {ok=ok, reason=reason}
end
""" % wait_args
return self.request_lua(code, request_args)
def go_and_wait(self, wait_args, request_args):
code = """
function main(splash)
assert(splash:go(splash.args.url))
local ok, reason = splash:wait%s
return {ok=ok, reason=reason}
end
""" % wait_args
return self.request_lua(code, request_args)
def test_timeout(self):
resp = self.wait("(0.01)", {"timeout": 0.1})
self.assertStatusCode(resp, 200)
resp = self.wait("(1)", {"timeout": 0.1})
err = self.assertJsonError(resp, 504, "GlobalTimeoutError")
self.assertEqual(err['info']['timeout'], 0.1)
def test_wait_success(self):
resp = self.wait("(0.01)")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"ok": True})
def test_wait_noredirect(self):
resp = self.wait("{time=0.01, cancel_on_redirect=true}")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"ok": True})
def test_wait_redirect_nocancel(self):
# jsredirect-timer redirects after 0.1ms
resp = self.go_and_wait(
"{time=0.2, cancel_on_redirect=false}",
{'url': self.mockurl("jsredirect-timer")}
)
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"ok": True})
def test_wait_redirect_cancel(self):
# jsredirect-timer redirects after 0.1ms
resp = self.go_and_wait(
"{time=0.2, cancel_on_redirect=true}",
{'url': self.mockurl("jsredirect-timer")}
)
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"reason": "redirect"}) # ok is nil
@unittest.skipIf(NON_EXISTING_RESOLVABLE, "non existing hosts are resolvable")
def test_wait_onerror(self):
resp = self.go_and_wait(
"{time=2., cancel_on_redirect=false, cancel_on_error=true}",
{'url': self.mockurl("jsredirect-non-existing")}
)
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"reason": "network3"}) # ok is nil
@unittest.skipIf(NON_EXISTING_RESOLVABLE, "non existing hosts are resolvable")
def test_wait_onerror_nocancel(self):
resp = self.go_and_wait(
"{time=2., cancel_on_redirect=false, cancel_on_error=false}",
{'url': self.mockurl("jsredirect-non-existing")}
)
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"ok": True})
@unittest.skipIf(NON_EXISTING_RESOLVABLE, "non existing hosts are resolvable")
def test_wait_onerror_nocancel_redirect(self):
resp = self.go_and_wait(
"{time=2., cancel_on_redirect=true, cancel_on_error=false}",
{'url': self.mockurl("jsredirect-non-existing")}
)
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"reason": "redirect"})
def test_wait_badarg(self):
resp = self.wait('{time="sdf"}')
self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR)
def test_wait_badarg2(self):
resp = self.wait('{time="sdf"}')
self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR)
def test_wait_good_string(self):
resp = self.wait('{time="0.01"}')
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"ok": True})
def test_wait_noargs(self):
resp = self.wait('()')
self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR)
def test_wait_time_missing(self):
resp = self.wait('{cancel_on_redirect=false}')
self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR)
def test_wait_unknown_args(self):
resp = self.wait('{ttime=0.5}')
self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR)
def test_wait_negative(self):
resp = self.wait('(-0.2)')
self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR)
class ArgsTest(BaseLuaRenderTest):
def args_request(self, query):
func = """
function main(splash)
return {args=splash.args}
end
"""
return self.request_lua(func, query)
def assertArgs(self, query):
resp = self.args_request(query)
self.assertStatusCode(resp, 200)
data = resp.json()["args"]
data.pop('lua_source')
data.pop('uid')
return data
def assertArgsPassed(self, query):
args = self.assertArgs(query)
self.assertEqual(args, query)
return args
def test_known_args(self):
self.assertArgsPassed({"wait": "1.0"})
self.assertArgsPassed({"timeout": "2.0"})
self.assertArgsPassed({"url": "foo"})
def test_unknown_args(self):
self.assertArgsPassed({"foo": "bar"})
def test_filters_validation(self):
# 'global' known arguments are still validated
resp = self.args_request({"filters": 'foo,bar'})
err = self.assertJsonError(resp, 400, "BadOption")
self.assertEqual(err['info']['argument'], 'filters')
class JsonPostUnicodeTest(BaseLuaRenderTest):
request_handler = JsonPostRequestHandler
def test_unicode(self):
resp = self.request_lua(u"""
function main(splash) return {key="значение"} end
""".encode('utf8'))
self.assertStatusCode(resp, 200)
self.assertEqual(resp.headers['content-type'], 'application/json')
self.assertEqual(resp.json(), {"key": u"значение"})
class JsonPostArgsTest(ArgsTest):
request_handler = JsonPostRequestHandler
def test_headers(self):
headers = {"user-agent": "Firefox", "content-type": "text/plain"}
self.assertArgsPassed({"headers": headers})
def test_headers_items(self):
headers = [["user-agent", "Firefox"], ["content-type", "text/plain"]]
self.assertArgsPassed({"headers": headers})
def test_access_headers(self):
func = """
function main(splash)
local ua = "Unknown"
if splash.args.headers then
ua = splash.args.headers['user-agent']
end
return {ua=ua, firefox=(ua=="Firefox")}
end
"""
resp = self.request_lua(func, {'headers': {"user-agent": "Firefox"}})
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"ua": "Firefox", "firefox": True})
resp = self.request_lua(func)
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"ua": "Unknown", "firefox": False})
def test_custom_object(self):
self.assertArgsPassed({"myobj": {"foo": "bar", "bar": ["egg", "spam", 1]}})
def test_post_numbers(self):
self.assertArgsPassed({"x": 5})
class GoTest(BaseLuaRenderTest):
def go_status(self, url):
resp = self.request_lua("""
function main(splash)
local ok, reason = splash:go(splash.args.url)
return {ok=ok, reason=reason}
end
""", {"url": url})
self.assertStatusCode(resp, 200)
return resp.json()
def _geturl(self, code, empty=False):
if empty:
path = "getrequest?code=%s&empty=1" % code
else:
path = "getrequest?code=%s" % code
return self.mockurl(path)
def assertGoStatusCodeError(self, code):
for empty in [False, True]:
data = self.go_status(self._geturl(code, empty))
self.assertNotIn("ok", data)
self.assertEqual(data["reason"], "http%s" % code)
def assertGoNoError(self, code):
for empty in [False, True]:
data = self.go_status(self._geturl(code, empty))
self.assertTrue(data["ok"])
self.assertNotIn("reason", data)
def test_go_200(self):
self.assertGoNoError(200)
def test_go_400(self):
self.assertGoStatusCodeError(400)
def test_go_401(self):
self.assertGoStatusCodeError(401)
def test_go_403(self):
self.assertGoStatusCodeError(403)
def test_go_404(self):
self.assertGoStatusCodeError(404)
def test_go_500(self):
self.assertGoStatusCodeError(500)
def test_go_503(self):
self.assertGoStatusCodeError(503)
def test_nourl(self):
resp = self.request_lua("function main(splash) splash:go() end")
self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR)
def test_nourl_args(self):
resp = self.request_lua("function main(splash) splash:go(splash.args.url) end")
err = self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR,
message="required")
self.assertEqual(err['info']['argument'], 'url')
@unittest.skipIf(NON_EXISTING_RESOLVABLE, "non existing hosts are resolvable")
def test_go_error(self):
data = self.go_status("non-existing")
self.assertEqual(data.get('ok', False), False)
self.assertEqual(data["reason"], "network301")
def test_go_multiple(self):
resp = self.request_lua("""
function main(splash)
splash:go(splash.args.url_1)
local html_1 = splash:html()
splash:go(splash.args.url_2)
return {html_1=html_1, html_2=splash:html()}
end
""", {
'url_1': self.mockurl('getrequest?foo=1'),
'url_2': self.mockurl('getrequest?bar=2')
})
self.assertStatusCode(resp, 200)
data = resp.json()
self.assertIn("{'foo': ['1']}", data['html_1'])
self.assertIn("{'bar': ['2']}", data['html_2'])
def test_go_404_then_good(self):
resp = self.request_lua("""
function main(splash)
local ok1, err1 = splash:go(splash.args.url_1)
local html_1 = splash:html()
local ok2, err2 = splash:go(splash.args.url_2)
local html_2 = splash:html()
return {html_1=html_1, html_2=html_2, err1=err1, err2=err2, ok1=ok1, ok2=ok2}
end
""", {
'url_1': self.mockurl('--some-non-existing-resource--'),
'url_2': self.mockurl('bad-related'),
})
self.assertStatusCode(resp, 200)
data = resp.json()
self.assertEqual(data["err1"], "http404")
self.assertNotIn("err2", data)
self.assertNotIn("ok1", data)
self.assertEqual(data["ok2"], True)
self.assertIn("No Such Resource", data["html_1"])
self.assertIn("http://non-existing", data["html_2"])
@unittest.skipIf(NON_EXISTING_RESOLVABLE, "non existing hosts are resolvable")
def test_go_bad_then_good(self):
resp = self.request_lua("""
function main(splash)
splash:go("--non-existing-host")
local ok, err = splash:go(splash.args.url)
return {ok=ok, err=err}
end
""", {"url": self.mockurl("jsrender")})
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"ok": True})
def test_go_headers_cookie(self):
resp = self.request_lua("""
function main(splash)
assert(splash:go{splash.args.url, headers={
["Cookie"] = "foo=bar; egg=spam"
}})
return splash:html()
end
""", {"url": self.mockurl("get-cookie?key=egg")})
self.assertStatusCode(resp, 200)
self.assertIn("spam", resp.text)
def test_go_headers(self):
resp = self.request_lua("""
function main(splash)
assert(splash:go{splash.args.url, headers={
["Custom-Header"] = "Header Value",
}})
local res1 = splash:html()
-- second request is without any custom headers
assert(splash:go(splash.args.url))
local res2 = splash:html()
return {res1=res1, res2=res2}
end
""", {"url": self.mockurl("getrequest")})
self.assertStatusCode(resp, 200)
data = resp.json()
self.assertIn("'Header Value'", data["res1"])
self.assertNotIn("'Header Value'", data["res2"])
def test_set_custom_headers(self):
resp = self.request_lua("""
function main(splash)
splash:set_custom_headers({
["Header-1"] = "Value 1",
["Header-2"] = "Value 2",
})
assert(splash:go(splash.args.url))
local res1 = splash:html()
assert(splash:go{splash.args.url, headers={
["Header-3"] = "Value 3",
}})
local res2 = splash:html()
assert(splash:go(splash.args.url))
local res3 = splash:html()
return {res1=res1, res2=res2, res3=res3}
end
""", {"url": self.mockurl("getrequest")})
self.assertStatusCode(resp, 200)
data = resp.json()
self.assertIn("'Value 1'", data["res1"])
self.assertIn("'Value 2'", data["res1"])
self.assertNotIn("'Value 3'", data["res1"])
self.assertNotIn("'Value 1'", data["res2"])
self.assertNotIn("'Value 2'", data["res2"])
self.assertIn("'Value 3'", data["res2"])
self.assertIn("'Value 1'", data["res3"])
self.assertIn("'Value 2'", data["res3"])
self.assertNotIn("'Value 3'", data["res3"])
class ResourceTimeoutTest(BaseLuaRenderTest):
def test_resource_timeout_aborts_first(self):
resp = self.request_lua("""
function main(splash)
splash:on_request(function(req) req:set_timeout(0.1) end)
local ok, err = splash:go{splash.args.url}
return {err=err}
end
""", {"url": self.mockurl("slow.gif?n=4")})
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {'err': 'render_error'})
def test_resource_timeout_attribute(self):
# request should be cancelled
resp = self.request_lua("""
function main(splash)
splash.resource_timeout = 0.1
assert(splash:go(splash.args.url))
end
""", {"url": self.mockurl("slow.gif?n=4")})
self.assertScriptError(resp, ScriptError.LUA_ERROR,
message='render_error')
def test_resource_timeout_attribute_priority(self):
# set_timeout should take a priority
resp = self.request_lua("""
function main(splash)
splash.resource_timeout = 0.1
splash:on_request(function(req) req:set_timeout(10) end)
assert(splash:go(splash.args.url))
end
""", {"url": self.mockurl("slow.gif?n=4")})
self.assertStatusCode(resp, 200)
def test_resource_timeout_read(self):
resp = self.request_lua("""
function main(splash)
local default = splash.resource_timeout
splash.resource_timeout = 0.1
local updated = splash.resource_timeout
return {default=default, updated=updated}
end
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"default": 0, "updated": 0.1})
def test_resource_timeout_zero(self):
resp = self.request_lua("""
function main(splash)
splash.resource_timeout = 0
assert(splash:go(splash.args.url))
end
""", {"url": self.mockurl("slow.gif?n=1")})
self.assertStatusCode(resp, 200)
resp = self.request_lua("""
function main(splash)
splash.resource_timeout = nil
assert(splash:go(splash.args.url))
end
""", {"url": self.mockurl("slow.gif?n=1")})
self.assertStatusCode(resp, 200)
def test_resource_timeout_negative(self):
resp = self.request_lua("""
function main(splash)
splash.resource_timeout = -1
assert(splash:go(splash.args.url))
end
""", {"url": self.mockurl("slow.gif?n=1")})
err = self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR,
message='splash.resource_timeout')
self.assertEqual(err['info']['line_number'], 3)
class ResultStatusCodeTest(BaseLuaRenderTest):
def test_set_result_status_code(self):
for code in [200, 404, 500, 999]:
resp = self.request_lua("""
function main(splash)
splash:set_result_status_code(tonumber(splash.args.code))
return "hello"
end
""", {'code': code})
self.assertStatusCode(resp, code)
self.assertEqual(resp.text, 'hello')
def test_invalid_code(self):
for code in ["foo", "", {'x': 3}, 0, -200, 195, 1000]:
resp = self.request_lua("""
function main(splash)
splash:set_result_status_code(splash.args.code)
return "hello"
end
""", {'code': code})
err = self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR)
self.assertEqual(err['info']['splash_method'],
'set_result_status_code')
class SetUserAgentTest(BaseLuaRenderTest):
def test_set_user_agent(self):
resp = self.request_lua("""
function main(splash)
splash:go(splash.args.url)
local res1 = splash:html()
splash:set_user_agent("Foozilla")
splash:go(splash.args.url)
local res2 = splash:html()
splash:go(splash.args.url)
local res3 = splash:html()
return {res1=res1, res2=res2, res3=res3}
end
""", {"url": self.mockurl("getrequest")})
self.assertStatusCode(resp, 200)
data = resp.json()
self.assertIn("Mozilla", data["res1"])
self.assertNotIn("Mozilla", data["res2"])
self.assertNotIn("Mozilla", data["res3"])
self.assertNotIn("'user-agent': 'Foozilla'", data["res1"])
self.assertIn("'user-agent': 'Foozilla'", data["res2"])
self.assertIn("'user-agent': 'Foozilla'", data["res3"])
def test_error(self):
resp = self.request_lua("""
function main(splash) splash:set_user_agent(123) end
""")
err = self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR)
self.assertEqual(err['info']['splash_method'], 'set_user_agent')
class CookiesTest(BaseLuaRenderTest):
def test_cookies(self):
resp = self.request_lua("""
function main(splash)
local function cookies_after(url)
splash:go(url)
return splash:get_cookies()
end
local c0 = splash:get_cookies()
local c1 = cookies_after(splash.args.url_1)
local c2 = cookies_after(splash.args.url_2)
splash:clear_cookies()
local c3 = splash:get_cookies()
local c4 = cookies_after(splash.args.url_2)
local c5 = cookies_after(splash.args.url_1)
splash:delete_cookies("foo")
local c6 = splash:get_cookies()
splash:delete_cookies{url="http://example.com"}
local c7 = splash:get_cookies()
splash:delete_cookies{url="http://localhost"}
local c8 = splash:get_cookies()
splash:init_cookies(c2)
local c9 = splash:get_cookies()
return {c0=c0, c1=c1, c2=c2, c3=c3, c4=c4, c5=c5, c6=c6, c7=c7, c8=c8, c9=c9}
end
""", {
"url_1": self.mockurl("set-cookie?key=foo&value=bar"),
"url_2": self.mockurl("set-cookie?key=egg&value=spam"),
})
self.assertStatusCode(resp, 200)
data = resp.json()
cookie1 = {
'name': 'foo',
'value': 'bar',
'domain': 'localhost',
'path': '/',
'httpOnly': False,
'secure': False
}
cookie2 = {
'name': 'egg',
'value': 'spam',
'domain': 'localhost',
'path': '/',
'httpOnly': False,
'secure': False
}
self.assertEqual(data["c0"], [])
self.assertEqual(data["c1"], [cookie1])
self.assertEqual(data["c2"], [cookie1, cookie2])
self.assertEqual(data["c3"], [])
self.assertEqual(data["c4"], [cookie2])
self.assertEqual(data["c5"], [cookie2, cookie1])
self.assertEqual(data["c6"], [cookie2])
self.assertEqual(data["c7"], [cookie2])
self.assertEqual(data["c8"], [])
self.assertEqual(data["c9"], data["c2"])
def test_add_cookie(self):
resp = self.request_lua("""
function main(splash)
splash:add_cookie("baz", "egg")
splash:add_cookie{"spam", "egg", domain="example.com"}
splash:add_cookie{
name="foo",
value="bar",
path="/",
domain="localhost",
expires="2016-07-24T19:20:30+02:00",
secure=true,
httpOnly=true,
}
return splash:get_cookies()
end""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), [
{"name": "baz", "value": "egg", "path": "",
"domain": "", "httpOnly": False, "secure": False},
{"name": "spam", "value": "egg", "path": "",
"domain": "example.com", "httpOnly": False, "secure": False},
{"name": "foo", "value": "bar", "path": "/",
"domain": "localhost", "httpOnly": True, "secure": True,
"expires": "2016-07-24T19:20:30+02:00"},
])
def test_init_cookies(self):
resp = self.request_lua("""
function main(splash)
splash:init_cookies({
{name="baz", value="egg"},
{name="spam", value="egg", domain="example.com"},
{
name="foo",
value="bar",
path="/",
domain="localhost",
expires="2016-07-24T19:20:30+02:00",
secure=true,
httpOnly=true,
}
})
return splash:get_cookies()
end""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), [
{"name": "baz", "value": "egg", "path": "",
"domain": "", "httpOnly": False, "secure": False},
{"name": "spam", "value": "egg", "path": "",
"domain": "example.com", "httpOnly": False, "secure": False},
{"name": "foo", "value": "bar", "path": "/",
"domain": "localhost", "httpOnly": True, "secure": True,
"expires": "2016-07-24T19:20:30+02:00"},
])
class CurrentUrlTest(BaseLuaRenderTest):
def request_url(self, url, wait=0.0):
return self.request_lua("""
function main(splash)
local ok, res = splash:go(splash.args.url)
splash:wait(splash.args.wait)
return {ok=ok, res=res, url=splash:url()}
end
""", {"url": url, "wait": wait})
def assertCurrentUrl(self, go_url, url=None, wait=0.0):
if url is None:
url = go_url
resp = self.request_url(go_url, wait)
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json()["url"], url)
def test_start(self):
resp = self.request_lua("function main(splash) return splash:url() end")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.text, "")
def test_blank(self):
self.assertCurrentUrl("about:blank")
def test_not_redirect(self):
self.assertCurrentUrl(self.mockurl("getrequest"))
def test_jsredirect(self):
self.assertCurrentUrl(self.mockurl("jsredirect"))
self.assertCurrentUrl(
self.mockurl("jsredirect"),
self.mockurl("jsredirect-target"),
wait=0.5,
)
class DisableScriptsTest(BaseLuaRenderTest):
def test_nolua(self):
with SplashServer(extra_args=['--disable-lua']) as splash:
# Check that Lua is disabled in UI
resp = requests.get(splash.url("/"))
self.assertStatusCode(resp, 200)
self.assertNotIn("<textarea", resp.text) # no code editor
script = "function main(splash) return 'foo' end"
# Check that /execute doesn't work
resp = requests.get(
url=splash.url("execute"),
params={'lua_source': script},
)
self.assertStatusCode(resp, 404)
class SandboxTest(BaseLuaRenderTest):
def assertTooMuchCPU(self, resp, subtype=ScriptError.LUA_ERROR):
return self.assertScriptError(resp, subtype,
message="script uses too much CPU")
def assertTooMuchMemory(self, resp, subtype=ScriptError.LUA_ERROR):
return self.assertScriptError(resp, subtype,
message="script uses too much memory")
def test_sandbox_string_function(self):
resp = self.request_lua("""
function main(self)
return string.rep("x", 10000)
end
""")
self.assertScriptError(resp, ScriptError.LUA_ERROR,
message="nil value")
self.assertErrorLineNumber(resp, 3)
def test_sandbox_string_method(self):
resp = self.request_lua("""
function main(self)
return ("x"):rep(10000)
end
""")
self.assertScriptError(resp, ScriptError.LUA_ERROR,
message="attempt to index constant")
self.assertErrorLineNumber(resp, 3)
# TODO: strings should use a sandboxed string module as a metatable
@pytest.mark.xfail
def test_non_sandboxed_string_method(self):
resp = self.request_lua("""
function main(self)
return ("X"):lower()
end
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.text, "x")
def test_infinite_loop(self):
resp = self.request_lua("""
function main(self)
local x = 0
while true do
x = x + 1
end
return x
end
""")
self.assertTooMuchCPU(resp)
def test_infinite_loop_toplevel(self):
resp = self.request_lua("""
x = 0
while true do
x = x + 1
end
function main(self)
return 5
end
""")
self.assertTooMuchCPU(resp, ScriptError.LUA_INIT_ERROR)
def test_infinite_loop_memory(self):
resp = self.request_lua("""
function main(self)
t = {}
while true do
t = { t }
end
return t
end
""")
# it can be either memory or CPU
self.assertScriptError(resp, ScriptError.LUA_ERROR,
message="too much")
def test_memory_attack(self):
resp = self.request_lua("""
function main(self)
local s = "aaaaaaaaaaaaaaaaaaaa"
while true do
s = s..s
end
return s
end
""")
self.assertTooMuchMemory(resp)
def test_memory_attack_toplevel(self):
resp = self.request_lua("""
s = "aaaaaaaaaaaaaaaaaaaa"
while true do
s = s..s
end
function main(self)
return s
end
""")
self.assertTooMuchMemory(resp, ScriptError.LUA_INIT_ERROR)
def test_billion_laughs(self):
resp = self.request_lua("""
s = "s"
s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s
s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s
s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s
s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s
s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s
s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s
s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s
s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s
s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s
s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s
s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s
s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s
s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s s = s .. s
function main() end
""")
self.assertTooMuchMemory(resp, ScriptError.LUA_INIT_ERROR)
def test_disable_sandbox(self):
# dofile function should be always sandboxed
is_sandbox = "function main(splash) return {s=(dofile==nil)} end"
resp = self.request_lua(is_sandbox)
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"s": True})
with SplashServer(extra_args=['--disable-lua-sandbox']) as splash:
resp = requests.get(
url=splash.url("execute"),
params={'lua_source': is_sandbox},
)
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"s": False})
class RequireTest(BaseLuaRenderTest):
def _set_title(self, title):
return """
splash:set_content([[
<html>
<head>
<title>%s</title>
</head>
</html>
]])
""" % title
def assertNoRequirePathsLeaked(self, resp):
self.assertNotIn("/lua", resp.text)
self.assertNotIn("init.lua", resp.text)
def test_splash_patching(self):
title = "TEST"
resp = self.request_lua("""
require "utils_patch"
function main(splash)
%(set_title)s
return splash:get_document_title()
end
""" % dict(set_title=self._set_title(title)))
self.assertStatusCode(resp, 200)
self.assertEqual(resp.text, title)
def test_splash_patching_no_require(self):
resp = self.request_lua("""
function main(splash)
%(set_title)s
return splash:get_document_title()
end
""" % dict(set_title=self._set_title("TEST")))
self.assertScriptError(resp, ScriptError.LUA_ERROR,
message="get_document_title")
self.assertNoRequirePathsLeaked(resp)
def test_require_unsafe(self):
resp = self.request_lua("""
local Splash = require("splash")
function main(splash) return "hello" end
""")
self.assertScriptError(resp, ScriptError.LUA_INIT_ERROR)
self.assertErrorLineNumber(resp, 2)
self.assertNoRequirePathsLeaked(resp)
def test_require_not_whitelisted(self):
resp = self.request_lua("""
local utils = require("utils")
local secret = require("secret")
function main(splash) return "hello" end
""")
self.assertScriptError(resp, ScriptError.LUA_INIT_ERROR)
self.assertErrorLineNumber(resp, 3)
self.assertNoRequirePathsLeaked(resp)
def test_require_non_existing(self):
resp = self.request_lua("""
local foobar = require("foobar")
function main(splash) return "hello" end
""")
self.assertScriptError(resp, ScriptError.LUA_INIT_ERROR)
self.assertNoRequirePathsLeaked(resp)
self.assertErrorLineNumber(resp, 2)
def test_require_non_existing_whitelisted(self):
resp = self.request_lua("""
local non_existing = require("non_existing")
function main(splash) return "hello" end
""")
self.assertScriptError(resp, ScriptError.LUA_INIT_ERROR)
self.assertNoRequirePathsLeaked(resp)
self.assertErrorLineNumber(resp, 2)
def test_module(self):
title = "TEST"
resp = self.request_lua("""
local utils = require "utils"
function main(splash)
%(set_title)s
return utils.get_document_title(splash)
end
""" % dict(set_title=self._set_title(title)))
self.assertStatusCode(resp, 200)
self.assertEqual(resp.text, title)
def test_module_require_unsafe_from_safe(self):
resp = self.request_lua("""
function main(splash)
return require("utils").hello
end
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.text, "world")
class HarTest(BaseLuaRenderTest):
def test_har_empty(self):
resp = self.request_lua("""
function main(splash)
return splash:har()
end
""")
self.assertStatusCode(resp, 200)
har = resp.json()["log"]
self.assertEqual(har["entries"], [])
def test_har_about_blank(self):
resp = self.request_lua("""
function main(splash)
splash:go("about:blank")
return splash:har()
end
""")
self.assertStatusCode(resp, 200)
har = resp.json()["log"]
self.assertEqual(har["entries"], [])
def test_har_reset(self):
resp = self.request_lua("""
function main(splash)
splash:go(splash.args.url)
splash:go(splash.args.url)
local har1 = splash:har()
splash:har_reset()
local har2 = splash:har()
splash:go(splash.args.url)
local har3 = splash:har()
return {har1, har2, har3}
end
""", {'url': self.mockurl("jsrender")})
self.assertStatusCode(resp, 200)
har1 = resp.json()["1"]
har2 = resp.json()["2"]
har3 = resp.json()["3"]
self.assertEqual(len(har1['log']['entries']), 2)
self.assertEqual(har2['log']['entries'], [])
self.assertEqual(len(har3['log']['entries']), 1)
def test_har_reset_argument(self):
resp = self.request_lua("""
function main(splash)
splash:go(splash.args.url)
local har1 = splash:har()
splash:go(splash.args.url)
local har2 = splash:har{reset=true}
local har3 = splash:har()
splash:go(splash.args.url)
local har4 = splash:har()
return {har1, har2, har3, har4}
end
""", {'url': self.mockurl("jsrender")})
self.assertStatusCode(resp, 200)
har1 = resp.json()["1"]
har2 = resp.json()["2"]
har3 = resp.json()["3"]
har4 = resp.json()["4"]
self.assertEqual(len(har1['log']['entries']), 1)
self.assertEqual(len(har2['log']['entries']), 2)
self.assertEqual(har3['log']['entries'], [])
self.assertEqual(len(har4['log']['entries']), 1)
def test_har_reset_inprogress(self):
resp = self.request_lua("""
function main(splash)
splash:go(splash.args.url)
splash:wait(0.5)
local har1 = splash:har{reset=true}
splash:wait(2.5)
local har2 = splash:har()
return {har1, har2}
end
""", {'url': self.mockurl("show-image?n=2.0&js=0.1")})
self.assertStatusCode(resp, 200)
data = resp.json()
har1, har2 = data["1"]["log"], data["2"]["log"]
self.assertEqual(len(har1['entries']), 2)
self.assertEqual(har1['entries'][0]['_splash_processing_state'],
HarBuilder.REQUEST_FINISHED)
self.assertEqual(har1['entries'][1]['_splash_processing_state'],
HarBuilder.REQUEST_HEADERS_RECEIVED)
class AutoloadTest(BaseLuaRenderTest):
def test_autoload(self):
resp = self.request_lua("""
function main(splash)
assert(splash:autoload("window.FOO = 'bar'"))
splash:go(splash.args.url)
local foo1 = splash:evaljs("FOO")
splash:evaljs("window.FOO = 'spam'")
local foo2 = splash:evaljs("FOO")
splash:go(splash.args.url)
local foo3 = splash:evaljs("FOO")
return {foo1=foo1, foo2=foo2, foo3=foo3}
end
""", {"url": self.mockurl("getrequest")})
self.assertStatusCode(resp, 200)
data = resp.json()
self.assertEqual(data, {"foo1": "bar", "foo2": "spam", "foo3": "bar"})
def test_autoload_remote(self):
resp = self.request_lua("""
function main(splash)
assert(splash:autoload(splash.args.eggspam_url))
assert(splash:go(splash.args.url))
local egg = splash:jsfunc("egg")
return egg()
end
""", {
"url": self.mockurl("getrequest"),
"eggspam_url": self.mockurl("eggspam.js"),
})
self.assertStatusCode(resp, 200)
self.assertEqual(resp.text, "spam")
def test_autoload_bad(self):
resp = self.request_lua("""
function main(splash)
local ok, reason = splash:autoload(splash.args.bad_url)
return {ok=ok, reason=reason}
end
""", {"bad_url": self.mockurl("--non-existing--")})
self.assertStatusCode(resp, 200)
self.assertNotIn("ok", resp.json())
self.assertIn("404", resp.json()["reason"])
def test_noargs(self):
resp = self.request_lua("""
function main(splash)
splash:autoload()
end
""")
self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR)
self.assertErrorLineNumber(resp, 3)
def test_autoload_reset(self):
resp = self.request_lua("""
function main(splash)
splash:autoload([[window.FOO = 'foo']])
splash:autoload([[window.BAR = 'bar']])
splash:go(splash.args.url)
local foo1 = splash:evaljs("window.FOO")
local bar1 = splash:evaljs("window.BAR")
splash:autoload_reset()
splash:go(splash.args.url)
local foo2 = splash:evaljs("window.FOO")
local bar2 = splash:evaljs("window.BAR")
return {foo1=foo1, bar1=bar1, foo2=foo2, bar2=bar2}
end
""", {"url": self.mockurl("getrequest")})
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"foo1": "foo", "bar1": "bar"})
class HttpGetTest(BaseLuaRenderTest):
def test_get(self):
resp = self.request_lua("""
function main(splash)
local reply = splash:http_get(splash.args.url)
splash:wait(0.1)
return reply.content.text
end
""", {"url": self.mockurl("jsrender")})
self.assertStatusCode(resp, 200)
self.assertEqual(JsRender.template, resp.text)
def test_bad_url(self):
resp = self.request_lua("""
function main(splash)
return splash:http_get(splash.args.url)
end
""", {"url": self.mockurl("--bad-url--")})
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json()["status"], 404)
def test_headers(self):
resp = self.request_lua("""
function main(splash)
return splash:http_get{
splash.args.url,
headers={
["Custom-Header"] = "Header Value",
}
}
end
""", {"url": self.mockurl("getrequest")})
self.assertStatusCode(resp, 200)
data = resp.json()
self.assertEqual(data["status"], 200)
self.assertIn("Header Value", data["content"]["text"])
def test_redirects_follow(self):
resp = self.request_lua("""
function main(splash)
return splash:http_get(splash.args.url)
end
""", {"url": self.mockurl("http-redirect?code=302")})
self.assertStatusCode(resp, 200)
data = resp.json()
self.assertEqual(data["status"], 200)
self.assertNotIn("redirect to", data["content"]["text"])
self.assertIn("GET request", data["content"]["text"])
def test_redirects_nofollow(self):
resp = self.request_lua("""
function main(splash)
return splash:http_get{url=splash.args.url, follow_redirects=false}
end
""", {"url": self.mockurl("http-redirect?code=302")})
self.assertStatusCode(resp, 200)
data = resp.json()
self.assertEqual(data["status"], 302)
self.assertEqual(data["redirectURL"], "/getrequest?http_code=302")
self.assertIn("302 redirect to", data["content"]["text"])
def test_noargs(self):
resp = self.request_lua("""
function main(splash)
splash:http_get()
end
""")
self.assertScriptError(resp, ScriptError.SPLASH_LUA_ERROR)
class NavigationLockingTest(BaseLuaRenderTest):
def test_lock_navigation(self):
url = self.mockurl("jsredirect")
resp = self.request_lua("""
function main(splash)
splash:go(splash.args.url)
splash:lock_navigation()
splash:wait(0.3)
return splash:url()
end
""", {"url": url})
self.assertStatusCode(resp, 200)
self.assertEqual(resp.text, url)
def test_unlock_navigation(self):
resp = self.request_lua("""
function main(splash)
splash:go(splash.args.url)
splash:lock_navigation()
splash:unlock_navigation()
splash:wait(0.3)
return splash:url()
end
""", {"url": self.mockurl("jsredirect")})
self.assertStatusCode(resp, 200)
self.assertEqual(resp.text, self.mockurl("jsredirect-target"))
def test_go_navigation_locked(self):
resp = self.request_lua("""
function main(splash)
splash:lock_navigation()
local ok, reason = splash:go(splash.args.url)
return {ok=ok, reason=reason}
end
""", {"url": self.mockurl("jsredirect"), "timeout": 1.0})
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {"reason": "navigation_locked"})
class SetContentTest(BaseLuaRenderTest):
def test_set_content(self):
resp = self.request_lua("""
function main(splash)
assert(splash:set_content("<html><head></head><body><h1>Hello</h1></body></html>"))
return {
html = splash:html(),
url = splash:url(),
}
end
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.json(), {
"html": "<html><head></head><body><h1>Hello</h1></body></html>",
"url": "about:blank",
})
def test_unicode(self):
resp = self.request_lua("""
function main(splash)
assert(splash:set_content("проверка"))
return splash:html()
end
""")
self.assertStatusCode(resp, 200)
self.assertIn(u'проверка', resp.text)
def test_related_resources(self):
script = """
function main(splash)
splash:set_content{
data = [[
<html><body>
<img width=50 heigth=50 src="/slow.gif?n=0.2">
</body></html>
]],
baseurl = splash.args.base,
}
return splash:png()
end
"""
resp = self.request_lua(script, {"base": self.mockurl("")})
self.assertStatusCode(resp, 200)
img = Image.open(StringIO(resp.content))
self.assertEqual((0,0,0,255), img.getpixel((10, 10)))
# the same, but with a bad base URL
resp = self.request_lua(script, {"base": ""})
self.assertStatusCode(resp, 200)
img = Image.open(StringIO(resp.content))
self.assertNotEqual((0,0,0,255), img.getpixel((10, 10)))
def test_url(self):
resp = self.request_lua("""
function main(splash)
splash:set_content{"hey", baseurl="http://example.com/foo"}
return splash:url()
end
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.text, "http://example.com/foo")
class GetPerfStatsTest(BaseLuaRenderTest):
def test_get_perf_stats(self):
func = """
function main(splash)
return splash:get_perf_stats()
end
"""
out = self.request_lua(func).json()
self.assertItemsEqual(out.keys(),
['walltime', 'cputime', 'maxrss'])
self.assertIsInstance(out['cputime'], numbers.Real)
self.assertIsInstance(out['walltime'], numbers.Real)
self.assertIsInstance(out['maxrss'], numbers.Integral)
self.assertLess(out['cputime'], 1000.)
self.assertLess(0., out['cputime'])
# Should be safe to assume that splash process consumes between 1Mb
# and 1Gb of RAM, right?
self.assertLess(1E6, out['maxrss'])
self.assertLess(out['maxrss'], 1E9)
# I wonder if we could break this test...
now = time.time()
self.assertLess(now - 120, out['walltime'])
self.assertLess(out['walltime'], now)
class WindowSizeTest(BaseLuaRenderTest):
"""This is a test for window & viewport size interaction in Lua scripts."""
GET_DIMS_AFTER_SCRIPT = """
function get_dims(splash)
return {
inner = splash:evaljs("window.innerWidth") .. "x" .. splash:evaljs("window.innerHeight"),
outer = splash:evaljs("window.outerWidth") .. "x" .. splash:evaljs("window.outerHeight"),
client = (splash:evaljs("document.documentElement.clientWidth") .. "x"
.. splash:evaljs("document.documentElement.clientHeight"))
}
end
function main(splash)
alter_state(splash)
return get_dims(splash)
end
function alter_state(splash)
%s
end
"""
def return_json_from_lua(self, script, **kwargs):
resp = self.request_lua(script, kwargs)
if resp.ok:
return resp.json()
else:
raise RuntimeError(resp.content)
def get_dims_after(self, lua_script, **kwargs):
return self.return_json_from_lua(
self.GET_DIMS_AFTER_SCRIPT % lua_script, **kwargs)
def assertSizeAfter(self, lua_script, etalon, **kwargs):
out = self.get_dims_after(lua_script, **kwargs)
self.assertEqual(out, etalon)
def test_get_viewport_size(self):
script = """
function main(splash)
local w, h = splash:get_viewport_size()
return {width=w, height=h}
end
"""
out = self.return_json_from_lua(script)
w, h = map(int, defaults.VIEWPORT_SIZE.split('x'))
self.assertEqual(out, {'width': w, 'height': h})
def test_default_dimensions(self):
self.assertSizeAfter("",
{'inner': defaults.VIEWPORT_SIZE,
'outer': defaults.VIEWPORT_SIZE,
'client': defaults.VIEWPORT_SIZE})
def test_set_sizes_as_table(self):
self.assertSizeAfter('splash:set_viewport_size{width=111, height=222}',
{'inner': '111x222',
'outer': defaults.VIEWPORT_SIZE,
'client': '111x222'})
self.assertSizeAfter('splash:set_viewport_size{height=333, width=444}',
{'inner': '444x333',
'outer': defaults.VIEWPORT_SIZE,
'client': '444x333'})
def test_viewport_size_roundtrips(self):
self.assertSizeAfter(
'splash:set_viewport_size(splash:get_viewport_size())',
{'inner': defaults.VIEWPORT_SIZE,
'outer': defaults.VIEWPORT_SIZE,
'client': defaults.VIEWPORT_SIZE})
def test_viewport_size(self):
self.assertSizeAfter('splash:set_viewport_size(2000, 2000)',
{'inner': '2000x2000',
'outer': defaults.VIEWPORT_SIZE,
'client': '2000x2000'})
def test_viewport_size_validation(self):
cases = [
('()', 'set_viewport_size.* takes exactly 3 arguments'),
('{}', 'set_viewport_size.* takes exactly 3 arguments'),
('(1)', 'set_viewport_size.* takes exactly 3 arguments'),
('{1}', 'set_viewport_size.* takes exactly 3 arguments'),
('(1, nil)', 'a number is required'),
('{1, nil}', 'set_viewport_size.* takes exactly 3 arguments'),
('(nil, 1)', 'a number is required'),
('{nil, 1}', 'a number is required'),
('{width=1}', 'set_viewport_size.* takes exactly 3 arguments'),
('{width=1, nil}', 'set_viewport_size.* takes exactly 3 arguments'),
('{nil, width=1}', 'set_viewport_size.* takes exactly 3 arguments'),
('{height=1}', 'set_viewport_size.* takes exactly 3 arguments'),
('{height=1, nil}', 'set_viewport_size.* takes exactly 3 arguments'),
('{nil, height=1}', 'set_viewport_size.* takes exactly 3 arguments'),
('{100, width=200}', 'set_viewport_size.* got multiple values.*width'),
# This thing works.
# ('{height=200, 100}', 'set_viewport_size.* got multiple values.*width'),
('{100, "a"}', 'a number is required'),
('{100, {}}', 'a number is required'),
('{100, -1}', 'Viewport is out of range'),
('{100, 0}', 'Viewport is out of range'),
('{100, 99999}', 'Viewport is out of range'),
('{1, -100}', 'Viewport is out of range'),
('{0, 100}', 'Viewport is out of range'),
('{99999, 100}', 'Viewport is out of range'),
]
def run_test(size_str):
self.get_dims_after('splash:set_viewport_size%s' % size_str)
for size_str, errmsg in cases:
self.assertRaisesRegexp(RuntimeError, errmsg, run_test, size_str)
def test_viewport_full(self):
w = int(defaults.VIEWPORT_SIZE.split('x')[0])
self.assertSizeAfter('splash:go(splash.args.url);'
'splash:wait(0.1);'
'splash:set_viewport_full();',
{'inner': '%dx2000' % w,
'outer': defaults.VIEWPORT_SIZE,
'client': '%dx2000' % w},
url=self.mockurl('tall'))
def test_set_viewport_full_returns_dimensions(self):
script = """
function main(splash)
assert(splash:go(splash.args.url))
assert(splash:wait(0.1))
local w, h = splash:set_viewport_full()
return {width=w, height=h}
end
"""
out = self.return_json_from_lua(script, url=self.mockurl('tall'))
w, h = map(int, defaults.VIEWPORT_SIZE.split('x'))
self.assertEqual(out, {'width': w, 'height': 2000})
def test_render_all_restores_viewport_size(self):
script = """
function main(splash)
assert(splash:go(splash.args.url))
assert(splash:wait(0.1))
local before = {splash:get_viewport_size()}
png = splash:png{render_all=true}
local after = {splash:get_viewport_size()}
return {before=before, after=after, png=png}
end
"""
out = self.return_json_from_lua(script, url=self.mockurl('tall'))
w, h = map(int, defaults.VIEWPORT_SIZE.split('x'))
self.assertEqual(out['before'], {'1': w, '2': h})
self.assertEqual(out['after'], {'1': w, '2': h})
# 2000px is hardcoded in that html
img = Image.open(StringIO(standard_b64decode(out['png'])))
self.assertEqual(img.size, (w, 2000))
def test_set_viewport_size_changes_contents_size_immediately(self):
# GH167
script = """
function main(splash)
splash:set_viewport_size(1024, 768)
assert(splash:set_content([[
<html>
<body style="min-width: 800px; margin: 0px"> </body>
</html>
]]))
result = {}
result.before = {splash:set_viewport_full()}
splash:set_viewport_size(640, 480)
result.after = {splash:set_viewport_full()}
return result
end
"""
out = self.return_json_from_lua(script)
self.assertEqual(out,
{'before': {'1': 1024, '2': 768},
'after': {'1': 800, '2': 480}})
@pytest.mark.xfail
def test_viewport_full_raises_error_if_fails_in_script(self):
# XXX: for local resources loadFinished event generally arrives after
# initialLayoutCompleted, so the error doesn't manifest itself.
self.assertRaisesRegexp(RuntimeError, "zyzzy",
self.get_dims_after,
"""
splash:go(splash.args.url)
splash:set_viewport_full()
""", url=self.mockurl('delay'))
class VersionTest(BaseLuaRenderTest):
def test_version(self):
resp = self.request_lua("""
function main(splash)
local version = splash:get_version()
return version.major .. '.' .. version.minor
end
""")
self.assertStatusCode(resp, 200)
self.assertEqual(resp.text, splash_version)
|
Reminding me more of “Beachhead” than “Missile Command”, “The Wall” has you defending a wall against hordes of attackers. No tower defence game, this is old-school arcade turret action.
There’s a shop to buy upgrades to your defences from, too, which is a nice touch (and, of course, you have to shoot enemies like crazy to earn upgrades… what are these survivors doing forcing their defenders to deal with hordes of zombies before giving them the good stuff to defend them with?!). If I had one criticism, it is that the difficulty ramps up more slowly than the upgrades accrue, but maybe that’s just because I’m that good? Nah, not likely. Yes, it’s a fundamentally shallow game, but not in a bad way: it’s great for zoning out as you get “in the zone”, taking out enemy after enemy on reflex. 80 Microsoft Points.
|
"""Views for the node settings page."""
# -*- coding: utf-8 -*-
import os
import httplib as http
from box.client import BoxClient, BoxClientException
from urllib3.exceptions import MaxRetryError
from framework.exceptions import HTTPError
from website.addons.box.model import Box
from website.addons.base import generic_views
from website.addons.box.serializer import BoxSerializer
SHORT_NAME = 'box'
FULL_NAME = 'Box'
box_account_list = generic_views.account_list(
SHORT_NAME,
BoxSerializer
)
box_import_auth = generic_views.import_auth(
SHORT_NAME,
BoxSerializer
)
def _get_folders(node_addon, folder_id):
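    # Builds the folder listing for the Box folder picker. A folder_id of None
    # yields a single synthetic "All Files" root entry; otherwise the folder's
    # metadata is fetched from the Box API and its children are filtered down
    # to sub-folders only.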
node = node_addon.owner
if folder_id is None:
return [{
'id': '0',
'path': 'All Files',
'addon': 'box',
'kind': 'folder',
'name': '/ (Full Box)',
'urls': {
'folders': node.api_url_for('box_folder_list', folderId=0),
}
}]
try:
Box(node_addon.external_account).refresh_oauth_key()
client = BoxClient(node_addon.external_account.oauth_key)
except BoxClientException:
raise HTTPError(http.FORBIDDEN)
try:
metadata = client.get_folder(folder_id)
except BoxClientException:
raise HTTPError(http.NOT_FOUND)
except MaxRetryError:
raise HTTPError(http.BAD_REQUEST)
# Raise error if folder was deleted
if metadata.get('is_deleted'):
raise HTTPError(http.NOT_FOUND)
folder_path = '/'.join(
[
x['name']
for x in metadata['path_collection']['entries']
] + [metadata['name']]
)
return [
{
'addon': 'box',
'kind': 'folder',
'id': item['id'],
'name': item['name'],
'path': os.path.join(folder_path, item['name']),
'urls': {
'folders': node.api_url_for('box_folder_list', folderId=item['id']),
}
}
for item in metadata['item_collection']['entries']
if item['type'] == 'folder'
]
box_folder_list = generic_views.folder_list(
SHORT_NAME,
FULL_NAME,
_get_folders
)
box_get_config = generic_views.get_config(
SHORT_NAME,
BoxSerializer
)
def _set_folder(node_addon, folder, auth):
uid = folder['id']
node_addon.set_folder(uid, auth=auth)
node_addon.save()
box_set_config = generic_views.set_config(
SHORT_NAME,
FULL_NAME,
BoxSerializer,
_set_folder
)
box_deauthorize_node = generic_views.deauthorize_node(
SHORT_NAME
)
box_root_folder = generic_views.root_folder(
SHORT_NAME
)
|
How would you define diversity? WHOSAY Talent Coordinator of Diversity, Daion Morton, asked his panelists at Viacom.
“It’s about accepting and celebrating the fact that we’re different,” said CEO of SHADE, Jacques Bastien.
“It’s about being accounted for,” responded VH1 Social Media Manager and Beauty Influencer, Sarita Nauth.
“It’s about culture and how we connect as communities,” said Chairman of Streamlined Media & Communications, Darren Martin.
The panel “The Changing Face of Influencer Marketing,” part of Communications Week NY: “The Human Factor” Conference, examined the workforce of the future in the media & marketing, which, like consumers, is growing increasingly fragmented and diverse.
Morton then asked: what are media companies and agencies doing to create a more inclusive marketplace?
Martin shared that his agency creates cultural insights that help his clients gain more understanding of communities of color.
Nauth explained that the reason VH1 “didn’t focus on music anymore,” as people often complain, was that there had been a cultural shift from the network’s early days of classic rock toward more relatable and inclusive programming.
The panel then moved to issues of attrition and retention. As Martin remarked, working in advertising is stressful enough. “People of color are extra-stressed, because of the feeling of not being ‘a cultural fit,’” he said. He added that these team members shouldn’t feel they have to change themselves to “fit in” when instead they should be celebrated for who they are.
“Diversity is easy,” said Bastien. “Inclusion is difficult.” The SHADE CEO added that inclusion goes beyond hiring a few minorities for entry-level positions; it also means putting them in executive roles and empowering them to be part of the larger conversation from the get-go.
Both Bastien and Martin acknowledged that being inclusive is not easy. “It means going against your natural, human instincts of only trusting people who look, talk, and act like you in the hopes of getting better ideas and results for your campaigns,” said Bastien. “It takes relinquishing ‘I believe power,’” added Martin.
Indeed, it is not easy, but creating more inclusive environments in the media will lead to better business results. Those who won’t adapt risk missing out, as Nauth explained. “You see more women of color starting their businesses because there’s no room for growth,” she said.
And, as Martin added, it’s just a matter of time before these more culturally aware new agencies start winning clients over the less woke ones.
This post was originally published on WHOSAY’s blog here.
|
#!/usr/bin/env python3
# This file displays the Redis "lists" key
#
# First check that the key has been passed in the form
# Needed for the HTML handling part in Python
import cgi
import cgitb
# Enable error reporting to the web/http server
cgitb.enable()
# My libraries: mjl (Json, Files), mhl (Html), flt (T w/ Redis)
import mjl, mhl, flt
import redis
# General parameters
TestoPagina="Taglia valori da chiave \"lists\" Redis"
DirBase="/var/www"
ConfigFile=DirBase+"/conf/config.json"
#ExecFile="/cgi-bin/<exefile>"
# Redis "key"
RedisKey = "*" # Tutte le chiavi
# Form name/s
FormName = "rkey"
# Open the Redis database using my library's helper
MyDB = flt.OpenDBFile(ConfigFile)
# Start web page - these are HTML blocks provided by the library
print (mhl.MyHtml())
print (mhl.MyHtmlHead())
# Write the page title/text
print ("<h1>","<center>",TestoPagina,"</center>","</h1>")
#print ("<hr/>","<br/>")
# Optional help/annotation
#print ("Non ho rinominato i campi e non sono stato a riordinare le voci.<br/>")
form=cgi.FieldStorage()
if FormName not in form:
print ("<h2>ERRORE: Non e` stata passata la chiave Redis</h2>")
elif "VStart" not in form:
print ("<h3>Manca il valore: Start</h3>")
elif "VStop" not in form:
print ("<h3>Manca il valore: Stop</h3>")
else:
RedisKey = cgi.escape(form[FormName].value)
print ("<b>Prima:</b>")
print ("<table>") # 2 colonne
    # The first entry is not editable and is the Redis key (display only)
print ("<tr>")
print ("<td>")
print ("Key: ")
print ("</td>")
print ("<td>")
print (mhl.MyTextForm(FormName,RedisKey,"40","required","readonly"))
print ("</td>")
print ("</tr>")
print ("<tr>")
print ("<td>")
print ("Primo valore:")
print ("</td>")
print ("<td>")
print (str(MyDB.lindex(RedisKey,"0")))
print ("</td>")
print ("</tr>")
print ("<br/>") # Aggiungo uno spazio (una riga)
print ("<tr>")
print ("<td>")
print ("Valori:")
print ("</td>")
print ("<td>")
print (str(MyDB.llen(RedisKey)))
print ("</td>")
print ("</tr>")
print ("<br/>") # Aggiungo uno spazio (una riga)
print ("<tr>")
print ("<td>")
print ("Ultimo valore:")
print ("</td>")
print ("<td>")
print (str(MyDB.lindex(RedisKey,"-1")))
print ("</td>")
print ("</tr>")
print ("</table>")
RedisKeyStart = cgi.escape(form["VStart"].value)
RedisKeyStop = cgi.escape(form["VStop"].value)
print ("</br></br> <b>Command</b>: ltrim {0:s} {1:s} {2:s} </br></br></br>".format(RedisKey,RedisKeyStart,RedisKeyStop))
if MyDB.ltrim(RedisKey,RedisKeyStart,RedisKeyStop):
print ("<b>Dopo:</b>")
print ("<table>") # 2 colonne
        # The first entry is not editable and is the Redis key (display only)
print ("<tr>")
print ("<td>")
print ("Key: ")
print ("</td>")
print ("<td>")
print (mhl.MyTextForm(FormName,RedisKey,"40","required","readonly"))
print ("</td>")
print ("</tr>")
print ("<tr>")
print ("<td>")
print ("Primo valore:")
print ("</td>")
print ("<td>")
print (str(MyDB.lindex(RedisKey,"0")))
print ("</td>")
print ("</tr>")
print ("<br/>") # Aggiungo uno spazio (una riga)
print ("<tr>")
print ("<td>")
print ("Valori:")
print ("</td>")
print ("<td>")
print (str(MyDB.llen(RedisKey)))
print ("</td>")
print ("</tr>")
print ("<br/>") # Aggiungo uno spazio (una riga)
print ("<tr>")
print ("<td>")
print ("Ultimo valore:")
print ("</td>")
print ("<td>")
print (str(MyDB.lindex(RedisKey,"-1")))
print ("</td>")
print ("</tr>")
print ("</table>")
# End web page
print (mhl.MyHtmlBottom())
|
A Texas jury has returned a $3.5 million award in a medical malpractice lawsuit filed by the family of a woman who died after a doctor failed to diagnose cancer.
The case was filed by the family of Melissa Hendricks in Denton County, Texas. Hendricks died on December 14, 2004 of cancer that went undiagnosed for nearly a year and a half, despite repeated medical visits and concerns expressed to her health care providers.
The verdict, which was returned in a case filed against Dr. Stephen Glaser, physician assistant Jason Maris and Highland Family Medical Center, is reportedly one of the largest in the county since Texas tort reform was enacted in 2003. According to the Denton Record-Chronicle, the verdict will be reduced to $1.5 million under a Texas medical malpractice law that caps the amount that can be awarded for non-economic damages.
In the misdiagnosed cancer lawsuit, Hendricks' family alleged that she first noticed a small lump on the top of her head in mid-2002 and visited the medical center and Dr. Glaser in October of that year. She expressed concerns about the lump because her mother had died of cancer, and Glaser misdiagnosed the bump as a sebaceous cyst, which is a nonmalignant lesion.
A week later, the cyst was removed by Maris, who worked for Glaser. However, the cyst was discarded and no testing was performed to confirm the original diagnosis.
Approximately one year later, the lump returned and Hendricks went to another doctor, who did not remove the lump because she was pregnant. It subsequently grew in size and was confirmed as sarcoma, a form of cancer. Despite an 11th hour fight against the cancer, Hendricks died less than a year after the correct diagnosis.
Early cancer diagnosis is important in nearly all forms of the disease, as treatment options may not be available or as likely to succeed if time elapses because of a cancer misdiagnosis. Patients diagnosed with sarcoma in its early stages who receive proper treatment have a much better prognosis, with the possibility of a full cure if all of the cancerous cells can be completely removed from the body. In later stages, the cancer spreads to the lymph nodes and beyond, making it more difficult to treat.
The jury determined that Glaser and Maris were each 45% responsible in the wrongful death misdiagnosis lawsuit. The other 10% of the responsibility was attributed to Hendricks herself, for waiting to have the lump examined given her family's cancer history.
My mom died in September 2009 of brain cancer – astrocytoma. We had her to the dr. and hospital repeatedly from April 2009 til July 2009, and they insisted she was having “mini-strokes”; even after 2 MRI’s and CT scans, among other tests. Finally at the end of July, 2009 – after I’d insisted she get checked out at the Univ. of Penn, they diagnosed her with level 2 brain cancer – giving us hope that she could be treated. For reasons unknown to me, she was transferred back to the local hospital for “treatment”, and died on Sept. 4, 2009. I believe that someone neglected to diagnose her correctly, and as a result, the tumor grew to an inoperable size. Had she been correctly treated beginning in April, perhaps she would still be with us now. What can I do to get answers, and to have the negligent parties held responsible?
i started going to my primary care doctor in february 07 extreme fatigue and overall not feeling well. kept going back because i just kept feeling worse and worse and the fatigue was overwhelming more than just tired. he kept telling me i had the flu and viruses time after time and antibiotics everytime i went. then my neck was really sore and i was rubbing it and i found a lump. i showed him the lump he wasn’t concerned i told him i have felt cysts and this was different, again more antibiotics. even went to emergency room for another reason and the er doc asked if anything else was going on i told him about the lump he said you don’t have cancer don’t worry about it. also went to a neurologist for my migraines the doctor felt it said don’t worry about it. been to the primary care numerous times and the er doctor and the neurologist. finally in JULY i couldn’t take the way i was feeling anymore and went to my old family doctor who was out of network because of change of insurance so i went to him and paid the extra he said the lump was in a bad place and scheduled me for a needle biopsy which came back and the doctor said i had to have it removed. the results came back with diffuse large b cell lymphoma a very fast and aggressive type of cancer it had already spread to the mediastinum when i started chemotherapy. the oncologist said i was extremely lucky i went to my old primary care doctor because it would not take long for this cancer to take my life. i can’t tell you the number of doctor visits i made. it still makes me very very angry.
I went to the doctor in November 2011, due to the appearance of small tumors on my head, at MGH Boston.
The doctor diagnosed the tumors as benign, an inclusion cyst or sebaceous cyst. After one year they had increased in size very quickly and I began to feel pressure on my skull. I went to another doctor at Boston Medical Center, where the lump was removed, and their analysis showed the tumors were sarcoma; they told me I needed a second, wider operation to remove an area of the scalp. Last week I had a large amount of scalp removed.
Melissa I still recall the first moment that we laid eyes upon each other. You have a very kind soul! R.I.P.
|
from hazelcast.protocol.builtin import FixSizedTypesCodec, CodecUtil
from hazelcast.serialization.bits import *
from hazelcast.protocol.client_message import END_FRAME_BUF, END_FINAL_FRAME_BUF, SIZE_OF_FRAME_LENGTH_AND_FLAGS, create_initial_buffer_custom
from hazelcast.sql import _SqlQueryId
_MEMBER_ID_HIGH_ENCODE_OFFSET = 2 * SIZE_OF_FRAME_LENGTH_AND_FLAGS
_MEMBER_ID_HIGH_DECODE_OFFSET = 0
_MEMBER_ID_LOW_ENCODE_OFFSET = _MEMBER_ID_HIGH_ENCODE_OFFSET + LONG_SIZE_IN_BYTES
_MEMBER_ID_LOW_DECODE_OFFSET = _MEMBER_ID_HIGH_DECODE_OFFSET + LONG_SIZE_IN_BYTES
_LOCAL_ID_HIGH_ENCODE_OFFSET = _MEMBER_ID_LOW_ENCODE_OFFSET + LONG_SIZE_IN_BYTES
_LOCAL_ID_HIGH_DECODE_OFFSET = _MEMBER_ID_LOW_DECODE_OFFSET + LONG_SIZE_IN_BYTES
_LOCAL_ID_LOW_ENCODE_OFFSET = _LOCAL_ID_HIGH_ENCODE_OFFSET + LONG_SIZE_IN_BYTES
_LOCAL_ID_LOW_DECODE_OFFSET = _LOCAL_ID_HIGH_DECODE_OFFSET + LONG_SIZE_IN_BYTES
_INITIAL_FRAME_SIZE = _LOCAL_ID_LOW_ENCODE_OFFSET + LONG_SIZE_IN_BYTES - 2 * SIZE_OF_FRAME_LENGTH_AND_FLAGS
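# The custom initial frame carries four 8-byte longs written back to back
# (member_id_high, member_id_low, local_id_high, local_id_low), so the
# payload size computed above works out to 4 * LONG_SIZE_IN_BYTES = 32 bytes.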
class SqlQueryIdCodec(object):
@staticmethod
def encode(buf, sql_query_id, is_final=False):
initial_frame_buf = create_initial_buffer_custom(_INITIAL_FRAME_SIZE)
FixSizedTypesCodec.encode_long(initial_frame_buf, _MEMBER_ID_HIGH_ENCODE_OFFSET, sql_query_id.member_id_high)
FixSizedTypesCodec.encode_long(initial_frame_buf, _MEMBER_ID_LOW_ENCODE_OFFSET, sql_query_id.member_id_low)
FixSizedTypesCodec.encode_long(initial_frame_buf, _LOCAL_ID_HIGH_ENCODE_OFFSET, sql_query_id.local_id_high)
FixSizedTypesCodec.encode_long(initial_frame_buf, _LOCAL_ID_LOW_ENCODE_OFFSET, sql_query_id.local_id_low)
buf.extend(initial_frame_buf)
if is_final:
buf.extend(END_FINAL_FRAME_BUF)
else:
buf.extend(END_FRAME_BUF)
@staticmethod
def decode(msg):
msg.next_frame()
initial_frame = msg.next_frame()
member_id_high = FixSizedTypesCodec.decode_long(initial_frame.buf, _MEMBER_ID_HIGH_DECODE_OFFSET)
member_id_low = FixSizedTypesCodec.decode_long(initial_frame.buf, _MEMBER_ID_LOW_DECODE_OFFSET)
local_id_high = FixSizedTypesCodec.decode_long(initial_frame.buf, _LOCAL_ID_HIGH_DECODE_OFFSET)
local_id_low = FixSizedTypesCodec.decode_long(initial_frame.buf, _LOCAL_ID_LOW_DECODE_OFFSET)
CodecUtil.fast_forward_to_end_frame(msg)
return _SqlQueryId(member_id_high, member_id_low, local_id_high, local_id_low)
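# Rough encode-side sketch (illustrative values only; the _SqlQueryId field
# order is assumed from the decode() call above):
#
#     buf = bytearray()
#     SqlQueryIdCodec.encode(buf, _SqlQueryId(1, 2, 3, 4), is_final=True)
#     # buf now holds the custom initial frame with the four longs, followed
#     # by a final end frame.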
|
Designed for still lenses, this donut ring screws onto the inner thread of the lens to step up the diameter to 114mm to be used with a matte box.
The 114mm Threaded Donut Rings are an optional accessory designed to screw into the inner threading of your lens to change the outer diameter interface with any matte box to 114mm.
The front end is designed with the absolute minimum overhang to allow for maximum field of view and the precision standard thread allows for quick and easy attachment to the lens.
The ring is even compatible with our Black Hole donut using a separate adapter ring that helps secure the Black Hole when expanded.
Made from anodised aluminium, this ring is lightweight and built with no compromise to offer you maximum durability.
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2008-2009, European Space Agency & European Southern
# Observatory (ESA/ESO)
# Copyright (c) 2008-2009, CRS4 - Centre for Advanced Studies, Research and
# Development in Sardinia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of the European Space Agency, European Southern
# Observatory, CRS4 nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY ESA/ESO AND CRS4 ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL ESA/ESO BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
"""
The Files module provides support for locating the XMP in a file, adding XMP to
a file, or updating the XMP in a file. It returns the entire XMP packet; the
core package can then be used to manipulate the individual XMP properties.
:class:`XMPFiles` contains a number of "smart" file handlers that know how to
efficiently access the XMP in specific file formats. It also includes a
fallback packet scanner that can be used for unknown file formats.
"""
import os
import sys
from . import XMPError, XMPMeta
from .consts import options_mask
from .consts import XMP_CLOSE_NOOPTION
from .consts import XMP_OPEN_OPTIONS
from .consts import XMP_OPEN_NOOPTION
from . import exempi as _cexempi
__all__ = ['XMPFiles']
class XMPFiles(object):
"""API for access to the "main" metadata in a file.
XMPFiles provides the API for the Exempi's File Handler component. This
provides convenient access to the main, or document level, XMP for a file.
The general model is to open a file, read and write the metadata, then
close the file. While open, portions of the file might be maintained in RAM
data structures. Memory usage can vary considerably depending on file
format and access options. The file may be opened for read-only or
read-write access, with typical exclusion for both modes.
Errors result in raising of an :exc:`libxmp.XMPError` exception.
:keyword file_path: Path to file to open.
.. todo::
Documentation
"""
def __init__(self, **kwargs ):
self._file_path = None
self.xmpfileptr = _cexempi.files_new()
if 'file_path' in kwargs:
file_path = kwargs['file_path']
del kwargs['file_path']
self.open_file( file_path, **kwargs )
def __repr__(self):
if self._file_path is None:
return "XMPFiles()"
msg = "XMPFiles(file_path='{0}')"
if sys.hexversion < 0x03000000 and isinstance(self._file_path,
unicode):
# Unicode filenames can cause trouble in python2 because __repr__
# must return byte strings, not unicode. Get around this by
# turning the unicode filename into escaped ASCII. This means that
# in this case, the result cannot be used to recreate the object
# with the same value.
msg = msg.format(repr(self._file_path))
else:
# Python3 does not suffer from this problem.
msg = msg.format(self._file_path)
return msg
def __del__(self):
"""
Free up the memory associated with the XMP file instance.
"""
_cexempi.files_free( self.xmpfileptr )
def open_file(self, file_path, **kwargs ):
"""
Open a given file and read XMP from file. File must be closed again with
:func:`close_file`
:param str file_path: Path to file to open.
:raises XMPError: in case of errors.
.. todo::
Change signature into using kwargs to set option flag
"""
if kwargs:
open_flags = options_mask( XMP_OPEN_OPTIONS, **kwargs )
else:
open_flags = XMP_OPEN_NOOPTION
if self._file_path != None:
raise XMPError('A file is already open - close it first.')
_cexempi.files_open( self.xmpfileptr, file_path, open_flags )
self._file_path = file_path
def close_file( self, close_flags=XMP_CLOSE_NOOPTION):
"""
Close file after use. XMP will not be written to file until
this method has been called.
:param close_flags: One of the close flags
:raises XMPError: in case of errors.
.. todo::
Change signature into using kwargs to set option flag
"""
_cexempi.files_close( self.xmpfileptr, close_flags )
self._file_path = None
def get_xmp( self ):
"""
Get XMP from file.
:return: A new :class:`libxmp.core.XMPMeta` instance.
:raises XMPError: in case of errors.
"""
xmpptr = _cexempi.files_get_new_xmp(self.xmpfileptr)
if xmpptr:
return XMPMeta( _xmp_internal_ref = xmpptr )
else:
return None
def put_xmp(self, xmp_obj):
"""
Write XMPMeta object to file. See also :func:`can_put_xmp`.
:param xmp_obj: An :class:`libxmp.core.XMPMeta` object
"""
xmpptr = xmp_obj.xmpptr
if not self.can_put_xmp(xmp_obj):
msg = 'Cannot write XMP packet into {filename}'
msg = msg.format(filename=os.path.basename(self._file_path))
raise XMPError(msg)
_cexempi.files_put_xmp(self.xmpfileptr, xmpptr)
def can_put_xmp( self, xmp_obj ):
"""Determine if XMP can be written into the file.
Determines if a given :class:`libxmp.core.XMPMeta` object can be
written into the file.
:param xmp_obj: An :class:`libxmp.core.XMPMeta` object
:return: true if :class:`libxmp.core.XMPMeta` object writeable to file.
:rtype: bool
"""
if not isinstance( xmp_obj, XMPMeta ):
raise XMPError('Not a XMPMeta object')
xmpptr = xmp_obj.xmpptr
if xmpptr != None:
return _cexempi.files_can_put_xmp(self.xmpfileptr, xmpptr)
else:
return False
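# A minimal usage sketch of the open/read/modify/close model described above.
# This is a hedged example: 'test.jpg' is an assumed local file, and
# open_forupdate is assumed to be one of the XMP_OPEN_OPTIONS keyword flags.
#
#     xmpfile = XMPFiles(file_path='test.jpg', open_forupdate=True)
#     xmp = xmpfile.get_xmp()
#     if xmp is not None and xmpfile.can_put_xmp(xmp):
#         xmpfile.put_xmp(xmp)
#     xmpfile.close_file()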
|
import codecs
import csv
import cStringIO
import datetime
## From samples in http://docs.python.org/library/csv.html
class UTF8Recoder(object):
"""
Iterator that reads an encoded stream and reencodes the input to UTF-8
Made mandatory by the csv module operating only on 'str'
"""
def __init__(self, f, encoding):
self.reader = codecs.getreader(encoding)(f)
def __iter__(self):
return self
def next(self):
return self.reader.next().encode("utf-8")
class UTF8RecoderWithCleanup(UTF8Recoder):
"Rencode a stream in utf-8, with 'charset' clenaup algorithm in the middle"
def __init__(self, f, encoding):
super(UTF8RecoderWithCleanup, self).__init__(f, encoding)
from encoding_cleaner import get_map_table
(regex, m) = get_map_table(encoding, 'latin1')
self.regex = regex
self.m = m
def next(self):
u = self.reader.next()
tu = self.regex.sub(lambda g: self.m[g.group(0)], u)
return tu.encode('utf-8')
class PrefixReader(object):
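    # Re-yields an already-consumed first line (the prefix) ahead of the
    # remaining stream, optionally filtering every line through linefilter.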
def __init__(self, prefix, stream, linefilter):
self.prefix = prefix
self.stream = stream
self.linefilter = linefilter
def __iter__(self):
linefilter = self.linefilter
if linefilter:
if linefilter(self.prefix):
yield self.prefix
for k in self.stream:
if linefilter(k):
yield k
else:
yield self.prefix
for k in self.stream:
yield k
def write_value(s):
if isinstance(s, unicode):
return s.encode('utf-8')
elif isinstance(s, datetime.datetime):
# Remove timezone
return s.strftime('%Y-%m-%d %H:%M:%S')
else:
return s
class UnicodeCSVWriter:
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
# Redirect output to a queue
self.queue = cStringIO.StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
if encoding == 'utf_16_le':
self.stream.write(codecs.BOM_UTF16_LE)
elif encoding == 'utf_16_be':
self.stream.write(codecs.BOM_UTF16_BE)
elif encoding == 'utf_16':
self.stream.write(codecs.BOM_UTF16)
self.encoder = codecs.getincrementalencoder(encoding)()
def writerow(self, row):
self.writer.writerow(map(write_value, row))
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
def writerows(self, rows):
for row in rows:
self.writerow(row)
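# A minimal usage sketch (hypothetical file name and rows; assumes the same
# Python 2 environment as the cStringIO/unicode handling above):
#
#     with open('export.csv', 'wb') as f:
#         writer = UnicodeCSVWriter(f, encoding='utf_16_le')
#         writer.writerows([
#             [u'name', u'caf\xe9'],
#             [u'created', datetime.datetime(2014, 1, 1, 12, 0)],
#         ])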
|
Minister for Agriculture, Food and Marine Michael Creed T.D. has confirmed that enhanced controls on carcase presentation (trim) will be fully rolled out from January 1st 2019.
A new system has been established in which Veterinary Public Health Inspection Service staff will assist staff of the Beef Carcase Classification Unit to provide additional assurance in relation to the regulation of carcase trimming by factories. The new monitoring system is currently being piloted in a number of factories, with full roll-out to all factories planned from January 1st, 2019.
The additional monitoring of carcase presentation by DAFM veterinary public health staff will provide further assurance to stakeholders that the appropriate dressing specification is being applied. These staff will provide a supporting role for the Beef Carcase Classification staff. Up-skilling of DAFM staff has occurred at regional seminars and local training of officers is being provided.
Carcase classification and carcase presentation (trim) controls in slaughter plants are carried out by a dedicated team of specialist staff in the Beef Carcase Classification Section within DAFM. The Department's remit under EU legislation is to carry out unannounced inspections to ensure that meat plants comply with these legislative requirements for classification, weights and trim.
1.7 million carcasses have been slaughtered to December 2018. To date in 2018 there have been 521 inspections across 32 slaughter plants, and 44,332 carcasses have been inspected by classification officers. On average, each factory has been inspected 16 times to date, with an average of 85 carcasses per inspection. The legal requirement is 8 inspections per year, with only 40 carcasses per inspection.
To date in 2018, a total of 19 on-the-spot fines were issued for non-compliance with the EU reference carcase trimming specification. Under legislation (SI 363/2010), non-compliance with the carcase trim specification attracts a maximum on-the-spot fine of €200 per carcase. In 2016, 28 on-the-spot fines were issued for non-compliance with the EU reference carcase trimming specification.
"In addition, my department will publish the names of plants in which on the spot fines have been applied with effect from 1st January 2019. I also intend to publish the names of plants in which such fines were levied in 2018 and 2016 before year end, subject to the completion of a number of formalities."
|
import time
from django.core.urlresolvers import reverse
from selenium import webdriver
from selenium.webdriver.common.by import By
from theme.tests.multiplatform import SeleniumTestsParentClass, create_driver
class DesktopTests(SeleniumTestsParentClass.MultiPlatformTests):
def setUp(self, driver=None):
super(DesktopTests, self).setUp()
self.driver = driver
if not driver:
self.driver = create_driver('desktop')
self.driver.get(self.live_server_url)
def _logout_helper(self):
self.driver.get(self.live_server_url)
self.wait_for_visible(By.CSS_SELECTOR, '#profile-menu .dropdown-toggle').click()
self.wait_for_visible(By.CSS_SELECTOR, '#signout-menu').click()
def test_login_email(self):
super(DesktopTests, self).test_login_email()
# home page: returned after successful login with profile info in dropdown
profile_dropdown_selector = 'li[id="profile-menu"] a[class="dropdown-toggle"]'
self.wait_for_visible(By.CSS_SELECTOR, profile_dropdown_selector).click()
email = self.wait_for_visible(By.CSS_SELECTOR, '#profile-menu-email').text
self.assertEquals(self.user.email, email)
full_name = self.wait_for_visible(By.CSS_SELECTOR, '#profile-menu-fullname').text
self.assertTrue(self.user.first_name in full_name)
self.assertTrue(self.user.last_name in full_name)
def test_show_login_link_desktop(self):
self.driver.get(self.live_server_url)
self.wait_for_visible(By.CSS_SELECTOR, '#signin-menu')
def test_folder_drag(self):
self._login_helper(self.user.email, self.user_password)
self._create_resource_helper()
self.wait_for_visible(By.CSS_SELECTOR, '#edit-metadata').click()
# Find the files area and click button to create new folder
self.wait_for_visible(By.CSS_SELECTOR, '.fb-file-name').location_once_scrolled_into_view
time.sleep(1.5)
self.wait_for_visible(By.CSS_SELECTOR, '.fb-file-name').click()
self.wait_for_visible(By.CSS_SELECTOR, '#fb-create-folder').click()
# Fill in new folder modal
self.wait_for_visible(By.CSS_SELECTOR, '#txtFolderName').send_keys('Button Folder')
self.wait_for_visible(By.CSS_SELECTOR, '#btn-create-folder').click()
# TODO: try context click for creating a new folder
# drag and drop into new folder
folder_drag_dest = self.wait_for_visible(By.CSS_SELECTOR, '.fb-folder')
file_to_drag = self.wait_for_visible(By.CSS_SELECTOR, '.fb-file')
action_chain = webdriver.ActionChains(self.driver)
action_chain.drag_and_drop(file_to_drag, folder_drag_dest).perform()
time.sleep(1.5)
# Enter new folder and verify contents
self.wait_for_visible(By.CSS_SELECTOR, '#fb-files-container').click()
# Create a mouse down (not click) event on the folder in order to select
# prior to sending the double click.
time.sleep(2.5)
self.driver.find_element_by_css_selector('#hs-file-browser li.fb-folder').click()
self.driver.execute_script('$("#hs-file-browser li.fb-folder").dblclick()')
active_folder_in_crumbs = '//li[@class="active"]/span[contains(text(),"Button Folder")]'
self.wait_for_visible(By.XPATH, active_folder_in_crumbs)
self.assertEqual(self.driver.find_element_by_class_name('fb-file-name').text, 'file.png')
class MobileTests(SeleniumTestsParentClass.MultiPlatformTests):
def setUp(self, driver=None):
super(MobileTests, self).setUp()
self.driver = driver
if not driver:
self.driver = create_driver('mobile')
self.driver.get(self.live_server_url)
def _open_nav_menu_helper(self):
if self.wait_for_visible(By.CSS_SELECTOR, 'ul.navbar-nav', except_fail=False):
return
self.wait_for_visible(By.CSS_SELECTOR, 'button.navbar-toggle').click()
self.wait_for_visible(By.CSS_SELECTOR, 'ul.navbar-nav')
def _login_helper(self, login_name, user_password):
self._open_nav_menu_helper()
super(MobileTests, self)._login_helper(login_name, user_password)
self._open_nav_menu_helper()
def _logout_helper(self):
self._open_nav_menu_helper()
self.wait_for_visible(By.CSS_SELECTOR, 'a[href="{}"]'.format(reverse('logout')))
def test_register_account(self):
self.driver.get(self.live_server_url)
self._open_nav_menu_helper()
super(MobileTests, self).test_register_account()
def test_show_login_link_mobile(self):
self.driver.get(self.live_server_url)
desktop_login = self.driver.find_element_by_css_selector('#signin-menu')
mobile_login = self.driver.find_element_by_css_selector('li.visible-xs a')
self.assertFalse(desktop_login.is_displayed())
self.assertFalse(mobile_login.is_displayed())
self._open_nav_menu_helper()
self.assertTrue(mobile_login.is_displayed())
|
77th minute winner from Ralph Hunter (goal scorer in Ports last 4 matches) gave three valuable points to the champions moving then 5 places up the ESL keeping alive the chances of retaining the championship. Relentless rain on the Cainniepairt did not dampen the spirit of the players who batteled well against a determined Kelty. Too many tackles were harshly penelised with 13 yellow cards handed out. Unfortunately a second yellow for the goal scorer giving him a sending off for again a tackle not warranting any action.
Tayport held off the challenge of Aberdeen Sunnybank to progress to the last sixteen of the NCR Tayside/North Cup at Heatheryfold Park. Ben Honeyman seized on a loose ball to clinically finish from 15 yards in only 6 minutes and this goal settled Tayport, who went on to dominate the early stages. Sunnybank made strenuous efforts to get back into things but were repeatedly denied by an in-form Frazer Fitzpatrick. Ralph Hunter clinched the tie in 64 minutes when he cleverly finished off Steven Stewart's right wing cross at the far post. Sunnybank might have grabbed a consolation goal but were frustrated once again by Fitzpatrick.
K Heenan scores number four.
Ralph Hunter running in on another.
Busy day for Ralph Hunter.
F Fitzpatrick gets up for a corner.
Tayport opened the defence of their OVD Scottish Cup with this second round match against Northern club Wilsons XI, who play at Keith Park, the former home of Bon Accord. Recently appointed co-managers are Kenny Taylor, formerly of Cove Rangers, Banks of Dee and Inverurie Locos, and Mike Barclay, formerly with Locos, Peterhead and Buckie Thistle. Ex-Forfar goalkeeper Mark Kerr and ex-Ross County Willie Watt are part of the squad. Port looked comfortable in the opening 15 minutes, with Ralph Hunter putting them ahead after running onto a through ball and skipping past his marker, looking up to find the Wilsons keeper coming off his line to meet him. A perfectly judged lob and the ball fell into the open net. Port continued to make chances as the rain came down, with a few downpours making the pitch slippery in places. Wilsons had a brief 10 minute spell in which Frazer Fitzpatrick was asked to pull off a diving save, touching over a furious shot which looked goalbound.
In the second period Port controlled the game well; from a scramble in the penalty area Kevin Heenan let loose with a cracking left foot drive into the net. Corners were aplenty, and a cross from the left found Ralph Hunter running in at the near post. Another well placed effort from the big striker and the keeper stretched to his left, but Ralph left him an impossible task. With darkness drawing in quickly, in the last seconds of the match Kevin Heenan ran in on the Wilsons keeper to slide the ball home for number four.
The gaffer brought in Doug Scott on the subs bench for cover and Barry McNaughton was at the match. Barry is on his way to recovery but his face is still badly swollen.
Tayport F Fitzpatrick, G Buist, J Ward, R Morris, G Dailley, J Elliot, A Ramsey, S Stewart, B Craik, K Heenan, R Hunter. Subs B Honeyman, D Wemyss, R Gunnion, D Scott, B McNaughton.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
from Kamaelia.Util.Graphline import Graphline
from Kamaelia.UI.OpenGL.SimpleCube import SimpleCube
Graphline(
CUBEC = SimpleCube(position=(0, 0,-12), rotation=(225,45,135), size=(1,1,1)).activate(),
CUBER = SimpleCube(position=(4,0,-22), size=(2,2,2)).activate(),
CUBEB = SimpleCube(position=(0,-4,-18), rotation=(0,180,20), size=(1,3,2)).activate(),
linkages = {}
).run()
# Licensed to the BBC under a Contributor Agreement: THF
|
Take a look at Goodwill stores for used furniture. You may be surprised at the quality of furniture you'll find at Goodwill. They generally do not accept furniture with any tears or stains, so the overall quality is fairly good, and the prices are much lower than what you'll see buying new. Beware of furniture that seems too cheap. It is always nice to get a good deal, but not at the expense of construction quality. You will spend more money repeatedly replacing cheaply made furniture than you would if you had spent a little more on a quality piece to start with.
Proper care of the wooden furniture in your house is essential. Waxing and dusting must be done regularly to ensure wooden furniture lasts. There are a great many products available for both cleaning and caring for wood furniture. Don't allow a salesperson to pressure you into buying a whole room set unless you want to. While the cost of the entire room deal may be lower than purchasing the items individually, it isn't a deal if you don't want it. Take your time and make careful picks rather than being rushed.
When purchasing a leather couch, check its base very carefully to see whether the leather is dyed evenly everywhere. If you discover white gauze under the sofa, it is made of vinyl. A genuine leather sofa should be evenly dyed, even under the couch and between the cushions. When decorating a home, furniture represents one of the most important ways to express one's personal style and taste. Yet some people are unaware of the best way to get the kind of furniture that fits their lifestyle and needs. Hopefully, this article has provided enough suggestions to help anyone turn their house into a dream home.
While some people relish the thought of choosing new furniture, others find it to be a truly scary prospect. The fact is that not everybody has an adequate understanding of the available options, different styles and price points, and this makes them dread the task a great deal. By reading the information presented below, anyone can gain the knowledge necessary to tackle furniture shopping efficiently.
When determining your budget for furniture, take into account the expected lifespan of the piece. Light aluminum frames or particleboard items are likely far lower in price, but also lower in life expectancy. Heavy, solid wood furniture lasts much longer, albeit at a higher initial investment. How long you plan to stay in a home is a factor too. High-quality furniture doesn't show glue or nails in the construction of the piece. Look for wood joints at ends and corners. This kind of furniture is usually more expensive because it requires more money and time to make.
If you have antique furniture, do not repair it the same way you do your other furniture. Using fasteners, adhesives, finishes, and polishes can negatively affect both the current and future value of the pieces. Some of these products can dramatically discolor or completely alter the appearance of the items. Now that your budget can stay intact, you can simply buy the furniture you require. That means you won't have to worry about overspending anymore, and can instead enjoy the trip to the store. Use these pointers as you go to ensure you make progress in your decorating project today.
When you purchase a sofa, check the frame. Make sure that the board is at least 1″ thick. Sofas with thinner boards will likely squeak. Sit down on the couch and do some wiggling to check for sounds. Purchase small pieces if you'd like a room's character to vary. You can't buy a brand new couch every week, but lamps, end tables and other small pieces can change the character of the room and quickly refresh its look.
The length of cords or the reach of wi-fi connections also factors into the size and number of items you need. Check the condition of the fabric, wood, and metals used.
|
import config
import os
import re
from . import *
from interfaces.validator import Validator
class EmuInput(Validator):
'''
Base class for emulator inputs. Children classes only need to implement functions
_validate_content and _validate_count to return true when the respective fields are valid,
and may optionally define a delimiter other than '*' and a destination path other than
project_root/game.
'''
delimiter = '*'
path = config.data_dir
def __init__(self, content, count=1):
content = str(content)
count = int(count)
if not type(self)._validate_count(count):
raise ValueError('Invalid count "{}".'.format(count))
elif not type(self)._validate_content(content):
raise ValueError('Invalid content "{}".'.format(content))
self._content = content
self._count = count
def __eq__(self, other):
return (isinstance(other, type(self)) and
self.count == other.count and
self.content == other.content)
def __hash__(self):
return hash((self.content, self.count))
def __str__(self):
return self.content + str(self.count)
@abstractstaticmethod
def _validate_content(content):
pass
@abstractstaticmethod
def _validate_count(count):
pass
@classmethod
def _parse_content(cls, message):
'''
Retrieves content portion of input.
:param cls: Current class.
:param message: Message to parse.
'''
message = message.lower()
if cls.delimiter in message:
result = message.split(cls.delimiter)[0]
else:
result = re.sub('\\d+$', '', message)
if not cls._validate_content(result):
raise ValueError('Invalid content "{}".'.format(result))
return result
@classmethod
def _parse_count(cls, message):
'''
Retrieves count portion of input.
:param cls: Current class.
:param message: Message to parse.
:returns: int
'''
if cls.delimiter in message:
result = message.split(cls.delimiter)[1]
else:
match = re.search('\\d+$', message)
result = match.group(0) if match else 1
result = int(result)
if not cls._validate_count(result):
raise ValueError('Invalid count "{}".'.format(result))
return int(result)
@property
def content(self):
return self._content
@property
def count(self):
return self._count
@property
def destination(self):
cls = type(self)
if not cls._filename:
raise NotImplementedError('Class does not define a destination file in {}._filename.'.format(cls.__name__))
return os.path.join(type(self)._location, cls._filename)
@classmethod
def condense(cls, inputs):
'''
Condenses list of inputs into equivalent list with identical consecutive inputs
merged into one, then returns condensed list.
:param inputs: List of inputs to condense.
'''
inputs = list(inputs) # in case of immutable tuple
changed = True
while changed:
changed = False
for i in range(1, len(inputs)):
in1 = inputs[i - 1]
in2 = inputs[i]
if in1.content == in2.content:
count = in1.count + in2.count
button = cls(in1.content, count)
inputs[i - 1] = None
inputs[i] = button
changed = True
inputs = [i for i in inputs if i]
return inputs
def serialize(self):
'''
Serializes input to send to NES.
'''
return self.delimiter.join((str(x) for x in [self.content, self.count]))
@classmethod
def deserialize(cls, serialized):
'''
Deserializes serialized input.
:param cls: Current class.
:param serialized: The serialized input.
:returns: EmuInput object
'''
content = cls._parse_content(serialized)
count = cls._parse_count(serialized)
return cls(content, count)
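
# ---------------------------------------------------------------------------
# Illustrative sketch only, not part of the original project: a minimal child
# class showing how the EmuInput contract described in the class docstring can
# be satisfied. The class name, the accepted button names and the destination
# file are assumptions made for this example.
class ExampleButtonInput(EmuInput):
    _filename = 'example_input.txt'  # assumed destination file name
    _location = config.data_dir      # assumed destination directory

    @staticmethod
    def _validate_content(content):
        # Accept a small, made-up set of button names.
        return content in ('a', 'b', 'start', 'select')

    @staticmethod
    def _validate_count(count):
        # Require a positive, reasonably small repeat count.
        return 0 < count <= 100

# Example round trip with the hypothetical class above:
#     press = ExampleButtonInput.deserialize('a*3')    # content 'a', count 3
#     press.serialize()                                # -> 'a*3'
#     ExampleButtonInput.condense([press, ExampleButtonInput('a', 2)])
#     # -> a single ExampleButtonInput with content 'a' and count 5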
|
Fascia is the band of connective tissue that surrounds all the other tissues in the body, including muscles, bones, tendons, ligaments and organs. It is both taut and flexible, allowing movement without pain or difficulty.
Such things as emotional and physical trauma, bad posture and stress can all cause fascia scars which not only reduce the effectiveness of fascia but can cause pain and pressure on other structures in the body.
Myofascial release techniques are used to identify problem areas and stretch out the fascia.
This is different from other forms of massage; it is a much slower form of treatment, with the therapist applying and maintaining pressure to stretch the fascia back to its original state.
|
#
# Copyright 2015 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from setuptools import find_packages
from setuptools import setup
setup(
name='virt-deploy',
description='Standardized deployment of virtual machines',
author='Federico Simoncelli',
author_email='[email protected]',
url='https://github.com/simon3z/virt-deploy',
version='0.1.7',
packages=find_packages(),
entry_points={
'console_scripts': [
'virt-deploy = virtdeploy.cli:main',
]
},
)
|
A wonderful opportunity to experience the character, personality, and flavour of this picturesque holiday region!
The delightful town of Monchique, which is only 2 minutes drive from the villa, has a pretty central square with street cafes in which to while away the time and people watch. There are beautiful gardens to admire and narrow historic streets to explore. By the way, parking is free everywhere in Monchique, including the underground car park! There are several supermarkets for you to stock up on supplies, three butchers, numerous cafés and a tea room serving delicious home made food, two banks with cash points, three petrol stations, two chemists, a wine shop, gift shops and newsagents selling foreign papers. Recently opened is the new Intermarche supermarket mall, with a selection of shops and plenty of parking, ideal for stocking up on your holiday provisions.
Excellent Dining for all Tastes and Budgets, a "Foodies Heaven"
New to Portugal? Monchique is a perfect introduction. Enjoy "Authentic Portugal" at its best, with some intriguing surprises around every corner.
Monchique is nestled in a valley between the only two mountains in the Algarve, Foia and Picota. The area was settled by the Romans, who built granite terraces on the rich, fertile land to grow produce, and the villa sits on these same terraces today. Corga das Ervilhas - Terraces of Peas! Our terraces still produce oranges, tangerines, lemons, plums, peaches, figs, sharon fruit, grapes and olives, all of which our guests are welcome to eat.
There are level walks, steep walks, walks to the top of Foia and walks to the cafes in town. Bird watching is very popular as we are on a major migratory route. Cork oak forests abound and offer welcome shade from the hot summer sun, and after all that exercise kick off your shoes and cool off in the infinity pool. Wind down with a glass of local wine or ice cold beer or test your culinary skills on the BBQ. Please read more about "The Authentic Algarve"
Serra de Monchique - guaranteed to capture your heart.
From the villa there are many attractions within a short drive. By the way, 83% of the total land area of the Serra da Monchique is a declared natural reserve. The peaks and contour roads of Foia and Picota afford 360 degree panoramic views, so don't forget your camera! For the more adventurous, take some of the side roads and visit deserted watermills or the ghost village of Barbelote with its stunning cascade. The mountains here are quite spectacular and not to be missed, ideal for the biker, the hiker or just a casual stroll. You can rent bikes, and they will even deliver them to and collect them from the villa.
The famous village of Caldas de Monchique with its health spa is only 10 minutes drive away. Here you can take in the famous Monchique spring waters and enjoy being pampered in the health salon at Hotel Termas de Monchique. On summer evenings you can listen to local live music whilst dining in the Caldas de Monchique square at Restaurant 1692.
There are several golf courses, the nearest being Morgado do Regengo just 15 minutes drive away, where you can rent clubs if you did not bring your own. The new Autodromo do Algarve, an F1-designed race track, features various motor sport events and track days and, if you are up to it, a stunning go kart track open to the public. The busier tourist areas of the south coast and the town of Portimao, with shopping malls, night life, sardine restaurants, sandy beaches and a marina, are just 25 minutes away. Take a boat trip from Portimao or Lagos to watch the dolphins in their natural habitat.
Portimao has now surpassed Faro to become the shopping mecca of the Algarve and is just 20 minutes drive from Monchique. Aqua Portimao, the largest shopping mall on the Algarve, has 117 shops, restaurants and a hypermarket spread over three floors, and again parking is free. There is an excellent choice including many big names such as Primark, H & M, Zara, Foot Locker, Mango, Massimo Dutti and much more. It is open daily from 0900 to 2400. There are also numerous other supermarkets in Portimao such as Lidl and Aldi and a retail centre with C & A, Sport Zone, Guimaraes shoes and Casa homeware.
Monchique Wild Boar and Chestnuts, amazing!
|
#!/usr/bin/env python
from keystoneclient.exceptions import NotFound, Conflict
from keystoneauth1.identity import v3
from keystoneclient.auth import identity
from keystoneclient import session
from keystoneclient.v3 import client
from myLogger import *
class SimpleKeystoneClient:
"""
Add enc functions to creation request
"""
def __init__(self, admin_user, admin_pass, admin_tenant, auth_url):
auth = v3.Password(auth_url=auth_url, username=admin_user,
password=admin_pass, project_name=admin_tenant,
user_domain_id="default", project_domain_id="default")
sess = session.Session(auth=auth)
self.ks_client = client.Client(session=sess)
def create_tenant(self, name, **kwargs):
try:
project = self.ks_client.projects.find(name=name)
logger.info('Project %s exists [id: %s].' % (name, project.id))
except NotFound:
            project = self.ks_client.projects.create(name=name, domain="default", **kwargs)
logger.info('Project %s created [id: %s].' % (name, project.id))
return project
def create_user(self, name, password, tenant_name, **kwargs):
try:
user = self.ks_client.users.find(name=name)
logger.info('User %s exists (password unchanged).' % name)
except NotFound:
tenant = self.create_tenant(tenant_name)
user = self.ks_client.users.create(name=name, password=password,
tenant_id=tenant.id, **kwargs)
logger.info('User %s created.' % name)
return user
def create_role(self, role_name, **kwargs):
try:
role = self.ks_client.roles.find(name=role_name)
logger.info('Role %s exists.' % role_name)
except NotFound:
role = self.ks_client.roles.create(role_name, **kwargs)
logger.info('Role %s created.' % role_name)
return role
def add_user_role(self, user, role, tenant, **kwargs):
try:
            self.ks_client.roles.grant(user=user, role=role, project=tenant, **kwargs)
logger.info('Role given to user.')
except Conflict:
logger.info('User already has the requested role.')
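
# Hypothetical usage sketch (endpoint, credentials and names below are
# placeholders, not values from any real deployment):
#
#     client = SimpleKeystoneClient('admin', 'secret', 'admin',
#                                   'http://controller:5000/v3')
#     tenant = client.create_tenant('demo')
#     user = client.create_user('demo-user', 'demo-pass', 'demo')
#     role = client.create_role('member')
#     client.add_user_role(user, role, tenant)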
|
The HRL 1234W is specially designed for high-efficiency discharge and long-life applications. Its characteristics are small volume, light weight and high discharge efficiency. It can be used for up to 8 years in standby service.
|
#!/usr/bin/python3
# Copyright (C) 2014-2017 Cyrille Defranoux
#
# This file is part of Homewatcher.
#
# Homewatcher is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Homewatcher is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Homewatcher. If not, see <http://www.gnu.org/licenses/>.
#
# For any question, feature requests or bug reports, feel free to contact me at:
# knx at aminate dot net
from homewatcher import ensurepyknx
from pyknx import logger
import xml.dom.minidom
import os.path
import itertools
import re
from functools import cmp_to_key
class Property(object):
"""
Represents a property of an object which is part of the configuration.
A Property is an atomic piece of data that composes the object.
"""
class XMLEntityTypes(object):
ATTRIBUTE = 1 << 0
CHILD_ELEMENT = 1 << 1
INNER_TEXT = 1 << 2
def __init__(self, name, type, xmlEntityType, namesInXML=None, groupNameInXML = None, isCollection=False, isUnique=False, values=None, getter=None):
# Use property's name as name in XML by default.
if namesInXML == None: namesInXML = name
# Names in XML must always be iterable.
if isinstance(namesInXML, str): namesInXML = (namesInXML,)
self.name = name
self.type = type
self.namesInXML = namesInXML # Names of the attribute or child element in xml when applicable, None if this property does not come from a single attribute.
self.groupNameInXML = groupNameInXML
self.xmlEntityType = xmlEntityType
self.isCollection = isCollection # Whether this property is a collection of values.
self.isUnique = isUnique
self.values = values # Collection of possible values. May be a callable (configuration object and property's owner object are passed as arguments). If None, no restriction on values.
self.getter = getter # Optional method to call to retrieve property value. If set to None, the owner object's field named the same as this property is used.
def isOfPrimitiveType(self):
return self.type in (str, int, float, bool)
def isOfClassType(self):
return not self.isOfPrimitiveType()
def isDefinedOn(self, object):
return self.name in vars(object) and vars(object)[self.name] != None
def checkValue(self, configuration, object, value, collectedValues):
if self.isCollection:
if not isinstance(value, list):
raise Configuration.IntegrityException('A list is expected.')
values = value
else:
values = [value]
acceptableValues = self.getAcceptablesValues(configuration, object)
if self.type == str:
acceptableTypes = (str,)
elif self.type == float:
# Accept int too!
acceptableTypes = (self.type, int)
else:
acceptableTypes = (self.type,)
for v in values:
if v == None: continue
if not isinstance(v, acceptableTypes):
raise Configuration.IntegrityException('A value of type {0} was expected, \"{1}\" of type {2} found.'.format(acceptableTypes, v, type(v)))
if acceptableValues != None and not v in acceptableValues:
raise Configuration.IntegrityException('A value in {0} is expected, {1} found.'.format(acceptableValues, v))
# Is this value unique?
if self.isUnique and self.name in collectedValues and v in collectedValues[self.name]:
raise Configuration.IntegrityException('Value {0} is already assigned to another object.'.format(v))
# Collect this value.
if not self.name in collectedValues:
collectedValues[self.name] = []
collectedValues[self.name].append(v)
def getAcceptablesValues(self, configuration, object):
if self.values == None: return None
if callable(self.values):
return self.values(configuration, object)
else:
return self.values
def getValueFor(self, object, config):
if not self.isDefinedOn(object): return None
if self.getter == None:
return vars(object)[self.name]
else:
return self.getter(object, config)
def checkObjectIntegrity(self, configuration, object, collectedValues):
if not self.isDefinedOn(object): return
value = self.getValueFor(object, configuration)
try:
self.checkValue(configuration, object, value, collectedValues)
except Configuration.IntegrityException as e:
raise Configuration.IntegrityException('Property {0} is invalid: {1}'.format(self, e), problematicObject=object)
if self.isOfClassType():
if hasattr(self.type, 'PROPERTY_DEFINITIONS'):
self.type.PROPERTY_DEFINITIONS.checkIntegrity(configuration, value)
def clone(self, source, destination):
if self.name in vars(source):
if vars(source)[self.name] == None:
vars(destination)[self.name] = None
return
copyProperty = lambda p: p if self.isOfPrimitiveType() else p.copy()
if self.isCollection:
vars(destination)[self.name] = []
for prop in vars(source)[self.name]:
vars(destination)[self.name].append(copyProperty(prop))
else:
vars(destination)[self.name] = copyProperty(vars(source)[self.name])
def fromXML(self, xmlElement):
# Scan sources for this property.
sources = []
for nameInXML in self.namesInXML:
if self.xmlEntityType & Property.XMLEntityTypes.ATTRIBUTE != 0:
attributeValue = Configuration.getXmlAttribute(xmlElement, nameInXML, None)
if attributeValue != None:
sources.append(attributeValue)
if self.xmlEntityType & Property.XMLEntityTypes.CHILD_ELEMENT != 0:
sources += Configuration.getElementsInConfig(xmlElement, nameInXML, self.groupNameInXML)
if self.xmlEntityType & Property.XMLEntityTypes.INNER_TEXT != 0:
sources.append(Configuration.getTextInElement(xmlElement, mustFind=False))
values = []
for source in sources:
if source == None: continue
if self.isOfPrimitiveType():
# Property type is a primitive type, let's get a string from
# source.
if not isinstance(source, str):
# Source is assumed to be an xml element.
sourceStr = Configuration.getTextInElement(source, mustFind = True)
else:
sourceStr = source
if self.type == str:
values.append(sourceStr)
elif self.type == int:
values.append(int(sourceStr))
elif self.type == float:
values.append(float(sourceStr))
elif self.type == bool:
if sourceStr.lower() == 'true':
values.append(True)
elif sourceStr.lower() == 'false':
values.append(False)
else:
raise Configuration.IntegrityException('Property {0}={1} is not a boolean constant. Expecting {{true, false}}, case insensitive.'.format(self, sourceStr), xmlContext=xmlElement.toxml())
else:
# Type corresponds to a class.
if isinstance(source, str):
values.append(self.type.fromString(source))
else:
# Call the static method "fromXML" if present. Otherwise,
# run the predefined behaviour.
if hasattr(self.type, 'fromXML') and callable(self.type.fromXML):
newPropertyValue = self.type.fromXML(source)
else:
# Create a default instance.
try:
newPropertyValue = self.type()
if hasattr(self.type, 'PROPERTY_DEFINITIONS'):
self.type.PROPERTY_DEFINITIONS.readObjectFromXML(newPropertyValue, source)
except:
# logger.reportException('Type {type} has neither static fromXML(xmlElement) nor __init__() method. At least one is required to parse it properly.'.format(type=self.type))
raise
# Assign attributes from XML.
if hasattr(newPropertyValue, 'attributes'):
for k, v in source.attributes.items():
newPropertyValue.attributes[k] = v
values.append(newPropertyValue)
if not values: return None
if self.isCollection:
return values
else:
if len(values) > 1:
raise Configuration.IntegrityException('Property {0} is not a collection, it must have a single value.'.format(self), xmlContext=xmlElement.toxml())
return values[0]
def toXml(self, config, propertyOwner, xmlDoc, xmlElement):
# Create group if necessary.
if self.groupNameInXML != None:
group = next(Configuration.getElementsInConfig(xmlElement, self.groupNameInXML, None), None)
if not group:
group = xmlDoc.createElement(self.groupNameInXML)
xmlElement.appendChild(group)
xmlElement = group
value = self.getValueFor(propertyOwner, config)
# Make sure the remainder of this method works on a collection of values.
values = value if isinstance(value, list) else [value]
for value in values:
if hasattr(value, 'toXml') and callable(value.toXml):
# Use the instance toXml() method.
value.toXml(config, self, propertyOwner, xmlDoc, xmlElement)
else:
# Format property using its inner properties.
logger.reportDebug('toXml for {0} on {1}'.format(self, propertyOwner))
if self.xmlEntityType & Property.XMLEntityTypes.ATTRIBUTE != 0:
valueStr = str(value)
xmlElement.setAttribute(self.namesInXML[0], valueStr)
elif self.xmlEntityType & Property.XMLEntityTypes.CHILD_ELEMENT != 0:
childNode = xmlDoc.createElement(self.namesInXML[0])
if self.isOfPrimitiveType():
textNode = xmlDoc.createTextNode(str(value))
childNode.appendChild(textNode)
else:
childNode = xmlDoc.createElement(self.namesInXML[0])
type(value).PROPERTY_DEFINITIONS.toXml(config, value, xmlDoc, childNode)
xmlElement.appendChild(childNode)
elif self.xmlEntityType & Property.XMLEntityTypes.INNER_TEXT != 0:
textNode = xmlDoc.createTextNode(str(value))
xmlElement.appendChild(textNode)
def __repr__(self):
s = self.name
attributeOrChild = ''
if self.xmlEntityType & Property.XMLEntityTypes.ATTRIBUTE != 0:
attributeOrChild = 'attribute'
if self.xmlEntityType & Property.XMLEntityTypes.CHILD_ELEMENT != 0:
if attributeOrChild: attributeOrChild += ' or '
attributeOrChild += 'element'
if self.xmlEntityType & Property.XMLEntityTypes.INNER_TEXT != 0:
if attributeOrChild: attributeOrChild += ' or '
attributeOrChild += 'inner text'
if len(self.namesInXML) > 1:
plural = 's'
namesInXML = self.namesInXML
else:
plural = ''
namesInXML = self.namesInXML[0]
s += ' (cf. the "{namesInXML}" {attributeOrChild}{plural} in XML)'.format(attributeOrChild=attributeOrChild, namesInXML=namesInXML, plural=plural)
return s
class PropertyGroup(object):
""" Group properties that must be considered simultaneously when determining whether they are mandatory or not.
If the group is mandatory, the configuration is full of integrity as long as at least one of the group's properties is defined. """
class GroupUseContext(object):
def __init__(self, configuration, object):
self.configuration = configuration
self.object = object
def __init__(self, properties, isMandatory):
self.properties = properties
self.isMandatoryCallable = isMandatory if callable(isMandatory) else lambda context: isMandatory
def isMandatory(self, object):
return self.isMandatoryCallable(object)
def checkObjectIntegrity(self, configuration, object, collectedValues):
isDefined = False
for prop in self.properties:
prop.checkObjectIntegrity(configuration, object, collectedValues)
isDefined |= prop.isDefinedOn(object)
if self.isMandatory(PropertyGroup.GroupUseContext(configuration, object)) and not isDefined:
if len(self.properties) == 1:
raise Configuration.IntegrityException('"{0}" should define the property {1}.'.format(object, self.properties[0]), problematicObject=object)
else:
raise Configuration.IntegrityException('"{0}" should define at least one of the properties {1}.'.format(object, self.properties), problematicObject=object)
class PropertyCollection(object):
""" Collection of properties stored in groups with an associated mandatory status. """
def __init__(self):
self.propertyGroups = []
self.ignoreCheckIntegrityCallable = lambda object: False
def addProperty(self, propertyName, isMandatory, type, xmlEntityType, namesInXML=None, groupNameInXML = None, isCollection=False, isUnique=False, values=None, getter=None):
self.propertyGroups.append(PropertyGroup([Property(name=propertyName, type=type, xmlEntityType = xmlEntityType, namesInXML=namesInXML, groupNameInXML=groupNameInXML, isCollection=isCollection, isUnique=isUnique, values=values, getter=getter)], isMandatory))
def addPropertyGroup(self, properties, isGroupMandatory = True):
group = PropertyGroup(properties[:], isGroupMandatory)
self.propertyGroups.append(group)
def cloneProperties(self, source, destination):
for propDef in self.properties:
propDef.clone(source, destination)
@property
def properties(self):
return itertools.chain(*[group.properties for group in self.propertyGroups])
def getProperty(self, propertyName):
for group in self.propertyGroups:
prop = [p for p in group.properties if p.name == propertyName]
if prop:
return prop[0]
raise Exception('No property {0} found in group {1}.'.format(propertyName, self))
def readObjectFromXML(self, object, xmlElement):
object.xmlSource = xmlElement.toxml()
for prop in self.properties:
if prop.namesInXML != None:
value = prop.fromXML(xmlElement)
if value is None:
if prop.isDefinedOn(object):
# We are better off keeping the current value than
# overriding it with the never explicitly-defined (hence rather meaningless) None value.
continue
else:
# Assigning the None value guarantees that all properties are always defined on the
# destination object even if the XML configuration is not complete.
vars(object)[prop.name] = value
else:
if prop.isCollection and prop.isDefinedOn(object):
# Do not override current items!
vars(object)[prop.name].extend(value)
else:
# First definition of collection or assignment of a simple field.
vars(object)[prop.name] = value
def checkIntegrity(self, configuration, obj, collectedValues=None):
"""
Checks the integrity of an object wrt this collection of properties.
configuration: Configuration object that contains the object to check.
obj: Object to check
collectedValues: Properties' values. It is a dictionary that indexes list of values with property names as keys.
"""
if collectedValues == None: collectedValues = {}
objects = obj if isinstance(obj, list) else [obj]
for o in objects:
if self.ignoreCheckIntegrityCallable(o): continue
for group in self.propertyGroups:
group.checkObjectIntegrity(configuration, o, collectedValues)
def toXml(self, config, propertyOwner, xmlDoc, xmlElement):
for prop in self.properties:
logger.reportDebug('toXml {0} on {1}'.format(prop, propertyOwner))
if prop.isDefinedOn(propertyOwner):
prop.toXml(config, propertyOwner, xmlDoc, xmlElement)
else:
logger.reportDebug('not defined')
# def generateDocumentation(self, classs, collector):
# # Check for reentrance.
# if collector.containsDocumentationForClass(classs): return
#
# # f.write('#{0}\n'.format(classs.__name__))
# for propertyGroup in self.propertyGroups:
# for header, entityType in [('Attributes', Property.XMLEntityTypes.ATTRIBUTE), ('Text', Property.XMLEntityTypes.INNER_TEXT), ('Children', Property.XMLEntityTypes.CHILD_ELEMENT)]:
# for property in propertyGroup.properties:
# if entityType & property.xmlEntityType == 0: continue
# collector.addDocumentationFor(class, '## {0}'.format(header))
# if property.isOfClassType():
# collector.addDocumentationFor(classs, '- [{0}](#{1}): {2}'.format(property.namesInXML[0], property.type, property.documentation.summary))
# else:
# collector.addDocumentationFor(classs, '- {0} ({1}): {2}'.format(property.namesInXML[0], property.type, property.documentation.summary))
# if property.documentation != None:
# collector.addDocumentationForClass(classs, property.documentation.summary + '\n')
# if property.isOfClassType():
# typeContent = '[{propType}](#{propType})'.format(propType=property.type.__name__)
# if hasattr(property.type, 'PROPERTY_DEFINITIONS'):
# property.type.PROPERTY_DEFINITIONS.generateDocumentation(property.type, collector)
# else:
# typeContent = property.type.__name__
# if len(property.namesInXML) > 1: raise Exception('The documentation generator assumes that there is only a single XML tag name associated to each property.')
# collector.addDocumentationForClass(classs, 'Xml tag name: {0}'.format('`<{0}/>`'.format(property.namesInXML[0])))
# collector.addDocumentationForClass(classs, 'type: {0}'.format(typeContent))
# if property.values != None and not callable(property.values):
# collector.addDocumentationForClass(classs, 'Accepted Values: {0}'.format(list(property.values)))
class ParameterizableString(object):
"""
Represents a string in the XML configuration that can be parameterized with <context> children.
Refer to the 'context handler' concept to understand how parameterization can take place with those children.
This class is quite useless but is required to have an object that holds the automatically-created xmlSource property.
"""
pass
class PyknxService(object):
"""Represents the configuration for the communication with the hosting Pyknx daemon.
The Pyknx daemon is the underlying process for Homewatcher that handles the communication with the Linknx daemon.
"""
PROPERTY_DEFINITIONS = PropertyCollection()
PROPERTY_DEFINITIONS.addProperty('host', isMandatory=False, type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE|Property.XMLEntityTypes.CHILD_ELEMENT)
PROPERTY_DEFINITIONS.addProperty('port', isMandatory=False, type=int, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE|Property.XMLEntityTypes.CHILD_ELEMENT)
def __init__(self):
self.host = '127.0.0.1'
self.port = 1029
def __repr__(self):
return 'PyknxService(host={host}, port={port})'.format(**vars(self))
# class SMTPService(object):
# PROPERTY_DEFINITIONS = PropertyCollection()
# PROPERTY_DEFINITIONS.addProperty('host', isMandatory=False, type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE|Property.XMLEntityTypes.CHILD_ELEMENT)
# PROPERTY_DEFINITIONS.addProperty('port', isMandatory=False, type=int, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE|Property.XMLEntityTypes.CHILD_ELEMENT)
# PROPERTY_DEFINITIONS.addProperty('fromAddress', isMandatory=False, type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE|Property.XMLEntityTypes.CHILD_ELEMENT)
#
# def __init__(self):
# self.host = 'localhost'
# self.port = 25
#
# def __repr__(self):
# return 'SMTPService(host={host}, port={port})'.format(**vars(self))
class LinknxService(object):
PROPERTY_DEFINITIONS = PropertyCollection()
hostProp = Property('host', type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE|Property.XMLEntityTypes.CHILD_ELEMENT)
portProp = Property('port', type=int, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE|Property.XMLEntityTypes.CHILD_ELEMENT)
PROPERTY_DEFINITIONS.addPropertyGroup((hostProp, portProp))
PROPERTY_DEFINITIONS.addProperty('ignoreEmail', isMandatory=False, type=bool, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE)
def __init__(self):
self.host = '127.0.0.1'
self.port = 1028
self.ignoreEmail = False
@property
def address(self):
return (self.host, self.port)
def __repr__(self):
return 'LinknxService(host={host},port={port})'.format(**vars(self))
class ServicesRepository(object):
PROPERTY_DEFINITIONS = PropertyCollection()
PROPERTY_DEFINITIONS.addProperty('linknx', isMandatory=False, type=LinknxService, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT)
PROPERTY_DEFINITIONS.addProperty('daemon', isMandatory=False, type=PyknxService, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT)
# PROPERTY_DEFINITIONS.addProperty('smtp', isMandatory=False, type=SMTPService, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT)
def __init__(self):
self.linknx = LinknxService()
self.daemon = PyknxService()
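
# Illustrative sketch of the XML fragment these definitions map to, based on the
# property names and defaults declared above (the host/port values shown are the
# class defaults; adjust them to your installation):
#
#     <services>
#         <linknx host="127.0.0.1" port="1028"/>
#         <daemon host="127.0.0.1" port="1029"/>
#     </services>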
class ModeDependentValue(object):
class Value(object):
PROPERTY_DEFINITIONS = PropertyCollection()
PROPERTY_DEFINITIONS.addProperty('value', isMandatory=True, type=float, xmlEntityType=Property.XMLEntityTypes.INNER_TEXT)
PROPERTY_DEFINITIONS.addProperty('modeName', isMandatory=False, type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE, namesInXML='mode', isUnique=True)
def __init__(self, value, modeName):
if type(value) not in [int, float]:
raise ValueError('int or float expected, {0} found'.format(type(value)))
self.value = value
self.modeName = modeName
def copy(self):
v = ModeDependentValue.Value(0.0, None)
self.PROPERTY_DEFINITIONS.cloneProperties(self, v)
return v
@staticmethod
def fromString(string):
return ModeDependentValue.Value(float(string), None)
@staticmethod
def fromXML(xmlElement):
val = ModeDependentValue.Value(0, None)
ModeDependentValue.Value.PROPERTY_DEFINITIONS.readObjectFromXML(val, xmlElement)
return val
def __repr__(self):
return 'Value({value},{modeName})'.format(**vars(self))
PROPERTY_DEFINITIONS = PropertyCollection()
PROPERTY_DEFINITIONS.addProperty('values', isMandatory=True, type=Value, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE|Property.XMLEntityTypes.CHILD_ELEMENT, namesInXML='value', isCollection=True)
def __init__(self, defaultValue=None):
self.values = []
if defaultValue != None:
self.values.append(ModeDependentValue.Value(value=defaultValue, modeName=None))
def copy(self):
v = ModeDependentValue()
self.PROPERTY_DEFINITIONS.cloneProperties(self, v)
return v
@staticmethod
def fromString(string):
        # This is assumed to be the default value (i.e. the one used for modes
        # that do not have a specific value).
return ModeDependentValue(float(string))
@staticmethod
def fromXML(xmlElement):
value=ModeDependentValue()
ModeDependentValue.PROPERTY_DEFINITIONS.readObjectFromXML(value, xmlElement)
return value
def toXml(self, config, property, propertyOwner, xmlDoc, xmlElement):
# Opt for an xml attribute if possible as it makes XML simpler.
if len(self.values) == 1 and self.values[0].modeName == None:
xmlElement.setAttribute(property.namesInXML[0], str(self.values[0].value))
else:
container = xmlDoc.createElement(property.namesInXML[0])
xmlElement.appendChild(container)
for value in self.values:
valueChild = xmlDoc.createElement('value')
container.appendChild(valueChild)
type(value).PROPERTY_DEFINITIONS.toXml(config, value, xmlDoc, valueChild)
    def hasDefaultValue(self):
        # The default value is the one that is not bound to any specific mode.
        return None in self.getDefinedModes()
def getDefinedModes(self):
return {value.modeName for value in self.values}
def getForMode(self, modeName):
if not isinstance(modeName, str) and modeName != None:
raise Exception('A mode name or None is expected.')
for value in self.values:
if value.modeName == modeName:
return value.value
if modeName == None: raise Exception('Default value not found.')
# Fall back to the default value.
return self.getForMode(None)
    def setForMode(self, mode, value):
        # Update the value bound to the given mode, or add a new entry if none exists yet.
        for v in self.values:
            if v.modeName == mode:
                v.value = value
                return
        self.values.append(ModeDependentValue.Value(value, mode))
def inherit(self, other):
""" Inherits values from another instance for modes that have no specific value in this instance. """
logger.reportDebug('{0} inherits from {1}'.format(self, other))
definedModes = self.getDefinedModes()
for value in other.values:
# Do not overwrite the value in this instance!
if value.modeName in definedModes: continue
self.values.append(value.copy())
logger.reportDebug('That gives {0}'.format(self, other))
def __repr__(self):
return 'ModeDependentValue({values})'.format(**vars(self))
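
# Illustrative sketch of how a mode-dependent value can be expressed in XML, based
# on the property definitions above. The mode name "night" is an assumption made
# for the example:
#
#     activationDelay="10"                  <!-- plain attribute: default for all modes -->
#
#     <activationDelay>                     <!-- element form with per-mode overrides -->
#         <value>10</value>                 <!-- default value -->
#         <value mode="night">30</value>    <!-- value used while in mode "night" -->
#     </activationDelay>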
class ActivationCriterion(object):
""" Describes the rule that determine whether a sensor that is involved in a mode can be activated or if its activation should be deferred. """
class Type(object):
SENSOR = 'sensor'
AND = 'and'
OR = 'or'
@staticmethod
def getAll():
return (ActivationCriterion.Type.SENSOR, ActivationCriterion.Type.AND, ActivationCriterion.Type.OR)
def __init__(self):
self._attributes = {}
@property
def attributes(self):
return self._attributes
def copy(self):
clone = ActivationCriterion()
clone._attributes = self._attributes.copy()
ActivationCriterion.PROPERTY_DEFINITIONS.cloneProperties(self, clone)
return clone
def inherit(self, other):
# Inheritance does not apply for this object.
pass
@staticmethod
def makeSensorCriterion(sensorName, whenTriggered = False):
crit = ActivationCriterion()
crit.type = ActivationCriterion.Type.SENSOR
crit.sensorName = sensorName
crit.whenTriggered = whenTriggered
return crit
    @staticmethod
    def makeAndCriterion():
        return ActivationCriterion.makeBooleanCriterion(ActivationCriterion.Type.AND)
    @staticmethod
    def makeOrCriterion():
        return ActivationCriterion.makeBooleanCriterion(ActivationCriterion.Type.OR)
@staticmethod
def makeBooleanCriterion(type):
if not type in [ActivationCriterion.Type.AND, ActivationCriterion.Type.OR]: raise Exception('Invalid boolean criterion type: {0}'.format(type))
crit = ActivationCriterion()
crit.type = type
crit.children = []
return crit
# @staticmethod
# def fromXML(xmlElement):
# type = Configuration.getXmlAttribute(xmlElement, 'type', None, mustBeDefined=True)
# criterion = ActivationCriterion(type)
#
# ActivationCriterion.PROPERTY_DEFINITIONS.readObjectFromXML(criterion, xmlElement)
#
# return criterion
# Define properties outside class because of a reference to the class itself.
ActivationCriterion.PROPERTY_DEFINITIONS = PropertyCollection()
ActivationCriterion.PROPERTY_DEFINITIONS.addProperty('type', isMandatory=True, type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE, values=ActivationCriterion.Type.getAll())
isOfSensorType=lambda context: context.object.type==ActivationCriterion.Type.SENSOR
getSensorNames = lambda configuration, owner: [s.name for s in configuration.sensors]
ActivationCriterion.PROPERTY_DEFINITIONS.addProperty('sensorName', isMandatory=isOfSensorType, type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE, namesInXML='sensor', values=getSensorNames)
ActivationCriterion.PROPERTY_DEFINITIONS.addProperty('whenTriggered', isMandatory=isOfSensorType, type=bool, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE)
ActivationCriterion.PROPERTY_DEFINITIONS.addProperty('children', isMandatory=lambda context: context.object.type in (ActivationCriterion.Type.AND, ActivationCriterion.Type.OR), type=ActivationCriterion, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT, namesInXML='activationCriterion', isCollection=True)
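
# Illustrative sketch of an activation criterion in XML, derived from the property
# definitions above. The sensor names are assumptions made for the example: this
# "or" criterion lets a sensor activate as soon as either entrance sensor is no
# longer triggered:
#
#     <activationCriterion type="or">
#         <activationCriterion type="sensor" sensor="EntranceDoor" whenTriggered="false"/>
#         <activationCriterion type="sensor" sensor="EntranceWindow" whenTriggered="false"/>
#     </activationCriterion>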
class Sensor(object):
class Type(object):
ROOT = 'root'
BOOLEAN = 'boolean'
FLOAT = 'float'
@staticmethod
def getAll():
return [Sensor.Type.ROOT, Sensor.Type.BOOLEAN, Sensor.Type.FLOAT]
@staticmethod
def getBasicTypes():
all = Sensor.Type.getAll()
all.remove(Sensor.Type.ROOT)
return all
PROPERTY_DEFINITIONS = PropertyCollection()
PROPERTY_DEFINITIONS.ignoreCheckIntegrityCallable = lambda sensor: sensor.isClass
# Generic mandatory properties of various types.
PROPERTY_DEFINITIONS.addProperty('name', isMandatory=True, type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE, isUnique=True)
getClassNamesExceptRoot = lambda configuration, owner: [c.name for c in configuration.classes if (not c.isRootType() or owner.name in Sensor.Type.getAll()) and c != owner and not configuration.doesSensorInherit(c, owner)]
PROPERTY_DEFINITIONS.addProperty('type', isMandatory=lambda context: not context.object.isRootType(), type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE, values=getClassNamesExceptRoot)
PROPERTY_DEFINITIONS.addProperty('isClass', isMandatory=True, type=bool, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE)
isNotClass = lambda context: not context.object.isClass
PROPERTY_DEFINITIONS.addProperty('alertName', isMandatory=isNotClass, type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE, namesInXML='alert')
PROPERTY_DEFINITIONS.addProperty('enabledObjectId', isMandatory=isNotClass, type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE, isUnique=True)
PROPERTY_DEFINITIONS.addProperty('watchedObjectId', isMandatory=isNotClass, type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE)
for propName in ['activationDelay', 'prealertDuration', 'alertDuration']:
PROPERTY_DEFINITIONS.addProperty(propName, isMandatory=isNotClass, type=ModeDependentValue, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE | Property.XMLEntityTypes.CHILD_ELEMENT)
PROPERTY_DEFINITIONS.addProperty('activationCriterion', isMandatory=isNotClass, type=ActivationCriterion, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT)
# Mandatory properties for booleans.
isBoolean = lambda context: not context.object.isClass and context.configuration.doesSensorInherit(context.object, Sensor.Type.BOOLEAN)
for propName in ['triggerValue']:
PROPERTY_DEFINITIONS.addProperty(propName, isMandatory=isBoolean, type=bool, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE)
# Mandatory properties for float sensors.
isFloat = lambda context: not context.object.isClass and context.configuration.doesSensorInherit(context.object, Sensor.Type.FLOAT)
PROPERTY_DEFINITIONS.addPropertyGroup([Property(name, type=float, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE) for name in ['lowerBound', 'upperBound']], isFloat)
PROPERTY_DEFINITIONS.addProperty('hysteresis', isMandatory=isFloat, type=float, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE)
# Optional properties.
PROPERTY_DEFINITIONS.addProperty('description', isMandatory=False, type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE|Property.XMLEntityTypes.CHILD_ELEMENT)
PROPERTY_DEFINITIONS.addProperty('persistenceObjectId', isMandatory=False, type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE, isUnique=True)
def __init__(self, type, name, isBuiltIn):
self.type = type # Sensor type from Sensor.Type or base class name if class.
self.name = name # Sensor's name or class's name.
self.isClass = False
self.isBuiltIn = isBuiltIn
self._attributes = {}
@staticmethod
def makeNew(type, name, desc, isClass, isBuiltIn, alertName=None, enabledObjectId=None, watchedObjectId=None, persistenceObjectId=None):
s = Sensor(type, name, isBuiltIn)
s.description = desc
s.isClass = isClass
s.alertName = alertName
s.enabledObjectId = enabledObjectId
s.watchedObjectId = watchedObjectId
s.persistenceObjectId = persistenceObjectId
return s
def isRootType(self):
return self.name == Sensor.Type.ROOT
def addAttribute(self, attributeName, attributeValue):
self._attributes[attributeName] = attributeValue
@staticmethod
def fromXML(xmlElement):
s = Sensor(None, None, isBuiltIn=False)
Sensor.PROPERTY_DEFINITIONS.readObjectFromXML(s, xmlElement)
return s
@property
def attributes(self):
return self._attributes
def __repr__(self):
return '{classOrSensor} {name}'.format(classOrSensor='Class' if self.isClass else 'Sensor', name=self.name)
class Action(object):
def __init__(self, type, eventName):
pass
@property
def type(self):
return self.linknxActionXml.getAttribute('type')
@staticmethod
def fromXML(xmlElement):
e=Action(None, None)
Action.PROPERTY_DEFINITIONS.readObjectFromXML(e, xmlElement)
# Store the input XML to be able to send it to linknx when executing the
# action.
e.linknxActionXml = xmlElement
return e
def toXml(self, config, property, propertyOwner, xmlDoc, xmlElement):
linknxActionClone = xmlDoc.importNode(self.linknxActionXml, True)
xmlElement.appendChild(linknxActionClone)
def __repr__(self):
return 'Action of type={type}'.format(type=self.type)
Action.PROPERTY_DEFINITIONS = PropertyCollection()
Action.PROPERTY_DEFINITIONS.addProperty('type', isMandatory=True, type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE)
# Subject properties: one for the static, Linknx-defined "subject" attribute,
# one for a Homewatcher-specific, dynamic "subject" element.
staticSubjectProp = Property('staticSubject', type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE, namesInXML=('subject',))
parameterizableSubjectProp = Property('parameterizableSubject', type=ParameterizableString, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT, namesInXML=('subject'))
Action.PROPERTY_DEFINITIONS.addPropertyGroup((staticSubjectProp, parameterizableSubjectProp), isGroupMandatory=lambda context: context.object.type == 'send-email')
# Body properties: one for the static, Linknx-defined inner text of the <action>
# element, one for a Homewatcher-specific, dynamic "body" element.
staticBodyProp = Property('staticBody', type=str, xmlEntityType=Property.XMLEntityTypes.INNER_TEXT)
parameterizableBodyProp = Property('parameterizableBody', type=ParameterizableString, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT, namesInXML=('body'))
Action.PROPERTY_DEFINITIONS.addPropertyGroup((staticBodyProp, parameterizableBodyProp), isGroupMandatory=lambda context: context.object.type == 'send-email')
staticValueProp = Property('staticValue', type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE, namesInXML=('value',))
parameterizableValueProp = Property('parameterizableValue', type=ParameterizableString, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT, namesInXML=('value'))
Action.PROPERTY_DEFINITIONS.addPropertyGroup((staticValueProp, parameterizableValueProp), isGroupMandatory=lambda context: context.object.type == 'send-sms')
# All actions are handled by linknx except send-email that has to be reworked by
# Homewatcher to customize email text.
# for propName in ('objectId', 'value'):
# Action.PROPERTY_DEFINITIONS.addProperty(propName, isMandatory=lambda context: context.object.type==Action.Type.CHANGE_OBJECT, type=str, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT|Property.XMLEntityTypes.ATTRIBUTE)
class Event(object):
def __repr__(self):
return 'Event "{type}"'.format(**vars(self))
Event.PROPERTY_DEFINITIONS = PropertyCollection()
Event.PROPERTY_DEFINITIONS.addProperty('type', isMandatory=True, type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE, values=lambda configuration, owner:type(owner).Type.getAll(), isUnique=True)
Event.PROPERTY_DEFINITIONS.addProperty('actions', isMandatory=True, type=Action, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT, namesInXML='action', isCollection=True)
class ModeEvent(Event):
class Type:
ENTERED = 'entered'
LEFT = 'left'
@staticmethod
def getAll():
return [ModeEvent.Type.ENTERED, ModeEvent.Type.LEFT]
class Mode(object):
PROPERTY_DEFINITIONS = PropertyCollection()
PROPERTY_DEFINITIONS.addProperty('name', isMandatory=True, type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE, isUnique=True)
PROPERTY_DEFINITIONS.addProperty('value', isMandatory=True, type=int, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE, isUnique=True)
PROPERTY_DEFINITIONS.addProperty('sensorNames', isMandatory=False, type=str, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT, namesInXML='sensor', isCollection=True, values=lambda configuration, object: [s.name for s in configuration.sensors])
PROPERTY_DEFINITIONS.addProperty('events', isMandatory=False, type=ModeEvent, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT, namesInXML='event', isCollection=True)
def __init__(self, name, value):
self.name = name # Unique identifier for the mode.
self.value = value
self.sensorNames = []
self.events = []
@staticmethod
def fromXML(xmlElement):
m = Mode(None, None)
Mode.PROPERTY_DEFINITIONS.readObjectFromXML(m, xmlElement)
return m
def __repr__(self):
return '{name} [value={value}]'.format(**vars(self))
class ModesRepository:
PROPERTY_DEFINITIONS = PropertyCollection()
PROPERTY_DEFINITIONS.addProperty('objectId', isMandatory=True, type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE|Property.XMLEntityTypes.CHILD_ELEMENT)
# Temporarily removed in version 1. Mode-independent events imply additional
# testing that is beyond the scope of the initial version.
# PROPERTY_DEFINITIONS.addProperty('events', isMandatory=False, type=ModeEvent, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT, namesInXML="event", isCollection=True)
PROPERTY_DEFINITIONS.addProperty('modes', isMandatory=False, type=Mode, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT, namesInXML="mode", isCollection=True)
PROPERTY_DEFINITIONS.addProperty('events', isMandatory=False, type=ModeEvent, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT, namesInXML='event', isCollection=True)
def __init__(self):
self.events = []
self.modes = []
def __iter__(self):
if ModesRepository.PROPERTY_DEFINITIONS.getProperty('modes').isDefinedOn(self):
return self.modes.__iter__()
else:
return [].__iter__()
def __len__(self):
return len(self.modes)
def __getitem__(self, index):
return self.modes[index]
def __repr__(self):
return 'ModesRepository({0})'.format(self.modes)
class AlertEvent(Event):
class Type:
PREALERT_STARTED = 'prealert started'
ALERT_ACTIVATED = 'activated'
ALERT_DEACTIVATED = 'deactivated'
ALERT_PAUSED = 'paused'
ALERT_RESUMED = 'resumed'
ALERT_STOPPED = 'stopped'
ALERT_ABORTED = 'aborted'
ALERT_RESET = 'reset'
SENSOR_JOINED = 'sensor joined'
SENSOR_LEFT = 'sensor left'
@staticmethod
def getAll():
return [AlertEvent.Type.PREALERT_STARTED, AlertEvent.Type.ALERT_ACTIVATED, AlertEvent.Type.ALERT_DEACTIVATED, AlertEvent.Type.ALERT_PAUSED, AlertEvent.Type.ALERT_RESUMED, AlertEvent.Type.ALERT_STOPPED, AlertEvent.Type.ALERT_ABORTED, AlertEvent.Type.ALERT_RESET, AlertEvent.Type.SENSOR_JOINED, AlertEvent.Type.SENSOR_LEFT]
class Alert(object):
PROPERTY_DEFINITIONS = PropertyCollection()
PROPERTY_DEFINITIONS.addProperty('name', isMandatory=True, type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE, isUnique=True)
PROPERTY_DEFINITIONS.addProperty('persistenceObjectId', isMandatory=False, type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE, isUnique=True)
PROPERTY_DEFINITIONS.addProperty('inhibitionObjectId', isMandatory=False, type=str, xmlEntityType=Property.XMLEntityTypes.ATTRIBUTE)
PROPERTY_DEFINITIONS.addProperty('events', isMandatory=False, type=AlertEvent, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT, namesInXML='event', isCollection=True)
def __init__(self):
self.events = []
@staticmethod
def makeNew(id, persistenceObjectId, inhibitionObjectId):
        # Alert's constructor takes no arguments; assign the identifying fields explicitly.
        alert = Alert()
        alert.name = id
        alert.persistenceObjectId = persistenceObjectId
        alert.inhibitionObjectId = inhibitionObjectId
return alert
def __repr__(self):
return 'Alert {name}'.format(**vars(self))
class AlertsRepository(object):
PROPERTY_DEFINITIONS = PropertyCollection()
PROPERTY_DEFINITIONS.addProperty('alerts', isMandatory=True, type=Alert, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT, namesInXML='alert', isCollection=True)
PROPERTY_DEFINITIONS.addProperty('events', isMandatory=False, type=AlertEvent, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT, namesInXML='event', isCollection=True)
def __init__(self):
self.alerts = []
self.events = []
def __iter__(self):
if AlertsRepository.PROPERTY_DEFINITIONS.getProperty('alerts').isDefinedOn(self):
return self.alerts.__iter__()
else:
return [].__iter__()
def __len__(self):
return len(self.alerts)
def __getitem__(self, index):
return self.alerts[index]
def __repr__(self):
return 'AlertsRepository({0})'.format(self.alerts)
class Configuration(object):
class IntegrityException(Exception):
def __init__(self, message, cause = None, problematicObject=None, xmlContext=None):
Exception.__init__(self, message)
self.cause = cause
self._problematicObject = None
self.xmlContext = None
self.problematicObject = problematicObject
@property
def problematicObject(self):
return self._problematicObject
@problematicObject.setter
def problematicObject(self, obj):
self._problematicObject = obj
if self.xmlContext == None and hasattr(self._problematicObject, 'xmlSource'):
self.xmlContext = self._problematicObject.xmlSource
def __str__(self):
s = Exception.__str__(self)
if self.problematicObject != None:
s += '\nProblematic object: {0} of type {1}'.format(self.problematicObject, type(self.problematicObject))
if self.xmlContext != None:
s += '\nXML context: {0}'.format(self.xmlContext)
if self.cause != None:
s += '\nCaused by {0}'.format(self.cause)
return s
PROPERTY_DEFINITIONS = PropertyCollection()
PROPERTY_DEFINITIONS.addProperty('modesRepository', isMandatory=True, type=ModesRepository, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT, namesInXML='modes')
PROPERTY_DEFINITIONS.addProperty('alerts', isMandatory=True, type=AlertsRepository, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT)
PROPERTY_DEFINITIONS.addProperty('sensorsAndClasses', isMandatory=True, type=Sensor, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT, namesInXML=('sensor',), groupNameInXML='sensors', isCollection=True, getter=lambda config, configAgain: config.sensorsAndClassesWithoutBuiltIns)
PROPERTY_DEFINITIONS.addProperty('servicesRepository', isMandatory=False, type=ServicesRepository, xmlEntityType=Property.XMLEntityTypes.CHILD_ELEMENT, namesInXML='services')
def __init__(self):
# Default services repository.
self.servicesRepository = ServicesRepository()
# Add built-in sensor classes.
rootClass = Sensor(None, Sensor.Type.ROOT, True)
rootClass.isClass = True
rootClass.activationDelay = ModeDependentValue(0)
rootClass.activationCriterion = ActivationCriterion.makeSensorCriterion('{name}', False) # {name} is resolved for each sensor so that this criterion is true if the sensor is not triggered.
rootClass.prealertDuration = ModeDependentValue(0)
rootClass.alertDuration = ModeDependentValue(0)
booleanClass = Sensor(Sensor.Type.ROOT, Sensor.Type.BOOLEAN, True)
booleanClass.isClass = True
booleanClass.triggerValue = True
floatClass = Sensor(Sensor.Type.ROOT, Sensor.Type.FLOAT, True)
floatClass.isClass = True
self.sensorsAndClasses = [rootClass, booleanClass, floatClass]
@staticmethod
def parseFile(filename):
# xsdFilename = os.path.join(os.path.dirname(__file__), 'config.xsd')
# schema = etree.XMLSchema(file=xsdFilename)
# parser = etree.XMLParser(schema=schema)
# try:
# tree = etree.parse(source=filename, parser=parser)
# except:
# logger.reportError('{0} parse errors.'.format(len(parser.error_log)))
# errIx = 0
# for err in parser.error_log:
# errIx += 1
# logger.reportError('#{ix}@{line}:{col} {message}'.format(ix=errIx, line=err.line, col=err.column, message=err.message))
# raise
doc = xml.dom.minidom.parse(filename)
return Configuration.parse(doc)
@staticmethod
def parseString(string):
doc = xml.dom.minidom.parseString(string)
return Configuration.parse(doc)
@staticmethod
def parse(xmlDocument):
config = xmlDocument.getElementsByTagName('config')[0]
configuration = Configuration()
context = None
try:
Configuration.PROPERTY_DEFINITIONS.readObjectFromXML(configuration, config)
# # Sensors (classes and concrete ones).
# context = 'sensors block'
# classesIt = Configuration.getElementsInConfig(config, 'class', 'sensors')
# sensorsIt = Configuration.getElementsInConfig(config, 'sensor', 'sensors')
# for xmlElement in itertools.chain(classesIt, sensorsIt):
# context = xmlElement.toxml()
# # Consider 'name' and 'type' as optional for now. Integrity checks on the
# # built configuration will take care of them later (which is
# # better than checking only the XML way to define
# # configuration).
# sensor = Sensor(Configuration.getXmlAttribute(xmlElement, 'type', None, mustBeDefined=False), Configuration.getXmlAttribute(xmlElement, 'name', None, mustBeDefined=False))
# sensor.isClass = xmlElement.tagName.lower() == 'class'
#
# # Automatically read properties that come from attributes or
# # child elements.
# Sensor.PROPERTY_DEFINITIONS.readObjectFromXML(sensor, xmlElement)
#
# # Xml attributes can be used as parameters for parameterized
# # values in the config (this is advanced usage).
# for k, v in xmlElement.attributes.items():
# sensor.addAttribute(k, v)
#
# configuration.addSensor(sensor)
#
# # Modes.
# context = 'modes block'
# for modesElement in Configuration.getElementsInConfig(config, 'modes', None):
# context = modesElement.toxml()
# ModesRepository.PROPERTY_DEFINITIONS.readObjectFromXML(configuration.modes, modesElement)
#
# # Alerts.
# context = 'alerts block'
# for alertElement in Configuration.getElementsInConfig(config, 'alert', 'alerts'):
# context = alertElement.toxml()
# alert = Alert(None, None)
# Alert.PROPERTY_DEFINITIONS.readObjectFromXML(alert, alertElement)
# configuration.addAlert(alert)
except Configuration.IntegrityException as e:
            if e.xmlContext is None:
                e.xmlContext = context
raise e
except ValueError as e:
raise Configuration.IntegrityException('An exception occurred while parsing {0}'.format(context), e)
return configuration
def toXml(self):
# Creates a new empty DOM.
doc = xml.dom.minidom.Document()
config = doc.createElement('config')
doc.appendChild(config)
Configuration.PROPERTY_DEFINITIONS.toXml(self, self, doc, config)
return doc
@staticmethod
def parseProperty(object, xmlElement, propertyDefinition):
# Parse individual properties if definition is a group.
        attributeValue = Configuration.getXmlAttribute(xmlElement, attributeName, defaultAttributeValue)
vars(object)[attributeName] = valueBuilder(attributeValue)
@staticmethod
def getXmlAttribute(xmlElement, attributeName, defaultValue=None, mustBeDefined=False):
"""
Returns the value of the given element's attribute or None if element does not have such attribute.
Unlike the getAttribute method on Element, this method does not return an empty string but None whenever attribute does not exist.
"""
if(xmlElement.hasAttribute(attributeName)):
return xmlElement.getAttribute(attributeName)
else:
if mustBeDefined:
raise Configuration.IntegrityException('Element {0} misses attribute {1}'.format(xmlElement.tagName, attributeName), xmlContext=xmlElement.toxml() )
else:
return defaultValue
@staticmethod
def getElementsInConfig(config, sectionName, groupName):
if not groupName is None:
for sections in config.childNodes:
if sections.nodeType != sections.ELEMENT_NODE or sections.tagName != groupName: continue
for section in sections.childNodes:
if section.nodeType != section.ELEMENT_NODE or section.tagName != sectionName: continue
yield section
else:
for section in config.childNodes:
if section.nodeType != section.ELEMENT_NODE or section.tagName != sectionName: continue
yield section
@staticmethod
def getTextInElement(elt, mustFind = True):
text = None
for node in elt.childNodes:
if node.nodeType == node.TEXT_NODE:
if not text:
text = ''
text += node.data
if mustFind and not text:
raise Exception('Missing text in element {0}'.format(elt.nodeName))
return text
def getClassesInheritedBySensor(self, sensor, includesBuiltIns=False):
s = sensor if type(sensor) == Sensor else self._getSensorOrClassByName(sensor)
if s.isRootType():
return []
else:
inheritedClasses = self.getClassesInheritedBySensor(s.type, includesBuiltIns)
baseClass = self.getClassByName(s.type)
if baseClass.isBuiltIn and not includesBuiltIns:
return inheritedClasses
else:
return [baseClass] + inheritedClasses
def doesSensorInherit(self, sensor, classs):
if isinstance(sensor, Sensor):
s = sensor
else:
s = self._getSensorOrClassByName(sensor)
if s == None:
return False
if isinstance(classs, Sensor):
className = classs.name
else:
className = classs
if s.isRootType():
return False
elif s.type == className:
return True
else:
return self.doesSensorInherit(s.type, className)
def checkIntegrity(self):
"""
Checks that the configuration described by this object is full of integrity.
An exception is raised if a problem is detected. Otherwise, it is safe to assume that configuration is well defined.
"""
Configuration.PROPERTY_DEFINITIONS.checkIntegrity(self, self)
@property
def sensors(self):
if not self.sensorsAndClasses: return []
return [s for s in self.sensorsAndClasses if not s.isClass]
@property
def classes(self):
if not self.sensorsAndClasses: return []
return [s for s in self.sensorsAndClasses if s.isClass]
@property
def sensorsAndClassesWithoutBuiltIns(self):
return [s for s in self.sensorsAndClasses if not s.isBuiltIn]
def getBuiltInRootClass(self, sensorOrClass):
if isinstance(sensorOrClass, str):
sensorOrClass = self._getSensorOrClassByName(sensorOrClass)
# May happen if None has been passed or if no sensor by the given name
# could be found (can happen on a misconfigured instance of
# homewatcher). This should not crash.
if sensorOrClass == None: return None
if not sensorOrClass.isBuiltIn:
return self.getBuiltInRootClass(self.getClassByName(sensorOrClass.type))
else:
return sensorOrClass
def getModeByName(self, modeName):
modes = [m for m in self.modesRepository.modes if m.name == modeName]
if modes:
return modes[0]
else:
raise Exception('No mode {0}.'.format(modeName))
def resolve(self, checkIntegrityWhenDone=True):
resolvedSensors = []
for sensor in self.sensorsAndClasses:
if sensor.isClass:
resolvedSensors.append(sensor)
else:
resolvedSensors.append(self._getResolvedSensor(sensor))
self.sensorsAndClasses = resolvedSensors
# Force integrity checks immediately, as this guarantees that resolution
# did not lead to weird results.
if checkIntegrityWhenDone: self.checkIntegrity()
def _getResolvedSensor(self, sensor):
if sensor.isClass: raise Exception('Sensor classes cannot be resolved.')
resolvedCopy = Sensor(sensor.type, sensor.name, sensor.isBuiltIn)
currentClass = sensor
resolvedCopyVars = vars(resolvedCopy)
# Recursively assign members from the whole ancestor branch.
primitiveTypes = (type(None), str, int, float, bool)
customTypes = (ModeDependentValue, ActivationCriterion)
while currentClass != None:
for k, v in vars(currentClass).items():
if k == '_attributes':
newAttributes = v.copy()
newAttributes.update(resolvedCopy._attributes)
resolvedCopy._attributes = newAttributes
continue
doesMemberExist = not(currentClass == sensor or not k in resolvedCopyVars or resolvedCopyVars[k] is None)
if isinstance(v, primitiveTypes):
if not doesMemberExist:
resolvedCopyVars[k] = v
elif isinstance(v, customTypes):
if not doesMemberExist:
resolvedCopyVars[k] = v.copy()
else:
resolvedCopyVars[k].inherit(v)
else:
raise Exception('Unsupported member {0}={1}, type={2}'.format(k, v, type(v)))
if not currentClass.isRootType():
currentClass = self.getClassByName(currentClass.type)
else:
currentClass = None
# # Replace the base class by the first class that still exists in the
# # resolved configuration: this is the first builtin class. In case
# # something goes wrong when searching for this builtin class, simply
# # reuse the base class of the original sensor. This will not work
# # properly but configuration's integrity checks will be more accurate.
# builtinRootClass = self.getBuiltInRootClass(sensor.type)
# resolvedCopy.type = sensor.type if builtinRootClass is None else builtinRootClass.name
# Resolve parameterized string fields.
self.resolveObject(resolvedCopy, {})
return resolvedCopy
def getClassByName(self, name):
c = self._getSensorOrClassByName(name)
if c == None or not c.isClass: return None
return c
def getSensorByName(self, name):
s = self._getSensorOrClassByName(name)
if s is None or s.isClass: return None
return s
def _getSensorOrClassByName(self, name):
# Make sure we do not compare None to any sensor's name. If None is
# passed, this query must return None even if the configuration is
# badly defined.
if name == None: return None
byNames = [o for o in self.sensorsAndClasses if o.name == name]
if len(byNames) == 0:
return None
elif len(byNames) > 1:
raise Configuration.IntegrityException('Those sensors are homonymous: {0}'.format(byNames))
else:
return byNames[0]
@staticmethod
def resolveObject(obj, attributes):
if obj is None: return obj
# Logic: some object's members may be parameterized with attributes
# stored in a 'attributes' dictionary. Attributes may themselves be
# parameterized with other attributes.
# First, resolve attributes, taking care of the priority order if
# required. Then, resolve members. Last, resolve members that are
# objects by passing them the dictionary of attributes as a base source
# for attributes.
# Notice that the attributes passed to this method are assumed to be
# already resolved.
# Define comparator method.
def parameterSort(a, b):
paramsInA = regex.findall(obj.attributes[a])
paramsInB = regex.findall(obj.attributes[b])
if b in paramsInA:
if a in paramsInB:
raise Exception('{a} and {b} are mutually dependent.'.format(a=a, b=b))
# b must be resolved after a.
return 1
elif a in paramsInB:
# a must be resolved after b.
return -1
else:
# a and b are independent.
return 0
# Combine object's attributes with the passed ones. Object's attributes
# take precedence in case of name conflicts.
combinedAttributes = attributes.copy()
if hasattr(obj, 'attributes'):
combinedAttributes.update(obj.attributes)
# Resolve object's attributes that need to.
regex = re.compile('{([a-zA-Z]\w*)}')
if hasattr(obj, 'attributes'):
parameterizedAttributeNames = []
for k, v in obj.attributes.items():
if isinstance(v, str) and regex.search(v):
# Store attribute name, not its value! The comparator will
# evaluate the attribute when needed.
parameterizedAttributeNames.append(k)
# Sort attributes by order of resolution.
parameterizedAttributeNames = sorted(parameterizedAttributeNames, key=cmp_to_key(parameterSort))
# Resolve them.
for attributeName in parameterizedAttributeNames:
attrValue = obj.attributes[attributeName]
attrValue = attrValue.format(**combinedAttributes)
obj.attributes[attributeName] = attrValue
combinedAttributes[attributeName] = attrValue
# Resolve string members and internal objects.
isString = lambda o: isinstance(o, str)
isObject = lambda o: not isinstance(o, (type(None), int, float, bool))
resolve = lambda v: v.format(**combinedAttributes) if isString(v) else Configuration.resolveObject(v, combinedAttributes) if isObject(v) else v
if isinstance(obj, (list, tuple)):
for i in range(len(obj)):
obj[i] = resolve(obj[i])
elif isinstance(obj, dict):
for k, v in obj.items():
obj[k] = resolve(v)
else:
objVars = vars(obj)
for k, v in objVars.items():
if k == 'xmlSource': continue
objVars[k] = resolve(v)
return obj
def addAlert(self, alert):
self.alerts.append(alert)
def getAlertByName(self, name):
if name == None: return None
for a in self.alerts:
if a.name == name:
return a
raise KeyError(name)
@staticmethod
def replaceParametersInString(inputString, parameters):
""" Replaces parameters identified by their name enclosed in curly brackets by their value specified in the passed dictionary. """
outputString = inputString
for parameterName, parameterValue in parameters.items():
            outputString = outputString.replace('{{{0}}}'.format(parameterName), parameterValue)
return outputString
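# Illustrative sketch (hypothetical helper, not part of the original module; assumes the
# module's own imports such as re and functools.cmp_to_key used by resolveObject above):
# resolveObject substitutes '{attribute}' placeholders in string members from the object's
# own 'attributes' dictionary, and replaceParametersInString does the same for a plain string.
def _parameter_resolution_example():
    class _Dummy(object):
        pass
    sensor = _Dummy()
    sensor.attributes = {'name': 'kitchen'}   # attributes available for substitution
    sensor.label = 'Sensor {name}'            # parameterized member
    Configuration.resolveObject(sensor, {})   # sensor.label becomes 'Sensor kitchen'
    greeting = Configuration.replaceParametersInString('Hello {name}', {'name': 'kitchen'})
    return sensor.label, greeting             # ('Sensor kitchen', 'Hello kitchen')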
|
Our vision is to bring integrity and trust back to the real estate business. Our goal is to make a difference by showing that hard work, trustworthiness, and ethical practices trump all else. The number one priority is customer satisfaction.
We originated in the Mississippi Delta and have branched out to many different facets of real estate through the years. Our experiences have taught us how to communicate effectively and achieve exactly what our clients want.
We look forward to growing throughout Central Mississippi, specifically in the Madison County area.
|
"""
gstarS and gstarR fits from Wantz and Shellard, 0910.1066, Appendix A
"""
import numpy as np
a0S=1.36
a1S=np.asarray([0.498,0.327,0.579,0.140,0.109])
a2S=np.asarray([-8.74,-2.89,-1.79,-0.102,3.82])
a3S=np.asarray([0.693,1.01,0.155,0.963,0.907])
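# Both fits below have the form ln g*(T) = a0 + sum_i a1[i] * (1 + tanh((t - a2[i]) / a3[i]))
# with t = ln(T/GeV); the input temperature is given in eV, so it is divided by 1e9 before
# taking the logarithm, and the functions return exp(f).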
def gS(T):
"""
The input temperature is measured in eV
gstarS as a function of T from fits
"""
T=T/1.e9
t=np.log(T)
f=a0S
for i in range(0,5):
f=f+a1S[i]*(1.+np.tanh((t-a2S[i])/a3S[i]))
return np.exp(f)
a0R=1.21
a1R=np.asarray([0.572,0.330,0.579,0.138,0.108])
a2R=np.asarray([-8.77,-2.95,-1.80,-0.162,3.76])
a3R=np.asarray([0.682,1.01,0.165,0.934,0.869])
def gR(T):
"""
The input temperature is measured in eV
gstarR as a function of T from fits
"""
T=T/1.e9
t=np.log(T)
f=a0R
for i in range(0,5):
f=f+a1R[i]*(1.+np.tanh((t-a2R[i])/a3R[i]))
return np.exp(f)
#import matplotlib.pyplot as plt
#T=np.logspace(-6,3,100)
#plt.plot(T,gS(T),linewidth=2.0)
#plt.plot(T,gR(T),'-r',linewidth=2.0)
#plt.ylim([1.,200.])
#plt.xscale('log')
#plt.yscale('log')
#plt.show()
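# Illustrative usage (not part of the original fits): evaluate both fits at an assumed
# temperature of 2e8 eV (about 0.2 GeV); both functions take the temperature in eV.
def example_gstar(T_eV=2.e8):
    return gS(T_eV), gR(T_eV)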
|
"""
molgenis-impute v.0.7.0
Alexandros Kanterakis, [email protected]
Please read documentation in README.md
"""
import argparse
from imputation import Imputation
if __name__ == '__main__':
description = """
MOLGENIS-compute imputation version 0.7.0
"""
parser = argparse.ArgumentParser(description=description, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('--tools_dir', help='Installation directory for imputation tools. Default: <current working dir>/tools')
    parser.add_argument('--reference_dir', help='Installation directory for the imputation reference panels. Default: <current working dir>/resources/imputationReference')
parser.add_argument('--list', help='List of all available reference panels either already downloaded, or available for downloading', action='store_true')
parser.add_argument('--dl_tools', help='download all necessary imputation tools', action='store_true')
parser.add_argument('--dl_reference', help='download and install an imputation reference panel')
    parser.add_argument('--study', help='Absolute path of the directory of the study panel')
parser.add_argument('--output', help='Absolute path of the output (results) directory')
    parser.add_argument('--chromosomes', help='comma separated values of chromosomes (If not set, imputation for all chromosomes will be performed)')
parser.add_argument('--additional_shapeit_parameters', help='Extra command line arguments to pass to SHAPEIT tool', default=' ')
parser.add_argument('--additional_impute2_parameters', help='Extra command line arguments to pass to impute2 tool', default=' ')
parser.add_argument('--position_batch_size', help='Size of the chromosomal size of each imputation batch', default=5000000, type=int)
parser.add_argument('--sample_batch_size', help='Minimum number of samples in imputation batches', default=500, type=int)
parser.add_argument('--reference', help='name of the imputation reference panel')
parser.add_argument('--action', help='Action to do: liftover, phase, impute', choices=['liftover', 'phase', 'impute'])
parser.add_argument('--add_reference', help='Add a new reference panel', action='store_true')
parser.add_argument('--backend', help='Execution environment. Default: local', choices=['pbs', 'grid', 'local'], default='local')
parser.add_argument('--nosubmit', help='Create scripts but don\'t submit them for execution', action='store_true')
args = parser.parse_args()
imp = Imputation(tools_dir=args.tools_dir, reference_dir=args.reference_dir)
if args.dl_tools:
imp.install_imputation_tools()
elif args.list:
imp.list_reference_panels()
elif args.dl_reference:
imp.install_reference_panel(args.dl_reference)
elif args.add_reference:
imp.add_custom_reference_panels()
elif args.action:
if not args.study:
raise Exception('You need to define a directory where the study panel is, in order to perform this action (parameter --study)')
if not args.output:
            raise Exception('You need to define a directory where the output results will be stored (parameter --output)')
if args.action == 'liftover':
imp.perform_liftover(args.study, args.output, backend=args.backend, submit=not args.nosubmit)
elif args.action == 'phase':
imp.perform_phase(args.study, args.output, additional_shapeit_parameters=args.additional_shapeit_parameters, backend=args.backend, submit=not args.nosubmit)
elif args.action == 'impute':
if not args.reference:
raise Exception('You need to define a reference panel. Use the --reference parameter. For a list for all available reference panels, use --list')
imp.perform_impute(args.study, args.output, args.reference,
additional_impute2_parameters=args.additional_impute2_parameters,
custom_chromosomes=args.chromosomes,
sample_batch_size=args.sample_batch_size,
position_batch_size=args.position_batch_size,
backend=args.backend,
submit=not args.nosubmit)
else:
print description
print 'For a full set of options run:'
print 'python molgenis-impute.py --help'
print 'For documentation check: https://github.com/molgenis/molgenis-imputation'
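# Illustrative invocations (paths and the panel name are placeholders; see README.md):
# python molgenis-impute.py --dl_tools
# python molgenis-impute.py --list
# python molgenis-impute.py --action liftover --study /path/to/study --output /path/to/results
# python molgenis-impute.py --action impute --study /path/to/study --output /path/to/results --reference <panel>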
|
Nutrition is the process of providing or obtaining the food necessary for health and growth. So what nutrients are needed during a healthy pregnancy?
It’s always important to eat a balanced diet — and it’s even more important when you’re pregnant because what you eat is the main source of nutrients for your baby. However, many women don’t get enough iron, folate, calcium, vitamin D, or protein. So when you are pregnant, it is important for you to increase the amounts of foods you eat with these nutrients.
Most women can meet their increased needs with a healthy diet that includes plenty of fruits, vegetables, whole grains, and proteins. According to the American College of Obstetricians and Gynecologists (ACOG), you should try to eat a variety of foods from these basic food groups. If you do, you are likely to get all the nutrients you need for a healthy pregnancy.
Calcium helps to build strong bones and teeth. Main sources include milk, cheese, yogurt, and sardines. During pregnancy you need 1,000 milligrams (mg) daily.
Iron helps red blood cells deliver oxygen to your baby. Sources include lean red meat, dried beans, peas, and iron-fortified cereals. During pregnancy you need 27 mg daily.
Vitamin A helps promote healthy skin, eyesight, and bone growth. Carrots, dark leafy greens, and sweet potatoes are good sources. During pregnancy you need 770 micrograms daily.
Vitamin C promotes healthy gums, teeth, and bones, and helps your body absorb iron. Good sources include citrus fruit, broccoli, tomatoes, and strawberries. During pregnancy you need 85 mg daily.
Vitamin D aids your body in the absorption of calcium to help build your baby’s bones and teeth. Sources include exposure to sunlight, fortified milk, and fatty fish, such as salmon. During pregnancy you need 600 international units (IUs) daily.
Vitamin B6 helps form red blood cells and helps your body use protein, fat, and carbohydrates. You can find vitamin B6 in beef, liver, pork, whole-grain cereals, and bananas. During pregnancy you need 1.9 mg daily.
Vitamin B12 helps form red blood cells and maintains your nervous system. You can find this vitamin only in animal products. Good sources include liver, meat, fish, poultry, and milk. During pregnancy you need 2.6 micrograms daily.
Folate (folic acid) is a B vitamin important in the production of blood and protein; it also reduces the risk of neural tube defects (birth defects of the brain and spinal cord). You can find folate in green, leafy vegetables, liver, orange juice, legumes (beans, peas, lentils), and nuts.
You must get at least 400 micrograms of folate daily before pregnancy and during the first 12 weeks of pregnancy to reduce the risk of neural tube defects. During pregnancy, doctors recommend you get 600 micrograms daily.
Any questions you may have about your pregnancy nutrition should always be directed to your physician or OB/GYN.
|
# Listal.py
# 08/11/2016 - 31/03/2019
# v 1.2.2
import urllib.request, urllib.parse
import http.cookiejar, ssl
import bs4
import queue
import threading
import re
import os
import sys
import argparse
import time
# Scrapers
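# Phase I (get_ipages): walk the /pictures pages of a list or profile and collect the numeric
# ids of the individual image pages. Phase II (get_images): open each image page and extract
# the direct image URL. Both phases drain a shared queue from several worker threads.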
def get_ipages():
global IMG, STOP_AT
while not qq.empty():
local = threading.local()
local.url = qq.get()
local.keep_going = True
local.skip = False
if STOP_AT is not None and int(local.url.split('//')[2]) > STOP_AT:continue
while local.keep_going:
try:local.html = urllib.request.urlopen(local.url,timeout=10)
except urllib.error.HTTPError as HERR:
if HERR.code == 404:
local.keep_going = False
local.skip = True
continue
except:continue
if local.html.getcode() == 200:local.keep_going = False
if local.skip:continue
local.data = local.html.read()
local.soup = bs4.BeautifulSoup(local.data,'lxml')
for each in local.soup.find_all('div','imagewrap-inner'):
local.img = int(each.a.get('href').strip().split('/')[-1])
if IMG is None:ipages.append(local.img)
elif local.img > IMG:ipages.append(local.img)
elif local.img == IMG:STOP_AT = int(local.url.split('//')[2])
else:pass
def get_images():
while not qq.empty():
local = threading.local()
local.url = qq.get()
local.keep_going = True
local.skip = True
local.retry = 0
while local.keep_going and local.retry < 5:
try:
local.retry += 1
local.html = urllib.request.urlopen(local.url,timeout=25)
if local.html.getcode() == 200:
local.keep_going = False
local.skip = False
except urllib.error.HTTPError as HERR:
if HERR is not None and HERR.code == 404:
local.keep_going = False
continue
except:continue
if local.skip:continue
for i in range(2):
try:
local.data = local.html.read()
images.append(find_image(local.data))
except:continue
break
# Functions
def mksoup(url):
tmp = urllib.request.urlopen(url)
return bs4.BeautifulSoup(tmp.read(),"lxml")
def find_image(data):
return bs4.BeautifulSoup(data,"lxml").find('img','pure-img').get('src').replace("https:","http:")
def post_req():
tmp = urllib.parse.urlencode({ 'listid' : list_id , 'offset' : offset})
return urllib.request.urlopen("https://www.listal.com/item-list/",tmp.encode())
def mkqueue(url):
global no_pics,no_pages
no_pics = int(mksoup(url).find('a','picturesbutton').span.text.strip())
no_pages = no_pics/50
if no_pages.is_integer():no_pages = int(no_pages)
else:no_pages = int(no_pages) + 1
for i in range(int(args.first_page),no_pages+1):qq.put(url+"/pictures//"+str(i))
def enqueue():
global qq,ipages
if not qq.empty():print("WARNING : Queue was not empty.")
qq = queue.Queue()
ipages = sorted(set(ipages))
for each in ipages:
qq.put("http://www.listal.com/viewimage/"+str(each)+"h")
def stop_at(IMG):
    global ipages
    tmp = []
    for each in ipages:
        if each > IMG:tmp.append(each)
    ipages = tmp
def update_progress():
progress = 100 - int((100*qq.qsize()) / len(ipages))
pbar = "\r {:0>3}% [{:<50}] ({},{}) ".format(progress, '#'*int((progress/2)), (len(ipages)-qq.qsize()), len(ipages))
sys.stdout.write(pbar)
sys.stdout.flush()
def get_listinfo(url):
global list_type,list_id,list_name,total_pic,offset
soup = mksoup(url)
list_type = soup.find(id='customlistitems').get('data-listformat')
if list_type != "images":
print("This is not a Image list. Currently listal.dl suppots only Image lists.")
quit()
list_id = int(soup.find(id='customlistitems').get('data-listid'))
try:list_name = soup.find('div','headertitle').text.strip()
except AttributeError:list_name = urls.path[6:].replace('-',' ').title()
total_pic = int(soup.find(id='customlistitems').div.get('data-itemtotal'))
offset = int(soup.find('div','loadmoreitems').get('data-offset'))
for each in soup.find_all('div','imagelistbox'):
ipages.append(int(each.a.get('href').strip().split('/')[-1]))
def get_list():
global offset
while True:
data = post_req().read()
for each in sorted(set(re.findall("viewimage\\\/([0-9]{4,10})'" ,data.decode()))):
ipages.append(int(each))
offset = offset + 1
if offset == total_pic:break
def write():
if urls.path.startswith("/list/"):fhand = open(list_name+".txt",'a')
else:fhand = open(name+".txt",'a')
fhand.write("### {} : {} Images\n".format(finished,len(images)))
for each in images:fhand.write(each+"\n")
fhand.close()
# Global
qq = queue.Queue()
threads = []
ipages = []
images = []
IMG = None
STOP_AT = None
started = time.time()
# Main
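# Flow: parse the URL; for a /list/ URL fetch the item list through POST requests, otherwise
# enqueue the profile's picture pages; phase I threads then collect image-page ids and
# phase II threads resolve the direct image URLs, which are finally appended to a text file.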
parser = argparse.ArgumentParser(description='Scrape Images from \'listal.com\'.')
parser.add_argument('url', type=str,
help='URL to the List or Profile on listal.com.')
parser.add_argument('--from', dest='first_page', type = int, default = None, required = False,
    help='The profile page number to start scraping images from')
parser.add_argument('--upto', dest='last_page' , type = int, default = None, required = False,
    help='Scrape images only up to this page number.')
parser.add_argument('--threads', dest='threads', type = int, default = 10, required = False,
help='No. of threads to use.')
args = parser.parse_args()
urls = urllib.parse.urlparse(args.url)
if urls.netloc != 'www.listal.com':
print ("Check the Entered URL.")
quit()
# CookieJar initialization
urllib.request.install_opener(urllib.request.build_opener(urllib.request.HTTPCookieProcessor(http.cookiejar.CookieJar())))
if urls.path.startswith("/list/"):
if args.first_page is not None:print("Entered URL is of a list. The '--from' option is ignored.")
if args.last_page is not None:print("Entered URL is of a list. The '--upto' option is ignored.")
get_listinfo(urls.geturl())
get_list()
else:
urls = urllib.parse.urlparse(urls.geturl().split('/picture')[0])
name = urls.path[1:].replace('-',' ').title()
if args.first_page is None:args.first_page = 1
if args.last_page is not None:
for i in range(args.first_page,args.last_page+1):qq.put(args.url+"/pictures//"+str(i))
else:mkqueue(urls.geturl())
for n in range(args.threads):
t = threading.Thread(target=get_ipages)
threads.append(t)
t.start()
for t in threads:t.join()
print("Phase I Complete.",len(ipages),"Images Found.")
print("Time Taken :",time.strftime("%H:%M:%S",time.gmtime(time.time()-started)))
print("Phase II :")
enqueue()
threads.clear()
for n in range(args.threads):
t = threading.Thread(target=get_images)
threads.append(t)
t.start()
while not qq.empty():
update_progress()
sys.stdout.flush()
time.sleep(1)
for t in threads:t.join()
time_taken = time.time() - started
finished = time.strftime("%d/%m/%Y %H:%M",time.localtime())
write()
print("Time Taken :",time.strftime("%H:%M:%S",time.gmtime(time_taken)))
# END
|
#coding:UTF-8
__author__ = 'dj'
from app import app
from flask import Flask, render_template, request, flash, redirect, url_for, send_from_directory
from forms import Upload, ProtoFilter,User_and_pwd
from utils.upload_tools import allowed_file, get_filetype, random_name
from utils.gxn_topo_handler import getfile_content,getall_topo,showdata_from_id,topo_filter
from utils.gxn_topo_decode import TopoDecode
from utils.gxn_get_sys_config import Config
from utils.connect import Connect
from utils.db_operate import DBClass
from utils.display import multipledisplay,singledisplay,NetID_list,NetID_all,AppID_all,selectall,node_time_display,topo_display,energy_display,flowdisplay,protodisplay,nodesearch_display,appflowdisplay
from utils.error import data_error_new,syn_error
from utils.old_data_display import Display, Modify
from utils.gxn_supervisor import getAllProcessInfo,stopProcess,startProcess,startAllProcesses,stopAllProcesses
import os
import collections
import time,datetime
from time import strftime
# import sqlite3
import socket
import json
import math
# Make functions available inside the Jinja templates
app.jinja_env.globals['enumerate'] = enumerate
# Global variables
PCAP_NAME = '' # name of the uploaded file
# PD = PcapDecode() # parser
PDF_NAME = ''
# ---------------------------------------------------------------------------
PCAPS = 'yeslogin' #login
HIT_USER ='root' # username
HIT_PWD ='xiaoming' # default password
TOPODATA = None #login
REALDATA = None #login
DATABASE =DBClass()
# TOPODATA_DICT =collections.OrderedDict()
# TPDECODE =TopoDecode()
NODE_DICT_NET=dict()
NODE_SET=set()
#-------------------------------------------------------- Home page & upload ---------------------------------------------
# Home page
@app.route('/', methods=['POST', 'GET'])
@app.route('/index/', methods=['POST', 'GET'])
def index():
if PCAPS == None:
return redirect(url_for('login'))
else:
return render_template('./home/index.html')
# return render_template('./login/login.html')
# Time range selection for historical data
@app.route('/upload/', methods=['POST', 'GET'])
@app.route('/upload', methods=['POST', 'GET'])
def upload():
if PCAPS==None:
        return redirect(url_for('login'))
else:
json_dict = dict()
configfile = Connect()
json_dict = configfile.display_config()
return render_template('./upload/upload.html',json_dict = json_dict)
@app.route('/upload_modify/', methods=['POST', 'GET'])
@app.route('/upload_modify', methods=['POST', 'GET'])
def upload_modify():
c = Connect()
config_dicts = c.all_config_json() # read config.json and put all items in this dict
if request.method == 'POST':
val1 = request.form.get("id")
if val1:
config_dicts["id"] = val1
val2 = request.form.get("HeartIntSec")
if val2:
config_dicts["HeartIntSec"] = val2
val3 = request.form.get("AckHeartInt")
if val3:
config_dicts["AckHeartInt"] = val3
val4 = request.form.get("rootAddr")
if val4:
config_dicts["rootAddr"] = val4
val5 = request.form.get("ftpuser")
if val5:
config_dicts["ftpuser"] = val5
val6 = request.form.get("ftphost")
if val6:
config_dicts["ftphost"] = val6
val7 = request.form.get("ftpPwd")
if val7:
config_dicts["ftpPwd"] = val7
val8 = request.form.get("ftpPort")
if val8:
config_dicts["ftpPort"] = val8
val9 = request.form.get("serverIp")
if val9:
config_dicts["serverIp"] = val9
json_config_dicts = json.dumps(config_dicts,sort_keys=True,indent =4,separators=(',', ': '),encoding="gbk",ensure_ascii=True)
# print json_config_dicts
# conf_file = os.path.join(app.config['CONFIG_FOLDER'],"config.json")
# with open(conf_file, 'w') as f:
# f.write(json_config_dicts)
# f.close()
c.update_config(json_config_dicts)
return "It works"
else:
return "Error when writing to the config.json file"
# rtmetric display
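# Each display handler below accepts a POST whose 'field_name' value holds a time range of
# the form "YYYY-mm-dd HH:MM:SS - YYYY-mm-dd HH:MM:SS" (sliced as [0:19] and [22:41]);
# a plain GET defaults to the last six hours.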
@app.route('/rtmetricdisplay/', methods=['POST', 'GET'])
@app.route('/rtmetricdisplay', methods=['POST', 'GET'])
def rtmetricdisplay():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
time1=time.time()
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
rtxdata_list = multipledisplay(start_time,end_time,"rtimetric")
return render_template('./dataanalyzer/rtmetricdisplay.html',rtxdata_list=rtxdata_list[0],time=rtxdata_list[1])
else:
t = time.time()
current_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
previous_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(t - 6*60*60))
rtxdata_list = multipledisplay(previous_time,current_time,"rtimetric")
return render_template('./dataanalyzer/rtmetricdisplay.html',rtxdata_list=rtxdata_list[0],time=rtxdata_list[1])
# Current draw over time
@app.route('/currentdisplay/', methods=['POST', 'GET'])
@app.route('/currentdisplay', methods=['POST', 'GET'])
def currentdisplay():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
currentdata_list = multipledisplay(start_time,end_time,"electric")
return render_template('./dataanalyzer/currentdisplay.html',currentdata_list=currentdata_list[0],time=currentdata_list[1])
else:
t = time.time()
current_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
previous_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(t - 6*60*60))
currentdata_list = multipledisplay(previous_time,current_time,"electric")
return render_template('./dataanalyzer/currentdisplay.html',currentdata_list=currentdata_list[0],time=currentdata_list[1])
# Time synchronization display
@app.route('/syntime/', methods=['POST', 'GET'])
@app.route('/syntime', methods=['POST', 'GET'])
def syntime():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
syntimedata_list = multipledisplay(start_time,end_time,"syntime")
return render_template('./dataanalyzer/syntime.html',syntimedata_list=syntimedata_list[0],time=syntimedata_list[1])
else:
t = time.time()
current_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
previous_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(t - 6*60*60))
syntimedata_list = multipledisplay(previous_time,current_time,"syntime")
return render_template('./dataanalyzer/syntime.html',syntimedata_list=syntimedata_list[0],time=syntimedata_list[1])
# Node energy consumption display
@app.route('/energydisplay/', methods=['POST', 'GET'])
@app.route('/energydisplay', methods=['POST', 'GET'])
def energydisplay():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
ID_list = NetID_list(start_time,end_time)
data = energy_display(start_time,end_time)
return render_template('./dataanalyzer/energydisplay.html', nodecount=len(ID_list), ID_list=ID_list, cpu_list=data[0], lpm_list=data[1], tx_list=data[2], rx_list=data[3],time=data[4])
else:
t = time.time()
current_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
previous_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(t - 6*60*60))
ID_list = NetID_list(previous_time,current_time)
data = energy_display(previous_time,current_time)
return render_template('./dataanalyzer/energydisplay.html', nodecount=len(ID_list), ID_list=ID_list, cpu_list=data[0], lpm_list=data[1], tx_list=data[2], rx_list=data[3],time=data[4])
# Sampled voltage display
@app.route('/voltagedisplay/', methods=['POST', 'GET'])
@app.route('/voltagedisplay', methods=['POST', 'GET'])
def voltagedisplay():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
voltagedata_list = multipledisplay(start_time,end_time,"volage")
return render_template('./dataanalyzer/voltagedisplay.html',voltagedata_list=voltagedata_list[0],time=voltagedata_list[1])
else:
t = time.time()
current_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
previous_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(t - 6*60*60))
voltagedata_list = multipledisplay(previous_time,current_time,"volage")
return render_template('./dataanalyzer/voltagedisplay.html',voltagedata_list=voltagedata_list[0],time=voltagedata_list[1])
# Reboot statistics display
@app.route('/restartdisplay/', methods=['POST', 'GET'])
@app.route('/restartdisplay', methods=['POST', 'GET'])
def restartdisplay():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
dataset = singledisplay(start_time,end_time,"reboot")
return render_template('./dataanalyzer/restartdisplay.html', nodecount = len(dataset[0]), ID_list = dataset[0], reboot_list = dataset[1],time=dataset[2])
else:
t = time.time()
current_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
previous_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(t - 6*60*60))
dataset = singledisplay(previous_time,current_time,"reboot")
return render_template('./dataanalyzer/restartdisplay.html', nodecount = len(dataset[0]), ID_list = dataset[0], reboot_list = dataset[1],time=dataset[2])
# Node neighbor count display
@app.route('/nbdisplay/', methods=['POST', 'GET'])
@app.route('/nbdisplay', methods=['POST', 'GET'])
def nbdisplay():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
data_list = multipledisplay(start_time,end_time,"numneighbors")
return render_template('./dataanalyzer/nbdisplay.html',data_list=data_list[0],time=data_list[1])
else:
t = time.time()
current_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
previous_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(t - 6*60*60))
data_list = multipledisplay(previous_time,current_time,"numneighbors")
return render_template('./dataanalyzer/nbdisplay.html',data_list=data_list[0],time=data_list[1])
# Beacon interval display
@app.route('/beacondisplay/', methods=['POST', 'GET'])
@app.route('/beacondisplay', methods=['POST', 'GET'])
def beacondisplay():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
data_list = multipledisplay(start_time,end_time,"beacon")
return render_template('./dataanalyzer/beacondisplay.html',data_list=data_list[0],time=data_list[1])
else:
t = time.time()
current_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
previous_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(t - 6*60*60))
data_list = multipledisplay(previous_time,current_time,"beacon")
return render_template('./dataanalyzer/beacondisplay.html',data_list=data_list[0],time=data_list[1])
# Deployment information table
@app.route('/deploy_info/', methods=['POST', 'GET'])
@app.route('/deploy_info', methods=['POST', 'GET'])
def deploy_info():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
else:
nodeplace = DATABASE.my_db_execute("select ID, NodeID, MeterID, Place from NodePlace;",None)
return render_template('./dataanalyzer/deploy_info.html',nodeplace = nodeplace)
@app.route('/deploy_modify/', methods=['POST', 'GET'])
@app.route('/deploy_modify', methods=['POST', 'GET'])
def deploy_modify():
    flag = 0 # flag==0: not modified; flag==1: modified (4-digit NodeID); flag==2: NodeID too long; flag==3: 3-digit NodeID; flag==4: 2 digits; flag==5: 1 digit
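    # The length branches below simply left-pad NodeID with zeros to four digits (equivalent
    # to str(NodeID).zfill(4)) before checking for duplicates and rewriting the row.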
if request.method == 'POST':
ID = request.form["ID"]
old_data = DATABASE.my_db_execute("select ID, NodeID, MeterID, Place from NodePlace where ID=?;",(ID,))
# conn.close()
NodeID = str(request.form["NodeID"])
MeterID = str(request.form["MeterID"])
Place = request.form["Place"]
if len(NodeID) == 4:
# print old_data[0]
if (str(old_data[0][1]) != NodeID):
flag = 1
elif (str(old_data[0][2]) != MeterID):
flag = 1
elif (old_data[0][3] != Place):
flag = 1
else:
flag = 0
elif len(NodeID) > 4:
flag = 2
elif len(NodeID) == 3:
flag = 3
elif len(NodeID) == 2:
flag = 4
elif len(NodeID) == 1:
flag = 5
# print ID, NodeID, MeterID, Place
if flag==0:
return "未进行更改"
elif flag==2:
return "节点ID长度过长,请重新输入!(4位)"
elif flag==3:
node=DATABASE.my_db_execute("select NodeID from NodePlace where NodeID=?;",("0"+str(NodeID),))
if node:
return "Error,节点已存在" #节点已存在
else:
DATABASE.db_del_or_insert("delete from NodePlace where ID = ?;",(ID,))
DATABASE.db_del_or_insert("insert into NodePlace (ID,NodeID,Place,MeterID) VALUES (?,?,?,?);",(ID,str("0"+str(NodeID)),Place,str(MeterID)))
return "更改成功"
elif flag==4:
node=DATABASE.my_db_execute("select NodeID from NodePlace where NodeID=?;",("00"+str(NodeID),))
if node:
return "Error,节点已存在" #节点已存在
else:
DATABASE.db_del_or_insert("delete from NodePlace where ID = ?;",(ID,))
DATABASE.db_del_or_insert("insert into NodePlace (ID,NodeID,Place,MeterID) VALUES (?,?,?,?);",(ID,str("00"+str(NodeID)),Place,str(MeterID)))
return "更改成功"
elif flag==5:
node=DATABASE.my_db_execute("select NodeID from NodePlace where NodeID=?;",("000"+str(NodeID),))
if node:
return "Error,节点已存在" #节点已存在
else:
DATABASE.db_del_or_insert("delete from NodePlace where ID = ?;",(ID,))
DATABASE.db_del_or_insert("insert into NodePlace (ID,NodeID,Place,MeterID) VALUES (?,?,?,?);",(ID,str("000"+str(NodeID)),Place,str(MeterID)))
return "更改成功"
elif flag==1:
node=DATABASE.my_db_execute("select NodeID from NodePlace where NodeID=?;",(NodeID,))
if node:
return "Error,节点已存在" #节点已存在
else:
DATABASE.db_del_or_insert("delete from NodePlace where ID = ?;",(ID,))
DATABASE.db_del_or_insert("insert into NodePlace (ID,NodeID,Place,MeterID) VALUES (?,?,?,?);",(ID,NodeID,Place,str(MeterID)))
return "更改成功"
else:
DATABASE.db_del_or_insert("delete from NodePlace where ID = ?;",(ID,))
DATABASE.db_del_or_insert("insert into NodePlace (ID,NodeID,Place,MeterID) VALUES (?,?,?,?);",(ID,str(NodeID),Place,str(MeterID)))
return "更改成功"
@app.route('/deploy_del/', methods=['POST', 'GET'])
@app.route('/deploy_del', methods=['POST', 'GET'])
def deploy_del():
del_list = list()
if request.method == 'POST':
get_list = request.form.getlist("del_list[]")
for item in get_list:
del_list.append(item.encode('ascii'))
# print del_list
for item in del_list:
if item:
DATABASE.db_del_or_insert("delete from NodePlace where ID=? ;",(item,))
nodeplace = DATABASE.my_db_execute("select ID, NodeID, MeterID, Place from NodePlace;",None)
return render_template('./dataanalyzer/deploy_info.html',nodeplace = nodeplace)
@app.route('/deploy_add/', methods=['POST', 'GET'])
@app.route('/deploy_add', methods=['POST', 'GET'])
def deploy_add():
databasepath = os.path.join(app.config['TOPO_FOLDER'],"topo3.db")
if request.method == 'POST':
NodeID = str(request.form["NodeID"])
MeterID = str(request.form["MeterID"])
Place = request.form["Place"]
# print NodeID, MeterID, Place
if len(NodeID) == 4:
node=DATABASE.my_db_execute("select NodeID from NodePlace where NodeID=?;",(NodeID,))
if node:
return "Error,节点已存在" #节点已存在
else:
DATABASE.db_del_or_insert("insert into NodePlace (NodeID,Place,MeterID) VALUES (?,?,?);",(str(NodeID),Place,str(MeterID)))
elif len(NodeID) > 4:
return "节点ID长度过长,请重新输入!(4位)"
elif len(NodeID) == 3:
node=DATABASE.my_db_execute("select NodeID from NodePlace where NodeID=?;",("0"+str(NodeID),))
if node:
return "Error,节点已存在" #节点已存在
else:
DATABASE.db_del_or_insert("insert into NodePlace (NodeID,Place,MeterID) VALUES (?,?,?);",("0"+str(NodeID),Place,str(MeterID)))
elif len(NodeID) == 2:
node=DATABASE.my_db_execute("select NodeID from NodePlace where NodeID=?;",("00"+str(NodeID),))
if node:
return "Error,节点已存在" #节点已存在
else:
DATABASE.db_del_or_insert("insert into NodePlace (NodeID,Place,MeterID) VALUES (?,?,?);",("00"+str(NodeID),Place,str(MeterID)))
elif len(NodeID) == 1:
node=DATABASE.my_db_execute("select NodeID from NodePlace where NodeID=?;",("000"+str(NodeID),))
if node:
return "Error,节点已存在" #节点已存在
else:
DATABASE.db_del_or_insert("insert into NodePlace (NodeID,Place,MeterID) VALUES (?,?,?);",("000"+str(NodeID),Place,str(MeterID)))
nodeplace = DATABASE.my_db_execute("select ID, NodeID, MeterID, Place from NodePlace;",None)
return "添加成功"
# Node information query
@app.route('/node_search/', methods=['POST', 'GET'])
@app.route('/node_search', methods=['POST', 'GET'])
def node_search():
nodeid_list = NetID_all()
nodeid_list.sort()
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
nodepick = request.form['nodeselect']
data = nodesearch_display(start_time,end_time,nodepick)
return render_template('./dataanalyzer/node_search.html',
nodeid=nodepick,nodelist = data[0],cpu=data[1],lpm=data[2],tx=data[3],rx=data[4],
voltage_list=data[5],time_list_1=data[6],time_list_2=data[7],current_list=data[8],time_list_3=data[9],rtx_list=data[10],deploy=data[11],time=data[12])
else:
nodepick = nodeid_list[0]
end_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
start_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time() - 6*60*60))
data = nodesearch_display(start_time,end_time,nodepick)
return render_template('./dataanalyzer/node_search.html',
nodeid=str(nodepick),nodelist = data[0],cpu=data[1],lpm=data[2],tx=data[3],rx=data[4],
voltage_list=data[5],time_list_1=data[6],time_list_2=data[7],current_list=data[8],time_list_3=data[9],rtx_list=data[10],deploy=data[11],time=data[12])
# Node deployment information query
@app.route('/deploysearch/', methods=['POST', 'GET'])
@app.route('/deploysearch', methods=['POST', 'GET'])
def deploysearch():
nodeid_list = list()
nodeid = DATABASE.my_db_execute('select distinct NodeID from NodePlace;',None)
for i in range(len(nodeid)):
nodeid_list.append(nodeid[i][0].encode('ascii'))
nodeid_list.sort()
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
nodepick = request.form['nodeselect']
# print nodepick
deploy_info = DATABASE.my_db_execute('select NodeID, MeterID, Place from NodePlace where NodeID == ?;',(nodepick,))
deploy = list()
deploy.append(deploy_info[0][0].encode('ascii'))
deploy.append(deploy_info[0][1].encode('ascii'))
deploy.append(deploy_info[0][2].encode('ascii'))
index_of_pick=nodeid_list.index(nodepick)
temp=nodeid_list[index_of_pick]
nodeid_list[index_of_pick]=nodeid_list[0]
nodeid_list[0]=temp
nodepick = "\""+nodepick+"\""
return render_template('./dataanalyzer/deploysearch.html',
nodeid=nodepick,nodelist = nodeid_list,deploy=deploy)
else:
return render_template('./dataanalyzer/deploysearch.html',
nodeid="",nodelist = nodeid_list,deploy=[])
@app.route('/network_data/', methods=['POST', 'GET'])
@app.route('/network_data', methods=['POST', 'GET'])
def network_data():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
select = request.form['filter_type']
nid = request.form['value']
if select == "all":
pcaps = DATABASE.my_db_execute("select * from NetMonitor where currenttime >= ? and currenttime <= ?;",(start_time, end_time))
timedisplay = ("\""+start_time + ' - ' + end_time+u"\",查询所有节点")
elif select == "ID":
pcaps = DATABASE.my_db_execute("select * from NetMonitor where currenttime >= ? and currenttime <= ? and NodeID == ?;",(start_time, end_time, nid))
timedisplay = ("\""+start_time + ' - ' + end_time+u"\",节点ID为:\""+nid+"\"")
elif select == "parentID":
pcaps = DATABASE.my_db_execute("select * from NetMonitor where currenttime >= ? and currenttime <= ? and ParentID == ?;",(start_time, end_time, nid))
timedisplay = ("\""+start_time + ' - ' + end_time+u"\",父节点ID为:\""+nid+"\"")
else:
pcaps = DATABASE.my_db_execute("select * from NetMonitor where currenttime >= ? and currenttime <= ?;",(start_time, end_time))
timedisplay = ("\""+start_time + ' - ' + end_time+u"\",查询所有节点")
return render_template('./dataanalyzer/network_data.html',pcaps=pcaps,length=len(pcaps),time=timedisplay)
else:
t = time.time()
current_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
previous_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(t - 6*60*60))
timedisplay = ("\""+previous_time + ' - ' + current_time+u"\",未选取节点")
pcaps = DATABASE.my_db_execute("select * from NetMonitor where currenttime >= ? and currenttime <= ?;",(previous_time, current_time))
return render_template('./dataanalyzer/network_data.html',pcaps=pcaps,length=len(pcaps),time=timedisplay)
@app.route('/app_data/', methods=['POST', 'GET'])
@app.route('/app_data', methods=['POST', 'GET'])
def app_data():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
select = request.form['filter_type']
nid = request.form['value']
if select == "all":
pcaps = DATABASE.my_db_execute("select * from ApplicationData where currenttime >= ? and currenttime <= ?;",(start_time, end_time))
timedisplay = ("\""+start_time + ' - ' + end_time+u"\",查询所有节点")
elif select == "ID":
pcaps = DATABASE.my_db_execute("select * from ApplicationData where currenttime >= ? and currenttime <= ? and NodeID == ?;",(start_time, end_time, nid))
timedisplay = ("\""+start_time + ' - ' + end_time+u"\",节点ID为:\""+nid+"\"")
else:
pcaps = DATABASE.my_db_execute("select * from ApplicationData where currenttime >= ? and currenttime <= ?;",(start_time, end_time))
timedisplay = ("\""+start_time + ' - ' + end_time+u"\",查询所有节点")
lendict = dict()
for pcap in pcaps:
lendict[int(pcap[0])] = len(str(pcap[3]))
return render_template('./dataanalyzer/app_data.html',appdata=pcaps,lendict = lendict,length=len(pcaps),time=timedisplay)
else:
t = time.time()
current_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
previous_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(t - 6*60*60))
timedisplay = ("\""+previous_time + ' - ' + current_time+u"\",未选取节点")
pcaps = DATABASE.my_db_execute("select * from ApplicationData where currenttime >= ? and currenttime <= ?;",(previous_time, current_time))
lendict = dict()
for pcap in pcaps:
lendict[int(pcap[0])] = len(str(pcap[3]))
return render_template('./dataanalyzer/app_data.html',appdata=pcaps,lendict = lendict,length=len(pcaps),time=timedisplay)
#-------------------------------------------- Communication with the backend ----------------------------------------------------
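# Based on the handlers below, commands are sent to the backend as JSON through Connect.TCP_send():
# "pama_data" starts with a command prefix (80 = send data, 82 = write/flash data, C0 = restart,
# C1 = factory reset, 40 = network parameter block), "type" is either "mcast" or a targeted type
# accompanied by an "addrList" of node IPs, and type "pama_corr" sets the root node
# time-calibration period.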
@app.route('/monitor/', methods=['POST', 'GET'])
@app.route('/monitor', methods=['POST', 'GET'])
def monitor():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
else:
display = Display()
        send_data = display.send_display() # display previously stored data
write_data = display.write_display()
adjtime_data = display.adjtime_display()
display_datadict = display.parameters_display()
# print display_datadict
return render_template('./client/monitor.html',send_data = send_data, write_data = write_data, adjtime_data = adjtime_data, display_datadict = display_datadict)
@app.route('/instruction_send/', methods=['POST', 'GET'])
@app.route('/instruction_send', methods=['POST', 'GET'])
def instruction_send():
    # Command dispatch
    modify = Modify() # writes the new configuration data to the config file
sendins = Connect()
datalist = []
dicts = {}
datalist.append("80")
datalength = ""
if request.method == 'POST':
recvdata = request.form['emit_data']
if recvdata:
modify.send_modify(recvdata)
if (len(recvdata)%2 != 0):
recvdata = "0"+recvdata
if (len(recvdata)<32):
datalength = "0"+hex(len(recvdata)/2)[2:]
else:
datalength = hex(len(recvdata))[2:]
else:
display = Display()
            recvdata = display.send_display() # previously stored data
transmit_type = request.form['mySelect']
nodeip = request.form['nodeIP']
if datalength:
datalist.append(datalength)
datalist.append(recvdata)
data = ''.join(datalist)
dicts["type"] = transmit_type
dicts["pama_data"] = data
if (transmit_type=="mcast"):
ins = json.dumps(dicts)
else:
addrlist = []
addrlist.append(nodeip)
dicts["addrList"] = addrlist
ins = json.dumps(dicts)
sendins.TCP_send(ins)
# print ins
return render_template('./client/monitor.html',display_datadict=None)
@app.route('/instruction_write/', methods=['POST', 'GET'])
@app.route('/instruction_write', methods=['POST', 'GET'])
def instruction_write():
    # Command flashing
    modify = Modify() # writes the new configuration data to the config file
sendins = Connect()
datalist = []
datalist.append("82")
datalength = ""
dicts = {}
if request.method == 'POST':
recvdata = request.form['write_data']
if recvdata:
modify.write_modify(recvdata)
if (len(recvdata)%2 != 0):
recvdata = "0"+recvdata
if (len(recvdata)<32):
datalength = "0"+hex(len(recvdata)/2)[2:]
else:
datalength = hex(len(recvdata))[2:]
else:
display = Display()
            recvdata = display.write_display() # previously stored data
transmit_type = request.form['mySelect2']
nodeip = request.form['nodeIP2']
if datalength:
datalist.append(datalength)
datalist.append(recvdata)
data = ''.join(datalist)
dicts["type"] = transmit_type
dicts["pama_data"] = data
if (transmit_type=="mcast"):
ins = json.dumps(dicts)
else:
addrlist = []
addrlist.append(nodeip)
dicts["addrList"] = addrlist
ins = json.dumps(dicts)
sendins.TCP_send(ins)
return render_template('./client/monitor.html',display_datadict=None)
@app.route('/instruction_restart/', methods=['POST', 'GET'])
@app.route('/instruction_restart', methods=['POST', 'GET'])
# send the restart instruction
def instruction_restart():
sendins = Connect()
dicts = {}
dicts["pama_data"] = "C0"
if request.method == 'POST':
transmit_type = request.form['mySelect4']
nodeip = request.form['nodeIP4']
dicts["type"] = transmit_type
if (transmit_type=="mcast"):
ins = json.dumps(dicts)
else:
addrlist = []
addrlist.append(nodeip)
dicts["addrList"] = addrlist
ins = json.dumps(dicts)
# print ins
sendins.TCP_send(ins)
return render_template('./client/monitor.html',display_datadict=None)
@app.route('/instruction_reset/', methods=['POST', 'GET'])
@app.route('/instruction_reset', methods=['POST', 'GET'])
# restore factory settings
def instruction_reset():
sendins = Connect()
dicts = {}
dicts["pama_data"] = "C1"
if request.method == 'POST':
transmit_type = request.form['mySelect5']
nodeip = request.form['nodeIP5']
dicts["type"] = transmit_type
if (transmit_type=="mcast"):
ins = json.dumps(dicts)
else:
addrlist = []
addrlist.append(nodeip)
dicts["addrList"] = addrlist
ins = json.dumps(dicts)
sendins.TCP_send(ins)
# print ins
return render_template('./client/monitor.html',display_datadict=None)
@app.route('/instruction_adjtime/', methods=['POST', 'GET'])
@app.route('/instruction_adjtime', methods=['POST', 'GET'])
def instruction_adjtime():
    # set the time-calibration period of the root node
    modify = Modify() # write the new configuration data to the config file
sendins = Connect()
dicts = {}
if request.method == 'POST':
recvdata = request.form['timeperiod']
if recvdata:
modify.adjtime_modify(recvdata)
else:
display = Display()
            recvdata = display.adjtime_display() # fall back to the previously saved data
dicts["pama_data"] = recvdata
dicts["type"] = "pama_corr"
ins = json.dumps(dicts)
sendins.TCP_send(ins)
return render_template('./client/monitor.html',display_datadict=None)
@app.route('/instruction3/', methods=['POST', 'GET'])
@app.route('/instruction3', methods=['POST', 'GET'])
# send the network parameter configuration instruction
def instruction3():
    modify = Modify() # write the new configuration data to the config file
sendins = Connect()
dicts= {}
dicts["type"] = "mcast_ack"
data0 = "40"
datalist = []
datalist.append(data0)
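    # pama_data layout: opcode 0x40 followed by eight one-byte parameters
    # (PANID, channel, CCA, emit power, CCA checking period, inactive period,
    # DIO min length, DIO max); "ff" appears to mean "keep the current value".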
if request.method == 'POST':
data1 = request.form['PANID']
if data1:
modify.PANID_modify(data1)
data1 = hex(int(data1))[2:]
else:
data1 = "ff"
datalist.append(data1)
data2 = request.form['channel']
if data2:
modify.channel_modify(data2)
data2 = hex(int(data2))[2:]
else:
data2 = "ff"
datalist.append(data2)
data3 = request.form['CCA']
if data3:
modify.CCA_modify(data3)
data3 = hex(int(data3))[2:]
else:
data3 = "ff"
datalist.append(data3)
data4 = request.form['emitpower']
if data4:
modify.emitpower_modify(data4)
data4 = hex(int(data4))[2:]
else:
data4 = "ff"
datalist.append(data4)
data5 = request.form['CCAcheckingperiod']
if data5:
modify.CCAcheckingperiod_modify(data5)
data5 = hex(int(data5))[2:]
else:
data5 = "ff"
datalist.append(data5)
data6 = request.form['inactive']
if data6:
modify.inactive_modify(data6)
data6 = hex(int(data6))[2:]
else:
data6 = "ff"
datalist.append(data6)
data7 = request.form['DIO_minlen']
if data7:
modify.DIO_minlen_modify(data7)
data7 = hex(int(data7))[2:]
else:
data7 = "ff"
datalist.append(data7)
data8 = request.form['DIO_max']
if data8:
modify.DIO_max_modify(data8)
data8 = hex(int(data8))[2:]
else:
data8 = "ff"
datalist.append(data8)
# cli.send(json.dumps(dicts).encode('utf-8'))
data = ''.join(datalist)
dicts["pama_data"] = data
ins = json.dumps(dicts)
# print "adsadsfasdf"
sendins.TCP_send(ins)
# return
return render_template('./client/monitor.html',display_datadict=None)
@app.route('/update_net/', methods=['POST', 'GET'])
@app.route('/update_net', methods=['POST', 'GET'])
# fetch network monitoring progress data
def update_net():
global NODE_DICT_NET
dicts= {}
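    # Compare each node's current NetMonitor row count against the baseline
    # captured in post_monitor_data(); nodes that have reported new rows are
    # removed from NODE_SET so the response can report "received / total" progress.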
for node ,value in NODE_DICT_NET.items():
# print node,value
temp = DATABASE.my_db_execute("select nodeID, count(nodeID) from NetMonitor where nodeID == ?", (node,))
# print temp
if int(temp[0][1])-value>0:
# NUMBER_NET+= 1
if(str(temp[0][0]) in NODE_SET):
NODE_SET.remove(str(temp[0][0]))
if len(NODE_DICT_NET):
dicts["total"] = len(NODE_DICT_NET)
dicts["now"] = dicts["total"] - len(NODE_SET)
else:
dicts["total"] = 1
dicts["now"] = 0
ins = json.dumps(dicts)
# print ins
return ins
@app.route('/scheduling/',methods=['POST', 'GET'])
def scheduling():
syn_config = Config()
l=syn_config.get_active_list()
dicts={'lists':l}
lists= json.dumps(dicts,sort_keys=True,indent =4,separators=(',', ': '),encoding="gbk",ensure_ascii=True)
return render_template('./client/scheduling.html',scheduleNow=lists)
@app.route('/setall_schedule/',methods=['POST', 'GET'])
@app.route('/setall_schedule',methods=['POST', 'GET'])
def setall_schedule():
if request.method == 'POST':
syn_config = Config()
syn_config.bitmap_checkall()
return "1"
@app.route('/cancelall_schedule/',methods=['POST', 'GET'])
@app.route('/cancelall_schedule',methods=['POST', 'GET'])
def cancelall_schedule():
if request.method == 'POST':
syn_config = Config()
syn_config.bitmap_cancelall()
return "2"
@app.route('/recommend_schedule1/',methods=['POST', 'GET'])
@app.route('/recommend_schedule1',methods=['POST', 'GET'])
def recommend_schedule1():
if request.method == 'POST':
syn_config = Config()
syn_config.recommend_schedule1()
return "2"
@app.route('/recommend_schedule2/',methods=['POST', 'GET'])
@app.route('/recommend_schedule2',methods=['POST', 'GET'])
def recommend_schedule2():
if request.method == 'POST':
syn_config = Config()
syn_config.recommend_schedule2()
return "2"
@app.route('/recommend_schedule3/',methods=['POST', 'GET'])
@app.route('/recommend_schedule3',methods=['POST', 'GET'])
def recommend_schedule3():
if request.method == 'POST':
syn_config = Config()
syn_config.recommend_schedule3()
return "2"
@app.route('/update_schedule/',methods=['POST', 'GET'])
def update_schedule():
syn_config = Config()
sendins = Connect()
senddicts = {}
if request.method == 'POST':
data = request.get_json()
bitmap_array = data['x']
if not bitmap_array:
bitmap_array = [0]*18
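        # Persist the 18-slot bitmap, rebuild the sync configuration, and then
        # either push just a new sync period or push the full schedule below.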
syn_config.set_SynBitMap(bitmap_array)
config_dict =syn_config.get_New_Synconfig()
period = data['p']
config_dict["bitmap"]=syn_config.format_To_SendBitMap(config_dict["bitmap"])
if period:
syn_config.get_syn_period(period)
# config_dict["bitmap"]=syn_config.format_To_SendBitMap(config_dict["bitmap"])
senddicts["pama_data"] = config_dict
senddicts["type"] = "pama_syn"
update_synperiod_ins = json.dumps(senddicts)
sendins.TCP_send(update_synperiod_ins)
# print update_synperiod_ins
else:
bitmaplist = config_dict["bitmap"]
subkey = ['minute', 'seqNum', 'level', 'bitmap', 'second', 'hour']
update_schedule_dict = {key:config_dict[key] for key in subkey}
senddicts["pama_data"] = update_schedule_dict
senddicts["type"] = "schedule"
update_schedule_ins = json.dumps(senddicts)
config_dict["bitmap"] = bitmaplist
sendins.TCP_send(update_schedule_ins)
# print update_schedule_ins
l=syn_config.get_active_list()
dicts={'lists':l}
lists= json.dumps(dicts)
return render_template('./client/scheduling.html',scheduleNow=lists)
# reporting and monitoring control
@app.route('/sendmonitor/', methods=['POST', 'GET'])
@app.route('/sendmonitor', methods=['POST', 'GET'])
def sendmonitor():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
else:
display = Display()
        display_data = display.monitor_update_period_display() # display previously saved data
return render_template('./client/sendmonitor.html', display_data = display_data)
@app.route('/monitor_update_period/', methods=['POST', 'GET'])
@app.route('/monitor_update_period', methods=['POST', 'GET'])
# modify the reporting period for network monitoring data
def monitor_update_period():
    modify = Modify() # write the new configuration data to the config file
sendins = Connect()
dicts = {}
if request.method == 'POST':
recvdata = request.form['update_period']
if recvdata:
modify.monitor_update_period_modify(recvdata)
else:
display = Display()
recvdata = display.monitor_update_period_display()
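        # pama_data: opcode 0x41 followed by the period encoded as a zero-padded hex byte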
if (int(recvdata)<16):
dicts["pama_data"] = "410" + hex(int(recvdata))[2:]
else:
dicts["pama_data"] = "41"+ hex(int(recvdata))[2:]
dicts["type"] = "mcast_ack"
ins = json.dumps(dicts)
sendins.TCP_send(ins)
# print ins
return render_template('./client/sendmonitor.html')
@app.route('/post_monitor_data/', methods=['POST', 'GET'])
@app.route('/post_monitor_data', methods=['POST', 'GET'])
# instruction asking nodes to report network monitoring data
def post_monitor_data():
global NODE_DICT_NET
# global NUMBER_NET
global NODE_SET
NODE_SET = set()
# NUMBER_NET=0
nodes = list(DATABASE.my_db_execute("select distinct NodeID from NodePlace;",None))
# nodes = list(c.fetchall()) #tuple -- list
total = len(nodes)
previous = 0 #total - len(nodes)
now = previous
sendins = Connect()
dicts = {}
if request.method == 'GET':
for node in nodes:
NODE_SET.add(str(node[0]))
temp = DATABASE.my_db_execute("select nodeID, count(nodeID) from NetMonitor where nodeID == ?", (node))
NODE_DICT_NET[temp[0][0]] = temp[0][1]
dicts["pama_data"] = "00"
dicts["type"] = "mcast"
ins = json.dumps(dicts)
sendins.TCP_send(ins)
# print ins
return render_template('./client/sendmonitor.html')
@app.route('/post_config/', methods=['POST', 'GET'])
@app.route('/post_config', methods=['POST', 'GET'])
# instruction asking nodes to report their network parameter configuration
def post_config():
sendins = Connect()
dicts = {}
if request.method == 'POST':
dicts["pama_data"] = "01"
dicts["type"] = "mcast"
ins = json.dumps(dicts)
sendins.TCP_send(ins)
return render_template('./client/sendmonitor.html')
#-------------------------------------------- Authentication / login ---------------------------------------------------
@app.route('/login/',methods=['POST', 'GET'])
def login():
login_msg=User_and_pwd()
if request.method == 'GET':
return render_template('./login/login.html')
elif request.method == 'POST':
USERNAME = login_msg.username.data
PASSWRD = login_msg.password.data
if USERNAME==HIT_USER and PASSWRD==HIT_PWD:
global PCAPS
PCAPS= 'yes:'
return render_template('./home/index.html')
else:
flash(u"用户名或密码错误!")
return render_template('./login/login.html')
@app.route('/logout/',methods=['POST', 'GET'])
def logout():
global PCAPS
PCAPS = None
return redirect(url_for('login'))
#------------------------------------------- Data analysis ----------------------------------------------------
# protocol analysis
@app.route('/protoanalyzer/', methods=['POST', 'GET'])
def protoanalyzer():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
data = protodisplay(start_time,end_time)
return render_template('./dataanalyzer/protoanalyzer.html',num_of_nodes=data[0],postrate=data[1] ,post=data[2], thispostrate=data[3] , http_key=data[4], http_value=data[5] ,nodecount=len(data[4]),time=data[6])
else:
t = time.time()
current_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
previous_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(t - 6*60*60))
data = protodisplay(previous_time,current_time)
return render_template('./dataanalyzer/protoanalyzer.html',num_of_nodes=data[0],postrate=data[1] ,post=data[2], thispostrate=data[3] , http_key=data[4], http_value=data[5] ,nodecount=len(data[4]),time=data[6])
# traffic analysis
@app.route('/flowanalyzer/', methods=['POST', 'GET'])
def flowanalyzer():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
data = flowdisplay(start_time,end_time)
return render_template('./dataanalyzer/trafficanalyzer.html', timeline=data[0],templist=data[1], topo_traffic_key=data[2],topo_traffic_value=data[3],time=data[4])
else:
t = time.time()
current_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
previous_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(t - 6*60*60))
data = flowdisplay(previous_time,current_time)
return render_template('./dataanalyzer/trafficanalyzer.html', timeline=data[0],templist=data[1], topo_traffic_key=data[2],topo_traffic_value=data[3],time=data[4])
@app.route('/appflowanalyzer/', methods=['POST', 'GET'])
def appflowanalyzer():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
data = appflowdisplay(start_time,end_time)
return render_template('./dataanalyzer/appflowdisplay.html', timeline=data[0],templist=data[1], topo_traffic_key=data[2],topo_traffic_value=data[3],time=data[4])
else:
t = time.time()
current_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
previous_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(t - 6*60*60))
data = appflowdisplay(previous_time,current_time)
return render_template('./dataanalyzer/appflowdisplay.html', timeline=data[0],templist=data[1], topo_traffic_key=data[2],topo_traffic_value=data[3],time=data[4])
# reported-data count analysis
@app.route('/count_appdata/', methods=['POST', 'GET'])
def count_appdata():
databasepath = os.path.join(app.config['TOPO_FOLDER'],"topo3.db")
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
dataset = selectall(start_time,end_time,"ApplicationData")
return render_template('./dataanalyzer/count_appdata.html',nodelist=dataset[0], countlist=dataset[1],time=dataset[2])
else:
t = time.time()
current_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
previous_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(t - 6*60*60))
dataset = selectall(previous_time,current_time,"ApplicationData")
return render_template('./dataanalyzer/count_appdata.html',nodelist=dataset[0], countlist=dataset[1],time=dataset[2])
# application data analysis
@app.route('/appdataanalyzer/', methods=['POST', 'GET'])
def appdataanalyzer():
nodeid_list = AppID_all()
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
nodepick = request.form['nodeselect']
timelist = node_time_display(start_time,end_time,"ApplicationData",nodepick)
return render_template('./dataanalyzer/appdataanalyzer.html',timelist=timelist[0], nodelist = nodeid_list,time=timelist[1],node=nodepick)
else:
node = DATABASE.my_db_execute('select distinct NodeID from ApplicationData limit 1;',None)
nodeid = (node[0][0].encode('ascii'))
t = time.time()
current_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
previous_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(t - 6*60*60))
timelist = node_time_display(previous_time,current_time,"ApplicationData",nodeid)
return render_template('./dataanalyzer/appdataanalyzer.html',timelist=timelist[0], nodelist = nodeid_list,time=timelist[1],node=nodeid)
# curve of network data counts over time
@app.route('/netcountdisplay/', methods=['POST', 'GET'])
def netcountdisplay():
nodeid_list = list()
appdata = DATABASE.my_db_execute('select distinct NodeID from NetMonitor;',None)
for i in range(len(appdata)):
nodeid_list.append(appdata[i][0].encode('ascii'))
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
nodepick = request.form['nodeselect']
timelist = node_time_display(start_time,end_time,"NetMonitor",nodepick)
return render_template('./dataanalyzer/netcountdisplay.html',timelist=timelist[0], nodelist = nodeid_list,time=timelist[1],node=nodepick)
else:
node = DATABASE.my_db_execute('select distinct NodeID from NetMonitor limit 1;',None)
nodeid = (node[0][0].encode('ascii'))
t = time.time()
current_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
previous_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(t - 6*60*60))
timelist = node_time_display(previous_time,current_time,"NetMonitor",nodeid)
return render_template('./dataanalyzer/netcountdisplay.html',timelist=timelist[0], nodelist = nodeid_list,time=timelist[1],node=nodeid)
# synchronization time offset over time
@app.route('/syntimediffdisplay/', methods=['POST', 'GET'])
@app.route('/syntimediffdisplay', methods=['POST', 'GET'])
def syntimediffdisplay():
syntime_list = list()
time_list = list()
nodeid_list = NetID_all()
nodeid_list.sort()
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
nodepick = request.form['nodeselect']
syntime = DATABASE.my_db_execute('select currenttime, syntime from NetMonitor where currenttime >= ? and currenttime <= ? and NodeID == ?;',(start_time, end_time, nodepick))
for i in range(len(syntime)):
time_list.append(syntime[i][0].encode('ascii'))
syntime_list.append(syntime[i][1])
timedisplay = ("\""+start_time + ' - ' + end_time+"\"").encode('ascii')
return render_template('./dataanalyzer/syntimediffdisplay.html',
nodeid=nodepick,nodelist = nodeid_list,time_list=time_list,syntime_list=syntime_list,time=timedisplay)
else:
nodepick = nodeid_list[0]
end_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
start_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time() - 6*60*60))
syntime = DATABASE.my_db_execute('select currenttime, syntime from NetMonitor where currenttime >= ? and currenttime <= ? and NodeID == ?;',(start_time, end_time, nodepick))
for i in range(len(syntime)):
time_list.append(syntime[i][0].encode('ascii'))
syntime_list.append(syntime[i][1])
timedisplay = ("\""+start_time + ' - ' + end_time+"\"").encode('ascii')
# print nodepick,nodeid_list,cpu,lpm,tx,rx,voltage_list,time_list
return render_template('./dataanalyzer/syntimediffdisplay.html',
nodeid=nodepick,nodelist = nodeid_list,time_list=time_list,syntime_list=syntime_list,time=timedisplay)
# topology display
@app.route('/topodisplay/', methods=['POST', 'GET'])
def topodisplay():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
echarts_start_time = selectime.encode("utf-8")[0:19]
echarts_end_time = selectime.encode("utf-8")[22:41]
topodata = topo_display(echarts_start_time,echarts_end_time)
return render_template('./dataanalyzer/topodisplay.html',nodes = topodata[0], links = topodata[1],time=topodata[2])
else:
t = time.time()
current_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
previous_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(t - 6*60*60))
topodata = topo_display(previous_time,current_time)
# lasttime = DATABASE.my_db_execute("select currenttime from NetMonitor where currenttime >= ? and currenttime <= ? order by currenttime desc LIMIT 1;",(previous_time, current_time))
# if lasttime:
        # real_end_time = time.mktime(time.strptime(lasttime[0][0],'%Y-%m-%d %H:%M:%S')) # take the last timestamp in the selected window and use the ten minutes before it
# real_start_time = real_end_time - 10 * 60
# start_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(real_start_time))
# end_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(real_end_time))
# ID_list = DATABASE.my_db_execute("select NodeID, ParentID from NetMonitor where currenttime >= ? and currenttime <= ?;",(start_time, end_time))
# for node in ID_list:
# ID = node[0] # ID
# ParentID = node[1] # parentID
# if ID in Parentnode:
# continue
# else:
# Parentnode[ID] = ParentID
        # # iterate over the keys of Parentnode to draw the scatter points; iterate over keys and values to draw the arrows
# nodes = list()
# links = list()
# n = dict()
# m = dict()
# if lasttime:
# if rootID not in Parentnode.keys():
# rootIDjson = {"category":3, "name":"root:"+str(rootID.encode('ascii'))}
# nodes.append(rootIDjson)
# for key ,value in Parentnode.items():
# n = {"category":1, "name":key.encode('ascii')}
# nodes.append(n)
# m = {"source":value.encode('ascii'), "target":key.encode('ascii'), "weight":1}
# links.append(m)
# else:
# for key ,value in Parentnode.items():
# if key==rootID:
# n = {"category":3, "name":key.encode('ascii')}
# nodes.append(n)
# m = {"source":value.encode('ascii'), "target":key.encode('ascii'), "weight":1}
# links.append(m)
# else:
# n = {"category":1, "name":key.encode('ascii')}
# nodes.append(n)
# m = {"source":value.encode('ascii'), "target":key.encode('ascii'), "weight":1}
# links.append(m)
return render_template('./dataanalyzer/topodisplay.html',nodes = topodata[0], links = topodata[1],time=topodata[2])
# ---------------------------------------------- System configuration tools ---------------------------------------------
@app.route('/terminaltool/', methods=['POST', 'GET'])
def terminaltool():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
else:
config=Connect()
url="http://"+config.all_config_json()["serverIp"]+":6175"
# print url
return redirect(url)
# return render_template('./systemctrl/index.html')
# ---------------------------------------------- Exception information pages ---------------------------------------------
# abnormal data
@app.route('/exceptinfo/', methods=['POST', 'GET'])
def exceptinfo():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
data = data_error_new(start_time,end_time)
return render_template('./exceptions/exception.html', vwarning=data[0],iwarning=data[1],lists=data[2],time=data[3])
else:
t = time.time()
current_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
previous_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(t - 6*60*60))
        # excessive current
data = data_error_new(previous_time,current_time)
return render_template('./exceptions/exception.html', vwarning=data[0],iwarning=data[1],lists=data[2],time=data[3])
# list of nodes with time-synchronization anomalies
@app.route('/synerror/', methods=['POST', 'GET'])
def synerror():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
        # time-synchronization node anomalies
warning_list = syn_error(start_time,end_time)
return render_template('./exceptions/synerror.html', warning=warning_list[0],lists=warning_list[1],time=warning_list[2])
else:
t = time.time()
current_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
previous_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(t - 6*60*60))
warning_list = syn_error(previous_time,current_time)
        return render_template('./exceptions/synerror.html', warning=warning_list[0],lists=warning_list[1],time=warning_list[2])
# ---------------------------------------------- Process supervision ---------------------------------------------
# process supervision
@app.route('/supervisor/', methods=['POST', 'GET'])
def supervisor():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
else:
processInfo = getAllProcessInfo()
return render_template('./supervisor/supervisor.html',processInfo=processInfo)
@app.route('/supervisor_set_status/', methods=['POST', 'GET'])
def supervisor_set_status():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
else:
deal_process = request.args.get('Processname')
handle = deal_process.split('_')[0]
Processname = deal_process.split('_')[1]
if handle=='stop':
stopProcess(Processname)
if handle=='start':
startProcess(Processname)
if handle=='restart':
stopProcess(Processname)
startProcess(Processname)
processInfo = getAllProcessInfo()
return render_template('./supervisor/supervisor.html',processInfo=processInfo)
@app.route('/supervisor_restart_all/', methods=['POST', 'GET'])
def supervisor_restart_all():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
else:
stopAllProcesses()
startAllProcesses()
processInfo = getAllProcessInfo()
return render_template('./supervisor/supervisor.html',processInfo=processInfo)
@app.route('/supervisor_start_all/', methods=['POST', 'GET'])
def supervisor_start_all():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
else:
startAllProcesses()
processInfo = getAllProcessInfo()
return render_template('./supervisor/supervisor.html',processInfo=processInfo)
@app.route('/supervisor_stop_all/', methods=['POST', 'GET'])
def supervisor_stop_all():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
else:
stopAllProcesses()
processInfo = getAllProcessInfo()
return render_template('./supervisor/supervisor.html',processInfo=processInfo)
@app.route('/test/', methods=['POST', 'GET'])
def test():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
elif request.method == 'POST':
selectime = request.form['field_name']
start_time = selectime.encode("utf-8")[0:19]
end_time = selectime.encode("utf-8")[22:41]
data = data_error_new(start_time,end_time)
# print data
return render_template('./upload/timestamp.html', vwarning=data[0],iwarning=data[1])
else:
t = time.time()
current_time = strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
previous_time = strftime('%Y-%m-%d %H:%M:%S', time.localtime(t - 6*60*60))
data = data_error_new(previous_time,current_time)
return render_template('./upload/timestamp.html', vwarning=data[0],iwarning=data[1])
# ---------------------------------------------- Packet construction pages ---------------------------------------------
# protocol description
@app.route('/nettools/', methods=['POST', 'GET'])
def nettools():
return u'网络工具'
@app.route('/protohelp/', methods=['POST', 'GET'])
def protohelp():
return u'协议说明'
# ----------------------------------------------错误处理页面---------------------------------------------
@app.errorhandler(404)
def not_found_error(error):
    return render_template('./error/404.html'), 404
@app.errorhandler(500)
def internal_error(error):
return render_template('./error/500.html'), 500
@app.route('/about/', methods=['POST', 'GET'])
def about():
if PCAPS == None:
flash(u"请完成认证登陆!")
return redirect(url_for('login'))
else:
return render_template('./home/about.html')
|
Simple SMS for iPod touch.
Send and receive standard SMS messages on your iPod touch, to and from any kind of mobile phone, from anywhere with a WiFi connection. iPhones can also use 3G.
You can receive SMS replies in 2 different ways: direct to the app, and/or via email - immediately, if you subscribe to a push-email service such as MobileMe, Gmail or Yahoo.
Simple SMS uses bulksms.com 2-way SMS gateways. A BulkSMS account has no monthly or setup fees, and supports over 800 networks in 166 different countries. Check out the SMS pricing and service availability for your country on the BulkSMS website. Note: Simple SMS uses 'Standard' routes.
See the Setup and User Guides for important account setup details, usage instructions and support information.
There are also other SMS apps, including iPad versions - see Which app is right for me?
Note: BulkSMS operate public SMS gateway services, and know nothing about your Simple SMS app. Contact them only for account-related issues such as passwords etc. If you encounter difficulties using Simple SMS, please follow the Quick Troubleshooter link above.
|
from AlphaGo.training.reinforcement_policy_trainer import run_training
from AlphaGo.models.policy import CNNPolicy
import os
from cProfile import Profile
# make a miniature model for playing on a miniature 7x7 board
architecture = {'filters_per_layer': 32, 'layers': 4, 'board': 7}
features = ['board', 'ones', 'turns_since', 'liberties', 'capture_size',
'self_atari_size', 'liberties_after', 'sensibleness']
policy = CNNPolicy(features, **architecture)
datadir = os.path.join('benchmarks', 'data')
modelfile = os.path.join(datadir, 'mini_rl_model.json')
weights = os.path.join(datadir, 'init_weights.hdf5')
outdir = os.path.join(datadir, 'rl_output')
stats_file = os.path.join(datadir, 'reinforcement_policy_trainer.prof')
if not os.path.exists(datadir):
os.makedirs(datadir)
if not os.path.exists(weights):
policy.model.save_weights(weights)
policy.save_model(modelfile)
profile = Profile()
arguments = (modelfile, weights, outdir, '--learning-rate', '0.001', '--save-every', '2',
'--game-batch', '20', '--iterations', '10', '--verbose')
profile.runcall(run_training, arguments)
profile.dump_stats(stats_file)
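# The dumped stats can be inspected afterwards, e.g. with:
# python -m pstats benchmarks/data/reinforcement_policy_trainer.prof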
|
Families and businesses are constantly changing, leaving you, the home or business owner, to adapt in the blink of an eye. While these changes can seem overwhelming, we at Dream Builders are here to accompany you along this journey and to make the modification of your home or work environment as smooth as possible.
Why move and build from scratch when you can positively transform your current living or working environment into a space that you love? Our general contractors and construction crew are here to renew your home or work space, creating designs that rejuvenate your space while maintaining the current interior and exterior architectural integrity of the structure.
More profit can be gained, and comfort earned, by adding bedrooms, bathrooms, and general square footage, but there are other options as well. Small details, such as added fireplaces, customized heating and lighting fixtures, and optimized views, can also transform a space. Contact us today for your free consultation and estimate!
|
# coding: utf-8
"""Script to generate an SBI configuration for this demo."""
import datetime
import json
import sys
from random import randint
def generate_sbi(index: int = None):
"""Generate a SBI config JSON string."""
date = datetime.datetime.utcnow().strftime('%Y%m%d')
if index is None:
index = randint(0, 999)
sbi_id = 'SBI-{}-sip-demo-{:03d}'.format(date, index)
sb_id = 'SBI-{}-sip-demo-{:03d}'.format(date, index)
pb_id = 'PB-{}-sip-demo-{:03d}'.format(date, index)
print('* Generating SBI: %s, PB: %s' % (sb_id, pb_id))
sbi = dict(
id=sbi_id,
version='1.0.0',
scheduling_block=dict(
id=sb_id,
project='sip',
programme_block='sip_demos'
),
processing_blocks=[
dict(
id=pb_id,
version='1.0.0',
type='offline',
priority=1,
dependencies=[],
resources_required=[],
workflow=dict(
id='mock_workflow',
version='1.0.0',
parameters=dict(
stage1=dict(duration=30),
stage2=dict(duration=30),
stage3=dict(duration=30)
)
)
)
]
)
return sbi
if __name__ == '__main__':
_index = int(sys.argv[1]) if len(sys.argv) == 2 else None
sbi_config = generate_sbi(_index)
    sbi_json = json.dumps(sbi_config)
    print(sbi_json)  # emit the generated configuration so it is not silently discarded
|
The University of California has offered admission this fall to 28,750 transfer students, a record number, UC officials said in July. Most of these students are California residents transferring from community colleges.
That is an encouraging development for the large share of students who start college at the community level. In NELA, for example, 300 of 841 college-bound seniors in the Class of 2018 are headed for community college, according to a recent survey by the Boulevard Sentinel of six local high schools.
Help with the transition to a four-year college or university is key to increasing the number of community-college graduates who go on to earn bachelor’s degrees, especially among underrepresented students of color, said UC officials.
Latino students made up 32% of the transfer students admitted to the UC system in the fall, white students were 31%, Asian Americans were 27%, African Americans were 6% and Native American and Pacific Islanders were less than 1%. Some students did not report their race or ethnicity.
Going forward, transfers from community college to the UC are expected to grow. Starting with students who enter community college in the fall of 2019, UC will guarantee a transfer to a UC campus for those who achieve the requisite grade-point average in one of 21 transfer “pathways” for popular majors.
These majors include anthropology, business administration, communications, computer science, English, economics, film and media, history, mathematics, philosophy, political science, psychology, physics and various fields in biology, chemistry and engineering.
The charts below show the highest level of educational attainment by zip code for NELA’s population, age 25 and older.
About one third of Americans age 25 or older, 33.4%, have college bachelor’s degrees or higher. In NELA, the comparable figures are 44.5% in 90041 (Eagle Rock); 30.3% in 90065 (Mount Washington, Glassell Park and Cypress Park); and 27.9% in 90042 (Highland Park).
In all three zip codes, about one-fourth of the age 25+ population tops out at ‘some college but no degree’ or at an associate’s (two-year) degree. This suggests the need for programs to help students complete community college and transfer to four-year colleges.
|
"""
Django settings for tests project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_1r4ul8i5s7_juk=n=kj_n)(e0q!w=ifq#mf78s2-&p3gfya%g'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.gis',
'rest_framework',
'django_rest_tools_app',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'django_rest_tools_tests.urls'
WSGI_APPLICATION = 'django_rest_tools_tests.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'django_rest_tools_tests',
'USER': 'postgres',
'PASSWORD': 'postgres',
'HOST': 'localhost',
'PORT': ''
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
}
|
First, create a final grade without the Extra Credit. Feel free to use the Re-normalize function under “Grading Setup”. This will calculate a grade for all students, even if there are missing/empty categories in the calculation. Be sure to uncheck the “Is final grade?” check box, as this is technically not the final grade since it does not include extra credit.
Next, create a column called “Extra Credit” and give it a total mark of 1. Fill this column as necessary.
Finally, create a second formula that takes 100% from the previously calculated final grade, and adds 1% for the Extra Credit. This is what your final Gradebook should look like with the extra credit and 2 formulas. Notice only ONE column is designated “(Final Grade)” (as opposed to “(Formula)”).
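As a rough sketch of the arithmetic behind that second formula (the numbers and names below are made up for illustration, not taken from any particular gradebook):

base_grade = 87.5     # final grade calculated without extra credit, in percent
extra_credit = 1      # value entered in the "Extra Credit" column (out of 1)
final_grade = base_grade * 1.00 + extra_credit * 1.0   # 100% of the base plus 1% for extra credit
print(final_grade)    # 88.5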
If the “Extra Credit” category is not available to you, you’ll need your school administrator to create it for you. Also, if your gradebook shows an “I” for incomplete, you’ll either need to explicitly assign a zero for Extra Credit, or, you’ll need to get your administrator to turn on Gradebook Normalization for missing categories. If you need help with this, just come chat with us.
Hi, this is great if your extra credit is meant as an overall percentage addition. We typically give extra credit on a per assignment basis, so it is as simple as adding the extra credit into the assignment grade itself.
That’s right. When you create an assignment column that is say, over 20 points, you could give the student 23/20, which is really 3 extra points above the allowable maximum. And this number will be figured into the overall final grade calculation for the student/subject.
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
ParmEd topology parser --- :mod:`MDAnalysis.converters.ParmEdParser`
====================================================================
Converts a `ParmEd <https://parmed.github.io/ParmEd/html>`_
:class:`parmed.structure.Structure` into a :class:`MDAnalysis.core.Topology`.
Example
-------
If you want to use an MDAnalysis-written ParmEd structure for simulation
in ParmEd, you need to first read your files with ParmEd to include the
necessary topology parameters. ::
>>> import parmed as pmd
>>> import MDAnalysis as mda
>>> from MDAnalysis.tests.datafiles import PRM7_ala2, RST7_ala2
>>> prm = pmd.load_file(PRM7_ala2, RST7_ala2)
>>> prm
<AmberParm 3026 atoms; 1003 residues; 3025 bonds; PBC (orthogonal); parametrized>
We can then convert this to an MDAnalysis structure, select only the
protein atoms, and then convert it back to ParmEd. ::
>>> u = mda.Universe(prm)
>>> u
<Universe with 3026 atoms>
>>> prot = u.select_atoms('protein')
>>> prm_prot = prot.convert_to('PARMED')
>>> prm_prot
<Structure 23 atoms; 2 residues; 22 bonds; PBC (orthogonal); parametrized>
From here you can create an OpenMM simulation system and minimize the
energy. ::
>>> import simtk.openmm as mm
>>> import simtk.openmm.app as app
>>> from parmed import unit as u
>>> system = prm_prot.createSystem(nonbondedMethod=app.NoCutoff,
... constraints=app.HBonds,
... implicitSolvent=app.GBn2)
>>> integrator = mm.LangevinIntegrator(
... 300*u.kelvin, # Temperature of heat bath
... 1.0/u.picoseconds, # Friction coefficient
... 2.0*u.femtoseconds, # Time step
... )
>>> sim = app.Simulation(prm_prot.topology, system, integrator)
>>> sim.context.setPositions(prm_prot.positions)
>>> sim.minimizeEnergy(maxIterations=500)
Now you can continue on and run a simulation, if you wish.
Classes
-------
.. autoclass:: ParmEdParser
:members:
:inherited-members:
.. versionchanged:: 2.0.0
The ParmEdParser class was moved from :mod:`~MDAnalysis.topology` to
:mod:`~MDAnalysis.converters`
"""
import logging
import numpy as np
from ..topology.base import TopologyReaderBase, change_squash
from ..topology.tables import Z2SYMB
from ..core.topologyattrs import (
Atomids,
Atomnames,
AltLocs,
ChainIDs,
Atomtypes,
Occupancies,
Tempfactors,
Elements,
Masses,
Charges,
Resids,
Resnums,
Resnames,
Segids,
GBScreens,
SolventRadii,
NonbondedIndices,
RMins,
Epsilons,
RMin14s,
Epsilon14s,
Bonds,
UreyBradleys,
Angles,
Dihedrals,
Impropers,
CMaps
)
from ..core.topology import Topology
logger = logging.getLogger("MDAnalysis.converters.ParmEdParser")
def squash_identical(values):
if len(values) == 1:
return values[0]
else:
return tuple(values)
class ParmEdParser(TopologyReaderBase):
"""
For ParmEd structures
"""
format = 'PARMED'
@staticmethod
def _format_hint(thing):
"""Can this Parser read object *thing*?
.. versionadded:: 1.0.0
"""
try:
import parmed as pmd
except ImportError: # if no parmed, probably not parmed
return False
else:
return isinstance(thing, pmd.Structure)
def parse(self, **kwargs):
"""Parse PARMED into Topology
Returns
-------
MDAnalysis *Topology* object
.. versionchanged:: 2.0.0
Elements are no longer guessed, if the elements present in the
            parmed object are not recognised (usually given an atomic mass of 0)
then they will be assigned an empty string.
"""
structure = self.filename
#### === ATOMS === ####
names = []
masses = []
charges = []
types = []
atomic_numbers = []
serials = []
resnames = []
resids = []
chainids = []
segids = []
altLocs = []
bfactors = []
occupancies = []
screens = []
solvent_radii = []
nonbonded_indices = []
rmins = []
epsilons = []
rmin14s = []
epsilon14s = []
for atom in structure.atoms:
names.append(atom.name)
masses.append(atom.mass)
charges.append(atom.charge)
types.append(atom.type)
atomic_numbers.append(atom.atomic_number)
serials.append(atom.number)
resnames.append(atom.residue.name)
resids.append(atom.residue.number)
chainids.append(atom.residue.chain)
segids.append(atom.residue.segid)
altLocs.append(atom.altloc)
bfactors.append(atom.bfactor)
occupancies.append(atom.occupancy)
screens.append(atom.screen)
solvent_radii.append(atom.solvent_radius)
nonbonded_indices.append(atom.nb_idx)
rmins.append(atom.rmin)
epsilons.append(atom.epsilon)
rmin14s.append(atom.rmin_14)
epsilon14s.append(atom.epsilon_14)
attrs = []
n_atoms = len(names)
elements = []
for z, name in zip(atomic_numbers, names):
try:
elements.append(Z2SYMB[z])
except KeyError:
elements.append('')
# Make Atom TopologyAttrs
for vals, Attr, dtype in (
(names, Atomnames, object),
(masses, Masses, np.float32),
(charges, Charges, np.float32),
(types, Atomtypes, object),
(elements, Elements, object),
(serials, Atomids, np.int32),
(chainids, ChainIDs, object),
(altLocs, AltLocs, object),
(bfactors, Tempfactors, np.float32),
(occupancies, Occupancies, np.float32),
(screens, GBScreens, np.float32),
(solvent_radii, SolventRadii, np.float32),
(nonbonded_indices, NonbondedIndices, np.int32),
(rmins, RMins, np.float32),
(epsilons, Epsilons, np.float32),
(rmin14s, RMin14s, np.float32),
(epsilon14s, Epsilon14s, np.float32),
):
attrs.append(Attr(np.array(vals, dtype=dtype)))
resids = np.array(resids, dtype=np.int32)
resnames = np.array(resnames, dtype=object)
chainids = np.array(chainids, dtype=object)
segids = np.array(segids, dtype=object)
residx, (resids, resnames, chainids, segids) = change_squash(
(resids, resnames, chainids, segids),
(resids, resnames, chainids, segids))
n_residues = len(resids)
attrs.append(Resids(resids))
attrs.append(Resnums(resids.copy()))
attrs.append(Resnames(resnames))
segidx, (segids,) = change_squash((segids,), (segids,))
n_segments = len(segids)
attrs.append(Segids(segids))
#### === OTHERS === ####
bond_values = {}
bond_types = []
bond_orders = []
ub_values = {}
ub_types = []
angle_values = {}
angle_types = []
dihedral_values = {}
dihedral_types = []
improper_values = {}
improper_types = []
cmap_values = {}
cmap_types = []
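        # Each bonded term is grouped by its tuple of atom indices, so duplicate
        # parameters describing the same connection collapse into a single entry.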
for bond in structure.bonds:
idx = (bond.atom1.idx, bond.atom2.idx)
if idx not in bond_values:
bond_values[idx] = ([bond], [bond.order])
else:
bond_values[idx][0].append(bond)
bond_values[idx][1].append(bond.order)
try:
bond_values, values = zip(*list(bond_values.items()))
except ValueError:
bond_values, bond_types, bond_orders = [], [], []
else:
bond_types, bond_orders = zip(*values)
bond_types = list(map(squash_identical, bond_types))
bond_orders = list(map(squash_identical, bond_orders))
attrs.append(Bonds(bond_values, types=bond_types, guessed=False,
order=bond_orders))
for pmdlist, na, values, types in (
(structure.urey_bradleys, 2, ub_values, ub_types),
(structure.angles, 3, angle_values, angle_types),
(structure.dihedrals, 4, dihedral_values, dihedral_types),
(structure.impropers, 4, improper_values, improper_types),
(structure.cmaps, 5, cmap_values, cmap_types),
):
for p in pmdlist:
atoms = ['atom{}'.format(i) for i in range(1, na+1)]
idx = tuple(getattr(p, a).idx for a in atoms)
if idx not in values:
values[idx] = [p]
else:
values[idx].append(p)
for dct, Attr in (
(ub_values, UreyBradleys),
(angle_values, Angles),
(dihedral_values, Dihedrals),
(improper_values, Impropers),
(cmap_values, CMaps),
):
try:
vals, types = zip(*list(dct.items()))
except ValueError:
vals, types = [], []
types = list(map(squash_identical, types))
attrs.append(Attr(vals, types=types, guessed=False, order=None))
top = Topology(n_atoms, n_residues, n_segments,
attrs=attrs,
atom_resindex=residx,
residue_segindex=segidx)
return top
|
Glassydur is a revolutionary high-performance micro concrete material. It is a product with high resistance to bending and tensile stress, very resistant to impact and erosion, with an approximate thickness of between 1 and 3 cm.
It is fire resistant and resistant to atmospheric agents, anti-corrosive, insulating and soundproofing. It comes in different sizes, with the possibility of connecting the pieces with different finishes to meet any practical or aesthetic need. With its multiple shapes, it has allowed us to revive traditional Canarian structural and decorative architecture, such as arches, domes, arcades, shades, pillars and pavements.
Glassydur contributes to sustainable development by using fewer quarried aggregates, since it replaces material that would otherwise be extracted from quarries. The technical characteristics of this popular product allow us to use it for pavements, wall coverings, structural linings and decorative elements on the facades and interiors of buildings, adapting to the most demanding needs.
It also offers substantial savings compared to the original materials.
|
# GNU MediaGoblin -- federated, autonomous media hosting
# Copyright (C) 2011, 2012 MediaGoblin contributors. See AUTHORS.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from mediagoblin import messages
from mediagoblin.tools import pluginapi
from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
from recaptcha.client import captcha
import json
import urllib2
_log = logging.getLogger(__name__)
def extra_validation(register_form):
config = pluginapi.get_config('mediagoblin.plugins.recaptcha')
recaptcha_secret_key = config.get('RECAPTCHA_SECRET_KEY')
# Our hacky method of adding CAPTCHA fields to the form results
# in multiple fields with the same name. Check the raw_data for
# a non-empty string.
if 'g_recaptcha_response' in register_form:
recaptcha_response = register_form.g_recaptcha_response.data
if recaptcha_response == u'':
for raw_data in register_form.g_recaptcha_response.raw_data:
if raw_data != u'':
recaptcha_response = raw_data
if 'remote_address' in register_form:
remote_address = register_form.remote_address.data
if remote_address == u'':
for raw_data in register_form.remote_address.raw_data:
if raw_data != u'':
remote_address = raw_data
captcha_challenge_passes = False
server_response = ''
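    # Verify the response token against Google's siteverify endpoint; the
    # 'success' field of the JSON reply indicates whether the CAPTCHA was solved.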
if recaptcha_response:
url = "https://www.google.com/recaptcha/api/siteverify?secret=%s&response=%s&remoteip=%s" % (recaptcha_secret_key, recaptcha_response, remote_address)
server_response = json.loads(urllib2.urlopen(url).read())
captcha_challenge_passes = server_response['success']
if not captcha_challenge_passes:
register_form.g_recaptcha_response.errors.append(
_('Sorry, CAPTCHA attempt failed.'))
_log.info('Failed registration CAPTCHA attempt from %r.', remote_address)
_log.debug('captcha response is: %r', recaptcha_response)
if server_response:
_log.debug('server response is: %r' % server_response)
return captcha_challenge_passes
|
My Danish Career: We spoke with Mariangeles Claros, who transferred her experience in the Spanish travel and tourism industry to a job in the Danish town of Horsens, and says the move has been great for her family.
Having worked in Spain for seven years in the tourism industry, Mariangeles Claros moved to Denmark in 2014 - but she already knew some of her Danish colleagues.
Claros now works full time as a marketing manager with the Spain-Holiday.com group, which advertises holiday homes in Spain to vacationers in Denmark and elsewhere.
"I started 10 years ago at our main office in Malaga, but being an international company with offices also in Horsens, I was offered the possibility to move to Denmark and have another vision of life, working for the same company but this time with my Danish colleagues around," Claros says.
The tourist industry marketer actually knew her Danish colleagues before arriving in the Scandinavian country.
"I met them a couple of times a year, and our communication was always via Skype or emails," she says.
The travel website for which she works was founded 15 years ago by Claus Sørensen, Claros' husband and father of her three children.
"We had a Danish friend in common in Malaga, who introduced us two. We were supposed to help each other in foreign languages - me helping him in Spanish, and him helping me in English - with a cup of coffee at the beach every afternoon. But that cup of coffee went too far… now we have three kids, he is fluent in Spanish, and I am fluent in English, and even in Danish," she jokes.
The holiday site owned by Sørensen works in 13 different languages, with Claros overseeing the Spanish market.
"The Danish office existed already. I just moved from the Spanish office based in Malaga to the Danish office located in Horsens," she explains.
Despite the flexibility of working online, being located in Denmark and having specialist knowledge of Spain made the marketing manager a good fit for her Horsens workplace.
"I still work on the Spanish market, but being here in Denmark brought me the possibility to dedicate part of my time to the Danish market as well.
"I am Spanish and work for a website that advertises holiday homes in Spain. My local knowledge about Spain and my proximity to Danish people is a good combination to get a wider vision of how the holiday rental industry works best for Danish tourists," she says.
Having first visited Denmark at the turn of the century, the tourism manager said she had always found the work-life balance of the northern country an attractive prospect.
"I was gripped by the Danish hygge from first time I came on a holiday to Denmark 17 years ago. I moved to Denmark with a big desire for freedom for my kids. This is paradise for families with children, despite the weather," she says.
"Generally speaking this is a country where family matters a lot. I mean, family first. For instance, if your child gets sick, you have the right to paid leave," she explains.
"I could see that Denmark was a very good place for children to grow. As a mother of three kids living in a busy city, with homework and exams from first grade in primary school, I really saw Denmark as a place to live less stressed and more 'hyggeligt', giving children the freedom and joy they need in their childhood," she continues.
"That was the real reason that motivated me to move up here, and without changing my job, I felt really fortunate to make this happen," she continues.
Despite knowing Denmark and having a job in advance of moving to the country, Claros says there was still plenty of work to do to get her career on track in the Scandinavian country - as there would be for anyone taking on the relocation process.
"Denmark is not a paradise for those looking for a dream job. Many people with even two university careers have it difficult here because of the language. English is ok, but not enough. Spoken and written Danish is a must. You need to be really passionate about your career in Denmark and the country itself, including the culture, the people, the weather… and the language," she says.
"In my case, I never went to a Danish school. I self-learned at home, with the help of my husband, and listening to P4 Danish radio every single morning at work. So you can say that my Danish is very 'homemade'," she says, adding that Danes are "very patient when trying to understand a foreign person speaking Danish".
The tourist industry manager admits that the climate in Denmark was a factor in her adjusting to her new life away from the sunny Costa del Sol.
"Of course, when you move from Costa del Sol, with an average of 320 days of sun a year, not seeing the sun can be a bit tough. I see how Danes are eating vitamin pills, especially vitamin D, and I am a bit reluctant to do it, even though I know sooner or later I will end up buying those vitamins too," she says.
"I am also a real spontaneous girl from the south. So a coffee with a friend that needs to be planned three weeks in advance, with a starting and finishing time - that's not my 'cup of tea'," she says.
"So, I of course had to adapt myself to it to be able to get a social life here. That's how it is."
|
'''
NeuroLearn Utilities
====================
handy utilities.
'''
__all__ = ['get_resource_path',
'get_anatomical',
'set_algorithm',
'attempt_to_import',
'all_same',
'concatenate',
'_bootstrap_apply_func',
'set_decomposition_algorithm'
]
__author__ = ["Luke Chang"]
__license__ = "MIT"
from os.path import dirname, join, sep as pathsep
import nibabel as nib
import importlib
import os
from sklearn.pipeline import Pipeline
from sklearn.utils import check_random_state
from scipy.spatial.distance import squareform
import numpy as np
import pandas as pd
import collections
from types import GeneratorType
def _df_meta_to_arr(df):
"""Check what kind of data exists in pandas columns or index. If string return as numpy array 'S' type, otherwise regular numpy array. Used when saving Brain_Data objects to hdf5.
"""
if len(df.columns):
if isinstance(df.columns[0], str):
columns = df.columns.values.astype("S")
else:
columns = df.columns.values
else:
columns = []
if len(df.index):
if isinstance(df.index[0], str):
index = df.index.values.astype("S")
else:
index = df.index.values
else:
index = []
return columns, index
def get_resource_path():
""" Get path to nltools resource directory. """
return join(dirname(__file__), 'resources') + pathsep
def get_anatomical():
""" Get nltools default anatomical image.
DEPRECATED. See MNI_Template and resolve_mni_path from nltools.prefs
"""
return nib.load(os.path.join(get_resource_path(), 'MNI152_T1_2mm.nii.gz'))
def get_mni_from_img_resolution(brain, img_type='plot'):
"""
Get the path to the resolution MNI anatomical image that matches the resolution of a Brain_Data instance. Used by Brain_Data.plot() and .iplot() to set backgrounds appropriately.
Args:
brain: Brain_Data instance
Returns:
file_path: path to MNI image
"""
if img_type not in ['plot', 'brain']:
raise ValueError("img_type must be 'plot' or 'brain' ")
res_array = np.abs(np.diag(brain.nifti_masker.affine_)[:3])
voxel_dims = np.unique(abs(res_array))
if len(voxel_dims) != 1:
raise ValueError("Voxels are not isometric and cannot be visualized in standard space")
else:
dim = str(int(voxel_dims[0])) + 'mm'
if img_type == 'brain':
mni = f'MNI152_T1_{dim}_brain.nii.gz'
else:
mni = f'MNI152_T1_{dim}.nii.gz'
return os.path.join(get_resource_path(), mni)
def set_algorithm(algorithm, *args, **kwargs):
""" Setup the algorithm to use in subsequent prediction analyses.
Args:
algorithm: The prediction algorithm to use. Either a string or an
(uninitialized) scikit-learn prediction object. If string,
must be one of 'svm','svr', linear','logistic','lasso',
'lassopcr','lassoCV','ridge','ridgeCV','ridgeClassifier',
'randomforest', or 'randomforestClassifier'
kwargs: Additional keyword arguments to pass onto the scikit-learn
clustering object.
Returns:
predictor_settings: dictionary of settings for prediction
"""
# NOTE: function currently located here instead of analysis.py to avoid circular imports
predictor_settings = {}
predictor_settings['algorithm'] = algorithm
def load_class(import_string):
class_data = import_string.split(".")
module_path = '.'.join(class_data[:-1])
class_str = class_data[-1]
module = importlib.import_module(module_path)
return getattr(module, class_str)
algs_classify = {
'svm': 'sklearn.svm.SVC',
'logistic': 'sklearn.linear_model.LogisticRegression',
'ridgeClassifier': 'sklearn.linear_model.RidgeClassifier',
'ridgeClassifierCV': 'sklearn.linear_model.RidgeClassifierCV',
'randomforestClassifier': 'sklearn.ensemble.RandomForestClassifier'
}
algs_predict = {
'svr': 'sklearn.svm.SVR',
'linear': 'sklearn.linear_model.LinearRegression',
'lasso': 'sklearn.linear_model.Lasso',
'lassoCV': 'sklearn.linear_model.LassoCV',
'ridge': 'sklearn.linear_model.Ridge',
'ridgeCV': 'sklearn.linear_model.RidgeCV',
        'randomforest': 'sklearn.ensemble.RandomForestRegressor'  # sklearn exposes RandomForestRegressor, not RandomForest
}
if algorithm in algs_classify.keys():
predictor_settings['prediction_type'] = 'classification'
alg = load_class(algs_classify[algorithm])
predictor_settings['predictor'] = alg(*args, **kwargs)
elif algorithm in algs_predict:
predictor_settings['prediction_type'] = 'prediction'
alg = load_class(algs_predict[algorithm])
predictor_settings['predictor'] = alg(*args, **kwargs)
elif algorithm == 'lassopcr':
predictor_settings['prediction_type'] = 'prediction'
from sklearn.linear_model import Lasso
from sklearn.decomposition import PCA
predictor_settings['_lasso'] = Lasso()
predictor_settings['_pca'] = PCA()
predictor_settings['predictor'] = Pipeline(
steps=[('pca', predictor_settings['_pca']),
('lasso', predictor_settings['_lasso'])])
elif algorithm == 'pcr':
predictor_settings['prediction_type'] = 'prediction'
from sklearn.linear_model import LinearRegression
from sklearn.decomposition import PCA
predictor_settings['_regress'] = LinearRegression()
predictor_settings['_pca'] = PCA()
predictor_settings['predictor'] = Pipeline(
steps=[('pca', predictor_settings['_pca']),
('regress', predictor_settings['_regress'])])
else:
raise ValueError("""Invalid prediction/classification algorithm name.
Valid options are 'svm','svr', 'linear', 'logistic', 'lasso',
'lassopcr','lassoCV','ridge','ridgeCV','ridgeClassifier',
'randomforest', or 'randomforestClassifier'.""")
return predictor_settings
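# Usage sketch (illustrative; X and y below are placeholder numpy arrays, not
# part of this module). set_algorithm only builds the settings dictionary; the
# caller fits the returned scikit-learn estimator itself.
#
#     settings = set_algorithm('ridge', alpha=1.0)
#     assert settings['prediction_type'] == 'prediction'
#     model = settings['predictor']   # sklearn.linear_model.Ridge(alpha=1.0)
#     model.fit(X, y)
#     y_hat = model.predict(X)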
def set_decomposition_algorithm(algorithm, n_components=None, *args, **kwargs):
""" Setup the algorithm to use in subsequent decomposition analyses.
Args:
algorithm: The decomposition algorithm to use. Either a string or an
(uninitialized) scikit-learn decomposition object.
            If string must be one of 'pca', 'nnmf', 'ica', 'fa',
'dictionary', 'kernelpca'.
kwargs: Additional keyword arguments to pass onto the scikit-learn
clustering object.
Returns:
predictor_settings: dictionary of settings for prediction
"""
# NOTE: function currently located here instead of analysis.py to avoid circular imports
def load_class(import_string):
class_data = import_string.split(".")
module_path = '.'.join(class_data[:-1])
class_str = class_data[-1]
module = importlib.import_module(module_path)
return getattr(module, class_str)
algs = {
'pca': 'sklearn.decomposition.PCA',
'ica': 'sklearn.decomposition.FastICA',
'nnmf': 'sklearn.decomposition.NMF',
'fa': 'sklearn.decomposition.FactorAnalysis',
'dictionary': 'sklearn.decomposition.DictionaryLearning',
'kernelpca': 'sklearn.decomposition.KernelPCA'}
if algorithm in algs.keys():
alg = load_class(algs[algorithm])
alg = alg(n_components, *args, **kwargs)
else:
raise ValueError("""Invalid prediction/classification algorithm name.
Valid options are 'pca','ica', 'nnmf', 'fa'""")
return alg
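# Usage sketch (illustrative; data is a placeholder 2D numpy array). The helper
# returns an unfitted scikit-learn decomposition object configured with
# n_components.
#
#     pca = set_decomposition_algorithm('pca', n_components=5)
#     components = pca.fit_transform(data)   # shape: (n_samples, 5)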
def isiterable(obj):
''' Returns True if the object is one of allowable iterable types. '''
return isinstance(obj, (list, tuple, GeneratorType))
module_names = {}
Dependency = collections.namedtuple('Dependency', 'package value')
def attempt_to_import(dependency, name=None, fromlist=None):
if name is None:
name = dependency
try:
mod = __import__(dependency, fromlist=fromlist)
except ImportError:
mod = None
module_names[name] = Dependency(dependency, mod)
return mod
def all_same(items):
    # use the builtin all(); np.all() over a generator object is always truthy
    return all(x == items[0] for x in items)
def concatenate(data):
'''Concatenate a list of Brain_Data() or Adjacency() objects'''
if not isinstance(data, list):
raise ValueError('Make sure you are passing a list of objects.')
if all([isinstance(x, data[0].__class__) for x in data]):
# Temporarily Removing this for circular imports (LC)
# if not isinstance(data[0], (Brain_Data, Adjacency)):
# raise ValueError('Make sure you are passing a list of Brain_Data'
# ' or Adjacency objects.')
out = data[0].__class__()
for i in data:
out = out.append(i)
else:
raise ValueError('Make sure all objects in the list are the same type.')
return out
def _bootstrap_apply_func(data, function, random_state=None, *args, **kwargs):
'''Bootstrap helper function. Sample with replacement and apply function'''
random_state = check_random_state(random_state)
data_row_id = range(data.shape()[0])
new_dat = data[random_state.choice(data_row_id,
size=len(data_row_id),
replace=True)]
return getattr(new_dat, function)(*args, **kwargs)
def check_square_numpy_matrix(data):
'''Helper function to make sure matrix is square and numpy array'''
from nltools.data import Adjacency
if isinstance(data, Adjacency):
data = data.squareform()
elif isinstance(data, pd.DataFrame):
data = data.values
else:
data = np.array(data)
if len(data.shape) != 2:
try:
data = squareform(data)
except ValueError:
raise ValueError("Array does not contain the correct number of elements to be square")
return data
def check_brain_data(data, mask=None):
'''Check if data is a Brain_Data Instance.'''
from nltools.data import Brain_Data
if not isinstance(data, Brain_Data):
if isinstance(data, nib.Nifti1Image):
data = Brain_Data(data, mask=mask)
else:
raise ValueError("Make sure data is a Brain_Data instance.")
else:
if mask is not None:
data = data.apply_mask(mask)
return data
def check_brain_data_is_single(data):
'''Logical test if Brain_Data instance is a single image
Args:
data: brain data
Returns:
(bool)
'''
data = check_brain_data(data)
if len(data.shape()) > 1:
return False
else:
return True
def _roi_func(brain, roi, algorithm, cv_dict, **kwargs):
'''Brain_Data.predict_multi() helper function'''
return brain.apply_mask(roi).predict(algorithm=algorithm, cv_dict=cv_dict, plot=False, **kwargs)
class AmbiguityError(Exception):
pass
def generate_jitter(n_trials, mean_time=5, min_time=2, max_time=12, atol=.2):
'''Generate jitter from exponential distribution with constraints
    Draws samples from an exponential distribution, keeping only values with
    min_time < value < max_time, and redraws until
    np.abs(np.mean(data) - mean_time) <= atol
Args:
n_trials: (int) number of trials to generate jitter
mean_time: (float) desired mean of distribution
min_time: (float) desired min of distribution
max_time: (float) desired max of distribution
atol: (float) precision of deviation from mean
Returns:
data: (np.array) jitter for each trial
'''
def generate_data(n_trials, scale=5, min_time=2, max_time=12):
data = []
i=0
while i < n_trials:
            datam = np.random.exponential(scale=scale)  # use the passed-in scale rather than a hard-coded 5
if (datam > min_time) & (datam < max_time):
data.append(datam)
i+=1
return data
mean_diff = False
    while not mean_diff:  # logical 'not'; bitwise '~' on a plain bool never becomes falsy
data = generate_data(n_trials, min_time=min_time, max_time=max_time)
mean_diff = np.isclose(np.mean(data), mean_time, rtol=0, atol=atol)
return data
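# Usage sketch (illustrative): draw 20 inter-trial intervals whose mean is
# within 0.2 s of 5 s and whose individual values fall between 2 s and 12 s.
#
#     itis = generate_jitter(20, mean_time=5, min_time=2, max_time=12, atol=.2)
#     assert all(2 < t < 12 for t in itis)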
|
"""
Timed Tweet publishing
"""
from threading import Timer, Event
from datetime import datetime, timedelta
from collections import deque
from time import sleep
from random import uniform
from tweepy import API
from tweepy.models import Status
from tweepy.error import TweepError
from tweetfeeder.logs import Log
from tweetfeeder.file_io.models import Feed, Stats
from tweetfeeder.exceptions import TweetFeederError, LoadFeedError, NoTimerError, ExistingTimerError
from tweetfeeder.file_io.config import Config
class TweetLoop():
''' Interprets TweetFeeder configuration to publish Tweets on a schedule '''
def __init__(self, config: Config, feed: Feed, stats: Stats = None):
"""
Creates an object capable of timed publishing of Tweets.
Automatically starts if config.functionality.Tweet
"""
self.config = config
self.api = API(self.config.authorization, retry_count=1, retry_delay=10, wait_on_rate_limit=True)
self.feed: Feed = feed
self.stats: Stats = stats or Stats()
self.current_index: int = 0 #Set in start
self.current_timer: Timer = None
self._current_started = datetime.now()
self.lock: Event = Event()
self.timers: deque = deque()
if config.functionality.Tweet:
self.start()
def get_next_tweet_datetime(self):
''' Gets the next datetime at which tweeting will occur. '''
# Supply immediate times if no tweet times
if not self.config.tweet_times:
Log.debug("TWT.datetime", "No tweet times; tweet NOW")
return (
datetime.now() +
timedelta(seconds=self.config.min_tweet_delay*0.2)
)
if self.config.tweet_times:
final_time = self.config.tweet_times[-1]
now_t = datetime.now()
next_t = now_t.replace(
hour=final_time.hour,
minute=final_time.minute,
second=0,
microsecond=0)
Log.debug("TWT.datetime", "Compare now {} to next {}".format(now_t, next_t))
if now_t > next_t: #The final time lies before the current
next_t = next_t + timedelta(days=1)
if self.config.rand_deviation: #Add random deviation in minutes
next_t = next_t + timedelta(minutes=(self.config.rand_deviation * uniform(-1, 1)))
Log.debug("TWT.datatime", "Added random deviation to next {}".format(next_t))
for time in self.config.tweet_times:
next_t = next_t.replace(hour=time.hour, minute=time.minute)
if now_t < next_t: # If next_t is in the future
return next_t.replace(second=0)
#Failure
return None
def start(self):
''' Begin the tweet loop '''
if not self.is_running():
self.lock.set()
self.current_index = self.stats.last_feed_index
Log.debug("TWT.start", "Set current index to " + str(self.current_index))
# Add the next timer tweet starting from
# the last successfully tweeted index
self._next()
self.lock.clear()
else:
Log.warning("TWT.start", "Couldn't start: Loop is already running")
def _next(self):
''' When only one timer is left, queue up more '''
# Replenish timers when all queued timers have been popped off
if not self.timers:
Log.debug("TWT.next", "Creating next timers")
# Check to see that the current_index hasn't reached the end of the feed
if self.current_index >= self.feed.total_tweets:
if self.stats.times_rerun < self.config.looping_max_times:
# If looping's enabled, loop the index around
Log.info("TWT.next", "Looping back to start of feed.")
self.stats.times_rerun = self.stats.times_rerun + 1
self.stats.last_rerun_index = self.current_index
self.current_index = 0
else:
# Terminate loop
Log.info("TWT.next", "Reached end of feed, but not allowed to loop.")
self.stop()
return False
# Check to see that the current_index has not surpassed a previous rerun
if self.stats.last_rerun_index > 0 and self.current_index > self.stats.last_rerun_index:
self.stats.times_rerun = 0 # Restore normal tweeting mode
# make_tweet_timers will start searching from current_index,
# but will continue iterating down the feed until it finds timers
# it can actually use (this is important in rerun mode)
index_inc = 0
for timer in self._make_tweet_timers(self.current_index):
#_make_tweet_timers passes back None for spots where reruns are not allowed
index_inc += 1
if timer:
# Skip None, but count it as a passed index
self.timers.append(timer)
Log.debug("TWT.next", "Timer: " + str(timer))
if self.timers: # Set first timer to wait until next tweet time
self.timers[0].interval = (
(self.get_next_tweet_datetime() - datetime.now()).total_seconds()
)
# If a rest_period is required, add it as a final Timer
# This can be used to alternate between tweet times on different days
# This does not affect index_inc
if self.config.rest_period:
self.timers.append(
Timer(abs(self.config.rest_period), self._next)
)
# Update current index with the feed entries both used and skipped
self.current_index += index_inc
if self.current_timer and not self.lock.is_set() and self.current_timer.args:
# Current timer exists, but hasn't tweeted yet; fast forward
self.current_timer.cancel()
Log.debug("TWT.next", "Fast forward")
self._tweet(*self.current_timer.args)
# Update queued timer intervals
elif self.timers:
# current_timer is finishing up tweeting or doesn't exist;
# pop off a timer and start it
self.current_timer = self.timers.popleft()
self.current_timer.start()
self._current_started = datetime.now()
Log.debug("TWT.next", "Starting new timer with interval {}".format(self.current_timer.interval))
else:
# No timers were created or the last timer was just a delay
Log.debug("TWT.next", "Forced into recursion as no timers were produced")
return self._next()
return True
def stop(self):
        ''' Cancels the current timer, which prevents further timers from starting. '''
Log.info("TWT.stop", "Stopping current timer and clearing timer list.")
if self.current_timer:
self.current_timer.cancel()
self.timers.clear()
def _make_tweet_timers(self, from_index: int):
''' Returns a tweet timer (multiple if chained), all with the same interval. '''
# This can throw a LoadFeedError
Log.debug("TWT.make_timers", "Making tweet timers starting from {}".format(from_index))
try:
next_tweets = self.feed.get_tweets(from_index)
except LoadFeedError:
return [None] #Returning one None will increase the current index, at least
timers = []
for idx, t_data in enumerate(next_tweets):
# If rerunning, skip tweets which don't have a True "rerun" trait
if self.stats.times_rerun > 0 and not t_data['rerun']:
timers.append(None)
else:
timers.append(
Timer(self.config.min_tweet_delay, self._tweet, (t_data, from_index+idx))
)
return timers
def _tweet(self, data: dict, index: int):
''' Tweet, then signal for the next to begin '''
assert not self.lock.is_set()
self.lock.set()
success = 1
if self.config.functionality.Online:
Log.debug("TWT.tweet", "update_status using {}".format(data['title']))
try:
status = self.api.update_status(data['text'])
except TweepError as e: #TODO: Switch over to Tweepy's retry system, configurable when creating API
Log.error("TWT.tweet", str(e))
success = 0
else:
Log.debug("TWT.tweet (id)", "Status ID: {}".format(status.id))
self.stats.register_tweet(status.id, data['title'])
else:
Log.info("TWT.tweet", data['title'])
self.stats.last_feed_index = index + success
self._next()
self.lock.clear()
def wait_for_tweet(self, timeout=None, timer_expected=True, last_timer=False):
''' Hangs up the calling thread while the CURRENT timer loops. '''
if self.current_timer and not self.current_timer.finished.is_set() and not last_timer:
return self.current_timer.finished.wait(timeout)
search = self.timers
if last_timer:
search = reversed(self.timers)
for timer in search:
if not timer.finished.is_set():
Log.debug("TWT.wait", "Selected timer: " + str(timer))
return timer.finished.wait(timeout)
if timer_expected:
raise NoTimerError("No tweet timers available to wait for")
def time_until_tweet(self):
''' Returns the amount of time until the current timer finishes naturally. '''
if self.is_running():
return self.current_timer.interval - (datetime.now() - self._current_started).total_seconds()
else:
return -1
def force_tweet(self):
''' Forces the oldest timer to finish immediately. '''
self._next()
def is_running(self):
''' Returns true if the TweetLoop has non-popped timers. '''
if self.lock.is_set() or (self.current_timer and not self.current_timer.finished.is_set()):
return True
return False
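# Usage sketch (illustrative only; the exact Config and Feed constructors depend
# on tweetfeeder's file_io layer, so the calls and paths below are hypothetical):
#
#     config = Config(from_file="config.json")   # assumed constructor
#     feed = Feed(config.feed_filepath)           # assumed constructor
#     loop = TweetLoop(config, feed)              # starts automatically if functionality.Tweet
#     loop.wait_for_tweet(timeout=60)             # block until the next tweet fires
#     loop.stop()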
|
There is nothing more beautiful on the seas than a fully-rigged Tall Ship with all masts flying ripping its way through the ocean.
Darrell has broken painting Tall Ships into several core building blocks, spanning over 30 hours of video instruction. Each video in the series covers one of these core building blocks. In each lesson Darrell spends over two hours teaching and demonstrating one technique after another.
Everything you’ll need to prepare to paint Tall Ships.
YES! I want the Basic Techniques of Oil Painting Tall Ships.
Click This Link To See A short Preview of this video lesson.
Shows how to paint the front side of billowing sails.
Click This Link To See the full 2 hour video of this lesson.
A Brief History of Tall Ships is provided to help with painting hulls.
Some attention is given to water as it reflects off hulls during the sailing process.
A Clipper ship is an extremely fast wooden, 3-mast, small cargo tall ship with square rigging (sails) that was quite popular during the middle third of the 19th century, beginning around 1843. A transatlantic and transpacific ship, its most common use was shipping trade products from England to its colonies and China, as well as on the New York to San Francisco routes. Ship owners typically regained their investment within one round trip. The Clipper essentially embodied the romance and passion of sailing during its lifetime for many Americans and Britons.
This is the first of 3 lessons that will have you painting three different views of a Tall Ship.
Ocean spray and splash are covered in detail as the Clipper cuts its path.
In this lesson you will complete an entire painting.
The Flying Cloud is one of the most famous clippers of all time. Its captain, Creesy, often brought his wife on the New York to San Francisco route. She served as the ship's navigator, which was unheard of in those days. Yet twice she broke the world record for the fastest time between New York and San Francisco.
According to legend, after 40-45 years of service (a typical tall ship's lifetime) the Flying Cloud ran aground and was burned.
This is the second of 3 lessons that will have you painting three different views of a Tall Ship.
Relaxing in Gloucester, MA one evening, my wife and I happened to come across an elegant little café in which the owner had a large number of scaled ship models on display. I was extremely fascinated by the Flying Cloud model and took photos from every conceivable angle. In this video lesson you'll paint the rear view of a clipper ship, thereby completing your 'walk about' of the clipper ship.
This is the third of 3 lessons that will have you painting three different views of a Tall Ship.
You will learn how to paint sails billowing opposite of the front view.
The Brig is generally a 2-mast, square-rigged tall ship whose use has been traced as far back as the 17th century. This ship served either in naval combat or as a merchant cargo vessel. It is fast and maneuverable, and this latter trait made the Brig quite attractive as a naval vessel carrying 10-12 guns. Although quite a popular pirates' ship, very few American or Caribbean pirates had Brigs; the problem with the Brig is the large crew required.
The particular view here shows the decking and cabin of the Brig, so the student learns how to add these elements to a painting.
This is a sepia style painting and the student learns how to alter values to achieve form, shape and drama.
Schooners evolved from a number of 2-masted, gaff-rigged vessels in the late 1600s. They quickly became the vessel of choice for coastal voyages. They carried a respectable amount of cargo and required few men in the crew. Most schooners were working ships, but a number of very wealthy businessmen outfitted them as luxury craft for pleasure and travel. Ships did not need a second mast unless they were 50 feet or longer, so the schooner is the low end of multi-mast Tall Ships. Ships whose size could only support one mast were called sloops or cutters.
This lesson teaches the components of one of America's most popular coastal tall ships.
The painting is a side view to illustrate the rear gaff sail construction typical of schooners.
This lesson teaches the components of one of America’s most agile tall ships.
The painting is a side view to illustrate the sail plan typically associated with a bark.
We all remember Peter Pan and Captain Hook's valiant pirates. Every young man I know wants a pirate ship painting. The golden era of pirates in America lasted only about 75 years, beginning in 1650 and ending in the mid 1720s with the death of Blackbeard. Legend claims Blackbeard had nearly 200 followers and one of the largest pirate fleets: a 10-gun brigantine, a 12-gun brigantine and a 42-gun retrofitted French merchant ship, Blackbeard's Queen Anne's Revenge.
This is a rear view composition with sails wildly snapping in the wind.
This is a painting designed specifically to show the student how to paint a night-time Tall Ship using a pre-primed dark canvas.
Introduces how to paint a glowing moon.
The power of a dramatic horizon.
Demonstrates how to combine all four elements above into a Tall Ship.
|
"""
Command line interface for vcdb.
"""
# Copyright (C) 2016 Thomas Aglassinger.
# Distributed under the GNU Lesser General Public License v3 or later.
import argparse
import logging
import os
import sys
import tempfile
from sqlalchemy.exc import SQLAlchemyError
import vcdb
import vcdb.common
import vcdb.subversion
_log = logging.getLogger('vcdb')
def vcdb_command(arguments=None):
result = 1
if arguments is None:
arguments = sys.argv[1:]
default_database = 'sqlite:///' + os.path.join(tempfile.gettempdir(), 'vcdb.db')
parser = argparse.ArgumentParser(description='build SQL database from version control repository')
parser.add_argument('repository', metavar='REPOSITORY', help='URI to repository')
parser.add_argument(
'database', metavar='DATABASE', nargs='?', default=default_database,
help='URI for sqlalchemy database engine; default: %s' % default_database)
parser.add_argument('--verbose', '-v', action='store_true', help='explain what is being done')
parser.add_argument('--version', action='version', version='%(prog)s ' + vcdb.__version__)
args = parser.parse_args(arguments)
if args.verbose:
_log.setLevel(logging.DEBUG)
try:
_log.info('connect to database %s', args.database)
session = vcdb.common.vcdb_session(args.database)
vcdb.subversion.update_repository(session, args.repository)
_log.info('finished')
result = 0
except KeyboardInterrupt:
_log.error('interrupted as requested by user')
except OSError as error:
_log.error(error)
except SQLAlchemyError as error:
_log.error('cannot access database: %s', error)
except Exception as error:
_log.exception(error)
return result
def main():
logging.basicConfig(level=logging.INFO)
sys.exit(vcdb_command())
if __name__ == '__main__':
main()
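# Usage sketch (illustrative; the repository URI is hypothetical): vcdb_command()
# accepts an argv-style list and returns a process exit code.
#
#     exit_code = vcdb_command([
#         'https://svn.example.com/project/trunk',
#         'sqlite:////tmp/vcdb.db',
#         '--verbose',
#     ])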
|
DELIGHTFUL MOM STUFF: Wisdom: Public, Private or Christian Education?
This decision comes in the blink of an eye; I can't believe we are already having to think about it! As you can see, there is no right answer or formula, but it is fun to hear what worked for others!
-CD: I think it is important to step back and ask what the goal of education is before we can answer what system or place best meets that goal. Read Proverbs 9:10. Is education just an accumulation of facts with a diploma at the end or is it the accumulation of truth, knowledge, wisdom, and insight? If it is the latter, then we, as parents are responsible for finding and giving that to our children.
-CB: Evaluating this now as a GrandMother, I am so glad that we did Christian Education. Although I would have changed many circumstances, my Children learned to love the Scriptures of our one and only God. Watching them accept Jesus at early ages played out well in both of their lives.
-CC: We never had our children in public school, so I'm not sure I am qualified to even answer this one, but I will say that having the kids in Christian schools through middle school really gave them the foundation that we felt was important. To know that the school was walking with us as we were trying to teach them about the Lord and salvation really gave them the firm foundation that they needed spiritually as they left for high school. High school was also private and probably sheltered them a lot, but looking back that was OK! They were introduced to the real world soon enough, and honestly I think they had the maturity by then to make better choices than maybe they would have at an earlier age.
-JS: My dad was a public school teacher and administrator so we are public. I think our area has several options though and would support either.
-BF: This is an individual decision based on so many things, like quality of the education in your area, the particular child, etc. For us, I believe Christian Education was the best thing we ever did. Those were dollars well spent! It provided a little protection from a mess of a world!
-AA: For our kids and our public school situation in our city, Christian education was the way to go for us. We have nothing against public education; we just felt our kids could benefit the most from Christian education. We didn't necessarily see the real fruit until they reached high school; there was just something to be said about where they went to be with friends and who they had as friends. Most came from Christian homes with somewhat the same principles and rules as we had. Also, once they went to a very secular, liberal university, they were very grounded in their faith and values.
-JM: Since I have been to every kind of school: Christian (Lutheran), Christian (non-denomination), Public, Private, Boarding School, Catholic, Large State University and Small Christian University- I am always going back and forth about what I want for my kids. I do know this...it depends on the school, the town and the kid! My brother who was one grade older, had a different experience at each school than I did. IF, Lord willing on funding, we are able, I would like my kids to receive a Christian education in the early years at least. I think the foundation is SO important in those years- meanwhile we are praying about it...a lot! It is a really big deal who you choose to influence your child DAILY at school!
|
import os
from setuptools import setup
project_dir = os.path.abspath(os.path.dirname(__file__))
long_descriptions = []
for rst in ('README.rst', 'LICENSE.rst'):
with open(os.path.join(project_dir, rst), 'r') as f:
long_descriptions.append(f.read())
setup(name='tempodb-archive',
version='1.0.0',
description='Archive TempoDB Datapoints',
long_description='\n\n'.join(long_descriptions),
author='Emmanuel Levijarvi',
author_email='[email protected]',
url='https://github.com/eman/tempodb-archive',
license='BSD',
py_modules=['tempodb_archive'],
install_requires=['tempodb'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Utilities',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
],
keywords='tempodb archive',
entry_points={
'console_scripts': ['tempodb-archive=tempodb_archive:main'],
})
|
At only 2 MB, this free internet browser provides a large number of useful features and add-ons, including AdBlock, Flash Player, Speed Mode, Full Screen Mode, Incognito Mode, No Image Mode, etc. This makes APUS Browser the best free, full-featured, private browser for Android.
With the best AdBlock add-on, this private browser can effectively block annoying ads, pop-ups, and banners to save traffic, giving you a perfectly clean and clear browsing experience.
1. Add Twitter flow in India, Indonesia, US, Brazil, Russia, Thailand.
2. With better news flow on homescreen comes better user experience.
3. Algorithm optimisation for news recommendations.
|
from abc import abstractmethod
from collections import MutableMapping, Mapping
from .decorators import apply_keyfunc
from functools import total_ordering
from itertools import repeat, izip
_SENTINEL = object()
@total_ordering
class AbstractNormalizedDict(MutableMapping):
"""A dictionary where keys are normalized through a given function
before being inserted in the dict.
All of dict's methods have been implemented so it should be possible to use
it as a drop-in replacement.
Subclasses should define a keyfunc method that takes one argument
(a key to be inserted/retrieved/deleted) and return a normalized version of it.
"""
@abstractmethod
def keyfunc(self, key):
pass
def __init__(self, map_or_seq=_SENTINEL, **kwargs):
"""Normalize the keys before inserting them in the internal dictionary.
The signature is (hopefully) the same as the one for dict.
"""
if map_or_seq is _SENTINEL:
args = []
elif isinstance(map_or_seq, Mapping):
args = [((self.keyfunc(k), v) for k, v in map_or_seq.items())]
else: # sequence of two-tuples
args = [((self.keyfunc(k), v) for k, v in map_or_seq)]
kwargs = {self.keyfunc(k): v for k, v in kwargs.iteritems()}
self._dict = dict(*args, **kwargs)
def copy(self):
return type(self)(self.iteritems())
@apply_keyfunc
def __getitem__(self, key):
return self._dict[key]
@apply_keyfunc
def __setitem__(self, key, value):
self._dict[key] = value
@apply_keyfunc
def __delitem__(self, key):
del self._dict[key]
@apply_keyfunc
def has_key(self, key):
return self._dict.has_key(key)
def __len__(self):
return len(self._dict)
def __iter__(self):
return iter(self._dict)
def viewitems(self):
return self._dict.viewitems()
def viewkeys(self):
return self._dict.viewkeys()
def viewvalues(self):
return self._dict.viewvalues()
@classmethod
def fromkeys(cls, seq, value=None):
return cls(izip(seq, repeat(value)))
def __cmp__(self, other):
return cmp(self._dict, other)
def __lt__(self, other):
return self._dict < other
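# Usage sketch (illustrative subclass, not part of the original module): a
# case-insensitive dictionary can be built by normalizing keys to lower case.
#
#     class CaseInsensitiveDict(AbstractNormalizedDict):
#         def keyfunc(self, key):
#             return key.lower()
#
#     d = CaseInsensitiveDict({'Content-Type': 'text/plain'})
#     d['CONTENT-TYPE']   # -> 'text/plain'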
|
The Classic cricket bat is a traditionally shaped all-rounder. Available in the lighter weights, it is suitable for all styles of play. It is available in five standards, full size and small, in various weights to choose from.
|
import argparse
import Bio
import Bio.Phylo
import gzip
import os, json, sys
import pandas as pd
import subprocess
import shlex
from contextlib import contextmanager
from treetime.utils import numeric_date
from collections import defaultdict
from pkg_resources import resource_stream
from io import TextIOWrapper
from .__version__ import __version__
from augur.io import open_file
from augur.util_support.color_parser import ColorParser
from augur.util_support.date_disambiguator import DateDisambiguator
from augur.util_support.metadata_file import MetadataFile
from augur.util_support.node_data_reader import NodeDataReader
from augur.util_support.shell_command_runner import ShellCommandRunner
class AugurException(Exception):
pass
def is_vcf(fname):
"""Convenience method to check if a file is a vcf file.
>>> is_vcf("./foo")
False
>>> is_vcf("./foo.vcf")
True
>>> is_vcf("./foo.vcf.GZ")
True
"""
return fname.lower().endswith(".vcf") or fname.lower().endswith(".vcf.gz")
def myopen(fname, mode):
if fname.endswith('.gz'):
import gzip
return gzip.open(fname, mode, encoding='utf-8')
else:
return open(fname, mode, encoding='utf-8')
def get_json_name(args, default=None):
if args.output_node_data:
return args.output_node_data
else:
if default:
print("WARNING: no name for the output file was specified. Writing results to %s."%default, file=sys.stderr)
return default
else:
raise ValueError("Please specify a name for the JSON file containing the results.")
def ambiguous_date_to_date_range(uncertain_date, fmt, min_max_year=None):
return DateDisambiguator(uncertain_date, fmt=fmt, min_max_year=min_max_year).range()
def read_metadata(fname, query=None):
return MetadataFile(fname, query).read()
def is_date_ambiguous(date, ambiguous_by="any"):
"""
Returns whether a given date string in the format of YYYY-MM-DD is ambiguous by a given part of the date (e.g., day, month, year, or any parts).
Parameters
----------
date : str
Date string in the format of YYYY-MM-DD
ambiguous_by : str
Field of the date string to test for ambiguity ("day", "month", "year", "any")
"""
date_components = date.split('-', 2)
if len(date_components) == 3:
year, month, day = date_components
elif len(date_components) == 2:
year, month = date_components
day = "XX"
else:
year = date_components[0]
month = "XX"
day = "XX"
# Determine ambiguity hierarchically such that, for example, an ambiguous
# month implicates an ambiguous day even when day information is available.
return any((
"X" in year,
"X" in month and ambiguous_by in ("any", "month", "day"),
"X" in day and ambiguous_by in ("any", "day")
))
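# Illustrative examples (not part of the original module):
#
#     is_date_ambiguous("2020-06-XX", ambiguous_by="day")    # True
#     is_date_ambiguous("2020-XX-15", ambiguous_by="month")  # True (an ambiguous month implicates the day)
#     is_date_ambiguous("2020-06-15", ambiguous_by="any")    # False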
def get_numerical_dates(meta_dict, name_col = None, date_col='date', fmt=None, min_max_year=None):
if fmt:
from datetime import datetime
numerical_dates = {}
for k,m in meta_dict.items():
v = m[date_col]
if type(v)!=str:
print("WARNING: %s has an invalid data string:"%k,v)
continue
elif 'XX' in v:
ambig_date = ambiguous_date_to_date_range(v, fmt, min_max_year)
if ambig_date is None or None in ambig_date:
numerical_dates[k] = [None, None] #don't send to numeric_date or will be set to today
else:
numerical_dates[k] = [numeric_date(d) for d in ambig_date]
else:
try:
numerical_dates[k] = numeric_date(datetime.strptime(v, fmt))
except:
numerical_dates[k] = None
else:
numerical_dates = {k:float(v) for k,v in meta_dict.items()}
return numerical_dates
class InvalidTreeError(Exception):
"""Represents an error loading a phylogenetic tree from a filename.
"""
pass
def read_tree(fname, min_terminals=3):
"""Safely load a tree from a given filename or raise an error if the file does
not contain a valid tree.
Parameters
----------
fname : str
name of a file containing a phylogenetic tree
min_terminals : int
minimum number of terminals required for the parsed tree as a sanity
check on the tree
Raises
------
InvalidTreeError
If the given file exists but does not seem to contain a valid tree format.
Returns
-------
Bio.Phylo :
BioPython tree instance
"""
T = None
supported_tree_formats = ["newick", "nexus"]
for fmt in supported_tree_formats:
try:
T = Bio.Phylo.read(fname, fmt)
# Check the sanity of the parsed tree to handle cases when non-tree
# data are still successfully parsed by BioPython. Too few terminals
# in a tree indicates that the input is not valid.
if T.count_terminals() < min_terminals:
T = None
else:
break
except ValueError:
# We cannot open the tree in the current format, so we will try
# another.
pass
# If the tree cannot be loaded, raise an error to that effect.
if T is None:
raise InvalidTreeError(
"Could not read the given tree %s using the following supported formats: %s" % (fname, ", ".join(supported_tree_formats))
)
return T
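# Usage sketch (illustrative; the filename is hypothetical):
#
#     tree = read_tree("results/tree.nwk")
#     print(tree.count_terminals())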
def read_node_data(fnames, tree=None):
return NodeDataReader(fnames, tree).read()
def write_json(data, file_name, indent=(None if os.environ.get("AUGUR_MINIFY_JSON") else 2), include_version=True):
"""
Write ``data`` as JSON to the given ``file_name``, creating parent directories
if necessary. The augur version is included as a top-level key "augur_version".
Parameters
----------
data : dict
data to write out to JSON
file_name : str
file name to write to
indent : int or None, optional
JSON indentation level. Default is `None` if the environment variable `AUGUR_MINIFY_JSON`
is truthy, else 1
include_version : bool, optional
Include the augur version. Default: `True`.
Raises
------
OSError
"""
#in case parent folder does not exist yet
parent_directory = os.path.dirname(file_name)
if parent_directory and not os.path.exists(parent_directory):
try:
os.makedirs(parent_directory)
except OSError: #Guard against race condition
if not os.path.isdir(parent_directory):
raise
if include_version:
data["generated_by"] = {"program": "augur", "version": get_augur_version()}
with open(file_name, 'w', encoding='utf-8') as handle:
json.dump(data, handle, indent=indent, sort_keys=True)
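# Usage sketch (illustrative; the output path is hypothetical). Parent
# directories are created as needed and the augur version is embedded under
# "generated_by":
#
#     write_json({"nodes": {}}, "results/node_data.json")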
def load_features(reference, feature_names=None):
#read in appropriately whether GFF or Genbank
#checks explicitly for GFF otherwise assumes Genbank
if not os.path.isfile(reference):
print("ERROR: reference sequence not found. looking for", reference)
return None
features = {}
if '.gff' in reference.lower():
        #looks for 'gene' and 'locus_tag' as best for TB
try:
from BCBio import GFF #Package name is confusing - tell user exactly what they need!
except ImportError:
print("ERROR: Package BCBio.GFF not found! Please install using \'pip install bcbio-gff\' before re-running.")
return None
limit_info = dict( gff_type = ['gene'] )
with open(reference, encoding='utf-8') as in_handle:
for rec in GFF.parse(in_handle, limit_info=limit_info):
for feat in rec.features:
if feature_names is not None: #check both tags; user may have used either
if "gene" in feat.qualifiers and feat.qualifiers["gene"][0] in feature_names:
fname = feat.qualifiers["gene"][0]
elif "locus_tag" in feat.qualifiers and feat.qualifiers["locus_tag"][0] in feature_names:
fname = feat.qualifiers["locus_tag"][0]
else:
fname = None
else:
if "gene" in feat.qualifiers:
fname = feat.qualifiers["gene"][0]
else:
fname = feat.qualifiers["locus_tag"][0]
if fname:
features[fname] = feat
if feature_names is not None:
for fe in feature_names:
if fe not in features:
print("Couldn't find gene {} in GFF or GenBank file".format(fe))
else:
from Bio import SeqIO
for feat in SeqIO.read(reference, 'genbank').features:
if feat.type=='CDS':
if "locus_tag" in feat.qualifiers:
fname = feat.qualifiers["locus_tag"][0]
if feature_names is None or fname in feature_names:
features[fname] = feat
elif "gene" in feat.qualifiers:
fname = feat.qualifiers["gene"][0]
if feature_names is None or fname in feature_names:
features[fname] = feat
elif feat.type=='source': #read 'nuc' as well for annotations - need start/end of whole!
features['nuc'] = feat
return features
def read_config(fname):
if not (fname and os.path.isfile(fname)):
print("ERROR: config file %s not found."%fname)
return defaultdict(dict)
try:
with open(fname, 'rb') as ifile:
config = json.load(ifile)
except json.decoder.JSONDecodeError as err:
print("FATAL ERROR:")
print("\tCouldn't parse the JSON file {}".format(fname))
print("\tError message: '{}'".format(err.msg))
print("\tLine number: '{}'".format(err.lineno))
print("\tColumn number: '{}'".format(err.colno))
print("\tYou must correct this file in order to proceed.")
sys.exit(2)
return config
def read_lat_longs(overrides=None, use_defaults=True):
coordinates = {}
# TODO: make parsing of tsv files more robust while allow for whitespace delimiting for backwards compatibility
def add_line_to_coordinates(line):
if line.startswith('#') or line.strip() == "":
return
fields = line.strip().split() if not '\t' in line else line.strip().split('\t')
if len(fields) == 4:
geo_field, loc = fields[0].lower(), fields[1].lower()
lat, long = float(fields[2]), float(fields[3])
coordinates[(geo_field, loc)] = {
"latitude": lat,
"longitude": long
}
else:
print("WARNING: geo-coordinate file contains invalid line. Please make sure not to mix tabs and spaces as delimiters (use only tabs):",line)
if use_defaults:
with resource_stream(__package__, "data/lat_longs.tsv") as stream:
with TextIOWrapper(stream, "utf-8") as defaults:
for line in defaults:
add_line_to_coordinates(line)
if overrides:
if os.path.isfile(overrides):
with open(overrides, encoding='utf-8') as ifile:
for line in ifile:
add_line_to_coordinates(line)
else:
print("WARNING: input lat/long file %s not found." % overrides)
return coordinates
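# Usage sketch (illustrative): coordinates are keyed by (geo_field, location),
# both lower-cased.
#
#     coords = read_lat_longs()
#     # e.g. coords[("country", "brazil")] -> {"latitude": ..., "longitude": ...}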
def read_colors(overrides=None, use_defaults=True):
return ColorParser(mapping_filename=overrides, use_defaults=use_defaults).mapping
def write_VCF_translation(prot_dict, vcf_file_name, ref_file_name):
"""
Writes out a VCF-style file (which seems to be minimally handleable
by vcftools and pyvcf) of the AA differences between sequences and the reference.
This is a similar format created/used by read_in_vcf except that there is one
of these dicts (with sequences, reference, positions) for EACH gene.
Also writes out a fasta of the reference alignment.
EBH 12 Dec 2017
"""
import numpy as np
#for the header
seqNames = list(prot_dict[list(prot_dict.keys())[0]]['sequences'].keys())
#prepare the header of the VCF & write out
header=["#CHROM","POS","ID","REF","ALT","QUAL","FILTER","INFO","FORMAT"]+seqNames
with open(vcf_file_name, 'w', encoding='utf-8') as the_file:
the_file.write( "##fileformat=VCFv4.2\n"+
"##source=NextStrain_Protein_Translation\n"+
"##FORMAT=<ID=GT,Number=1,Type=String,Description=\"Genotype\">\n")
the_file.write("\t".join(header)+"\n")
refWrite = []
vcfWrite = []
#go through for every gene/protein
for fname, prot in prot_dict.items():
sequences = prot['sequences']
ref = prot['reference']
positions = prot['positions']
#write out the reference fasta
refWrite.append(">"+fname)
refWrite.append(ref)
#go through every variable position
        #There are no deletions here, so it's simpler than for VCF nuc sequences!
for pi in positions:
pos = pi+1 #change numbering to match VCF not python
refb = ref[pi] #reference base at this position
#try/except is (much) faster than list comprehension!
pattern = []
for k,v in sequences.items():
try:
pattern.append(sequences[k][pi])
except KeyError:
pattern.append('.')
pattern = np.array(pattern)
#get the list of ALTs - minus any '.'!
uniques = np.unique(pattern)
uniques = uniques[np.where(uniques!='.')]
#Convert bases to the number that matches the ALT
j=1
for u in uniques:
pattern[np.where(pattern==u)[0]] = str(j)
j+=1
#Now convert these calls to #/# (VCF format)
calls = [ j+"/"+j if j!='.' else '.' for j in pattern ]
if len(uniques)==0:
print("UNEXPECTED ERROR WHILE CONVERTING TO VCF AT POSITION {}".format(str(pi)))
break
#put it all together and write it out
output = [fname, str(pos), ".", refb, ",".join(uniques), ".", "PASS", ".", "GT"] + calls
vcfWrite.append("\t".join(output))
#write it all out
with open(ref_file_name, 'w', encoding='utf-8') as the_file:
the_file.write("\n".join(refWrite))
with open(vcf_file_name, 'a', encoding='utf-8') as the_file:
the_file.write("\n".join(vcfWrite))
if vcf_file_name.lower().endswith('.gz'):
import os
#must temporarily remove .gz ending, or gzip won't zip it!
os.rename(vcf_file_name, vcf_file_name[:-3])
call = ["gzip", vcf_file_name[:-3]]
run_shell_command(" ".join(call), raise_errors = True)
shquote = shlex.quote
def run_shell_command(cmd, raise_errors=False, extra_env=None):
"""
Run the given command string via Bash with error checking.
Returns True if the command exits normally. Returns False if the command
exits with failure and "raise_errors" is False (the default). When
"raise_errors" is True, exceptions are rethrown.
If an *extra_env* mapping is passed, the provided keys and values are
overlayed onto the default subprocess environment.
"""
return ShellCommandRunner(cmd, raise_errors=raise_errors, extra_env=extra_env).run()
def first_line(text):
"""
Returns the first line of the given text, ignoring leading and trailing
whitespace.
"""
return text.strip().splitlines()[0]
def available_cpu_cores(fallback: int = 1) -> int:
"""
Returns the number (an int) of CPU cores available to this **process**, if
determinable, otherwise the number of CPU cores available to the
**computer**, if determinable, otherwise the *fallback* number (which
defaults to 1).
"""
try:
# Note that this is the correct function to use, not os.cpu_count(), as
# described in the latter's documentation.
#
# The reason, which the documentation does not detail, is that
# processes may be pinned or restricted to certain CPUs by setting
# their "affinity". This is not typical except in high-performance
# computing environments, but if it is done, then a computer with say
# 24 total cores may only allow our process to use 12. If we tried to
# naively use all 24, we'd end up with two threads across the 12 cores.
# This would degrade performance rather than improve it!
return len(os.sched_getaffinity(0))
except:
# cpu_count() returns None if the value is indeterminable.
return os.cpu_count() or fallback
def nthreads_value(value):
"""
Argument value validation and casting function for --nthreads.
"""
if value.lower() == 'auto':
return available_cpu_cores()
try:
return int(value)
except ValueError:
raise argparse.ArgumentTypeError("'%s' is not an integer or the word 'auto'" % value) from None
def get_parent_name_by_child_name_for_tree(tree):
'''
Return dictionary mapping child node names to parent node names
'''
parents = {}
for clade in tree.find_clades(order='level'):
for child in clade:
parents[child.name] = clade.name
return parents
def annotate_parents_for_tree(tree):
"""Annotate each node in the given tree with its parent.
>>> import io
>>> tree = Bio.Phylo.read(io.StringIO("(A, (B, C))"), "newick")
>>> not any([hasattr(node, "parent") for node in tree.find_clades()])
True
>>> tree = annotate_parents_for_tree(tree)
>>> tree.root.parent is None
True
>>> all([hasattr(node, "parent") for node in tree.find_clades()])
True
"""
tree.root.parent = None
for node in tree.find_clades(order="level"):
for child in node.clades:
child.parent = node
# Return the tree.
return tree
def json_to_tree(json_dict, root=True):
"""Returns a Bio.Phylo tree corresponding to the given JSON dictionary exported
by `tree_to_json`.
Assigns links back to parent nodes for the root of the tree.
Test opening a JSON from augur export v1.
>>> import json
>>> json_fh = open("tests/data/json_tree_to_nexus/flu_h3n2_ha_3y_tree.json", "r")
>>> json_dict = json.load(json_fh)
>>> tree = json_to_tree(json_dict)
>>> tree.name
'NODE_0002020'
>>> len(tree.clades)
2
>>> tree.clades[0].name
'NODE_0001489'
>>> hasattr(tree, "attr")
True
>>> "dTiter" in tree.attr
True
>>> tree.clades[0].parent.name
'NODE_0002020'
>>> tree.clades[0].branch_length > 0
True
Test opening a JSON from augur export v2.
>>> json_fh = open("tests/data/zika.json", "r")
>>> json_dict = json.load(json_fh)
>>> tree = json_to_tree(json_dict)
>>> hasattr(tree, "name")
True
>>> len(tree.clades) > 0
True
>>> tree.clades[0].branch_length > 0
True
"""
# Check for v2 JSON which has combined metadata and tree data.
if root and "meta" in json_dict and "tree" in json_dict:
json_dict = json_dict["tree"]
node = Bio.Phylo.Newick.Clade()
# v1 and v2 JSONs use different keys for strain names.
if "name" in json_dict:
node.name = json_dict["name"]
else:
node.name = json_dict["strain"]
if "children" in json_dict:
# Recursively add children to the current node.
node.clades = [json_to_tree(child, root=False) for child in json_dict["children"]]
# Assign all non-children attributes.
for attr, value in json_dict.items():
if attr != "children":
setattr(node, attr, value)
# Only v1 JSONs support a single `attr` attribute.
if hasattr(node, "attr"):
node.numdate = node.attr.get("num_date")
node.branch_length = node.attr.get("div")
if "translations" in node.attr:
node.translations = node.attr["translations"]
elif hasattr(node, "node_attrs"):
node.branch_length = node.node_attrs.get("div")
if root:
node = annotate_parents_for_tree(node)
return node
def get_augur_version():
"""
Returns a string of the current augur version.
"""
return __version__
def read_bed_file(bed_file):
"""Read a BED file and return a list of excluded sites.
Note: This function assumes the given file is a BED file. On parsing
failures, it will attempt to skip the first line and retry, but no
other error checking is attempted. Incorrectly formatted files will
raise errors.
Parameters
----------
bed_file : str
Path to the BED file
    Returns
    -------
list[int]:
Sorted list of unique zero-indexed sites
"""
mask_sites = []
try:
bed = pd.read_csv(bed_file, sep='\t', header=None, usecols=[1,2],
dtype={1:int,2:int})
except ValueError:
# Check if we have a header row. Otherwise, just fail.
bed = pd.read_csv(bed_file, sep='\t', header=None, usecols=[1,2],
dtype={1:int,2:int}, skiprows=1)
print("Skipped row 1 of %s, assuming it is a header." % bed_file)
for _, row in bed.iterrows():
mask_sites.extend(range(row[1], row[2]))
return sorted(set(mask_sites))
def read_mask_file(mask_file):
"""Read a masking file and return a list of excluded sites.
Masking files have a single masking site per line, either alone
or as the second column of a tab-separated file. These sites
are assumed to be one-indexed, NOT zero-indexed. Incorrectly
formatted lines will be skipped.
Parameters
----------
mask_file : str
Path to the masking file
    Returns
    -------
list[int]:
Sorted list of unique zero-indexed sites
"""
mask_sites = []
with open(mask_file, encoding='utf-8') as mf:
for idx, line in enumerate(l.strip() for l in mf.readlines()):
if "\t" in line:
line = line.split("\t")[1]
try:
mask_sites.append(int(line) - 1)
except ValueError as err:
print("Could not read line %s of %s: '%s' - %s" %
(idx, mask_file, line, err), file=sys.stderr)
raise
return sorted(set(mask_sites))
def load_mask_sites(mask_file):
"""Load masking sites from either a BED file or a masking file.
Parameters
----------
mask_file: str
Path to the BED or masking file
Returns
-------
list[int]
Sorted list of unique zero-indexed sites
"""
if mask_file.lower().endswith(".bed"):
mask_sites = read_bed_file(mask_file)
else:
mask_sites = read_mask_file(mask_file)
print("%d masking sites read from %s" % (len(mask_sites), mask_file))
return mask_sites
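# Usage sketch (illustrative; filenames are hypothetical). BED files are
# detected by extension; everything else is parsed as a one-site-per-line mask:
#
#     sites = load_mask_sites("exclude.bed")     # zero-indexed, sorted, unique
#     sites = load_mask_sites("mask_sites.txt")  # one-indexed input, converted to zero-indexed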
VALID_NUCLEOTIDES = { # http://reverse-complement.com/ambiguity.html
"A", "G", "C", "T", "U", "N", "R", "Y", "S", "W", "K", "M", "B", "V", "D", "H", "-",
"a", "g", "c", "t", "u", "n", "r", "y", "s", "w", "k", "m", "b", "v", "d", "h", "-"
}
def read_strains(*files, comment_char="#"):
"""Reads strain names from one or more plain text files and returns the
set of distinct strains.
Strain names can be commented with full-line or inline comments. For
example, the following is a valid strain names file:
# this is a comment at the top of the file
strain1 # exclude strain1 because it isn't sequenced properly
strain2
# this is an empty line that will be ignored.
Parameters
----------
files : one or more str
one or more names of text files with one strain name per line
Returns
-------
set :
strain names from the given input files
"""
strains = set()
for input_file in files:
with open_file(input_file, 'r') as ifile:
for line in ifile:
# Allow comments anywhere in a given line.
strain_name = line.split(comment_char)[0].strip()
if len(strain_name) > 0:
strains.add(strain_name)
return strains
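# Usage sketch (illustrative; filenames are hypothetical). Strains from several
# include lists can be unioned in one call:
#
#     keep = read_strains("include_usa.txt", "include_brazil.txt")
#     is_kept = "strain1" in keep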
|
Even stronger, even more stable: GEDORE presents a new version of its ever-popular workster series.
Workshop pros working on mobile applications can always rely on these companions: the Tool case large WK 1041 by GEDORE.
Flexible, individual and indestructible: GEDORE L-BOXX®es are the perfect solution for mobile tool transportation.
The right torque is essential when changing tyres. That's why experienced pros swear by the TORCOFLEX 3550 UK by GEDORE.
GEDORE is helping the team of W&S Motorsport make pole position with exclusive workshop equipment and quality tools.
Whenever a strong grip is required, the high-quality pliers by GEDORE really play to their strengths.
This practical set appeals for its unbeatable GEDORE quality and perfect fit for precise results.
Strong torque wrenches for vehicle assembly and repairs in top GEDORE quality.
An unambiguous 3-brand strategy has been in place since the beginning of this year.
50 years old – and yet always new: it is half a century since the GEDORE DREMOMETER was developed.
|
#!/usr/bin/env python
from __future__ import absolute_import, print_function, division
from os.path import join
import contextlib
import os
import shutil
import subprocess
import re
import shlex
import runpy
import zipfile
import tarfile
import platform
import click
import pysftp
import fnmatch
# https://virtualenv.pypa.io/en/latest/userguide.html#windows-notes
# scripts and executables on Windows go in ENV\Scripts\ instead of ENV/bin/
if platform.system() == "Windows":
VENV_BIN = "Scripts"
else:
VENV_BIN = "bin"
if platform.system() == "Windows":
def Archive(name):
a = zipfile.ZipFile(name, "w")
a.add = a.write
return a
else:
def Archive(name):
return tarfile.open(name, "w:gz")
RELEASE_DIR = join(os.path.dirname(os.path.realpath(__file__)))
DIST_DIR = join(RELEASE_DIR, "dist")
ROOT_DIR = os.path.normpath(join(RELEASE_DIR, ".."))
RELEASE_SPEC_DIR = join(RELEASE_DIR, "specs")
VERSION_FILE = join(ROOT_DIR, "netlib/version.py")
BUILD_DIR = join(RELEASE_DIR, "build")
PYINSTALLER_TEMP = join(BUILD_DIR, "pyinstaller")
PYINSTALLER_DIST = join(BUILD_DIR, "binaries")
VENV_DIR = join(BUILD_DIR, "venv")
VENV_PIP = join(VENV_DIR, VENV_BIN, "pip")
VENV_PYINSTALLER = join(VENV_DIR, VENV_BIN, "pyinstaller")
project = {
"name": "mitmproxy",
"tools": ["pathod", "pathoc", "mitmproxy", "mitmdump", "mitmweb"],
"bdists": {
"mitmproxy": ["mitmproxy", "mitmdump", "mitmweb"],
"pathod": ["pathoc", "pathod"]
},
"dir": ROOT_DIR,
"python_version": "py2"
}
if platform.system() == "Windows":
project["tools"].remove("mitmproxy")
project["bdists"]["mitmproxy"].remove("mitmproxy")
def get_version():
return runpy.run_path(VERSION_FILE)["VERSION"]
def get_snapshot_version():
last_tag, tag_dist, commit = git("describe --tags --long").strip().rsplit(b"-", 2)
tag_dist = int(tag_dist)
if tag_dist == 0:
return get_version()
else:
# The wheel build tag (we use the commit) must start with a digit, so we include "0x"
return "{version}dev{tag_dist:04}-0x{commit}".format(
version=get_version(), # this should already be the next version
tag_dist=tag_dist,
commit=commit
)
def archive_name(project):
platform_tag = {
"Darwin": "osx",
"Windows": "win32",
"Linux": "linux"
}.get(platform.system(), platform.system())
if platform.system() == "Windows":
ext = "zip"
else:
ext = "tar.gz"
return "{project}-{version}-{platform}.{ext}".format(
project=project,
version=get_version(),
platform=platform_tag,
ext=ext
)
def wheel_name():
return "{project}-{version}-{py_version}-none-any.whl".format(
project=project["name"],
version=get_version(),
py_version=project["python_version"]
)
@contextlib.contextmanager
def empty_pythonpath():
"""
Make sure that the regular python installation is not on the python path,
which would give us access to modules installed outside of our virtualenv.
"""
pythonpath = os.environ.get("PYTHONPATH", "")
os.environ["PYTHONPATH"] = ""
yield
os.environ["PYTHONPATH"] = pythonpath
@contextlib.contextmanager
def chdir(path):
old_dir = os.getcwd()
os.chdir(path)
yield
os.chdir(old_dir)
def git(args):
with chdir(ROOT_DIR):
return subprocess.check_output(["git"] + shlex.split(args))
@click.group(chain=True)
def cli():
"""
mitmproxy build tool
"""
pass
@cli.command("contributors")
def contributors():
"""
Update CONTRIBUTORS.md
"""
with chdir(ROOT_DIR):
print("Updating CONTRIBUTORS...")
contributors_data = git("shortlog -n -s")
with open("CONTRIBUTORS", "w") as f:
f.write(contributors_data)
@cli.command("set-version")
@click.argument('version')
def set_version(version):
"""
Update version information
"""
print("Update versions...")
version = ", ".join(version.split("."))
print("Update %s..." % VERSION_FILE)
with open(VERSION_FILE, "rb") as f:
content = f.read()
new_content = re.sub(
r"IVERSION\s*=\s*\([\d,\s]+\)", "IVERSION = (%s)" % version,
content
)
with open(VERSION_FILE, "wb") as f:
f.write(new_content)
@cli.command("wheels")
def wheels():
"""
Build wheels
"""
with empty_pythonpath():
print("Building release...")
if os.path.exists(DIST_DIR):
shutil.rmtree(DIST_DIR)
print("Creating wheel for %s ..." % project["name"])
subprocess.check_call(
[
"python", "./setup.py", "-q",
"bdist_wheel", "--dist-dir", DIST_DIR,
],
cwd=project["dir"]
)
print("Creating virtualenv for test install...")
if os.path.exists(VENV_DIR):
shutil.rmtree(VENV_DIR)
subprocess.check_call(["virtualenv", "-q", VENV_DIR])
with chdir(DIST_DIR):
print("Installing %s..." % project["name"])
subprocess.check_call([VENV_PIP, "install", "-q", wheel_name()])
print("Running binaries...")
for tool in project["tools"]:
tool = join(VENV_DIR, VENV_BIN, tool)
print("> %s --version" % tool)
print(subprocess.check_output([tool, "--version"]))
print("Virtualenv available for further testing:")
print("source %s" % os.path.normpath(join(VENV_DIR, VENV_BIN, "activate")))
@cli.command("bdist")
@click.option("--use-existing-wheels/--no-use-existing-wheels", default=False)
@click.argument("pyinstaller_version", envvar="PYINSTALLER_VERSION", default="PyInstaller~=3.1.1")
@click.pass_context
def bdist(ctx, use_existing_wheels, pyinstaller_version):
"""
Build a binary distribution
"""
if os.path.exists(PYINSTALLER_TEMP):
shutil.rmtree(PYINSTALLER_TEMP)
if os.path.exists(PYINSTALLER_DIST):
shutil.rmtree(PYINSTALLER_DIST)
if not use_existing_wheels:
ctx.invoke(wheels)
print("Installing PyInstaller...")
subprocess.check_call([VENV_PIP, "install", "-q", pyinstaller_version])
for bdist_project, tools in project["bdists"].items():
with Archive(join(DIST_DIR, archive_name(bdist_project))) as archive:
for tool in tools:
# This is PyInstaller, so it messes up paths.
# We need to make sure that we are in the spec folder.
with chdir(RELEASE_SPEC_DIR):
print("Building %s binary..." % tool)
subprocess.check_call(
[
VENV_PYINSTALLER,
"--clean",
"--workpath", PYINSTALLER_TEMP,
"--distpath", PYINSTALLER_DIST,
# This is PyInstaller, so setting a
# different log level obviously breaks it :-)
# "--log-level", "WARN",
"%s.spec" % tool
]
)
# Test if it works at all O:-)
executable = join(PYINSTALLER_DIST, tool)
if platform.system() == "Windows":
executable += ".exe"
print("> %s --version" % executable)
subprocess.check_call([executable, "--version"])
archive.add(executable, os.path.basename(executable))
print("Packed {}.".format(archive_name(bdist_project)))
@cli.command("upload-release")
@click.option('--username', prompt=True)
@click.password_option(confirmation_prompt=False)
@click.option('--repository', default="pypi")
def upload_release(username, password, repository):
"""
Upload wheels to PyPI
"""
filename = wheel_name()
print("Uploading {} to {}...".format(filename, repository))
subprocess.check_call([
"twine",
"upload",
"-u", username,
"-p", password,
"-r", repository,
join(DIST_DIR, filename)
])
@cli.command("upload-snapshot")
@click.option("--host", envvar="SNAPSHOT_HOST", prompt=True)
@click.option("--port", envvar="SNAPSHOT_PORT", type=int, default=22)
@click.option("--user", envvar="SNAPSHOT_USER", prompt=True)
@click.option("--private-key", default=join(RELEASE_DIR, "rtool.pem"))
@click.option("--private-key-password", envvar="SNAPSHOT_PASS", prompt=True, hide_input=True)
@click.option("--wheel/--no-wheel", default=False)
@click.option("--bdist/--no-bdist", default=False)
def upload_snapshot(host, port, user, private_key, private_key_password, wheel, bdist):
"""
Upload snapshot to snapshot server
"""
with pysftp.Connection(host=host,
port=port,
username=user,
private_key=private_key,
private_key_pass=private_key_password) as sftp:
dir_name = "snapshots/v{}".format(get_version())
sftp.makedirs(dir_name)
with sftp.cd(dir_name):
files = []
if wheel:
files.append(wheel_name())
for bdist in project["bdists"].keys():
files.append(archive_name(bdist))
for f in files:
local_path = join(DIST_DIR, f)
remote_filename = f.replace(get_version(), get_snapshot_version())
symlink_path = "../{}".format(f.replace(get_version(), "latest"))
# Delete old versions
old_version = f.replace(get_version(), "*")
for f_old in sftp.listdir():
if fnmatch.fnmatch(f_old, old_version):
print("Removing {}...".format(f_old))
sftp.remove(f_old)
# Upload new version
print("Uploading {} as {}...".format(f, remote_filename))
with click.progressbar(length=os.stat(local_path).st_size) as bar:
sftp.put(
local_path,
"." + remote_filename,
callback=lambda done, total: bar.update(done - bar.pos)
)
# We hide the file during upload.
sftp.rename("." + remote_filename, remote_filename)
# update symlink for the latest release
if sftp.lexists(symlink_path):
print("Removing {}...".format(symlink_path))
sftp.remove(symlink_path)
sftp.symlink("v{}/{}".format(get_version(), remote_filename), symlink_path)
@cli.command("wizard")
@click.option('--next-version', prompt=True)
@click.option('--username', prompt="PyPI Username")
@click.password_option(confirmation_prompt=False, prompt="PyPI Password")
@click.option('--repository', default="pypi")
@click.pass_context
def wizard(ctx, next_version, username, password, repository):
"""
Interactive Release Wizard
"""
is_dirty = git("status --porcelain")
if is_dirty:
raise RuntimeError("Repository is not clean.")
# update contributors file
ctx.invoke(contributors)
# Build test release
ctx.invoke(bdist)
try:
click.confirm("Please test the release now. Is it ok?", abort=True)
except click.Abort:
# undo changes
git("checkout CONTRIBUTORS")
raise
# Everything ok - let's ship it!
git("tag v{}".format(get_version()))
git("push --tags")
ctx.invoke(
upload_release,
username=username, password=password, repository=repository
)
click.confirm("Now please wait until CI has built binaries. Finished?")
# version bump commit
ctx.invoke(set_version, version=next_version)
git("commit -a -m \"bump version\"")
git("push")
click.echo("All done!")
if __name__ == "__main__":
cli()
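# Example invocations (hypothetical; adjust the script name to however this
# file is installed). Each subcommand below is registered above via @cli.command:
#
#   python rtool.py wheels
#   python rtool.py bdist --use-existing-wheels
#   python rtool.py set-version 0.18
#   python rtool.py wizard --next-version 0.18 --username alice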
|
MAESTRO is a computer program for rationally-based design of large, complex thin-walled structures. In essence, MAESTRO is a synthesis of finite element analysis, failure (or limit state) analysis, and mathematical optimization, all of which is integrated under a user-friendly graphical interface.
By virtue of its 25-year history and worldwide user base, MAESTRO has been applied to a wide range of aluminum, composite, and steel structures: naval vessels from patrol boats to a 41,000-tonne carrier (USS Wasp), coast guard cutters, double-hull tankers, high-speed ferries, SWATH and catamaran ships, bulk carriers of all types (including molten sulfur), and many other (non-ship) types of structures. MAESTRO’s failure analysis results have been accurately correlated with in-service structural damage incurred by several ships, and used for the design of corrective modifications.
MAESTRO training and technical support are available worldwide. MAESTRO workshops have been provided to structural designers in numerous cities across Europe, North America, Asia, and Australia. Expert user assistance is available from structural engineers experienced with MAESTRO. MAESTRO is in use in 23 countries by over 90 organizations.
MAESTRO also has a strong worldwide presence in Academia and holds workshops for educators who are using (or wish to use) MAESTRO in their curriculum, to share ideas and approaches to improve their courses with MAESTRO. These workshops also serve to bring together professors, doctoral candidates, and others who are carrying out research work using MAESTRO, to share their experience, and increase the awareness of the MAESTRO developers with respect to the current demands on the software.
At MAESTRO’s core is a structural design tool developed to suit the needs of ship designers and naval architects. It’s noteworthy that the MAESTRO development staff and support team are themselves naval architects who understand the ship design and analysis process. Our team of developers/naval architects has also created the Orca3D marine design and analysis plug-in for Rhino, with capabilities in the areas of hull design and fairing, intact hydrostatics and stability, resistance prediction, and weight and cost tracking.
We invite you to explore the website and contact us for a detailed discussion regarding your ship design needs and efforts.
|
#!/usr/bin/env python
# Copyright (c) 2016-present, Gregory Szorc
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
"""Very hacky script for benchmarking zstd.
Like most benchmarks, results should be treated with skepticism.
"""
import io
import os
import struct
import sys
import time
import zlib
if sys.version_info[0] >= 3:
bio = io.BytesIO
else:
import cStringIO
bio = cStringIO.StringIO
import zstandard as zstd
def timer(fn, miniter=3, minwall=3.0):
"""Runs fn() multiple times and returns the results.
Runs for at least ``miniter`` iterations and ``minwall`` wall time.
"""
results = []
count = 0
# Ideally a monotonic clock, but doesn't matter too much.
wall_begin = time.time()
while True:
wstart = time.time()
start = os.times()
fn()
end = os.times()
wend = time.time()
count += 1
user = end[0] - start[0]
system = end[1] - start[1]
cpu = user + system
wall = wend - wstart
results.append((cpu, user, system, wall))
# Ensure we run at least ``miniter`` times.
if count < miniter:
continue
# And for ``minwall`` seconds.
elapsed = wend - wall_begin
if elapsed < minwall:
continue
break
return results
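# Note on consuming timer() results: each entry is a (cpu, user, system, wall)
# tuple. min(results) picks the run with the smallest CPU time (tuples compare
# element by element), and index 3 of that tuple is its wall-clock time;
# format_results() further below divides total_size by that wall time to report MB/s.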
BENCHES = []
def bench(mode, title, require_content_size=False,
simple=False, zlib=False, threads_arg=False,
chunks_as_buffer=False, decompressed_sizes_arg=False):
def wrapper(fn):
if not fn.__name__.startswith(('compress_', 'decompress_')):
raise ValueError('benchmark function must begin with '
'compress_ or decompress_')
fn.mode = mode
fn.title = title
fn.require_content_size = require_content_size
fn.simple = simple
fn.zlib = zlib
fn.threads_arg = threads_arg
fn.chunks_as_buffer = chunks_as_buffer
fn.decompressed_sizes_arg = decompressed_sizes_arg
BENCHES.append(fn)
return fn
return wrapper
@bench('discrete', 'compress() single use zctx')
def compress_one_use(chunks, opts):
for chunk in chunks:
zctx = zstd.ZstdCompressor(**opts)
zctx.compress(chunk)
@bench('discrete', 'compress() reuse zctx', simple=True)
def compress_reuse(chunks, opts):
zctx = zstd.ZstdCompressor(**opts)
for chunk in chunks:
zctx.compress(chunk)
@bench('discrete', 'multi_compress_to_buffer() w/ buffer input',
simple=True, threads_arg=True, chunks_as_buffer=True)
def compress_multi_compress_to_buffer_buffer(chunks, opts, threads):
zctx = zstd.ZstdCompressor(**opts)
zctx.multi_compress_to_buffer(chunks, threads=threads)
@bench('discrete', 'multi_compress_to_buffer() w/ list input',
threads_arg=True)
def compress_multi_compress_to_buffer_list(chunks, opts, threads):
zctx = zstd.ZstdCompressor(**opts)
zctx.multi_compress_to_buffer(chunks, threads=threads)
@bench('discrete', 'stream_reader()')
def compress_stream_reader(chunks, opts):
zctx = zstd.ZstdCompressor(**opts)
for chunk in chunks:
with zctx.stream_reader(chunk) as reader:
while reader.read(16384):
pass
@bench('discrete', 'write_to()')
def compress_write_to(chunks, opts):
zctx = zstd.ZstdCompressor(**opts)
for chunk in chunks:
b = bio()
with zctx.write_to(b) as compressor:
compressor.write(chunk)
@bench('discrete', 'write_to() w/ input size')
def compress_write_to_size(chunks, opts):
zctx = zstd.ZstdCompressor(**opts)
for chunk in chunks:
b = bio()
with zctx.write_to(b, size=len(chunk)) as compressor:
compressor.write(chunk)
@bench('discrete', 'read_to_iter()')
def compress_read_to_iter(chunks, opts):
zctx = zstd.ZstdCompressor(**opts)
for chunk in chunks:
for d in zctx.read_to_iter(chunk):
pass
@bench('discrete', 'read_to_iter() w/ input size')
def compress_read_to_iter_size(chunks, opts):
zctx = zstd.ZstdCompressor(**opts)
for chunk in chunks:
for d in zctx.read_to_iter(chunk, size=len(chunk)):
pass
@bench('discrete', 'compressobj()')
def compress_compressobj(chunks, opts):
zctx = zstd.ZstdCompressor(**opts)
for chunk in chunks:
cobj = zctx.compressobj()
cobj.compress(chunk)
cobj.flush()
@bench('discrete', 'compressobj() w/ input size')
def compress_compressobj_size(chunks, opts):
zctx = zstd.ZstdCompressor(**opts)
for chunk in chunks:
cobj = zctx.compressobj(size=len(chunk))
cobj.compress(chunk)
cobj.flush()
@bench('discrete', 'compress()', simple=True, zlib=True)
def compress_zlib_discrete(chunks, opts):
level = opts['zlib_level']
c = zlib.compress
for chunk in chunks:
c(chunk, level)
@bench('stream', 'compressobj()', simple=True, zlib=True)
def compress_zlib_compressobj(chunks, opts):
compressor = zlib.compressobj(opts['zlib_level'])
f = zlib.Z_SYNC_FLUSH
for chunk in chunks:
compressor.compress(chunk)
compressor.flush(f)
compressor.flush()
@bench('stream', 'write_to()')
def compress_stream_write_to(chunks, opts):
zctx = zstd.ZstdCompressor(**opts)
b = bio()
with zctx.write_to(b) as compressor:
for chunk in chunks:
compressor.write(chunk)
compressor.flush()
@bench('stream', 'compressobj()', simple=True)
def compress_stream_compressobj(chunks, opts):
zctx = zstd.ZstdCompressor(**opts)
compressor = zctx.compressobj()
flush = zstd.COMPRESSOBJ_FLUSH_BLOCK
for chunk in chunks:
compressor.compress(chunk)
compressor.flush(flush)
@bench('content-dict', 'compress()', simple=True)
def compress_content_dict_compress(chunks, opts):
zstd.ZstdCompressor(**opts).compress(chunks[0])
for i, chunk in enumerate(chunks[1:]):
d = zstd.ZstdCompressionDict(chunks[i])
zstd.ZstdCompressor(dict_data=d, **opts).compress(chunk)
@bench('content-dict', 'write_to()')
def compress_content_dict_write_to(chunks, opts, use_size=False):
zctx = zstd.ZstdCompressor(**opts)
b = bio()
with zctx.write_to(b, size=len(chunks[0]) if use_size else 0) as compressor:
compressor.write(chunks[0])
for i, chunk in enumerate(chunks[1:]):
d = zstd.ZstdCompressionDict(chunks[i])
b = bio()
zctx = zstd.ZstdCompressor(dict_data=d, **opts)
with zctx.write_to(b, size=len(chunk) if use_size else 0) as compressor:
compressor.write(chunk)
@bench('content-dict', 'write_to() w/ input size')
def compress_content_dict_write_to_size(chunks, opts):
compress_content_dict_write_to(chunks, opts, use_size=True)
@bench('content-dict', 'read_to_iter()')
def compress_content_dict_read_to_iter(chunks, opts, use_size=False):
zctx = zstd.ZstdCompressor(**opts)
size = len(chunks[0]) if use_size else 0
for o in zctx.read_to_iter(chunks[0], size=size):
pass
for i, chunk in enumerate(chunks[1:]):
d = zstd.ZstdCompressionDict(chunks[i])
zctx = zstd.ZstdCompressor(dict_data=d, **opts)
size = len(chunk) if use_size else 0
for o in zctx.read_to_iter(chunk, size=size):
pass
@bench('content-dict', 'read_to_iter() w/ input size')
def compress_content_dict_read_to_iter_size(chunks, opts):
compress_content_dict_read_to_iter(chunks, opts, use_size=True)
@bench('content-dict', 'compressobj()')
def compress_content_dict_compressobj(chunks, opts, use_size=False):
zctx = zstd.ZstdCompressor(**opts)
cobj = zctx.compressobj(size=len(chunks[0]) if use_size else 0)
cobj.compress(chunks[0])
cobj.flush()
for i, chunk in enumerate(chunks[1:]):
d = zstd.ZstdCompressionDict(chunks[i])
zctx = zstd.ZstdCompressor(dict_data=d, **opts)
cobj = zctx.compressobj(len(chunk) if use_size else 0)
cobj.compress(chunk)
cobj.flush()
@bench('content-dict', 'compressobj() w/ input size')
def compress_content_dict_compressobj_size(chunks, opts):
compress_content_dict_compressobj(chunks, opts, use_size=True)
@bench('discrete', 'decompress() single use zctx', require_content_size=True)
def decompress_one_use(chunks, opts):
for chunk in chunks:
zctx = zstd.ZstdDecompressor(**opts)
zctx.decompress(chunk)
@bench('discrete', 'decompress() reuse zctx', require_content_size=True,
simple=True)
def decompress_reuse(chunks, opts):
zctx = zstd.ZstdDecompressor(**opts)
for chunk in chunks:
zctx.decompress(chunk)
@bench('discrete', 'decompress()', simple=True, zlib=True)
def decompress_zlib_decompress(chunks):
d = zlib.decompress
for chunk in chunks:
d(chunk)
@bench('discrete', 'multi_decompress_to_buffer() w/ buffer input + sizes',
simple=True, threads_arg=True, decompressed_sizes_arg=True,
chunks_as_buffer=True)
def decompress_multi_decompress_to_buffer_buffer_and_size(chunks, opts, threads,
decompressed_sizes):
zctx = zstd.ZstdDecompressor(**opts)
zctx.multi_decompress_to_buffer(chunks,
decompressed_sizes=decompressed_sizes,
threads=threads)
@bench('discrete', 'multi_decompress_to_buffer() w/ buffer input',
require_content_size=True, threads_arg=True, chunks_as_buffer=True)
def decompress_multi_decompress_to_buffer_buffer(chunks, opts, threads):
zctx = zstd.ZstdDecompressor(**opts)
zctx.multi_decompress_to_buffer(chunks, threads=threads)
@bench('discrete', 'multi_decompress_to_buffer() w/ list of bytes input + sizes',
threads_arg=True, decompressed_sizes_arg=True)
def decompress_multi_decompress_to_buffer_list_and_sizes(chunks, opts, threads,
decompressed_sizes):
zctx = zstd.ZstdDecompressor(**opts)
zctx.multi_decompress_to_buffer(chunks,
decompressed_sizes=decompressed_sizes,
threads=threads)
@bench('discrete', 'multi_decompress_to_buffer() w/ list of bytes input',
require_content_size=True, threads_arg=True)
def decompress_multi_decompress_to_buffer_list(chunks, opts, threads):
zctx = zstd.ZstdDecompressor(**opts)
zctx.multi_decompress_to_buffer(chunks, threads=threads)
@bench('discrete', 'stream_reader()')
def decompress_stream_reader(chunks, opts):
zctx = zstd.ZstdDecompressor(**opts)
for chunk in chunks:
with zctx.stream_reader(chunk) as reader:
while reader.read(16384):
pass
@bench('discrete', 'write_to()')
def decompress_write_to(chunks, opts):
zctx = zstd.ZstdDecompressor(**opts)
for chunk in chunks:
with zctx.write_to(bio()) as decompressor:
decompressor.write(chunk)
@bench('discrete', 'read_to_iter()')
def decompress_read_to_iter(chunks, opts):
zctx = zstd.ZstdDecompressor(**opts)
for chunk in chunks:
for d in zctx.read_to_iter(chunk):
pass
@bench('discrete', 'decompressobj()')
def decompress_decompressobj(chunks, opts):
zctx = zstd.ZstdDecompressor(**opts)
for chunk in chunks:
decompressor = zctx.decompressobj()
decompressor.decompress(chunk)
@bench('stream', 'decompressobj()', simple=True, zlib=True)
def decompress_zlib_stream(chunks):
dobj = zlib.decompressobj()
for chunk in chunks:
dobj.decompress(chunk)
dobj.flush()
@bench('stream', 'write_to()')
def decompress_stream_write_to(chunks, opts):
zctx = zstd.ZstdDecompressor(**opts)
with zctx.write_to(bio()) as decompressor:
for chunk in chunks:
decompressor.write(chunk)
@bench('stream', 'decompressobj()', simple=True)
def decompress_stream_decompressobj(chunks, opts):
zctx = zstd.ZstdDecompressor(**opts)
decompressor = zctx.decompressobj()
for chunk in chunks:
decompressor.decompress(chunk)
@bench('content-dict', 'decompress()', require_content_size=True)
def decompress_content_dict_decompress(chunks, opts):
zctx = zstd.ZstdDecompressor(**opts)
last = zctx.decompress(chunks[0])
for chunk in chunks[1:]:
d = zstd.ZstdCompressionDict(last)
zctx = zstd.ZstdDecompressor(dict_data=d, **opts)
last = zctx.decompress(chunk)
@bench('content-dict', 'write_to()')
def decompress_content_dict_write_to(chunks, opts):
zctx = zstd.ZstdDecompressor(**opts)
b = bio()
with zctx.write_to(b) as decompressor:
decompressor.write(chunks[0])
last = b.getvalue()
for chunk in chunks[1:]:
d = zstd.ZstdCompressionDict(last)
zctx = zstd.ZstdDecompressor(dict_data=d, **opts)
b = bio()
with zctx.write_to(b) as decompressor:
decompressor.write(chunk)
last = b.getvalue()
@bench('content-dict', 'read_to_iter()')
def decompress_content_dict_read_to_iter(chunks, opts):
zctx = zstd.ZstdDecompressor(**opts)
last = b''.join(zctx.read_to_iter(chunks[0]))
for chunk in chunks[1:]:
d = zstd.ZstdCompressionDict(last)
zctx = zstd.ZstdDecompressor(dict_data=d, **opts)
last = b''.join(zctx.read_to_iter(chunk))
@bench('content-dict', 'decompressobj()')
def decompress_content_dict_decompressobj(chunks, opts):
zctx = zstd.ZstdDecompressor(**opts)
last = zctx.decompressobj().decompress(chunks[0])
for chunk in chunks[1:]:
d = zstd.ZstdCompressionDict(last)
zctx = zstd.ZstdDecompressor(dict_data=d, **opts)
last = zctx.decompressobj().decompress(chunk)
@bench('content-dict', 'decompress_content_dict_chain()',
simple=True)
def decompress_content_dict_chain_api(chunks, opts):
zctx = zstd.ZstdDecompressor(**opts)
zctx.decompress_content_dict_chain(chunks)
def get_chunks(paths, limit_count, encoding):
chunks = []
def process_file(p):
with open(p, 'rb') as fh:
data = fh.read()
if not data:
return
if encoding == 'raw':
pass
elif encoding == 'zlib':
data = zlib.decompress(data)
else:
raise Exception('unexpected chunk encoding: %s' % encoding)
chunks.append(data)
for path in paths:
if os.path.isdir(path):
for root, dirs, files in os.walk(path):
dirs.sort()
for f in sorted(files):
try:
process_file(os.path.join(root, f))
if limit_count and len(chunks) >= limit_count:
return chunks
except IOError:
pass
else:
process_file(path)
if limit_count and len(chunks) >= limit_count:
return chunks
return chunks
def get_benches(mode, direction, zlib=False):
assert direction in ('compress', 'decompress')
prefix = '%s_' % direction
fns = []
for fn in BENCHES:
if not fn.__name__.startswith(prefix):
continue
if fn.mode != mode:
continue
if fn.zlib != zlib:
continue
fns.append(fn)
return fns
def format_results(results, title, prefix, total_size):
best = min(results)
rate = float(total_size) / best[3]
print('%s %s' % (prefix, title))
print('%.6f wall; %.6f CPU; %.6f user; %.6f sys %.2f MB/s (best of %d)' % (
best[3], best[0], best[1], best[2], rate / 1000000.0, len(results)))
def bench_discrete_zlib_compression(chunks, opts):
total_size = sum(map(len, chunks))
for fn in get_benches('discrete', 'compress', zlib=True):
results = timer(lambda: fn(chunks, opts))
format_results(results, fn.title, 'compress discrete zlib', total_size)
def bench_discrete_zlib_decompression(chunks, total_size):
for fn in get_benches('discrete', 'decompress', zlib=True):
results = timer(lambda: fn(chunks))
format_results(results, fn.title, 'decompress discrete zlib',
total_size)
def bench_discrete_compression(chunks, opts, cover=False, threads=None):
total_size = sum(map(len, chunks))
if 'dict_data' in opts:
if cover:
prefix = 'compress discrete cover dict'
else:
prefix = 'compress discrete dict'
else:
prefix = 'compress discrete'
for fn in get_benches('discrete', 'compress'):
chunks_arg = chunks
kwargs = {}
if fn.threads_arg:
kwargs['threads'] = threads
if fn.chunks_as_buffer:
s = struct.Struct('=QQ')
offsets = io.BytesIO()
current_offset = 0
for chunk in chunks:
offsets.write(s.pack(current_offset, len(chunk)))
current_offset += len(chunk)
chunks_arg = zstd.BufferWithSegments(b''.join(chunks),
offsets.getvalue())
results = timer(lambda: fn(chunks_arg, opts, **kwargs))
format_results(results, fn.title, prefix, total_size)
def bench_discrete_decompression(orig_chunks, compressed_chunks,
total_size, opts, cover=False,
threads=None):
dopts = {}
if opts.get('dict_data'):
dopts['dict_data'] = opts['dict_data']
if cover:
prefix = 'decompress discrete cover dict'
else:
prefix = 'decompress discrete dict'
else:
prefix = 'decompress discrete'
for fn in get_benches('discrete', 'decompress'):
if not opts.get('write_content_size') and fn.require_content_size:
continue
chunks_arg = compressed_chunks
kwargs = {}
if fn.threads_arg:
kwargs['threads'] = threads
# Pass compressed frames in a BufferWithSegments rather than a list
# of bytes.
if fn.chunks_as_buffer:
s = struct.Struct('=QQ')
offsets = io.BytesIO()
current_offset = 0
for chunk in compressed_chunks:
offsets.write(s.pack(current_offset, len(chunk)))
current_offset += len(chunk)
chunks_arg = zstd.BufferWithSegments(b''.join(compressed_chunks),
offsets.getvalue())
if fn.decompressed_sizes_arg:
# Ideally we'd use array.array here. But Python 2 doesn't support the
# Q format.
s = struct.Struct('=Q')
kwargs['decompressed_sizes'] = b''.join(s.pack(len(c)) for c in orig_chunks)
results = timer(lambda: fn(chunks_arg, dopts, **kwargs))
format_results(results, fn.title, prefix, total_size)
def bench_stream_compression(chunks, opts):
total_size = sum(map(len, chunks))
for fn in get_benches('stream', 'compress'):
results = timer(lambda: fn(chunks, opts))
format_results(results, fn.title, 'compress stream', total_size)
def bench_stream_decompression(chunks, total_size, opts):
for fn in get_benches('stream', 'decompress'):
results = timer(lambda: fn(chunks, {}))
format_results(results, fn.title, 'decompress stream', total_size)
def bench_stream_zlib_compression(chunks, opts):
total_size = sum(map(len, chunks))
for fn in get_benches('stream', 'compress', zlib=True):
results = timer(lambda: fn(chunks, opts))
format_results(results, fn.title, 'compress stream zlib', total_size)
def bench_stream_zlib_decompression(chunks, total_size):
for fn in get_benches('stream', 'decompress', zlib=True):
results = timer(lambda: fn(chunks))
format_results(results, fn.title, 'decompress stream zlib', total_size)
def bench_content_dict_compression(chunks, opts):
total_size = sum(map(len, chunks))
for fn in get_benches('content-dict', 'compress'):
results = timer(lambda: fn(chunks, opts))
format_results(results, fn.title, 'compress content dict', total_size)
def bench_content_dict_decompression(chunks, total_size, opts):
for fn in get_benches('content-dict', 'decompress'):
if not opts.get('write_content_size') and fn.require_content_size:
continue
results = timer(lambda: fn(chunks, {}))
format_results(results, fn.title, 'decompress content dict', total_size)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
group = parser.add_argument_group('Compression Modes')
group.add_argument('--discrete', action='store_true',
help='Compress each input independently')
group.add_argument('--stream', action='store_true',
help='Feed each input into a stream and emit '
'flushed blocks')
group.add_argument('--content-dict', action='store_true',
help='Compress each input using the previous as a '
'content dictionary')
group.add_argument('--discrete-dict', action='store_true',
help='Compress each input independently with a '
'dictionary')
group.add_argument('--discrete-cover-dict', action='store_true',
help='Compress each input independently with a '
'dictionary generated using the COVER algorithm')
group = parser.add_argument_group('Benchmark Selection')
group.add_argument('--no-compression', action='store_true',
help='Do not test compression performance')
group.add_argument('--no-decompression', action='store_true',
help='Do not test decompression performance')
group.add_argument('--only-simple', action='store_true',
help='Only run the simple APIs')
group.add_argument('--zlib', action='store_true',
help='Benchmark against zlib')
group = parser.add_argument_group('Compression Parameters')
group.add_argument('-l', '--level', type=int, default=3,
help='Compression level')
group.add_argument('--write-size', action='store_true',
help='Write content size to zstd frames')
group.add_argument('--write-checksum', action='store_true',
help='Write checksum data to zstd frames')
group.add_argument('--dict-size', type=int, default=128 * 1024,
help='Maximum size of trained dictionary')
group.add_argument('--compress-threads', type=int,
help='Use multi-threaded compression with this many '
'threads')
group.add_argument('--batch-threads', type=int, default=0,
help='Use this many threads for batch APIs')
group.add_argument('--cover-k', type=int, default=0,
help='Segment size parameter to COVER algorithm')
group.add_argument('--cover-d', type=int, default=0,
help='Dmer size parameter to COVER algorithm')
group.add_argument('--zlib-level', type=int, default=6,
help='zlib compression level')
group = parser.add_argument_group('Input Processing')
group.add_argument('--limit-count', type=int,
help='limit number of input files added')
group.add_argument('--dict-sample-limit', type=int,
help='limit how many samples are fed into dictionary '
'training')
group.add_argument('--chunk-encoding', choices=['raw', 'zlib'], default='raw',
help='How input chunks are encoded. Can be used to '
'pass compressed chunks for benchmarking')
parser.add_argument('path', metavar='PATH', nargs='+')
args = parser.parse_args()
# If no compression mode defined, assume discrete.
if not args.stream and not args.content_dict and not args.discrete_dict and not args.discrete_cover_dict:
args.discrete = True
# It is easier to filter here than to pass arguments to multiple
# functions.
if args.only_simple:
BENCHES[:] = [fn for fn in BENCHES if fn.simple]
opts = {}
opts['level'] = args.level
if args.write_size:
opts['write_content_size'] = True
if args.write_checksum:
opts['write_checksum'] = True
if args.compress_threads:
opts['threads'] = args.compress_threads
chunks = get_chunks(args.path, args.limit_count, args.chunk_encoding)
orig_size = sum(map(len, chunks))
print('%d chunks; %d bytes' % (len(chunks), orig_size))
if args.discrete_dict:
if args.dict_sample_limit:
training_chunks = chunks[0:args.dict_sample_limit]
else:
training_chunks = chunks
dict_data = zstd.train_dictionary(args.dict_size, training_chunks,
level=opts['level'])
print('trained dictionary of size %d (wanted %d) (l=%d)' % (
len(dict_data), args.dict_size, opts['level']))
if args.discrete_cover_dict:
if args.dict_sample_limit:
training_chunks = chunks[0:args.dict_sample_limit]
else:
training_chunks = chunks
cover_args = {
'k': args.cover_k,
'd': args.cover_d,
'optimize': False,
# Always use all available threads in optimize mode.
'threads': -1,
'level': opts['level'],
}
if not args.cover_k and not args.cover_d:
cover_args['optimize'] = True
cover_dict_data = zstd.train_cover_dictionary(args.dict_size,
training_chunks,
**cover_args)
print('trained cover dictionary of size %d (wanted %d); k=%d; d=%d' % (
len(cover_dict_data), args.dict_size,
cover_dict_data.k, cover_dict_data.d))
if args.zlib and args.discrete:
compressed_discrete_zlib = []
ratios = []
for chunk in chunks:
c = zlib.compress(chunk, args.zlib_level)
compressed_discrete_zlib.append(c)
ratios.append(float(len(c)) / float(len(chunk)))
compressed_size = sum(map(len, compressed_discrete_zlib))
ratio = float(compressed_size) / float(orig_size) * 100.0
bad_count = sum(1 for r in ratios if r >= 1.00)
good_ratio = 100.0 - (float(bad_count) / float(len(chunks)) * 100.0)
print('zlib discrete compressed size (l=%d): %d (%.2f%%); smaller: %.2f%%' % (
args.zlib_level, compressed_size, ratio, good_ratio))
# In discrete mode, each input is compressed independently, possibly
# with a dictionary.
if args.discrete:
zctx = zstd.ZstdCompressor(**opts)
compressed_discrete = []
ratios = []
# Always use multiple threads here so we complete faster.
for i, c in enumerate(zctx.multi_compress_to_buffer(chunks, threads=-1)):
compressed_discrete.append(c.tobytes())
ratios.append(float(len(c)) / float(len(chunks[i])))
compressed_size = sum(map(len, compressed_discrete))
ratio = float(compressed_size) / float(orig_size) * 100.0
bad_count = sum(1 for r in ratios if r >= 1.00)
good_ratio = 100.0 - (float(bad_count) / float(len(chunks)) * 100.0)
print('discrete compressed size (l=%d): %d (%.2f%%); smaller: %.2f%%' % (
opts['level'], compressed_size, ratio, good_ratio))
# Discrete dict mode is like discrete but trains a dictionary.
if args.discrete_dict:
dict_opts = dict(opts)
dict_opts['dict_data'] = dict_data
zctx = zstd.ZstdCompressor(**dict_opts)
compressed_discrete_dict = []
ratios = []
for i, c in enumerate(zctx.multi_compress_to_buffer(chunks, threads=-1)):
compressed_discrete_dict.append(c.tobytes())
ratios.append(float(len(c)) / float(len(chunks[i])))
compressed_size = sum(map(len, compressed_discrete_dict))
ratio = float(compressed_size) / float(orig_size) * 100.0
bad_count = sum(1 for r in ratios if r >= 1.00)
good_ratio = 100.0 - (float(bad_count) / float(len(chunks)) * 100.0)
print('discrete dict compressed size (l=%d): %d (%.2f%%); smaller: %.2f%%' % (
opts['level'], compressed_size, ratio, good_ratio))
if args.discrete_cover_dict:
cover_dict_opts = dict(opts)
cover_dict_opts['dict_data'] = cover_dict_data
zctx = zstd.ZstdCompressor(**cover_dict_opts)
compressed_discrete_cover_dict = []
ratios = []
for i, c in enumerate(zctx.multi_compress_to_buffer(chunks, threads=-1)):
compressed_discrete_cover_dict.append(c.tobytes())
ratios.append(float(len(c)) / float(len(chunks[i])))
compressed_size = sum(map(len, compressed_discrete_cover_dict))
ratio = float(compressed_size) / float(orig_size) * 100.0
bad_count = sum(1 for r in ratios if r >= 1.00)
good_ratio = 100.0 - (float(bad_count) / float(len(chunks)) * 100.0)
print('discrete cover dict compressed size (l=%d): %d (%.2f%%); smaller: %.2f%%' % (
opts['level'], compressed_size, ratio, good_ratio))
# In stream mode the inputs are fed into a streaming compressor and
# blocks are flushed for each input.
if args.zlib and args.stream:
compressed_stream_zlib = []
ratios = []
compressor = zlib.compressobj(args.zlib_level)
for chunk in chunks:
output = compressor.compress(chunk)
output += compressor.flush(zlib.Z_SYNC_FLUSH)
compressed_stream_zlib.append(output)
compressed_size = sum(map(len, compressed_stream_zlib))
ratio = float(compressed_size) / float(orig_size) * 100.0
print('stream zlib compressed size (l=%d): %d (%.2f%%)' % (
args.zlib_level, compressed_size, ratio))
if args.stream:
zctx = zstd.ZstdCompressor(**opts)
compressed_stream = []
ratios = []
compressor = zctx.compressobj()
for chunk in chunks:
output = compressor.compress(chunk)
output += compressor.flush(zstd.COMPRESSOBJ_FLUSH_BLOCK)
compressed_stream.append(output)
compressed_size = sum(map(len, compressed_stream))
ratio = float(compressed_size) / float(orig_size) * 100.0
print('stream compressed size (l=%d): %d (%.2f%%)' % (
opts['level'], compressed_size, ratio))
if args.content_dict:
compressed_content_dict = []
ratios = []
# First chunk is compressed like normal.
c = zstd.ZstdCompressor(**opts).compress(chunks[0])
compressed_content_dict.append(c)
ratios.append(float(len(c)) / float(len(chunks[0])))
# Subsequent chunks use previous chunk as a dict.
for i, chunk in enumerate(chunks[1:]):
d = zstd.ZstdCompressionDict(chunks[i])
zctx = zstd.ZstdCompressor(dict_data=d, **opts)
c = zctx.compress(chunk)
compressed_content_dict.append(c)
ratios.append(float(len(c)) / float(len(chunk)))
compressed_size = sum(map(len, compressed_content_dict))
ratio = float(compressed_size) / float(orig_size) * 100.0
bad_count = sum(1 for r in ratios if r >= 1.00)
good_ratio = 100.0 - (float(bad_count) / float(len(chunks)) * 100.0)
print('content dict compressed size (l=%d): %d (%.2f%%); smaller: %.2f%%' % (
opts['level'], compressed_size, ratio, good_ratio))
print('')
if not args.no_compression:
if args.zlib and args.discrete:
bench_discrete_zlib_compression(chunks,
{'zlib_level': args.zlib_level})
if args.discrete:
bench_discrete_compression(chunks, opts,
threads=args.batch_threads)
if args.discrete_dict:
bench_discrete_compression(chunks, dict_opts,
threads=args.batch_threads)
if args.discrete_cover_dict:
bench_discrete_compression(chunks, cover_dict_opts,
cover=True, threads=args.batch_threads)
if args.zlib and args.stream:
bench_stream_zlib_compression(chunks,
{'zlib_level': args.zlib_level})
if args.stream:
bench_stream_compression(chunks, opts)
if args.content_dict:
bench_content_dict_compression(chunks, opts)
if not args.no_decompression:
print('')
if not args.no_decompression:
if args.zlib and args.discrete:
bench_discrete_zlib_decompression(compressed_discrete_zlib,
orig_size)
if args.discrete:
bench_discrete_decompression(chunks, compressed_discrete, orig_size,
opts, threads=args.batch_threads)
if args.discrete_dict:
bench_discrete_decompression(chunks, compressed_discrete_dict,
orig_size, dict_opts,
threads=args.batch_threads)
if args.discrete_cover_dict:
bench_discrete_decompression(chunks, compressed_discrete_cover_dict,
orig_size, cover_dict_opts, cover=True,
threads=args.batch_threads)
if args.zlib and args.stream:
bench_stream_zlib_decompression(compressed_stream_zlib, orig_size)
if args.stream:
bench_stream_decompression(compressed_stream, orig_size, opts)
if args.content_dict:
bench_content_dict_decompression(compressed_content_dict,
orig_size, opts)
|
# Copyright (C) 2011, 2012, 2013, 2014, 2015 David Maxwell and Constantine Khroulev
#
# This file is part of PISM.
#
# PISM is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
#
# PISM is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License
# along with PISM; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Module containing classes managing SSA forward runs and
SSA verification test cases."""
import PISM
import math
from PISM import util, model
# Conversion from command-line arguments to classes of SSA solver.
SSAAlgorithms = {"fem": PISM.SSAFEM, "fd": PISM.SSAFD}
class SSARun(object):
"""Mediates solving PISM's SSA model from a minimal set of data, without the constrution of an :cpp:class:`iceModel`.
It codifies the steps needed to put together the data for an SSA run; subclasses do the work of
implementing the steps in :meth:`_setFromOptions`, :meth:`_initGrid`, etc. Uses include:
* Running SSA test cases.
* Running the SSA in standalone mode (e.g. via :command:`ssaforward.py`)
* The SSA inversion code.
Usage: After construction (of a subclass),
1. Call :meth:`setup` to run through the various
steps needed to set up an environment for solving the SSA.
2. Solve the SSA with :meth:`solve`.
3. Optionally write the model vectors and solution to a file with :meth:`write`."""
def __init__(self):
"""Do little constructor. Real work is done by :meth:`setup` which should be called prior to :meth:`solve`."""
self.grid = None #: The computation grid; will be set by :meth:`_initGrid`
self.config = None #: Placeholder for config dictionary; set indirectly by :meth:`_constructModelData`
#: Instance of :class:`PISM.model.ModelData` that stores all data needed for solving the SSA. Much of the work of
#: the :class:`SSARun` is involved in setting up this object. Tasks include setting up :cpp:class:IceModelVec
#: variables as well as model physics (e.g. :cpp:class:`EnthalpyConverter`).
self.modeldata = None
self.ssa = None #: Subclass of :cpp:class:`SSA` that solves the SSA.
def setup(self):
"""Orchestrates the steps of setting up an environment for running the SSA. The following methods
are called in order, and should be implemented by a subclass.
1. :meth:`_setFromOptions` to set any parameters from command-line options
2. :meth:`_initGrid` to determine the computation grid, to be stored as :attr:`grid`
3. :meth:`_constructModelData` provide a :class:`ModelData` object (a default implementation is provided)
4. :meth:`_initPhysics` to set the non-vec members of the :class:`ModelData`, e.g. the :cpp:class:`EnthalpyConverter`.
5. :meth:`_constructSSA` to build the actual subclass of :cpp:class:`SSA` that will be used to solve the SSA
6. :meth:`_initSSACoefficients` enter all of the vecs needed for solving the SSA into the :class:`ModelData`.
7. :meth:`_initSSA` initialize the :cpp:class:`SSA` returned in step 5
"""
self._setFromOptions()
self._initGrid()
if self.grid is None:
raise RuntimeError("SSARun failed to provide a grid.")
self.modeldata = self._constructModelData()
if self.modeldata is None:
raise RuntimeError("SSARun._constructModelData failed to provide a ModelData.")
self.config = self.modeldata.config
self._initPhysics()
if self.modeldata.enthalpyconverter is None:
raise RuntimeError("SSARun._initPhysics failed to initialize the physics of the underlying SSA solver.")
self.ssa = self._constructSSA()
if self.ssa is None:
raise RuntimeError("SSARun._constructSSA failed to provide an SSA.")
self._initSSACoefficients()
# FIXME: is there a reasonable check to do here?
self._initSSA()
def solve(self):
"""Solve the SSA by calling the underlying PISM :cpp:class:`SSA`'s
:cpp:member:`update` method. Returns the solution vector (owned by
self.ssa, but you should not need to know about ownership).
"""
vecs = self.modeldata.vecs
# make sure vecs is locked!
self.ssa.init()
if vecs.has('vel_bc'):
self.ssa.set_boundary_conditions(vecs.bc_mask, vecs.vel_bc)
melange_back_pressure = PISM.IceModelVec2S()
melange_back_pressure.create(self.grid, "melange_back_pressure", PISM.WITHOUT_GHOSTS)
melange_back_pressure.set_attrs("diagnostic",
"melange back pressure fraction", "1", "")
PISM.verbPrintf(2, self.grid.com, "* Solving the SSA stress balance ...\n")
fast = False
self.ssa.update(fast, melange_back_pressure)
return self.ssa.velocity()
def write(self, filename):
"""Saves all of :attr:`modeldata`'s vecs (and the solution) to an
output file."""
grid = self.grid
vecs = self.modeldata.vecs
pio = PISM.PIO(grid.com, "netcdf3")
pio.open(filename, PISM.PISM_READWRITE_MOVE)
PISM.define_time(pio, grid.ctx().config().get_string("time_dimension_name"),
grid.ctx().config().get_string("calendar"),
grid.ctx().time().units_string(),
grid.ctx().unit_system())
PISM.append_time(pio, grid.ctx().config().get_string("time_dimension_name"), 0.0)
pio.close()
# Save time & command line
PISM.util.writeProvenance(filename)
vecs.writeall(filename)
vel_ssa = self.ssa.velocity()
vel_ssa.write(filename)
sys = self.grid.ctx().unit_system()
velbar_mag = model.createCBarVec(self.grid)
velbar_mag.set_to_magnitude(vel_ssa)
velbar_mag.mask_by(vecs.thk, PISM.convert(sys, -0.01, "m/year", "m/second"))
velbar_mag.write(filename)
def _setFromOptions(self):
"""Optionally override to set any data from command line variables."""
pass
def _constructModelData(self):
"""Optionally override to return a custom :class:`PISM.model.ModelData` instance."""
return model.ModelData(self.grid)
def _initGrid(self):
"""Override to return the computation grid."""
raise NotImplementedError()
def _initPhysics(self):
"""Override to set the non-var parts of :attr:`modeldata` (e.g. the basal yeild stress model and the enthalpy converter)"""
raise NotImplementedError()
def _allocStdSSACoefficients(self):
"""Helper method that allocates the standard :cpp:class:`IceModelVec` variables used to solve the SSA and stores them
in :attr:`modeldata` ``.vecs``:
* ``surface``
* ``thickness``
* ``bed``
* ``tauc``
* ``enthalpy``
* ``mask``
* ``age`` if -age is given
Intended to be called from custom implementations of :meth:`_initSSACoefficients` if desired."""
vecs = self.modeldata.vecs
grid = self.grid
vecs.add(model.createIceSurfaceVec(grid))
vecs.add(model.createIceThicknessVec(grid))
vecs.add(model.createBedrockElevationVec(grid))
vecs.add(model.createYieldStressVec(grid), 'tauc')
vecs.add(model.createEnthalpyVec(grid), 'enthalpy')
vecs.add(model.createIceMaskVec(grid), 'mask')
# The SIA model might need the "age" field
if grid.ctx().config().get_boolean("do_age"):
vecs.add(model.createAgeVec(grid), "age")
def _allocateBCs(self, velname='_bc', maskname='bc_mask'):
"""Helper method that allocates standard Dirichlet data
:cpp:class:`IceModelVec` variable and stores them in
:attr:`modeldata` ``.vecs``:
* ``vel_bc``
* ``bc_mask``
"""
vecs = self.modeldata.vecs
vecs.add(model.create2dVelocityVec(self.grid,
name=velname,
desc='SSA velocity boundary condition',
intent='intent'),
"vel_bc")
vecs.add(model.createBCMaskVec(self.grid, name=maskname),
"bc_mask")
def _initSSACoefficients(self):
"""Override to allocate and initialize all :cpp:class:`IceModelVec` variables in :attr:`modeldata` ``.vecs``
needed for solving the SSA."""
raise NotImplementedError()
def _constructSSA(self):
"""Optionally override to return an instance of :cpp:class:`SSA` (e.g. :cpp:class:`SSAFD` or :cpp:class:`SSAFEM`)
that will be used for solving the SSA."""
md = self.modeldata
return SSAAlgorithms[md.config.get_string("ssa_method")](md.grid, md.enthalpyconverter)
def _initSSA(self):
"""Optionally perform any final initialization of :attr:`ssa`."""
pass
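# A minimal sketch (not part of PISM) of what a concrete SSARun subclass can
# look like; the hooks below are the ones listed in the setup() docstring, and
# the grid / coefficient details are placeholders:
#
#   class MyRun(SSARun):
#       def _initGrid(self):
#           self.grid = ...  # build a PISM.IceGrid here
#       def _initPhysics(self):
#           self.modeldata.setPhysics(PISM.EnthalpyConverter(self.config))
#       def _initSSACoefficients(self):
#           self._allocStdSSACoefficients()
#           # ... fill vecs.land_ice_thickness, vecs.bedrock_altitude, vecs.tauc, ...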
class SSAExactTestCase(SSARun):
"""Base class for implmentation of specific SSA test cases. Provides a mechanism for comparing
computed and exact values. Simply construct with a grid size and then call :meth:`run`"""
def __init__(self, Mx, My):
"""Initialize with a grid of the specified size."""
SSARun.__init__(self)
self.Mx = Mx
self.My = My
# For convenience, provide a grid. It will get initialized later
# on when _initGrid is called by our setup method.
self.grid = None
def run(self, output_file):
"""Main command intended to be called by whatever code executes the test case.
Calls :meth:`setup`, :meth:`solve`, :meth:`report`, and :meth:`write`."""
self.setup()
self.solve()
self.report()
self.write(output_file)
def report(self):
"""Compares computed and exact solution values and displays a summary report."""
grid = self.grid
ssa_stdout = self.ssa.stdout_report()
PISM.verbPrintf(3, grid.com, ssa_stdout)
maxvecerr = 0.0
avvecerr = 0.0
avuerr = 0.0
avverr = 0.0
maxuerr = 0.0
maxverr = 0.0
if (self.config.get_boolean("do_pseudo_plastic_till") and
self.config.get_double("pseudo_plastic_q") != 1.0):
PISM.verbPrintf(1, grid.com, "WARNING: numerical errors not valid for pseudo-plastic till\n")
PISM.verbPrintf(1, grid.com, "NUMERICAL ERRORS in velocity relative to exact solution:\n")
vel_ssa = self.ssa.velocity()
vel_ssa.begin_access()
exactvelmax = 0
gexactvelmax = 0
for (i, j) in self.grid.points():
x = grid.x(i)
y = grid.y(j)
(uexact, vexact) = self.exactSolution(i, j, x, y)
exactnormsq = math.sqrt(uexact * uexact + vexact * vexact)
exactvelmax = max(exactnormsq, exactvelmax)
solution = vel_ssa[i, j]
uerr = abs(solution.u - uexact)
verr = abs(solution.v - vexact)
avuerr += uerr
avverr += verr
maxuerr = max(maxuerr, uerr)
maxverr = max(maxverr, verr)
vecerr = math.sqrt(uerr * uerr + verr * verr)
maxvecerr = max(maxvecerr, vecerr)
avvecerr = avvecerr + vecerr
vel_ssa.end_access()
N = grid.Mx() * grid.My()
gexactvelmax = PISM.GlobalMax(grid.com, exactvelmax)
gmaxuerr = PISM.GlobalMax(grid.com, maxuerr)
gmaxverr = PISM.GlobalMax(grid.com, maxverr)
gavuerr = PISM.GlobalSum(grid.com, avuerr) / N
gavverr = PISM.GlobalSum(grid.com, avverr) / N
gmaxvecerr = PISM.GlobalMax(grid.com, maxvecerr)
gavvecerr = PISM.GlobalSum(grid.com, avvecerr) / N
sys = grid.ctx().unit_system()
m_year = PISM.UnitConverter(sys, "m / second", "m / year")
if abs(gexactvelmax) > 0.0:
relative_vel_error = (gavvecerr / gexactvelmax) * 100.0
else:
relative_vel_error = 0.0
PISM.verbPrintf(1, grid.com, "velocity : maxvector prcntavvec maxu maxv avu avv\n")
PISM.verbPrintf(1, grid.com,
" %11.4f%13.5f%10.4f%10.4f%10.4f%10.4f\n",
m_year(gmaxvecerr),
relative_vel_error,
m_year(gmaxuerr),
m_year(gmaxverr),
m_year(gavuerr),
m_year(gavverr))
PISM.verbPrintf(1, grid.com, "NUM ERRORS DONE\n")
def exactSolution(self, i, j, xi, xj):
"""Override to provide the exact value of the solution at grid index (``i``, ``j``) with
coordinates (``xi``, ``xj``)."""
raise NotImplementedError()
def write(self, filename):
"""Override of :meth:`SSARun.write`. Does all of the above, and saves a copy of the exact solution."""
SSARun.write(self, filename)
grid = self.grid
exact = model.create2dVelocityVec(grid, name="_exact", desc="SSA exact solution", intent="diagnostic")
exact.begin_access()
for (i, j) in grid.points():
exact[i, j] = self.exactSolution(i, j, grid.x(i), grid.y(j))
exact.end_access()
exact.write(filename)
class SSAFromInputFile(SSARun):
"""Class for running the SSA based on data provided in an input file."""
def __init__(self, boot_file):
SSARun.__init__(self)
self.grid = None
self.config = PISM.Context().config
self.boot_file = boot_file
self.phi_to_tauc = False
self.is_regional = False
def _setFromOptions(self):
self.phi_to_tauc = PISM.OptionBool("-phi_to_tauc",
"Recompute pseudo yield stresses from till friction angles.")
self.is_regional = PISM.OptionBool("-regional", "enable 'regional' mode")
def _initGrid(self):
"""Override of :meth:`SSARun._initGrid`. Sets periodicity based on
``-periodicity`` command-line option."""
# FIXME: allow specification of Mx and My different from what's
# in the boot_file.
periodicity = PISM.XY_PERIODIC
(pstring, pflag) = PISM.optionsListWasSet('-periodicity', "Grid periodicity",
'x,y,xy,none', 'xy')
if pflag:
pdict = {'x': PISM.X_PERIODIC, 'y': PISM.Y_PERIODIC,
'xy': PISM.XY_PERIODIC, 'none': PISM.NOT_PERIODIC}
periodicity = pdict[pstring]
else:
if self.is_regional and (self.config.get_string("ssa_method") == "fem"):
periodicity = PISM.NOT_PERIODIC
self.grid = PISM.IceGrid.FromFile(PISM.Context().ctx, self.boot_file, "enthalpy",
periodicity)
def _initPhysics(self):
"""Override of :meth:`SSARun._initPhysics` that sets the physics based on command-line flags."""
config = self.config
enthalpyconverter = PISM.EnthalpyConverter(config)
if PISM.OptionString("-ssa_glen", "SSA flow law Glen exponent").is_set():
config.set_string("ssa_flow_law", "isothermal_glen")
config.scalar_from_option("ice_softness", "ice_softness")
else:
config.set_string("ssa_flow_law", "gpbld")
self.modeldata.setPhysics(enthalpyconverter)
def _allocExtraSSACoefficients(self):
"""Allocate storage for SSA coefficients."""
vecs = self.modeldata.vecs
if util.fileHasVariable(self.boot_file, 'ssa_driving_stress_x'):
vecs.add(model.createDrivingStressXVec(self.grid))
if util.fileHasVariable(self.boot_file, 'ssa_driving_stress_y'):
vecs.add(model.createDrivingStressYVec(self.grid))
no_model_mask = None
# For a regional run we'll need no_model_mask, usurfstore, thkstore
if self.is_regional:
no_model_mask = model.createNoModelMaskVec(self.grid)
vecs.add(no_model_mask, 'no_model_mask')
vecs.add(model.createIceSurfaceStoreVec(self.grid))
vecs.add(model.createIceThicknessStoreVec(self.grid))
if self.config.get_boolean('ssa_dirichlet_bc'):
vecs.add(model.create2dVelocityVec(self.grid, name='_ssa_bc',
desc='SSA velocity boundary condition',
intent='intent'),
"vel_ssa_bc")
if self.is_regional:
vecs.add(no_model_mask, 'bc_mask')
else:
vecs.add(model.createBCMaskVec(self.grid), 'bc_mask')
if self.phi_to_tauc:
vecs.add(PISM.model.createBasalMeltRateVec(self.grid))
vecs.add(PISM.model.createTillPhiVec(self.grid))
vecs.add(PISM.model.createBasalWaterVec(self.grid))
def _initSSACoefficients(self):
"""Override of :meth:`SSARun._initSSACoefficients` that initializes variables from the
contents of the input file."""
# Build the standard thickness, bed, etc
self._allocStdSSACoefficients()
self._allocExtraSSACoefficients()
vecs = self.modeldata.vecs
thickness = vecs.land_ice_thickness
bed = vecs.bedrock_altitude
enthalpy = vecs.enthalpy
mask = vecs.mask
surface = vecs.surface_altitude
# Read in the PISM state variables that are used directly in the SSA solver
for v in [thickness, bed, enthalpy]:
v.regrid(self.boot_file, True)
# The SIA model might need the age field.
if self.config.get_boolean("do_age"):
vecs.age.regrid(self.boot_file, True)
# variables mask and surface are computed from the geometry previously read
sea_level = 0 # FIXME setFromOption?
gc = PISM.GeometryCalculator(sea_level, self.config)
gc.compute(bed, thickness, mask, surface)
if util.fileHasVariable(self.boot_file, 'ssa_driving_stress_x'):
vecs.ssa_driving_stress_x.regrid(self.boot_file, critical=True)
if util.fileHasVariable(self.boot_file, 'ssa_driving_stress_y'):
vecs.ssa_driving_stress_y.regrid(self.boot_file, critical=True)
# For a regional run we'll need no_model_mask, usurfstore, thkstore
if self.is_regional:
vecs.no_model_mask.regrid(self.boot_file, True)
if util.fileHasVariable(self.boot_file, 'usurfstore'):
vecs.usurfstore.regrid(self.boot_file, True)
else:
vecs.usurfstore.copy_from(vecs.surface_altitude)
if util.fileHasVariable(self.boot_file, 'thkstore'):
vecs.thkstore.regrid(self.boot_file, True)
else:
vecs.thkstore.copy_from(vecs.land_ice_thickness)
# Compute yield stress from PISM state variables
# (basal melt rate, tillphi, and basal water height)
grid = self.grid
if self.phi_to_tauc:
for v in [vecs.bmr, vecs.tillphi, vecs.bwat]:
v.regrid(self.boot_file, True)
vecs.add(v)
if self.is_regional:
yieldstress = PISM.RegionalDefaultYieldStress(self.modeldata.grid)
else:
yieldstress = PISM.MohrCoulombYieldStress(self.modeldata.grid)
# make sure vecs is locked!
yieldstress.init()
yieldstress.set_till_friction_angle(vecs.tillphi)
yieldstress.update(0, 1)
vecs.tauc.copy_from(yieldstress.basal_material_yield_stress())
else:
vecs.tauc.regrid(self.boot_file, True)
if self.config.get_boolean('ssa_dirichlet_bc'):
has_u_ssa_bc = util.fileHasVariable(self.boot_file, 'u_ssa_bc')
has_v_ssa_bc = util.fileHasVariable(self.boot_file, 'v_ssa_bc')
if (not has_u_ssa_bc) or (not has_v_ssa_bc):
PISM.verbPrintf(2, grid.com,
"Input file '%s' missing Dirichlet boundary data u/v_ssa_bc;"
" using zero default instead." % self.boot_file)
vecs.vel_ssa_bc.set(0.0)
else:
vecs.vel_ssa_bc.regrid(self.boot_file, True)
if not self.is_regional:
bc_mask_name = vecs.bc_mask.metadata().get_string("short_name")
if util.fileHasVariable(self.boot_file, bc_mask_name):
vecs.bc_mask.regrid(self.boot_file, True)
else:
PISM.verbPrintf(2, grid.com,
"Input file '%s' missing Dirichlet location mask '%s'."
" Default to no Dirichlet locations." % (self.boot_file, bc_mask_name))
vecs.bc_mask.set(0)
def _constructSSA(self):
"""Constructs an instance of :cpp:class:`SSA` for solving the SSA based on command-line flags ``-regional`` and ``-ssa_method``"""
md = self.modeldata
if self.is_regional and (md.config.get_string("ssa_method") == "fd"):
algorithm = PISM.SSAFD_Regional
else:
algorithm = SSAAlgorithms[md.config.get_string("ssa_method")]
return algorithm(md.grid, md.enthalpyconverter)
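# Typical driver usage (illustrative; the file names are placeholders):
#
#   run = SSAFromInputFile("input.nc")
#   run.setup()
#   run.solve()
#   run.write("output.nc")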
|
Whether you're looking for a nanny to provide in-home care, or a day care center where your child will experience a social environment, we'll help you find reputable child care in your area.
We only work with trusted child care providers with clean records and top credentials. Contact us to find a qualified child care professional near you.
We understand that child care budgets vary, so we work hard to offer a range of options to suit every family. Call today to learn about our affordable child care options.
We'll help you find after-school care, babysitters, day care, and more.
Your family is your most precious asset. Their every care and attention is your responsibility and you want nothing but the best for your family. When it comes time to choose child care in Iowa, you need to make sure you've done your research and chosen the right day care and preschool for your little ones. You can trust our network of professional and caring nannies, babysitters, and preschool teachers. You'll find quality child care to meet your needs and your schedule in Iowa.
Choosing a nanny or a babysitter can be a nerve-wracking experience. This is especially true if you're using a day care or child care network to find child care in Iowa. There are many different kinds of child care to choose from: family child care, in-home child care, infant child care, and many other types of child care resources. You'll find trusted professionals in IA to help you care for your children.
Adult to child ratio. This includes the number of preschool teachers, nannies, babysitters, and other adults caring for your child.
Number of children in day care. You'll want your child to have as much individual attention as possible.
Accreditation. This is especially important when you are looking for preschools. There are preschools in Iowa that are accredited facilities offering you the peace of mind you need.
Finding quality child care and local day care is an important task. With a little bit of research and some time spent at the school or day care facility, you'll soon be able to make the right decision for you and your family. Trust your children to the professionals. Call today to find trusted child care in IA. Your child's future is the most important investment you can make. Invest in the absolute best child care for your family.
Find Quality Child Care Now!
|
from ..state_plugins.plugin import SimStatePlugin
from ..state_plugins.sim_action_object import SimActionObject
from .. import sim_options
import claripy
import logging
l = logging.getLogger("angr.storage.file")
# TODO: symbolic file positions
import itertools
file_counter = itertools.count()
dialogue_counter = itertools.count()
class Flags: # pylint: disable=W0232,
O_RDONLY = 0
O_WRTONLY = 1
O_RDWR = 2
O_APPEND = 4096
O_ASYNC = 64
O_CLOEXEC = 512
# TODO mode for this flag
O_CREAT = 256
O_DIRECT = 262144
O_DIRECTORY = 2097152
O_EXCL = 2048
O_LARGEFILE = 1048576
O_NOATIME = 16777216
O_NOCTTY = 1024
O_NOFOLLOW = 4194304
O_NONBLOCK = 8192
O_NODELAY = 8192
O_SYNC = 67174400
O_TRUNC = 1024
def _deps_unpack(a):
if isinstance(a, SimActionObject):
return a.ast, a.reg_deps, a.tmp_deps
else:
return a, None, None
class SimFile(SimStatePlugin):
"""
Represents a file.
"""
# Creates a SimFile
def __init__(self, name, mode, pos=0, content=None, size=None, closed=None):
super(SimFile, self).__init__()
self.name = name
self.mode = mode
self.pos = pos
self.size = size
self.content = SimSymbolicMemory(memory_id="file_%s_%d" % (name, file_counter.next())) if content is None else content
self.closed = False if closed is None else closed
@property
def read_pos(self):
return self.pos
@read_pos.setter
def read_pos(self, val):
self.pos = val
@property
def write_pos(self):
return self.pos
@write_pos.setter
def write_pos(self, val):
self.pos = val
def set_state(self, st):
super(SimFile, self).set_state(st)
if isinstance(self.pos, (int, long)):
self.pos = claripy.BVV(self.pos, st.arch.bits)
if isinstance(self.size, (int, long)):
self.size = claripy.BVV(self.size, st.arch.bits)
self.content.set_state(st)
def variables(self):
"""
:return: the symbolic variable names associated with the file.
"""
return self.content.mem._name_mapping.keys()
def close(self):
l.debug("File %s closed.", self.name)
self.closed = True
return 0
def read(self, dst_addr, length):
"""
Reads some data from the current (or provided) position of the file.
:param dst_addr: If specified, the data is written to that address.
:param length: The length of the read.
:return: The length of the read.
"""
orig_length = length
real_length = length
max_length = length
if self.size is not None:
max_length = self.size - self.pos
# TODO: check file close status
# check if we need to concretize the length
if (
sim_options.CONCRETIZE_SYMBOLIC_FILE_READ_SIZES in self.state.options and
(self.state.se.symbolic(orig_length) or self.state.se.symbolic(max_length))
):
orig_max = self.state.se.max_int(orig_length)
self.state.add_constraints(orig_length == orig_max)
real_length = min(orig_max, self.state.se.max_int(max_length))
if self.size is not None:
length_constraint = self.pos + real_length <= self.size
if (self.state.se.symbolic(real_length) or self.state.se.symbolic(max_length)) and \
self.state.se.satisfiable(extra_constraints=(length_constraint,)):
self.state.add_constraints(length_constraint)
elif not self.state.se.symbolic(real_length) or not self.state.se.symbolic(max_length):
real_length = min(self.state.se.eval(max_length), self.state.se.eval(real_length))
self.content.copy_contents(dst_addr, self.pos, real_length , dst_memory=self.state.memory)
self.read_pos += _deps_unpack(real_length)[0]
return real_length
def read_from(self, length):
# TODO: check file close status
read_length = length
if self.size is not None:
remaining = self.size - self.pos
read_length = self.state.se.If(remaining < length, remaining, length)
data = self.content.load(self.pos, read_length)
self.read_pos += _deps_unpack(read_length)[0]
return data
# Writes some data to the current position of the file.
def write(self, content, length):
# TODO: something about length
# TODO: check file close status
self.content.store(self.pos, content)
self.write_pos += _deps_unpack(length)[0]
return length
# Seeks to a position in the file.
def seek(self, where):
# TODO: check file close status
if isinstance(where, (int, long)):
where = self.state.se.BVV(where, self.state.arch.bits)
self.pos = where
# Copies the SimFile object.
def copy(self):
return SimFile(self.name, self.mode, pos=self.pos, content=self.content.copy(), size=self.size, closed=self.closed)
def all_bytes(self):
indexes = self.content.mem.keys()
if len(indexes) == 0:
return self.state.se.BVV("")
min_idx = min(indexes)
max_idx = max(indexes)
buff = [ ]
for i in range(min_idx, max_idx+1):
buff.append(self.content.load(i, 1))
return self.state.se.Concat(*buff)
def concretize(self, **kwargs):
"""
Returns a concrete value for this file satisfying the current state constraints.
Or: generate a testcase for this file.
"""
return self.state.se.eval(self.all_bytes(), cast_to=str, **kwargs)
def merge(self, others, merge_conditions, common_ancestor=None):
"""
Merges the SimFile object with `others`.
"""
if not all(isinstance(oth, SimFile) for oth in others):
raise SimMergeError("merging files of different types is not supported")
all_files = list(others) + [ self ]
if len(set(o.pos for o in all_files)) > 1:
l.warning("Cheap HACK to support multiple file positions in a merge.")
# self.pos = max(o.pos for o in all_files)
# max cannot be used as file positions might be symbolic.
#max_pos = None
#for o in all_files:
# if max_pos is not None:
# comp = self.state.se.simplify(max_pos >= o.pos)
# #if self.state.se.symbolic(comp):
# # #import ipdb; ipdb.set_trace()
# # raise SimMergeError("merging file positions with symbolic max position is not ye supported (TODO)")
# max_pos = o.pos if self.state.se.is_false(comp) else max_pos
# else:
# max_pos = o.pos
self.pos = max(
self.state.se.max(self.pos),
max(o.state.se.max(o.pos) for o in others)
)
#if len(set(o.name for o in all_files)) > 1:
# raise SimMergeError("merging file names is not yet supported (TODO)")
#if len(set(o.mode for o in all_files)) > 1:
# raise SimMergeError("merging modes is not yet supported (TODO)")
return self.content.merge(
[ o.content for o in others ], merge_conditions, common_ancestor=common_ancestor
)
def widen(self, others):
return self.merge(others, [])
class SimDialogue(SimFile):
"""
Emulates a dialogue with a program. Enables us to perform concrete short reads.
"""
def __init__(self, name, mode=None, pos=0, content=None, size=None, dialogue_entries=None):
super(SimDialogue, self).__init__(name, mode=mode, pos=pos, content=content, size=size)
self.dialogue_entries = [ ] if dialogue_entries is None else dialogue_entries
def set_state(self, st):
super(SimDialogue, self).set_state(st)
if isinstance(self.pos, (int, long)):
self.pos = claripy.BVV(self.pos, st.arch.bits)
if isinstance(self.size, (int, long)):
self.size = claripy.BVV(self.size, st.arch.bits)
self.content.set_state(st)
def add_dialogue_entry(self, dialogue_len):
"""
Add a new dialogue piece to the end of the dialogue.
"""
self.dialogue_entries.append(dialogue_len)
def read(self, dst_addr, length):
"""
Reads some data from current dialogue entry, emulates short reads.
"""
# make sure there is a current dialogue
try:
# this should always be a concrete value
current_pkt_length = self.dialogue_entries.pop(0)
except IndexError:
return 0
# two things can happen here:
# * we have a less than or equal amount of concrete content than the request read length
# * we have more concrete content than what was requested
# we assume the length passed to read can always be concretized to a single value
# because our dialogue entries will always be preconstrained
lengths = self.state.se.eval_upto(length, 2)
if len(lengths) > 1:
raise ValueError("read called with a symbolic length which can be more than a single value")
length_c = lengths[0]
if current_pkt_length <= length_c:
self.content.copy_contents(dst_addr, self.pos, current_pkt_length, dst_memory=self.state.memory)
return_length = current_pkt_length
else:
self.content.copy_contents(dst_addr, self.pos, length_c, dst_memory=self.state.memory)
return_length = length_c
# now add the remaining content as a new dialogue on top of the dialogue list
leftovers = current_pkt_length - length_c
self.dialogue_entries.insert(0, leftovers)
self.pos += return_length
return return_length
# Copies the SimDialogue object.
def copy(self):
return SimDialogue(self.name, mode=self.mode, pos=self.pos, content=self.content.copy(), size=self.size, dialogue_entries=list(self.dialogue_entries))
from ..state_plugins.symbolic_memory import SimSymbolicMemory
from ..errors import SimMergeError
|
Edna Pedroza joins the show to talk about her passion for riding bikes, her founding of the annual Fresno Bike Prom, becoming a certified Instructor for the League of American Bicyclists and her travels to Iceland and India.
You can find Edna on Instagram at @ednapedroza.
Also, check out and support the Fresno County Bicycle Coalition at https://fresnobike.org/.
|
"""
Problem Statement:
By starting at the top of the triangle below and moving to adjacent numbers on
the row below, the maximum total from top to bottom is 23.
3
7 4
2 4 6
8 5 9 3
That is, 3 + 7 + 4 + 9 = 23.
Find the maximum total from top to bottom in triangle.txt (right click and
'Save Link/Target As...'), a 15K text file containing a triangle with
one-hundred rows.
"""
import os
def solution():
"""
Finds the maximum total in a triangle as described by the problem statement
above.
>>> solution()
7273
"""
script_dir = os.path.dirname(os.path.realpath(__file__))
triangle = os.path.join(script_dir, "triangle.txt")
with open(triangle) as f:
triangle = f.readlines()
a = map(lambda x: x.rstrip("\r\n").split(" "), triangle)
a = list(map(lambda x: list(map(lambda y: int(y), x)), a))
for i in range(1, len(a)):
for j in range(len(a[i])):
if j != len(a[i - 1]):
number1 = a[i - 1][j]
else:
number1 = 0
if j > 0:
number2 = a[i - 1][j - 1]
else:
number2 = 0
a[i][j] += max(number1, number2)
return max(a[-1])
if __name__ == "__main__":
print(solution())
|
In the 10th and final part of the LANDESK Mac Management video series, I’ll show you how to add a “bare metal” record into the LANDESK console so that you can target your Provisioning Template created in the 9th video to a machine not currently being managed by LANDESK. This is a good example of what you could do when you buy a new machine and want to deploy an image to it.
In my example template, I will deploy my latest El Capitan image built with AutoDMG, deploy TextWrangler, apply a configuration profile to adjust the Dock and to bind the machine to my domain, name the machine, and enable Core Storage. All of this is done with my template, which specifies the order in which they are applied.
Also, by way of note, the process to target a managed device inside of LANDESK is very similar to what is shown in this video, you simply need to drag the machine from your All Devices list as opposed to using the bare metal machine method.
In part 9 of the LANDESK Mac Management video series, we more or less get to bring together everything we've done up to this point: agent deployment, software deployment, configuration profile deployment (which can include binding the machine to the domain), patch deployment, and even FileVault deployment; all in a chained template experience when we provision a new device.
I created slides for our Interchange event explaining each step of the provisioning template build process, available here, but for many of you, the video below may be more beneficial.
In part 8 of the LANDESK Mac Management video series, we’ll download a couple of freeware utilities to assist us in creating a gold image for Provisioning. The first tool we’ll use is AutoDMG. This is an image builder tool created by Per Olofsson and can be downloaded from https://github.com/MagerValp/AutoDMG/releases. AutoDMG builds an actual image file directly from an OS X installer, precluding the need to build out an actual machine and capture the image from it. There are a lot of benefits in going with this approach and is our recommended approach at LANDESK.
Furthermore, AutoDMG, as part of the build process, allows you to bundle in deployment packages as well. While LANDESK recommends against bundling software packages directly into your image in most scenarios, there are a couple of configuration packages you may want to bundle in order to make the provisioning process more streamlined. One of those packages might be the creation of an admin account and setting it to auto-login. CreateUserPkg, another utility written by Per Olofsson, is the one I recommend. The video walks you through creating an admin account and setting it to auto-login. See http://magervalp.github.io/CreateUserPkg/ for the utility download.
You may also want to track down other configuration packages to assist you as well, such as Rich Trouton’s recommendations on disabling Apple’s Diagnostics and Usage utility (https://derflounder.wordpress.com/2014/11/21/controlling-the-diagnostics-usage-report-settings-on-yosemite/) or the iCloud confirmation window (https://derflounder.wordpress.com/2014/10/16/disabling-the-icloud-and-diagnostics-pop-up-windows-in-yosemite/).
In part 7 of the LANDESK Mac Management video series, we’ll prepare for an operating system deployment by creating a NetBoot image, the equivalent of a WinPE image for the Windows world, to boot the OS X devices into a pre-boot environment. The video will demonstrate how to use Apple’s System Image Utility and LANDESK’s Startup Disk Stamper to accomplish this task.
Hopefully if you’ve arrived to this point, you’ve already built out the necessary preferred package server, built your NBI file, configured the LANDESK Core server with the NBI details, deployed the service to capture the NetBoot request, blessed your El Capitan clients, created and captured your gold image and it’s finally time to reap the benefits.
Specify the path to save your Mac and Windows image files. The path should be smb://fqdn/share/filename.dmg for an OS X image or smb://fqdn/share/filename.image for a Windows image. Just make sure your preferred server credentials have access to the shares.
Ensure the Netboot action has the Server variable set to your PXE rep or OS X server unless you’re using the USB NetBoot environment. The server URL format should be bsdp://ipaddress to ensure compatibility with El Capitan’s SIP.
If deploying a Mac and Windows image, adjust the partition sizes in the Create Partitions actions under Pre-OS Installation. You can set the sizes in percentages so the template can work on any HD size.
Set the correct partition identifier on the Deploy image action(s) under OS installation inside the Command-line parameters box. Make sure you do this for all Deploy Image actions.
For convenience, the action can be renamed in the properties panel if you have multiple images being deployed.
If deploying to an unmanaged machine(s), create a record for the new device(s) in the Network View > Configuration > Bare Metal Server tool. See the help file for more info.
In order to deploy an image with LANDESK Management Suite, at least one preferred package server must be created. The Provisioning process within LDMS uses the user account and password supplied for the preferred server to access the share and to write the image files to the shares specified in your capture and deploy templates.
Enter “Imaging” for the share alias, and navigate to the C:\Distribution\Imaging directory created in Step 1.
Enable directory browsing by selecting the ExampleShare folder in the navigation frame and then clicking the “Directory Browsing” icon and clicking “Enable” in the right-hand pane.
Navigate to the C:\Distribution\Imaging directory and right-click on the Imaging share.
Click “Permissions” and give a domain account account Full Control access to the share. This will be the account used when the provisioning process needs to access or write to the share.
Ensure that the same account is also given Full Control on the Security tab.
Enter Server Name and Credentials to the newly created Imaging share on the Preferred Server. This needs to be the same account supplied in Step 4 in the UNC Share area.
Enter the IP address ranges for the clients subnet(s) that this preferred server will serve.
Imaging a device has changed dramatically over the years. In the early 2000s, one would load everything possible onto the image in an effort to reduce the number of software requests the HelpDesk would receive post deployment.
The term bloated is often used for such corporate images. Not only did it take forever to deploy the gargantuan images, but conflicts between unnecessary and unused software applications were extremely prevalent.
LANDESK recommends the complete opposite approach in 2016. When creating your corporate gold image, leave it as plain and as vanilla as possible. Build all customizations into your provisioning templates and inject those customizations during the post-provisioning process.
Doing so will allow you to easily update and tweak your applications and customizations in real time, ensuring each device configured contains the latest and greatest.
Install the latest LANDESK agent (9.6 SP2 or greater). Unlike the Windows process, a LANDESK agent is required to be on the gold image.
Note: This IP address must match the exact address used to bless your El Capitan devices in order to NetBoot the device remotely. See http://appleintheenterprise.com/2016/02/09/blessing-an-el-capitan-device-for-netbooting/ for more info.
Select the Reboot/shutdown action and select OK.
In OS X 10.11 El Capitan, Apple has introduced their new System Integrity Protection feature which affects how you are able to NetBoot devices. If you think you’ll have the need to NetBoot a device anytime in the future, after it leaves your hands, you’re going to need to “bless” it with your sanctioned NetBoot servers prior to it going out the door.
Blessing a device with a NetBoot server is easy and only takes a couple of minutes per device; however, it is very hands-on and will be extremely time-consuming if you have a ton of devices, especially if they're already in the field, so plan accordingly prior to upgrading to El Capitan.
Press and hold the keys Command (⌘)-R immediately after you turn on your Mac and hear the startup sound. Keep holding until you see the progress bar.
When the device boots into the Recovery Mode, you should see a Mac OS X Utilities toolbar. If you end up back to your typical login screen, reboot and try hitting the Command (⌘)-R keys again.
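From there (speaking generally about the typical El Capitan workflow, not as a transcript of the video), you would open Terminal from the Utilities menu and whitelist each sanctioned NetBoot server with a command along the lines of: csrutil netboot add 192.168.1.10, substituting the IP address of your LANDESK PXE representative or NetBoot server, the same address you configure on the core. Running csrutil netboot list confirms the server was added, and csrutil netboot remove takes one back out. Repeat for each approved server, then restart the Mac normally.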
In order to image a Mac device, you need to boot it into a pre-boot environment that is capable of making system level changes to the hard drive. To make these types of changes, the primary operating system cannot be mounted and therefore an alternative boot environment is required for the device.
The alternative boot environment for OS X is called NetBoot. While you can take a NetBoot Image file, put it on a USB stick and plug that stick directly into a Mac, such a method requires physical access to the device and is therefore not as desirable.
Alternatively, to forgo the need to have physical access to a device, you can create a service on the network that will listen for a Mac client to make a NetBoot request and then tell the client where to download the NetBoot Image file.
LANDESK has built this service into its PXE Representative technology that is also used for booting Windows devices into its equivalent pre-boot environment WinPE.
The steps below will walk you through configuring your core server with the information regarding the location of the NBI file so when the PXE representative service is established, it will be able to appropriately respond with the information the Mac will need to boot the NetBoot Image file.
Logon to the device from which you created the LANDESK NBI file outlined previously.
Supply the HTTP path to your Netboot image files and then click Browse to select your appropriate NBI.
Ensure your HTTP share has been properly enabled to support files with no extensions as outlined in the link in step 2.
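As a side note (this is an assumption about a typical IIS setup rather than a step shown in the video), serving the extensionless files inside an NBI usually means adding a MIME mapping for blank extensions, for example <mimeMap fileExtension="." mimeType="application/octet-stream" /> under <staticContent> in the share's web.config, or the equivalent entry in the MIME Types feature of IIS Manager.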
|
# coding=utf-8
"""Route to error logs web page."""
from __future__ import unicode_literals
from datetime import datetime, timedelta
from mako.filters import html_escape
from six import text_type
from tornroutes import route
from .base import PageTemplate, WebRoot
from .... import logger, ui
from ....classes import ErrorViewer, WarningViewer
from ....issue_submitter import IssueSubmitter
from ....logger import filter_logline, read_loglines
from ....version_checker import CheckVersion
log_name_filters = {
None: html_escape('<No Filter>'),
'DAILYSEARCHER': 'Daily Searcher',
'BACKLOG': 'Backlog',
'SHOWUPDATER': 'Show Updater',
'CHECKVERSION': 'Check Version',
'SHOWQUEUE': 'Show Queue (All)',
'SEARCHQUEUE': 'Search Queue (All)',
'SEARCHQUEUE-DAILY-SEARCH': 'Search Queue (Daily Searcher)',
'SEARCHQUEUE-BACKLOG': 'Search Queue (Backlog)',
'SEARCHQUEUE-MANUAL': 'Search Queue (Manual)',
'SEARCHQUEUE-FORCED': 'Search Queue (Forced)',
'SEARCHQUEUE-RETRY': 'Search Queue (Retry/Failed)',
'SEARCHQUEUE-RSS': 'Search Queue (RSS)',
'SHOWQUEUE-UPDATE': 'Show Queue (Update)',
'SHOWQUEUE-REFRESH': 'Show Queue (Refresh)',
'FINDPROPERS': 'Find Propers',
'POSTPROCESSOR': 'PostProcessor',
'FINDSUBTITLES': 'Find Subtitles',
'TRAKTCHECKER': 'Trakt Checker',
'EVENT': 'Event',
'ERROR': 'Error',
'TORNADO': 'Tornado',
'Thread': 'Thread',
'MAIN': 'Main',
}
thread_names = {
'SHOWQUEUE': {name for name in log_name_filters if name and name.startswith('SHOWQUEUE-')},
'SEARCHQUEUE': {name for name in log_name_filters if name and name.startswith('SEARCHQUEUE-')}
}
log_periods = {
'all': None,
'one_day': timedelta(days=1),
'three_days': timedelta(days=3),
'one_week': timedelta(days=7),
}
@route('/errorlogs(/?.*)')
class ErrorLogs(WebRoot):
"""Route to errorlogs web page."""
# @TODO: Move this route to /log(/?)
# GitHub Issue submitter
issue_submitter = IssueSubmitter()
def __init__(self, *args, **kwargs):
"""Default constructor."""
super(ErrorLogs, self).__init__(*args, **kwargs)
def _create_menu(self, level):
return [
{ # Clear Errors
'title': 'Clear Errors',
'path': 'errorlogs/clearerrors/',
'requires': self._has_errors() and level == logger.ERROR,
'icon': 'ui-icon ui-icon-trash'
},
{ # Clear Warnings
'title': 'Clear Warnings',
'path': 'errorlogs/clearerrors/?level={level}'.format(level=logger.WARNING),
'requires': self._has_warnings() and level == logger.WARNING,
'icon': 'ui-icon ui-icon-trash'
},
{ # Submit Errors
'title': 'Submit Errors',
'path': 'errorlogs/submit_errors/',
'requires': self._has_errors() and level == logger.ERROR,
'class': 'submiterrors',
'confirm': True,
'icon': 'ui-icon ui-icon-arrowreturnthick-1-n'
},
]
def index(self, level=logger.ERROR, **kwargs):
"""Default index page."""
try:
level = int(level)
except (TypeError, ValueError):
level = logger.ERROR
t = PageTemplate(rh=self, filename='errorlogs.mako')
return t.render(header='Logs & Errors', title='Logs & Errors', topmenu='system',
submenu=self._create_menu(level), logLevel=level, controller='errorlogs', action='index')
@staticmethod
def _has_errors():
return bool(ErrorViewer.errors)
@staticmethod
def _has_warnings():
return bool(WarningViewer.errors)
def clearerrors(self, level=logger.ERROR):
"""Clear the errors or warnings."""
# @TODO: Replace this with DELETE /api/v2/log/{logLevel} or /api/v2/log/
if int(level) == logger.WARNING:
WarningViewer.clear()
else:
ErrorViewer.clear()
return self.redirect('/errorlogs/viewlog/')
def viewlog(self, min_level=logger.INFO, log_filter=None, log_search=None, max_lines=1000, log_period='one_day', **kwargs):
"""View the log given the specified filters."""
# @TODO: Replace index with this or merge it so ?search=true or ?query={queryString} enables this "view"
min_level = int(min_level)
log_filter = log_filter if log_filter in log_name_filters else None
t = PageTemplate(rh=self, filename='viewlogs.mako')
period = log_periods.get(log_period)
modification_time = datetime.now() - period if period else None
data = [line for line in read_loglines(modification_time=modification_time, formatter=text_type, max_lines=max_lines,
predicate=lambda l: filter_logline(l, min_level=min_level,
thread_name=thread_names.get(log_filter, log_filter),
search_query=log_search))]
return t.render(header='Log File', title='Logs', topmenu='system', log_lines='\n'.join([html_escape(line) for line in data]),
min_level=min_level, log_name_filters=log_name_filters, log_filter=log_filter, log_search=log_search, log_period=log_period,
controller='errorlogs', action='viewlogs')
def submit_errors(self):
"""Create an issue in medusa issue tracker."""
results = self.issue_submitter.submit_github_issue(CheckVersion())
for submitter_result, issue_id in results:
submitter_notification = ui.notifications.error if issue_id is None else ui.notifications.message
submitter_notification(submitter_result)
return self.redirect('/errorlogs/')
|
To the UK, add £2.50 for postage for the first book, then £1 for each subsequent book, up to £5.50 total.
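For example, postage on an order of four books would be £2.50 + £1 + £1 + £1 = £5.50, which is also the cap.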
£27.50 new, £22 from me.
£30 new, £22 from me.
£17 new, £12 from me.
Beautiful photographs, and not the stock images usually found in books on Arts and Crafts furniture. The choice of projects, including a Morris chair, settle, and glazed bookcase, is more advanced than you generally find in these sorts of books.
By classic, understand Georgian and Regency, so the book is of little interest to me. There is plenty of veneering required in these projects. And chapter four is devoted to hand veneering.
Fairly straightforward introduction to the router, but some of the home-made jigs featured are too ambitious for the cursory details provided. Best for someone new to routers.
£15 new from Amazon or £7.00 from me.
£35 on Amazon (presumably out of print); £12 from me.
With very specific subject matter, this book is much more useful than the endlessly repetitive "introduction" books.
except the big one: "how do I make money?"
£62 from Amazon (because it is out of print) or £17 from me (the price it was when available).
£12 and good value at that. An introduction to everything, including bits of furniture history.
|
"""!
@brief Elbow method to determine the optimal number of clusters for k-means clustering.
@details Implementation based on paper @cite article::cluster::elbow::1.
@authors Andrei Novikov ([email protected])
@date 2014-2020
@copyright BSD-3-Clause
"""
import math
from pyclustering.cluster.kmeans import kmeans
from pyclustering.cluster.center_initializer import kmeans_plusplus_initializer, random_center_initializer
from pyclustering.core.wrapper import ccore_library
import pyclustering.core.elbow_wrapper as wrapper
class elbow:
"""!
@brief Class represents Elbow method that is used to find out appropriate amount of clusters in a dataset.
@details The elbow is a heuristic method of interpretation and validation of consistency within cluster analysis
designed to help find the appropriate number of clusters in a dataset. The Elbow method performs clustering
using the K-Means algorithm for each K and estimates clustering results using the sum of squared errors. By default
the K-Means++ algorithm is used to calculate initial centers that are used by the K-Means algorithm.
The Elbow is determined by max distance from each point (x, y) to segment from kmin-point (x0, y0) to kmax-point (x1, y1),
where 'x' is K (amount of clusters), and 'y' is the within-cluster error. The following expression is used to calculate the Elbow
length:
\f[Elbow_{k} = \frac{\left ( y_{0} - y_{1} \right )x_{k} + \left ( x_{1} - x_{0} \right )y_{k} + \left ( x_{0}y_{1} - x_{1}y_{0} \right )}{\sqrt{\left ( x_{1} - x_{0} \right )^{2} + \left ( y_{1} - y_{0} \right )^{2}}}\f]
Usage example of Elbow method for cluster analysis:
@code
from pyclustering.cluster.kmeans import kmeans, kmeans_visualizer
from pyclustering.cluster.center_initializer import kmeans_plusplus_initializer
from pyclustering.cluster.elbow import elbow
from pyclustering.utils import read_sample
from pyclustering.samples.definitions import SIMPLE_SAMPLES
# read sample 'Simple3' from file (sample contains four clusters)
sample = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE3)
# create instance of Elbow method using K value from 1 to 10.
kmin, kmax = 1, 10
elbow_instance = elbow(sample, kmin, kmax)
# process input data and obtain results of analysis
elbow_instance.process()
amount_clusters = elbow_instance.get_amount() # most probable amount of clusters
wce = elbow_instance.get_wce() # total within-cluster errors for each K
# perform cluster analysis using K-Means algorithm
centers = kmeans_plusplus_initializer(sample, amount_clusters,
amount_candidates=kmeans_plusplus_initializer.FARTHEST_CENTER_CANDIDATE).initialize()
kmeans_instance = kmeans(sample, centers)
kmeans_instance.process()
# obtain clustering results and visualize them
clusters = kmeans_instance.get_clusters()
centers = kmeans_instance.get_centers()
kmeans_visualizer.show_clusters(sample, clusters, centers)
@endcode
By default Elbow uses K-Means++ initializer to calculate initial centers for K-Means algorithm, it can be changed
using argument 'initializer':
@code
# perform analysis using Elbow method with random center initializer for K-Means algorithm inside of the method.
kmin, kmax = 1, 10
elbow_instance = elbow(sample, kmin, kmax, initializer=random_center_initializer)
elbow_instance.process()
@endcode
@image html elbow_example_simple_03.png "Elbows analysis with further K-Means clustering."
"""
def __init__(self, data, kmin, kmax, **kwargs):
"""!
@brief Construct Elbow method.
@param[in] data (array_like): Input data that is presented as array of points (objects), each point should be represented by array_like data structure.
@param[in] kmin (int): Minimum amount of clusters that should be considered.
@param[in] kmax (int): Maximum amount of clusters that should be considered.
@param[in] **kwargs: Arbitrary keyword arguments (available arguments: `ccore`, `initializer`, `random_state`, `kstep`).
<b>Keyword Args:</b><br>
- ccore (bool): If `True` then C++ implementation of pyclustering library is used (by default `True`).
- initializer (callable): Center initializer that is used by K-Means algorithm (by default K-Means++).
- random_state (int): Seed for random state (by default is `None`, current system time is used).
- kstep (int): Search step in the interval [kmin, kmax] (by default is `1`).
"""
self.__initializer = kwargs.get('initializer', kmeans_plusplus_initializer)
self.__random_state = kwargs.get('random_state', None)
self.__kstep = kwargs.get('kstep', 1)
self.__ccore = kwargs.get('ccore', True) or \
isinstance(self.__initializer, kmeans_plusplus_initializer) or \
isinstance(self.__initializer, random_center_initializer)
if self.__ccore:
self.__ccore = ccore_library.workable()
self.__data = data
self.__kmin = kmin
self.__kmax = kmax
self.__wce = []
self.__elbows = []
self.__kvalue = -1
self.__verify_arguments()
def process(self):
"""!
@brief Performs analysis to find out appropriate amount of clusters.
@return (elbow) Returns itself (Elbow instance).
"""
if self.__ccore:
self.__process_by_ccore()
else:
self.__process_by_python()
return self
def __process_by_ccore(self):
"""!
@brief Performs processing using C++ implementation.
"""
if isinstance(self.__initializer, kmeans_plusplus_initializer):
initializer = wrapper.elbow_center_initializer.KMEANS_PLUS_PLUS
else:
initializer = wrapper.elbow_center_initializer.RANDOM
result = wrapper.elbow(self.__data, self.__kmin, self.__kmax, self.__kstep, initializer, self.__random_state)
self.__kvalue = result[0]
self.__wce = result[1]
def __process_by_python(self):
"""!
@brief Performs processing using python implementation.
"""
for amount in range(self.__kmin, self.__kmax + 1, self.__kstep):
centers = self.__initializer(self.__data, amount, random_state=self.__random_state).initialize()
instance = kmeans(self.__data, centers, ccore=False)
instance.process()
self.__wce.append(instance.get_total_wce())
self.__calculate_elbows()
self.__find_optimal_kvalue()
def get_amount(self):
"""!
@brief Returns appropriate amount of clusters.
"""
return self.__kvalue
def get_wce(self):
"""!
@brief Returns list of total within cluster errors for each K-value, for example, in case of `kstep = 1`:
(kmin, kmin + 1, ..., kmax).
"""
return self.__wce
def __calculate_elbows(self):
"""!
@brief Calculates potential elbows.
@details Elbow is calculated as a distance from each point (x, y) to segment from kmin-point (x0, y0) to kmax-point (x1, y1).
"""
x0, y0 = 0.0, self.__wce[0]
x1, y1 = float(len(self.__wce)), self.__wce[-1]
for index_elbow in range(1, len(self.__wce) - 1):
x, y = float(index_elbow), self.__wce[index_elbow]
segment = abs((y0 - y1) * x + (x1 - x0) * y + (x0 * y1 - x1 * y0))
norm = math.sqrt((x1 - x0) ** 2 + (y1 - y0) ** 2)
distance = segment / norm
self.__elbows.append(distance)
def __find_optimal_kvalue(self):
"""!
@brief Finds elbow and returns corresponding K-value.
"""
optimal_elbow_value = max(self.__elbows)
self.__kvalue = (self.__elbows.index(optimal_elbow_value) + 1) * self.__kstep + self.__kmin
def __verify_arguments(self):
"""!
@brief Verify input parameters for the algorithm and throw exception in case of incorrectness.
"""
if len(self.__data) == 0:
raise ValueError("Input data is empty (size: '%d')." % len(self.__data))
if self.__kmin < 1:
raise ValueError("K min value (current value '%d') should be greater or equal to 1." % self.__kmin)
if self.__kstep < 1:
raise ValueError("K step value (current value '%d') should be greater or equal to 1." % self.__kstep)
if self.__kmax - self.__kmin + 1 < 3:
raise ValueError("Amount of K (" + str(self.__kmax - self.__kmin) + ") is too small for analysis. "
"It is require to have at least three K to build elbow.")
steps_to_process = math.floor((self.__kmax - self.__kmin) / self.__kstep) + 1
if steps_to_process < 3:
raise ValueError("The search step is too high '%d' for analysis (amount of K for analysis is '%d'). "
"It is require to have at least three K to build elbow." % (self.__kstep, steps_to_process))
if len(self.__data) < self.__kmax:
raise ValueError("K max value '%d' is greater than amount of points in data '%d'." %
(self.__kmax, len(self.__data)))
|
After waiting for years to have her procedure (and three kids later), Michelle is finally through and on the road to recovery. While her first couple of nights were a little rough (pain medication made her groggy, and she didn’t want to do much), every day got a little better after that. She got up, moved around, and felt better each time.
She slept in her own bed, although many women sleep in a recliner for easy standing. By the time one full week rolled around, she walked into the office for her first follow-up appointment, moving pretty well…and she even had lunch out with her mom and daughter that day! The next week, she came in and had her drains removed, which is the highlight of recovery for just about anyone. The surgical drains, which help prevent fluid build-up after surgery, are necessary but a pain to deal with. In week two, she was out shopping at the mall but had her mom with her to help lift her kids (a 3-year-old and a 17-month-old) in and out of the car and strollers. By week three, she was walking well, running errands, going to Cardinals games and wearing nice tailored shorts over her compression garment: you couldn't even tell she'd had surgery. Although she gets tired a little more easily than normal if she overdoes it, for the most part she feels great. Now, at week four, she pretty much feels like her old self again, but looks like she did 15 years ago. To her, surgery already seems like a long time ago.
|
#!/usr/bin/python
# Copyright (C) 2012 The Regents of The University California.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Clear OS buffer cache for mesos clusters on EC2.
import os
import thread
import time
machinesFile = "/root/spark-ec2/slaves"
machs = open(machinesFile).readlines()
machs = map(lambda s: s.strip(),machs)
machCount = len(machs)
machID = 0
cmd = "sync; echo 3 > /proc/sys/vm/drop_caches"
done = {}
def dropCachesThread( mach, myID, *args ):
print "SSH to machine %i" % (myID)
os.system("ssh %s '%s'" % (mach, cmd))
done[mach] = "done"
for mach in ( machs ):
thread.start_new_thread(dropCachesThread, (mach, machID))
machID = machID + 1
time.sleep(0.2)
while (len(done.keys()) < machCount):
print "waiting for %d tasks to finish..." % (machCount - len(done.keys()))
time.sleep(1)
print "Done with %i threads" % (len(done.keys()))
|
Remember the NBC series Sanford and Son, which enjoyed a highly successful run in the mid '70s? Actor and comedian Redd Foxx played the role of Fred G. Sanford, the junk dealer famous for feigning heart attacks whenever things didn't go his way.
Seems like the highly successful U.S. content industry, which still calls the Los Angeles area its home, has taken its strategy for the continued global dominance of mass media straight out of the script of “Sanford and Son.” Each time emerging technologies threaten its dominance of the markets for the creation and distribution of entertainment for the masses, it runs to Washington feigning the need for CPR.
In its "The 3-minute Guide to the Broadcast Flag," the Electronic Frontier Foundation (EFF) notes that the entertainment companies don't like tools that give you more control. Article author Cory Doctorow writes, "The movie studios boycotted TV because they thought it would clean out the movie theaters. Then they complained that the remote control would make it too easy to skip commercials. Then they freaked out over the VCR, saying it was the "Boston Strangler" of the American film industry." The entertainment conglomerates are also not fond of audio- and video-capture cards and sued a personal video recorder company into bankruptcy. And the list goes on and on.
For a recent example, Sony BMG started selling music CDs with digital rights management (DRM) software developed by First4Internet XCP and SunnComm MediaMax. The problems with the Sony BMG CDs surfaced when security researchers discovered that XCP and MediaMax installed undisclosed and, in some cases, hidden files on users' Windows computers, potentially exposing music fans to malicious attacks by third parties. The infected CDs also communicated back to Sony BMG about customers' computer use without proper notification.
Named the Sony BMG Rootkit Fiasco, the DRM CDs gave the media conglomerates yet another black eye, as the EFF successfully sued Sony BMG, forcing the company to withdraw the technology from the market and to compensate affected consumers with DRM free versions of the CDs or legal download alternatives.
As this column is about to relate, this has not slowed down the efforts of virtually every industry that touches mass media content to portray the transition to networked digital media distribution as life threatening. Now, HDTV is the latest cause for trauma.
Once again the media conglomerates are warning the politicians that their constituents cannot be trusted — that we are little better than common thieves. While they feign a Sanford-like heart attack and beg politicians for CPR, the reality is that in this case, CPR would more accurately be defined as the Content Protection Racket.
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import contextlib
import httplib
import logging
import os
import tempfile
import time
from pylib import android_commands
from pylib import constants
from pylib import ports
from pylib.chrome_test_server_spawner import SpawningServer
from pylib.flag_changer import FlagChanger
from pylib.forwarder import Forwarder
from pylib.valgrind_tools import CreateTool
# TODO(frankf): Move this to pylib/utils
import lighttpd_server
# A file on device to store ports of net test server. The format of the file is
# test-spawner-server-port:test-server-port
NET_TEST_SERVER_PORT_INFO_FILE = 'net-test-server-ports'
class BaseTestRunner(object):
"""Base class for running tests on a single device.
A subclass should implement RunTests() with no parameter, so that calling
the Run() method will set up tests, run them and tear them down.
"""
def __init__(self, device, tool, build_type):
"""
Args:
device: Tests will run on the device of this ID.
tool: Name of the Valgrind tool to run the tests under (may be an empty string).
build_type: 'Release' or 'Debug'.
"""
self.device = device
self.adb = android_commands.AndroidCommands(device=device)
self.tool = CreateTool(tool, self.adb)
self._http_server = None
self._forwarder = None
self._forwarder_device_port = 8000
self.forwarder_base_url = ('http://localhost:%d' %
self._forwarder_device_port)
self.flags = FlagChanger(self.adb)
self.flags.AddFlags(['--disable-fre'])
self._spawning_server = None
self._spawner_forwarder = None
# We will allocate port for test server spawner when calling method
# LaunchChromeTestServerSpawner and allocate port for test server when
# starting it in TestServerThread.
self.test_server_spawner_port = 0
self.test_server_port = 0
self.build_type = build_type
def _PushTestServerPortInfoToDevice(self):
"""Pushes the latest port information to device."""
self.adb.SetFileContents(self.adb.GetExternalStorage() + '/' +
NET_TEST_SERVER_PORT_INFO_FILE,
'%d:%d' % (self.test_server_spawner_port,
self.test_server_port))
def RunTest(self, test):
"""Runs a test. Needs to be overridden.
Args:
test: A test to run.
Returns:
Tuple containing: (test_result.TestResults, tests to rerun or None)
"""
raise NotImplementedError
def SetUp(self):
"""Run once before all tests are run."""
Forwarder.KillDevice(self.adb, self.tool)
def TearDown(self):
"""Run once after all tests are run."""
self.ShutdownHelperToolsForTestSuite()
def CopyTestData(self, test_data_paths, dest_dir):
"""Copies |test_data_paths| list of files/directories to |dest_dir|.
Args:
test_data_paths: A list of files or directories relative to |dest_dir|
which should be copied to the device. The paths must exist in
|CHROME_DIR|.
dest_dir: Absolute path to copy to on the device.
"""
for p in test_data_paths:
self.adb.PushIfNeeded(
os.path.join(constants.CHROME_DIR, p),
os.path.join(dest_dir, p))
def LaunchTestHttpServer(self, document_root, port=None,
extra_config_contents=None):
"""Launches an HTTP server to serve HTTP tests.
Args:
document_root: Document root of the HTTP server.
port: port on which we want to the http server bind.
extra_config_contents: Extra config contents for the HTTP server.
"""
self._http_server = lighttpd_server.LighttpdServer(
document_root, port=port, extra_config_contents=extra_config_contents)
if self._http_server.StartupHttpServer():
logging.info('http server started: http://localhost:%s',
self._http_server.port)
else:
logging.critical('Failed to start http server')
self.StartForwarderForHttpServer()
return (self._forwarder_device_port, self._http_server.port)
def _CreateAndRunForwarder(
self, adb, port_pairs, tool, host_name, build_type):
"""Creates and run a forwarder."""
forwarder = Forwarder(adb, build_type)
forwarder.Run(port_pairs, tool, host_name)
return forwarder
def StartForwarder(self, port_pairs):
"""Starts TCP traffic forwarding for the given |port_pairs|.
Args:
port_pairs: A list of (device_port, local_port) tuples to forward.
"""
if self._forwarder:
self._forwarder.Close()
self._forwarder = self._CreateAndRunForwarder(
self.adb, port_pairs, self.tool, '127.0.0.1', self.build_type)
def StartForwarderForHttpServer(self):
"""Starts a forwarder for the HTTP server.
The forwarder forwards HTTP requests and responses between host and device.
"""
self.StartForwarder([(self._forwarder_device_port, self._http_server.port)])
def RestartHttpServerForwarderIfNecessary(self):
"""Restarts the forwarder if it's not open."""
# Checks to see if the http server port is being used. If not forwards the
# request.
# TODO(dtrainor): This is not always reliable because sometimes the port
# will be left open even after the forwarder has been killed.
if not ports.IsDevicePortUsed(self.adb,
self._forwarder_device_port):
self.StartForwarderForHttpServer()
def ShutdownHelperToolsForTestSuite(self):
"""Shuts down the server and the forwarder."""
# Forwarders should be killed before the actual servers they're forwarding
# to as they are clients potentially with open connections and to allow for
# proper hand-shake/shutdown.
Forwarder.KillDevice(self.adb, self.tool)
if self._forwarder:
self._forwarder.Close()
if self._http_server:
self._http_server.ShutdownHttpServer()
if self._spawning_server:
self._spawning_server.Stop()
self.flags.Restore()
def CleanupSpawningServerState(self):
"""Tells the spawning server to clean up any state.
If the spawning server is reused for multiple tests, this should be called
after each test to prevent tests affecting each other.
"""
if self._spawning_server:
self._spawning_server.CleanupState()
def LaunchChromeTestServerSpawner(self):
"""Launches test server spawner."""
server_ready = False
error_msgs = []
# Try 3 times to launch test spawner server.
for i in xrange(0, 3):
# Do not allocate port for test server here. We will allocate
# different port for individual test in TestServerThread.
self.test_server_spawner_port = ports.AllocateTestServerPort()
self._spawning_server = SpawningServer(self.test_server_spawner_port,
self.adb,
self.tool,
self.build_type)
self._spawning_server.Start()
server_ready, error_msg = ports.IsHttpServerConnectable(
'127.0.0.1', self.test_server_spawner_port, path='/ping',
expected_read='ready')
if server_ready:
break
else:
error_msgs.append(error_msg)
self._spawning_server.Stop()
# Wait for 2 seconds then restart.
time.sleep(2)
if not server_ready:
logging.error(';'.join(error_msgs))
raise Exception('Can not start the test spawner server.')
self._PushTestServerPortInfoToDevice()
self._spawner_forwarder = self._CreateAndRunForwarder(
self.adb,
[(self.test_server_spawner_port, self.test_server_spawner_port)],
self.tool, '127.0.0.1', self.build_type)
|
I recently signed up for a MUBI account. MUBI (formerly The Auteurs) is an online film website that integrates video streaming with social networking. It is your “online cinematheque”, anytime, anywhere. It will give you the latest buzz from the Cannes Film Festival or a restored masterpiece personally chosen by one of MUBI’s members, Martin Scorsese!
An interesting concept, check it out for yourself!
|
import logging
import simplejson as json
import re
from functools import reduce
from decimal import Decimal
from django.conf import settings
from django.db import connection
from django.shortcuts import render
from django.http import HttpResponse
from django.views.decorators.csrf import ensure_csrf_cookie
from grin_app.ensure_nocache import ensure_nocache
# SRID 4326 is WGS 84 long lat unit=degrees, also the specification of the
# geographic_coord field in the grin_accession table.
SRID = 4326
DEFAULT_LIMIT = 200
TWO_PLACES = Decimal('0.01')
ACCESSION_TAB = 'lis_germplasm.grin_accession'
ACC_SELECT_COLS = (
'gid', 'taxon', 'latdec', 'longdec', 'accenumb', 'elevation', 'cropname',
'collsite', 'acqdate', 'origcty'
)
# Brewer nominal category colors from chroma.js set1,2,3 concatenated:
NOMINAL_COLORS = [
"#e41a1c", "#377eb8", "#4daf4a", "#984ea3", "#ff7f00", "#ffff33",
"#a65628", "#f781bf", "#999999", "#66c2a5", "#fc8d62", "#8da0cb",
"#e78ac3", "#a6d854", "#ffd92f", "#e5c494", "#b3b3b3", "#8dd3c7",
"#ffffb3", "#bebada", "#fb8072", "#80b1d3", "#fdb462", "#b3de69",
"#fccde5", "#d9d9d9", "#bc80bd", "#ccebc5", "#ffed6f"
]
NOMINAL_THRESHOLD = 10
DEFAULT_COLOR = 'lightgrey'
ORDER_BY_FRAG = '''
ORDER BY ST_Distance(
geographic_coord::geography,
ST_Centroid(
ST_MakeEnvelope(%(minx)s, %(miny)s, %(maxx)s, %(maxy)s, %(srid)s)
)
) ASC, taxon, gid
'''
LIMIT_FRAG = 'LIMIT %(limit)s'
COUNTRY_REGEX = re.compile(r'[a-z]{3}', re.I)
TAXON_FTS_BOOLEAN_REGEX = re.compile(r'^(\w+\s*[\||&]\s*\w+)+$')
logger = logging.getLogger(__name__)
GRIN_ACC_WHERE_FRAGS = {
'fts': {
'include': lambda p: TAXON_FTS_BOOLEAN_REGEX.match(
p.get('taxon_query', '')),
'sql': "taxon_fts @@ to_tsquery('english', %(taxon_query)s)",
},
'fts_simple': {
'include': lambda p: p.get('taxon_query', None) and not
GRIN_ACC_WHERE_FRAGS['fts']['include'](p),
'sql': "taxon_fts @@ plainto_tsquery('english', %(taxon_query)s)",
},
'country': {
'include': lambda p: p.get('country', None),
'sql': 'origcty = %(country)s',
},
'geocoded_only': {
'include': lambda p: p.get('limit_geo_bounds', None) in (
True, 'true') or p.get('geocoded_only', None) in (True, 'true'),
'sql': 'latdec <> 0 AND longdec <> 0',
},
'limit_geo_bounds': {
'include': lambda p: p.get('limit_geo_bounds', None) in (True, 'true'),
'sql': '''
latdec <> 0 AND longdec <> 0 AND
ST_Contains(
ST_MakeEnvelope(%(minx)s, %(miny)s, %(maxx)s, %(maxy)s, %(srid)s),
geographic_coord::geometry
)''',
},
}
GRIN_EVAL_WHERE_FRAGS = {
'descriptor_name': {
'include': lambda p: p.get('descriptor_name', None),
'sql': 'descriptor_name = %(descriptor_name)s',
},
'accession prefix': {
'include': lambda p: p.get('prefix', None),
'sql': 'accession_prefix = %(prefix)s',
},
'accession number': {
'include': lambda p: p.get('acc_num', None),
'sql': 'accession_number = %(acc_num)s',
},
'accession surfix': {
'include': lambda p: p.get('suffix', None),
'sql': 'accession_surfix = %(suffix)s',
},
}
@ensure_csrf_cookie
@ensure_nocache
def index(req):
"""Render the index template, which will boot up angular-js.
"""
return render(req, 'grin_app/index.html', context=settings.BRANDING)
@ensure_csrf_cookie
@ensure_nocache
def evaluation_descr_names(req):
"""Return JSON for all distinct trait descriptor names matching the
given taxon. (the trait overlay choice is only available after a
taxon is selected). Join on the grin_accession table to use the
FTS index on taxon there.
"""
assert req.method == 'GET', 'GET request method required'
params = req.GET.dict()
assert 'taxon' in params, 'missing taxon param'
assert params['taxon'], 'empty taxon param'
params['taxon_query'] = params['taxon']
where_clauses = [
val['sql'] for key, val in GRIN_ACC_WHERE_FRAGS.items()
if val['include'](params)
]
if len(where_clauses) == 0:
where_sql = ''
else:
where_sql = 'WHERE %s' % ' AND '.join(where_clauses)
sql = '''
SELECT DISTINCT descriptor_name
FROM lis_germplasm.legumes_grin_evaluation_data
JOIN lis_germplasm.grin_accession
USING (accenumb)
%s
ORDER BY descriptor_name
''' % where_sql
sql_params = {'taxon_query': params['taxon']}
cursor = connection.cursor()
# logger.info(cursor.mogrify(sql, sql_params))
cursor.execute(sql, sql_params)
names = [row[0] for row in cursor.fetchall()]
result = json.dumps(names)
response = HttpResponse(result, content_type='application/json')
return response
@ensure_csrf_cookie
@ensure_nocache
def evaluation_search(req):
"""Return JSON array of observation_value for all trait records
matching a set of accession ids, and matching the descriptor_name
field. Used for creating map markers or map overlays with specific
accessions' trait data.
"""
assert req.method == 'POST', 'POST request method required'
params = json.loads(req.body)
assert 'accession_ids' in params, 'missing accession_ids param'
assert 'descriptor_name' in params, 'missing descriptor_name param'
sql = '''
SELECT accenumb, descriptor_name, observation_value
FROM lis_germplasm.legumes_grin_evaluation_data
WHERE descriptor_name = %(descriptor_name)s
AND accenumb IN %(accession_ids)s
'''
sql_params = {
'descriptor_name': params['descriptor_name'],
'accession_ids': tuple(params['accession_ids'])
}
cursor = connection.cursor()
# logger.info(cursor.mogrify(sql, sql_params))
cursor.execute(sql, sql_params)
rows = _dictfetchall(cursor)
# observation_value is a string field, so cast to int or float as necessary
rows_clean = []
for row in rows:
row['observation_value'] = _string2num(row['observation_value'])
rows_clean.append(row)
result = json.dumps(rows_clean, use_decimal=True)
response = HttpResponse(result, content_type='application/json')
return response
def _string2num(s):
"""
Convert a string to int or float if possible.
"""
try:
return int(s)
except ValueError:
pass
try:
return float(s)
except ValueError:
pass
return s
@ensure_csrf_cookie
@ensure_nocache
def evaluation_metadata(req):
"""Return JSON with trait metadata for the given taxon and trait
descriptor_name. This enables the client to display a legend, and
colorize accessions by either numeric or category traits.
"""
assert req.method == 'POST', 'POST request method required'
params = json.loads(req.body)
assert 'taxon' in params, 'missing taxon param'
assert 'descriptor_name' in params, 'missing descriptor_name param'
assert 'trait_scale' in params, 'missing trait_scale param'
assert 'accession_ids' in params, 'missing accession_ids param'
assert params['taxon'], 'empty taxon param'
result = None
cursor = connection.cursor()
# full text search on the taxon field in accessions table, also
# joining on taxon to get relevant evaluation metadata.
sql_params = {
'taxon_query': params['taxon'],
'descriptor_name': params['descriptor_name']
}
where_clauses = [
val['sql'] for
key, val in {**GRIN_ACC_WHERE_FRAGS, **GRIN_EVAL_WHERE_FRAGS}.items()
if val['include'](sql_params)
]
if len(where_clauses) == 0:
where_sql = ''
else:
where_sql = 'WHERE %s' % ' AND '.join(where_clauses)
sql = '''
SELECT DISTINCT taxon, descriptor_name, obs_type, obs_min, obs_max,
obs_nominal_values
FROM lis_germplasm.grin_evaluation_metadata
JOIN lis_germplasm.grin_accession
USING (taxon)
%s
''' % where_sql
# logger.info(cursor.mogrify(sql, sql_params))
cursor.execute(sql, sql_params)
trait_metadata = _dictfetchall(cursor)
if len(trait_metadata) == 0:
# early out if there were no matching metadata records
return HttpResponse({}, content_type='application/json')
obs_type = trait_metadata[0]['obs_type']
if obs_type == 'numeric':
if params['trait_scale'] == 'local':
# must perform another query to restrict observations to this
# set of accessions (local, not global)
sql = '''
SELECT observation_value
FROM lis_germplasm.legumes_grin_evaluation_data
WHERE accenumb IN %(accession_ids)s
AND descriptor_name = %(descriptor_name)s
'''
sql_params = {
'descriptor_name': params['descriptor_name'],
'accession_ids': tuple(params['accession_ids'])
}
# logger.info(cursor.mogrify(sql, sql_params))
cursor.execute(sql, sql_params)
obs_values = [_string2num(row[0]) for row in cursor.fetchall()]
result = {
'taxon_query': params['taxon'],
'descriptor_name': params['descriptor_name'],
'trait_type': 'numeric',
'min': min(obs_values) if obs_values else 0,
'max': max(obs_values) if obs_values else 0,
}
elif params['trait_scale'] == 'global':
mins = [rec['obs_min'] for rec in trait_metadata]
maxes = [rec['obs_max'] for rec in trait_metadata]
result = {
'taxon_query': params['taxon'],
'descriptor_name': params['descriptor_name'],
'trait_type': 'numeric',
                'min': sum(mins) / len(mins),
                'max': sum(maxes) / len(maxes),
}
elif obs_type == 'nominal':
vals = set()
for rec in trait_metadata:
vals |= set(rec['obs_nominal_values'])
num_preset_colors = len(NOMINAL_COLORS)
colors = {}
for i, val in enumerate(vals):
if i < num_preset_colors:
colors[val] = NOMINAL_COLORS[i]
else:
colors[val] = DEFAULT_COLOR
result = {
'taxon_query': params['taxon'],
'descriptor_name': params['descriptor_name'],
'trait_type': 'nominal',
'obs_nominal_values': sorted(vals),
'colors': colors,
}
response = HttpResponse(json.dumps(result, use_decimal=True),
content_type='application/json')
return response
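# Sketch of the JSON shapes returned above (field values are invented for
# illustration):
#   numeric trait: {"taxon_query": "...", "descriptor_name": "...",
#                   "trait_type": "numeric", "min": 1.2, "max": 8.7}
#   nominal trait: {"taxon_query": "...", "descriptor_name": "...",
#                   "trait_type": "nominal",
#                   "obs_nominal_values": ["BROWN", "WHITE"],
#                   "colors": {"BROWN": "#1f77b4", "WHITE": "#ff7f0e"}}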
@ensure_csrf_cookie
@ensure_nocache
def evaluation_detail(req):
"""Return JSON for all evalation/trait records matching this accession id.
"""
assert req.method == 'GET', 'GET request method required'
params = req.GET.dict()
assert 'accenumb' in params, 'missing accenumb param'
prefix = ''
acc_num = ''
suffix = ''
parts = params['accenumb'].split()
parts_len = len(parts)
if parts_len > 2:
prefix, acc_num, rest = parts[0], parts[1], parts[2:] # suffix optional
suffix = ' '.join(rest)
elif parts_len == 2:
prefix, acc_num = parts[0], parts[1]
elif parts_len == 1:
acc_num = parts[0]
else:
acc_num = params['accenumb']
cursor = connection.cursor()
sql_params = {
'prefix': prefix,
'acc_num': acc_num,
'suffix': suffix,
}
where_clauses = [
val['sql'] for key, val in GRIN_EVAL_WHERE_FRAGS.items()
if val['include'](sql_params)
]
where_sql = ' AND '.join(where_clauses)
sql = '''
SELECT accession_prefix,
accession_number,
accession_surfix,
observation_value,
descriptor_name,
method_name,
plant_name,
taxon,
origin,
original_value,
frequency,
low,
hign,
mean,
sdev,
ssize,
inventory_prefix,
inventory_number,
inventory_suffix,
accession_comment
FROM lis_germplasm.legumes_grin_evaluation_data
WHERE %s
ORDER BY descriptor_name
''' % where_sql
# logger.info(cursor.mogrify(sql, sql_params))
cursor.execute(sql, sql_params)
rows = _dictfetchall(cursor)
result = json.dumps(rows, use_decimal=True)
response = HttpResponse(result, content_type='application/json')
return response
@ensure_csrf_cookie
@ensure_nocache
def accession_detail(req):
"""Return JSON for all columns for a accession id."""
assert req.method == 'GET', 'GET request method required'
params = req.GET.dict()
assert 'accenumb' in params, 'missing accenumb param'
    # FIXME: name the columns explicitly; don't SELECT *
sql = '''
SELECT * FROM lis_germplasm.grin_accession WHERE accenumb = %(accenumb)s
'''
cursor = connection.cursor()
# logger.info(cursor.mogrify(sql, params))
cursor.execute(sql, params)
rows = _dictfetchall(cursor)
return _acc_search_response(rows)
@ensure_csrf_cookie
@ensure_nocache
def countries(req):
"""Return a json array of countries for search filtering ui.
"""
cursor = connection.cursor()
sql = '''
SELECT DISTINCT origcty FROM lis_germplasm.grin_accession ORDER by origcty
'''
cursor.execute(sql)
# flatten into array, filter out bogus records like '' or 3 number codes
results = [row[0] for row in cursor.fetchall()
if row[0] and COUNTRY_REGEX.match(row[0])]
return HttpResponse(json.dumps(results), content_type='application/json')
@ensure_csrf_cookie
@ensure_nocache
def search(req):
"""Search by map bounds and return GeoJSON results."""
assert req.method == 'POST', 'POST request method required'
params = json.loads(req.body)
# logger.info(params)
if 'limit' not in params:
params['limit'] = DEFAULT_LIMIT
else:
params['limit'] = int(params['limit'])
where_clauses = [
val['sql'] for key, val in GRIN_ACC_WHERE_FRAGS.items()
if val['include'](params)
]
if len(where_clauses) == 0:
where_sql = ''
else:
where_sql = 'WHERE (%s)' % ' AND '.join(where_clauses)
cols_sql = ' , '.join(ACC_SELECT_COLS)
sql = '''SELECT %s FROM %s %s %s %s''' % (
cols_sql,
ACCESSION_TAB,
where_sql,
ORDER_BY_FRAG,
LIMIT_FRAG
)
cursor = connection.cursor()
sql_params = {
'taxon_query': params.get('taxon_query', None),
'country': params.get('country', None),
'minx': float(params.get('sw_lng', 0)),
'miny': float(params.get('sw_lat', 0)),
'maxx': float(params.get('ne_lng', 0)),
'maxy': float(params.get('ne_lat', 0)),
'limit': params['limit'],
'srid': SRID,
}
# logger.info(cursor.mogrify(sql, sql_params))
cursor.execute(sql, sql_params)
rows = _dictfetchall(cursor)
    # when searching for a set of accession ids, the result needs to either
    # be merged with the SQL LIMIT results, or just returned instead
if params.get('accession_ids', None):
if ',' in params['accession_ids']:
sql_params = {'accession_ids': params['accession_ids'].split(',')}
else:
sql_params = {'accession_ids': [params['accession_ids']]}
where_sql = 'WHERE accenumb = ANY( %(accession_ids)s )'
sql = 'SELECT %s FROM %s %s' % (
cols_sql,
ACCESSION_TAB,
where_sql
)
cursor.execute(sql, sql_params)
rows_with_requested_accessions = _dictfetchall(cursor)
if params.get('accession_ids_inclusive', None):
# merge results with previous set
uniq = set()
def is_unique(r):
k = r.get('accenumb', None)
if k in uniq:
return False
uniq.add(k)
return True
rows = [row for row in rows_with_requested_accessions + rows
if is_unique(row)]
else:
# simple replace with these results
rows = rows_with_requested_accessions
return _acc_search_response(rows)
def _acc_search_response(rows):
geo_json = []
# logger.info('results: %d' % len(rows))
for rec in rows:
# fix up properties which are not json serializable
if rec.get('acqdate', None):
rec['acqdate'] = str(rec['acqdate'])
else:
rec['acqdate'] = None
if rec.get('colldate', None):
rec['colldate'] = str(rec['colldate'])
else:
rec['colldate'] = None
# geojson can have null coords, so output this for
# non-geocoded search results (e.g. full text search w/ limit
        # to current map extent turned off)
if rec.get('longdec', 0) == 0 and rec.get('latdec', 0) == 0:
coords = None
else:
lat = Decimal(rec['latdec']).quantize(TWO_PLACES)
lng = Decimal(rec['longdec']).quantize(TWO_PLACES)
coords = [lng, lat]
del rec['latdec'] # have been translated into geojson coords,
del rec['longdec'] # so these keys are extraneous now.
geo_json_frag = {
'type': 'Feature',
'geometry': {
'type': 'Point',
'coordinates': coords
},
'properties': rec # rec happens to be a dict of properties. yay
}
# tag this accession with something to distinguish it from
# user provided accession ids
geo_json_frag['properties']['from_api'] = True
geo_json.append(geo_json_frag)
result = json.dumps(geo_json, use_decimal=True)
response = HttpResponse(result, content_type='application/json')
return response
def _dictfetchall(cursor):
"""Return all rows from a cursor as a dict"""
columns = [col[0] for col in cursor.description]
return [
dict(zip(columns, row))
for row in cursor.fetchall()
]
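# Example of what _dictfetchall produces, for illustration: if
# cursor.description names the columns ('accenumb', 'taxon') and one row
# ('PI 123456', 'Glycine max') is fetched, the result is
# [{'accenumb': 'PI 123456', 'taxon': 'Glycine max'}].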
|
BANTING: A secondary school teacher accused of hurling racial slurs against her Indian students recently is on leave prior to her transfer on Monday.
Deputy Minister in the Prime Minister’s Department Senator T. Murugiah, who visited SMK Telok Panglima Garang yesterday, said the teacher regretted the incident and had apologised to the students.
“She also said that she did not expect things to become so serious and has agreed to be transferred to another school,” added Murugiah who was accompanied by officers from the Education Ministry and the Prime Minister’s Department’s Public Complaints Bureau.
Two students lodged police reports against the female history teacher last Friday alleging that she had made derogatory remarks about Indian students and called them unflattering names.
According to Murugiah, recommendations would also be made to the Education Ministry to institute disciplinary action against her.
He said the issue was now considered closed and advised members of the Indian community as well as parents of the affected students to accept the teacher’s apology.
However, relatives of the affected students as well as the members of the Telok Panglima Garang Indian community refused to regard the matter as closed.
“She must make a public apology as she insulted all Indians and be transferred to a desk job and not allowed to teach anymore,” said the group’s spokesman S. Muthamil Selvan.
While the government led by Prime Minister Datuk Seri Abdullah Ahmad Badawi wants to strengthen unity among the various races in the country, we have some people, like the teacher above, who fail to understand that Malaysia is not run by a single race. When the PM himself wants the nation to provide equal opportunities for all Malaysians in all areas, there are some who think that this is not wise and that only the majority should enjoy the wealth of this country. It’s sad. It angers me. I am also disappointed that the teacher had passed through the country’s education system and teacher training without anyone detecting her racial sentiments.
No Malaysian, regardless of race, religion, creed, colour or ethnicity, deserves this kind of treatment, and most alarming is the fact that it was uttered by a school teacher who has indirectly made all her students racists in one way or another. The psychological scar she has left on these students will remain despite the harshest punishment meted out against her.
Now the students of other races who were in the class when this incident occurred would not think twice about repeating outside of school what she had said in class, while the Indian students would be angry not only at the teacher but at her race generally. In this case, one rotten apple spoils the barrel.
As far as I am concerned, action should be taken against the teacher. She should not be sacked as this would destroy her career and let her go on thinking that she was right in saying all those things against Indians. She should be transferred. She should be demoted and placed at a Tamil primary school. Let her go to school everyday with hundreds of Indian children and other Indian teachers.
Though deep inside I want her sacked and remain out of the country’s education system, that would be an easy way out for her. She has to realise her follies and sacking her will not bring her this realisation. But transferring her to a Tamil medium primary school would certainly do the trick!
WHAT IS HAPPENING TO OUR SCHOOLING SYSTEM? WE HAVE TEACHERS WHO ARE FOOLS AND COULD PASS THIS KIND OF REMARK. HAS OUR EDUCATION SYSTEM FLUNKED SO BADLY THAT IT PRODUCES THIS KIND OF TEACHER?
SHE COULD PASS THESE REMARKS AND EXPECT THINGS NOT TO BECOME SERIOUS?
AND ALL SHE GOT IS A TRANSFER?
THIS PROVES THAT WE MALAYSIANS ARE MARCHING STEADILY BACKWARD.
DON'T TAKE MY WORD FOR IT, BUT REMEMBER THIS:
"THE FUTURE IS FOR THOSE WHO ARE FORWARD THINKING, AND MALAYSIA IS GOING DOWN THE DRAIN."
DAMN THE RACIST TEACHER, AS SHE IS A POISON IN OUR SOCIETY.
What are you suggesting, though?
|
# -*- coding: utf-8 -*-
# Copyright (c) 2009 - 2015 Detlev Offenbach <[email protected]>
#
"""
Module implementing a scheme access handler for QtHelp.
"""
from __future__ import unicode_literals
import mimetypes
import os
from PyQt5.QtCore import QByteArray
from .SchemeAccessHandler import SchemeAccessHandler
from .NetworkReply import NetworkReply
QtDocPath = "qthelp://com.trolltech."
ExtensionMap = {
".bmp": "image/bmp",
".css": "text/css",
".gif": "image/gif",
".html": "text/html",
".htm": "text/html",
".ico": "image/x-icon",
".jpeg": "image/jpeg",
".jpg": "image/jpeg",
".js": "application/x-javascript",
".mng": "video/x-mng",
".pbm": "image/x-portable-bitmap",
".pgm": "image/x-portable-graymap",
".pdf": "application/pdf",
".png": "image/png",
".ppm": "image/x-portable-pixmap",
".rss": "application/rss+xml",
".svg": "image/svg+xml",
".svgz": "image/svg+xml",
".text": "text/plain",
".tif": "image/tiff",
".tiff": "image/tiff",
".txt": "text/plain",
".xbm": "image/x-xbitmap",
".xml": "text/xml",
".xpm": "image/x-xpm",
".xsl": "text/xsl",
".xhtml": "application/xhtml+xml",
".wml": "text/vnd.wap.wml",
".wmlc": "application/vnd.wap.wmlc",
}
class QtHelpAccessHandler(SchemeAccessHandler):
"""
Class implementing a scheme access handler for QtHelp.
"""
def __init__(self, engine, parent=None):
"""
Constructor
@param engine reference to the help engine (QHelpEngine)
@param parent reference to the parent object (QObject)
"""
SchemeAccessHandler.__init__(self, parent)
self.__engine = engine
def __mimeFromUrl(self, url):
"""
        Private method to guess the mime type given a URL.
@param url URL to guess the mime type from (QUrl)
@return mime type for the given URL (string)
"""
path = url.path()
ext = os.path.splitext(path)[1].lower()
if ext in ExtensionMap:
return ExtensionMap[ext]
else:
return "application/octet-stream"
def createRequest(self, op, request, outgoingData=None):
"""
Public method to create a request.
@param op the operation to be performed
(QNetworkAccessManager.Operation)
@param request reference to the request object (QNetworkRequest)
@param outgoingData reference to an IODevice containing data to be sent
(QIODevice)
@return reference to the created reply object (QNetworkReply)
"""
url = request.url()
strUrl = url.toString()
# For some reason the url to load is already wrong (passed from webkit)
# though the css file and the references inside should work that way.
# One possible problem might be that the css is loaded at the same
# level as the html, thus a path inside the css like
# (../images/foo.png) might cd out of the virtual folder
if not self.__engine.findFile(url).isValid():
if strUrl.startswith(QtDocPath):
newUrl = request.url()
if not newUrl.path().startswith("/qdoc/"):
newUrl.setPath("qdoc" + newUrl.path())
url = newUrl
strUrl = url.toString()
mimeType = mimetypes.guess_type(strUrl)[0]
if mimeType is None:
# do our own (limited) guessing
mimeType = self.__mimeFromUrl(url)
if self.__engine.findFile(url).isValid():
data = self.__engine.fileData(url)
else:
data = QByteArray(self.tr(
"""<title>Error 404...</title>"""
"""<div align="center"><br><br>"""
"""<h1>The page could not be found</h1><br>"""
"""<h3>'{0}'</h3></div>""").format(strUrl).encode("utf-8"))
return NetworkReply(request, data, mimeType, self.parent())
|
OnZen's Innovative Bath Tablets: A Hot Spring In Your Own Home?
The busier our lives seem to get, the more important it is to take time to look after ourselves. I've recently posted about a few beauty products you can pop in your bath to make it even more relaxing; these oils and scents are lovely, but it's equally important to reap the physical benefits of a bath for your skin, and not just focus on the mental element of relaxation. These innovative tablets from OnZen help do just that, and remind us to look after our skin whilst doing all the hard work for us. We're so used to running a bath and adding in our favourite bubble baths, bath bombs or essential oils (focusing on the relaxing element of a long soak). OnZen looks at the water itself, rather than what you can add to it. They have created products which alter the water you are bathing in to make it more beneficial to your skin and circulation. Genius!
There are three different ranges within the OnZen brand: home spa, hair and balance. Balance is aimed at creating an optimal pH and is designed for drier, sensitive skin types, whilst home spa works at creating a "hot spring experience" in your bath. Sounds luxurious, no? Whilst all three ranges come in the form of small, round tablets, balance and home spa are designed for popping in your bath. When you start to run the water, OnZen say to add 3 to 5 tablets to your bathtub in order to dissolve them and start reaping the benefits of healthier, softer skin. I found that just using 2 or 3 gave equally amazing results, though you can add the full 3 to 5 if you want to get the most out of the product.
OnZen also do a range of these tablets for your hair. With a similar formula to the others, the hair tablets aim at scalp regeneration, strengthening hair and also promoting hair growth. Designed to combat a range of modern hair woes (exposure to pollution, UV radiation, heat and stress), the mineral tablets provide a thorough cleanse which works alongside your usual shampoos, conditioners and treatments. Think of it as an extra boost for your hair. Whilst I haven't noticed as big a difference with these as I have with the home spa range, the hair tablets have left my hair noticeably shinier and easier to manage; my post-blow-dry frizz has definitely calmed down after using these! If baths aren't your thing, you can always pop a tablet in OnZen's specially designed shower head and still get the same mineral spa benefits. The hair tablets are also popped in this shower head; just unscrew it, pop one tablet into the slot and watch it dissolve with the water as you wash your hair. It does come with an adapter, but I have had no issues with attaching the shower head; it was far easier than I thought!
The Bicarbonate Carbonic Citric Acid ion combination (try saying that after a few drinks!) and natural, fragrance-free formula used within the products has tried to replicate the conditions found within the Japanese-style hot springs in order to bring these traditional spa techniques to your home. While it seems science-y and technical, the innovative mineral spa product has numerous effects such as relaxation, improving blood circulation (through the small release of carbon dioxide, also known as the Bohr effect), encouraging skin cell regeneration and collagen repair, gentle cleaning of skin and, most noticeably, leaves skin incredibly soft. I have been trying out the Home Spa range, and can vouch for how lovely, moisturised and nourished my skin feels after popping a few of these tablets in my bath. What's better is that you can always add your favourite Lush bath bomb or aromatherapy products on top of these tablets; the ability to mix and match ensures you still retain the 'time out' element of having a bath whilst leaving your skin well looked after. Each range comes in packs of 10, 30 and 100 tablets starting at £16. They're not necessarily a 'must-have', but are an innovative and fuss-free way to look after your skin and a great product to try out if you love adding relaxing bits and bobs to your bath!
What do you think of this product? Is it an interesting innovation that you'd give a go?
|
from JumpScale import j
import JumpScale.grid.agentcontroller
class system_packagemanager(j.code.classGetBase()):
def __init__(self):
self._te = {}
self.actorname = "packagemanager"
self.appname = "system"
self.client = j.clients.agentcontroller.get()
self.gid = j.application.whoAmI.gid
def execute(self, script, nid, gid=None, **kwargs):
return self.client.execute('jumpscale', script, nid=nid, gid=gid, args=kwargs)
def getJPackages(self, **args):
nid = args.get('nid')
domain = args.get('domain', None)
return self.execute('jpackage_list', nid=nid, domain=domain)
def getJPackageInfo(self, **args):
nid = args.get('nid')
domain = args.get('domain', None)
name = args.get('pname', None)
version = args.get('version', None)
return self.execute('jpackage_info', nid=nid, domain=domain, pname=name, version=version)
def getJPackageFilesInfo(self, **args):
"""
        ask the right processmanager on the right node to get the information (will query jpackages underneath)
returns all relevant info about files of jpackage
param:nid id of node
param:domain domain name for jpackage
param:pname name for jpackage
result json
"""
nid = args.get('nid')
domain = args.get('domain', None)
name = args.get('pname', None)
version = args.get('version', None)
return self.execute('jpackage_fileinfo', nid=nid, domain=domain, pname=name, version=version)
def action(self, **args):
nid = args.get('nid')
domain = args.get('domain', None)
name = args.get('pname', None)
action = args.get('action', None)
version = args.get('version', None)
return self.execute('jpackage_action', nid=nid, domain=domain, pname=name, version=version, action=action)
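    # Illustrative usage sketch (the node id and package values below are
    # invented; an instance is normally created by the actor framework):
    #
    #   pm = system_packagemanager()
    #   packages = pm.getJPackages(nid=10, domain='jumpscale')
    #   files = pm.getJPackageFilesInfo(nid=10, domain='jumpscale', pname='redis')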
|
The Big Shoes Award has grown from a small event to a beacon of achievement for the region. Giving this award highlights the quality of manufacturing companies that have risen from humble beginnings to stellar advancements. As the 2017 article in the Daily Southtown by Ted Slowick demonstrates, the Big Shoes Award is part of our regional marketing campaign to highlight the underappreciated economic manufacturing strength of the Chicago Southland.
With extreme excitement, we are pleased to continue with our Big Shoes Award 2018.
We are pleased to honor Gallagher Asphalt and its President Charlie Gallagher during the 90th anniversary of this family-owned company for their leading innovation in technology. Since their founding in 1928 by James F. Gallagher, Sr., Gallagher has paved or resurfaced hundreds of thousands of miles under some of the most demanding conditions.
|
from behave import *
use_step_matcher('parse')
@given('Exist a event created by "{username}"')
def step_impl(context, username):
from sportsBetting.models import Event, Team
from django.contrib.auth.models import User
for row in context.table:
name = row['local'] + ' v ' + row['visitor']
if not Event.objects.filter(name=name).exists():
e = Event()
e.name = name
e.user = User.objects.get(username=username)
e.team1 = Team.objects.get(name=row['local'])
e.team2 = Team.objects.get(name=row['visitor'])
e.save()
@when('I add a new event')
def step_impl(context):
for row in context.table:
context.browser.visit(context.get_url('/events/create/'))
if context.browser.url == context.get_url('/events/create/'):
form = context.browser.find_by_tag('form').first
context.browser.fill('name', row['local'] + ' v ' + row['visitor'])
context.browser.find_by_xpath(
'//select[@id="id_team1"]//option[text()="' + row['local'] + '"]', ).first.click()
context.browser.find_by_xpath(
'//select[@id="id_team2"]//option[text()="' + row['visitor'] + '"]', ).first.click()
form.find_by_id('team_submit').first.click()
@when('I want to delete the event "{event_name}"')
def step_impl(context, event_name):
from sportsBetting.models import Event
id = Event.objects.get(name=event_name).id
context.browser.visit(context.get_url('/events/delete/' + str(id)))
@when('I delete the event')
def step_impl(context):
form = context.browser.find_by_tag('form').first
form.find_by_css('.btn').first.click()
assert context.browser.url == context.get_url('/events/list_events/')
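# A feature-file fragment these steps are written to match (illustrative
# only; the team and user names are invented):
#
#   Given Exist a event created by "alice"
#     | local     | visitor  |
#     | Barcelona | Madrid   |
#   When I add a new event
#     | local  | visitor  |
#     | Girona | Valencia |
#   When I want to delete the event "Barcelona v Madrid"
#   When I delete the event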
|
Kent PF07. Use to gently brush & shape all hair lengths. This rubber-cushioned natural bristle brush is ideal for shoulder-length & long hair. A fantastic brush for straightening & general grooming. Natural bristle is perfect for stimulating the hair's natural oils, and brushing distributes these oils through the hair, keeping it clean, smooth, shiny & naturally conditioned.
|
'''
Created on Feb 22, 2015
@author: stefano
Script that tests V-REP deterministic runs.
Runs V-REP repeatedly with a deterministic
series of random motor commands over TCP/IP
Also includes related tests (such as light readings and Braitenberg-like simulations)
Assumes:
1. V-REP world ("Scene") "$HOMEO/src/VREP/Khepera-J-Proximity-only.SF.ttt" is already running
2. V-REP listens on port 19997 (for main control)
3. The V-REP robot to be controlled is called "Khepera"
4. Other V-REP assumptions about lights and other features of the V-REP world (see method comments and V-REP world description)
5. A SimsData subdir exists at /home/stefano/Documents/Projects/Homeostat/Simulator/Python-port/Homeo/SimulationsData
'''
import vrep
from Helpers.SimulationThread import SimulationThread
import math
import numpy as np
# import matplotlib.pyplot as plt
import os, sys
import subprocess
import datetime
from numpy import dot, arccos, degrees
from math import pi
from numpy.linalg import norm
from time import sleep,time, strftime,localtime
from Helpers.General_Helper_Functions import scaleTo
from ctypes import c_ubyte
def distance(pointA3D, pointB3D):
"Return Euclidean distance between two 3D points"
return math.sqrt((pointA3D[0]-pointB3D[0])**2 + (pointA3D[1]-pointB3D[1])**2 + (pointA3D[2]-pointB3D[2])**2)
def distanceFromOrig(point3D):
"Return Euclidean distance"
return math.sqrt((0 - point3D[0])**2 + (0 - point3D[1])**2 + (0 - point3D[2])**2)
def clip(clipValue,minV,maxV):
if clipValue < minV:
return minV
elif clipValue > maxV:
return maxV
return clipValue
def asByteArray(m_string):
return (c_ubyte * len(m_string)).from_buffer_copy(m_string)
class VREPTests(object):
def __init__(self, noSteps = 5000, noRuns=5, robotName = "Khepera"):
"Parameters"
#VREP_scene_file ="/home/stefano/Documents/Projects/Homeostat/Simulator/Python-port/Homeo/src/VREP/Khepera-J-Proximity-only.SF.ttt"
self.simulation_port = 19997
self.trajectoryPort = 19998
self.robot_host = '127.0.0.1'
self.VREP_HOME = '/home/stefano/builds/from-upstream-sources/V-REP_PRO_EDU_V3_2_0_64_Linux/'
self.robotName = robotName
self.noRuns = noRuns
self.noSteps = noSteps
self.targetPose = [7,7]
self.initPose = [4,4,0.0191]
self.initOrient = [-90,0,-90]
self.betwCmdDelays = 0
self.maxSpeed = 50
self.trajStateSignalName = "HOMEO_SIGNAL_"+ self.robotName + "_TRAJECTORY_RECORDER"
def startTrajRecorder(self):
pass
def connectAll(self):
self.connect()
self.getHandles()
# self.startTrajRecorder()
def testDetermMomvt(self):
self.moveRandomly()
def testLightSensors(self):
self.moveAndReadLights()
def moveReadLights(self):
self.moveAndReadProxSensors()
def moveRandomly(self):
"Set trajectory data directory and communicate to V-REP"
HOMEODIR = '/home/stefano/Documents/Projects/Homeostat/Simulator/Python-port/Homeo/'
dataDir = 'SimsData-'+strftime("%Y-%m-%d-%H-%M-%S", localtime(time()))
simsDataDir = os.path.join(HOMEODIR,"SimulationsData",dataDir)
os.mkdir(simsDataDir)
print "Saving to: ", simsDataDir
e = vrep.simxSetStringSignal(self.simulID,"HOMEO_SIGNAL_SIM_DATA_DIR" ,asByteArray(simsDataDir), vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
print "Message sent, error code: ", e
for run in xrange(self.noRuns):
eCode = vrep.simxStartSimulation(self.simulID, vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
e = vrep.simxSetStringSignal(self.simulID,"HOMEO_SIGNAL_SIM_DATA_DIR" ,asByteArray(simsDataDir), vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
print "Simulation started: run number %d, error code: %d"% (run+1, eCode)
"Wait until simulation is ready, otherwise we will miss a few movement commands"
# sleep(2)
np.random.seed(64)
# resetRobotInitPose(initPose, self.simulID, ePuckHandle)
eCode = vrep.simxSetStringSignal(self.simulID, self.trajStateSignalName, asByteArray("NEWFILE"), vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
if eCode == 0:
print "Starting a new trajectory file"
else:
print "ERROR: Could not start a new trajectory file"
            # time the whole run, not just the last motor command
            timeStart = time()
            for step in xrange(self.noSteps):
rightSpeed = np.random.uniform(self.maxSpeed * 2) # - self.maxSpeed
leftSpeed = np.random.uniform(self.maxSpeed * 2) # -maxSpeed
eCode = vrep.simxSetJointTargetVelocity(self.simulID, self.rightMotor, rightSpeed, vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
eCode = vrep.simxSetJointTargetVelocity(self.simulID, self.leftMotor, leftSpeed, vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
for i in xrange(self.betwCmdDelays):
vrep.simxSynchronousTrigger(self.simulID)
            timeElapsed = time() - timeStart
"Stop the robot"
self.stopRobot(self.simulID, [self.rightMotor, self.leftMotor])
eCode = vrep.simxSetStringSignal(self.simulID, self.trajStateSignalName, asByteArray("SAVE"), vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
if eCode == 0:
print "Saving trajectory file"
else:
print "ERROR: Could not save a new trajectory file"
sleep(.5)
robotPose = vrep.simxGetObjectPosition(self.simulID, self.robotHandle, -1, vrep.simx_opmode_oneshot_wait)[1][:2]
vrep.simxSynchronousTrigger(self.simulID)
print "%d: Robot is at: %.3f, %.3f Distance from target is: %.4f. Run took exactly %.3f seconds" % (run,
robotPose[0],
robotPose[1],
self.computeDistance(self.targetPose, robotPose),
timeElapsed) #
eCode = vrep.simxStopSimulation(self.simulID, vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
sleep(1)
# eCode = vrep.simxStartSimulation(self.simulID, vrep.simx_opmode_oneshot_wait)
# vrep.simxSynchronousTrigger(self.simulID)
eCode = vrep.simxSetStringSignal(self.simulID, self.trajStateSignalName, asByteArray("CLOSEFILE"), vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
if eCode == 0:
print "Starting a new trajectory file"
else:
print "ERROR: Could not close a new trajectory file"
print "Done"
def moveAndReadLights(self):
"rotate in place and print light readings"
eCode, res, rightEyeRead = vrep.simxGetVisionSensorImage(self.simulID, self.rightEye, 0, vrep.simx_opmode_streaming)
ecode, res, leftEyeRead = vrep.simxGetVisionSensorImage(self.simulID, self.leftEye, 0, vrep.simx_opmode_streaming)
vrep.simxSynchronousTrigger(self.simulID)
for step in xrange(self.noSteps):
rightSpeed = 25
leftSpeed = rightSpeed
eCode = vrep.simxSetJointTargetVelocity(self.simulID, self.rightMotor, rightSpeed, vrep.simx_opmode_oneshot_wait)
eCode = vrep.simxSetJointTargetVelocity(self.simulID, self.leftMotor, leftSpeed, vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
eCodeR, res, rightEyeRead = vrep.simxGetVisionSensorImage(self.simulID, self.rightEye, 0, vrep.simx_opmode_buffer)
eCodeL, res, leftEyeRead = vrep.simxGetVisionSensorImage(self.simulID, self.leftEye, 0, vrep.simx_opmode_buffer)
vrep.simxSynchronousTrigger(self.simulID)
# print "Right eCode:\t", eCodeR,
# print "Left eCode:\t", eCodeL
# leftImg = np.array(leftEyeRead, np.uint8)
# rightImg.resize(res[0],res[1],3)
print "Right:\t%d, %d\tLeft:\t%d, %d"% (len(rightEyeRead),sum(rightEyeRead), len(leftEyeRead),sum(leftEyeRead))
# print rightImg.shape
# plt.imshow(rightImg)#, origin="lower")
# for run in xrange(self.noRuns):
# np.random.seed(64)
#
# for step in xrange(self.noSteps):
# rightSpeed = np.random.uniform(self.maxSpeed * 2) # - self.maxSpeed
# leftSpeed = np.random.uniform(self.maxSpeed * 2) # -maxSpeed
# eCode = vrep.simxSetJointTargetVelocity(self.simulID, self.rightMotor, rightSpeed, vrep.simx_opmode_oneshot_wait)
# eCode = vrep.simxSetJointTargetVelocity(self.simulID, self.leftMotor, leftSpeed, vrep.simx_opmode_oneshot_wait)
# vrep.simxSynchronousTrigger(self.simulID)
# eCode, res, rightEyeRead = vrep.simxGetVisionSensorImage(self.simulID, self.rightEye, 1, vrep.simx_opmode_buffer)
# ecode, res, leftEyeRead = vrep.simxGetVisionSensorImage(self.simulID, self.leftEye, 1, vrep.simx_opmode_buffer)
# vrep.simxSynchronousTrigger(self.simulID)
# print "Right eye reads: \t", rightEyeRead
# print "Left eye reads: \t", leftEyeRead
def moveAndReadProxSensors(self):
"rotate in place and print sensor distance and normal vector readings"
for step in xrange(self.noSteps):
if step>self.noSteps / 2:
rightSpeed = -1
leftSpeed = -rightSpeed
else:
rightSpeed = 1
leftSpeed = -rightSpeed
eCode = vrep.simxSetJointTargetVelocity(self.simulID, self.rightMotor, rightSpeed, vrep.simx_opmode_oneshot_wait)
eCode = vrep.simxSetJointTargetVelocity(self.simulID, self.leftMotor, leftSpeed, vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
rightInput = vrep.simxReadProximitySensor(self.simulID, self.rightEye, vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
leftInput = vrep.simxReadProximitySensor(self.simulID, self.leftEye, vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
print "Left-->err:%s - Detct'd: %s\t%s\t\tRight--> err:%s - Detct'd: %s\t\t\t%s" % (leftInput[0],
leftInput[3],
leftInput[2],
rightInput[0],
rightInput[3],
rightInput[2])
sleep(.1)
self.stopRobot(self.simulID,[self.rightMotor,self.leftMotor])
vrep.simxSynchronousTrigger(self.simulID)
def braiten1a(self):
"slowly move forward and print normal vector readings"
intens = 50
ambientIntens = 0
attVect = [0,0,1]
print "Proximity sensor readings error codes: "
for step in xrange(self.noSteps):
rightInput = vrep.simxReadProximitySensor(self.simulID, self.rightEye, vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
leftInput = vrep.simxReadProximitySensor(self.simulID, self.leftEye, vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
centerInput = vrep.simxReadProximitySensor(self.simulID, self.KJcenterEye, vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
angle = degrees(self.angleBetVecs([0,0,1], centerInput[2]))
lightReading = self.irradAtSensor(intens, ambientIntens, centerInput[2], attVect)
print "Center-->err:%s - Detct'd: %s\tAngle:%.3f\tIrrad:%.3f\tNorm: %.3f\tVector:%s\t" % (centerInput[0],
centerInput[3],
angle,
lightReading,
norm(centerInput[2]),
centerInput[2])
eCode = vrep.simxSetJointTargetVelocity(self.simulID, self.rightMotor, lightReading, vrep.simx_opmode_oneshot_wait)
eCode = vrep.simxSetJointTargetVelocity(self.simulID, self.leftMotor, lightReading, vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
sleep(0)
def braiten1b(self):
"slowly move forward and print normal vector readings"
intens = 100
ambientIntensRatio = 0.2
attVect = [0,0,pi *4]
for step in xrange(self.noSteps):
rightInput = vrep.simxReadProximitySensor(self.simulID, self.rightEye, vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
leftInput = vrep.simxReadProximitySensor(self.simulID, self.leftEye, vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
centerInput = vrep.simxReadProximitySensor(self.simulID, self.KJcenterEye, vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
angle = degrees(self.angleBetVecs([0,0,1], centerInput[2]))
lightReading = self.irradAtSensor(intens, ambientIntensRatio, centerInput[2], attVect)
print "Center-->err:%s - Detct'd: %s\tAngle:%.3f\tIrrad:%.3f\tNorm: %.3f\tVector:%s\t" % (centerInput[0],
centerInput[3],
angle,
lightReading,
norm(centerInput[2]),
centerInput[2])
eCode = vrep.simxSetJointTargetVelocity(self.simulID, self.rightMotor, 1/lightReading, vrep.simx_opmode_oneshot_wait)
eCode = vrep.simxSetJointTargetVelocity(self.simulID, self.leftMotor, 1/lightReading, vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
sleep(0)
def braiten2a(self):
"Seek light source"
"PARAMETERS"
intens = 100
ambientIntensRatio = 0
attVect = [0,0,1]
HOMEODIR = '/home/stefano/Documents/Projects/Homeostat/Simulator/Python-port/Homeo/'
dataDir = 'SimsData-'+strftime("%Y-%m-%d-%H-%M-%S", localtime(time()))
simsDataDir = os.path.join(HOMEODIR,"SimulationsData",dataDir)
os.mkdir(simsDataDir)
print "Saving to: ", simsDataDir
e = vrep.simxSetStringSignal(self.simulID,"HOMEO_SIGNAL_SIM_DATA_DIR" ,asByteArray(simsDataDir), vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
print "Message sent, error code: ", e
"END PARAMETERS"
for run in xrange(self.noRuns):
eCode = vrep.simxStartSimulation(self.simulID, vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
e = vrep.simxSetStringSignal(self.simulID,"HOMEO_SIGNAL_SIM_DATA_DIR" ,asByteArray(simsDataDir), vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
print "Simulation started: run number %d, error code: %d"% (run+1, eCode)
"Wait until simulation is ready, otherwise we will miss a few movement commands"
# sleep(2)
np.random.seed(64)
# resetRobotInitPose(initPose, self.simulID, ePuckHandle)
eCode = vrep.simxSetStringSignal(self.simulID, self.trajStateSignalName, asByteArray("NEWFILE"), vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
if eCode == 0:
print "Starting a new trajectory file"
else:
print "ERROR: Could not start a new trajectory file"
timeStart = time()
for step in xrange(self.noSteps):
rightLight = vrep.simxGetFloatSignal(self.simulID, "HOMEO_SIGNAL_rightEye_LIGHT_READING", vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
leftLight = vrep.simxGetFloatSignal(self.simulID, "HOMEO_SIGNAL_leftEye_LIGHT_READING", vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
# print "rightLight %.3f\t left light: %.3f" %(rightLight[1],leftLight[1])
eCode = vrep.simxSetJointTargetVelocity(self.simulID, self.rightMotor, clip(leftLight[1],0,self.maxSpeed), vrep.simx_opmode_oneshot_wait)
eCode = vrep.simxSetJointTargetVelocity(self.simulID, self.leftMotor, clip(rightLight[1],0, self.maxSpeed), vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
sleep(0)
timeElapsed = time() - timeStart
"Stop the robot"
self.stopRobot(self.simulID, [self.rightMotor, self.leftMotor])
eCode = vrep.simxSetStringSignal(self.simulID, self.trajStateSignalName, asByteArray("SAVE"), vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
if eCode == 0:
print "Saving trajectory file"
else:
print "ERROR: Could not save a new trajectory file"
sleep(.5)
robotPose = vrep.simxGetObjectPosition(self.simulID, self.robotHandle, -1, vrep.simx_opmode_oneshot_wait)[1][:2]
vrep.simxSynchronousTrigger(self.simulID)
print "%d: Robot is at: %.3f, %.3f Distance from target is: %.4f. Run took exactly %.3f seconds" % (run,
robotPose[0],
robotPose[1],
self.computeDistance(self.targetPose, robotPose),
timeElapsed) #
eCode = vrep.simxStopSimulation(self.simulID, vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
sleep(1)
# eCode = vrep.simxStartSimulation(self.simulID, vrep.simx_opmode_oneshot_wait)
# vrep.simxSynchronousTrigger(self.simulID)
eCode = vrep.simxSetStringSignal(self.simulID, self.trajStateSignalName, asByteArray("CLOSEFILE"), vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
if eCode == 0:
print "Starting a new trajectory file"
else:
print "ERROR: Could not close a new trajectory file"
print "Done"
def cleanUp(self):
print "About to stop simulation connected to self.simulID: ", self.simulID
vrep.simxStopSimulation(self.simulID, vrep.simx_opmode_oneshot)
vrep.simxSynchronousTrigger(self.simulID)
# vrep.simxFinish(robotID)
vrep.simxFinish(self.simulID)
vrep.simxFinish(-1)
print "Disconnected from V-REP"
def computeDistance(self,a, b):
return math.sqrt((a[0]-b[0])**2 + (a[1]-b[1])**2)
def stopRobot(self,simulHandle, motorHandles):
for motor in motorHandles:
eCode = vrep.simxSetJointTargetVelocity(simulHandle, motor, 0, vrep.simx_opmode_oneshot)
vrep.simxSynchronousTrigger(self.simulID)
def connect(self):
#os.chdir(VREP_HOME)
#subprocess.call([os.path.join(VREP_HOME,'vrep.sh'), VREP_scene_file], shell = True, cwd = VREP_HOME)
"Close existing connections"
vrep.simxFinish(-1)
"Connect to Simulation"
self.simulID = vrep.simxStart(self.robot_host,self.simulation_port,True,True, 5000,5)
eCode = vrep.simxSynchronous(self.simulID, True)
if eCode != 0:
print "Could not get V-REP to synchronize operation with me"
if not self.simulID == -1:
eCode = vrep.simxStartSimulation(self.simulID, vrep.simx_opmode_oneshot)
vrep.simxSynchronousTrigger(self.simulID)
print "my SimulID is ", self.simulID
else:
sys.exit("Failed to connect to VREP simulation. Bailing out")
def getHandles(self):
"Get handles for epuck and motors"
ecodeE, self.robotHandle = vrep.simxGetObjectHandle(self.simulID, "Khepera", vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
eCodeR, self.rightMotor = vrep.simxGetObjectHandle(self.simulID, "rightWheel", vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
eCodeL, self.leftMotor = vrep.simxGetObjectHandle(self.simulID, "leftWheel", vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
eCodeR, self.rightEye = vrep.simxGetObjectHandle(self.simulID, "rightEye", vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
eCodeL, self.leftEye = vrep.simxGetObjectHandle(self.simulID, "leftEye", vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
# eCodeL, self.KJcenterEye = vrep.simxGetObjectHandle(self.simulID, "Khepera_proxSensor3", vrep.simx_opmode_oneshot_wait)
# vrep.simxSynchronousTrigger(self.simulID)
eCode,self.targetID = vrep.simxGetObjectHandle(self.simulID,"TARGET", vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
if (self.rightMotor == 0 or self.leftMotor == 0 or self.rightEye == 0 or self.leftEye == 0):
self.cleanUp()
sys.exit("Exiting: Could not connect to motors or sensors")
else:
print " I am connected to Right Motor: %d, leftMotor: %d, Right eye: %d, Left eye: %d, and my target has ID:%d" % (self.rightMotor,
self.leftMotor,
self.rightEye,
self.leftEye,
self.targetID)
def angleBetVecs(self,vecA,vecB):
vecA_norm = vecA/norm(vecA)
vecB_norm = vecB/norm(vecB)
return arccos(dot(vecA_norm,vecB_norm))
def irradAtSensor(self,intens,ambIntensRatio,vecToLight, attenVect):
"""Compute the irradiance at the light sensor surface
Intens is the directional component of the light intensity,
ambIntensRatio is ambient component (not subject to attenuation) of the light's intensity. Must be in [0,1]
vecToLight is the 3D vector to the light source in the sensor's frame of reference
attenVect is a 3 element vector with the direct, linear, and quadratic attenuation coefficients """
cosAngle = (dot([0,0,1],vecToLight)/norm(vecToLight))
directIntens = (intens * (1-ambIntensRatio)) * cosAngle
distance = norm(vecToLight)
attenuation = 1/(attenVect[0]+(attenVect[1]*distance)+(attenVect[2]*distance**2))
return (directIntens + (intens*ambIntensRatio)) * attenuation
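    # Worked example for irradAtSensor (numbers invented for illustration):
    # with intens=100, ambIntensRatio=0.2, vecToLight=[0, 0, 2] (a light 2 m
    # straight ahead) and attenVect=[0, 1, 0] (purely linear falloff):
    #   cosAngle     = dot([0,0,1], [0,0,2]) / 2  = 1.0
    #   directIntens = 100 * (1 - 0.2) * 1.0      = 80.0
    #   attenuation  = 1 / (0 + 1*2 + 0*2**2)     = 0.5
    #   irradiance   = (80.0 + 100*0.2) * 0.5     = 50.0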
def testMaxSpeed(self, maxSpeed, mode):
"""test max speed of khepera-like robot in V-Rep
revving the motors up to maxSpeed in the self.noSteps and then backward.
mode--> 1, both motors, 2: right only, 3: left only"""
if mode == 1:
rightOn = leftOn = 1
elif mode == 2:
rightOn = 1
leftOn = 0
elif mode == 3:
rightOn = 0
leftOn = 1
unitSpeed = maxSpeed /self.noSteps
for i in xrange(self.noSteps):
eCode = vrep.simxSetJointTargetVelocity(self.simulID, self.rightMotor, unitSpeed *(i+1)*rightOn, vrep.simx_opmode_oneshot_wait)
eCode = vrep.simxSetJointTargetVelocity(self.simulID, self.leftMotor, unitSpeed *(i+1)*leftOn, vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
print "Step: %s\t Speed now: %.2f" %(str(i),(unitSpeed *(i+1)))
for i in xrange(self.noSteps):
eCode = vrep.simxSetJointTargetVelocity(self.simulID, self.rightMotor, -(maxSpeed/(i+1))*rightOn, vrep.simx_opmode_oneshot_wait)
eCode = vrep.simxSetJointTargetVelocity(self.simulID, self.leftMotor, -(maxSpeed/(i+1))*leftOn, vrep.simx_opmode_oneshot_wait)
vrep.simxSynchronousTrigger(self.simulID)
print "Step: %s\t Speed now: %.2f" % (str(i), (maxSpeed/(i+1))*rightOn)
if __name__ == "__main__":
test = VREPTests(noSteps=100, noRuns=5)
test.connectAll()
# test.testDetermMomvt()
# test.testLightSensors()
# test.moveReadLights()
# test.testMaxSpeed(300,1)
test.braiten2a()
test.cleanUp()
|
The Strad 91 (1980/81), pp. 26-27 and 30-31.
New Rochelle, NY: Arlington House Publishers, 1978, 1996 p.
|
# Author: Noel Dawe
from __future__ import division
import re
import sys
from pyAMI.objects import DatasetInfo, RunPeriod
from pyAMI.schema import *
from pyAMI.utils import *
from pyAMI.defaults import YEAR, STREAM, TYPE, PROJECT, PRODSTEP
DATA_PATTERN = re.compile(
'^(?P<project>\w+).(?P<run>[0-9]+).'
'(?P<stream>[a-zA-Z_\-0-9]+).(recon|merge).'
'(?P<type>[a-zA-Z_\-0-9]+).(?P<version>\w+)$')
ESD_VERSION_PATTERN = '(?P<la>f|r)(?P<lb>[0-9]+)'
AOD_VERSION_PATTERN = ESD_VERSION_PATTERN + '_(?P<ma>m|p)(?P<mb>[0-9]+)'
NTUP_VERSION_PATTERN = AOD_VERSION_PATTERN + '_p(?P<rb>[0-9]+)'
ESD_VERSION_PATTERN = re.compile('^%s$' % ESD_VERSION_PATTERN)
AOD_VERSION_PATTERN = re.compile('^%s$' % AOD_VERSION_PATTERN)
NTUP_VERSION_PATTERN = re.compile('^%s$' % NTUP_VERSION_PATTERN)
def _clean_dataset(dataset):
"""
Remove trailing slashes
*dataset*: str
dataset name
"""
if dataset is None:
return None
return dataset.rstrip('/')
def _expand_period_contraints(periods):
"""
period=B -> period like B%
period=B2 -> period=B2
"""
if isinstance(periods, basestring):
periods = periods.split(',')
selection = []
# single character
single_chars = [p for p in periods if len(p) == 1]
selection += ["period like '%s%%'" % p for p in single_chars]
# multiple characters
mult_chars = [p for p in periods if len(p) > 1]
selection += ["period='%s'" % p for p in mult_chars]
return " OR ".join(selection)
def search_query(client,
entity,
cmd='SearchQuery',
cmd_args=None,
pattern=None,
order=None,
limit=None,
fields=None,
flatten=False,
mode='defaultField',
project_name='Atlas_Production',
processing_step_name='Atlas_Production',
show_archived=False,
literal_match=False,
**kwargs):
try:
table = TABLES[entity]
except KeyError:
raise TypeError('Entity %s does not exist' % entity)
primary_field = table.primary
query_fields = parse_fields(fields, table)
if primary_field not in query_fields:
query_fields.append(primary_field)
query_fields_str = ', '.join(query_fields)
if cmd_args is None:
cmd_args = {}
if not isinstance(pattern, list):
patterns = [pattern]
else:
patterns = pattern
constraints = []
for pattern in patterns:
        # If the user has not put any '%' characters,
        # we add them to the beginning and the end of the pattern;
        # otherwise assume the user knows what he/she is doing.
        # If we did not do this, it would be impossible to search for
        # strings which start with a given character sequence.
if pattern is None:
if literal_match:
raise ValueError(
'pattern must not be None for literal matches')
pattern = '%'
elif '%' not in pattern and not literal_match:
pattern = '%' + pattern + '%'
elif not literal_match:
# replace repeated % with a single %
pattern = re.sub('%+', '%', pattern)
if literal_match:
constraints.append("%s='%s'" % (primary_field, pattern))
else:
constraints.append("%s like '%s'" % (primary_field, pattern))
constraints = ' OR '.join(constraints)
constraints = '(%s)' % constraints
if kwargs:
for name, value in kwargs.items():
if value is not None:
name = validate_field(name, table)
"""
Case of multiple values for a given field -> search with OR
"""
if name == 'period':
constraints += " AND (%s)" % _expand_period_contraints(value)
else:
if isinstance(value, (list, tuple)):
constraints += " AND (%s)" % (" OR ".join(["%s='%s'" %
(name, val) for val in value]))
else:
constraints += " AND %s='%s'" % (name, value)
if order is None:
order_field = primary_field
else:
order_field = validate_field(order, table)
if isinstance(limit, (list, tuple)):
limit = ' LIMIT %i,%i' % tuple(limit)
elif limit is not None:
limit = ' LIMIT 0,%i' % limit
else:
limit = ''
args = [cmd,
"entity=%s" % entity,
"glite=SELECT "
+ query_fields_str
+ (" WHERE (%s)" % constraints)
+ (" ORDER BY %s" % order_field)
+ limit,
"project=%s" % project_name,
"processingStep=%s" % processing_step_name,
"mode=%s" % mode]
for item in cmd_args.items():
args.append("%s=%s" % item)
if show_archived:
args.append('showArchived=true')
result = client.execute(args)
things = [thing for thing in result.rows()]
if flatten:
things = flatten_results(things, query_fields)
return query_fields, things
def get_types(client,
pattern,
order=None,
limit=None,
fields=None,
flatten=False,
show_archived=False,
**kwargs):
"""
A command to list all ATLAS types.
Only those with writeStatus=valid can be used for new names.
"""
if 'write_status' not in kwargs:
kwargs['write_status'] = 'valid'
query_fields, types = search_query(
client=client, entity='data_type', pattern=pattern,
processing_step_name='*',
order=order, limit=limit, fields=fields, show_archived=show_archived, **kwargs)
if flatten:
types = flatten_results(types, query_fields)
return types
def get_subtypes(client,
pattern,
order=None,
limit=None,
fields=None,
flatten=False,
show_archived=False,
**kwargs):
"""
A command to list all ATLAS subtypes.
Only those with writeStatus=valid can be used for new names.
"""
if 'write_status' not in kwargs:
kwargs['write_status'] = 'valid'
query_fields, types = search_query(
client=client, entity='subData_type', pattern=pattern,
processing_step_name='*',
order=order, limit=limit, fields=fields, show_archived=show_archived, **kwargs)
if flatten:
types = flatten_results(types, query_fields)
return types
def add_type(client, type):
"""
Add a type
"""
args = ['Addtype', type]
return client.execute(args)
def get_nomenclatures(client,
pattern,
order=None,
limit=None,
fields=None,
flatten=False,
show_archived=False,
**kwargs):
"""
Return list of ATLAS nomenclatures
"""
if 'write_status' not in kwargs:
kwargs['write_status'] = 'valid'
query_fields, nomens = search_query(
client=client, entity='nomenclature', pattern=pattern,
processing_step_name='*',
order=order, limit=limit, fields=fields, show_archived=show_archived, **kwargs)
if flatten:
nomens = flatten_results(nomens, query_fields)
return nomens
def get_projects(client,
pattern,
order=None,
limit=None,
fields=None,
flatten=False,
show_archived=False,
**kwargs):
if 'write_status' not in kwargs:
kwargs['write_status'] = 'valid'
query_fields, projects = search_query(
client=client, entity='projects', pattern=pattern,
processing_step_name='*',
order=order, limit=limit, fields=fields, show_archived=show_archived, **kwargs)
if flatten:
projects = flatten_results(projects, query_fields)
return projects
def get_subprojects(client,
pattern,
order=None,
limit=None,
fields=None,
flatten=False,
show_archived=False,
**kwargs):
if 'write_status' not in kwargs:
kwargs['write_status'] = 'valid'
query_fields, projects = search_query(
client=client, entity='subProjects', pattern=pattern,
processing_step_name='*',
order=order, limit=limit, fields=fields, show_archived=show_archived, **kwargs)
if flatten:
projects = flatten_results(projects, query_fields)
return projects
def get_prodsteps(client,
pattern,
order=None,
limit=None,
fields=None,
flatten=False,
show_archived=False,
**kwargs):
if 'write_status' not in kwargs:
kwargs['write_status'] = 'valid'
query_fields, steps = search_query(
client=client, entity='productionStep', pattern=pattern,
processing_step_name='*',
order=order, limit=limit, fields=fields, show_archived=show_archived, **kwargs)
if flatten:
steps = flatten_results(steps, query_fields)
return steps
def get_datasets(client,
pattern,
parent_type=None,
order=None,
limit=None,
fields=None,
flatten=False,
show_archived=False,
from_file=False,
**kwargs):
"""
Return list of datasets matching pattern
"""
if 'ami_status' not in kwargs:
kwargs['ami_status'] = 'VALID'
cmd_args = {}
if parent_type is not None and 'parent_type' not in kwargs:
cmd_args['parentType'] = parent_type
if from_file:
patterns = read_patterns_from(pattern)
else:
patterns = [pattern]
patterns = [_clean_dataset(p) for p in patterns]
query_fields, datasets = search_query(
client=client,
cmd='DatasetSearchQuery',
cmd_args=cmd_args,
entity='dataset',
pattern=patterns,
order=order, limit=limit,
fields=fields,
show_archived=show_archived, **kwargs)
if flatten:
datasets = flatten_results(datasets, query_fields)
return datasets
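# Illustrative call (the pattern and field names below are invented; `client`
# is an AMIClient as used elsewhere in this module):
#
#   datasets = get_datasets(client, 'data11_7TeV%NTUP_SMWZ%',
#                           fields='events,modified', limit=20, flatten=True)
#   # with flatten=True each entry is a tuple ordered like the query fields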
def get_periods_for_run(client, run):
"""
Return data periods which contain this run
"""
result = client.execute(['GetDataPeriodsForRun', '-runNumber=%i' % run])
periods = sorted([
RunPeriod(
level=int(e['periodLevel']),
name=str(e['period']),
project=str(e['project']),
status=str(e['status']),
description=str(e['description']))
for e in result.to_dict()['Element_Info'].values()])
return periods
def get_periods(client, year=YEAR, level=2):
"""
Return all periods at a specified detail level in the given year
"""
cmd = ['ListDataPeriods', '-createdSince=2009-01-01 00:00:00' ]
if year > 2000:
year %= 1000
cmd += [ '-projectName=data%02i%%' % year]
if level in [1, 2, 3]:
cmd += [ '-periodLevel=%i' % level ]
else:
raise ValueError('level must be 1, 2, or 3')
result = client.execute(cmd)
periods = [RunPeriod(project=e['projectName'],
year=year,
name=str(e['period']),
level=level,
status=e['status'],
description=e['description']) \
for e in result.to_dict()['Element_Info'].values()]
periods.sort()
return periods
def get_all_periods(client):
"""
Return all periods
"""
all_periods = []
p = re.compile("(?P<period>(?P<periodletter>[A-Za-z]+)(?P<periodnumber>\d+)?)$")
result = get_periods(client, year=0, level=0)
for period, projectName in result:
m = p.match(period)
if not m:
continue
year = int(projectName[4:6])
period_letter = m.group('periodletter')
if m.group('periodnumber'):
period_number = int(m.group('periodnumber'))
else:
period_number = 0
if len(period_letter) != 1:
pc = 0
else:
pc = 10000 * year + 100 * (ord(period_letter.upper()) - 65) + period_number
all_periods += [ ((year, period, pc), projectName + ".period" + period) ]
all_periods.sort()
return all_periods
def print_periods(periods, wrap_desc=True, wrap_width=50, stream=None):
if stream is None:
stream = sys.stdout
table = [['Project', 'Name', 'Status', 'Description']]
for period in periods:
table.append([period.project,
period.name,
period.status,
period.description])
print_table(table,
wrap_last=wrap_desc,
wrap_width=wrap_width,
vsep='-',
stream=stream)
def get_runs(client, periods=None, year=YEAR):
"""
Return all runs contained in the given periods in the specified year
"""
if year > 2000:
year %= 1000
if not periods:
periods = [period.name for period in get_periods(client, year=year, level=1)]
elif isinstance(periods, basestring):
periods = periods.split(',')
runs = []
# remove duplicate periods
for period in set(periods):
cmd = ['GetRunsForDataPeriod', '-period=%s' % period]
cmd += [ '-projectName=data%02i%%' % year ]
result = client.execute(cmd)
runs += [ int(e['runNumber']) for e in result.to_dict()['Element_Info'].values() ]
# remove duplicates
runs = list(set(runs))
runs.sort()
return runs
def get_provenance(client, dataset, type=None, **kwargs):
"""
Return all parent dataset of the given dataset
"""
dataset = _clean_dataset(dataset)
args = ["ListDatasetProvenance",
"logicalDatasetName=%s" % dataset,
'output=xml']
if kwargs:
args += ['%s=%s' % item for item in kwargs.items()]
result = client.execute(args)
dom = result.dom
graph = dom.getElementsByTagName('graph')
dictOfLists = {}
for line in graph:
nodes = line.getElementsByTagName('node')
for node in nodes:
level = int(node.attributes['level'].value)
dataset = node.attributes['name'].value
if type and (type in dataset):
levelList = dictOfLists.get(level, [])
levelList.append(dataset)
dictOfLists[level] = levelList
elif not type:
levelList = dictOfLists.get(level, [])
levelList.append(dataset)
dictOfLists[level] = levelList
return dictOfLists
def print_provenance(result):
for key in sorted(result.keys()):
print "generation =", key
for dataset in sorted(result[key]):
print " ", dataset
def get_dataset_info(client, dataset, **kwargs):
"""
Return a DatasetInfo instance (the dataset metadata)
*client*: AMIClient
*dataset*: str
*kwargs*: dict
"""
dataset = _clean_dataset(dataset)
args = ["GetDatasetInfo",
"logicalDatasetName=%s" % dataset]
if kwargs:
args += ['%s=%s' % item for item in kwargs.items()]
dataset_info = DatasetInfo(dataset=dataset)
result = client.execute(args)
dom = result.dom
# get the rowsets
rowsets = dom.getElementsByTagName('rowset')
for rowset in rowsets:
rowsetLabel = ""
if "type" in rowset.attributes.keys():
rowsetLabel = rowsetLabel + rowset.attributes['type'].value
rows = rowset.getElementsByTagName('row')
if (rowsetLabel == "Element_Info"):
for row in rows:
fields = row.getElementsByTagName("field")
for field in fields:
if field.firstChild:
tableName = field.attributes['table'].value
if tableName == "dataset":
value = field.firstChild.nodeValue
name = field.attributes['name'].value
dataset_info.info[name] = value
elif tableName == "dataset_extra":
value = field.firstChild.nodeValue
name = field.attributes['name'].value
dataset_info.extra[name] = value
elif (tableName == "dataset_added_comment") or \
(tableName == "dataset_comment"):
value = field.firstChild.nodeValue
name = field.attributes['name'].value
dataset_info.comments[name] = value
elif (tableName == "dataset_property"):
propertyName = field.attributes['name'].value.split('_')[0]
if propertyName in dataset_info.properties:
tmpDict = dataset_info.properties[propertyName]
else:
tmpDict = {"type": "",
"min": "",
"max": "",
"unit": "",
"description": ""}
propertyNameSubField = field.attributes['name'].value
try:
propertyNameSubValue = field.firstChild.nodeValue
except:
propertyNameSubValue = ""
if propertyNameSubField == propertyName + "_type":
tmpDict["type"] = propertyNameSubValue
if propertyNameSubField == propertyName + "_min":
tmpDict["min"] = propertyNameSubValue
if propertyNameSubField == propertyName + "_max":
tmpDict["max"] = propertyNameSubValue
if propertyNameSubField == propertyName + "_unit":
tmpDict["unit"] = propertyNameSubValue
if propertyNameSubField == propertyName + "_desc":
tmpDict["description"] = propertyNameSubValue
dataset_info.properties[propertyName] = tmpDict
return dataset_info
def get_event_info(client, dataset, **kwargs):
"""
Return the metadata of the parent event generator dataset
*client*: AMIClient
*dataset*: str
*kwargs*: dict
"""
dataset = _clean_dataset(dataset)
if 'EVNT' not in dataset:
prov = get_provenance(client, dataset, type='EVNT', **kwargs)
evgen_datasets = []
for key, dsets in prov.items():
evgen_datasets += dsets
else:
evgen_datasets = [dataset]
results = []
for dset in set(evgen_datasets):
results.append(get_dataset_info(client, dset, **kwargs))
return results
def get_dataset_xsec_effic(client, dataset, **kwargs):
"""
Return the cross section and generator filter efficiency
*client*: AMIClient
*dataset*: str
*kwargs*: dict
"""
infos = get_event_info(client, dataset, **kwargs)
if len(infos) > 1:
raise ValueError('Dataset %s has multiple parent event generator datasets' % dataset)
elif not infos:
raise ValueError('Event info not found for dataset %s' % dataset)
info = infos[0]
try:
xsec = float(info.extra['crossSection_mean'])
except KeyError:
raise ValueError('No cross section listed for dataset %s' % dataset)
try:
effic = float(info.extra['GenFiltEff_mean'])
except KeyError:
raise ValueError('No generator filter efficiency listed for dataset %s' % dataset)
return xsec, effic
def get_dataset_xsec_min_max_effic(client, dataset, **kwargs):
"""
Return the cross section mean, min, max, and generator filter efficiency
*client*: AMIClient
*dataset*: str
*kwargs*: dict
"""
infos = get_event_info(client, dataset, **kwargs)
if len(infos) > 1:
raise ValueError('Dataset %s has multiple parent event generator datasets' % dataset)
elif not infos:
raise ValueError('Event info not found for dataset %s' % dataset)
info = infos[0]
try:
xsec = float(info.extra['crossSection_mean'])
except KeyError:
raise ValueError('No cross section listed for dataset %s' % dataset)
try:
xsec_min = float(info.properties['crossSection']['min'])
xsec_max = float(info.properties['crossSection']['max'])
except KeyError:
raise ValueError('No cross section min or max listed for dataset %s' % dataset)
try:
effic = float(info.extra['GenFiltEff_mean'])
except KeyError:
raise ValueError('No generator filter efficiency listed for dataset %s' % dataset)
return xsec, xsec_min, xsec_max, effic
def get_data_datasets(client,
tag_pattern=None,
periods=None,
project=PROJECT,
stream=STREAM,
type=TYPE,
prod_step=PRODSTEP,
parent_type=None,
grl=None,
fields=None,
latest=False,
flatten=False,
**kwargs):
"""
*client*: AMIClient
*tag_pattern*: [ str | None ]
*periods*: [ list | tuple | str | None ]
*project*: str
*stream*: str
*type*: str
*prod_step*: str
*parent_type*: str
*fields*: [ list | tuple | str | None ]
*latest*: bool
*flatten*: bool
Returns a list of dicts if flatten==False
else list of tuples with elements in same order as fields
"""
# Transmit period(s) as kwargs in order to do only one query
if periods is not None:
if isinstance(periods, basestring):
periods = periods.split(',')
kwargs['period'] = periods
if grl is not None:
# need to be compatible with Python 2.4
# so no ElementTree here...
from xml.dom import minidom
doc = minidom.parse(grl)
run_nodes = doc.getElementsByTagName('Run')
runs = []
for node in run_nodes:
runs.append(int(node.childNodes[0].data))
kwargs['run'] = runs
datasets = get_datasets(client, tag_pattern, fields=fields,
project=project, stream=stream, type=type,
prod_step=prod_step,
parent_type=parent_type,
**kwargs)
if latest:
if type.startswith('NTUP'):
VERSION_PATTERN = NTUP_VERSION_PATTERN
elif type.startswith('AOD'):
VERSION_PATTERN = AOD_VERSION_PATTERN
elif type.startswith('ESD'):
VERSION_PATTERN = ESD_VERSION_PATTERN
else:
raise TypeError('\'latest\' not implemented for type %s' % type)
ds_unique = {}
for ds in datasets:
name = ds['logicalDatasetName']
match = re.match(DATA_PATTERN, name)
if match:
new_version = re.match(VERSION_PATTERN, match.group('version'))
if not new_version:
continue
run = int(match.group('run'))
if run not in ds_unique:
ds_unique[run] = ds
else:
curr_version = re.match(VERSION_PATTERN, re.match(DATA_PATTERN, ds_unique[run]['logicalDatasetName']).group('version'))
if type.startswith('NTUP'):
if new_version.group('la') == 'r' and curr_version.group('la') == 'f' or \
((new_version.group('la') == curr_version.group('la') and \
int(new_version.group('lb')) >= int(curr_version.group('lb')) and \
int(new_version.group('mb')) >= int(curr_version.group('mb')) and \
int(new_version.group('rb')) >= int(curr_version.group('rb')))):
ds_unique[run] = ds
elif type.startswith('AOD'):
if new_version.group('la') == 'r' and curr_version.group('la') == 'f' or \
((new_version.group('la') == curr_version.group('la') and \
int(new_version.group('lb')) >= int(curr_version.group('lb')) and \
int(new_version.group('mb')) >= int(curr_version.group('mb')))):
ds_unique[run] = ds
elif type.startswith('ESD'):
if new_version.group('la') == 'r' and curr_version.group('la') == 'f' or \
((new_version.group('la') == curr_version.group('la') and \
int(new_version.group('lb')) >= int(curr_version.group('lb')))):
ds_unique[run] = ds
datasets = ds_unique.values()
datasets.sort()
if flatten:
fields = parse_fields(fields, DATASET_TABLE)
fields.append('logicalDatasetName')
return flatten_results(datasets, fields)
return datasets
# does not work...
def get_configtagfields(client, tag, *args, **kwargs):
"""
*client*: AMIClient
*tag*: str
*args*: tuple
tuple of args to add to AMI command
*kwargs*: dict
dict of keyword args to add to AMI command as key=value
"""
argv = ['ListConfigTagFields',
'configTag=%s' % tag]
argv.extend(args)
for name, value in kwargs.items():
argv.append("%s='%s'" % (name, value))
result = client.execute(argv)
return result
def get_configtags(client, tag, *args, **kwargs):
"""
*client*: AMIClient
*tag*: str
*args*: tuple
tuple of args to add to AMI command
*kwargs*: dict
dict of keyword args to add to AMI command as key=value
"""
argv = ['ListConfigurationTag',
'configTag=%s' % tag]
argv.extend(args)
for name, value in kwargs.items():
argv.append("%s='%s'" % (name, value))
result = client.execute(argv)
return [row for row in result.rows()]
def get_files(client, dataset, limit=None):
"""
*client*: AMIClient
*dataset*: str
*limit*: [ tuple | list | int | None ]
"""
dataset = _clean_dataset(dataset)
args = ['ListFiles', 'logicalDatasetName=%s' % dataset]
if limit is not None:
if isinstance(limit, (list, tuple)):
limit = 'limit=%i,%i' % tuple(limit)
else:
limit = 'limit=0,%i' % limit
args.append(limit)
result = client.execute(args)
return result.rows()
def list_files(client, dataset, limit=None, total=False, human_readable=False, long=False, stream=None):
"""
*client*: AMIClient
*dataset*: str
*limit*: [ tuple | list | int | None ]
*total*: bool
*human_readable*: bool
*long*: bool
*stream*: file
"""
if stream is None:
stream = sys.stdout
if long:
table = []
total_size = 0
total_events = 0
for file in get_files(client, dataset, limit=limit):
size = file['fileSize']
if size != 'NULL':
total_size += int(size)
if human_readable:
size = humanize_bytes(int(size))
events = file['events']
if events != 'NULL':
total_events += int(events)
table.append(["size: %s" % size, "events: %s" % events, file['LFN'], "GUID: %s" % file['fileGUID']])
if total:
if human_readable:
total_size = humanize_bytes(total_size)
table.append(["size: %s" % total_size, "events: %i" % total_events, "total", ""])
print_table(table, stream=stream)
else:
for file in get_files(client, dataset, limit=limit):
print >> stream, file['LFN']
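# --- Hedged usage sketch (not part of the original module) ------------------
# `client` is an already-constructed AMI client object; its import path
# differs between pyAMI versions, so it is left to the caller. The dataset
# name below is a made-up placeholder, not a real sample.
def _example_usage(client,
                   dataset='mc11_7TeV.999999.example_sample.merge.AOD.e000_s000_r000'):
    """Query basic metadata, cross section and a few files for one dataset."""
    info = get_dataset_info(client, dataset)
    xsec, effic = get_dataset_xsec_effic(client, dataset)
    list_files(client, dataset, limit=5, long=True, total=True,
               human_readable=True)
    return info, xsec, effic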
|
Take the scenic route when you explore downtown Denver!
Be the ultimate fan with everything you need to cheer on your favorite teams!
Save your money for fun and games, not parking!
Receive discounted rates when you prepay!
|
### -*- coding: utf-8 -*- ####################################################
from django.core.cache import cache
SESSION_GROUP_KEY = 'alphabetic_default_group'
DEFAULT_GROUP = 'rus'
CACHE_SECOND_PREFIX = 'alphabetic_second'
def get_group(request):
return request.session.get(SESSION_GROUP_KEY, DEFAULT_GROUP)
def set_group(request, group_key):
request.session[SESSION_GROUP_KEY] = group_key
def get_cache_key(queryset, letter, cache_params):
"""Generates unique cache key"""
try:
ident_class = queryset.model.__name__
except AttributeError:
ident_class = ''
return "_".join([CACHE_SECOND_PREFIX, ident_class, letter]+list(cache_params))
def get_second_level(queryset, letter, cache_params):
key = get_cache_key(queryset, letter, cache_params)
if key:
return cache.get(key)
def set_second_level(queryset, letter, second_level, timeout, cache_params):
key = get_cache_key(queryset, letter, cache_params)
if key:
cache.set(key, second_level, timeout)
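# --- Hedged usage sketch (not part of the original module) ------------------
# Assumes a Django project with a configured cache backend; the queryset, the
# field name and the 300-second timeout are illustrative placeholders only.
def _example_second_level(queryset, letter='a', cache_params=('public',)):
    """Memoise a second-level index for *letter* using the helpers above."""
    second_level = get_second_level(queryset, letter, cache_params)
    if second_level is None:
        # Expensive computation of the second-level index happens only on a miss.
        names = queryset.values_list('name', flat=True)
        second_level = sorted({n[:2].lower() for n in names
                               if n.lower().startswith(letter)})
        set_second_level(queryset, letter, second_level, 300, cache_params)
    return second_level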
|
Image Credit: Andy Magee via flickr.
Water. We all drink it, but how many of us really think about it? Eh? Last night Glasgow’s Water Innovation Challenge conference kicked off at the suitably aqueous venue of The Tall Ship.1 The smartly dressed attendees were brought to a dimly lit cabin containing drinks, tables, and PowerPoint presentations.
The conference captain explained that the purpose of the 2.5 day event is to generate and develop ideas that can tackle some of the biggest problems relating to water, by thinking outside of the box. Be that by helping the 2.5 billion people who do not have access to hygienic water, or solving the problems of a Victorian sewage system in the 21st Century.
We’ll report from the event as it progresses, but yesterday it was all about the launch party. We were treated to three keynote speakers, all of whom work with water in innovative ways.
First up, Minty Donald, an artist and lecturer at the University of Glasgow, kicked things off by telling us about her “experiments”. My personal favourite was Experiment #2: Water Borrow. This experiment had the following method (to its madness).
After completing each experiment Donald would spend some time with her colleagues pondering the interaction. She described her feelings to us incredibly poetically, and philosophised about whether water has rights.
Next, we heard from George Ponton of Scottish Water. He opened with a guessing game: if you laid all of Scottish Water’s pipes end to end, how many times could you reach Beijing and back? If you don’t want to know the answer, look away now. Eight! One member of the audience guessed correctly and, to her delight, was awarded a bag of Aero Bubbles (gettit?) as a prize.
Ponton went on to deliver a motivational speech for the conference participants, urging us to “challenge our assumptions” and not to listen to the experts as “they know nothing”. In between the motivation, Ponton provided a waterfall of aqua facts. Did you know 600 million litres of water leak out of Scottish pipes every single day? And that it costs more to fix the leaks than it does to put up with the loss of water? Or that there is 200% more energy in the waste removed from water than the energy required to treat it? Great facts, great speech.
Finally, we listened to David Harley from the Scottish Environment Protection Agency (SEPA). Harley focussed on the “interconnectedness of everything”. He spends his time as a Land and Water Manager trying to solve some modern day water problems.
He explained that Scotland has some large residential areas where all of the excess water washes into the drains and then into nearby rivers. The problem is that this runoff picks up pollutants such as oils, metals, and bacteria from the road and concentrates them at one point in the river. The result is bodies of water near residential areas with far from ideal pollutant levels.
“In Germany nobody washes their car in the street,” he told us, “they know that the excess water washes into the drains and causes pollution.” Treating the water from residential areas is really expensive, but doing as the Germans do and reducing the levels of pollutants ourselves is far cheaper.
To finish the evening the participants gathered in their teams to discuss the challenges that they will tackle throughout the conference. In the meantime I did what any good journalist would have done in my position — visited the buffet bar.
Look out for our next report from the Water Innovation Challenge in the next few days. If you want to know more look here, or follow them on Twitter.
A 19th Century Clyde-built sailing boat that is moored outside The Riverside Museum.
Damn, spider. Why you so hairy?
|
from setuptools import setup, find_packages
version = '0.0.1'
setup(name='joerd',
version=version,
description="A tool for downloading and generating elevation data.",
long_description=open('README.md').read(),
classifiers=[
# strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Scientific/Engineering :: GIS',
'Topic :: Utilities',
],
keywords='map dem elevation raster',
author='Matt Amos, Mapzen',
author_email='[email protected]',
url='https://github.com/mapzen/joerd',
license='MIT',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
'GDAL',
'beautifulsoup4',
'requests',
'numpy',
'PyYAML',
'pyqtree',
'geographiclib',
'boto3',
'contextlib2',
],
test_suite='tests',
tests_require=[
'httptestserver',
],
entry_points=dict(
console_scripts=[
'joerd = joerd.command:joerd_main',
]
)
)
|
Our big question: how do you eliminate stretch marks permanently? To our regret, they are a normal part of life for many women. Sooner or later, stretch marks appear somewhere on the body (thighs, chest, abdomen…). They are very difficult to eliminate once they appear, but while they are still in their early, reddish phase they are easier to fight. Below are the best tricks to ‘eliminate’ stretch marks.
The first thing we need to know is how they are produced. Stretch marks appear when the skin is stretched to its limit, which is why they show up when weight is gained and later lost, during puberty, or during and after pregnancy. Stretch marks can appear at almost any age. The breakage and retraction of the elastic fibers of the dermis cause the skin to lose elasticity and leave certain areas of the body with stretch marks.
There are different types of stretch marks: pubertal (linked to hormonal changes); distension marks (associated with weight gain or loss); atrophic marks (caused by the stretching and breaking of the fibers); and gravid marks (which originate during pregnancy).
Eliminating stretch marks is very difficult, but when they are in their first phase they can be removed or their appearance improved. There are several ways to tackle stretch marks: with specific cosmetics, with natural remedies, or with in-clinic treatments such as laser therapy. Below are various tricks and treatments to remove or improve stretch marks.
Exfoliate daily; you will slough off dead skin cells.
Take hot baths and massage the area you want to treat to activate blood circulation.
Use creams containing retinol-A; it has been shown to be effective, but avoid it if you are pregnant or nursing.
You can also grate two carrots and mix them with almond milk. Apply the mixture to the stretch marks for as long as possible.
You can also use creams made specifically for stretch marks. Of course, with these creams you must be consistent and apply them once or twice a day. Pregnant women should use stretch-mark creams created especially for them.
But if you have already tried everything without results, there is always laser treatment to remove stretch marks completely. In that case, see a dermatologist, who will recommend the most appropriate method for your case, since several different treatments exist.
Carboxytherapy with CO2 for therapeutic purposes is another method that can help blur stretch marks. It is a non-invasive treatment that involves administering carbon dioxide subcutaneously through small infiltrations into the tissue. The method improves the microcirculation, tone, and elasticity of the skin.
Chemical resurfacing is another method used to remove stretch marks. It is usually combined with needle-free biostimulation using PRX-T33. This treatment helps exfoliate the skin and level out skin depressions, improving elasticity and softening the pearly tone of the stretch marks.
Now you just have to choose the best method to eliminate stretch marks.
|
import numpy as np
import taichi as ti
ti.init(arch=ti.gpu)
dim = 2
quality = 1 # Use a larger integral number for higher quality
n_particle_x = 100 * quality
n_particle_y = 8 * quality
n_particles = n_particle_x * n_particle_y
n_elements = (n_particle_x - 1) * (n_particle_y - 1) * 2
n_grid = 64 * quality
dx = 1 / n_grid
inv_dx = 1 / dx
dt = 1e-4 / quality
E = 25000
p_mass = 1
p_vol = 1
mu = 1
la = 1
x = ti.Vector.field(dim, dtype=float, shape=n_particles, needs_grad=True)
v = ti.Vector.field(dim, dtype=float, shape=n_particles)
C = ti.Matrix.field(dim, dim, dtype=float, shape=n_particles)
grid_v = ti.Vector.field(dim, dtype=float, shape=(n_grid, n_grid))
grid_m = ti.field(dtype=float, shape=(n_grid, n_grid))
restT = ti.Matrix.field(dim, dim, dtype=float, shape=n_particles)
total_energy = ti.field(dtype=float, shape=(), needs_grad=True)
vertices = ti.field(dtype=ti.i32, shape=(n_elements, 3))
@ti.func
def mesh(i, j):
return i * n_particle_y + j
@ti.func
def compute_T(i):
a = vertices[i, 0]
b = vertices[i, 1]
c = vertices[i, 2]
ab = x[b] - x[a]
ac = x[c] - x[a]
return ti.Matrix([[ab[0], ac[0]], [ab[1], ac[1]]])
@ti.kernel
def initialize():
for i in range(n_particle_x):
for j in range(n_particle_y):
t = mesh(i, j)
x[t] = [0.1 + i * dx * 0.5, 0.7 + j * dx * 0.5]
v[t] = [0, -1]
# build mesh
for i in range(n_particle_x - 1):
for j in range(n_particle_y - 1):
# element id
eid = (i * (n_particle_y - 1) + j) * 2
vertices[eid, 0] = mesh(i, j)
vertices[eid, 1] = mesh(i + 1, j)
vertices[eid, 2] = mesh(i, j + 1)
eid = (i * (n_particle_y - 1) + j) * 2 + 1
vertices[eid, 0] = mesh(i, j + 1)
vertices[eid, 1] = mesh(i + 1, j + 1)
vertices[eid, 2] = mesh(i + 1, j)
for i in range(n_elements):
restT[i] = compute_T(i) # Compute rest T
@ti.kernel
def compute_total_energy():
for i in range(n_elements):
currentT = compute_T(i)
F = currentT @ restT[i].inverse()
# NeoHookean
I1 = (F @ F.transpose()).trace()
J = F.determinant()
element_energy = 0.5 * mu * (
I1 - 2) - mu * ti.log(J) + 0.5 * la * ti.log(J)**2
total_energy[None] += E * element_energy * dx * dx
@ti.kernel
def p2g():
for p in x:
base = ti.cast(x[p] * inv_dx - 0.5, ti.i32)
fx = x[p] * inv_dx - ti.cast(base, float)
w = [0.5 * (1.5 - fx)**2, 0.75 - (fx - 1)**2, 0.5 * (fx - 0.5)**2]
affine = p_mass * C[p]
for i in ti.static(range(3)):
for j in ti.static(range(3)):
I = ti.Vector([i, j])
dpos = (float(I) - fx) * dx
weight = w[i].x * w[j].y
grid_v[base + I] += weight * (p_mass * v[p] - dt * x.grad[p] +
affine @ dpos)
grid_m[base + I] += weight * p_mass
bound = 3
@ti.kernel
def grid_op():
for i, j in grid_m:
if grid_m[i, j] > 0:
inv_m = 1 / grid_m[i, j]
grid_v[i, j] = inv_m * grid_v[i, j]
grid_v[i, j].y -= dt * 9.8
# center collision circle
dist = ti.Vector([i * dx - 0.5, j * dx - 0.5])
if dist.norm_sqr() < 0.005:
dist = dist.normalized()
grid_v[i, j] -= dist * min(0, grid_v[i, j].dot(dist))
# box
if i < bound and grid_v[i, j].x < 0:
grid_v[i, j].x = 0
if i > n_grid - bound and grid_v[i, j].x > 0:
grid_v[i, j].x = 0
if j < bound and grid_v[i, j].y < 0:
grid_v[i, j].y = 0
if j > n_grid - bound and grid_v[i, j].y > 0:
grid_v[i, j].y = 0
@ti.kernel
def g2p():
for p in x:
base = ti.cast(x[p] * inv_dx - 0.5, ti.i32)
fx = x[p] * inv_dx - float(base)
w = [0.5 * (1.5 - fx)**2, 0.75 - (fx - 1.0)**2, 0.5 * (fx - 0.5)**2]
new_v = ti.Vector([0.0, 0.0])
new_C = ti.Matrix([[0.0, 0.0], [0.0, 0.0]])
for i in ti.static(range(3)):
for j in ti.static(range(3)):
I = ti.Vector([i, j])
dpos = float(I) - fx
g_v = grid_v[base + I]
weight = w[i].x * w[j].y
new_v += weight * g_v
new_C += 4 * weight * g_v.outer_product(dpos) * inv_dx
v[p] = new_v
x[p] += dt * v[p]
C[p] = new_C
gui = ti.GUI("MPM", (640, 640), background_color=0x112F41)
def main():
initialize()
vertices_ = vertices.to_numpy()
while gui.running and not gui.get_event(gui.ESCAPE):
for s in range(int(1e-2 // dt)):
grid_m.fill(0)
grid_v.fill(0)
# Note that we are now differentiating the total energy w.r.t. the particle position.
# Recall that F = - \partial (total_energy) / \partial x
with ti.Tape(total_energy):
# Do the forward computation of total energy and backward propagation for x.grad, which is later used in p2g
compute_total_energy()
# It's OK not to use the computed total_energy at all, since we only need x.grad
p2g()
grid_op()
g2p()
gui.circle((0.5, 0.5), radius=45, color=0x068587)
particle_pos = x.to_numpy()
a = vertices_.reshape(n_elements * 3)
b = np.roll(vertices_, shift=1, axis=1).reshape(n_elements * 3)
gui.lines(particle_pos[a], particle_pos[b], radius=1, color=0x4FB99F)
gui.circles(particle_pos, radius=1.5, color=0xF2B134)
gui.line((0.00, 0.03 / quality), (1.0, 0.03 / quality),
color=0xFFFFFF,
radius=3)
gui.show()
if __name__ == '__main__':
main()
|
Is this really the first post of 2017? Good heavens! It's been a bit quiet for work I can show, but that's all changed now with all the work from the Lone Wolf Adventure Game's Bestiary volume 1, and all the remaining Cthulhu Tales cards, now cleared to be shown.
|
#Include dependencies
import numpy as np
from numpy import pi
from numpy.linalg import norm
from scipy.io import loadmat
from scipy.stats import linregress
from fnmatch import fnmatch
import os
import sys
import xml.etree.ElementTree as ET
def AFRL(directory, pol, start_az, n_az=3):
##############################################################################
# #
# This function reads in the AFRL *.mat files from the user supplied #
# directory and exports both the phs and a Python dictionary compatible #
# with ritsar. #
# #
##############################################################################
#Check Python version
version = sys.version_info
#Get filenames
walker = os.walk(directory+'/'+pol)
if version.major < 3:
w = walker.next()
else:
w = walker.__next__()
prefix = '/'+pol+'/'+w[2][0][0:19]
az_str = []
fnames = []
az = np.arange(start_az, start_az+n_az)
[az_str.append(str('%03d_'%a)) for a in az]
[fnames.append(directory+prefix+a+pol+'.mat') for a in az_str]
#Grab n_az phase histories
phs = []; platform = []
for fname in fnames:
#Convert MATLAB structure to Python dictionary
MATdata = loadmat(fname)['data'][0][0]
data =\
{
'fp' : MATdata[0],
'freq' : MATdata[1][:,0],
'x' : MATdata[2].T,
'y' : MATdata[3].T,
'z' : MATdata[4].T,
'r0' : MATdata[5][0],
'th' : MATdata[6][0],
'phi' : MATdata[7][0],
}
#Define phase history
phs_tmp = data['fp'].T
phs.append(phs_tmp)
#Transform data to be compatible with ritsar
c = 299792458.0
nsamples = int(phs_tmp.shape[1])
npulses = int(phs_tmp.shape[0])
freq = data['freq']
pos = np.hstack((data['x'], data['y'], data['z']))
k_r = 4*pi*freq/c
B_IF = data['freq'].max()-data['freq'].min()
delta_r = c/(2*B_IF)
delta_t = 1.0/B_IF
t = np.linspace(-nsamples/2, nsamples/2, nsamples)*delta_t
chirprate, f_0, r, p, s\
= linregress(t, freq)
#Vector to scene center at synthetic aperture center
if np.mod(npulses,2)>0:
R_c = pos[npulses/2]
else:
R_c = np.mean(
pos[npulses/2-1:npulses/2+1],
axis = 0)
#Save values to dictionary for export
platform_tmp = \
{
'f_0' : f_0,
'freq' : freq,
'chirprate' : chirprate,
'B_IF' : B_IF,
'nsamples' : nsamples,
'npulses' : npulses,
'pos' : pos,
'delta_r' : delta_r,
'R_c' : R_c,
't' : t,
'k_r' : k_r,
}
platform.append(platform_tmp)
#Stack data from different azimuth files
phs = np.vstack(phs)
npulses = int(phs.shape[0])
pos = platform[0]['pos']
for i in range(1, n_az):
pos = np.vstack((pos, platform[i]['pos']))
if np.mod(npulses,2)>0:
R_c = pos[npulses/2]
else:
R_c = np.mean(
pos[npulses/2-1:npulses/2+1],
axis = 0)
#Replace Dictionary values
platform = platform_tmp
platform['npulses'] = npulses
platform['pos'] = pos
platform['R_c'] = R_c
#Synthetic aperture length
L = norm(pos[-1]-pos[0])
#Add k_y
platform['k_y'] = np.linspace(-npulses/2,npulses/2,npulses)*2*pi/L
return(phs, platform)
def Sandia(directory):
##############################################################################
# #
# This function reads in the Sandia *.phs and *.au2 files from the user #
# supplied directoryand exports both the phs and a Python dictionary #
# compatible with ritsar. #
# #
##############################################################################
#get filename containing auxilliary data
for file in os.listdir(directory):
if fnmatch(file, '*.au2'):
aux_fname = directory+file
#import auxillary data
f=open(aux_fname,'rb')
#initialize tuple
record=['blank'] #first record blank to ensure
#indices match record numbers
#record 1
data = np.fromfile(f, dtype = np.dtype([
('version','S6'),
('phtype','S6'),
('phmode','S6'),
('phgrid','S6'),
('phscal','S6'),
('cbps','S6')
]),count=1)
record.append(data[0])
#record 2
f.seek(44)
data = np.fromfile(f, dtype = np.dtype([
('npulses','i4'),
('nsamples','i4'),
('ipp_start','i4'),
('ddas','f4',(5,)),
('kamb','i4')
]),count=1)
record.append(data[0])
#record 3
f.seek(44*2)
data = np.fromfile(f, dtype = np.dtype([
('fpn','f4',(3,)),
('grp','f4',(3,)),
('cdpstr','f4'),
('cdpstp','f4')
]),count=1)
record.append(data[0])
#record 4
f.seek(44*3)
data = np.fromfile(f, dtype = np.dtype([
('f0','f4'),
('fs','f4'),
('fdot','f4'),
('r0','f4')
]),count=1)
record.append(data[0])
#record 5 (blank)rvr_au_read.py
f.seek(44*4)
data = []
record.append(data)
#record 6
npulses = record[2]['npulses']
rpoint = np.zeros([npulses,3])
deltar = np.zeros([npulses,])
fscale = np.zeros([npulses,])
c_stab = np.zeros([npulses,3])
#build up arrays for record(npulses+6)
for n in range(npulses):
f.seek((n+5)*44)
data = np.fromfile(f, dtype = np.dtype([
('rpoint','f4',(3,)),
('deltar','f4'),
('fscale','f4'),
('c_stab','f8',(3,))
]),count=1)
rpoint[n,:] = data[0]['rpoint']
deltar[n] = data[0]['deltar']
fscale[n] = data[0]['fscale']
c_stab[n,:] = data[0]['c_stab']
#consolidate arrays into a 'data' dataype
dt = np.dtype([
('rpoint','f4',(npulses,3)),
('deltar','f4',(npulses,)),
('fscale','f4',(npulses,)),
('c_stab','f8',(npulses,3))
])
data = np.array((rpoint,deltar,fscale,c_stab)
,dtype=dt)
#write to record file
record.append(data)
#import phase history
for file in os.listdir(directory):
if fnmatch(file, '*.phs'):
phs_fname = directory+file
nsamples = record[2][1]
npulses = record[2][0]
f=open(phs_fname,'rb')
dt = np.dtype('i2')
phs = np.fromfile(f, dtype=dt, count=-1)
real = phs[0::2].reshape([npulses,nsamples])
imag = phs[1::2].reshape([npulses,nsamples])
phs = real+1j*imag
#Create platform dictionary
c = 299792458.0
pos = record[6]['rpoint']
n_hat = record[3]['fpn']
delta_t = record[4]['fs']
t = np.linspace(-nsamples/2, nsamples/2, nsamples)*1.0/delta_t
chirprate = record[4]['fdot']*1.0/(2*pi)
f_0 = record[4]['f0']*1.0/(2*pi) + chirprate*nsamples/(2*delta_t)
B_IF = (t.max()-t.min())*chirprate
delta_r = c/(2*B_IF)
freq = f_0+chirprate*t
omega = 2*pi*freq
k_r = 2*omega/c
if np.mod(npulses,2)>0:
R_c = pos[npulses/2]
else:
R_c = np.mean(
pos[npulses/2-1:npulses/2+1],
axis = 0)
platform = \
{
'f_0' : f_0,
'chirprate' : chirprate,
'B_IF' : B_IF,
'nsamples' : nsamples,
'npulses' : npulses,
'delta_r' : delta_r,
'pos' : pos,
'R_c' : R_c,
't' : t,
'k_r' : k_r,
'n_hat' : n_hat
}
return(phs, platform)
##############################################################################
# #
# This function reads in the DIRSIG xml data as well as the envi header #
# file from the user supplied directory. The phs and a Python dictionary #
# compatible with ritsar are returned to the function caller. #
# #
##############################################################################
def get(root, entry):
for entry in root.iter(entry):
out = entry.text
return(out)
def getWildcard(directory, char):
for file in os.listdir(directory):
if fnmatch(file, char):
fname = directory+file
return(fname)
def DIRSIG(directory):
from spectral.io import envi
#get phase history
phs_fname = getWildcard(directory, '*.hdr')
phs = envi.open(phs_fname).load(dtype = np.complex128)
phs = np.squeeze(phs)
#get platform geometry
ppd_fname = getWildcard(directory, '*.ppd')
tree = ET.parse(ppd_fname)
root = tree.getroot()
pos_dirs = []
for children in root.iter('point'):
pos_dirs.append(float(children[0].text))
pos_dirs.append(float(children[1].text))
pos_dirs.append(float(children[2].text))
pos_dirs = np.asarray(pos_dirs).reshape([len(pos_dirs)/3,3])
t_dirs=[]
for children in root.iter('datetime'):
t_dirs.append(float(children.text))
t_dirs = np.asarray(t_dirs)
#get platform system paramters
platform_fname = getWildcard(directory, '*.platform')
tree = ET.parse(platform_fname)
root = tree.getroot()
#put metadata into a dictionary
metadata = root[0]
keys = []; vals = []
for children in metadata:
keys.append(children[0].text)
vals.append(children[1].text)
metadata = dict(zip(keys,vals))
#obtain key parameters
c = 299792458.0
nsamples = int(phs.shape[1])
npulses = int(phs.shape[0])
vp = float(get(root, 'speed'))
delta_t = float(get(root, 'delta'))
t = np.linspace(-nsamples/2, nsamples/2, nsamples)*delta_t
prf = float(get(root, 'clockrate'))
chirprate = float(get(root, 'chirprate'))/pi
T_p = float(get(root, 'pulseduration'))
B = T_p*chirprate
B_IF = (t.max() - t.min())*chirprate
delta_r = c/(2*B_IF)
f_0 = float(get(root, 'center'))*1e9
freq = f_0+chirprate*t
omega = 2*pi*freq
k_r = 2*omega/c
T0 = float(get(root, 'min'))
T1 = float(get(root, 'max'))
#compute slowtime position
ti = np.linspace(0,1.0/prf*npulses, npulses)
x = np.array([np.interp(ti, t_dirs, pos_dirs[:,0])]).T
y = np.array([np.interp(ti, t_dirs, pos_dirs[:,1])]).T
z = np.array([np.interp(ti, t_dirs, pos_dirs[:,2])]).T
pos = np.hstack((x,y,z))
L = norm(pos[-1]-pos[0])
k_y = np.linspace(-npulses/2,npulses/2,npulses)*2*pi/L
#Vector to scene center at synthetic aperture center
if np.mod(npulses,2)>0:
R_c = pos[npulses/2]
else:
R_c = np.mean(
pos[npulses/2-1:npulses/2+1],
axis = 0)
#Derived Parameters
if np.mod(nsamples,2)==0:
T = np.arange(T0, T1+0*delta_t, delta_t)
else:
T = np.arange(T0, T1, delta_t)
#Mix signal
signal = np.zeros(phs.shape)+0j
for i in range(0,npulses,1):
r_0 = norm(pos[i])
tau_c = 2*r_0/c
ref = np.exp(-1j*(2*pi*f_0*(T-tau_c)+pi*chirprate*(T-tau_c)**2))
signal[i,:] = ref*phs[i,:]
platform = \
{
'f_0' : f_0,
'freq' : freq,
'chirprate' : chirprate,
'B' : B,
'B_IF' : B_IF,
'nsamples' : nsamples,
'npulses' : npulses,
'delta_r' : delta_r,
'delta_t' : delta_t,
'vp' : vp,
'pos' : pos,
'R_c' : R_c,
't' : t,
'k_r' : k_r,
'k_y' : k_y,
'metadata' : metadata
}
return(signal, platform)
|
Rick Kempa, a poet and essayist from Rock Springs, Wyoming, will be Mesa Verde National Park’s artist-in-residence from Sept. 3-15.
He has served as artist-in-residence at Grand Canyon National Park twice, and at Hubbell Trading Post in Ganado, Arizona, according to a press release from Mesa Verde public information officer Cristy Brown.
His most recent books are the anthologies “Going Down Grand: Poems from the Canyon” and “On Foot: Grand Canyon Backpacking Stories,” and the poetry collection “Ten Thousand Voices.” He recently retired after teaching at Western Wyoming College and is spending more time writing, Brown said.
Kempa plans two free programs, in the park and at the Mancos Public Library.
Saturday workshop: Kempa will offer a writing workshop at Mancos Public Library on Sept. 8 from 9:30 a.m. to 12:30 p.m. The workshop, he says, “is an open invitation for writers to spend a morning together writing, walking, conversing and inspiring each other.” Participants should “bring whatever tools for seeing and writing that you may have: a pocket notebook, binoculars, perhaps even a magnifying glass.” They are also welcome to bring a sampling of recent work to share. Space is limited to 15 people. To reserve a place, call Teri at 970-529-4642.
Evening presentation: On Sept. 13, from 6:30 to 8 p.m., Kempa will share stories, poems and pictures from his backpacking trips on the Colorado Plateau and from his time at Mesa Verde. It will be at the Far View Lodge and is open to the public.
|
from six.moves import range
import struct
def body_and_tail(data):
l = len(data)
nblocks = l // 16
tail = l % 16
if nblocks:
return struct.unpack_from('qq' * nblocks, data), struct.unpack_from('b' * tail, data, -tail), l
else:
return tuple(), struct.unpack_from('b' * tail, data, -tail), l
def rotl64(x, r):
# note: not a general-purpose function because it leaves the high-order bits intact
# suitable for this use case without wasting cycles
mask = 2 ** r - 1
rotated = (x << r) | ((x >> 64 - r) & mask)
return rotated
def fmix(k):
# masking off the 31s bits that would be leftover after >> 33 a 64-bit number
k ^= (k >> 33) & 0x7fffffff
k *= 0xff51afd7ed558ccd
k ^= (k >> 33) & 0x7fffffff
k *= 0xc4ceb9fe1a85ec53
k ^= (k >> 33) & 0x7fffffff
return k
INT64_MAX = int(2 ** 63 - 1)
INT64_MIN = -INT64_MAX - 1
INT64_OVF_OFFSET = INT64_MAX + 1
INT64_OVF_DIV = 2 * INT64_OVF_OFFSET
def truncate_int64(x):
if not INT64_MIN <= x <= INT64_MAX:
x = (x + INT64_OVF_OFFSET) % INT64_OVF_DIV - INT64_OVF_OFFSET
return x
def _murmur3(data):
h1 = h2 = 0
c1 = -8663945395140668459 # 0x87c37b91114253d5
c2 = 0x4cf5ad432745937f
body, tail, total_len = body_and_tail(data)
# body
for i in range(0, len(body), 2):
k1 = body[i]
k2 = body[i + 1]
k1 *= c1
k1 = rotl64(k1, 31)
k1 *= c2
h1 ^= k1
h1 = rotl64(h1, 27)
h1 += h2
h1 = h1 * 5 + 0x52dce729
k2 *= c2
k2 = rotl64(k2, 33)
k2 *= c1
h2 ^= k2
h2 = rotl64(h2, 31)
h2 += h1
h2 = h2 * 5 + 0x38495ab5
# tail
k1 = k2 = 0
len_tail = len(tail)
if len_tail > 8:
for i in range(len_tail - 1, 7, -1):
k2 ^= tail[i] << (i - 8) * 8
k2 *= c2
k2 = rotl64(k2, 33)
k2 *= c1
h2 ^= k2
if len_tail:
for i in range(min(7, len_tail - 1), -1, -1):
k1 ^= tail[i] << i * 8
k1 *= c1
k1 = rotl64(k1, 31)
k1 *= c2
h1 ^= k1
# finalization
h1 ^= total_len
h2 ^= total_len
h1 += h2
h2 += h1
h1 = fmix(h1)
h2 = fmix(h2)
h1 += h2
return truncate_int64(h1)
try:
from cassandra.cmurmur3 import murmur3
except ImportError:
murmur3 = _murmur3
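# --- Hedged self-check (not part of the original module) --------------------
# Hashes an arbitrary routing key the way Cassandra's Murmur3Partitioner
# would; the key bytes below are illustrative only.
if __name__ == '__main__':
    token = murmur3(b'user:42')
    print(token)
    # Tokens are signed 64-bit integers.
    assert INT64_MIN <= token <= INT64_MAX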
|
Everyone knows that old joke about country music: If you play a country song backwards, you get your wife back, you get your dog back and you get your truck back. But just because a topic is well-worn doesn’t mean there isn’t anything compelling left to say, and country singer-songwriter Ward Davis has a way of making even the most rutted cliches feel different. In his track “Good and Drunk,” Davis sings about divorce, kicking off with the lines “drove myself to the lawyer's today / and picked up a pen and signed my wife away,” but rather than looking back at an ideal woman or red-lipsticked cheat, the song is pure aftermath lament, with the wife garnering just the one mention. That more subtle songwriting is typical of Davis, who has penned lyrics for country notables Willie Nelson, Merle Haggard, Trace Adkins, Sammy Kershaw and more, all while releasing his own music, most recently the four-track EP Asunder (Ward Davis Music, 2018). For fans of classic, heart-aching country, Davis’ show at Neurolux is sure to be worth the $20 cover charge.
|
#-*- coding: utf-8 -*-
"""
Laser management.
Qudi is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Qudi is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Qudi. If not, see <http://www.gnu.org/licenses/>.
Copyright (c) the Qudi Developers. See the COPYRIGHT.txt file at the
top-level directory of this distribution and at <https://github.com/Ulm-IQO/qudi/>
"""
from qtpy import QtCore
import numpy as np
import time
from logic.generic_logic import GenericLogic
from interface.simple_laser_interface import ControlMode, ShutterState, LaserState
class LaserLogic(GenericLogic):
""" Logic module agreggating multiple hardware switches.
"""
_modclass = 'laser'
_modtype = 'logic'
_in = {'laser': 'SimpleLaserInterface'}
_out = {'laserlogic': 'LaserLogic'}
sigUpdate = QtCore.Signal()
def on_activate(self, e):
""" Prepare logic module for work.
@param object e: Fysom state change notification
"""
self._laser = self.get_in_connector('laser')
self.stopRequest = False
self.bufferLength = 100
self.data = {}
# waiting time between queries in milliseconds
self.queryInterval = 100
# delay timer for querying laser
self.queryTimer = QtCore.QTimer()
self.queryTimer.setInterval(self.queryInterval)
self.queryTimer.setSingleShot(True)
self.queryTimer.timeout.connect(self.check_laser_loop, QtCore.Qt.QueuedConnection)
# get laser capabilities
self.laser_shutter = self._laser.get_shutter_state()
self.laser_power_range = self._laser.get_power_range()
self.laser_extra = self._laser.get_extra_info()
self.laser_state = self._laser.get_laser_state()
self.laser_can_turn_on = self.laser_state.value <= LaserState.ON.value
self.laser_can_power = ControlMode.POWER in self._laser.allowed_control_modes()
self.laser_can_current = ControlMode.CURRENT in self._laser.allowed_control_modes()
if ControlMode.MIXED in self._laser.allowed_control_modes():
self.laser_can_power = True
self.laser_can_current = True
self.has_shutter = self._laser.get_shutter_state() != ShutterState.NOSHUTTER
self.init_data_logging()
#QtCore.QTimer.singleShot(100, self.start_query_loop)
self.start_query_loop()
def on_deactivate(self, e):
""" Deactivate modeule.
@param object e: Fysom state change notification
"""
self.stop_query_loop()
@QtCore.Slot()
def check_laser_loop(self):
""" """
if self.stopRequest:
self.stop()
self.stopRequest = False
return
self.laser_state = self._laser.get_laser_state()
self.laser_shutter = self._laser.get_shutter_state()
self.laser_power = self._laser.get_power()
self.laser_current = self._laser.get_current()
self.laser_temps = self._laser.get_temperatures()
for k in self.data:
self.data[k] = np.roll(self.data[k], -1)
self.data['power'][-1] = self.laser_power
self.data['current'][-1] = self.laser_current
self.data['time'][-1] = time.time()
for k,v in self.laser_temps.items():
self.data[k][-1] = v
self.queryTimer.start(self.queryInterval)
self.sigUpdate.emit()
@QtCore.Slot()
def start_query_loop(self):
""" start the loop """
self.run()
self.queryTimer.start(self.queryInterval)
@QtCore.Slot()
def stop_query_loop(self):
""" stop loop """
self.stopRequest = True
for i in range(10):
if not self.stopRequest:
return
QtCore.QCoreApplication.processEvents()
time.sleep(self.queryInterval/1000)
def init_data_logging(self):
""" """
self.data['current'] = np.zeros(self.bufferLength)
self.data['power'] = np.zeros(self.bufferLength)
self.data['time'] = np.ones(self.bufferLength) * time.time()
temps = self._laser.get_temperatures()
for name in temps:
self.data[name] = np.zeros(self.bufferLength)
@QtCore.Slot(ControlMode)
def set_control_mode(self, mode):
""" """
if mode in self._laser.allowed_control_modes():
if mode == ControlMode.POWER:
self.laser_power = self._laser.get_power()
self._laser.set_power(self.laser_power)
self._laser.set_control_mode(mode)
elif mode == ControlMode.CURRENT:
self.laser_current = self._laser.get_current()
self._laser.set_current(self.laser_current)
self._laser.set_control_mode(mode)
@QtCore.Slot(float)
def set_laser_state(self, state):
if state and self.laser_state == LaserState.OFF:
self._laser.on()
if not state and self.laser_state == LaserState.ON:
self._laser.off()
@QtCore.Slot(bool)
def set_shutter_state(self, state):
if state and self.laser_shutter == ShutterState.CLOSED:
self._laser.set_shutter_state(ShutterState.OPEN)
if not state and self.laser_shutter == ShutterState.OPEN:
self._laser.set_shutter_state(ShutterState.CLOSED)
@QtCore.Slot(float)
def set_power(self, power):
self._laser.set_power(power)
@QtCore.Slot(float)
def set_current(self, current):
self._laser.set_current(current)
|
First, locate the relevant settings: open the Start menu, type Settings, and then, inside Settings, find and click Update & Security.
Once you are on the Windows Update screen, follow the steps below.
To prevent Windows Update from installing updates during working hours, we recommend setting “Active Hours” to match your operating hours.
Inside “Restart options”, switch on “Show more notifications” so that Windows prompts you before it restarts.
Inside Advanced options, configure “When updates are installed”. By default this is set to Semi-Annual Channel (Targeted); change it to Semi-Annual Channel so that you are not included in the first release of a feature update and do not risk running into early Windows bugs.
If you would prefer to delay updates until a non-working day, Advanced options also lets you defer updates for up to 7 days.
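For scripted environments, the Active Hours values can usually also be read from the registry. The short Python sketch below is only a hedged illustration: the key path and the value names (ActiveHoursStart, ActiveHoursEnd) are assumptions based on common Windows 10 builds and may differ on your machine.
# Read-only sketch; requires Python 3 on Windows and assumes the key exists.
import winreg
KEY = r"SOFTWARE\Microsoft\WindowsUpdate\UX\Settings"  # assumed location
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, KEY) as key:
    for name in ("ActiveHoursStart", "ActiveHoursEnd"):
        try:
            value, _ = winreg.QueryValueEx(key, name)
            print(name, "=", value)  # hours in 24-hour format
        except FileNotFoundError:
            print(name, "is not set on this machine")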
|