"""This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import csv
import logging
import sys
import numpy as np
import pandas
import thinkplot
import thinkstats2
def ReadData(filename='PEP_2012_PEPANNRES_with_ann.csv'):
"""Reads filename and returns populations in thousands
filename: string
returns: pandas Series of populations in thousands
"""
df = pandas.read_csv(filename, header=None, skiprows=2,
encoding='iso-8859-1')
populations = df[7]
populations.replace(0, np.nan, inplace=True)
return populations.dropna()
def MakeFigures():
"""Plots the CDF of populations in several forms.
On a log-log scale the tail of the CCDF looks like a straight line,
which suggests a Pareto distribution, but that turns out to be misleading.
On a log-x scale the distribution has the characteristic sigmoid of
a lognormal distribution.
The normal probability plot of log(sizes) confirms that the data fit the
lognormal model very well.
Many phenomena that have been described with Pareto models can be described
as well, or better, with lognormal models.
"""
pops = ReadData()
print('Number of cities/towns', len(pops))
log_pops = np.log10(pops)
cdf = thinkstats2.Cdf(pops, label='data')
cdf_log = thinkstats2.Cdf(log_pops, label='data')
# pareto plot
xs, ys = thinkstats2.RenderParetoCdf(xmin=5000, alpha=1.4, low=0, high=1e7)
thinkplot.Plot(np.log10(xs), 1-ys, label='model', color='0.8')
thinkplot.Cdf(cdf_log, complement=True)
thinkplot.Config(xlabel='log10 population',
ylabel='CCDF',
yscale='log')
thinkplot.Save(root='populations_pareto')
# lognormal plot
thinkplot.PrePlot(cols=2)
mu, sigma = log_pops.mean(), log_pops.std()
xs, ps = thinkstats2.RenderNormalCdf(mu, sigma, low=0, high=8)
thinkplot.Plot(xs, ps, label='model', color='0.8')
thinkplot.Cdf(cdf_log)
thinkplot.Config(xlabel='log10 population',
ylabel='CDF')
thinkplot.SubPlot(2)
thinkstats2.NormalProbabilityPlot(log_pops, label='data')
thinkplot.Config(xlabel='z',
ylabel='log10 population',
xlim=[-5, 5])
thinkplot.Save(root='populations_normal')
def main():
thinkstats2.RandomSeed(17)
MakeFigures()
if __name__ == "__main__":
main()
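# ---------------------------------------------------------------------------
# Standalone illustration, not part of the original populations.py module: the
# two checks described in the MakeFigures docstring, run on synthetic lognormal
# data using only numpy/scipy (no thinkstats2/thinkplot helpers).  All
# parameters are illustrative; call the function manually to see the numbers.
def _lognormal_vs_pareto_sketch(n=10000, mu=3.5, sigma=0.7, seed=17):
    from scipy import stats
    rng = np.random.RandomState(seed)
    sizes = 10 ** rng.normal(loc=mu, scale=sigma, size=n)  # synthetic "populations"
    log_sizes = np.log10(sizes)
    # Pareto-style check: is log10(CCDF) roughly linear in log10(x) in the tail?
    xs = np.sort(sizes)
    ccdf = 1.0 - np.arange(1, n + 1) / float(n)
    tail = slice(int(0.9 * n), -1)  # top decile, drop the last point where CCDF == 0
    fit = stats.linregress(np.log10(xs[tail]), np.log10(ccdf[tail]))
    print('tail log-log fit r^2 = %.3f (high values look "Pareto-like")' % fit[2] ** 2)
    # Lognormal check: how straight is the normal probability plot of log10(sizes)?
    _, (slope, intercept, r) = stats.probplot(log_sizes, dist='norm')
    print('normal probability plot r = %.3f' % r)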
|
Our philosophy is to provide the most comprehensive and personalized dental care you truly deserve.
Efficiency and comfort are our prime concerns.
At Clinica Dentista we pride ourselves on the quality of service and genuine care we provide to our patients. Our philosophy is to perform minimally invasive dentistry whenever possible - that means we want to do everything we can to preserve your natural teeth and give you the most comfortable experience possible in the dental chair.
We provide a comprehensive range of dental treatment to cater to all your needs.
Do you offer jacket crown services? |
# coding: utf-8
import copy
import json
from mock import Mock, PropertyMock, patch
from tests.helpers import Handler, BaseTestCase, ServerMock, get_free_port, DatabaseMock
from core.exceptions import CreationException, ConnectionError, \
SessionException, TimeoutException
from core.config import setup_config, config
from flask import Flask
class CommonCommandsTestCase(BaseTestCase):
webdriver_server = None
vmmaster_agent = None
vnc_server = None
host = 'localhost'
@classmethod
def setUpClass(cls):
setup_config("data/config_openstack.py")
body = {
"sessionId": None,
"desiredCapabilities": {
"platform": "some_platform",
"browserName": "firefox",
"version": "",
"javascriptEnabled": True
}
}
session_request_body = json.dumps(body)
session_request_headers = {
'content-length': '%s' % len(session_request_body),
'accept-encoding': 'identity',
'Connection': 'close',
'accept': 'application/json',
'user-agent': 'Python-urllib/2.7',
'host': '127.0.0.1:9000',
'content-type': 'application/json;charset=UTF-8',
}
cls.request = Mock()
cls.request.method = "POST"
cls.request.path = "/wd/hub/session"
cls.request.headers = dict()
cls.request.headers.update(session_request_headers)
cls.request.data = session_request_body
cls.webdriver_server = ServerMock(cls.host, get_free_port())
cls.webdriver_server.start()
cls.vmmaster_agent = ServerMock(cls.host, get_free_port())
cls.vmmaster_agent.start()
cls.vnc_server = ServerMock(cls.host, get_free_port())
cls.vnc_server.start()
cls.app = Flask(__name__)
cls.app.database = None
cls.app.sessions = None
cls.app.database_task_queue = Mock()
cls.app.pool = Mock()
def setUp(self):
self.ctx = self.app.test_request_context()
self.ctx.push()
with patch(
'flask.current_app.database', DatabaseMock()
), patch(
'flask.current_app.sessions', Mock()
):
from core.db.models import Session, Provider, Endpoint
self.session = Session('origin_1')
self.session.name = "session1"
provider = Provider(name='noname', url='nourl')
vm = Endpoint(Mock(), '', provider)
vm.name = 'vm1'
vm.ip = self.host
vm.ports = {
'selenium': self.webdriver_server.port,
'agent': self.vmmaster_agent.port,
'vnc': self.vnc_server.port
}
self.session.endpoint = vm
self.session.run()
from vmmaster.webdriver import commands
self.commands = commands
def tearDown(self):
with patch(
'flask.current_app.sessions', Mock()
), patch(
'flask.current_app.database', Mock()
):
self.session._close()
self.ctx.pop()
@classmethod
def tearDownClass(cls):
cls.webdriver_server.stop()
cls.vmmaster_agent.stop()
cls.vnc_server.stop()
del cls.app
def ping_vm_mock(arg, ports=None):
yield None
def selenium_status_mock(arg1, arg2, arg3):
yield None
@patch(
'vmmaster.webdriver.commands.start_selenium_session', new=Mock(
__name__="start_selenium_session",
side_effect=selenium_status_mock
)
)
@patch(
'vmmaster.webdriver.commands.ping_endpoint_before_start_session',
new=Mock(__name__="ping_endpoint_before_start_session", side_effect=ping_vm_mock)
)
@patch(
'vmmaster.webdriver.helpers.is_request_closed',
Mock(return_value=False)
)
@patch('flask.current_app.database', Mock())
class TestStartSessionCommands(CommonCommandsTestCase):
def setUp(self):
super(TestStartSessionCommands, self).setUp()
self.session.dc = Mock(__name__="dc")
def test_start_session_when_selenium_status_failed(self):
request = copy.copy(self.request)
def make_request_mock(arg1, arg2):
yield 200, {}, json.dumps({'status': 1})
with patch(
'core.db.models.Session.make_request', Mock(
__name__="make_request",
side_effect=make_request_mock
)
):
self.assertRaises(
CreationException, self.commands.start_session,
request, self.session
)
@patch(
'vmmaster.webdriver.helpers.is_session_timeouted',
Mock(return_value=True)
)
@patch(
'requests.request', Mock(side_effect=Mock(
__name__="request",
return_value=(200, {}, json.dumps({'status': 0}))))
)
def test_start_session_when_session_was_timeouted(self):
request = copy.copy(self.request)
self.assertRaises(TimeoutException, self.commands.start_session,
request, self.session)
@patch(
'vmmaster.webdriver.helpers.is_session_closed',
Mock(return_value=True)
)
@patch(
'requests.request', Mock(side_effect=Mock(
__name__="request",
return_value=(200, {}, json.dumps({'status': 0}))))
)
def test_start_session_when_session_was_closed(self):
request = copy.copy(self.request)
self.assertRaises(SessionException, self.commands.start_session,
request, self.session)
@patch('flask.current_app.database', Mock())
class TestStartSeleniumSessionCommands(CommonCommandsTestCase):
@patch(
'vmmaster.webdriver.helpers.is_request_closed',
Mock(return_value=False)
)
@patch("vmmaster.webdriver.commands.ping_endpoint_before_start_session", Mock())
def test_session_response_success(self):
request = copy.deepcopy(self.request)
request.headers.update({"reply": "200"})
status, headers, body = self.commands.start_selenium_session(
request, self.session
)
self.assertEqual(status, 200)
request_headers = dict((key.lower(), value) for key, value in
request.headers.iteritems())
for key, value in headers.iteritems():
if key == 'server' or key == 'date':
continue
self.assertDictContainsSubset({key: value}, request_headers)
self.assertEqual(body, request.data)
@patch(
'vmmaster.webdriver.helpers.is_request_closed',
Mock(return_value=False)
)
@patch("vmmaster.webdriver.commands.ping_endpoint_before_start_session", Mock())
def test_session_response_fail(self):
request = copy.deepcopy(self.request)
request.headers.update({"reply": "500"})
def start_selenium_session(req):
for result in self.commands.start_selenium_session(
req, self.session
):
pass
self.assertRaises(CreationException, start_selenium_session, request)
@patch(
'vmmaster.webdriver.helpers.is_request_closed',
Mock(return_value=True)
)
def test_start_selenium_session_when_connection_closed(self):
self.session.closed = True
request = copy.deepcopy(self.request)
request.headers.update({"reply": "200"})
self.assertRaises(
ConnectionError, self.commands.start_selenium_session,
request, self.session
)
@patch(
'vmmaster.webdriver.helpers.is_request_closed',
Mock(return_value=False)
)
@patch(
'vmmaster.webdriver.helpers.is_session_closed',
Mock(return_value=True)
)
def test_start_selenium_session_when_session_closed(self):
self.session.closed = True
request = copy.deepcopy(self.request)
request.headers.update({"reply": "200"})
self.assertRaises(
SessionException, self.commands.start_selenium_session,
request, self.session
)
@patch(
'vmmaster.webdriver.helpers.is_request_closed',
Mock(return_value=False)
)
@patch(
'vmmaster.webdriver.helpers.is_session_timeouted',
Mock(return_value=True)
)
def test_start_selenium_session_when_session_timeouted(self):
self.session.closed = True
request = copy.deepcopy(self.request)
request.headers.update({"reply": "200"})
self.assertRaises(
TimeoutException, self.commands.start_selenium_session,
request, self.session
)
@patch(
'vmmaster.webdriver.helpers.is_request_closed',
Mock(return_value=False)
)
@patch('flask.current_app.database', Mock())
class TestCheckVmOnline(CommonCommandsTestCase):
def setUp(self):
super(TestCheckVmOnline, self).setUp()
config.PING_TIMEOUT = 0
self._handler_get = Handler.do_GET
self.response_body = "{}"
self.response_headers = {
'header': 'value',
'content-length': len(self.response_body)
}
def tearDown(self):
super(TestCheckVmOnline, self).tearDown()
Handler.do_GET = self._handler_get
def test_check_vm_online_ok(self):
def do_GET(handler):
handler.send_reply(200, self.response_headers,
body=self.response_body)
Handler.do_GET = do_GET
result = self.commands.ping_endpoint_before_start_session(self.session, ports=[
self.webdriver_server.port, self.vmmaster_agent.port, self.vnc_server.port
])
self.assertTrue(result)
def test_check_vm_online_ping_failed_timeout(self):
self.assertRaises(
CreationException, self.commands.ping_endpoint_before_start_session, self.session, config.DEFAULT_PORTS
)
def test_check_vm_online_ping_failed_when_session_closed(self):
config.PING_TIMEOUT = 2
self.session.closed = True
self.assertRaises(
CreationException, self.commands.ping_endpoint_before_start_session, self.session, config.DEFAULT_PORTS
)
def test_check_vm_online_status_failed(self):
def do_GET(handler):
handler.send_reply(500, self.response_headers,
body=self.response_body)
Handler.do_GET = do_GET
request = copy.deepcopy(self.request)
def selenium_status(req):
for result in self.commands.selenium_status(
req, self.session
):
pass
self.assertRaises(CreationException, selenium_status, request)
def test_selenium_status_failed_when_session_closed(self):
self.session.closed = True
def do_GET(handler):
handler.send_reply(200, self.response_headers,
body=self.response_body)
Handler.do_GET = do_GET
request = copy.deepcopy(self.request)
def selenium_status(req):
for result in self.commands.selenium_status(
req, self.session
):
pass
self.assertRaises(CreationException, selenium_status, request)
class TestGetDesiredCapabilities(BaseTestCase):
def setUp(self):
self.body = {
"sessionId": None,
"desiredCapabilities": {
"platform": "some_platform",
}
}
self.session_request_headers = {
'content-length': '%s',
'accept-encoding': 'identity',
'Connection': 'close',
'accept': 'application/json',
'user-agent': 'Python-urllib/2.7',
'host': '127.0.0.1:9000',
'content-type': 'application/json;charset=UTF-8',
}
self.request = Mock()
self.request.method = "POST"
self.request.path = "/wd/hub/session"
self.request.headers = dict()
from vmmaster.webdriver import commands
self.commands = commands
def test_platform(self):
self.session_request_headers = {
'content-length': '%s' % len(self.body),
}
self.request.headers.update(self.session_request_headers)
self.request.data = json.dumps(self.body)
dc = self.commands.get_desired_capabilities(self.request)
self.assertIsInstance(dc["platform"], unicode)
self.assertEqual(self.body["desiredCapabilities"]["platform"],
dc["platform"])
def test_name(self):
self.body['desiredCapabilities'].update({
"name": "some_name"
})
self.session_request_headers = {
'content-length': '%s' % len(self.body),
}
self.request.headers.update(self.session_request_headers)
self.request.data = json.dumps(self.body)
dc = self.commands.get_desired_capabilities(self.request)
self.assertIsInstance(dc["name"], unicode)
self.assertEqual(self.body["desiredCapabilities"]["name"], dc["name"])
def test_no_name(self):
self.session_request_headers = {
'content-length': '%s' % len(self.body),
}
self.request.headers.update(self.session_request_headers)
self.request.data = json.dumps(self.body)
dc = self.commands.get_desired_capabilities(self.request)
self.assertEqual(dc.get("name", None), None)
def test_take_screenshot_bool(self):
self.body['desiredCapabilities'].update({
"takeScreenshot": True
})
self.session_request_headers = {
'content-length': '%s' % len(self.body),
}
self.request.headers.update(self.session_request_headers)
self.request.data = json.dumps(self.body)
dc = self.commands.get_desired_capabilities(self.request)
self.assertTrue(dc["takeScreenshot"])
def test_take_screenshot_some_string(self):
self.body['desiredCapabilities'].update({
"takeScreenshot": "asdf"
})
self.session_request_headers = {
'content-length': '%s' % len(self.body),
}
self.request.headers.update(self.session_request_headers)
self.request.data = json.dumps(self.body)
dc = self.commands.get_desired_capabilities(self.request)
self.assertTrue(dc["takeScreenshot"])
def test_take_screenshot_empty_string(self):
self.body['desiredCapabilities'].update({
"takeScreenshot": ""
})
self.session_request_headers = {
'content-length': '%s' % len(self.body),
}
self.request.headers.update(self.session_request_headers)
self.request.data = json.dumps(self.body)
dc = self.commands.get_desired_capabilities(self.request)
self.assertFalse(dc["takeScreenshot"])
class TestRunScript(CommonCommandsTestCase):
def setUp(self):
super(TestRunScript, self).setUp()
config.DEFAULT_AGENT_PORT = self.vmmaster_agent.port
self.response_body = "some_body"
def tearDown(self):
super(TestRunScript, self).tearDown()
@patch('flask.current_app.database', Mock())
def test_run_script(self):
def run_script_through_websocket_mock(*args, **kwargs):
return 200, {}, 'some_body'
with patch('vmmaster.webdriver.commands.run_script_through_websocket',
run_script_through_websocket_mock):
response = self.commands.run_script(self.request, self.session)
self.assertEqual(200, response[0])
self.assertEqual(self.response_body, response[2])
class TestLabelCommands(CommonCommandsTestCase):
def test_label(self):
request = copy.deepcopy(self.request)
label = "step-label"
label_id = 1
request.data = json.dumps({"label": label})
with patch('core.db.models.Session.current_log_step',
PropertyMock(return_value=Mock(id=label_id))):
status, headers, body = self.commands.vmmaster_label(
request, self.session
)
self.assertEqual(status, 200)
json_body = json.loads(body)
self.assertEqual(json_body["value"], label)
self.assertEqual(json_body["labelId"], label_id)
|
Its main consequences are as follows: waste of man hours, waste of energy, waste of space on site, and the possibility of material waste during transportation; (5) Processing: related to the nature of the processing (conversion) activity, which could only be avoided by changing construction technology.
SKB believes wholeheartedly in the recycling and processing of waste. SKB Recycling's system allows for the efficient, economical recovery of numerous resources; diverting valuable materials from the waste stream saves precious landfill airspace and reduces the need for resources.
Site waste – it's criminal: a simple guide to Site Waste Management Plans. Introduction: 10m tonnes of construction products are wasted every year, at a cost of £1.5 billion. Much of this site waste is harmful to the environment and costly to your business. Site Waste ... and manage waste appropriately in line with their duty of care.
For over 25 years, RRT Design & Construction has provided our clients with the most advanced technologies. When it comes to solid waste processing and recycling…we're all about helping build your business.
In 2015, Bechtel completed construction of Watts Bar Unit 2, a Tennessee Valley Authority nuclear power plant that was placed on hold some 30 years ago. Unit 2 at Watts Bar in Spring City, Tennessee, was about 60 percent finished in 1985 when construction was halted due to a projected decrease in demand for electricity.
Landfill gas (LFG) is generated during the natural process of decomposition of organic material contained in solid waste landfills. It's a composition of about 50 percent methane and 50 percent carbon dioxide and water vapor. |
from django.db import models
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
class Photo(models.Model):
author = models.ForeignKey(User, related_name='photos')
title = models.CharField(max_length=127, blank=True)
datetime = models.DateTimeField(auto_now_add=True)
image = models.ImageField(upload_to="images")
def __unicode__(self):
return '%s, %s, %s, %s' % (self.author, self.title, self.datetime, self.image)
class Party(models.Model):
author = models.ForeignKey(User, related_name='parties')
datetime = models.DateTimeField(auto_now_add=True)
title = models.CharField(max_length=127)
subscription = models.TextField(blank=True)
date = models.DateField(blank=True, null=True)
time = models.TimeField(blank=True, null=True)
place = models.CharField(max_length=511, blank=True)
photos = models.ManyToManyField(Photo, through='PartyPhoto', blank=True, null=True)
def __unicode__(self):
return '%s, %s, %s, %s, %s, %s, %s, %s' % (self.author, self.datetime, self.title, self.subscription, self.date, self.time, self.place, self.photos)
def get_absolute_url(self):
return reverse('parties:party-detail', args=[str(self.id)])
class PartyPhoto(models.Model):
photo = models.ForeignKey(Photo)
party = models.ForeignKey(Party)
class Message(models.Model):
author = models.ForeignKey(User, related_name='messages')
datetime = models.DateTimeField(auto_now_add=True)
body = models.CharField(max_length=2047)
party = models.ForeignKey(Party, related_name='messages')
def __unicode__(self):
return '%s, %s, %s, %s' % (self.author, self.datetime, self.body, self.party)
def get_absolute_url(self):
return reverse('parties:message-detail', args=[str(self.id)])
class Willingness(models.Model):
author = models.ForeignKey(User, related_name='willingnesses')
participation = models.BooleanField()
invitation = models.BooleanField()
host = models.BooleanField()
vegetarian = models.BooleanField()
party = models.ForeignKey(Party, related_name='willingnesses')
def __unicode__(self):
return '%s, %s, %s, %s, %s, %s' % (self.author, self.participation, self.invitation, self.host, self.vegetarian, self.party)
def get_absolute_url(self):
return reverse('parties:willingness-detail', args=[str(self.id)])
|
Cartridges act like blood in a printer. Printers, among the most important computer accessories, do not deliver productive results without quality cartridges. Speaking of quality cartridges, Canon ink cartridges are one of the most reliable and durable brands available on the market; Canon is one of the market leaders in the printing industry, and serving consumers over a long period of time reflects the trust in its name.
Canon ink cartridges are well suited to all your official requirements. From business cards to brochures, from official mail to official documents, all can show clear and visible results by using these branded cartridges. Suitable for all kinds of printers, these cartridges are available in black-and-white and color inks. By using these cartridges, ink wastage can be reduced to a great extent and the longevity of printers is increased. It can be said that these cartridges require a one-time but quality investment. Easy to refill and use, Canon ink cartridges are among the best ink cartridges available on the market at pocket-friendly prices. |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('spellweb', '0003_auto_20140920_0006'),
]
operations = [
migrations.CreateModel(
name='Attempt',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('when', models.DateTimeField(auto_now_add=True)),
('success', models.BooleanField(default=False)),
('learner', models.ForeignKey(to='spellweb.Learner')),
],
options={
'ordering': ['-when'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Word',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('level', models.IntegerField()),
('word', models.CharField(max_length=30)),
('source', models.CharField(default=b'OT', max_length=2, choices=[(b'EW', b'Essential Words'), (b'ER', b'NZCER'), (b'OT', b'OTHER')])),
('hint', models.CharField(max_length=30, null=True, blank=True)),
],
options={
'ordering': ['level', 'word'],
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='word',
unique_together=set([('source', 'word')]),
),
migrations.AddField(
model_name='attempt',
name='word',
field=models.ForeignKey(to='spellweb.Word'),
preserve_default=True,
),
]
|
Emily VanCamp attended ‘The Resident’ London photocall on Tuesday (April 10). She wore a green Andrew Gn Pre-Fall 2018 dress.
Emily VanCamp attended ‘The Resident’ London screening on Monday (April 9) at the Rosewood Hotel in London, England. She wore a pink Dolce & Gabbana Spring 2018 suit.
Emily VanCamp was in attendance along with Elizabeth Olsen yesterday at the Captain America: Civil War photocall at the Corinthia Hotel in London. |
#!/usr/bin/env python
from lxml import html
from lxml import etree
import requests
import sqlite3
import sys
import cgi
import cgitb
import datetime
import time
import subprocess
# global variables
speriod=(15*60)-1
dbname='/var/www/neuriolog.db'
ip='192.168.13.239'
endPoint='/both_tables.html'
myOption=''
# store the energy in the database
def log_energy(net,gen,cons):
conn=sqlite3.connect(dbname)
curs=conn.cursor()
print '2'
curs.execute('INSERT INTO energy values(?,?,?,?,?,?,?)', (datetime.datetime.now(),net[1],net[2],gen[1],gen[2],cons[1],cons[2]))
conn.commit()
conn.close()
def is_number(s):
try:
float(s)
return True
except ValueError:
pass
try:
import unicodedata
unicodedata.numeric(s)
return True
except (TypeError, ValueError):
pass
return False
# print the HTTP header
def printHTTPheader():
print "Content-type: text/html\n\n"
# print the HTML head section
# arguments are the page title and the table for the chart
def printHTMLHead(title, table):
print "<head>"
print " <title>"
print title
print " </title>"
print_graph_script(table)
print "</head>"
def getTimeMilli(inTime):
return(time.mktime( inTime.timetuple()) *1000)
def read_html():
data = []
interim = []
page = requests.get('http://' + ip + endPoint)
parser = etree.HTMLParser();
tree2 = etree.fromstring(page.text,parser)
#print etree.tostring(tree2)
walkAll = tree2.getiterator()
foundChannel = False;
count = 0
for elt in walkAll:
myText = elt.text;
if myText == 'Channel':
foundChannel = True
if foundChannel & (elt.tag == 'td') & (myText != None) :
#print elt.text, elt.tag
interim.append(elt.text)
count = count +1;
#print interim
if count == 6:
count = 0;
data.append(interim)
interim = []
#print data
retData = [ ['Name','Import','Export'],
['Net',data[2][2],data[2][3]],
['Gen',data[3][2],data[3][3]],
['Con',data[4][2],data[4][3]] ]
#print retData
return retData
# return a list of records from the database
def get_data(interval):
conn=sqlite3.connect(dbname)
curs=conn.cursor()
#print interval
if interval == None or int(interval) == -1:
curs.execute("SELECT * FROM energy")
else:
curs.execute("SELECT * FROM energy WHERE timestamp>datetime('now','-%s hours','localtime')" % interval)
rows=curs.fetchall()
conn.close()
return rows
# convert rows from database into a javascript table
def create_table(rows):
chart_table=""
smth=0.4
smth2=1-smth
smthh=smth*3600
old_data = None
old_value=0
old_time=0
for row in rows[:-1]:
if old_data != None:
delta=row[1]-old_data
aTime=datetime.datetime.strptime(row[0], "%Y-%m-%d %H:%M:%S")
dTime=aTime-old_time
value=delta/dTime.total_seconds()*smthh+old_value*smth2
if value > 8:
value=8
if value < -8:
value=-8
#rowstr="[new Date({0}), {1}],\n".format(datetime.datetime.strftime(aTime,"%Y,%m,%d,%H,%M,%S"),str(value))
rowstr="[new Date({0}), {1}, {2}],\n".format(getTimeMilli(aTime),str(row[1]),str(value))
chart_table+=rowstr
old_value=value
old_data=row[1]
old_time=datetime.datetime.strptime(row[0], "%Y-%m-%d %H:%M:%S")
row=rows[-1]
delta=row[1]-old_data
aTime=datetime.datetime.strptime(row[0], "%Y-%m-%d %H:%M:%S")
dTime=aTime-old_time
value=delta/dTime.total_seconds()*3600*0.1+old_value*0.9
#rowstr="[new Date({0}), {1}]\n".format(getTimeMilli(aTime),str(value))
rowstr="[new Date({0}), {1}, {2}]\n".format(getTimeMilli(aTime),str(row[1]),str(value))
#rowstr="['{0}', {1}]\n".format(str(row[0]),str(value))
chart_table+=rowstr
#print chart_table
return chart_table
# print the javascript to generate the chart
# pass the table generated from the database info
def print_graph_script(table):
# google chart snippet
#data.setColumnProperty(1, 'type', 'date');
#data.setColumnProperty(2, 'type', 'number');
chart_code="""
<script type="text/javascript" src="https://www.google.com/jsapi"></script>
<script type="text/javascript">
google.load("visualization", "1", {packages:["corechart"]});
google.setOnLoadCallback(drawChart);
function drawChart() {
var data = google.visualization.arrayToDataTable([ ['Time', 'Energy(lhs)', 'Power(rhs)'], %s ]);
data.setColumnProperty(0,'type','datetime');
data.setColumnProperty(1,'type','number');
data.setColumnProperty(2,'type','number');
var options = {
title: 'Energy/Power',
vAxes: { 0: {title: 'KWH'},
1: {title: 'KWatts' }},
hAxis: { title: 'Time', format: 'M/d/yy HH:mm', gridlines:{ color:'#555555', count: 10}},
series: {0: {targetAxisIndex:0},
1: {targetAxisIndex:1}}
};
var chart = new google.visualization.LineChart(document.getElementById('chart_div'));
chart.draw(data, options);
}
</script>"""
print chart_code % (table)
# print the div that contains the graph
def show_graph():
print "<h2>Energy(KWH)/Power(KW) Chart</h2>"
print '<div id="chart_div" style="width: 900px; height: 500px;"></div>'
# connect to the db and show some stats
# argument option is the number of hours
def show_stats(option):
conn=sqlite3.connect(dbname)
curs=conn.cursor()
if option is None or int(option) == -1:
option = str(240000)
#curs.execute("SELECT * FROM energy WHERE timestamp>datetime('now','-%s hours','localtime')" % interval)
curs.execute("SELECT timestamp,max(energy) FROM energy WHERE timestamp>datetime('now','-%s hour','localtime') AND timestamp<=datetime('now','localtime')" % option)
rowmax=curs.fetchone()
rowstrmax="{0}   {1}KWH".format(str(rowmax[0]),str(rowmax[1]))
# curs.execute("SELECT timestamp,min(temp) FROM temps WHERE timestamp>datetime('now','-%s hour') AND timestamp<=datetime('now')" % option)
curs.execute("SELECT timestamp,min(energy) FROM energy WHERE timestamp>datetime('now','-%s hour','localtime') AND timestamp<=datetime('now','localtime')" % option)
rowmin=curs.fetchone()
rowstrmin="{0}   {1}KWH".format(str(rowmin[0]),str(rowmin[1]))
# curs.execute("SELECT avg(temp) FROM temps WHERE timestamp>datetime('now','-%s hour') AND timestamp<=datetime('now')" % option)
curs.execute("SELECT avg(energy) FROM energy WHERE timestamp>datetime('now','-%s hour','localtime') AND timestamp<=datetime('now','localtime')" % option)
rowavg=curs.fetchone()
print "<hr>"
print "<h2>Minumum energy </h2>"
print rowstrmin
print "<h2>Maximum energy</h2>"
print rowstrmax
print "<h2>Average energy</h2>"
print "%.3f" % rowavg+"KWH"
print "<hr>"
print "<h2>In the last hour:</h2>"
print "<table>"
print "<tr><td><strong>Date/Time</strong></td><td><strong>energy</strong></td></tr>"
# rows=curs.execute("SELECT * FROM energy WHERE timestamp>datetime('new','-1 hour') AND timestamp<=datetime('new')")
rows=curs.execute("SELECT * FROM energy WHERE timestamp>datetime('now','-1 hour','localtime') AND timestamp<=datetime('now','localtime')")
for row in rows:
rowstr="<tr><td>{0}  </td><td>{1}KWH</td></tr>".format(str(row[0]),str(row[1]))
print rowstr
print "</table>"
print "<hr>"
conn.close()
def print_time_selector(option):
print """<form action="/cgi-bin/both.py" method="POST">
Show the logs for
<select name="timeinterval">"""
if option is not None:
if option == "-1":
print "<option value=\"-1\" selected=\"selected\">All times</option>"
else:
print "<option value=\"-1\">All times</option>"
#if option == None:
#print "<option value=\"-1\" selected=\"selected\">All times</option>"
#else:
#print "<option value=\"-1\">All times</option>"
if option == "6":
print "<option value=\"6\" selected=\"selected\">the last 6 hours</option>"
else:
print "<option value=\"6\">the last 6 hours</option>"
if option == "12":
print "<option value=\"12\" selected=\"selected\">the last 12 hours</option>"
else:
print "<option value=\"12\">the last 12 hours</option>"
if option == "24":
print "<option value=\"24\" selected=\"selected\">the last 24 hours</option>"
else:
print "<option value=\"24\">the last 24 hours</option>"
if option == "168":
print "<option value=\"168\" selected=\"selected\">1 week</option>"
else:
print "<option value=\"168\">1 week</option>"
else:
print """<option value="-1">All times</option>
<option value="6">the last 6 hours</option>
<option value="12">the last 12 hours</option>
<option value="24" selected="selected">the last 24 hours</option>
<option value="168">1 week</option>"""
print """ </select>
<input type="submit" value="Display">
</form>"""
# check that the option is valid
# and not an SQL injection
def validate_input(option_str):
# check that the option string represents a number
#if option_str == -1:
#return None
if is_number(option_str):
# check that the option is within a specific range
if int(option_str) > -2 and int(option_str) <= 2000:
return option_str
else:
return None
else:
return None
#return the option passed to the script
def get_option():
form=cgi.FieldStorage()
if "timeinterval" in form:
option = form["timeinterval"].value
return validate_input (option)
else:
return None
# main function
# This is where the program starts
def main():
# get options that may have been passed to this script
option=get_option()
if option is None:
option = str(24)
# get data from the database
records=read_html()
log_energy(records[1],records[2],records[3])
#records=get_data(None)
# print the HTTP header
#printHTTPheader()
#if len(records) != 0:
# convert the data into a table
#table=create_table(records)
#else:
#print "<h1>Raspberry Pi energy/power Logger "
#print myOption
#print "No data found"
#print "</h1>"
#return
#global myOption
#myOption=''
# start printing the page
#print "<html>"
# print the head section including the table
# used by the javascript for the chart
#printHTMLHead("Raspberry Pi energy/power Logger", table)
# print the page body
#print "<body>"
#print "<h1>Raspberry Pi energy/power Logger "
#print myOption
#print "</h1>"
#print "<hr>"
#print_time_selector(option)
#show_graph()
#show_stats(option)
#print "</body>"
#print "</html>"
sys.stdout.flush()
if __name__=="__main__":
main()
|
Wellington, FL - January 6, 2014 - The inaugural Citrus Series Horse Show Wellington held at Jim Brandon Equestrian Center Okeeheelee Park continued on Saturday. Hunters and jumpers showed over courses that ranged from 2 foot to 1.30m. Course designer Gerry Briggs and the Citrus Series management created beautiful, well-decorated and inviting courses.
"I gave the Zone 4 Clinic here last year but I hadn't shown here," said top judge and clinician and 1975 AHSA (now USEF) Medal Finals winner Cynthia Hankins. "It is so close. It is unbelievably easy. I love this. I lived in Europe for 10 years. I haven't owned horses in a while and I bought a couple of very green horses. Before spending an outrageous amount of money, I wanted to give them a chance to get into a ring. I am hoping that others will support this because we have to have these other shows. Not every horse is ready to walk in and win and so they need to come and do this."
"This show offers all the flowers and the color," Hankins said. "The ring is beautifully set. They've set it so I can do hunter classes and a couple of jumper classes."
Salim Radi is a native of Colombia who attends Northwood University and competes in the jumpers. He brought his six-year-old jumper, Charon, to the Citrus Series.
"I was really happy with him," said Radi. "It was great to be able two compete over two different courses at one height and then jump similar tracks a little bit bigger. The difference between his first trip and his last was like night and day. I couldn't get this kind of experience in another show setting anywhere. Plus, the facility and the footing are really fantastic. It is easy to ship in and show. I had no idea it was so close."
Citrus Series organizer James Lala was pleased with the outcome of the first show.
Salim Radi and his young jumper Charon / Photo by Carrie Wirth.
"I was cautiously optimistic for the first show of the series," said Lala. "I am trying to incorporate the wish lists I've discussed with professionals of all levels through the years-we are a little a bit revolutionary. I wasn't sure how it would actually go over. I watched horse after horse, rider after rider improve and leave with a positive experience. I am convinced this concept is really worthwhile. I overheard one trainer say that he accomplished about a month's worth of training in an hour."
Citrus Series returns to Jim Brandon Equestrian Center at Okeeheelee Park February 21-22 with jumpers under the lights on Friday night at 6 p.m. and hunter, equitation and jumper classes starting at 8 a.m. on Saturday.
The unique features of the show are designed to help trainers, developing horses and riders achieve their goals affordably. Professionals and adults may ride ponies. If time permits, competitors can do do-overs to make sure the horse and rider leave the ring with a positive learning experience. A user-friendly class schedule and sale horse identifiers provide an opportunity to market sale horses.
Owners, breeders, riders, trainers and their assistants are encouraged to bring their up-and-coming horses and riders to experience the Citrus Series at Jim Brandon Equestrian Center, Okeeheelee Park, a close, convenient, high-quality facility. Jim Brandon Equestrian Center is located just three miles / five minutes from the intersection of 441 and Forest Hill Boulevard. |
#!/usr/bin/env python
from pyDFTutils.vasp.myvasp import myvasp, default_pps
from pyDFTutils.vasp.vasp_utils import read_efermi
from pyDFTutils.ase_utils.geometry import gen_disped_atoms
from ase.io import read
from pyDFTutils.wannier90.wannier import wannier_input,run_wannier
import os
def calc():
atoms = read('POSCAR.vasp')
d_atoms = gen_disped_atoms(atoms, 'Ti1', distance=0.005, direction='all')
# original
pwd = os.getcwd()
path='orig'
if not os.path.exists(path):
os.mkdir(path)
os.chdir(path)
calc_wannier(atoms)
os.chdir(pwd)
# displaced
pwd = os.getcwd()
path='disp_Ti_x'
if not os.path.exists(path):
os.mkdir(path)
os.chdir(path)
calc_wannier(d_atoms[0])
os.chdir(pwd)
def calc_wannier(atoms):
mycalc = myvasp(
xc='PBE',
gga='PS',
setups=default_pps,
ispin=2,
icharg=0,
kpts=[6, 6, 6],
gamma=True,
prec='normal',
istart=1,
lmaxmix=4,
encut=500)
mycalc.set(lreal='Auto', algo='normal')
atoms.set_calculator(mycalc)
# electronic
mycalc.set(ismear=-5, sigma=0.1, nelm=100, nelmdl=-6, ediff=1e-7)
mycalc.set(ncore=1, kpar=3)
mycalc.scf_calculation()
mycalc.set(
lwannier90=True,
lwrite_unk=False,
lwrite_mmn_amn=True,
ncore=1,
kpar=3)
wa = wannier_input(atoms=atoms)
efermi = read_efermi()
wa.set(
mp_grid=[6, 6, 6],
num_bands=28,
guiding_centres=True,
num_iter=100,
kmesh_tol=1e-9,
search_shells=24,
write_xyz=True,
hr_plot=True,
)
wa.set_energy_window([-70,0.5],[-67.4,0.4],shift_efermi=efermi)
wa.add_basis('Ba','s')
wa.add_basis('Ba','p')
wa.add_basis('Ti','s')
wa.add_basis('Ti','p')
wa.add_basis('O','s')
wa.add_basis('O','p')
wa.write_input()
mycalc.set(nbands=28)
mycalc.scf_calculation()
run_wannier(spin='up')
run_wannier(spin='dn')
#mycalc.ldos_calculation()
calc()
|
The new year is a time for self-reflection, renewal, and determining your travel bucket list. After all, there’s no better time to contemplate the places you want to explore in the coming year. And, luckily, there are incredible state parks right in your backyard. Check out our picks for the top Texas state parks to visit in 2017.
Yes, we adore our beloved Barton Springs, but there’s something special about bathing in a giant pool smack-dab in the middle of the desert. From Austin, travel west six hours and you can swim or even scuba dive in Balmorhea’s cool blue waters — the world’s largest spring-fed pool, in fact.
Pro tip: Cross two parks off your list by staying at the Indian Lodge in Davis Mountains State Park. The Indian Lodge is truly a West Texas treasure, with its stark white adobe walls and scenic views of the park.
Sure, Big Bend National Park is glorious, but don’t forego Big Bend Ranch State Park in far West Texas on the U.S.-Mexico border. There are 238 miles of incredible trails and sights to see here. From rocky canyons and breathtaking mountain vistas to otherworldly desert landscapes, this park pretty much has it all.
Pro tip: Go in the springtime, when the area is ablaze with wildflowers and temperatures are comfy.
Caprock Canyons, located six-and-a-half hours northwest of Austin near Amarillo, often gets passed up in favor of the showier Palo Duro Canyon, but this is precisely why you should consider going. It’s less developed, more remote, and boasts marvelous views and miles of ruby red canyons.
Pro tip: Come prepared with food and a full gas tank.
With its lush green scenery and majestic waterfalls, Colorado Bend, located west of Lampasas and southeast of San Saba, is a unique addition to the scrubby Texas Hill Country. Just a two-hour drive from Austin, this is one of the most special spots in the state. There are plenty of trails here, but don’t skip the hike to Gorman Falls, a gorgeous 65-foot waterfall surrounded by so much bright green vegetation you may think you somehow landed in Oregon.
Pro tip: Avoid the crowds by camping at nearby Sulphur Springs campground, where you can score a spot right on the picturesque Colorado River.
If you’re seeking the quintessential Texas state park experience, Garner, located near Uvalde, should be your go-to destination. Whether you’re looking to tube down the cold waters of the Frio River, go fishing, or hike some of the most verdant land in the state, this park has a little something for everyone. Don’t forget to trek up Old Baldy Hill for spectacular views of the surrounding area.
Pro tip: Head to Garner in the summer when the park hosts its famed old-timey jukebox dance. |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from lxml import etree
import mock
from oslo.serialization import jsonutils
from oslo.utils import timeutils
import webob
from nova.api.openstack.compute.contrib import simple_tenant_usage as \
simple_tenant_usage_v2
from nova.api.openstack.compute.plugins.v3 import simple_tenant_usage as \
simple_tenant_usage_v21
from nova.compute import flavors
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova import objects
from nova.openstack.common import policy as common_policy
from nova import policy
from nova import test
from nova.tests.api.openstack import fakes
from nova import utils
SERVERS = 5
TENANTS = 2
HOURS = 24
ROOT_GB = 10
EPHEMERAL_GB = 20
MEMORY_MB = 1024
VCPUS = 2
NOW = timeutils.utcnow()
START = NOW - datetime.timedelta(hours=HOURS)
STOP = NOW
FAKE_INST_TYPE = {'id': 1,
'vcpus': VCPUS,
'root_gb': ROOT_GB,
'ephemeral_gb': EPHEMERAL_GB,
'memory_mb': MEMORY_MB,
'name': 'fakeflavor',
'flavorid': 'foo',
'rxtx_factor': 1.0,
'vcpu_weight': 1,
'swap': 0,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'disabled': False,
'is_public': True,
'extra_specs': {'foo': 'bar'}}
def get_fake_db_instance(start, end, instance_id, tenant_id,
vm_state=vm_states.ACTIVE):
sys_meta = utils.dict_to_metadata(
flavors.save_flavor_info({}, FAKE_INST_TYPE))
# NOTE(mriedem): We use fakes.stub_instance since it sets the fields
# needed on the db instance for converting it to an object, but we still
# need to override system_metadata to use our fake flavor.
inst = fakes.stub_instance(
id=instance_id,
uuid='00000000-0000-0000-0000-00000000000000%02d' % instance_id,
image_ref='1',
project_id=tenant_id,
user_id='fakeuser',
display_name='name',
flavor_id=FAKE_INST_TYPE['id'],
launched_at=start,
terminated_at=end,
vm_state=vm_state,
memory_mb=MEMORY_MB,
vcpus=VCPUS,
root_gb=ROOT_GB,
ephemeral_gb=EPHEMERAL_GB,)
inst['system_metadata'] = sys_meta
return inst
def fake_instance_get_active_by_window_joined(context, begin, end,
project_id, host):
return [get_fake_db_instance(START,
STOP,
x,
"faketenant_%s" % (x / SERVERS))
for x in xrange(TENANTS * SERVERS)]
@mock.patch.object(db, 'instance_get_active_by_window_joined',
fake_instance_get_active_by_window_joined)
class SimpleTenantUsageTestV21(test.TestCase):
url = '/v2/faketenant_0/os-simple-tenant-usage'
alt_url = '/v2/faketenant_1/os-simple-tenant-usage'
policy_rule_prefix = "compute_extension:v3:os-simple-tenant-usage"
def setUp(self):
super(SimpleTenantUsageTestV21, self).setUp()
self.admin_context = context.RequestContext('fakeadmin_0',
'faketenant_0',
is_admin=True)
self.user_context = context.RequestContext('fakeadmin_0',
'faketenant_0',
is_admin=False)
self.alt_user_context = context.RequestContext('fakeadmin_0',
'faketenant_1',
is_admin=False)
def _get_wsgi_app(self, context):
return fakes.wsgi_app_v21(fake_auth_context=context,
init_only=('servers',
'os-simple-tenant-usage'))
def _test_verify_index(self, start, stop):
req = webob.Request.blank(
self.url + '?start=%s&end=%s' %
(start.isoformat(), stop.isoformat()))
req.method = "GET"
req.headers["content-type"] = "application/json"
res = req.get_response(self._get_wsgi_app(self.admin_context))
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)
usages = res_dict['tenant_usages']
for i in xrange(TENANTS):
self.assertEqual(int(usages[i]['total_hours']),
SERVERS * HOURS)
self.assertEqual(int(usages[i]['total_local_gb_usage']),
SERVERS * (ROOT_GB + EPHEMERAL_GB) * HOURS)
self.assertEqual(int(usages[i]['total_memory_mb_usage']),
SERVERS * MEMORY_MB * HOURS)
self.assertEqual(int(usages[i]['total_vcpus_usage']),
SERVERS * VCPUS * HOURS)
self.assertFalse(usages[i].get('server_usages'))
def test_verify_index(self):
self._test_verify_index(START, STOP)
def test_verify_index_future_end_time(self):
future = NOW + datetime.timedelta(hours=HOURS)
self._test_verify_index(START, future)
def test_verify_show(self):
self._test_verify_show(START, STOP)
def test_verify_show_future_end_time(self):
future = NOW + datetime.timedelta(hours=HOURS)
self._test_verify_show(START, future)
def _get_tenant_usages(self, detailed=''):
req = webob.Request.blank(
self.url + '?detailed=%s&start=%s&end=%s' %
(detailed, START.isoformat(), STOP.isoformat()))
req.method = "GET"
req.headers["content-type"] = "application/json"
res = req.get_response(self._get_wsgi_app(self.admin_context))
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)
return res_dict['tenant_usages']
def test_verify_detailed_index(self):
usages = self._get_tenant_usages('1')
for i in xrange(TENANTS):
servers = usages[i]['server_usages']
for j in xrange(SERVERS):
self.assertEqual(int(servers[j]['hours']), HOURS)
def test_verify_simple_index(self):
usages = self._get_tenant_usages(detailed='0')
for i in xrange(TENANTS):
self.assertIsNone(usages[i].get('server_usages'))
def test_verify_simple_index_empty_param(self):
# NOTE(lzyeval): 'detailed=&start=..&end=..'
usages = self._get_tenant_usages()
for i in xrange(TENANTS):
self.assertIsNone(usages[i].get('server_usages'))
def _test_verify_show(self, start, stop):
tenant_id = 0
req = webob.Request.blank(
self.url + '/faketenant_%s?start=%s&end=%s' %
(tenant_id, start.isoformat(), stop.isoformat()))
req.method = "GET"
req.headers["content-type"] = "application/json"
res = req.get_response(self._get_wsgi_app(self.user_context))
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)
usage = res_dict['tenant_usage']
servers = usage['server_usages']
self.assertEqual(len(usage['server_usages']), SERVERS)
uuids = ['00000000-0000-0000-0000-00000000000000%02d' %
(x + (tenant_id * SERVERS)) for x in xrange(SERVERS)]
for j in xrange(SERVERS):
delta = STOP - START
uptime = delta.days * 24 * 3600 + delta.seconds
self.assertEqual(int(servers[j]['uptime']), uptime)
self.assertEqual(int(servers[j]['hours']), HOURS)
self.assertIn(servers[j]['instance_id'], uuids)
def test_verify_show_cannot_view_other_tenant(self):
req = webob.Request.blank(
self.alt_url + '/faketenant_0?start=%s&end=%s' %
(START.isoformat(), STOP.isoformat()))
req.method = "GET"
req.headers["content-type"] = "application/json"
rules = {
self.policy_rule_prefix + ":show":
common_policy.parse_rule([
["role:admin"], ["project_id:%(project_id)s"]
])
}
policy.set_rules(rules)
try:
res = req.get_response(self._get_wsgi_app(self.alt_user_context))
self.assertEqual(res.status_int, 403)
finally:
policy.reset()
def test_get_tenants_usage_with_bad_start_date(self):
future = NOW + datetime.timedelta(hours=HOURS)
tenant_id = 0
req = webob.Request.blank(
self.url + '/'
'faketenant_%s?start=%s&end=%s' %
(tenant_id, future.isoformat(), NOW.isoformat()))
req.method = "GET"
req.headers["content-type"] = "application/json"
res = req.get_response(self._get_wsgi_app(self.user_context))
self.assertEqual(res.status_int, 400)
def test_get_tenants_usage_with_invalid_start_date(self):
tenant_id = 0
req = webob.Request.blank(
self.url + '/'
'faketenant_%s?start=%s&end=%s' %
(tenant_id, "xxxx", NOW.isoformat()))
req.method = "GET"
req.headers["content-type"] = "application/json"
res = req.get_response(self._get_wsgi_app(self.user_context))
self.assertEqual(res.status_int, 400)
def _test_get_tenants_usage_with_one_date(self, date_url_param):
req = webob.Request.blank(
self.url + '/'
'faketenant_0?%s' % date_url_param)
req.method = "GET"
req.headers["content-type"] = "application/json"
res = req.get_response(self._get_wsgi_app(self.user_context))
self.assertEqual(200, res.status_int)
def test_get_tenants_usage_with_no_start_date(self):
self._test_get_tenants_usage_with_one_date(
'end=%s' % (NOW + datetime.timedelta(5)).isoformat())
def test_get_tenants_usage_with_no_end_date(self):
self._test_get_tenants_usage_with_one_date(
'start=%s' % (NOW - datetime.timedelta(5)).isoformat())
class SimpleTenantUsageTestV2(SimpleTenantUsageTestV21):
policy_rule_prefix = "compute_extension:simple_tenant_usage"
def _get_wsgi_app(self, context):
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Simple_tenant_usage'])
return fakes.wsgi_app(fake_auth_context=context,
init_only=('os-simple-tenant-usage', ))
class SimpleTenantUsageSerializerTest(test.TestCase):
def _verify_server_usage(self, raw_usage, tree):
self.assertEqual('server_usage', tree.tag)
# Figure out what fields we expect
not_seen = set(raw_usage.keys())
for child in tree:
self.assertIn(child.tag, not_seen)
not_seen.remove(child.tag)
self.assertEqual(str(raw_usage[child.tag]), child.text)
self.assertEqual(len(not_seen), 0)
def _verify_tenant_usage(self, raw_usage, tree):
self.assertEqual('tenant_usage', tree.tag)
# Figure out what fields we expect
not_seen = set(raw_usage.keys())
for child in tree:
self.assertIn(child.tag, not_seen)
not_seen.remove(child.tag)
if child.tag == 'server_usages':
for idx, gr_child in enumerate(child):
self._verify_server_usage(raw_usage['server_usages'][idx],
gr_child)
else:
self.assertEqual(str(raw_usage[child.tag]), child.text)
self.assertEqual(len(not_seen), 0)
def test_serializer_show(self):
serializer = simple_tenant_usage_v2.SimpleTenantUsageTemplate()
today = timeutils.utcnow()
yesterday = today - datetime.timedelta(days=1)
raw_usage = dict(
tenant_id='tenant',
total_local_gb_usage=789,
total_vcpus_usage=456,
total_memory_mb_usage=123,
total_hours=24,
start=yesterday,
stop=today,
server_usages=[dict(
instance_id='00000000-0000-0000-0000-0000000000000000',
name='test',
hours=24,
memory_mb=1024,
local_gb=50,
vcpus=1,
tenant_id='tenant',
flavor='m1.small',
started_at=yesterday,
ended_at=today,
state='terminated',
uptime=86400),
dict(
instance_id='00000000-0000-0000-0000-0000000000000002',
name='test2',
hours=12,
memory_mb=512,
local_gb=25,
vcpus=2,
tenant_id='tenant',
flavor='m1.tiny',
started_at=yesterday,
ended_at=today,
state='terminated',
uptime=43200),
],
)
tenant_usage = dict(tenant_usage=raw_usage)
text = serializer.serialize(tenant_usage)
tree = etree.fromstring(text)
self._verify_tenant_usage(raw_usage, tree)
def test_serializer_index(self):
serializer = simple_tenant_usage_v2.SimpleTenantUsagesTemplate()
today = timeutils.utcnow()
yesterday = today - datetime.timedelta(days=1)
raw_usages = [dict(
tenant_id='tenant1',
total_local_gb_usage=1024,
total_vcpus_usage=23,
total_memory_mb_usage=512,
total_hours=24,
start=yesterday,
stop=today,
server_usages=[dict(
instance_id='00000000-0000-0000-0000-0000000000000001',
name='test1',
hours=24,
memory_mb=1024,
local_gb=50,
vcpus=2,
tenant_id='tenant1',
flavor='m1.small',
started_at=yesterday,
ended_at=today,
state='terminated',
uptime=86400),
dict(
instance_id='00000000-0000-0000-0000-0000000000000002',
name='test2',
hours=42,
memory_mb=4201,
local_gb=25,
vcpus=1,
tenant_id='tenant1',
flavor='m1.tiny',
started_at=today,
ended_at=yesterday,
state='terminated',
uptime=43200),
],
),
dict(
tenant_id='tenant2',
total_local_gb_usage=512,
total_vcpus_usage=32,
total_memory_mb_usage=1024,
total_hours=42,
start=today,
stop=yesterday,
server_usages=[dict(
instance_id='00000000-0000-0000-0000-0000000000000003',
name='test3',
hours=24,
memory_mb=1024,
local_gb=50,
vcpus=2,
tenant_id='tenant2',
flavor='m1.small',
started_at=yesterday,
ended_at=today,
state='terminated',
uptime=86400),
dict(
instance_id='00000000-0000-0000-0000-0000000000000002',
name='test2',
hours=42,
memory_mb=4201,
local_gb=25,
vcpus=1,
tenant_id='tenant4',
flavor='m1.tiny',
started_at=today,
ended_at=yesterday,
state='terminated',
uptime=43200),
],
),
]
tenant_usages = dict(tenant_usages=raw_usages)
text = serializer.serialize(tenant_usages)
tree = etree.fromstring(text)
self.assertEqual('tenant_usages', tree.tag)
self.assertEqual(len(raw_usages), len(tree))
for idx, child in enumerate(tree):
self._verify_tenant_usage(raw_usages[idx], child)
class SimpleTenantUsageControllerTestV21(test.TestCase):
controller = simple_tenant_usage_v21.SimpleTenantUsageController()
def setUp(self):
super(SimpleTenantUsageControllerTestV21, self).setUp()
self.context = context.RequestContext('fakeuser', 'fake-project')
self.baseinst = get_fake_db_instance(START, STOP, instance_id=1,
tenant_id=self.context.project_id,
vm_state=vm_states.DELETED)
# convert the fake instance dict to an object
self.inst_obj = objects.Instance._from_db_object(
self.context, objects.Instance(), self.baseinst)
def test_get_flavor_from_sys_meta(self):
# Non-deleted instances get their type information from their
# system_metadata
with mock.patch.object(db, 'instance_get_by_uuid',
return_value=self.baseinst):
flavor = self.controller._get_flavor(self.context,
self.inst_obj, {})
self.assertEqual(objects.Flavor, type(flavor))
self.assertEqual(FAKE_INST_TYPE['id'], flavor.id)
def test_get_flavor_from_non_deleted_with_id_fails(self):
# If an instance is not deleted and missing type information from
# system_metadata, then that's a bug
self.inst_obj.system_metadata = {}
self.assertRaises(KeyError,
self.controller._get_flavor, self.context,
self.inst_obj, {})
def test_get_flavor_from_deleted_with_id(self):
# Deleted instances may not have type info in system_metadata,
# so verify that they get their type from a lookup of their
# instance_type_id
self.inst_obj.system_metadata = {}
self.inst_obj.deleted = 1
flavor = self.controller._get_flavor(self.context, self.inst_obj, {})
self.assertEqual(objects.Flavor, type(flavor))
self.assertEqual(FAKE_INST_TYPE['id'], flavor.id)
def test_get_flavor_from_deleted_with_id_of_deleted(self):
# Verify the legacy behavior of instance_type_id pointing to a
# missing type being non-fatal
self.inst_obj.system_metadata = {}
self.inst_obj.deleted = 1
self.inst_obj.instance_type_id = 99
flavor = self.controller._get_flavor(self.context, self.inst_obj, {})
self.assertIsNone(flavor)
class SimpleTenantUsageControllerTestV2(SimpleTenantUsageControllerTestV21):
controller = simple_tenant_usage_v2.SimpleTenantUsageController()
class SimpleTenantUsageUtilsV21(test.NoDBTestCase):
simple_tenant_usage = simple_tenant_usage_v21
def test_valid_string(self):
dt = self.simple_tenant_usage.parse_strtime(
"2014-02-21T13:47:20.824060", "%Y-%m-%dT%H:%M:%S.%f")
self.assertEqual(datetime.datetime(
microsecond=824060, second=20, minute=47, hour=13,
day=21, month=2, year=2014), dt)
def test_invalid_string(self):
self.assertRaises(exception.InvalidStrTime,
self.simple_tenant_usage.parse_strtime,
"2014-02-21 13:47:20.824060",
"%Y-%m-%dT%H:%M:%S.%f")
class SimpleTenantUsageUtilsV2(SimpleTenantUsageUtilsV21):
simple_tenant_usage = simple_tenant_usage_v2
|
When constructing a Logic App that gets information from a SharePoint list, you will use the SharePoint “Get Items” action. This action retrieves the items from the SharePoint list you connect it to. You can then process the items by using, for example, an Azure Function. Constructing such a flow gives you the following Logic App.
By default Logic Apps will construct a foreach loop after the “Get Items” action that you cannot see in the designer view. By opening the code view you will see it (it only appears once you have added a value from the “Get Items” action to, for example, the input of the Azure Function).
This means that the Azure Function will be called for every item that is retrieved by the “Get Items” action, so if the action retrieves 100 items the function will be started 100 times.
In a Logic App a colleague of mine (Andre de Ruiter) was constructing, he wanted to process all the items at once in a single Azure Function call. By default this isn’t possible, because the Logic Apps designer only gives you the option to add specific fields into the input value.
As you can see in the above screenshots, the action retrieves all the fields available on the SharePoint item and does not give an option to add the complete body of the “Get Items” action.
If you want to send the complete body through, you will have to use the code view. The easiest way is to add one output from the “Get Items” activity into the input of your action and then click on “Code View”.
Within this view, search for the Azure Function or the other action that is being called. You will notice that this action contains the foreach object.
Remove the line with the foreach and, in the “inputs” object, change the body value to “@body(‘Get_items’)”. Your action will then look roughly like the sketch below.
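For illustration, here is a minimal sketch of that change, written as Python dictionaries rather than the raw Logic Apps JSON (the rest of this document uses Python). The action name ProcessItems, the "Function" type and the exact property layout are assumptions made for the example; only the @body('Get_items') expression and the removal of the foreach line come from the steps above.
# Schematic sketch only; property names and nesting are assumptions, not the exact workflow schema.
# Before: the designer has added an implicit foreach, so the function runs once per item.
action_before = {
    "ProcessItems": {
        "type": "Function",
        "inputs": {"body": "@item()"},              # one SharePoint item per call
        "foreach": "@body('Get_items')?['value']",  # implicit loop added by the designer (expression assumed)
    }
}
# After: the foreach line is removed and the whole "Get Items" body is passed in a single call.
action_after = {
    "ProcessItems": {
        "type": "Function",
        "inputs": {"body": "@body('Get_items')"},   # complete result set at once
    }
}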
Open the “Designer” again and notice that the Action now contains the “body” element of the “Get Items” action. |
import os
import logging
import subprocess
import shutil
import math
import tempfile
'''Given a list of commands run single threaded or in parallel'''
class CommandRunner:
def __init__(self, output_directory, logger, threads):
self.logger = logger
self.threads = threads
self.output_directory = output_directory
def run_list_of_kmc_commands(self, commands_to_run):
if self.threads > 1:
self.run_with_parallel(commands_to_run, self.kmc_processes())
else:
self.run_sequentially(commands_to_run)
def run_list_of_commands(self, commands_to_run):
if self.threads > 1:
self.run_with_parallel(commands_to_run, self.threads)
else:
self.run_sequentially(commands_to_run)
def run_sequentially(self, commands_to_run):
for c in commands_to_run:
            self.logger.warning('Command to run %s', c)
subprocess.check_call(c, shell=True)
    '''KMC handles multithreading badly. So give each process 2 threads and limit the overall total of independent processes'''
def kmc_threads(self):
if self.threads >= 2:
return 2
else:
return 1
def kmc_processes(self):
if self.threads >= 2:
return int(math.floor(self.threads/2))
else:
return 1
'''Use GNU parallel to manage parallel processing'''
def run_with_parallel(self, commands_to_run, processes_in_parallel):
temp_working_dir = tempfile.mkdtemp(dir=os.path.abspath(self.output_directory))
file_of_commands = os.path.join(temp_working_dir,'commands_to_run')
with open(file_of_commands, 'w') as commands_file:
for c in commands_to_run:
self.logger.warning('Command to run %s', c)
commands_file.write(c + "\n")
gnu_parallel_command = ' '.join(['parallel', '--gnu', '-j '+ str(processes_in_parallel), '<',file_of_commands])
self.logger.warning('GNU parallel command to run %s', gnu_parallel_command)
subprocess.check_call(gnu_parallel_command, shell=True)
shutil.rmtree(temp_working_dir)
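# Illustrative usage sketch (not part of the original module): the logger name,
# output directory and echo commands below are made up, and running with
# threads > 1 additionally requires GNU parallel to be installed.
if __name__ == '__main__':
    logging.basicConfig(level=logging.WARNING)
    example_logger = logging.getLogger('command_runner_example')
    runner = CommandRunner(output_directory='.', logger=example_logger, threads=4)
    runner.run_list_of_commands(['echo first', 'echo second'])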
|
Lots of our customers find Direct Debit the easiest way to pay as it gives them control over their finances, with a choice of payment dates and instalments spread across the year.
To set up a Direct Debit with us, click on the link below.
You can use this form to pay your bill in full, or to pay an instalment. Click on the link below to get started (you'll need your South East Water account number to hand). Please note you will not be able to make a one-off payment if you already pay by Direct Debit.
Quick, simple and available 24 hours a day! Just pick up the phone, and make sure you have your card and your customer account number at the ready.
You can pay in cash or by cheque at most Post Offices by completing the payment slip at the bottom of your bill. Please note that the Post Office will make a small charge.
PayPoint is a quick and easy way to pay for your water. Available at most shops, it's free of charge when you pay with cash at a PayPoint store. Simply take your bill along with you.
You can use the giro slip on your bill to make a payment the next time you visit your bank. You can also make payment at any HSBC bank for free.
To pay your bill by post, please send a cheque made payable to 'South East Water Limited'.
Finding it hard to pay your bill?
If you find yourself struggling to pay your bill, we're here to help support you and discuss other payment options, so you've got one less thing to worry about. |
"""
Plagiarism detection support using JPlag.
"""
import re
import subprocess
from pathlib import Path
from shutil import copytree
from tempfile import TemporaryDirectory
from typing import Dict, Iterable, Optional, Set
from django.conf import settings
from django.contrib.auth.models import User
from django.db.models import QuerySet
from huey.api import Result
from huey.contrib.djhuey import db_task
from inloop.grading.models import save_plagiarism_set
from inloop.solutions.models import Solution
from inloop.tasks.models import Task
LINE_REGEX = re.compile(r"Comparing (.*?)-(.*?): (\d+\.\d+)")
@db_task()
def jplag_check_async(users: QuerySet, tasks: QuerySet) -> Result:
"""
Submit a job to check solutions using the jplag_check function.
This function returns immediately and is supposed to be called from inside
view code. The actual JPlag invocation happens in a background worker process
that will wait for JPlag to complete.
The given queryset arguments will be serialized (pickled) before they are sent
to the background queue.
The results of the check will be available in the PlagiarismTest model.
Args:
users: A User queryset.
tasks: A Task queryset.
Returns:
A huey Result object.
"""
jplag_check(users, tasks)
def jplag_check(
users: Iterable[User],
tasks: Iterable[Task],
min_similarity: Optional[int] = None,
result_dir: Optional[Path] = None,
) -> Set[Solution]:
"""
Check solutions of the given users for the given tasks with JPlag.
Args:
users: A User iterable (e.g., queryset).
tasks: A Task iterable (e.g., queryset).
min_similarity: Minimum solution similarity after which two solutions
shall be regarded as plagiarism (optional).
result_dir: Directory where JPlag HTML files shall be saved to (optional).
The given directory must not already exist.
Returns:
A set containing the solutions that have been identified as plagiarism.
"""
if min_similarity is None:
min_similarity = settings.JPLAG_DEFAULT_SIMILARITY
with TemporaryDirectory() as tmpdir:
path = Path(tmpdir)
plagiarism_set = set()
for task in tasks:
plagiarism_set.update(jplag_check_task(users, task, min_similarity, path))
save_plagiarism_set(plagiarism_set, str(path))
if result_dir:
copytree(src=path, dst=result_dir)
return plagiarism_set
def jplag_check_task(
users: Iterable[User],
task: Task,
min_similarity: int,
result_path: Path,
) -> Set[Solution]:
"""
Check solutions of the given users for the given single task with JPlag.
Args:
users: A User queryset.
task: A Task object.
min_similarity: Minimum solution similarity after which two solutions
shall be regarded as plagiarism.
result_path: Directory where JPlag HTML files shall be saved to.
Returns:
A set containing the solutions that have been identified as plagiarism.
"""
with TemporaryDirectory() as tmpdir:
root_path = Path(tmpdir)
last_solutions = get_last_solutions(users, task)
if len(last_solutions) < 2:
return set()
prepare_directories(root_path, last_solutions)
output = exec_jplag(min_similarity, root_path, result_path.joinpath(task.slug))
return parse_output(output, min_similarity, last_solutions)
def get_last_solutions(users: Iterable[User], task: Task) -> Dict[str, Solution]:
"""
Get the last valid solution of the given users for a given task.
"""
last_solutions = {}
for user in users:
last_solution = Solution.objects.filter(author=user, task=task, passed=True).last()
if last_solution is not None:
# escape hyphens in usernames with an unused (since
# disallowed) character, otherwise the usernames cannot
# be extracted from the jplag output
last_solutions[user.username.replace("-", "$")] = last_solution
return last_solutions
def prepare_directories(root_path: Path, last_solutions: Dict[str, Solution]) -> None:
"""
Copy the given solutions to root_path, using the folder structure expected by JPlag.
The expected folder structure, for one task, will look like this:
root_path/
user-1/
File1.java
File2.java
user-2/
File1.java
File2.java
"""
for username, last_solution in last_solutions.items():
copytree(src=last_solution.path, dst=root_path.joinpath(username))
def parse_output(
output: str,
min_similarity: int,
last_solutions: Dict[str, Solution],
) -> Set[Solution]:
"""
Extract plagiarism check results from the given JPlag command line output.
Returns:
A set containing the solutions that have been identified as plagiarism.
"""
plagiarism_set = set()
for match in LINE_REGEX.finditer(output):
username1, username2, similarity = match.groups()
similarity = float(similarity)
if similarity >= min_similarity:
plagiarism_set.add(last_solutions[username1])
plagiarism_set.add(last_solutions[username2])
return plagiarism_set
def exec_jplag(min_similarity: int, root_path: Path, result_path: Path) -> str:
"""
Execute the JPlag Java program with the given parameters and return its output.
"""
args = ["java", "-cp", settings.JPLAG_JAR_PATH, "jplag.JPlag"]
args.append("-vl")
args.extend(["-l", "java19"])
args.extend(["-m", f"{min_similarity}%"])
args.extend(["-r", f"{result_path}"])
args.append(f"{root_path}")
return subprocess.check_output(args, stderr=subprocess.DEVNULL, universal_newlines=True)
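# Illustrative usage sketch (not part of the original module): the queryset
# filters and task slug below are made up, and settings.JPLAG_JAR_PATH must
# point at a JPlag jar for the check to actually run.
def _example_jplag_run() -> Set[Solution]:  # pragma: no cover
    example_users = User.objects.filter(is_staff=False)
    example_tasks = Task.objects.filter(slug="fibonacci")
    return jplag_check(example_users, example_tasks, min_similarity=90)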
|
General partnerships are the oldest form of business organizations behind only the sole-proprietorship and the kingdom. The general partnership, however, is not a business entity that stands alone from its owners as far as legal liability is concerned. All partners in a general partnership share full responsibility jointly and severally for all of the obligations of the partnership.
For tax purposes, on the other hand, the partnership is treated separately to the extent that it may file its own tax return. However, the partnership has pass-through taxation, which allows income, profits and losses to pass through directly to the general partners' personal tax returns. Because of their unlimited general liability, general partnerships are rarely used today. |
##########################################################################
#
# Copyright (c) 2011, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import ctypes
import logging
# the OpenGL module loves spewing things into logs, and for some reason
# when running in maya 2012 the default log level allows info messages through.
# so we set a specific log level on the OpenGL logger to keep it quiet.
logging.getLogger( "OpenGL" ).setLevel( logging.WARNING )
import IECore
import Gaffer
import GafferUI
# import lazily to improve startup of apps which don't use GL functionality
GL = Gaffer.lazyImport( "OpenGL.GL" )
IECoreGL = Gaffer.lazyImport( "IECoreGL" )
QtCore = GafferUI._qtImport( "QtCore" )
QtGui = GafferUI._qtImport( "QtGui" )
QtOpenGL = GafferUI._qtImport( "QtOpenGL", lazy=True )
## The GLWidget is a base class for all widgets which wish to draw using OpenGL.
# Derived classes override the _draw() method to achieve this.
class GLWidget( GafferUI.Widget ) :
## This enum defines the optional elements of the GL buffer used
# for display.
BufferOptions = IECore.Enum.create(
"Alpha",
"Depth",
"Double"
)
## Note that you won't always get the buffer options you ask for - a best fit is found
# among the available formats. In particular it appears that a depth buffer is often present
# even when not requested.
def __init__( self, bufferOptions = set(), **kw ) :
format = QtOpenGL.QGLFormat()
format.setRgba( True )
format.setAlpha( self.BufferOptions.Alpha in bufferOptions )
format.setDepth( self.BufferOptions.Depth in bufferOptions )
format.setDoubleBuffer( self.BufferOptions.Double in bufferOptions )
if hasattr( format, "setVersion" ) : # setVersion doesn't exist in qt prior to 4.7.
format.setVersion( 2, 1 )
graphicsView = _GLGraphicsView( format )
self.__graphicsScene = _GLGraphicsScene( graphicsView, Gaffer.WeakMethod( self.__draw ) )
graphicsView.setScene( self.__graphicsScene )
GafferUI.Widget.__init__( self, graphicsView, **kw )
## Adds a Widget as an overlay.
## \todo Support more than one overlay, and provide grid-based
# placement options. Perhaps GLWidget should also derive from Container
# to support auto-parenting and appropriate removeChild() behaviour.
def addOverlay( self, overlay ) :
assert( overlay.parent() is None )
self.__overlay = overlay
self.__overlay._setStyleSheet()
item = self.__graphicsScene.addWidget( self.__overlay._qtWidget() )
## Called whenever the widget is resized. May be reimplemented by derived
# classes if necessary. The appropriate OpenGL context will already be current
# when this is called.
def _resize( self, size ) :
GL.glViewport( 0, 0, size.x, size.y )
## Derived classes must override this to draw their contents using
# OpenGL calls. The appropriate OpenGL context will already be current
# when this is called.
def _draw( self ) :
pass
## Derived classes may call this when they wish to trigger a redraw.
def _redraw( self ) :
self._glWidget().update()
## May be used by derived classes to get access to the internal
# QGLWidget. Note that _makeCurrent() should be used in preference
# to _glWidget().makeCurrent(), for the reasons stated in the
# documentation for that method.
def _glWidget( self ) :
return self._qtWidget().viewport()
## May be used by derived classes to make the OpenGL context
# for this widget current. Returns True if the operation was
# successful and False if not. In an ideal world, the return
# value would always be True, but it appears that there are
# Qt/Mac bugs which cause it not to be from time to time -
# typically for newly created Widgets. If False is returned,
# no OpenGL operations should be undertaken subsequently by
# the caller.
def _makeCurrent( self ) :
self._qtWidget().viewport().makeCurrent()
return self.__framebufferValid()
def __framebufferValid( self ) :
import OpenGL.GL.framebufferobjects
return GL.framebufferobjects.glCheckFramebufferStatus( GL.framebufferobjects.GL_FRAMEBUFFER ) == GL.framebufferobjects.GL_FRAMEBUFFER_COMPLETE
def __draw( self ) :
# Qt sometimes enters our GraphicsScene.drawBackground() method
# with a GL error flag still set. We unset it here so it won't
# trigger our own error checking.
while GL.glGetError() :
pass
if not self.__framebufferValid() :
return
# we need to call the init method after a GL context has been
# created, and this seems like the only place that is guaranteed.
# calling it here does mean we call init() way more than needed,
# but it's safe.
IECoreGL.init( True )
self._draw()
class _GLGraphicsView( QtGui.QGraphicsView ) :
def __init__( self, format ) :
QtGui.QGraphicsView.__init__( self )
self.setObjectName( "gafferGLWidget" )
self.setHorizontalScrollBarPolicy( QtCore.Qt.ScrollBarAlwaysOff )
self.setVerticalScrollBarPolicy( QtCore.Qt.ScrollBarAlwaysOff )
glWidget = self.__createQGLWidget( format )
# On mac, we need to hide the GL widget until the last
# possible moment, otherwise we get "invalid drawable"
# errors spewing all over the place. See event() for the
# spot where we show the widget.
glWidget.hide()
self.setViewport( glWidget )
self.setViewportUpdateMode( self.FullViewportUpdate )
# QAbstractScrollArea (one of our base classes), implements
# minimumSizeHint() to include enough room for scrollbars.
# But we know we'll never show scrollbars, and don't want
# a minimum size, so we reimplement it.
def minimumSizeHint( self ) :
return QtCore.QSize()
def event( self, event ) :
if event.type() == event.PolishRequest :
# This seems to be the one signal that reliably
# lets us know we're becoming genuinely visible
# on screen. We use it to show the GL widget we
# hid in our constructor.
self.viewport().show()
return QtGui.QGraphicsView.event( self, event )
def resizeEvent( self, event ) :
if self.scene() is not None :
self.scene().setSceneRect( 0, 0, event.size().width(), event.size().height() )
owner = GafferUI.Widget._owner( self )
# clear any existing errors that may trigger
# error checking code in _resize implementations.
while GL.glGetError() :
pass
owner._makeCurrent()
owner._resize( IECore.V2i( event.size().width(), event.size().height() ) )
def keyPressEvent( self, event ) :
# We have to reimplement this method to prevent QAbstractScrollArea
# from stealing the cursor keypresses, preventing them from
# being used by GLWidget subclasses. QAbstractScrollArea uses
# those keypresses to move the scrollbars, but we don't want the
# scrolling functionality at all. Our implementation of this method
# is functionally identical to the QGraphicsView one, except it
# passes unused events to QFrame, bypassing QAbstractScrollArea.
if self.scene() is not None and self.isInteractive() :
QtGui.QApplication.sendEvent( self.scene(), event )
if event.isAccepted() :
return
QtGui.QFrame.keyPressEvent( self, event )
# We keep a single hidden widget which owns the texture and display lists
# and then share those with all the widgets we really want to make.
__shareWidget = None
@classmethod
def __createQGLWidget( cls, format ) :
# try to make a host specific widget if necessary.
result = cls.__createMayaQGLWidget( format )
if result is not None :
return result
result = cls.__createHoudiniQGLWidget( format )
if result is not None :
return result
# and if it wasn't necessary, just breathe a sigh of relief
# and make a nice normal one.
if cls.__shareWidget is None :
cls.__shareWidget = QtOpenGL.QGLWidget()
return QtOpenGL.QGLWidget( format, shareWidget = cls.__shareWidget )
@classmethod
def __createHostedQGLWidget( cls, format, hostContextActivator ) :
# When running Gaffer embedded in a host application such as Maya
# or Houdini, we want to be able to share OpenGL resources between
# gaffer uis and host viewport uis, because IECoreGL will be used
# in both. So we implement our own QGLContext class which creates a
# context which shares with the host.
import OpenGL.GLX
# This is our custom context class which allows us to share gl
# resources with the hosts's contexts. We define it in here rather than
# at the top level because we want to import QtOpenGL lazily and
# don't want to trigger a full import until the last minute.
## \todo Call glXDestroyContext appropriately, although as far as I
# can tell this is impossible. The base class implementation calls it
# in reset(), but that's not virtual, and we can't store it in d->cx
# (which is what the base class destroys) because that's entirely
# on the C++ side of things.
class HostedGLContext( QtOpenGL.QGLContext ) :
def __init__( self, format, paintDevice, hostContextActivator ) :
QtOpenGL.QGLContext.__init__( self, format, paintDevice )
self.__paintDevice = paintDevice
self.__context = None
self.__hostContextActivator = hostContextActivator
def chooseContext( self, shareContext ) :
assert( self.__context is None )
# We have to call this to get d->vi set in the base class, because
# QGLWidget::setContext() accesses it directly, and will crash if we don't.
QtOpenGL.QGLContext.chooseContext( self, shareContext )
# Get the host's main OpenGL context. It is the responsibility
# of the hostContextActivator passed to __init__ to make the host
# context current so we can access it.
self.__hostContextActivator()
hostContext = OpenGL.GLX.glXGetCurrentContext()
self.__display = OpenGL.GLX.glXGetCurrentDisplay()
# Get a visual - we let the base class figure this out, but then we need
# to convert it from the form given by the qt bindings into the ctypes form
# needed by PyOpenGL.
visual = self.chooseVisual()
visual = ctypes.cast( int( visual ), ctypes.POINTER( OpenGL.raw._GLX.XVisualInfo ) )
# Make our context.
self.__context = OpenGL.GLX.glXCreateContext(
self.__display[0],
visual,
hostContext,
True
)
return True
def makeCurrent( self ) :
success = OpenGL.GLX.glXMakeCurrent( self.__display, self.__paintDevice.effectiveWinId(), self.__context )
assert( success )
result = QtOpenGL.QGLWidget()
result.setContext( HostedGLContext( format, result, hostContextActivator ) )
return result
@classmethod
def __createMayaQGLWidget( cls, format ) :
try :
import maya.OpenMayaRender
except ImportError :
# we're not in maya - createQGLWidget() will just make a
# normal widget.
return None
mayaRenderer = maya.OpenMayaRender.MHardwareRenderer.theRenderer()
return cls.__createHostedQGLWidget( format, IECore.curry( mayaRenderer.makeResourceContextCurrent, mayaRenderer.backEndString() ) )
@classmethod
def __createHoudiniQGLWidget( cls, format ) :
try :
import hou
except ImportError :
# we're not in houdini - createQGLWidget() will just make a
# normal widget.
return None
import IECoreHoudini
return cls.__createHostedQGLWidget( format, IECoreHoudini.makeMainGLContextCurrent )
class _GLGraphicsScene( QtGui.QGraphicsScene ) :
def __init__( self, parent, backgroundDrawFunction ) :
QtGui.QGraphicsScene.__init__( self, parent )
self.__backgroundDrawFunction = backgroundDrawFunction
self.sceneRectChanged.connect( self.__sceneRectChanged )
def addWidget( self, widget ) :
if widget.layout() is not None :
# removing the size constraint is necessary to keep the widget the
# size we tell it to be in __updateItemGeometry.
widget.layout().setSizeConstraint( QtGui.QLayout.SetNoConstraint )
item = QtGui.QGraphicsScene.addWidget( self, widget )
self.__updateItemGeometry( item, self.sceneRect() )
return item
def drawBackground( self, painter, rect ) :
self.__backgroundDrawFunction()
def __sceneRectChanged( self, sceneRect ) :
for item in self.items() :
self.__updateItemGeometry( item, sceneRect )
def __updateItemGeometry( self, item, sceneRect ) :
geometry = item.widget().geometry()
item.widget().setGeometry( QtCore.QRect( 0, 0, sceneRect.width(), item.widget().sizeHint().height() ) )
|
// Limit for the max-age value sent by the server.
// and the server answers with an HTTP 304 = "Not Modified"
* Logs a throwable that happened. Adds the stack trace to the log.
* @param level The level.
* @param t The throwable that should be logged.
* @param pattern The formatted message to print. |
#!/usr/bin/env python
# Copyright (C) 2015 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Manage JJB yaml feature implementation
import copy
import fnmatch
import io
import itertools
import logging
import re
import os
from jenkins_jobs.constants import MAGIC_MANAGE_STRING
from jenkins_jobs.errors import JenkinsJobsException
from jenkins_jobs.formatter import deep_format
import jenkins_jobs.local_yaml as local_yaml
from jenkins_jobs import utils
__all__ = [
"YamlParser"
]
logger = logging.getLogger(__name__)
def matches(what, glob_patterns):
"""
Checks if the given string, ``what``, matches any of the glob patterns in
the iterable, ``glob_patterns``
:arg str what: String that we want to test if it matches a pattern
:arg iterable glob_patterns: glob patterns to match (list, tuple, set,
etc.)
"""
return any(fnmatch.fnmatch(what, glob_pattern)
for glob_pattern in glob_patterns)
def combination_matches(combination, match_combinations):
"""
    Checks if the given combination matches any of the given combination
    globs; a combination glob is a set of key/value pairs where any key
    missing from the glob is considered to match. For example:
    (key1=2, key2=3)
    would match the combination glob:
(key2=3)
but not:
(key1=2, key2=2)
"""
for cmatch in match_combinations:
for key, val in combination.items():
if cmatch.get(key, val) != val:
break
else:
return True
return False
class YamlParser(object):
def __init__(self, jjb_config=None):
self.data = {}
self.jobs = []
self.views = []
self.jjb_config = jjb_config
self.keep_desc = jjb_config.yamlparser['keep_descriptions']
self.path = jjb_config.yamlparser['include_path']
def load_files(self, fn):
# handle deprecated behavior, and check that it's not a file like
# object as these may implement the '__iter__' attribute.
if not hasattr(fn, '__iter__') or hasattr(fn, 'read'):
logger.warning(
'Passing single elements for the `fn` argument in '
'Builder.load_files is deprecated. Please update your code '
'to use a list as support for automatic conversion will be '
'removed in a future version.')
fn = [fn]
files_to_process = []
for path in fn:
if not hasattr(path, 'read') and os.path.isdir(path):
files_to_process.extend([os.path.join(path, f)
for f in os.listdir(path)
if (f.endswith('.yml')
or f.endswith('.yaml'))])
else:
files_to_process.append(path)
# symlinks used to allow loading of sub-dirs can result in duplicate
# definitions of macros and templates when loading all from top-level
unique_files = []
for f in files_to_process:
if hasattr(f, 'read'):
unique_files.append(f)
continue
rpf = os.path.realpath(f)
if rpf not in unique_files:
unique_files.append(rpf)
else:
logger.warning("File '%s' already added as '%s', ignoring "
"reference to avoid duplicating yaml "
"definitions." % (f, rpf))
for in_file in unique_files:
            # use of ask-for-permissions instead of ask-for-forgiveness
            # performs better for this low-frequency use case.
if hasattr(in_file, 'name'):
fname = in_file.name
else:
fname = in_file
logger.debug("Parsing YAML file {0}".format(fname))
if hasattr(in_file, 'read'):
self._parse_fp(in_file)
else:
self.parse(in_file)
def _parse_fp(self, fp):
# wrap provided file streams to ensure correct encoding used
data = local_yaml.load(utils.wrap_stream(fp), search_path=self.path)
if data:
if not isinstance(data, list):
raise JenkinsJobsException(
"The topmost collection in file '{fname}' must be a list,"
" not a {cls}".format(fname=getattr(fp, 'name', fp),
cls=type(data)))
for item in data:
cls, dfn = next(iter(item.items()))
group = self.data.get(cls, {})
if len(item.items()) > 1:
n = None
for k, v in item.items():
if k == "name":
n = v
break
# Syntax error
raise JenkinsJobsException("Syntax error, for item "
"named '{0}'. Missing indent?"
.format(n))
# allow any entry to specify an id that can also be used
_id = dfn.get('id', dfn['name'])
if _id in group:
self._handle_dups(
"Duplicate entry found in '{0}: '{1}' already "
"defined".format(fp.name, _id))
group[_id] = dfn
self.data[cls] = group
def parse(self, fn):
with io.open(fn, 'r', encoding='utf-8') as fp:
self._parse_fp(fp)
def _handle_dups(self, message):
if not self.jjb_config.yamlparser['allow_duplicates']:
logger.error(message)
raise JenkinsJobsException(message)
else:
logger.warning(message)
def _getJob(self, name):
job = self.data.get('job', {}).get(name, None)
if not job:
return job
return self._applyDefaults(job)
def _getJobGroup(self, name):
return self.data.get('job-group', {}).get(name, None)
def _getJobTemplate(self, name):
job = self.data.get('job-template', {}).get(name, None)
if not job:
return job
return self._applyDefaults(job)
def _applyDefaults(self, data, override_dict=None):
if override_dict is None:
override_dict = {}
whichdefaults = data.get('defaults', 'global')
defaults = copy.deepcopy(self.data.get('defaults',
{}).get(whichdefaults, {}))
if defaults == {} and whichdefaults != 'global':
raise JenkinsJobsException("Unknown defaults set: '{0}'"
.format(whichdefaults))
for key in override_dict.keys():
if key in defaults.keys():
defaults[key] = override_dict[key]
newdata = {}
newdata.update(defaults)
newdata.update(data)
return newdata
def _formatDescription(self, job):
if self.keep_desc:
description = job.get("description", None)
else:
description = job.get("description", '')
if description is not None:
job["description"] = description + \
self._get_managed_string().lstrip()
def _getfullname(self, data):
if 'folder' in data:
return "%s/%s" % (data['folder'], data['name'])
return data['name']
def expandYaml(self, registry, jobs_glob=None):
changed = True
while changed:
changed = False
for module in registry.modules:
if hasattr(module, 'handle_data'):
if module.handle_data(self.data):
changed = True
for job in self.data.get('job', {}).values():
job = self._applyDefaults(job)
job['name'] = self._getfullname(job)
if jobs_glob and not matches(job['name'], jobs_glob):
logger.debug("Ignoring job {0}".format(job['name']))
continue
logger.debug("Expanding job '{0}'".format(job['name']))
self._formatDescription(job)
self.jobs.append(job)
for view in self.data.get('view', {}).values():
view['name'] = self._getfullname(view)
logger.debug("Expanding view '{0}'".format(view['name']))
self._formatDescription(view)
self.views.append(view)
for project in self.data.get('project', {}).values():
logger.debug("Expanding project '{0}'".format(project['name']))
# use a set to check for duplicate job references in projects
seen = set()
for jobspec in project.get('jobs', []):
if isinstance(jobspec, dict):
# Singleton dict containing dict of job-specific params
jobname, jobparams = next(iter(jobspec.items()))
if not isinstance(jobparams, dict):
jobparams = {}
else:
jobname = jobspec
jobparams = {}
job = self._getJob(jobname)
if job:
# Just naming an existing defined job
if jobname in seen:
self._handle_dups("Duplicate job '{0}' specified "
"for project '{1}'"
.format(jobname, project['name']))
seen.add(jobname)
continue
# see if it's a job group
group = self._getJobGroup(jobname)
if group:
for group_jobspec in group['jobs']:
if isinstance(group_jobspec, dict):
group_jobname, group_jobparams = \
next(iter(group_jobspec.items()))
if not isinstance(group_jobparams, dict):
group_jobparams = {}
else:
group_jobname = group_jobspec
group_jobparams = {}
job = self._getJob(group_jobname)
if job:
if group_jobname in seen:
self._handle_dups(
"Duplicate job '{0}' specified for "
"project '{1}'".format(group_jobname,
project['name']))
seen.add(group_jobname)
continue
template = self._getJobTemplate(group_jobname)
# Allow a group to override parameters set by a project
d = type(project)(project)
d.update(jobparams)
d.update(group)
d.update(group_jobparams)
# Except name, since the group's name is not useful
d['name'] = project['name']
if template:
self._expandYamlForTemplateJob(d, template,
jobs_glob)
continue
# see if it's a template
template = self._getJobTemplate(jobname)
if template:
d = type(project)(project)
d.update(jobparams)
self._expandYamlForTemplateJob(d, template, jobs_glob)
else:
raise JenkinsJobsException("Failed to find suitable "
"template named '{0}'"
.format(jobname))
# check for duplicate generated jobs
seen = set()
# walk the list in reverse so that last definition wins
for job in self.jobs[::-1]:
if job['name'] in seen:
self._handle_dups("Duplicate definitions for job '{0}' "
"specified".format(job['name']))
self.jobs.remove(job)
seen.add(job['name'])
return self.jobs, self.views
def _expandYamlForTemplateJob(self, project, template, jobs_glob=None):
dimensions = []
template_name = template['name']
# reject keys that are not useful during yaml expansion
for k in ['jobs']:
project.pop(k)
excludes = project.pop('exclude', [])
for (k, v) in project.items():
tmpk = '{{{0}}}'.format(k)
if tmpk not in template_name:
continue
if type(v) == list:
dimensions.append(zip([k] * len(v), v))
# XXX somewhat hackish to ensure we actually have a single
# pass through the loop
if len(dimensions) == 0:
dimensions = [(("", ""),)]
for values in itertools.product(*dimensions):
params = copy.deepcopy(project)
params = self._applyDefaults(params, template)
params['template-name'] = re.sub(r'({|})', r'\1\1', template_name)
try:
expanded_values = {}
for (k, v) in values:
if isinstance(v, dict):
inner_key = next(iter(v))
expanded_values[k] = inner_key
expanded_values.update(v[inner_key])
else:
expanded_values[k] = v
except TypeError:
project_name = project.pop('name')
logger.error(
"Exception thrown while expanding template '%s' for "
"project '%s', with expansion arguments of:\n%s\n"
"Original project input variables for template:\n%s\n"
"Most likely the inputs have items indented incorrectly "
"to describe how they should be applied.\n\nNote yaml "
"'null' is mapped to python's 'None'", template_name,
project_name,
"".join(local_yaml.dump({k: v}, default_flow_style=False)
for (k, v) in values),
local_yaml.dump(project, default_flow_style=False))
raise
params.update(expanded_values)
try:
params = deep_format(params, params)
except Exception:
logging.error(
"Failure formatting params '%s' with itself", params)
raise
if combination_matches(params, excludes):
logger.debug('Excluding combination %s', str(params))
continue
for key in template.keys():
if key not in params:
params[key] = template[key]
try:
expanded = deep_format(
template, params,
self.jjb_config.yamlparser['allow_empty_variables'])
except Exception:
logging.error(
"Failure formatting template '%s', containing '%s' with "
"params '%s'", template_name, template, params)
raise
expanded['name'] = self._getfullname(expanded)
job_name = expanded.get('name')
if jobs_glob and not matches(job_name, jobs_glob):
continue
self._formatDescription(expanded)
self.jobs.append(expanded)
def _get_managed_string(self):
# The \n\n is not hard coded, because they get stripped if the
# project does not otherwise have a description.
return "\n\n" + MAGIC_MANAGE_STRING
|
Local firm of accountants, Prime Accountants Group have acquired S. Pabari and Co.
Local firm of accountants, Prime Accountants Group have acquired S. Pabari and Co. As a growing business Prime seek opportunities to partner with firms that enhance their service offering to clients.
The two firms take pride in their approach to servicing clients and both Suresh and Prime feel confident in that clients will continue to receive the excellent level of service they have become accustomed to.
Suresh, Director and Owner of S. Pabari and Co. comments: “I am delighted to be joining Prime Accountants Group. It was important for me to plan for the future and join a firm that would deliver the same level of client service I have always given my clients. Prime were able to demonstrate this with their brand values which are embraced throughout the firm."
Jeremy Kitson, Prime Director, said: "We are amidst an exciting time for Prime Accountants Group and the acquisition of S. Pabari and Co. demonstrates our commitment to growth and expansion of our specialist sector experience.
"Suresh and his team will enhance our service offering. We will be able to provide S. Pabari and Co. clients a wider range of services not typically available from a traditional accountancy firm." |
from unicodedata import category
from django.core.management.base import BaseCommand, CommandError
from workflows.models import Category, AbstractWidget, AbstractInput, AbstractOutput, AbstractOption
from django.core import serializers
from optparse import make_option
import uuid
import os
import sys
from django.conf import settings
import json
from .export_package import export_package
class Command(BaseCommand):
args = 'package_name'
help = 'Exports all packages.'
def handle(self, *args, **options):
packages = []
for app in settings.INSTALLED_APPS:
if 'workflows.' in app:
packages.append(app)
for package in packages:
package_name = package.split('workflows.')[1]
self.stdout.write("Exporting package "+package_name+"\n")
export_package(package_name,self.stdout)
#temporary fix
#self.stdout.write("Exporting cf_nlp package \n")
#export_package('nlp',self.stdout, dest_folder='/home/matej/platforms/clowdflows-env/cf_nlp/nlp') |
During this workshop each participant was able to inject mushroom spawn in a log and take the log home. Rick, our instructor, shared with the class that each log should sprout mushrooms by early fall. Our other instructor Margaret gave a tour of their greenhouse and she described the business opportunities that came from growing, harvesting, and selling mushrooms. |
#coding=utf-8
'''
Created on 2014-06-18
K-means algorithm: cluster the data around K centroids.
Distance measure: Euclidean distance.
Centroid update: mean of the assigned samples.
@author: sjm
'''
import numpy as np
import random
def Euclid_dist(x,y):
if len(y.shape)==1:
return np.sqrt(np.sum(np.sum((x-y)**2)))
elif len(y.shape)==2:
return np.sqrt(np.sum((x-y)**2,axis=1))
else:
raise ValueError('error x or y shape')
def dist(x,y):
    '''
    Compute the Euclidean distance between two data points.
    '''
    return np.sqrt(np.sum((x-y)**2))
def distMat(X,Y):
    '''
    Compute the distance matrix between two data sets, i.e. the distance from
    every row of X to every row of Y.
    '''
    mat=[[dist(x,y) for y in Y] for x in X]
    return np.array(mat)
def sum_dist(data,label,center):
s=0
for i in range(data.shape[0]):
s+=dist(data[i],center[label[i]])
return s
def kmeans(data,cluster,threshold=1.0e-19,maxIter=100):
data=np.array(data)
d1,d2=data.shape
'''
find the label
'''
batch=np.random.permutation(d1)
center=data[batch[0:cluster],:]
print(center.shape)
    # labels are used as row indices into `center`, so they must be integers
    labels=np.zeros((d1,),dtype=int)
last_cost=0
for ii in xrange(0,d1):
d=Euclid_dist(data[ii,:],center[labels[ii],:])
last_cost+=d
for index in xrange(0,maxIter):
        '''
        Assign each sample to its nearest centroid
        '''
for ii in xrange(0,d1):
this_data=data[ii,:]
d=Euclid_dist(this_data,center)
label=np.argmin(d)
labels[ii]=label
for ii in xrange(0,cluster):
batch_no=(labels==ii).nonzero()
batch=data[batch_no]
m=np.mean(batch,axis=0)
#print(m.shape)
center[ii,:]=m
#print(center)
current_cost=0
for ii in xrange(0,d1):
d=Euclid_dist(data[ii,:],center[labels[ii],:])
current_cost+=d
if last_cost-current_cost<threshold:
break
else:
last_cost=current_cost
return center
'''
def kmeans2(data,cluster,threshold=1.0e-19,maxIter=100):
m=len(data)
labels=np.zeros(m)
#cluster=None
center=np.array(random.sample(data,cluster))
s=sum_dist(data,labels,center)
n=0
while 1:
n=n+1
tmp_mat=distMat(data,center)
labels=tmp_mat.argmin(axis=1)
for i in xrange(cluster):
idx=(labels==i).nonzero()
m=np.mean(data[idx[0]],axis=0)
center[i]=m
#d_i=data[idx[0]]
#d_i=d_i[0]
s1=sum_dist(data,labels,center)
if s-s1<threshold:
break;
s=s1
if n>maxIter:
break;
return center
'''
if __name__=='__main__':
from scipy.io import loadmat,savemat
data=loadmat(r'E:\code\matlab\DeepLearnToolbox-master\data\mnist_uint8.mat')
train_x=np.asarray(data['train_x'],np.float)/255.0
codebook=kmeans(train_x,10)
savemat('codebook.mat',{'C':codebook}) |
Looking for the perfect gift? our range of men's socks from Barbour and Pringle come in a choice of colour. These men's socks are stylish and modern and great for everyday wear to brighten up your footwear.
We also stock a range of Mens Socks Gift packs in a range of colours and designs. These men's socks make the perfect gift or are great for everyday use.
For other gift ideas visit our men's clothing and accessories to see our full range on offer! |
#!/usr/bin/env python
import os
import sys
import re
import time
import subprocess
import tempfile
import xml.dom.minidom as xmldom
ac3fifo = False
def GetText(n):
return n.childNodes.item(0).data
def CreateFifo(suffix='.ac3'):
tfile, tname = tempfile.mkstemp(suffix=suffix)
os.close(tfile)
os.unlink(tname)
os.mkfifo(tname)
return tname
filesToDelete = []
def main():
if len(sys.argv) < 3:
print "Usage: %s infile outfile [format]" % sys.argv[0]
sys.exit(1)
infile = sys.argv[1]
outfile = sys.argv[2]
fmt = 'ac3'
if len(sys.argv) >= 4:
fmt = sys.argv[3]
if not os.path.exists(infile):
print "%s not exists" % infile
sys.exit(1)
if not os.access(os.path.dirname(os.path.realpath(outfile)), os.W_OK):
print "File \"%s\" could not be written" % os.path.realpath(outfile)
sys.exit(1)
p = subprocess.Popen(['mkvinfo', '-s', infile], stdout=subprocess.PIPE)
tracksToConvert = []
tracksToCopy = []
# TODO: preserve track's language
for line in p.stdout.xreadlines():
if line.startswith("Track"):
r = re.search("Track [0-9]+: ([^,]+), codec ID: ([^,]+), mkvmerge[^0-9]+([0-9]+),.*", line)
if r and r.groups()[0] == 'audio':
id = r.groups()[2]
srcfmt = ['A_DTS']
if fmt == 'mp3':
srcfmt = ['A_DTS', 'A_AAC', 'A_AC3']
if not r.groups()[1] in srcfmt:
tracksToCopy.append(id)
else:
tracksToConvert.append(id)
else:
p.kill()
p.wait()
break
if not tracksToConvert:
print "Nothing to convert"
return 0
tracks = []
for i in tracksToConvert:
dts = CreateFifo(suffix='.dts')
if ac3fifo:
ac3 = CreateFifo(suffix='.'+fmt)
else:
tfile, ac3 = tempfile.mkstemp(suffix='.'+fmt)
os.close(tfile)
filesToDelete.append(dts)
filesToDelete.append(ac3)
tracks.append((i, dts, ac3))
# Extractor
cmdline = ['mkvextract', 'tracks', infile]
for id, dts, ac3 in tracks:
cmdline += ['%s:%s' % (id, dts)]
print cmdline
p_extract = subprocess.Popen(cmdline, stdout=subprocess.PIPE)
devnull = os.open('/dev/null', os.O_WRONLY)
convs = []
# Converters
for id, dts, ac3 in tracks:
#cmdline = ['ffmpeg', '-v', '3', '-y', '-i', dts, '-alang', 'rus', '-ab', '448k', '-ar', '48000', '-ac', '6', '-acodec', 'ac3', ac3]
cmdline = []
if fmt == 'ac3':
cmdline = ['avconv', '-threads', 'auto', '-y', '-i', dts, '-b', '448k', '-ar', '48000', '-q', '0', '-ac', '6', '-acodec', 'ac3', ac3]
else:
cmdline = ['avconv', '-threads', 'auto', '-y', '-i', dts, '-b', '256k', '-ar', '48000', '-q', '0', '-acodec', 'libmp3lame', ac3]
print cmdline
if not ac3fifo:
p = subprocess.Popen(cmdline, stdout=devnull, stderr=devnull)
else:
p = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=devnull)
p1 = subprocess.Popen(['bash', '-c', 'cat > %s' % ac3], stdin = p.stdout)
convs.append((p1, None))
convs.append((p, cmdline))
# Wait for extract and convert
if not ac3fifo:
out_e = p_extract.communicate()
if p_extract.returncode != 0:
print "Extract failed, %s" % str(out_e)
return 2
for i, cmdline in convs:
out = i.communicate()
if i.returncode != 0:
print "Convert (%s) failed, %s" % (str(cmdline), str(out))
return 3
# Merger
cmdline = ['mkvmerge', '-q', '-o', outfile]
for id, dts, ac3 in tracks:
cmdline += [ac3]
if tracksToCopy:
cmdline += ['-a', ",".join(tracksToCopy)]
else:
cmdline += ['-A']
cmdline += [infile]
print cmdline
p_merge = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out = p_merge.communicate()
if p_merge.returncode != 0:
print "Merge failed: [%s], [%s]" % (out[0], out[1])
return 1
print "Ok"
return 0
if __name__ == '__main__':
res = 1
#try:
res = main()
#except Exception, e:
# print e
# pass
for i in filesToDelete:
try:
os.unlink(i)
except:
pass
sys.exit(res)
|
There are not enough words in the English dictionary to describe how much I love this dish. It just screams home comfort and I love the fact that you can eat this on its own and feel 100 per cent satisfied. You can literally add whatever you want: fish, meat, veg… it doesn’t matter as it will all taste great.
Think of it as a Chinese bubble and squeak – whatever you have in your fridge, lob it in!
There are just a couple of rules to making fried rice that will guarantee a fabulous result.
1) Use day-old rice that has been cooled in the fridge. The reason for this is simple: rice that has sat overnight in the fridge will have dried out, whereas if you use hot, freshly cooked rice it will turn sticky and need a lot of oil to cook.
2) Don’t use too much oil. This dish has been given a bad reputation over the years because of the word ‘fried’ in its title. But it’s not deep-fried like chips. You only use as much oil as you would need to stir-fry the healthiest vegetable dish.
That’s it . . . so simple. Viva La Rice!
PREPARATION TIME 5 MINUTES. COOKING TIME 10 MINUTES.
1. Heat a wok until medium hot and add a dash of oil. Add the Chinese sausage slices and fry quickly on both sides until coloured all over, with the white parts turning pink. Remove and set aside. Beat the eggs in a bowl and season with salt and pepper.
2. Add 1 tablespoon of oil to the hot wok and pour in the beaten eggs. Scramble the eggs, scraping the bits that are sticking to the wok. Aim for well-cooked scrambled egg that is separating into individual pieces. Once cooked, add the rice, scraping the bottom of the pan and tossing the rice as you heat it through. Once the rice is hot, add the remaining ingredients, except for the sausage. Continue to cook over a medium heat, tossing and mixing. Once the anchovies are beginning to melt into the hot rice, taste and season with pepper and a pinch of salt. Continue to stir-fry for 3–4 minutes, then taste and adjust the seasoning, adding more soy sauce if necessary.
3. Serve the fried rice with the cooked Chinese sausage, chilli oil and lettuce, wrapping spoonfuls of the rice inside the lettuce leaves to eat. |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cint
from frappe import _
from frappe.desk.notifications import clear_notifications
@frappe.whitelist()
def delete_company_transactions(company_name):
frappe.only_for("System Manager")
doc = frappe.get_doc("Company", company_name)
if frappe.session.user != doc.owner:
frappe.throw(_("Transactions can only be deleted by the creator of the Company"),
frappe.PermissionError)
delete_bins(company_name)
delete_lead_addresses(company_name)
for doctype in frappe.db.sql_list("""select parent from
tabDocField where fieldtype='Link' and options='Company'"""):
if doctype not in ("Account", "Cost Center", "Warehouse", "Budget",
"Party Account", "Employee", "Sales Taxes and Charges Template",
"Purchase Taxes and Charges Template", "POS Profile", 'BOM'):
delete_for_doctype(doctype, company_name)
# Clear notification counts
clear_notifications()
def delete_for_doctype(doctype, company_name):
meta = frappe.get_meta(doctype)
company_fieldname = meta.get("fields", {"fieldtype": "Link",
"options": "Company"})[0].fieldname
if not meta.issingle:
if not meta.istable:
# delete communication
delete_communications(doctype, company_name, company_fieldname)
# delete children
for df in meta.get_table_fields():
frappe.db.sql("""delete from `tab{0}` where parent in
(select name from `tab{1}` where `{2}`=%s)""".format(df.options,
doctype, company_fieldname), company_name)
# delete parent
frappe.db.sql("""delete from `tab{0}`
where {1}= %s """.format(doctype, company_fieldname), company_name)
# reset series
naming_series = meta.get_field("naming_series")
if naming_series and naming_series.options:
prefixes = sorted(naming_series.options.split("\n"), lambda a, b: len(b) - len(a))
for prefix in prefixes:
if prefix:
last = frappe.db.sql("""select max(name) from `tab{0}`
where name like %s""".format(doctype), prefix + "%")
if last and last[0][0]:
last = cint(last[0][0].replace(prefix, ""))
else:
last = 0
frappe.db.sql("""update tabSeries set current = %s
where name=%s""", (last, prefix))
def delete_bins(company_name):
frappe.db.sql("""delete from tabBin where warehouse in
(select name from tabWarehouse where company=%s)""", company_name)
def delete_lead_addresses(company_name):
"""Delete addresses to which leads are linked"""
leads = frappe.get_all("Lead", filters={"company": company_name})
leads = [ "'%s'"%row.get("name") for row in leads ]
addresses = []
if leads:
addresses = frappe.db.sql_list("""select parent from `tabDynamic Link` where link_name
in ({leads})""".format(leads=",".join(leads)))
if addresses:
addresses = ["'%s'"%addr for addr in addresses]
frappe.db.sql("""delete from tabAddress where name in ({addresses}) and
name not in (select distinct dl1.parent from `tabDynamic Link` dl1
inner join `tabDynamic Link` dl2 on dl1.parent=dl2.parent
and dl1.link_doctype<>dl2.link_doctype)""".format(addresses=",".join(addresses)))
frappe.db.sql("""delete from `tabDynamic Link` where link_doctype='Lead'
and parenttype='Address' and link_name in ({leads})""".format(leads=",".join(leads)))
frappe.db.sql("""update tabCustomer set lead_name=NULL where lead_name in ({leads})""".format(leads=",".join(leads)))
def delete_communications(doctype, company_name, company_fieldname):
frappe.db.sql("""
DELETE FROM `tabCommunication` WHERE reference_doctype = %s AND
EXISTS (SELECT name FROM `tab{0}` WHERE {1} = %s AND `tabCommunication`.reference_name = name)
""".format(doctype, company_fieldname), (doctype, company_name))
|
Sometimes it’s all too easy to knock Samsung when it comes to the security of its phones. The company has long produced handsets with face scanning technology that can be fooled by a photo, and for that reason some people fell back to using a good old fashioned fingerprint.
That, however, isn’t all that secure either. Especially if you’re using a Galaxy S10. As much was proven recently when one Imgur user shared how he faked his way into his own device, and it was surprisingly easy.
Posting over on Imgur, user darkshark shared the project that was needed to gain access to his Galaxy S10. Ultimately, it involved taking a photo of his own fingerprint on a wineglass and then using Photoshop to process the image.
Once it was to his liking the image was passed to 3ds Max, which then allowed him to turn it into a 3D image. Then, it was printed in a process that took an additional 13 minutes.
Samsung is using a capacitive fingerprint sensor in the Galaxy S10, rather than the slower ultrasonic sensors that other phone makers are using for in-display fingerprint scanners. Capacitive scanners are supposed to be more accurate, and harder to fool, but alas it seems nothing is perfect.
To that end, it’s important to note that other phones have been fooled into unlocking despite biometric measures before. Samsung’s own face unlock feature in Galaxy S10 can be fooled with a simple video as previously reported. |
from django import forms
from django.core.exceptions import ValidationError
from django.forms import ModelForm
from survey.models import QuestionSetChannel
from survey.models import WebAccess
from survey.models import Batch
def get_question_set_form(model_class):
class QuestionSetForm(ModelForm):
access_channels = forms.MultipleChoiceField(
widget=forms.CheckboxSelectMultiple(
attrs={
'class': 'access_channels'}), choices=[
opt for opt in QuestionSetChannel.ACCESS_CHANNELS if not opt[0] == WebAccess.choice_name()])
def __init__(self, *args, **kwargs):
if kwargs.get('instance'):
initial = kwargs.setdefault('initial', {})
initial['access_channels'] = [
c.channel for c in kwargs['instance'].access_channels.all()]
#self.fields['validation'] = kwargs['instance']
super(QuestionSetForm, self).__init__(*args, **kwargs)
class Meta:
model = model_class
fields = ['name', 'description', ]
widgets = {
'name': forms.TextInput(
attrs={
'size': 29,
'title': 'Your name',
'style': 'height: 2em;width:231px;'}),
'description': forms.Textarea(
attrs={
"rows": 5,
"cols": 30}),
}
def clean_name(self):
name = self.cleaned_data['name'].strip()
            # With a ModelForm, self.instance is never None; check the pk so the
            # uniqueness check actually runs when creating a new object.
            if self.instance.pk is None and model_class.objects.filter(
name=name).exists():
raise ValidationError('Name already exists')
return name
def save(self, commit=True, **kwargs):
question_set = super(QuestionSetForm, self).save(commit=commit)
bc = QuestionSetChannel.objects.filter(qset=question_set)
bc.delete()
for val in kwargs['access_channels']:
QuestionSetChannel.objects.create(
qset=question_set, channel=val)
return question_set
return QuestionSetForm
class BatchForm(get_question_set_form(Batch)):
class Meta:
model = Batch
fields = ['name', 'description', 'survey', ]
widgets = {
'description': forms.Textarea(attrs={"rows": 4, "cols": 40}),
'survey': forms.HiddenInput(),
}
#
# class BatchQuestionsForm(ModelForm):
# questions = forms.ModelMultipleChoiceField(label=u'', queryset=QuestionTemplate.objects.filter(),
# widget=forms.SelectMultiple(attrs={'class': 'multi-select'}))
#
# class Meta:
# model = Batch
# fields = []
#
# def __init__(self, batch=None, *args, **kwargs):
# super(BatchQuestionsForm, self).__init__(*args, **kwargs)
# def save_question_to_batch(self, batch):
# for question in self.cleaned_data['questions']:
# question.save()
# order = BatchQuestionOrder.next_question_order_for(batch)
# BatchQuestionOrder.objects.create(question=question, batch=batch, order=order)
# question.batches.add(batch)
#
# def save(self, commit=True, *args, **kwargs):
# batch = super(BatchQuestionsForm, self).save(commit=commit, *args, **kwargs)
#
# if commit:
# batch.save()
# self.save_question_to_batch(batch)
|
Y’all seemed to love the last Amazon haul I did a couple weeks ago, so I thought I’d share what I bought in my most recent order! For the most part, I was shopping for workout clothes, so that’s the bulk of what you’ll see here. There are also some cute fall clothing, a couple loungewear pieces that are sooo comfortable, a new watch band, and the best razors I’ve ever found! (Random, I know. haha!) By the way, I feel like my floor always gets so dirty when I try clothes on because little pieces of fuzz are flying off the clothes and onto the floor, so just disregard the fact that it needs to be vacuumed!
Also, I’m wearing this star necklace in every picture. It’s a big favorite! Also available here and here.
Aren’t these $23 yoga pants so cute? They have a high waist and are really flattering on. I’m wearing my normal size small.
Here’s another pair of workout pants, but these have a 7/8th crop length. This is a really cool concept because you can “build your own” workout legging. You can choose the length and the waistband height!
The top I have on above is a MUST HAVE. I wore it immediately after getting it in! It’s only $15 and the perfect top to throw on for the gym. Everything about it is great… and it comes in other colors, too.
I’m wearing a medium! Here’s the green one.
I had to get the black one, too. I’m wearing these pants I just talked about above!
These are the most comfortable joggers! I mean, I truly want to live in them. They come in 4 colors! I have on the light heather gray in size small.
I’m awkwardly holding my shirt up so that you can see the waistband! These have to be my favorite leggings I ordered. You can pick what type of waistband you want… I chose this criss cross style, which is super flattering because it really hits over the love handles. These are SO comfortable and really flattering! They are very similar to the Lululemon Align leggings, but with a little more support. I’m wearing a S.
Oh and THESE joggers. I love them too!! These have a more substantial fabric to them than the grey ones I shared above. Equally as comfortable, but more of a structured look and feel.
These workout leggings are nearly identical to the Lululemon Align leggings, but they are only $39! The fabric… the waistband… the length. I’m AMAZED. I really want these in black, but they are currently sold out. I swear I’ll buy 5 pairs of them!
Everybody RUN AND GET THESE.
Here’s another great workout top! You could even wear this one layered under a jacket for fall. I love how it ties in the back. It comes in 5 different colors. I’m wearing a medium! I have it on with the very first workout pants I linked in this post.
So this dress isn’t zipping in the back for me, but I wanted to share it anyway because it has so much potential to be so cute! It’s only $25. Here’s the thing, the listing says it’s based on Asia sizing, so it does run way small. I’m wearing a medium, but I think I need an XL! It has many different color options and I love the wrap/ruffle design. It doesn’t have the thickest material in the world, but it’s not see through. I think it would be great with over-the-knee boots for fall.
Cute top! Has so many good reviews and I can see why. The sleeves are a fun detail and I love that it comes in a big variety of colors.
I’m wearing this top in the khaki color, although it doesn’t look nearly like this on the listing. Wearing a small!
This is a cute babydoll-style dress for fall. I’d pair it with brown boots once the weather cools off enough. Wearing a small.
This is random, but this is the best razor I’ve ever used! I bought this pack of 9. I’m really into disposable razors because I appreciate being able to toss the whole thing in the trash and start fresh—new handle and all.
You really can’t beat this watch band if you have an Apple watch. It looks and feels like real leather. Under $20 and a huge variety of color and hardware options. This is the “camel” color with silver hardware.
Believe it or not, they actually have these exact razors at Aldi. I don’t remember the pricing, but of course it’s cheap. They’re so good- the conditioning strips last a long time!
What size did you get in the babydoll/ tiered dress? Also, what size did you get in the workout tank that you wore in multiple colors?
Small in the dress and medium in the tops!
Loving these Amazon hauls! I’ve gotten plenty from each post. Thank you so much!! Been following you for years and love your content. Cheers from Dallas!
The links aren’t working for Amazon. Can you help? Thanks!
Hey! Love all of this – thank you! Quick Q: for reference, what size is your Apple Watch? 38mm or 40mm? |
# Special value
ALP_DEFAULT = 0
ALP_INVALID_ID = 2 ** 32 - 1 # ulong maximum
# Return value
ALP_OK = 0
ALP_NOT_ONLINE = 1001
ALP_NOT_IDLE = 1002
ALP_NOT_AVAILABLE = 1003
ALP_NOT_READY = 1004
ALP_PARM_INVALID = 1005
ALP_ADDR_INVALID = 1006
ALP_MEMORY_FULL = 1007
ALP_SEQ_IN_USE = 1008
ALP_HALTED = 1009
ALP_ERROR_INIT = 1010
ALP_ERROR_COMM = 1011
ALP_DEVICE_REMOVED = 1012
ALP_NOT_CONFIGURED = 1013
ALP_LOADER_VERSION = 1014
ALP_ERROR_POWER_DOWN = 1018
# Device inquire and control types
ALP_DEVICE_NUMBER = 2000
ALP_VERSION = 2001
ALP_AVAIL_MEMORY = 2003
ALP_SYNCH_POLARITY = 2004
ALP_LEVEL_HIGH = 2006
ALP_LEVEL_LOW = 2007
ALP_TRIGGER_EDGE = 2005
ALP_EDGE_FALLING = 2008
ALP_EDGE_RISING = 2009
ALP_DEV_DMDTYPE = 2021
ALP_DMDTYPE_XGA = 1
ALP_DMDTYPE_1080P_095A = 3
ALP_DMDTYPE_XGA_07A = 4
ALP_DMDTYPE_XGA_055X = 6
ALP_DMDTYPE_WUXGA_096A = 7
ALP_DMDTYPE_DISCONNECT = 255
# TODO check if these constant values exist...
# ALP_DMDTYPE_XGA_055A =
# ALP_DMDTYPE_SXGA_PLUS =
# ALP_DMDTYPE_WQXGA_400MHZ_090A =
# ALP_DMDTYPE_WQXGA_480MHZ_090A =
ALP_USB_CONNECTION = 2016
ALP_DEV_DYN_SYNCH_OUT1_GATE = 2023
ALP_DEV_DYN_SYNCH_OUT2_GATE = 2024
ALP_DEV_DYN_SYNCH_OUT3_GATE = 2025
ALP_DDC_FPGA_TEMPERATURE = 2050
ALP_APPS_FPGA_TEMPERATURE = 2051
ALP_PCB_TEMPERATURE = 2052
ALP_DEV_DISPLAY_HEIGHT = 2057
ALP_DEV_DISPLAY_WIDTH = 2058
ALP_PWM_LEVEL = 2063
ALP_DEV_DMD_MODE = 2064
ALP_DMD_POWER_FLOAT = 1
# Sequence inquire and control types
ALP_BITPLANES = 2200
ALP_BITNUM = 2103
ALP_BIN_MODE = 2104
ALP_BIN_NORMAL = 2105
ALP_BIN_UNINTERRUPTED = 2106
ALP_PICNUM = 2201
ALP_FIRSTFRAME = 2101
ALP_LASTFRAME = 2102
ALP_FIRSTLINE = 2111
ALP_LASTLINE = 2112
ALP_LINE_INC = 2113
ALP_SCROLL_FROM_ROW = 2123
ALP_SCROLL_TO_ROW = 2124
ALP_SEQ_REPEAT = 2100
ALP_PICTURE_TIME = 2203
ALP_MIN_PICTURE_TIME = 2211
ALP_MAX_PICTURE_TIME = 2213
ALP_ILLUMINATE_TIME = 2204
ALP_MIN_ILLUMINATE_TIME = 2212
ALP_ON_TIME = 2214
ALP_OFF_TIME = 2215
ALP_SYNCH_DELAY = 2205
ALP_MAX_SYNCH_DELAY = 2209
ALP_SYNCH_PULSEWIDTH = 2206
ALP_TRIGGER_IN_DELAY = 2207
ALP_MAX_TRIGGER_IN_DELAY = 2210
ALP_DATA_FORMAT = 2110
ALP_DATA_MSB_ALIGN = 0
ALP_DATA_LSB_ALIGN = 1
ALP_DATA_BINARY_TOPDOWN = 2
ALP_DATA_BINARY_BOTTOMUP = 3
ALP_SEQ_PUT_LOCK = 2117
ALP_FLUT_MODE = 2118
ALP_FLUT_NONE = 0
ALP_FLUT_9BIT = 1
ALP_FLUT_18BIT = 2
ALP_FLUT_ENTRIES9 = 2120
ALP_FLUT_OFFSET9 = 2122
ALP_PWM_MODE = 2107
ALP_FLEX_PWM = 3
# Projection inquire and control types
ALP_PROJ_MODE = 2300
ALP_MASTER = 2301
ALP_SLAVE = 2302
ALP_PROJ_STEP = 2329
ALP_PROJ_STATE = 2400
ALP_PROJ_ACTIVE = 1200
ALP_PROJ_IDLE = 1201
ALP_PROJ_INVERSION = 2306
ALP_PROJ_UPSIDE_DOWN = 2307
ALP_PROJ_QUEUE_MODE = 2314
ALP_PROJ_LEGACY = 0
ALP_PROJ_SEQUENCE_QUEUE = 1
ALP_PROJ_QUEUE_ID = 2315
ALP_PROJ_QUEUE_MAX_AVAIL = 2316
ALP_PROJ_QUEUE_AVAIL = 2317
ALP_PROJ_PROGRESS = 2318
ALP_FLAG_QUEUE_IDLE = 1
ALP_FLAG_SEQUENCE_ABORTING = 2
ALP_FLAG_SEQUENCE_INDEFINITE = 4
ALP_FLAG_FRAME_FINISHED = 8
ALP_PROJ_RESET_QUEUE = 2319
ALP_PROJ_ABORT_SEQUENCE = 2320
ALP_PROJ_ABORT_FRAME = 2321
ALP_PROJ_WAIT_UNTIL = 2323
ALP_PROJ_WAIT_PIC_TIME = 0
ALP_PROJ_WAIT_ILLU_TIME = 1
ALP_FLUT_MAX_ENTRIES9 = 2324
ALP_FLUT_WRITE_9BIT = 2325
ALP_FLUT_WRITE_18BIT = 2326
# LED types
ALP_HLD_PT120_RED = 257
ALP_HLD_PT120_GREEN = 258
ALP_HLD_PT120_BLUE = 259
ALP_HLD_PT120_UV = 260
ALP_HLD_CBT90_WHITE = 262
ALP_HLD_PT120TE_BLUE = 263
ALP_HLD_CBT140_WHITE = 264
# LED inquire and control types
ALP_LED_SET_CURRENT = 1001
ALP_LED_BRIGHTNESS = 1002
ALP_LED_FORCE_OFF = 1003
ALP_LED_AUTO_OFF = 0
ALP_LED_OFF = 1
ALP_LED_ON = 2
ALP_LED_TYPE = 1101
ALP_LED_MEASURED_CURRENT = 1102
ALP_LED_TEMPERATURE_REF = 1103
ALP_LED_TEMPERATURE_JUNCTION = 1104
# Extended LED inquire and control types
ALP_LED_ALLOC_PARAMS = 2101
# TODO correct following lines, different constants can have the same value (e.g. ALP_DEFAULT and ALP_LED_AUTO_OFF)
# look_up_table = dict([
# (value, key)
# for key, value in globals().items()
# if key[0:4] == "ALP_"
# ])
#
#
# def constant_to_string(constant):
# """TODO add docstring"""
# string = look_up_table[constant]
# return string
dmd_type_look_up_table = dict([
(value, key)
for key, value in globals().items()
if key[0:12] == "ALP_DMDTYPE_"
])
def dmd_type_constant_to_string(dmd_type_constant):
"""TODO add docstring"""
dmd_type_string = dmd_type_look_up_table[dmd_type_constant]
return dmd_type_string
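# A minimal usage sketch (not part of the original constants file): translate a raw
# ALP_DMDTYPE_* value, e.g. one obtained from a device inquiry with ALP_DEV_DMDTYPE,
# back into its symbolic name. The hard-coded value below is only a stand-in for a
# value actually read from the hardware.
if __name__ == "__main__":
    dmd_type_value = ALP_DMDTYPE_1080P_095A  # stand-in for a value read from the device
    print(dmd_type_constant_to_string(dmd_type_value))  # -> "ALP_DMDTYPE_1080P_095A"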
|
Bodegas Izadi stepped into the world of rosé wine with Larrosa, a singular wine made from 100% Garnacha grown on old vines at 790 meters above sea level; the vineyard may be the highest in D.O.Ca. Rioja.
From this first vintage we produce a limited edition of only 18,901 bottles, and we chose a special day, San Valentín, the day on which roses sell best all year, to release Larrosa on the market and match its romantic, feminine concept.
Izadi Larrosa is handpicked into small 15 kg cases. Taking advantage of gravity, we use a slow "bleeding" winemaking process to draw off the first-run Garnacha rosé. Only this rosé is labelled as Larrosa, so that it impresses consumers with its floral notes, freshness and elegance.
Izadi Larrosa joins the IZADI family as the youngest and freshest wine in the catalogue, with an attractive, elegant label, well suited to fresh spring tapas, pastas and rice.
# Copyright 2015-2017 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class NetseenException(Exception):
'''
    Base class for customized Netseen exceptions
'''
message = 'An unknown exception occurred'
def __init__(self, **kwargs):
try:
super(NetseenException, self).__init__(self.message % kwargs)
self.msg = self.message % kwargs
except Exception:
# at least get the core message out if something happened
super(NetseenException, self).__init__(self.message)
    def __unicode__(self):
        # Python 2 only: the unicode() builtin does not exist on Python 3
        return unicode(self.msg)
def log(self):
'''
        log the exception message (not implemented)
'''
pass
class NotFound(NetseenException):
"""A generic not found exception."""
pass
class BadRequest(NetseenException):
"""An exception indicating a generic bad request for a said resource.
A generic exception indicating a bad request for a specified resource.
"""
message = 'Bad %(resource)s request: %(msg)s.'
class ObjectFieldInvalid(NetseenException):
"""the field value of object is invalid
"""
message = "Field %(field)s of %(objname)s is not an instance of Field"
class Conflict(NetseenException):
"""A generic conflict exception."""
pass
class NotAuthorized(NetseenException):
"""A generic not authorized exception."""
message = "Not authorized."
class ServiceUnavailable(NetseenException):
"""A generic service unavailable exception."""
message = "The service is unavailable."
class ObjectNotFound(NotFound):
"""A not found exception indicating an identifiable object isn't found.
A specialization of the NotFound exception indicating an object with a said
ID doesn't exist.
:param id: The ID of the (not found) object.
"""
message = "Object %(id)s not found."
|
When you are involved in an accident with a driver who is under the influence of drugs or alcohol, you have a right to collect compensation. You can file a lawsuit against the drunk driver and collect the money you need to pay for medical expenses, pain and suffering, and lost wages. Unfortunately, this isn’t always easy and there are some additional factors to consider in drunk driving accidents. Understanding what a DUI crash victim’s rights are is crucial to collecting the money you need to recover.
After a DUI accident, the drunk driver will likely be arrested and charged with a crime. The police may interview you and ask you for details of the accident, and they will collect a BAC reading from the drunk driver. Because drunk driving is a serious crime that can result in jail time, the police report and investigation are critical to their case. However, it is important to know that criminal action is different from civil action. Even though the police may file their report and charge the driver with DUI, that does not mean that you automatically get compensation.
Even though criminal charges are filed, you still need an experienced attorney on your side to help you collect the compensation you are entitled to. Your lawyer will need to investigate the accident and build a case that is designed to prove fault. Even if the driver was drunk at the time of the accident, if you were also to blame for the crash, you may not recover all of the money you need. In addition, if the driver was uninsured or underinsured, you may not be able to recover enough money to pay for all of your injuries. Your car accident attorney will help you during this difficult time and will be a strong legal advocate for you and your family after your accident.
If your lawyer can successfully establish these three elements, you may be able to collect compensation to pay for medical expenses, lost wages, loss of future earnings, pain and suffering and more. In some cases, you may also be entitled to punitive damages.
If you or someone you love has been injured in a car accident, we can help. Call Pueblo attorney Mickey W. Smith, today for a FREE CONSULTATION – (719) 544-0062. We know how to investigate DUI crashes to obtain maximum compensation for our clients. We also know the tricks insurance companies will use to reduce your payout and protect their own assets and we fight hard to seek justice for our clients.
"""struct_normalizer.py - Normalize data structures to a requested schema
Given a nested data structure `struct` it can be normalized into a specified
form like so:
    result = normalize_value(ctx, struct, to=list_of(
        map_with(
            field1=fallback_option(mandatory(
                scalar(type=str, else_='Invalid value for field1'),
                else_='Missing configuration field: field1',
            )),
            field2=scalar(),
            field3=list_of(scalar(type=int)),
            field4=list_of(
                map_with(
                    x=mandatory(scalar(type=int)),
                    y=mandatory(scalar(type=int)),
                ),
            ),
        ),
    ))
The `ctx` variable is an application-specific context. It is passed to all
normalization functions and is meant to make it easier to write custom
normalization functions that accept custom application data.
Normalization functions are simply functions with the following signature:
def normalizer(ctx, value)
The normalizer functions are meant to return a normalized value or raise an
exception. While this module provides a set of generic normalization functions
it is expected that applications would implement custom functions to perform
more complex data manipulations.
"""
try:  # Python 3.3+ moved the ABCs to collections.abc
    from collections.abc import Mapping, Iterable
except ImportError:  # Python 2 fallback
    from collections import Mapping, Iterable
from functools import wraps
from six import string_types, iteritems
class DataNormalizationError(Exception):
pass
def scalar(type=None, else_='Invalid scalar specified'):
"""A normalization function generator for scalar values
:param function type: Optional conversion function to convert a value to a
desired type, if not given, scalar value would be
returned as-is
:param str else_: Optional error message to raise if value is not a
scalar
:rtype: function
:returns: A function that accepts a context and a value and returns a
scalar if the value could be converted into it, or raises the
given error message in a DataNormalizationError exception.
If `type` function is given, it is called with the value and the
              result is returned. If it raises a ValueError exception,
a DataNormalizationError will be raised instead.
"""
def normalizer(ctx, value):
if isinstance(value, Iterable) and not isinstance(value, string_types):
raise DataNormalizationError(else_)
if type is None:
return value
try:
return type(value)
except ValueError:
raise DataNormalizationError(else_)
return normalizer
def list_of(member_type):
"""A normalization function generator for list values
:param function member_type: A normalization function for members of the
list
:rtype: function
:returns: A function that accepts a context and a value and returns a list
where each member had been normalized with the given
normalization function. If the value is not a list, it is
converted into a list of a single normalized value.
"""
def normalizer(ctx, value):
if isinstance(value, string_types) or isinstance(value, Mapping):
lst = [value]
elif isinstance(value, Iterable):
lst = value
else:
lst = [value]
return [normalize_value(ctx, val, to=member_type) for val in lst]
return normalizer
def map_with(**options):
"""A normalization function generator for mapping values
Each keyword argument points to a normalization function, so that if that
keyword appears as key in the input map, it is included in the normalized
map with the value normalized by the function.
If a given normalization function has a __default__ attribute, and the
keyword that points to it is not included in the input, the value of the
attribute is placed in the output map.
If a given normalization function has a __mandatory__ attribute, the
keyword that points to it must be included in the input (Unless is also has
the __default__ attribute). Otherwise, the value of the __mandatory__
attribute is used as an error message in a raised DataNormalizationError.
If the input is not a map, a keyword argument is looked for that point to a
function that includes the __fallback_option__ attribute. If found, a map
is generated with the keyword pointing to the value which is then
normalized with the normalization function the keyword points to. If not
found a DataNormalizationError exception is raised.
:rtype: function
:returns: A normalization function that accepts a context and a value
and normalizes it according to the rules specified above
"""
fallback_key = next((
key for key, norm_func in iteritems(options)
if hasattr(norm_func, '__fallback_option__')
), None)
def normalizer(ctx, value):
if isinstance(value, Mapping):
srcmap = value
elif fallback_key is None:
srcmap = {}
else:
srcmap = { fallback_key: value }
dstmap = {}
for key, norm_func in iteritems(options):
dstmap.update(normalize_option(ctx, srcmap, key, to=norm_func))
return dstmap
return normalizer
def normalize_option(ctx, mp, key, to):
"""Normalize a single option in a map
:param object ctx: A context to pass to normalization function
:param dict mp: An input map
:param str key: The name of the option to normalize
:param function to: A normalization function used to normalize the value
pointed to by `key` in the input map
The normalization function can be annotated with the __mandatory__ and
__default__ attributes to define behaviour if the option does not exist in
the input map, as specified in the docstring for `map_with`.
:rtype: dict
:returns: If `key` found in `mp`, then a map with `key` pointing to a
normalized value, otherwise, may return an empty map, a map
              with a default value, or raise an exception, according to whether
__mandatory__ and __default__ are set on the `to` function.
"""
if key in mp:
return {key: normalize_value(ctx, mp[key], to)}
elif hasattr(to, '__mandatory__'):
if hasattr(to, '__default__'):
return {key: to.__default__}
else:
raise DataNormalizationError(to.__mandatory__)
else:
return {}
def mandatory(value_type, default=None, else_='Mandatory option missing'):
"""Annotate a normalization function to indicate a mandatory option
:param function value_type: A normalization function to annotate
:param object default: An optional default value to associate with the
function
:param str else_: An error message for the case where a mandatory
value is missing
:rtype: function
:returns: A function that calls `value_type` and has the __mandatory__ and
optionally __default__ attributes set so it conforms with the
requirements of the `map_with` and `normalize_option` functions.
"""
@wraps(value_type)
def normalizer(*args, **kwargs):
return value_type(*args, **kwargs)
normalizer.__mandatory__ = else_
if default is not None:
normalizer.__default__ = default
return normalizer
def fallback_option(value_type):
"""Annotate a normalization function to indicate a fallback option
:param function value_type: A normalization function to annotate
:rtype: function
:returns: A function that calls `value_type` and has the
__fallback__option__ attribute set so it conforms with the
requirements of the `map_with` and `normalize_option` functions.
"""
@wraps(value_type)
def normalizer(*args, **kwargs):
return value_type(*args, **kwargs)
normalizer.__fallback_option__ = True
return normalizer
def normalize_value(ctx, value, to):
"""Normalize a single value
:param object ctx: A context to pass to normalization function
:param object value: A value to normalize
:param function to: A normalization function
    Call the `to` function, passing in `ctx` and `value`, and return the
    result. This is the core of the normalization mini-DSL.
:rtype: object
:returns: Whatever the `to` function returns
"""
return to(ctx, value)
def all_of(*normalizers):
"""Chain normalization functions together
:param list normalizers: A list of two or more normalization functions
that should be a applied to a value
:rtype: function
:returns: A normalization function that calls all the given normalization
functions in a chain, generating a value that is normalized by
all of them.
"""
def normalizer(ctx, value):
for nrmfun in normalizers:
value = normalize_value(ctx, value, to=nrmfun)
return value
for nrmfun in normalizers:
normalizer = wraps(nrmfun)(normalizer)
return normalizer
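# A minimal end-to-end sketch (not part of the original module) showing the DSL in use.
# The built-in normalizers ignore the `ctx` argument, so any value (here None) works;
# the field names "name" and "ports" are illustrative only.
if __name__ == "__main__":
    schema = map_with(
        name=fallback_option(mandatory(
            scalar(type=str, else_='Invalid value for name'),
            else_='Missing configuration field: name',
        )),
        ports=list_of(scalar(type=int, else_='Ports must be integers')),
    )
    # A plain string is wrapped into {'name': ...} because "name" is the fallback option:
    print(normalize_value(None, 'server1', to=schema))
    # -> {'name': 'server1'}
    print(normalize_value(None, {'name': 'server1', 'ports': '8080'}, to=schema))
    # -> {'name': 'server1', 'ports': [8080]}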
|
Another year of gear lust and envy, as our editorial team tackles the greatest show on Earth to bring you the latest gear news.
From classic hard rock to modern high gain, Mzero packs massive boutique punch.
When you're unwilling to compromise tone and functionality, you can't go wrong with the fantastic Hughes & Kettner Triamp Mark 3. Truly one of the greatest high-gain amps ever built.
Who would have thought that the legendary alternative rock guitarist from The Smiths would create a signature guitar that significantly improves upon the original?
Torn between a Jazz Bass and a Precision Bass? Not sure how they differ? Check out these two rock-solid choices from Fender.
Serious about the Peavey Cirrus!
We were excited last year when Peavey announced the impending return of their flagship bass line, Cirrus. Was it worth the wait?
Tech 21's first compact amp built around a class-D amplifier sounds fantastic and carries a ridiculously low price tag. What's not to love?
Need some classic drum machine beats and analog bass grooves? Check out the Urban Suite collection from UVI for something... retro fresh!
Odds are good that you didn't even know this virtual analog polysynth existed. What can this producers' secret weapon do for you?
If you've been searching for a MIDI controller that works with your racks full of MIDI sound modules as well as your computer-based soft synths, these just might be the controller keyboards for you.
This semi-modular analog synth from Moog sounds great, is fun to use, and is priced for anyone. Who says your mother isn't hot stuff?
We are extremely picky about our choice of MIDI keyboard controllers, so it was an absolute treat getting to know this pro-grade controller and its accompanying VIP software.
Boutique build. Boutique sound. Solid shell. Butternut wood. Check out this beautiful snare from the master craftsmen at Craviotto.
The legend is back! Did Yamaha and drummer Steve Gadd manage to improve upon this timeless, classic kit?
Solid performing pro drums with Tama's latest innovations, at a price that is just right for the working musician.
Blink and you just might miss these hard rocking stalks of hickory that double as both mallets and sticks.
Looking to update your studio with a new interface and preamps? We put three of the latest and greatest head-to-head: the PreSonus Studio 192, MOTU 1248, and Antelope Orion Studio.
Warm Audio created a replica of the legendary LA-2A compressor. Is it just as good? Almost as good? Even better than the original?
Great sounding and easy way to add a bank of eight digitallly controlled preamps to your studio and/or audio interface.
We all need a place to set up our music production workstations, and the Yesk Desk offers a stylish and functional piece of furniture for your project studio.
Finally, earplugs that protect your hearing without killing the quality of the sound!
New CDs and DVDs from Fire Garden, Kansas, Kee Marcello, Marillion, The Mission, The Neal Morse Band, My World, Operation: Mindcrime, Ted Poley, Paul Simon, Toto, Devin Townsend Project, John Wesley, Withem.
Check this space every week for additions!
The classic jazz guitar company from New York City is back, and the EX-SS is a beautiful hollow-body guitar that can hang at your jazz gig... or your rock gig.
Andy Fuchs delivers a new high-gain, boutique amp with low wattage and a friendly price tag.
Boutique builder Knaggs Guitars has unleashed another monster in their Steve Stevens signature model.
Tech 21 takes their industry standard DI and improves it for use with five- and six-string basses.
If you need a modern, versatile bass with tons of style, tone, and feel, which also presents a solid value, you must check out this fantastic instrument!
Tired of the same old synth pads? PADS delivers soundtrack vibe in a box, with a super-easy interface. ARPS delivers a new spin on percussion sequencing.
Want to sound like a guitar player... from your keyboard? Strum goes way beyond simple guitar samples, delivering actual guitar playing feel. Check it out!
This monster of a snare drum sounds just as special as it looks. Does it belong in your drum kit?
One of the leading pitch correction and time correction audio tools gets even better!
This true-peak limiter could be our new benchmark.
One plug-in suite to rule them all? If you want classic Eventide processing in your studio, this bundle is extremely hard to resist.
The first revolutionary design change to a handheld microphone in nearly 100 years yields awesome results on stage and in the studio!
Two microphones, nearly identical, but with very different results. Which one will benefit your vocalist?
New, premium mobile audio interface serves double duty on your laptop, too!
Great new musical releases from Susan Hyatt, Late Night Cable, Dan Reed Network, Roxette, Jorn, MGT, Karmakanic, engineering book by Sylvia Massy.
How We Test: Information on our testing methodologies. |
#!/usr/bin/python
"""
15 Nov 2010
simple example to mark a tree and compute branch-site test of positive selection
"""
__author__ = "Francois-Jose Serra"
__email__ = "[email protected]"
__licence__ = "GPLv3"
__version__ = "0.0"
from ete3 import EvolTree
try:
input = raw_input
except NameError:
pass
tree = EvolTree("data/L_example/measuring_L_tree.nw")
tree.link_to_alignment('data/L_example/alignment_L_measuring_evol.fasta')
print (tree)
# input('\n tree and alignment loaded\nHit some key, to start computation of branch site models A and A1 on each branch.\n')
print ('running model M0, for comparison with branch-site models...')
tree.run_model('M0')
# each node/leaf has two kinds of identifiers, node_id and paml_id; to mark nodes we have to specify
# the node_id of the nodes we want to mark, and the kind of mark, in this way:
for leaf in tree:
leaf.node_id
print ('\n---------\nNow working with leaf ' + leaf.name)
tree.mark_tree([leaf.node_id], marks=['#1'])
print (tree.write())
# to organize a bit, we name model with the name of the marked node
# any character after the dot, in model name, is not taken into account
# for computation. (have a look in /tmp/ete3.../bsA.. directory)
print ('running model bsA and bsA1')
tree.run_model('bsA.'+ leaf.name)
tree.run_model('bsA1.' + leaf.name)
print ('p-value of positive selection for sites on this branch is: ')
ps = tree.get_most_likely('bsA.' + leaf.name, 'bsA1.'+ leaf.name)
rx = tree.get_most_likely('bsA1.'+ leaf.name, 'M0')
print (str(ps))
print ('p-value of relaxation for sites on this branch is: ')
print (str(rx))
model = tree.get_evol_model("bsA." + leaf.name)
if ps < 0.05 and float(model.classes['foreground w'][2]) > 1:
print ('we have positive selection on sites on this branch')
tree.show(histfaces=['bsA.' + leaf.name])
elif rx<0.05 and ps>=0.05:
print ('we have relaxation on sites on this branch')
else:
print ('no signal detected on this branch, best fit for M0')
print ('\nclean tree, remove marks')
tree.mark_tree([node.node_id for node in tree.get_descendants()],
               marks=[''] * len(tree.get_descendants()), verbose=True)
# nothing working yet to get which sites are under positive selection/relaxation,
# have to look at the main outfile or rst outfile
print ('The End.')
|
Our WWS line of metal bond crystalline diamond products is widely used in resin bond, metal bond, vitreous bond and electroplated bond systems.
The distinct fracture modes of each respective product make our WWS line ideal for a variety of glass grinding applications, as well as for grinding tungsten carbide, fiberglass and other composite materials.
A medium quality, semi-crystalline blocky shaped crystal with good toughness and thermal stability. Increased friability vs. the WWS-300 and an improved surface texture on the WWS-200’s crystal facets produce a free cutting crystal that regenerates new cutting edges with moderate grinding force.
High quality cubo-octahedral diamond that is thermally stable, requires high grinding forces to generate new cutting edges and fractures in a controlled manner. Its uniform shape complements the WWS-300's free cutting capability.
Excellent cubo-octahedral crystal shape, high impact strength and very high thermal stability make the WWS-400 suitable for demanding applications where high production rates are desired.
Uniform shaped cubo-octahedral crystals that exhibit very high toughness and exceptional thermal stability along with minimal internal impurities. |
import numpy as np
'''
du/dt + a du/dx = 0
WENO method for the linear advection equation
a   - coefficient of the x-derivative (advection speed)
tau - time step
h   - spatial step
u   - initial condition at t = 0 (numpy array)
Returns: the solution on the next time layer (numpy array)
'''
def WENOmethod(a, tau, h, u):
    # Number of weights: order = 2k - 1
k = 3
    # Number of grid nodes
nx = int((2 / h ) + 1)
    # Number of ghost cells
gc = k - 1
    # Add ghost cells on both sides
x = np.linspace(0, 2, nx)
gcr = x[-1] + np.linspace(1, gc, gc) * h
gcl = x[0] + np.linspace(-gc, -1, gc) * h
xc = np.append(x, gcr)
xc = np.append(gcl, xc)
uc = np.append(u, u[-gc:])
uc = np.append(u[0:gc], uc)
gs = np.zeros([nx + 2 * gc, 1])
flux = np.zeros(nx + 2 * gc)
un = uc.copy()
for i in range(gc, nx - 1 + gc): # i = 2
xloc = xc[i - (k - 1):i + k] # i + k - 1 - (i - (k - 1) - 1) = 2k -1
uloc = uc[i - (k - 1):i + k]
f_left, f_right = WENO(xloc, uloc, k)
        # Upwind flux, split by the sign of a
flux[i] = 0.5 * (a + np.fabs(a)) * f_left + 0.5 * (a - np.fabs(a)) * f_right
for i in range(gc, nx - gc):
if (a > 0):
uc[i] = un[i] - tau / h * (flux[i] - flux[i - 1])
else:
uc[i] = un[i] - tau / h * (flux[i + 1] - flux[i])
return uc[3:-3]
'''
Computes the left and right cell-interface values using WENO reconstruction
xloc - the 2k - 1 stencil nodes
uloc - the 2k - 1 nodal values
k    - number of weights
Returns: tuple of the left and right interface values
'''
def WENO(xloc, uloc, k):
    # Special case - no stencil selection is needed
if (k == 1):
ul = uloc[0]
ur = uloc[1]
    # Apply the WENO procedure
    alphal = np.zeros(k, dtype=float)
    alphar = np.zeros(k, dtype=float)
    omegal = np.zeros(k, dtype=float)
    omegar = np.zeros(k, dtype=float)
    beta = np.zeros(k, dtype=float)
    d = np.zeros(k, dtype=float)
    vareps = 1e-6
    # Compute the k candidate values ul and ur reconstructed on the different stencils
    ulr = np.zeros(k, dtype=float)
    urr = np.zeros(k, dtype=float)
for r in range(0, k):
cr = ENOweights(k, r)
cl = ENOweights(k, r - 1)
for i in range(0, k):
urr[r] = urr[r] + cr[i] * uloc[k - r + i - 1]
ulr[r] = ulr[r] + cl[i] * uloc[k - r + i - 1]
    # Compute the WENO coefficients for the different orders 2k - 1 (3rd and 5th order)
if (k == 2):
        # Optimal (linear) weights
d[0] = 2 / 3.
d[1] = 1 / 3.
        # Smoothness indicator for each stencil
beta[0] = (uloc[2] - uloc[1]) ** 2
beta[1] = (uloc[1] - uloc[0]) ** 2
if(k == 3):
        # Optimal (linear) weights
d[0] = 3 / 10.
d[1] = 3 / 5.
d[2] = 1 / 10.
        # Smoothness indicator for each stencil
beta[0] = 13/12.*(uloc[2]-2*uloc[3]+uloc[4])**2 + 1/4.*(3*uloc[2]-4*uloc[3]+uloc[4])**2
beta[1] = 13/12.*(uloc[1]-2*uloc[2]+uloc[3])**2 + 1/4.*(uloc[1]-uloc[3])**2
beta[2] = 13/12.*(uloc[0]-2*uloc[1]+uloc[2])**2 + 1/4.*(3*uloc[2]-4*uloc[1]+uloc[0])**2
    # Compute the alpha parameters
for r in range(0,k):
alphar[r] = d[r] / (vareps + beta[r]) ** 2
alphal[r] = d[k - r - 1] / (vareps + beta[r]) ** 2
    # Compute the normalized WENO weights
for r in range(0,k):
omegal[r] = alphal[r] / alphal.sum()
omegar[r] = alphar[r] / alphar.sum()
    # Compute the values at the cell interfaces
ul = 0
ur = 0
for r in range(0,k):
ul = ul + omegal[r] * ulr[r]
ur = ur + omegar[r] * urr[r]
return (ul,ur)
'''
Computes the optimal reconstruction coefficients c_{rj} for WENO
v_{i+1/2} = \sum_{j=0}^{k-1} c_{rj} v_{i-r+j}
k - order
r - stencil shift
Returns: array of coefficients c_{rj} (numpy array)
'''
def ENOweights(k,r):
c = np.zeros(k)
for j in range(0,k):
de3 = 0.
for m in range(j + 1, k + 1):
            # Compute the denominator
            de2 = 0.
            for l in range(0, k + 1):
                if l != m:
                    de1 = 1.
                    for q in range(0, k + 1):
                        if q != m and q != l:
                            de1 = de1 * (r - q + 1)
                    de2 = de2 + de1
            # Compute the numerator
            de1 = 1.
            for l in range(0, k + 1):
                if l != m:
                    de1 = de1 * (m - l)
            de3 = de3 + de2 / de1
c[j] = de3
return c |
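# A minimal driver sketch (not part of the original file): advance a square pulse on
# x in [0, 2] by one explicit step with the WENO scheme above. The CFL factor and the
# pulse shape are illustrative choices only. Note that repeated stepping would need the
# ghost-cell handling in WENOmethod to be revisited, since the returned slice uc[3:-3]
# is two cells shorter than the input array.
if __name__ == "__main__":
    a = 1.0                        # advection speed
    h = 0.01                       # spatial step
    tau = 0.5 * h / abs(a)         # time step from a CFL-like condition
    x = np.linspace(0, 2, int(2 / h) + 1)
    u0 = np.where((x > 0.5) & (x < 1.0), 1.0, 0.0)   # square pulse
    u1 = WENOmethod(a, tau, h, u0)  # one time step of the advected profile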
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from kombu.async.aws.sqs.message import AsyncMessage
from kombu.tests.async.aws.case import AWSCase
from kombu.tests.case import PromiseMock, Mock
from kombu.utils import uuid
class test_AsyncMessage(AWSCase):
def setup(self):
self.queue = Mock(name='queue')
self.callback = PromiseMock(name='callback')
self.x = AsyncMessage(self.queue, 'body')
self.x.receipt_handle = uuid()
def test_delete(self):
self.assertTrue(self.x.delete(callback=self.callback))
self.x.queue.delete_message.assert_called_with(
self.x, self.callback,
)
self.x.queue = None
self.assertIsNone(self.x.delete(callback=self.callback))
def test_change_visibility(self):
self.assertTrue(self.x.change_visibility(303, callback=self.callback))
self.x.queue.connection.change_message_visibility.assert_called_with(
self.x.queue, self.x.receipt_handle, 303, self.callback,
)
self.x.queue = None
self.assertIsNone(self.x.change_visibility(
303, callback=self.callback,
))
|
On this website we recommend many pictures about Best Fishing Holes Near Me that we have collected from various sites, such as Fishing Madness, and what we recommend are the most excellent pictures for Best Fishing Holes Near Me. If you like the pictures on our website, please do not hesitate to visit again and get inspiration from them.
And if you want to see more images, we recommend the gallery below, where you can use the pictures as design references for your Best Fishing Holes Near Me.
Thank you for viewing the gallery of Best Fishing Holes Near Me; we would be very happy if you come back.
# Copyright (c) 2018-2021, Manfred Moitzi
# License: MIT License
from typing import TYPE_CHECKING, List, Union, Optional
import copy
import logging
from collections import namedtuple
from ezdxf.lldxf import const
from ezdxf.lldxf.attributes import (
DXFAttr,
DXFAttributes,
DefSubclass,
XType,
group_code_mapping,
)
from ezdxf.lldxf.tags import Tags
from ezdxf.math import Vec3, NULLVEC, X_AXIS, Y_AXIS, Z_AXIS, Matrix44
from ezdxf import colors
from .dxfentity import base_class, SubclassProcessor
from .dxfobj import DXFObject
from .dxfgfx import DXFGraphic, acdb_entity
from .factory import register_entity
from .objectcollection import ObjectCollection
if TYPE_CHECKING:
from ezdxf.eztypes import TagWriter, Drawing, DXFNamespace, DXFTag
__all__ = ["MultiLeader", "MLeader", "MLeaderStyle", "MLeaderStyleCollection"]
logger = logging.getLogger("ezdxf")
# DXF Examples:
# "D:\source\dxftest\CADKitSamples\house design for two family with common staircasedwg.dxf"
# "D:\source\dxftest\CADKitSamples\house design.dxf"
# How to render MLEADER: https://atlight.github.io/formats/dxf-leader.html
# DXF reference:
# http://help.autodesk.com/view/OARX/2018/ENU/?guid=GUID-72D20B8C-0F5E-4993-BEB7-0FCF94F32BE0
acdb_mleader = DefSubclass(
"AcDbMLeader",
{
"version": DXFAttr(270, default=2),
"style_handle": DXFAttr(340),
# Theory: Take properties from MLEADERSTYLE,
# except explicit overridden here:
"property_override_flags": DXFAttr(90),
# Bit coded flags:
# 1 << 0 = leader_type
# 1 << 1 = leader_line_color
# 1 << 2 = leader_linetype_handle
# 1 << 3 = leader_lineweight
# 1 << 4 = landing_flag
# 1 << 5 = landing_gap ???
# 1 << 6 = dogleg_flag
# 1 << 7 = dogleg_length
# 1 << 8 = arrow_head_handle
# 1 << 9 = arrow_head_size
# 1 << 10 = content_type
# 1 << 11 = text_style_handle
# 1 << 12 = text_left_attachment_type (of MTEXT)
# 1 << 13 = text_angle_type (of MTEXT)
# 1 << 14 = text_alignment_type (of MTEXT)
# 1 << 15 = text_color (of MTEXT)
# 1 << 16 = ??? Text height (of MTEXT) ???
# 1 << 17 = text_frame_flag
# 1 << 18 = ??? Enable use of default MTEXT (from MLEADERSTYLE)
# 1 << 19 = block_record_handle
# 1 << 20 = block_color
# 1 << 21 = block_scale_vector
# 1 << 22 = block_rotation
# 1 << 23 = block_connection_type
# 1 << 24 = ??? Scale ???
# 1 << 25 = text_right_attachment_type (of MTEXT)
# 1 << 26 = ??? Text switch alignment type (of MTEXT) ???
# 1 << 27 = text_attachment_direction (of MTEXT)
# 1 << 28 = text_top_attachment_type (of MTEXT)
# 1 << 29 = Text_bottom_attachment_type (of MTEXT)
"leader_type": DXFAttr(170, default=1),
"leader_line_color": DXFAttr(91, default=colors.BY_BLOCK_RAW_VALUE),
"leader_linetype_handle": DXFAttr(341),
"leader_lineweight": DXFAttr(171, default=const.LINEWEIGHT_BYBLOCK),
"has_landing": DXFAttr(290, default=1),
"has_dogleg": DXFAttr(291, default=1),
"dogleg_length": DXFAttr(41, default=8), # depend on $MEASUREMENT?
# no handle is default arrow 'closed filled':
"arrow_head_handle": DXFAttr(342),
"arrow_head_size": DXFAttr(42, default=4), # depend on $MEASUREMENT?
"content_type": DXFAttr(172, default=2),
# 0 = None
# 1 = Block content
# 2 = MTEXT content
# 3 = TOLERANCE content
# Text Content:
"text_style_handle": DXFAttr(343),
"text_left_attachment_type": DXFAttr(173, default=1),
# Values 0-8 are used for the left/right attachment
# point (attachment direction is horizontal), values 9-10 are used for the
# top/bottom attachment points (attachment direction is vertical).
# Attachment point is:
# 0 = top of top text line,
# 1 = middle of top text line,
# 2 = middle of text,
# 3 = middle of bottom text line,
# 4 = bottom of bottom text line,
# 5 = bottom text line,
# 6 = bottom of top text line. Underline bottom line
# 7 = bottom of top text line. Underline top line,
# 8 = bottom of top text line. Underline all content,
# 9 = center of text (y-coordinate only),
# 10 = center of text (y-coordinate only), and overline top/underline
# bottom content.
"text_right_attachment_type": DXFAttr(95), # like 173
"text_angle_type": DXFAttr(174, default=1),
# 0 = text angle is equal to last leader line segment angle
# 1 = text is horizontal
# 2 = text angle is equal to last leader line segment angle, but potentially
# rotated by 180 degrees so the right side is up for readability.
"text_alignment_type": DXFAttr(175, default=2),
"text_color": DXFAttr(92, default=colors.BY_BLOCK_RAW_VALUE),
"has_frame_text": DXFAttr(292, default=0),
# Block Content:
"block_record_handle": DXFAttr(344),
"block_color": DXFAttr(
93, default=colors.BY_BLOCK_RAW_VALUE
), # raw color
"block_scale_vector": DXFAttr(
10, xtype=XType.point3d, default=Vec3(1, 1, 1)
),
"block_rotation": DXFAttr(43, default=0), # in radians!!!
"block_connection_type": DXFAttr(176, default=0),
# 0 = center extents
# 1 = insertion point
"is_annotative": DXFAttr(293, default=0),
# REPEAT "arrow_heads": DXF R2007+
# arrow_head_index: 94, ???
# arrow_head_handle: 345
# END "arrow heads"
# REPEAT "block attribs" (ATTDEF): DXF R2007+
# attrib_handle: 330
# attrib_index: 177, sequential index of the label in the collection
# attrib_width: 44
# attrib_text: 302, collision with group code (302, "LEADER{") in context data
# END "block attribs"
# Text Content:
"is_text_direction_negative": DXFAttr(
294, default=0, dxfversion=const.DXF2007
),
"text_IPE_align": DXFAttr(178, default=0, dxfversion=const.DXF2007),
"text_attachment_point": DXFAttr(
179, default=1, dxfversion=const.DXF2007
),
# 1 = left
# 2 = center
# 3 = right
"scale": DXFAttr(45, default=1, dxfversion=const.DXF2007),
"text_attachment_direction": DXFAttr(
271, default=0, dxfversion=const.DXF2010
),
# This defines whether the leaders attach to the left/right of the content
# block/text, or attach to the top/bottom:
# 0 = horizontal
# 1 = vertical
"text_bottom_attachment_direction": DXFAttr(
272, default=9, dxfversion=const.DXF2010
),
# like 173, but
# 9 = center
# 10= underline and center
"text_top_attachment_direction": DXFAttr(
273, default=9, dxfversion=const.DXF2010
),
# like 173, but
# 9 = center
# 10= overline and center
"leader_extend_to_text": DXFAttr(
295, default=0, dxfversion=const.DXF2013
),
},
)
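# A small illustrative sketch (not part of the original module): decode the bit-coded
# property_override_flags documented in the comments above. The name list below is an
# assumption transcribed from that comment table and covers only the first ten bits.
_OVERRIDE_FLAG_NAMES = [
    "leader_type",              # 1 << 0
    "leader_line_color",        # 1 << 1
    "leader_linetype_handle",   # 1 << 2
    "leader_lineweight",        # 1 << 3
    "landing_flag",             # 1 << 4
    "landing_gap",              # 1 << 5
    "dogleg_flag",              # 1 << 6
    "dogleg_length",            # 1 << 7
    "arrow_head_handle",        # 1 << 8
    "arrow_head_size",          # 1 << 9
]
def _overridden_properties(flags: int) -> List[str]:
    """Return the names of the MLEADERSTYLE properties overridden in `flags`."""
    return [
        name
        for bit, name in enumerate(_OVERRIDE_FLAG_NAMES)
        if flags & (1 << bit)
    ]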
acdb_mleader_group_codes = group_code_mapping(acdb_mleader)
CONTEXT_STR = "CONTEXT_DATA{"
LEADER_STR = "LEADER{"
LEADER_LINE_STR = "LEADER_LINE{"
START_CONTEXT_DATA = 300
END_CONTEXT_DATA = 301
START_LEADER = 302
END_LEADER = 303
START_LEADER_LINE = 304
END_LEADER_LINE = 305
def compile_context_tags(
data: List["DXFTag"], stop_code: int
) -> List[Union["DXFTag", List]]:
def build_structure(
tag: "DXFTag", stop: int
) -> List[Union["DXFTag", List]]:
collector = [tag]
tag = next(tags)
while tag.code != stop:
if tag.code == START_LEADER:
collector.append(build_structure(tag, END_LEADER))
# Group code 304 is used also for MTEXT content, therefore always
            # test for group code AND value string:
elif tag.code == START_LEADER_LINE and tag.value == LEADER_LINE_STR:
collector.append(build_structure(tag, END_LEADER_LINE))
else:
collector.append(tag)
tag = next(tags)
return collector
tags = iter(data)
return build_structure(next(tags), stop_code)
ArrowHeadData = namedtuple("ArrowHeadData", "index, handle")
AttribData = namedtuple("AttribData", "handle, index, width, text")
@register_entity
class MultiLeader(DXFGraphic):
DXFTYPE = "MULTILEADER"
DXFATTRIBS = DXFAttributes(base_class, acdb_entity, acdb_mleader)
MIN_DXF_VERSION_FOR_EXPORT = const.DXF2000
def __init__(self):
super().__init__()
self.context = MultiLeaderContext()
self.arrow_heads: List[ArrowHeadData] = []
self.block_attribs: List[AttribData] = []
def _copy_data(self, entity: "MultiLeader") -> None:
"""Copy leaders"""
entity.context = copy.deepcopy(self.context)
entity.arrow_heads = copy.deepcopy(self.arrow_heads)
entity.block_attribs = copy.deepcopy(self.block_attribs)
def load_dxf_attribs(
self, processor: SubclassProcessor = None
) -> "DXFNamespace":
dxf = super().load_dxf_attribs(processor)
if processor is None:
return dxf
tags = processor.subclass_by_index(2)
context = self.extract_context_data(tags)
if context:
try:
self.context = self.load_context(context)
except const.DXFStructureError:
logger.info(
f"Context structure error in entity MULTILEADER(#{dxf.handle})"
)
self.arrow_heads = self.extract_arrow_heads(tags)
self.block_attribs = self.extract_block_attribs(tags)
processor.fast_load_dxfattribs(
dxf, acdb_mleader_group_codes, subclass=tags, recover=True
)
return dxf
@staticmethod
def extract_context_data(tags: Tags) -> List["DXFTag"]:
start, end = None, None
context_data = []
for index, tag in enumerate(tags):
if tag.code == START_CONTEXT_DATA:
start = index
elif tag.code == END_CONTEXT_DATA:
end = index + 1
        if start is not None and end is not None:
context_data = tags[start:end]
# Remove context data!
del tags[start:end]
return context_data
@staticmethod
def load_context(data: List["DXFTag"]) -> "MultiLeaderContext":
try:
context = compile_context_tags(data, END_CONTEXT_DATA)
except StopIteration:
raise const.DXFStructureError
else:
return MultiLeaderContext.load(context)
@staticmethod
def extract_arrow_heads(data: Tags) -> List[ArrowHeadData]:
def store_head():
heads.append(
ArrowHeadData(
collector.get(94, 0), # arrow head index
collector.get(345, "0"), # arrow head handle
)
)
collector.clear()
heads = []
try:
start = data.tag_index(94)
except const.DXFValueError:
return heads
end = start
collector = dict()
for code, value in data.collect_consecutive_tags({94, 345}, start):
end += 1
collector[code] = value
if code == 345:
store_head()
# Remove processed tags:
del data[start:end]
return heads
@staticmethod
def extract_block_attribs(data: Tags) -> List[AttribData]:
def store_attrib():
attribs.append(
AttribData(
collector.get(330, "0"), # ATTDEF handle
collector.get(177, 0), # ATTDEF index
collector.get(44, 1.0), # ATTDEF width
collector.get(302, ""), # ATTDEF text (content)
)
)
collector.clear()
attribs = []
try:
start = data.tag_index(330)
except const.DXFValueError:
return attribs
end = start
collector = dict()
for code, value in data.collect_consecutive_tags(
{330, 177, 44, 302}, start
):
end += 1
if code == 330 and len(collector):
store_attrib()
collector[code] = value
if len(collector):
store_attrib()
# Remove processed tags:
del data[start:end]
return attribs
def preprocess_export(self, tagwriter: "TagWriter") -> bool:
if self.context.is_valid:
return True
else:
logger.debug(
f"Ignore {str(self)} at DXF export, invalid context data."
)
return False
def export_entity(self, tagwriter: "TagWriter") -> None:
def write_handle_if_exist(code: int, name: str):
handle = dxf.get(name)
if handle is not None:
write_tag2(code, handle)
super().export_entity(tagwriter)
dxf = self.dxf
version = tagwriter.dxfversion
write_tag2 = tagwriter.write_tag2
write_tag2(100, acdb_mleader.name)
write_tag2(270, dxf.version)
self.context.export_dxf(tagwriter)
# Export common MLEADER tags:
# Don't use dxf.export_dxf_attribs() - all attributes should be written
# even if equal to the default value:
write_tag2(340, dxf.style_handle)
write_tag2(90, dxf.property_override_flags)
write_tag2(170, dxf.leader_type)
write_tag2(91, dxf.leader_line_color)
write_tag2(341, dxf.leader_linetype_handle)
write_tag2(171, dxf.leader_lineweight)
write_tag2(290, dxf.has_landing)
write_tag2(291, dxf.has_dogleg)
write_tag2(41, dxf.dogleg_length)
# arrow_head_handle is None for default arrow 'closed filled':
write_handle_if_exist(342, "arrow_head_handle")
write_tag2(42, dxf.arrow_head_size)
write_tag2(172, dxf.content_type)
write_tag2(343, dxf.text_style_handle) # mandatory!
write_tag2(173, dxf.text_left_attachment_type)
write_tag2(95, dxf.text_right_attachment_type)
write_tag2(174, dxf.text_angle_type)
write_tag2(175, dxf.text_alignment_type)
write_tag2(92, dxf.text_color)
write_tag2(292, dxf.has_frame_text)
write_handle_if_exist(344, "block_record_handle")
write_tag2(93, dxf.block_color)
tagwriter.write_vertex(10, dxf.block_scale_vector)
write_tag2(43, dxf.block_rotation)
write_tag2(176, dxf.block_connection_type)
write_tag2(293, dxf.is_annotative)
if version >= const.DXF2007:
self.export_arrow_heads(tagwriter)
self.export_block_attribs(tagwriter)
write_tag2(294, dxf.is_text_direction_negative)
write_tag2(178, dxf.text_IPE_align)
write_tag2(179, dxf.text_attachment_point)
write_tag2(45, dxf.scale)
if version >= const.DXF2010:
write_tag2(271, dxf.text_attachment_direction)
write_tag2(272, dxf.text_bottom_attachment_direction)
write_tag2(273, dxf.text_top_attachment_direction)
if version >= const.DXF2013:
write_tag2(295, dxf.leader_extend_to_text)
def export_arrow_heads(self, tagwriter: "TagWriter") -> None:
for index, handle in self.arrow_heads:
tagwriter.write_tag2(94, index)
tagwriter.write_tag2(345, handle)
def export_block_attribs(self, tagwriter: "TagWriter") -> None:
for attrib in self.block_attribs:
tagwriter.write_tag2(330, attrib.handle)
tagwriter.write_tag2(177, attrib.index)
tagwriter.write_tag2(44, attrib.width)
tagwriter.write_tag2(302, attrib.text)
class MultiLeaderContext:
ATTRIBS = {
40: "scale",
10: "base_point",
41: "text_height",
140: "arrowhead_size",
145: "landing_gap_size",
174: "left_attachment",
175: "right_attachment",
176: "text_align_type",
177: "attachment_type",
110: "plane_origin",
111: "plane_x_axis",
112: "plane_y_axis",
297: "plane_normal_reversed",
272: "top_attachment",
273: "bottom_attachment",
}
def __init__(self):
self.leaders: List["Leader"] = []
self.scale: float = 1.0 # overall scale
self.base_point: Vec3 = NULLVEC
self.text_height = 4.0
self.arrowhead_size = 4.0
self.landing_gap_size = 2.0
self.left_attachment = 1
self.right_attachment = 1
self.text_align_type = 0 # 0=left, 1=center, 2=right
self.attachment_type = 0 # 0=content extents, 1=insertion point
self.mtext: Optional[MTextData] = None
self.block: Optional[BlockData] = None
self.plane_origin: Vec3 = NULLVEC
self.plane_x_axis: Vec3 = X_AXIS
self.plane_y_axis: Vec3 = Y_AXIS
self.plane_normal_reversed: int = 0
self.top_attachment = 9
self.bottom_attachment = 9
@classmethod
def load(cls, context: List[Union["DXFTag", List]]) -> "MultiLeaderContext":
assert context[0] == (START_CONTEXT_DATA, CONTEXT_STR)
ctx = cls()
content = None
for tag in context:
if isinstance(tag, list): # Leader()
ctx.leaders.append(Leader.load(tag))
continue
# parse context tags
code, value = tag
if content:
if content.parse(code, value):
continue
else:
content = None
if code == 290 and value == 1:
content = MTextData()
ctx.mtext = content
elif code == 296 and value == 1:
content = BlockData()
ctx.block = content
else:
name = MultiLeaderContext.ATTRIBS.get(code)
if name:
ctx.__setattr__(name, value)
return ctx
@property
def is_valid(self) -> bool:
return True
def export_dxf(self, tagwriter: "TagWriter") -> None:
write_tag2 = tagwriter.write_tag2
write_vertex = tagwriter.write_vertex
write_tag2(START_CONTEXT_DATA, CONTEXT_STR)
# All MultiLeaderContext tags:
write_tag2(40, self.scale)
write_vertex(10, self.base_point)
write_tag2(41, self.text_height)
write_tag2(140, self.arrowhead_size)
write_tag2(145, self.landing_gap_size)
write_tag2(174, self.left_attachment)
write_tag2(175, self.right_attachment)
write_tag2(176, self.text_align_type)
write_tag2(177, self.attachment_type)
if self.mtext:
write_tag2(290, 1) # has mtext content
self.mtext.export_dxf(tagwriter)
else:
write_tag2(290, 0)
if self.block:
write_tag2(296, 1) # has block content
self.block.export_dxf(tagwriter)
else:
write_tag2(296, 0)
write_vertex(110, self.plane_origin)
write_vertex(111, self.plane_x_axis)
write_vertex(112, self.plane_y_axis)
write_tag2(297, self.plane_normal_reversed)
# Export Leader and LiederLine objects:
for leader in self.leaders:
leader.export_dxf(tagwriter)
# Additional MultiLeaderContext tags:
if tagwriter.dxfversion >= const.DXF2010:
write_tag2(272, self.top_attachment)
write_tag2(273, self.bottom_attachment)
write_tag2(END_CONTEXT_DATA, "}")
class MTextData:
ATTRIBS = {
304: "default_content",
11: "normal_direction",
340: "style_handle",
12: "location",
13: "direction",
42: "rotation",
43: "boundary_width",
44: "boundary_height",
45: "line_space_factor",
170: "line_space_style",
90: "color",
171: "alignment",
172: "flow_direction",
91: "bg_color",
141: "bg_scale_factor",
92: "bg_transparency",
291: "has_bg_color",
292: "has_bg_fill",
173: "column_type",
293: "use_auto_height",
142: "column_width",
143: "column_gutter_width",
294: "column_flow_reversed",
144: "column_sizes", # multiple values
295: "use_word_break",
}
def __init__(self):
self.default_content: str = ""
self.normal_direction: Vec3 = Z_AXIS
self.style_handle = None # handle of TextStyle() table entry
self.location: Vec3 = NULLVEC
self.direction: Vec3 = X_AXIS # text direction
self.rotation: float = 0.0 # in radians!
self.boundary_width: float = 0.0
self.boundary_height: float = 0.0
self.line_space_factor: float = 1.0
self.line_space_style: int = 1 # 1=at least, 2=exactly
self.color: int = colors.BY_BLOCK_RAW_VALUE
self.alignment: int = 1 # 1=left, 2=center, 3=right
self.flow_direction: int = 1 # 1=horiz, 3=vert, 6=by style
self.bg_color: int = -939524096 # use window background color? (CMC)
self.bg_scale_factor: float = 1.5
self.bg_transparency: int = 0
self.has_bg_color: int = 0
self.has_bg_fill: int = 0
self.column_type: int = 0 # unknown values
self.use_auto_height: int = 0
self.column_width: float = 0.0
self.column_gutter_width: float = 0.0
self.column_flow_reversed: int = 0
self.column_sizes: List[float] = [] # heights?
self.use_word_break: int = 1
def parse(self, code: int, value) -> bool:
# return True if data belongs to mtext else False (end of mtext section)
if code == 144:
self.column_sizes.append(value)
return True
attrib = MTextData.ATTRIBS.get(code)
if attrib:
self.__setattr__(attrib, value)
return bool(attrib)
def export_dxf(self, tagwriter: "TagWriter") -> None:
write_tag2 = tagwriter.write_tag2
write_vertex = tagwriter.write_vertex
write_tag2(304, self.default_content)
write_vertex(11, self.normal_direction)
if self.style_handle:
write_tag2(340, self.style_handle)
else:
# Do not write None, but "0" is also not valid!
# DXF structure error should be detected before export.
write_tag2(340, "0")
write_vertex(12, self.location)
write_vertex(13, self.direction)
write_tag2(42, self.rotation)
write_tag2(43, self.boundary_width)
write_tag2(44, self.boundary_height)
write_tag2(45, self.line_space_factor)
write_tag2(170, self.line_space_style)
write_tag2(90, self.color)
write_tag2(171, self.alignment)
write_tag2(172, self.flow_direction)
write_tag2(91, self.bg_color)
write_tag2(141, self.bg_scale_factor)
write_tag2(92, self.bg_transparency)
write_tag2(291, self.has_bg_color)
write_tag2(292, self.has_bg_fill)
write_tag2(173, self.column_type)
write_tag2(293, self.use_auto_height)
write_tag2(142, self.column_width)
write_tag2(143, self.column_gutter_width)
write_tag2(294, self.column_flow_reversed)
for size in self.column_sizes:
write_tag2(144, size)
write_tag2(295, self.use_word_break)
class BlockData:
ATTRIBS = {
341: "block_record_handle",
14: "normal_direction",
15: "location",
16: "scale",
46: "rotation",
93: "color",
}
def __init__(self):
self.block_record_handle = None
self.normal_direction: Vec3 = Z_AXIS
self.location: Vec3 = NULLVEC
self.scale: Vec3 = Vec3(1, 1, 1)
self.rotation: float = 0 # in radians!
self.color: int = colors.BY_BLOCK_RAW_VALUE
# The transformation matrix is stored in transposed order
# of ezdxf.math.Matrix44()!
self._matrix: List[float] = [] # group code 47 x 16
@property
def matrix44(self) -> Matrix44:
m = Matrix44(self._matrix)
m.transpose()
return m
@matrix44.setter
def matrix44(self, m: Matrix44) -> None:
m = m.copy()
m.transpose()
self._matrix = list(m)
def parse(self, code: int, value) -> bool:
attrib = BlockData.ATTRIBS.get(code)
if attrib:
self.__setattr__(attrib, value)
elif code == 47:
self._matrix.append(value)
else:
return False
# return True if data belongs to block else False (end of block section)
return True
def export_dxf(self, tagwriter: "TagWriter") -> None:
write_tag2 = tagwriter.write_tag2
write_vertex = tagwriter.write_vertex
if self.block_record_handle:
write_tag2(341, self.block_record_handle)
else:
# Do not write None, but "0" is also not valid!
# DXF structure error should be detected before export.
write_tag2(341, "0")
write_vertex(14, self.normal_direction)
write_vertex(15, self.location)
write_vertex(16, self.scale)
write_tag2(46, self.rotation)
write_tag2(93, self.color)
for value in self._matrix:
write_tag2(47, value)
class Leader:
def __init__(self):
self.lines: List["LeaderLine"] = []
self.has_last_leader_line: int = 0 # group code 290
self.has_dogleg_vector: int = 0 # group code 291
self.last_leader_point: Vec3 = NULLVEC # group code (10, 20, 30)
self.dogleg_vector: Vec3 = X_AXIS # group code (11, 21, 31)
self.dogleg_length: float = 1.0 # group code 40
self.index: int = 0 # group code 90
        self.attachment_direction: int = 0  # group code 271, R2010+
self.breaks = [] # group code 12, 13 - multiple breaks possible!
@classmethod
def load(cls, context: List[Union["DXFTag", List]]):
assert context[0] == (START_LEADER, LEADER_STR)
leader = cls()
for tag in context:
if isinstance(tag, list): # LeaderLine()
leader.lines.append(LeaderLine.load(tag))
continue
code, value = tag
if code == 290:
leader.has_last_leader_line = value
elif code == 291:
leader.has_dogleg_vector = value
elif code == 10:
leader.last_leader_point = value
elif code == 11:
leader.dogleg_vector = value
elif code == 40:
leader.dogleg_length = value
elif code == 90:
leader.index = value
elif code == 271:
leader.attachment_direction = value
elif code in (12, 13):
leader.breaks.append(value)
return leader
def export_dxf(self, tagwriter: "TagWriter") -> None:
write_tag2 = tagwriter.write_tag2
write_vertex = tagwriter.write_vertex
write_tag2(START_LEADER, LEADER_STR)
write_tag2(290, self.has_last_leader_line)
write_tag2(291, self.has_dogleg_vector)
if self.has_last_leader_line:
write_vertex(10, self.last_leader_point)
if self.has_dogleg_vector:
write_vertex(11, self.dogleg_vector)
code = 0
for vertex in self.breaks:
# write alternate group code 12 and 13
write_vertex(12 + code, vertex)
code = 1 - code
write_tag2(90, self.index)
write_tag2(40, self.dogleg_length)
# Export leader lines:
for line in self.lines:
line.export_dxf(tagwriter)
if tagwriter.dxfversion >= const.DXF2010:
write_tag2(271, self.attachment_direction)
write_tag2(END_LEADER, "}")
class LeaderLine:
def __init__(self):
self.vertices: List[Vec3] = []
self.breaks: Optional[List[Union[int, Vec3]]] = None
# Breaks: 90, 11, 12, [11, 12, ...] [, 90, 11, 12 [11, 12, ...]]
# group code 90 = break index
# group code 11 = start vertex of break
# group code 12 = end vertex of break
# multiple breaks per index possible
self.index: int = 0 # group code 91
self.color: int = colors.BY_BLOCK_RAW_VALUE # group code 92
# R2010+: override properties see ODA DWG pg. 214-215
@classmethod
def load(cls, tags: List["DXFTag"]):
assert tags[0] == (START_LEADER_LINE, LEADER_LINE_STR)
line = LeaderLine()
vertices = line.vertices
breaks = []
for code, value in tags:
if code == 10:
vertices.append(value)
elif code in (90, 11, 12):
breaks.append(value)
elif code == 91:
line.index = value
elif code == 92:
line.color = value
if breaks:
line.breaks = breaks
return line
def export_dxf(self, tagwriter: "TagWriter") -> None:
write_tag2 = tagwriter.write_tag2
write_vertex = tagwriter.write_vertex
write_tag2(START_LEADER_LINE, LEADER_LINE_STR)
for vertex in self.vertices:
write_vertex(10, vertex)
if self.breaks:
code = 0
for value in self.breaks:
if isinstance(value, int):
# break index
write_tag2(90, value)
else:
# 11 .. start vertex of break
# 12 .. end vertex of break
write_vertex(11 + code, value)
code = 1 - code
write_tag2(91, self.index)
write_tag2(92, self.color)
write_tag2(END_LEADER_LINE, "}")
@register_entity
class MLeader(MultiLeader):
DXFTYPE = "MLEADER"
acdb_mleader_style = DefSubclass(
"AcDbMLeaderStyle",
{
"unknown1": DXFAttr(179, default=2),
"content_type": DXFAttr(170, default=2),
"draw_mleader_order_type": DXFAttr(171, default=1),
"draw_leader_order_type": DXFAttr(172, default=0),
"max_leader_segments_points": DXFAttr(90, default=2),
"first_segment_angle_constraint": DXFAttr(40, default=0.0),
"second_segment_angle_constraint": DXFAttr(41, default=0.0),
"leader_type": DXFAttr(173, default=1),
"leader_line_color": DXFAttr(91, default=-1056964608),
# raw color: BY_BLOCK
"leader_linetype_handle": DXFAttr(340),
"leader_lineweight": DXFAttr(92, default=-2),
"has_landing": DXFAttr(290, default=1),
"landing_gap": DXFAttr(42, default=2.0),
"has_dogleg": DXFAttr(291, default=1),
"dogleg_length": DXFAttr(43, default=8),
"name": DXFAttr(3, default="Standard"),
# no handle is default arrow 'closed filled':
"arrow_head_handle": DXFAttr(341),
"arrow_head_size": DXFAttr(44, default=4),
"default_text_content": DXFAttr(300, default=""),
"text_style_handle": DXFAttr(342),
"text_left_attachment_type": DXFAttr(174, default=1),
"text_angle_type": DXFAttr(175, default=1),
"text_alignment_type": DXFAttr(176, default=0),
"text_right_attachment_type": DXFAttr(178, default=1),
"text_color": DXFAttr(93, default=-1056964608), # raw color: BY_BLOCK
"text_height": DXFAttr(45, default=4),
"has_frame_text": DXFAttr(292, default=0),
"text_align_always_left": DXFAttr(297, default=0),
"align_space": DXFAttr(46, default=4),
"has_block_scaling": DXFAttr(293),
"block_record_handle": DXFAttr(343),
"block_color": DXFAttr(94, default=-1056964608), # raw color: BY_BLOCK
"block_scale_x": DXFAttr(47, default=1),
"block_scale_y": DXFAttr(49, default=1),
"block_scale_z": DXFAttr(140, default=1),
"has_block_rotation": DXFAttr(294, default=1),
"block_rotation": DXFAttr(141, default=0),
"block_connection_type": DXFAttr(177, default=0),
"scale": DXFAttr(142, default=1),
"overwrite_property_value": DXFAttr(295, default=0),
"is_annotative": DXFAttr(296, default=0),
"break_gap_size": DXFAttr(143, default=3.75),
# 0 = Horizontal; 1 = Vertical:
"text_attachment_direction": DXFAttr(271, default=0),
# 9 = Center; 10 = Underline and Center:
"text_bottom__attachment_direction": DXFAttr(272, default=9),
# 9 = Center; 10 = Overline and Center:
"text_top_attachment_direction": DXFAttr(273, default=9),
"unknown2": DXFAttr(298, optional=True), # boolean flag ?
},
)
acdb_mleader_style_group_codes = group_code_mapping(acdb_mleader_style)
@register_entity
class MLeaderStyle(DXFObject):
DXFTYPE = "MLEADERSTYLE"
DXFATTRIBS = DXFAttributes(base_class, acdb_mleader_style)
MIN_DXF_VERSION_FOR_EXPORT = const.DXF2000
def load_dxf_attribs(
self, processor: SubclassProcessor = None
) -> "DXFNamespace":
dxf = super().load_dxf_attribs(processor)
if processor:
processor.fast_load_dxfattribs(
dxf, acdb_mleader_style_group_codes, subclass=1
)
return dxf
def export_entity(self, tagwriter: "TagWriter") -> None:
super().export_entity(tagwriter)
tagwriter.write_tag2(const.SUBCLASS_MARKER, acdb_mleader_style.name)
self.dxf.export_dxf_attribs(
tagwriter, acdb_mleader_style.attribs.keys()
)
class MLeaderStyleCollection(ObjectCollection):
def __init__(self, doc: "Drawing"):
super().__init__(
doc, dict_name="ACAD_MLEADERSTYLE", object_type="MLEADERSTYLE"
)
self.create_required_entries()
def create_required_entries(self) -> None:
for name in ("Standard",):
if name not in self.object_dict:
mleader_style = self.new(name)
# set standard text style
text_style = self.doc.styles.get("Standard")
mleader_style.dxf.text_style_handle = text_style.dxf.handle
|
The Centre will bring together expertise, manufacturing capability and experimental medicine research facilities for cell and gene therapy.
Kings College London will establish an Advanced Therapies Centre, bringing together expertise, manufacturing capability and experimental medicine research facilities for cell and gene therapy.
These therapies offer enormous promise for severe, intractable diseases, but their development is limited by access to clinical-grade manufacturing infrastructure and early-phase trial capabilities.
The Centre will be open to academic and industry partners across London and the UK, and support the planned Cell and Gene Therapy Catapult large-scale manufacturing facility.
The Centre will create a unique addition to the UK’s biomedical research and development capability. It will trigger a substantial investment in the UK by pharmaceutical companies, creating high value jobs, scientific advances, and ground-breaking improvements in healthcare. |
"""
@author: David Moodie
"""
import json
import os
import requests
flingAPI = "https://api.superfling.com/api/v2/"
amazonAPI = "http://unii-fling.s3.amazonaws.com/"
class Fling(object):
def __init__(self, bearer):
"""Requires authentication bearer to instantiate"""
#self.bearer = ""
self.bearer = bearer
def _request(self, endpoint="", data=None, req_type="post"):
global flingAPI
if data is None:
data = {}
user_agent = 'fling/1.6.2 (iPhone; iOS 8.3; Scale/2.00)'
bearer = 'Bearer ' + self.bearer
headers = {'User-Agent' : user_agent, 'Authorization' : bearer}
url = flingAPI
if req_type == "post":
headers['Content-Type'] = 'application/json'
r = requests.post(url + endpoint, data=data, headers=headers)
else:
r = requests.get(url + endpoint, params=data, headers=headers)
#if raise_for_status:
# r.raise_for_status()
return r
def _request_amazon(self, url, data=None, files=None):
#global amazonAPI
if data is None:
data = {}
user_agent = 'fling/1.6.2 (iPhone; iOS 8.3; Scale/2.00)'
headers = {'User-Agent' : user_agent}
#url = amazonAPI
r = requests.post(url, data=data, files=files, headers=headers)
return r
def _init_fling_on_server(self):
"""Create slot on fling to store fling data"""
media_type = "image" #Temporary
if media_type == "image":
data = {"uploads": {"extension":".jpg"}}
data = json.dumps(data)
r = self._request("uploads", data=data)
result = r.json()
uploads = result['uploads']
return uploads
def _upload_to_amazon(self, path, data):
"""Actually upload media to Amazon S3 so that fling can be downloaded/viewied"""
#print(data)
if not os.path.exists(path):
raise ValueError('No such file: {0}'.format(path))
with open(path, 'rb') as f:
file = f.read()
#Note: Must use tuple value for file data otherwise erroneous 'filename' field is put in request
files = {'file' : (None, file)}
amazonS3RequestURL = data['url']
        if data['static_fields']['content-type'] is None:
data['static_fields']['content-type'] = ""
submitdata = data['static_fields']
r = self._request_amazon(amazonS3RequestURL, data=submitdata, files=files)
return r
def get_flings(self, limit=50, page=1):
data = {"limit" : limit, "page" : page}
r = self._request("flings", data=data, req_type="get")
result = r.json()
result = result['flings']
return result
def get_me(self):
r = self._request("users/me", req_type="get")
result = r.json()
result = result['users']
return result
def upload(self, path):
"""Init a new picture with fling API and
upload the picture from your harddrive to fling amazon S3 servers"""
datafromfling = self._init_fling_on_server()
        # actually push the file to S3 before handing back its final location
        self._upload_to_amazon(path, datafromfling)
img_url = datafromfling['final_location']
return img_url
def send_text(self, text):
send_type = "Text"
if len(text) > 140:
print("Text must be <= 140 chars")
return "Text must be <= 140 chars"
else:
media = {"type" : send_type, "text" : text, "y" : 0}
data = {"flings": {"media" : media}}
data=json.dumps(data)
#print(data)
r = self._request("flings", data=data)
result = r.json()
return result
def send_image(self, img_url):
send_type = "Image"
media = {"type" : send_type, "url" : img_url, "y" : 0}
data = {"flings": {"media" : media}}
data=json.dumps(data)
#print(data)
r = self._request("flings", data=data)
result = r.json()
return result
def geocode(self, lat, lng):
geocode = requests.get("https://maps.googleapis.com/maps/api/geocode/json?latlng=" + str(lat) + "," + str(lng))
address = geocode.json()
if len(address['results']) == 0:
return ""
address = address['results']
return address
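

# Usage sketch (illustrative only; the bearer token and file path below are
# placeholders, not values from the original module):
if __name__ == "__main__":
    fling = Fling("YOUR_BEARER_TOKEN")
    print(fling.get_me())                    # current account details
    img_url = fling.upload("example.jpg")    # register and upload a local image
    fling.send_image(img_url)                # fling the uploaded image
    fling.send_text("Hello from the API")    # or send a short text fling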
|
If you are suffering from noise nuisance, this can be investigated in a confidential manner. We work with Tower Hamlets Environmental Health Environmental Protection (EHEP).
If there is sufficient evidence of noise nuisance, action will be taken against identified perpetrators.
Log sheets are important for gathering evidence, as they help identify patterns and trends of noise nuisance.
Sometimes the most effective option is to discuss the problem with your neighbour.
Often people aren’t aware they are causing a problem. If that doesn’t work the Mediation Service may help you resolve a dispute. Contact your housing officer for more information.
If this informal approach is not successful, you should report the incident to us and also contact the council's EHEP section.
Emergency out-of-hours noise patrol: 020 7364 5007 or visit towerhamlets.gov.uk for more information. |
# -*- coding: utf-8 -*-
'''
General documentation architecture:
Home
Index
- Getting started
Getting started to concise
Layers
- Preprocessing
Genomic Sequence Preprocessing
RNA Structure Preprocessing
Spline-position Preprocessing
- Data
Encode
Attract
Losses
Metrics
Eval metrics
Optimizers
Initializers
Regularizers
- effects
- Utils
fasta
model_data
pwm
splines
Contributing
'''
from __future__ import print_function
from __future__ import unicode_literals
import re
import inspect
import os
import shutil
import sys
if sys.version[0] == '2':
reload(sys)
sys.setdefaultencoding('utf8')
import concise
from concise import utils
from concise.utils import fasta
from concise.utils import helper
from concise.utils import model_data
from concise.utils import pwm, load_motif_db
from concise.utils import splines
from concise.data import encode, attract, hocomoco
from concise.preprocessing import sequence, splines, structure
from concise import constraints
from concise import eval_metrics
from concise import metrics
from concise import hyopt
from concise import initializers
from concise import layers
from concise import losses
from concise import optimizers
from concise import regularizers
from concise import effects
EXCLUDE = {
'Optimizer',
'Wrapper',
'get_session',
'set_session',
'CallbackList',
'serialize',
'deserialize',
'get',
}
PAGES = [
{
'page': 'preprocessing/sequence.md',
'functions': [
sequence.encodeSequence,
sequence.pad_sequences,
sequence.encodeDNA,
sequence.encodeRNA,
sequence.encodeCodon,
sequence.encodeAA,
]
},
{
'page': 'preprocessing/splines.md',
'classes': [
splines.EncodeSplines,
],
'functions': [
splines.encodeSplines,
]
},
{
'page': 'preprocessing/structure.md',
'functions': [
structure.encodeRNAStructure,
]
},
{
'page': 'layers.md',
'functions': [
layers.InputDNA,
layers.InputRNA,
layers.InputRNAStructure,
layers.InputCodon,
layers.InputAA,
layers.InputSplines,
],
'classes': [
layers.SplineT,
layers.SplineWeight1D,
layers.ConvSequence,
layers.ConvDNA,
layers.ConvRNA,
layers.ConvRNAStructure,
layers.ConvAA,
layers.ConvCodon,
# layers.ConvSplines,
layers.GlobalSumPooling1D,
],
},
{
'page': 'losses.md',
'all_module_functions': [losses],
},
{
'page': 'metrics.md',
'all_module_functions': [metrics],
},
{
'page': 'eval_metrics.md',
'all_module_functions': [eval_metrics],
},
{
'page': 'initializers.md',
'all_module_functions': [initializers],
'all_module_classes': [initializers],
},
{
'page': 'regularizers.md',
# 'all_module_functions': [regularizers],
# 'all_module_classes': [regularizers],
'classes': [
regularizers.SplineSmoother,
]
},
{
'page': 'optimizers.md',
'all_module_classes': [optimizers],
'functions': [
optimizers.data_based_init
]
},
{
'page': 'effects.md',
'functions': [
effects.effect_from_model,
effects.gradient_pred,
effects.dropout_pred,
effects.ism,
]
},
{
'page': 'utils/fasta.md',
'all_module_functions': [utils.fasta],
},
{
'page': 'utils/model_data.md',
'all_module_functions': [utils.model_data],
},
{
'page': 'utils/pwm.md',
'classes': [utils.pwm.PWM],
'functions': [
load_motif_db,
]
},
{
'page': 'utils/splines.md',
'classes': [utils.splines.BSpline]
},
{
'page': 'hyopt.md',
'classes': [
hyopt.CMongoTrials,
hyopt.CompileFN,
],
'functions': [
hyopt.test_fn,
hyopt.eval_model,
]
},
{
'page': 'data/encode.md',
'functions': [
encode.get_metadata,
encode.get_pwm_list,
]
},
{
'page': 'data/attract.md',
'functions': [
attract.get_metadata,
attract.get_pwm_list,
]
},
{
'page': 'data/hocomoco.md',
'functions': [
hocomoco.get_metadata,
hocomoco.get_pwm_list,
]
},
]
# TODO
ROOT = 'http://concise.io/'
def get_earliest_class_that_defined_member(member, cls):
ancestors = get_classes_ancestors([cls])
result = None
for ancestor in ancestors:
if member in dir(ancestor):
result = ancestor
if not result:
return cls
return result
def get_classes_ancestors(classes):
ancestors = []
for cls in classes:
ancestors += cls.__bases__
filtered_ancestors = []
for ancestor in ancestors:
if ancestor.__name__ in ['object']:
continue
filtered_ancestors.append(ancestor)
if filtered_ancestors:
return filtered_ancestors + get_classes_ancestors(filtered_ancestors)
else:
return filtered_ancestors
def get_function_signature(function, method=True):
signature = getattr(function, '_legacy_support_signature', None)
if signature is None:
signature = inspect.getargspec(function)
defaults = signature.defaults
if method:
args = signature.args[1:]
else:
args = signature.args
if defaults:
kwargs = zip(args[-len(defaults):], defaults)
args = args[:-len(defaults)]
else:
kwargs = []
st = '%s.%s(' % (function.__module__, function.__name__)
for a in args:
st += str(a) + ', '
for a, v in kwargs:
if isinstance(v, str):
v = '\'' + v + '\''
st += str(a) + '=' + str(v) + ', '
if kwargs or args:
return st[:-2] + ')'
else:
return st + ')'
def get_class_signature(cls):
try:
class_signature = get_function_signature(cls.__init__)
class_signature = class_signature.replace('__init__', cls.__name__)
except:
# in case the class inherits from object and does not
# define __init__
class_signature = cls.__module__ + '.' + cls.__name__ + '()'
return class_signature
def class_to_docs_link(cls):
module_name = cls.__module__
assert module_name[:8] == 'concise.'
module_name = module_name[8:]
link = ROOT + module_name.replace('.', '/') + '#' + cls.__name__.lower()
return link
def class_to_source_link(cls):
module_name = cls.__module__
assert module_name[:8] == 'concise.'
path = module_name.replace('.', '/')
path += '.py'
line = inspect.getsourcelines(cls)[-1]
link = 'https://github.com/avsecz/concise/blob/master/' + path + '#L' + str(line)
return '[[source]](' + link + ')'
def code_snippet(snippet):
result = '```python\n'
result += snippet + '\n'
result += '```\n'
return result
def process_class_docstring(docstring):
docstring = re.sub(r'\n # (.*)\n',
r'\n __\1__\n\n',
docstring)
docstring = re.sub(r' ([^\s\\]+):(.*)\n',
r' - __\1__:\2\n',
docstring)
docstring = docstring.replace(' ' * 5, '\t\t')
docstring = docstring.replace(' ' * 3, '\t')
docstring = docstring.replace(' ', '')
return docstring
def process_function_docstring(docstring):
docstring = re.sub(r'\n # (.*)\n',
r'\n __\1__\n\n',
docstring)
docstring = re.sub(r'\n # (.*)\n',
r'\n __\1__\n\n',
docstring)
docstring = re.sub(r' ([^\s\\]+):(.*)\n',
r' - __\1__:\2\n',
docstring)
docstring = docstring.replace(' ' * 6, '\t\t')
docstring = docstring.replace(' ' * 4, '\t')
docstring = docstring.replace(' ', '')
return docstring
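

# Illustrative sketch (not part of the original script): the substitutions above
# are meant to turn Keras-style docstring sections into markdown, roughly
#
#     # Arguments                ->    __Arguments__
#         x: the input tensor    ->    - __x__: the input tensor
#
# with the exact result depending on the indentation of the source docstring.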
print('Cleaning up existing sources directory.')
if os.path.exists('sources'):
shutil.rmtree('sources')
print('Populating sources directory with templates.')
for subdir, dirs, fnames in os.walk('templates'):
for fname in fnames:
new_subdir = subdir.replace('templates', 'sources')
if not os.path.exists(new_subdir):
os.makedirs(new_subdir)
if fname[-3:] == '.md':
fpath = os.path.join(subdir, fname)
new_fpath = fpath.replace('templates', 'sources')
shutil.copy(fpath, new_fpath)
# Take care of index page.
readme = open('../README.md').read()
index = open('templates/index.md').read()
index = index.replace('{{autogenerated}}', readme[readme.find('##'):])
f = open('sources/index.md', 'w')
f.write(index)
f.close()
print('Starting autogeneration.')
for page_data in PAGES:
blocks = []
classes = page_data.get('classes', [])
for module in page_data.get('all_module_classes', []):
module_classes = []
for name in dir(module):
if name[0] == '_' or name in EXCLUDE:
continue
module_member = getattr(module, name)
if inspect.isclass(module_member):
cls = module_member
if cls.__module__ == module.__name__:
if cls not in module_classes:
module_classes.append(cls)
module_classes.sort(key=lambda x: id(x))
classes += module_classes
for cls in classes:
subblocks = []
signature = get_class_signature(cls)
subblocks.append('<span style="float:right;">' + class_to_source_link(cls) + '</span>')
subblocks.append('### ' + cls.__name__ + '\n')
subblocks.append(code_snippet(signature))
docstring = cls.__doc__
if docstring:
subblocks.append(process_class_docstring(docstring))
blocks.append('\n'.join(subblocks))
functions = page_data.get('functions', [])
for module in page_data.get('all_module_functions', []):
module_functions = []
for name in dir(module):
if name[0] == '_' or name in EXCLUDE:
continue
module_member = getattr(module, name)
if inspect.isfunction(module_member):
function = module_member
if module.__name__ in function.__module__:
if function not in module_functions:
module_functions.append(function)
module_functions.sort(key=lambda x: id(x))
functions += module_functions
for function in functions:
subblocks = []
signature = get_function_signature(function, method=False)
signature = signature.replace(function.__module__ + '.', '')
subblocks.append('### ' + function.__name__ + '\n')
subblocks.append(code_snippet(signature))
docstring = function.__doc__
if docstring:
subblocks.append(process_function_docstring(docstring))
blocks.append('\n\n'.join(subblocks))
if not blocks:
raise RuntimeError('Found no content for page ' +
page_data['page'])
mkdown = '\n----\n\n'.join(blocks)
# save module page.
# Either insert content into existing page,
# or create page otherwise
page_name = page_data['page']
path = os.path.join('sources', page_name)
if os.path.exists(path):
template = open(path).read()
assert '{{autogenerated}}' in template, ('Template found for ' + path +
' but missing {{autogenerated}} tag.')
mkdown = template.replace('{{autogenerated}}', mkdown)
print('...inserting autogenerated content into template:', path)
else:
print('...creating new page with autogenerated content:', path)
subdir = os.path.dirname(path)
if not os.path.exists(subdir):
os.makedirs(subdir)
open(path, 'w').write(mkdown)
shutil.copyfile('../CONTRIBUTING.md', 'sources/contributing.md')
|
Multinational corporations have to be prepared for international encounters in order to guarantee successful transnational business activities.
Admittedly, there are no ready-to-use solutions for interactions with business partners in all kinds of environments that are perfectly suited to the social conditions of each country – tradesmen mostly rely on their experience and soft skills. However, some business etiquette is needed to enter today’s global business world and to meet the expectations of multinational business people.
Although there is no language barrier between the US and the UK, misunderstandings can occur. Accordingly, foreign business partners who know about British business etiquette can score some sympathy points.
Young businesses and freelancers with little experience in business practices can get help from business advisors, who can supervise financial transactions and plan business strategies whilst also giving advice on intercultural management (e.g. ClearSky Freelance Accountant).
Relationship building and networking are key success factors in the British business world. Especially, older generations preferably do business with partners they trust and know, or who have been introduced by a common business associate.
The traditional Briton shows respect for rank and age, and therefore expects professional business contacts to hold an authority equal to their own.
Business interactions are formal, reserved and unemotional – effusive or emotional language and loud talking are highly inappropriate.
British business men tend to formulate their statements in a way that they remain vague and only show tendencies. Their understated language is formal and marked by phrases like ‘perhaps’ or ‘it might be’. Often, internationals have to learn to read between the lines, as many avoid direct confrontation and stay reserved. In addition, giving personal space and some eye contact during conversations is very important in Britain.
Internationals also need to adapt to the rather slow decision-making process within British organisations – pressure tactics must be replaced by a high level of patience.
If things go wrong, the British show signs of self-blame in their verbal and written communication, even if they are not responsible for the faulty result, and without expecting the same behaviour from their counterparts.
Adopting a more formal approach which includes the sharing of, for instance, meeting protocols will please the modest British business men. Such protocols or follow-up letters should summarise what was discussed and next steps to be taken.
Maintaining a conservative image requires conservative business dress for both men and women. Punctuality, as in many other countries, is another key requirement when business meetings are scheduled. Even minor delays should be announced with a short call. If invited to a meeting, no business gift is needed.
Business partners are greeted with a handshake accompanied by eye contact. Before business affairs are discussed, meetings start with a short period of small talk. During those meetings, it is mainly senior-ranking managers who do the speaking. Lively discussions only take place in meetings between equal business partners. However, statements must be well thought through and based on facts and statistics – never on feelings.
Business meetings are formal and always serve a clear purpose, which may be the decision on clear action points.
Having learned about the international management skills required of a business man, businesses only need to worry about their professional skills – an area where professionals such as the Clearskyaccounting.co.uk Contractor accountants can support the management.
# -*- coding: utf-8 -*-
# Copyright (c) 2004-2014 Alterra, Wageningen-UR
# Allard de Wit ([email protected]), April 2014
"""Settings for PCSE
Default values will be read from the file 'pcse/settings/default_settings.py'.
User-specific settings are read from '$HOME/.pcse/user_settings.py'. Any
settings defined in user settings will override the default settings.

Settings must be defined in ALL-CAPS and can be accessed as attributes
of pcse.settings.settings.

For example, to use the settings in a module under 'crop':

    from ..settings import settings
    print(settings.METEO_CACHE_DIR)

Settings that are not ALL-CAPS will generate a warning. To avoid warnings
for everything that is not a setting (such as imported modules), prepend
an underscore to the name.
"""
# Location for meteo cache files
METEO_CACHE_DIR = "meteo_cache"
# Do range checks for meteo variables
METEO_RANGE_CHECKS = True
# PCSE sets all rate variables to zero after state integration for consistency.
# You can disable this behaviour for increased performance.
ZEROFY = True
# Configuration of logging
# The logging system of PCSE consists of two log handlers. One that sends log messages
# to the screen ('console') and one that sends message to a file. The location and name of
# the log is defined by LOG_DIR and LOG_FILE_NAME. Moreover, the console and file handlers
# can be given a log level as defined LOG_LEVEL_FILE and LOG_LEVEL_CONSOLE. By default
# these levels are INFO and WARNING meaning that log message of INFO and up are sent to
# file and WARNING and up are send to the console. For detailed log messages the log
# level can be set to DEBUG but this will generate a large number of logging messages.
#
# Log files can become 1Mb large. When this file size is reached a new file is opened
# and the old one is renamed. Only the most recent 7 log files are retained to avoid
# getting large log file sizes.
LOG_LEVEL_CONSOLE = "INFO"
LOG_CONFIG = \
{
'version': 1,
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
},
'brief': {
'format': '[%(levelname)s] - %(message)s'
},
},
'handlers': {
'console': {
'level':LOG_LEVEL_CONSOLE,
'class':'logging.StreamHandler',
'formatter':'brief'
},
},
'root': {
'handlers': ['console'],
'propagate': True,
'level':'NOTSET'
}
}
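
# Illustrative example (not part of the original file): placing the lines below
# in $HOME/.pcse/user_settings.py would override the defaults above, e.g. to get
# more verbose console logging and to disable meteo range checks.
#
#     LOG_LEVEL_CONSOLE = "DEBUG"
#     METEO_RANGE_CHECKS = False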
|
With Miele@mobile it is easy for you to connect with your refrigeration appliance or wine conditioning unit. With Plug & Play you register the appliance with the app and you immediately have access to the current status, no matter where you are. You can also control some individual appliance functions via the app and are reliably informed of faults and alarms. |
from lxml import etree
def classify(fh):
"""
Classify the feed type of the *link*
Args:
fh: url handler
Returns:
feed type: 'atom' or 'rss'
Raises:
UnknownFeedError: if the *link* does not point to a valid feed
"""
feed = fh.read()
for subclass in FeedClassifier.__subclasses__():
if subclass.check(feed):
return subclass.__name__
raise UnknownFeedError()
def get_feed_types():
"""List the available feed types by this feed classifier module."""
types = [subcls.__name__ for subcls in FeedClassifier.__subclasses__()]
return types
class FeedClassifier(object):
"""
Super class of the feed classifiers. The check class method has to be
overwritten by the descendant classes.
The name of the descendant class will be its feed type.
"""
@classmethod
def check(cls, feed):
"""Validate the *feed* content"""
return False
class atom(FeedClassifier):
"""atom feed classifier"""
xmlns = 'http://www.w3.org/2005/Atom'
@classmethod
def check(cls, feed):
try:
root = etree.fromstring(feed)
        except etree.XMLSyntaxError:
return False
else:
if root.nsmap.get(None) == cls.xmlns:
return True
return False
class rss(FeedClassifier):
"""rss feed classifier"""
@classmethod
def check(cls, feed):
try:
root = etree.fromstring(feed)
        except etree.XMLSyntaxError:
return False
return root.tag == cls.__name__
class UnknownFeedError(Exception):
pass
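

# Usage sketch (illustrative, assuming Python 3; the URL below is a placeholder):
#
#     from urllib.request import urlopen
#     with urlopen("http://example.com/feed.xml") as fh:
#         print(classify(fh))  # -> 'atom' or 'rss', raises UnknownFeedError otherwise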
|
The logic of modern technology underscores the importance of liberty for the advancement of our civilisation.
There’s a better and safer way to protect and encourage disruptive innovation. First and foremost, governments must recognize severe limits in their ability to shape the destination, if not the trajectory, of disruptive technologies. Technology and policy run at different clock speeds, and the gap is getting wider. Even with the best of intentions, the most nimble regulatory agency still can’t keep up with the pace of change in consumer markets. When they try, the result, more often than not, is the invocation of the law of unintended consequences, where rules intended to encourage such noble goals as enhanced competition or the public interest wind up doing just the opposite.
Americans, especially those under the age of 30, are deeply cynical about the political process. They live in a universe where technology can be counted on to make the world better and more interesting every 12 to 24 months, where life is approached as a series of problems to be solved through clever hacks, where even impractical dreams can be realized in weeks through a successful Kickstarter campaign. Why should they trust policy-makers who don’t live in their world, or share their optimism for its future, and who can’t be counted on to do what it takes to maximize its potential? Even if that just means staying out of the way.
Make sure to read the entire article. |
#
# Copyright (C) 2010 Stanislav Bohm
#
# This file is part of Kaira.
#
# Kaira is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License, or
# (at your option) any later version.
#
# Kaira is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Kaira. If not, see <http://www.gnu.org/licenses/>.
#
from parameters import ParametersWidget
from build import BuildOptionsWidget
import gtk
class LibraryConfig(gtk.VBox):
def __init__(self, project):
def set_rpc(w):
project.library_rpc = w.get_active()
def set_octave(w):
project.library_octave = w.get_active()
gtk.VBox.__init__(self)
self.project = project
button = gtk.CheckButton("Build library in RPC mode")
button.set_active(project.library_rpc)
button.connect("toggled", set_rpc)
self.pack_start(button, False, False)
button = gtk.CheckButton("Build Octave module")
button.set_active(project.library_octave)
button.connect("toggled", set_octave)
self.pack_start(button, False, False)
self.show()
class ProjectConfig(gtk.Notebook):
def __init__(self, app):
gtk.Notebook.__init__(self)
self.set_tab_pos(gtk.POS_LEFT)
w = LibraryConfig(app.project)
self.append_page(w, gtk.Label("Library"))
w = ParametersWidget(app.project, app.window)
self.append_page(w, gtk.Label("Parameters"))
w = BuildOptionsWidget(app.project, app)
self.append_page(w, gtk.Label("Build"))
self.show_all()
|
What a great upcycle! I absolutely love it, you did a wonderful job.
I love your pretty chandelier! It is so sparkly...it must look beautiful when the sun hits it. Thanks for linking up to Dishing It! & Digging It!
Thank you Jennifer, the chandelier does sparkle in the sun.
What a beautiful project! I'm sure you'll really enjoy it during the summer months.
Thank you Michele, I do love it!
Very pretty! I'm sure it looks great in the morning sun. Thanks for linking up with Funtastic Friday. |
from datetime import datetime
from django.conf import settings
from django.db import connection
import pstats
from cStringIO import StringIO
from random import random
import logging
try:
import cProfile as profile
except ImportError:
import profile
class PerformanceMiddleware(object):
_process_data = {}
profiling = False
logger = logging.getLogger(__name__)
def process_view(self, request, callback, callback_args, callback_kwargs):
# self is reused :(
self._process_data = {}
self.profiling = False
self.profiler = None
self._process_data['start_time'] = datetime.now()
profile_per = getattr(settings, "PERFORMANCE_MIDDLEWARE_PROFILE_EVERY", 10)
random_less_than = 1.0 / profile_per
rand_val = random()
if rand_val < random_less_than:
self.profiling = True
self.profiler = profile.Profile()
args = (request,) + callback_args
try:
return self.profiler.runcall(callback, *args, **callback_kwargs)
except:
# we want the process_exception middleware to fire
# https://code.djangoproject.com/ticket/12250
return
def process_response(self, request, response):
now = datetime.now()
start = self._process_data['start_time']
td = (now - start)
        seconds_taken = td.total_seconds()
warning_threshold = getattr(settings, "PERFORMANCE_MIDDLEWARE_WARNING_THRESHOLD", 1.0)
error_threshold = getattr(settings, "PERFORMANCE_MIDDLEWARE_ERROR_THRESHOLD", 2.0)
critical_threshold = getattr(settings, "PERFORMANCE_MIDDLEWARE_CRITICAL_THRESHOLD", 5.0)
if (seconds_taken < warning_threshold) and (seconds_taken < error_threshold) and (seconds_taken < critical_threshold):
return response
io = StringIO()
io.write("Time taken: %f seconds\n" % seconds_taken)
io.write("Request: \n%s\n" % request.__str__())
io.write("Profile: \n")
if self.profiling:
self.profiler.create_stats()
stats = pstats.Stats(self.profiler, stream=io)
stats.sort_stats('cumulative')
stats.print_stats(100)
else:
io.write("No profile for this request, sorry")
io.write("SQL:\n")
for query in connection.queries:
io.write("Time: %s, Query: %s\n" % (query['time'], query['sql']))
if seconds_taken > critical_threshold:
self.logger.critical(io.getvalue())
elif seconds_taken > error_threshold:
self.logger.error(io.getvalue())
elif seconds_taken > warning_threshold:
self.logger.warning(io.getvalue())
return response
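

# Illustrative configuration sketch (the dotted middleware path below is a
# placeholder, not a name from the original project). The threshold/profiling
# settings are the ones read via getattr() above, shown with their defaults:
#
#     MIDDLEWARE_CLASSES += ('myapp.middleware.PerformanceMiddleware',)
#     PERFORMANCE_MIDDLEWARE_PROFILE_EVERY = 10        # profile ~1 in N requests
#     PERFORMANCE_MIDDLEWARE_WARNING_THRESHOLD = 1.0   # seconds
#     PERFORMANCE_MIDDLEWARE_ERROR_THRESHOLD = 2.0     # seconds
#     PERFORMANCE_MIDDLEWARE_CRITICAL_THRESHOLD = 5.0  # seconds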
|
We look forward to meeting you at The Scandinavian Stand at The Hyatt, Mezz. Floor.
This year we can be found in our new office at 25 Rue des États-Unis, 4th floor just around the corner from the Croisette. We look forward to welcoming you in our new surroundings.
Welcome to Scandinavian cinema - welcome to our world of films. |
import argparse
import io
import os.path
import sys
import pytest
from ..patterns import PathFullPattern, PathPrefixPattern, FnmatchPattern, ShellPattern, RegexPattern
from ..patterns import load_exclude_file, load_pattern_file
from ..patterns import parse_pattern, PatternMatcher
def check_patterns(files, pattern, expected):
"""Utility for testing patterns.
"""
assert all([f == os.path.normpath(f) for f in files]), "Pattern matchers expect normalized input paths"
matched = [f for f in files if pattern.match(f)]
assert matched == (files if expected is None else expected)
@pytest.mark.parametrize("pattern, expected", [
# "None" means all files, i.e. all match the given pattern
("/", []),
("/home", ["/home"]),
("/home///", ["/home"]),
("/./home", ["/home"]),
("/home/user", ["/home/user"]),
("/home/user2", ["/home/user2"]),
("/home/user/.bashrc", ["/home/user/.bashrc"]),
])
def test_patterns_full(pattern, expected):
files = ["/home", "/home/user", "/home/user2", "/home/user/.bashrc", ]
check_patterns(files, PathFullPattern(pattern), expected)
@pytest.mark.parametrize("pattern, expected", [
# "None" means all files, i.e. all match the given pattern
("", []),
("relative", []),
("relative/path/", ["relative/path"]),
("relative/path", ["relative/path"]),
])
def test_patterns_full_relative(pattern, expected):
files = ["relative/path", "relative/path2", ]
check_patterns(files, PathFullPattern(pattern), expected)
@pytest.mark.parametrize("pattern, expected", [
# "None" means all files, i.e. all match the given pattern
("/", None),
("/./", None),
("", []),
("/home/u", []),
("/home/user", ["/home/user/.profile", "/home/user/.bashrc"]),
("/etc", ["/etc/server/config", "/etc/server/hosts"]),
("///etc//////", ["/etc/server/config", "/etc/server/hosts"]),
("/./home//..//home/user2", ["/home/user2/.profile", "/home/user2/public_html/index.html"]),
("/srv", ["/srv/messages", "/srv/dmesg"]),
])
def test_patterns_prefix(pattern, expected):
files = [
"/etc/server/config", "/etc/server/hosts", "/home", "/home/user/.profile", "/home/user/.bashrc",
"/home/user2/.profile", "/home/user2/public_html/index.html", "/srv/messages", "/srv/dmesg",
]
check_patterns(files, PathPrefixPattern(pattern), expected)
@pytest.mark.parametrize("pattern, expected", [
# "None" means all files, i.e. all match the given pattern
("", []),
("foo", []),
("relative", ["relative/path1", "relative/two"]),
("more", ["more/relative"]),
])
def test_patterns_prefix_relative(pattern, expected):
files = ["relative/path1", "relative/two", "more/relative"]
check_patterns(files, PathPrefixPattern(pattern), expected)
@pytest.mark.parametrize("pattern, expected", [
# "None" means all files, i.e. all match the given pattern
("/*", None),
("/./*", None),
("*", None),
("*/*", None),
("*///*", None),
("/home/u", []),
("/home/*",
["/home/user/.profile", "/home/user/.bashrc", "/home/user2/.profile", "/home/user2/public_html/index.html",
"/home/foo/.thumbnails", "/home/foo/bar/.thumbnails"]),
("/home/user/*", ["/home/user/.profile", "/home/user/.bashrc"]),
("/etc/*", ["/etc/server/config", "/etc/server/hosts"]),
("*/.pr????e", ["/home/user/.profile", "/home/user2/.profile"]),
("///etc//////*", ["/etc/server/config", "/etc/server/hosts"]),
("/./home//..//home/user2/*", ["/home/user2/.profile", "/home/user2/public_html/index.html"]),
("/srv*", ["/srv/messages", "/srv/dmesg"]),
("/home/*/.thumbnails", ["/home/foo/.thumbnails", "/home/foo/bar/.thumbnails"]),
])
def test_patterns_fnmatch(pattern, expected):
files = [
"/etc/server/config", "/etc/server/hosts", "/home", "/home/user/.profile", "/home/user/.bashrc",
"/home/user2/.profile", "/home/user2/public_html/index.html", "/srv/messages", "/srv/dmesg",
"/home/foo/.thumbnails", "/home/foo/bar/.thumbnails",
]
check_patterns(files, FnmatchPattern(pattern), expected)
@pytest.mark.parametrize("pattern, expected", [
# "None" means all files, i.e. all match the given pattern
("*", None),
("**/*", None),
("/**/*", None),
("/./*", None),
("*/*", None),
("*///*", None),
("/home/u", []),
("/home/*",
["/home/user/.profile", "/home/user/.bashrc", "/home/user2/.profile", "/home/user2/public_html/index.html",
"/home/foo/.thumbnails", "/home/foo/bar/.thumbnails"]),
("/home/user/*", ["/home/user/.profile", "/home/user/.bashrc"]),
("/etc/*/*", ["/etc/server/config", "/etc/server/hosts"]),
("/etc/**/*", ["/etc/server/config", "/etc/server/hosts"]),
("/etc/**/*/*", ["/etc/server/config", "/etc/server/hosts"]),
("*/.pr????e", []),
("**/.pr????e", ["/home/user/.profile", "/home/user2/.profile"]),
("///etc//////*", ["/etc/server/config", "/etc/server/hosts"]),
("/./home//..//home/user2/", ["/home/user2/.profile", "/home/user2/public_html/index.html"]),
("/./home//..//home/user2/**/*", ["/home/user2/.profile", "/home/user2/public_html/index.html"]),
("/srv*/", ["/srv/messages", "/srv/dmesg", "/srv2/blafasel"]),
("/srv*", ["/srv", "/srv/messages", "/srv/dmesg", "/srv2", "/srv2/blafasel"]),
("/srv/*", ["/srv/messages", "/srv/dmesg"]),
("/srv2/**", ["/srv2", "/srv2/blafasel"]),
("/srv2/**/", ["/srv2/blafasel"]),
("/home/*/.thumbnails", ["/home/foo/.thumbnails"]),
("/home/*/*/.thumbnails", ["/home/foo/bar/.thumbnails"]),
])
def test_patterns_shell(pattern, expected):
files = [
"/etc/server/config", "/etc/server/hosts", "/home", "/home/user/.profile", "/home/user/.bashrc",
"/home/user2/.profile", "/home/user2/public_html/index.html", "/srv", "/srv/messages", "/srv/dmesg",
"/srv2", "/srv2/blafasel", "/home/foo/.thumbnails", "/home/foo/bar/.thumbnails",
]
check_patterns(files, ShellPattern(pattern), expected)
@pytest.mark.parametrize("pattern, expected", [
# "None" means all files, i.e. all match the given pattern
("", None),
(".*", None),
("^/", None),
("^abc$", []),
("^[^/]", []),
("^(?!/srv|/foo|/opt)",
["/home", "/home/user/.profile", "/home/user/.bashrc", "/home/user2/.profile",
"/home/user2/public_html/index.html", "/home/foo/.thumbnails", "/home/foo/bar/.thumbnails", ]),
])
def test_patterns_regex(pattern, expected):
files = [
'/srv/data', '/foo/bar', '/home',
'/home/user/.profile', '/home/user/.bashrc',
'/home/user2/.profile', '/home/user2/public_html/index.html',
'/opt/log/messages.txt', '/opt/log/dmesg.txt',
"/home/foo/.thumbnails", "/home/foo/bar/.thumbnails",
]
obj = RegexPattern(pattern)
assert str(obj) == pattern
assert obj.pattern == pattern
check_patterns(files, obj, expected)
def test_regex_pattern():
# The forward slash must match the platform-specific path separator
assert RegexPattern("^/$").match("/")
assert RegexPattern("^/$").match(os.path.sep)
assert not RegexPattern(r"^\\$").match("/")
def use_normalized_unicode():
return sys.platform in ("darwin",)
def _make_test_patterns(pattern):
return [PathPrefixPattern(pattern),
FnmatchPattern(pattern),
RegexPattern("^{}/foo$".format(pattern)),
ShellPattern(pattern),
]
@pytest.mark.parametrize("pattern", _make_test_patterns("b\N{LATIN SMALL LETTER A WITH ACUTE}"))
def test_composed_unicode_pattern(pattern):
assert pattern.match("b\N{LATIN SMALL LETTER A WITH ACUTE}/foo")
assert pattern.match("ba\N{COMBINING ACUTE ACCENT}/foo") == use_normalized_unicode()
@pytest.mark.parametrize("pattern", _make_test_patterns("ba\N{COMBINING ACUTE ACCENT}"))
def test_decomposed_unicode_pattern(pattern):
assert pattern.match("b\N{LATIN SMALL LETTER A WITH ACUTE}/foo") == use_normalized_unicode()
assert pattern.match("ba\N{COMBINING ACUTE ACCENT}/foo")
@pytest.mark.parametrize("pattern", _make_test_patterns(str(b"ba\x80", "latin1")))
def test_invalid_unicode_pattern(pattern):
assert not pattern.match("ba/foo")
assert pattern.match(str(b"ba\x80/foo", "latin1"))
@pytest.mark.parametrize("lines, expected", [
# "None" means all files, i.e. none excluded
([], None),
(["# Comment only"], None),
(["*"], []),
(["# Comment",
"*/something00.txt",
" *whitespace* ",
# Whitespace before comment
" #/ws*",
# Empty line
"",
"# EOF"],
["/more/data", "/home", " #/wsfoobar"]),
(["re:.*"], []),
(["re:\s"], ["/data/something00.txt", "/more/data", "/home"]),
([r"re:(.)(\1)"], ["/more/data", "/home", "\tstart/whitespace", "/whitespace/end\t"]),
(["", "", "",
"# This is a test with mixed pattern styles",
# Case-insensitive pattern
"re:(?i)BAR|ME$",
"",
"*whitespace*",
"fm:*/something00*"],
["/more/data"]),
([r" re:^\s "], ["/data/something00.txt", "/more/data", "/home", "/whitespace/end\t"]),
([r" re:\s$ "], ["/data/something00.txt", "/more/data", "/home", " #/wsfoobar", "\tstart/whitespace"]),
(["pp:./"], None),
(["pp:/"], [" #/wsfoobar", "\tstart/whitespace"]),
(["pp:aaabbb"], None),
(["pp:/data", "pp: #/", "pp:\tstart", "pp:/whitespace"], ["/more/data", "/home"]),
(["/nomatch", "/more/*"],
['/data/something00.txt', '/home', ' #/wsfoobar', '\tstart/whitespace', '/whitespace/end\t']),
# the order of exclude patterns shouldn't matter
(["/more/*", "/nomatch"],
['/data/something00.txt', '/home', ' #/wsfoobar', '\tstart/whitespace', '/whitespace/end\t']),
])
def test_exclude_patterns_from_file(tmpdir, lines, expected):
files = [
'/data/something00.txt', '/more/data', '/home',
' #/wsfoobar',
'\tstart/whitespace',
'/whitespace/end\t',
]
def evaluate(filename):
patterns = []
load_exclude_file(open(filename, "rt"), patterns)
matcher = PatternMatcher(fallback=True)
matcher.add_inclexcl(patterns)
return [path for path in files if matcher.match(path)]
exclfile = tmpdir.join("exclude.txt")
with exclfile.open("wt") as fh:
fh.write("\n".join(lines))
assert evaluate(str(exclfile)) == (files if expected is None else expected)
@pytest.mark.parametrize("lines, expected_roots, expected_numpatterns", [
# "None" means all files, i.e. none excluded
([], [], 0),
(["# Comment only"], [], 0),
(["- *"], [], 1),
(["+fm:*/something00.txt",
"-/data"], [], 2),
(["R /"], ["/"], 0),
(["R /",
"# comment"], ["/"], 0),
(["# comment",
"- /data",
"R /home"], ["/home"], 1),
])
def test_load_patterns_from_file(tmpdir, lines, expected_roots, expected_numpatterns):
def evaluate(filename):
roots = []
inclexclpatterns = []
load_pattern_file(open(filename, "rt"), roots, inclexclpatterns)
return roots, len(inclexclpatterns)
patternfile = tmpdir.join("patterns.txt")
with patternfile.open("wt") as fh:
fh.write("\n".join(lines))
roots, numpatterns = evaluate(str(patternfile))
assert roots == expected_roots
assert numpatterns == expected_numpatterns
def test_switch_patterns_style():
patterns = """\
+0_initial_default_is_shell
p fm
+1_fnmatch
P re
+2_regex
+3_more_regex
P pp
+4_pathprefix
p fm
p sh
+5_shell
"""
pattern_file = io.StringIO(patterns)
roots, patterns = [], []
load_pattern_file(pattern_file, roots, patterns)
assert len(patterns) == 6
assert isinstance(patterns[0].val, ShellPattern)
assert isinstance(patterns[1].val, FnmatchPattern)
assert isinstance(patterns[2].val, RegexPattern)
assert isinstance(patterns[3].val, RegexPattern)
assert isinstance(patterns[4].val, PathPrefixPattern)
assert isinstance(patterns[5].val, ShellPattern)
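

# Illustrative sketch (not part of the test module): a pattern file combining the
# directives exercised above -- a root line ("R"), comments, a style switch
# ("p <style>") and exclude lines -- as it could be fed to load_pattern_file():
#
#     R /home
#     # exclude caches, keep everything else under /home
#     p sh
#     - /home/*/.cache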
@pytest.mark.parametrize("lines", [
(["X /data"]), # illegal pattern type prefix
(["/data"]), # need a pattern type prefix
])
def test_load_invalid_patterns_from_file(tmpdir, lines):
patternfile = tmpdir.join("patterns.txt")
with patternfile.open("wt") as fh:
fh.write("\n".join(lines))
filename = str(patternfile)
with pytest.raises(argparse.ArgumentTypeError):
roots = []
inclexclpatterns = []
load_pattern_file(open(filename, "rt"), roots, inclexclpatterns)
@pytest.mark.parametrize("lines, expected", [
# "None" means all files, i.e. none excluded
([], None),
(["# Comment only"], None),
(["- *"], []),
# default match type is sh: for patterns -> * doesn't match a /
(["-*/something0?.txt"],
['/data', '/data/something00.txt', '/data/subdir/something01.txt',
'/home', '/home/leo', '/home/leo/t', '/home/other']),
(["-fm:*/something00.txt"],
['/data', '/data/subdir/something01.txt', '/home', '/home/leo', '/home/leo/t', '/home/other']),
(["-fm:*/something0?.txt"],
["/data", '/home', '/home/leo', '/home/leo/t', '/home/other']),
(["+/*/something0?.txt",
"-/data"],
["/data/something00.txt", '/home', '/home/leo', '/home/leo/t', '/home/other']),
(["+fm:*/something00.txt",
"-/data"],
["/data/something00.txt", '/home', '/home/leo', '/home/leo/t', '/home/other']),
# include /home/leo and exclude the rest of /home:
(["+/home/leo",
"-/home/*"],
['/data', '/data/something00.txt', '/data/subdir/something01.txt', '/home', '/home/leo', '/home/leo/t']),
# wrong order, /home/leo is already excluded by -/home/*:
(["-/home/*",
"+/home/leo"],
['/data', '/data/something00.txt', '/data/subdir/something01.txt', '/home']),
(["+fm:/home/leo",
"-/home/"],
['/data', '/data/something00.txt', '/data/subdir/something01.txt', '/home', '/home/leo', '/home/leo/t']),
])
def test_inclexcl_patterns_from_file(tmpdir, lines, expected):
files = [
'/data', '/data/something00.txt', '/data/subdir/something01.txt',
'/home', '/home/leo', '/home/leo/t', '/home/other'
]
def evaluate(filename):
matcher = PatternMatcher(fallback=True)
roots = []
inclexclpatterns = []
load_pattern_file(open(filename, "rt"), roots, inclexclpatterns)
matcher.add_inclexcl(inclexclpatterns)
return [path for path in files if matcher.match(path)]
patternfile = tmpdir.join("patterns.txt")
with patternfile.open("wt") as fh:
fh.write("\n".join(lines))
assert evaluate(str(patternfile)) == (files if expected is None else expected)
@pytest.mark.parametrize("pattern, cls", [
("", FnmatchPattern),
# Default style
("*", FnmatchPattern),
("/data/*", FnmatchPattern),
# fnmatch style
("fm:", FnmatchPattern),
("fm:*", FnmatchPattern),
("fm:/data/*", FnmatchPattern),
("fm:fm:/data/*", FnmatchPattern),
# Regular expression
("re:", RegexPattern),
("re:.*", RegexPattern),
("re:^/something/", RegexPattern),
("re:re:^/something/", RegexPattern),
# Path prefix
("pp:", PathPrefixPattern),
("pp:/", PathPrefixPattern),
("pp:/data/", PathPrefixPattern),
("pp:pp:/data/", PathPrefixPattern),
# Shell-pattern style
("sh:", ShellPattern),
("sh:*", ShellPattern),
("sh:/data/*", ShellPattern),
("sh:sh:/data/*", ShellPattern),
])
def test_parse_pattern(pattern, cls):
assert isinstance(parse_pattern(pattern), cls)
@pytest.mark.parametrize("pattern", ["aa:", "fo:*", "00:", "x1:abc"])
def test_parse_pattern_error(pattern):
with pytest.raises(ValueError):
parse_pattern(pattern)
def test_pattern_matcher():
pm = PatternMatcher()
assert pm.fallback is None
for i in ["", "foo", "bar"]:
assert pm.match(i) is None
# add extra entries to aid in testing
for target in ["A", "B", "Empty", "FileNotFound"]:
pm.is_include_cmd[target] = target
pm.add([RegexPattern("^a")], "A")
pm.add([RegexPattern("^b"), RegexPattern("^z")], "B")
pm.add([RegexPattern("^$")], "Empty")
pm.fallback = "FileNotFound"
assert pm.match("") == "Empty"
assert pm.match("aaa") == "A"
assert pm.match("bbb") == "B"
assert pm.match("ccc") == "FileNotFound"
assert pm.match("xyz") == "FileNotFound"
assert pm.match("z") == "B"
assert PatternMatcher(fallback="hey!").fallback == "hey!"
|
Baker Street brothers volume 2.
1 of 1 Loveland Adult Fiction - Mystery & Crime Robertson, M.
When brothers Reggie and Nigel Heath choose 221B Baker Street as the location for their law office, they don't expect that their new office space would come with one huge stipulation, answering the letters sent to Sherlock Holmes, the most famous resident of that address.
Reggie is distressed because the love of his life, actress Laura Rankin (whom Nigel also adores), is gallivanting around with media mogul Lord Buxton. And while Reggie is working on a new case involving one of London's Black Cab drivers who is accused of murdering two American tourists, the letters to Sherlock Holmes are piling up. There's even one from someone who claims to be the descendant of Professor James Moriarty.
With a case that would have puzzled even Sherlock himself and a pair of brother sleuths more different than night and day, The Brothers of Baker Street is sure to please mystery fans whatever their address.
The first Sherlock Holmes letter barrister Reggie Heath answered cost him most of his personal fortune, all of his Baker Street chambers clients, and quite possibly Laura Rankin, the love of his life. But Reggie intends to earn all of it-the money, the career, and the love-back again. His latest client is the driver of one of London's famous black cabs, accused of murdering two American tourists. But while Reggie is working on that case, the letters to Sherlock Holmes are piling up-including one from someone who claims to be the descendant of Professor James Moriarty.
import sqlite3
import matplotlib.pyplot as plt
from collections import OrderedDict
import numpy as np
# Connect to database
conn = sqlite3.connect('sitedb.sqlite')
cur = conn.cursor()
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
avg_list = []
std_list = []
for i in range(1, 13, 1):
stuff = cur.execute('''
SELECT * FROM Data WHERE month = ? ORDER BY date''', (str(i), ))
# catch from each month
month_catch = []
for line in stuff:
month_catch.append(line[7])
high = max(month_catch)
    # drop the single highest value as an outlier; list.remove() works in place
    month_catch.remove(high)
avg = np.mean(month_catch)
std_dev = np.std(month_catch)
# list of avg ordered by month
avg_list.append(avg)
# list of std deviations ordered by month
std_list.append(std_dev)
# Graph of normal distribution of predictions
for i in range(len(avg_list)):
mu = avg_list[i]
sigma = std_list[i]
s = np.random.normal(mu, sigma, 1000)
    count, bins, ignored = plt.hist(s, 30, density=True)
plt.title('Normal Distribution of Predicted Catch in %s' % months[i])
plt.plot(bins, 1/(sigma * np.sqrt(2 * np.pi)) *
np.exp( - (bins - mu)**2 / (2 * sigma**2) ),
linewidth=2, color='r')
axes = plt.gca()
axes.set_xlim(0,)
plt.show()
#plt.figure(1)
#plt.bar(range(len(avg_catch)), avg_catch.values(), align='center')
#plt.xticks(range(len(avg_catch)), avg_catch.keys())
#plt.xlabel('Month')
#plt.ylabel('Average Catch')
#plt.title('Average Catch at Ventura County Shore Sites 2000-2010')
#plt.show()
|
Create professional Flash multimedia slideshows. Blend your photos, Flash movies, music, text animation and videos to produce impressive multimedia slideshows. Great for promoting your business, proofing images for clients or wowing any audience. 200+ highly customizable transition effects. Create more impressive slideshows with the pan-and-zoom (Ken Burns) motion effect, stunning text animation, and a larger built-in clipart library to cater to most themes. Adjust the timing of each object's animation and sound playback precisely and easily using the timeline window. Waveform preview enables you to synchronize music, sound and visual elements. Add interactivity such as click to play sound, click to show/hide visual objects, etc. Import MP3/OGG/AVI/WMV/MPEG/GIF animation files. Add and choose playback controls. Publish your slideshow as a Flash movie, executable file/CD/screensaver with registration, usage limits and a full application installer. Send via e-mail using the free IncrediShow online service. It has been tested and works on Windows Vista.
Supports more than one background music track, multiple font settings and styles, more seamless looping sound, and a better preloader system.
Publish to MPEG-4, QuickTime movie and wireless-device video formats such as iPhone, iPod, PSP and mobile phones.
SWFKit Pro - SWFKit is one of the leading SWF to EXE tools. SWFKit can build professional Windows desktop applications, screen savers from SWF movies created by Flash Authoring tools such as Macromedia Flash, Flex, Swish Max, etc. It can also create installers.
Flash Wallpaper Maker - A tool which helps you turn your favorite Macromedia Flash file into your wallpaper; it can also help you publish it just by clicking a button.
zxChart - zxChart creates dynamic charts in the Macromedia Flash™ SWF format. The software is based on Macromedia Flash™ technology and may be used on any platform. It does not require knowledge of Macromedia Flash™.
AnyChart Flash Chart Component - Anychart is a flexible Macromedia Flash-based solution that allows you to create animated, compact, interactive and attractive charts. Driven by an XML interface, it has no installation and is easily used.
Flash To Video Encoder - Encode your Flash SWF to AVI and MP4 video movies. Flash To Video Encoder converts any Macromedia Flash (SWF file) to a video movie. The conversion process allows human interaction with the Flash content during conversion.
#!/usr/bin/env python
#This script plots a white-dwarf model and its four variations at 1 sigma.
#It accepts a text file with the model file names listed.
#Slight modification is required on line 61 to scale the y-axis correctly.
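#
# Illustrative sketch (an assumption, not from the original script) of the
# expected starList.txt layout: five lines per model, in the order consumed by
# the loop below -- the original model followed by its four 1-sigma variations:
#
#     wd1234+567_original.dat
#     wd1234+567_tm_gm.dat
#     wd1234+567_tm_gp.dat
#     wd1234+567_tp_gm.dat
#     wd1234+567_tp_gp.dat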
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Read the text list of files to plot (starList.txt) into a python list
with open('starList.txt') as f:
lines = f.readlines()
lines = [x.strip() for x in lines]
# loop stuff
listLength = len(lines)
count = 0
# this loop plots the original model and the 4 variations
while (count < listLength):
inputString1 = lines[count]
inputString2 = lines[count + 1]
inputString3 = lines[count + 2]
inputString4 = lines[count + 3]
inputString5 = lines[count + 4]
plotTitle = lines[count][:12]
plotFileName = lines[count][:12] + ".pdf"
array1 = np.genfromtxt(inputString1,names=['wave','flam'])
array2 = np.genfromtxt(inputString2,names=['wave','flam'])
array3 = np.genfromtxt(inputString3,names=['wave','flam'])
array4 = np.genfromtxt(inputString4,names=['wave','flam'])
array5 = np.genfromtxt(inputString5,names=['wave','flam'])
fig = plt.figure()
axes = fig.add_subplot(111)
axes.set_title(plotTitle)
axes.set_xlabel('Wavelength (A)')
axes.set_ylabel('Flux (Flam)')
axes.plot(array1['wave'],array1['flam'],label='Original',linewidth=1)
axes.plot(array2['wave'],array2['flam'],label='tm, gm',linewidth=1)
axes.plot(array3['wave'],array3['flam'],label='tm, gp',linewidth=1)
axes.plot(array4['wave'],array4['flam'],label='tp, gm',linewidth=1)
axes.plot(array5['wave'],array5['flam'],label='tp, gp',linewidth=1)
axes.set_xlim([3000,11000])
axes.set_ylim([0,array1['flam'][1208]])
axes.legend(loc='upper right', shadow=True)
plt.grid(True)
plt.savefig(plotFileName)
plt.clf()
count = count + 5
exit()
|
There is something so intimate about saying goodbye.
Day after day, we are constantly saying goodbye to people.
A cashier at the grocery store.
A spouse as they head off to work in the morning.
A coworker as we leave the office for the day.
A friend at the end of a coffee date.
But in many cases, we don't really say goodbye. Often, we disconnect from the other person and ourselves before we physically part. Even if we say the word goodbye (and often we don't, in exchange for something more casual), most of the time the tone and feeling of what we are saying is really this: see you next time.
But the thing is: We don't know if there will be a next time.
We don't ever know. Not for sure.
But the reality of this is much too frightening to face. Maybe we touch this Truth lightly; maybe we are vaguely haunted by an indistinguishable trace of it--a passing feeling we recognize, but can't quite articulate; maybe it's miles outside our awareness.
As I was saying goodbye to students and coworkers, what struck me was how profoundly different a goodbye feels when you know it is really goodbye.
Sure, if we want to get technical about it, my goodbye wasn't absolute and final. I am still teaching at the school once a week so I knew I would still see many coworkers and some students for sure. And there are ways I may end up crossing paths with some people again in the future.
And yet, for many people I said goodbye to, especially students, for all intents and purposes, this was really and truly goodbye.
So, this left us with a couple of options.
1) We could play it safe by hiding behind and exaggerating the probability of these possible future run-ins.
We could say things like, "Maybe I'll see you in the hall!" or "I'm sure our paths will cross again!" or "I'll see you at graduation!" And in some cases that is exactly what we did.
This made me reflect on how often we hide behind the assumption of "next time" in our goodbyes to protect ourselves from being vulnerable.
If we assume a next time, we don't have to feel vulnerable. We don't have to say I love you.
2) We met the finality of the goodbye head on and saw it as an opportunity to say the things we needed to say or that the other person needed to hear.
We leaned into rather than away from the inherent vulnerability of the exchange.
We let it be intimate.
We let it be loving.
We said things like, "you mattered to me" and "I'm so glad I met you."
It was intimate and sweet and heart-opening, and it brought me to a deeper level of being able to really sit with another human and myself.
I was meeting with one young man for what would likely be the last time. At one point during the conversation, he told me he felt I was holding back. He said he thought I wasn't saying everything I wanted to say because I know that he gets uncomfortable with emotions. He told me it was OK, that I didn't have to protect him.
I was quick to disagree. "I'm not holding back," I said with a smile.
Later, I realized he was right...partially anyway. I was holding back, but not because of him. I was holding back because of me.
I was protecting myself from the intimacy of the goodbye.
I wasn't aware I was doing that until he said it, but he must have sensed it. He called me on it and I was grateful.
So I gave it another go. This time I brought myself more fully into the exchange. I showed up and let him know how I felt not just by saying the words, but by allowing myself to feel them.
In the Spring of 2011, after a long journey with cancer, my uncle decided to say a final goodbye to the world on May 7th with the help of Compassion and Choices and thanks to the Death with Dignity Act. My aunt and uncle moved to Portland the previous winter to have easier access to alternative treatment and organic food. That meant that my husband and I had the privilege of spending five months with them during this difficult time.
It also meant that we were only a couple of miles away when it was time to say goodbye. May 7th was a Saturday. The Thursday before that, my husband and I went to visit my uncle.
It would be our last visit.
I remember feeling so anxious on the way to their apartment. What would it be like? It seemed so awkward and awful. What would we say? What would we do? He was going to die in exactly two days; this was certain. God, it just was too much.
But once we arrived, it was an entirely different story. Anticipating the visit was unbearable; being there was easy. There were sad moments, to be sure. But also, we ate pizza and held hands and laughed without restraint at my uncle's off-color jokes.
There was nowhere to hide, no more time or room (or point) for defense mechanisms or protective strategies.
At some point, when he was falling asleep, we knew it was time to leave and let him rest. So we said goodbye.
I will never forget walking out that door and looking back knowing it would be the last time I would see his sparkling eyes and impossible smile.
How might our daily goodbyes change if in those moments we were willing to feel into the unpredictability of life and the fact that we are utterly defenseless in the face of losing those we hold dear?
Maybe a pause to really make eye contact and smile.
Maybe some loving words or an expression of appreciation.
Maybe a private and silent acknowledgement of gratitude.
It's understandable that we tend to be casual with our goodbyes. It's difficult to constantly look squarely at the impermanence of all things.
I bet even the Buddha said "see you later" once in a while.
Then there is the problem of being busy. When we are always rushing from one place and task to another, it can be difficult to slow down long enough to remember the preciousness of life and those we love.
Who among us is not painfully aware of how quickly life can be taken from us? If we have managed to avoid directly experiencing this particularly brutal lesson, surely we know one, two, or a dozen people who were not as fortunate.
A neighbor whose wife died of a stroke. A friend whose husband had a heart attack. A coworker whose mother went to the ER and never came out. A cousin whose brother died in a car accident.
These stories happen every day. And for a moment or a week after we hear them, we squeeze our loved ones a little bit tighter; we remember to say I love you; we work through the argument so we can fall asleep with arms around each other.
And then, we forget again. We retreat. We return to habits and routines, to social conventions and protective strategies.
What if we were willing to hold the Truth of the fragility of life just close enough to allow us to always remember what is most sacred?
This doesn't have to be morbid. We needn't weave it into a shroud that covers up our eyes making things dark and heavy. That is not necessary or useful.
But what if we kept this Truth somewhere close--maybe in our shirt pocket with the pointy bits facing our chest so that it was always there, scratching just a little--not to hurt, but to be remembered.
A gentle, but slightly prickly reminder.
And when we hugged our loved ones goodbye, it would poke us just a little bit more as it pressed through the fabric. And we would remember again and it would sting.
And in time, we would come to appreciate that little bit of pain, and recognize it as a beautiful and inseparable part of the privilege of loving and being in this world. |
import gtk
from time import sleep
class splashScreen():
def __init__(self):
#DONT connect 'destroy' event here!
self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
self.window.set_title('Your app name')
self.window.set_position(gtk.WIN_POS_CENTER)
main_vbox = gtk.VBox(False, 1)
self.window.add(main_vbox)
hbox = gtk.HBox(False, 0)
self.lbl = gtk.Label("This shouldn't take too long... :)")
self.lbl.set_alignment(0, 0.5)
main_vbox.pack_start(self.lbl, True, True)
self.window.show_all()
class yourApp():
def __init__(self):
self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
self.window.set_title('Your app name')
self.window.set_position(gtk.WIN_POS_CENTER)
self.window.connect('destroy', gtk.main_quit)
main_vbox = gtk.VBox(False, 1)
self.window.add(main_vbox)
hbox = gtk.HBox(False, 0)
self.lbl = gtk.Label('All done! :)')
self.lbl.set_alignment(0, 0.5)
main_vbox.pack_start(self.lbl, True, True)
self.window.show_all()
if __name__ == "__main__":
splScr = splashScreen()
    #If you don't do this, the splash screen will show, but won't render its contents
while gtk.events_pending():
gtk.main_iteration()
#Here you can do all that nasty things that take some time.
sleep(3)
app = yourApp()
#We don't need splScr anymore.
splScr.window.destroy()
gtk.main()
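# Note: this example uses the legacy PyGTK 2 bindings ("import gtk"). Under GTK 3 / PyGObject
# the rough equivalents are "from gi.repository import Gtk" with Gtk.Window, Gtk.Box, and
# Gtk.events_pending() / Gtk.main_iteration().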
|
Activision launches its first ever partnership with McDonald's USA.
Nine Skylanders Giants toys will be found in McDonald’s kids’ Happy Meals up until May 2nd.
Plus, customers will also be able to find $10 coupons for the Skylanders Giants game.
It’s Activision’s first partnership with the McDonald’s USA restaurant chain. The promotion will head to other countries later this year.
Skylanders Giants toys found in Happy Meals include Spyro, Gill Grunt, Drobot, Chop Chop, Ignitor, Jet-Vac, Tree Rex, Crusher and Kaos.
They are based on the $1 billion video game and action figure toy line which first arrived in late 2011.
McDonald’s will support the three-week promotion with a marketing campaign that includes TV ads, in-restaurant merchandising, custom packaging and a website and social media presence online.
"The Skylanders Giants Happy Meal illustrates how strong the Skylanders Spyro’s Adventure and Skylanders Giants brand has become in just 18 months," said Tim Ellis, chief marketing officer at Activision Publishing.
Ashley Maidy, vice president of global licensing and partnerships at Activision Publishing, added: "We’re excited to offer our fans another great way to experience this signature brand, and we couldn’t be more pleased to add McDonald’s to our roster of incredible partners." |
# Copyright 2018 Flight Lab authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test-cases for utils.Projector."""
from __future__ import print_function
import logging
import sys
import projector
try:
raw_input # Python 2
except NameError:
raw_input = input # Python 3
def test(argv):
ip = argv[1]
p = projector.Projector(name='test', address=ip)
p.on('state_changed', on_state_changed)
p.start()
while True:
cmd = raw_input('Command>')
if cmd == 'on':
try:
p.power_on()
except Exception as e:
print(e)
elif cmd == 'off':
try:
p.power_off()
except Exception as e:
print(e)
elif cmd == 'exit':
break
p.stop()
def on_state_changed(old_state, new_state):
print('State changed: "{0}" => "{1}"'.format(old_state, new_state))
if __name__ == '__main__':
logger = logging.getLogger('')
logger.setLevel('DEBUG')
console_handler = logging.StreamHandler()
console_handler.setFormatter(
logging.Formatter('%(levelname)-8s %(name)-12s: %(message)s'))
logger.addHandler(console_handler)
test(sys.argv)
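# Example invocation (script name and IP are placeholders): python projector_test.py 192.168.1.50
# At the "Command>" prompt, "on" powers the projector on, "off" powers it off, "exit" quits.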
|
The Move In Coordinator/ Concierge for Active Adult (55+ living) plays an integral part in the sales process by working closely with the Community Manager and Sales Consultant/s. The main responsibility will be to do whatever is necessary to make the selling process more effective and the move in process more efficient. The Move In Coordinator plays a critical role in staying engaged with each depositor through actual move in to reduce the risk of cancellation. This person will participate in all aspects of sales and marketing for the community including the L.O.V.E. sales program fundamentals - daily sales huddles, discovery, tours, and creative follow up. The Move In Coordinator will contribute to the success of the community sales team’s achievement of year end budgeted occupancy, revenue, and resident retention goals.
Tour Support - Assists with tours when needed.
Prospect Experience - Inspects tour path, model apartment(s), and leasing office on a regular basis in order to maximize appeal of the prospect experience.
Meet and greet residents and families on the day of move in and ensure smooth process. Hand deliver move-in gift the day of move-in.
Prepare and complete all paperwork related to new move-ins and renewals according to Greystar policy, including preparation of Move-in Packets.
Process applications, credit screening, and criminal background checks.
Coordinate the distribution of all Move-In paperwork to the appropriate personnel.
Coordinate and communicate unit readiness with appropriate personnel to ensure timely move-ins.
Assist with furniture measurements and placement.
Ambitious and energetic; willing to have fun; personable with the ability to build and develop relationships quickly.
Very team-oriented with the ability to put the team and resident needs first by employing a “can do” attitude.
Founded in 1993, Greystar provides world-class service in the multifamily real estate business. Our innovative business model integrates the management, development and investment disciplines of the multifamily industry on international, regional and local levels. This unique approach and our commitment to hiring the very best multifamily professionals have resulted in record growth, making us one of the most respected and trusted multifamily real estate companies.
Because our business model includes both investment and service-oriented businesses, we are able to maintain a constant presence in local markets and create value in all phases of the real estate cycle. Our international platform provides economies of scale, financial sophistication, institutional quality reporting and tremendous capital relationships, while our city offices provide local market expertise and execution.
Our dedication to redefining excellence in apartment living means we are constantly exploring innovative ideas and pioneering new ways to serve our residents and clients.
The Greystar team is more than 10,000 team members strong and growing. Check out the latest opportunities at #removed# EOE. |
import numpy as np
import sonnet as snt
import tensorflow as tf
from tensorflow.contrib.distributions import Bernoulli, NormalWithSoftplusScale
from modules import SpatialTransformer, ParametrisedGaussian
class AIRCell(snt.RNNCore):
"""RNN cell that implements the core features of Attend, Infer, Repeat, as described here:
https://arxiv.org/abs/1603.08575
"""
_n_transform_param = 4
def __init__(self, img_size, crop_size, n_appearance,
transition, input_encoder, glimpse_encoder, glimpse_decoder, transform_estimator, steps_predictor,
discrete_steps=True, canvas_init=None, explore_eps=None, debug=False):
"""Creates the cell
:param img_size: int tuple, size of the image
:param crop_size: int tuple, size of the attention glimpse
:param n_appearance: number of latent units describing the "what"
:param transition: an RNN cell for maintaining the internal hidden state
:param input_encoder: callable, encodes the original input image before passing it into the transition
:param glimpse_encoder: callable, encodes the glimpse into latent representation
:param glimpse_decoder: callable, decodes the glimpse from latent representation
        :param transform_estimator: callable, transforms the hidden state into parameters for the spatial transformer
:param steps_predictor: callable, predicts whether to take a step
        :param discrete_steps: boolean, steps are sampled from a Bernoulli distribution if True; if False, all steps are
taken and are weighted by the step probability
:param canvas_init: float or None, initial value for the reconstructed image. If None, the canvas is black. If
float, the canvas starts with a given value, which is trainable.
:param explore_eps: float or None; if float, it has to be \in (0., .5); step probability is clipped between
        `explore_eps` and (1 - `explore_eps`)
:param debug: boolean, adds checks for NaNs in the inputs to distributions
"""
super(AIRCell, self).__init__(self.__class__.__name__)
self._img_size = img_size
self._n_pix = np.prod(self._img_size)
self._crop_size = crop_size
self._n_appearance = n_appearance
self._transition = transition
self._n_hidden = self._transition.output_size[0]
self._sample_presence = discrete_steps
self._explore_eps = explore_eps
self._debug = debug
with self._enter_variable_scope():
self._canvas = tf.zeros(self._img_size, dtype=tf.float32)
if canvas_init is not None:
self._canvas_value = tf.get_variable('canvas_value', dtype=tf.float32, initializer=canvas_init)
self._canvas += self._canvas_value
transform_constraints = snt.AffineWarpConstraints.no_shear_2d()
self._spatial_transformer = SpatialTransformer(img_size, crop_size, transform_constraints)
self._inverse_transformer = SpatialTransformer(img_size, crop_size, transform_constraints, inverse=True)
self._transform_estimator = transform_estimator(self._n_transform_param)
self._input_encoder = input_encoder()
self._glimpse_encoder = glimpse_encoder()
self._glimpse_decoder = glimpse_decoder(crop_size)
self._what_distrib = ParametrisedGaussian(n_appearance, scale_offset=0.5,
validate_args=self._debug, allow_nan_stats=not self._debug)
self._steps_predictor = steps_predictor()
@property
def state_size(self):
return [
np.prod(self._img_size), # image
np.prod(self._img_size), # canvas
self._n_appearance, # what
self._n_transform_param, # where
self._transition.state_size, # hidden state of the rnn
1, # presence
]
@property
def output_size(self):
return [
np.prod(self._img_size), # canvas
np.prod(self._crop_size), # glimpse
self._n_appearance, # what code
self._n_appearance, # what loc
self._n_appearance, # what scale
self._n_transform_param, # where code
self._n_transform_param, # where loc
self._n_transform_param, # where scale
1, # presence prob
1 # presence
]
@property
def output_names(self):
return 'canvas glimpse what what_loc what_scale where where_loc where_scale presence_prob presence'.split()
def initial_state(self, img):
batch_size = img.get_shape().as_list()[0]
hidden_state = self._transition.initial_state(batch_size, tf.float32, trainable=True)
where_code = tf.zeros([1, self._n_transform_param], dtype=tf.float32, name='where_init')
what_code = tf.zeros([1, self._n_appearance], dtype=tf.float32, name='what_init')
flat_canvas = tf.reshape(self._canvas, (1, self._n_pix))
where_code, what_code, flat_canvas = (tf.tile(i, (batch_size, 1)) for i in (where_code, what_code, flat_canvas))
flat_img = tf.reshape(img, (batch_size, self._n_pix))
init_presence = tf.ones((batch_size, 1), dtype=tf.float32)
return [flat_img, flat_canvas,
what_code, where_code, hidden_state, init_presence]
def _build(self, inpt, state):
"""Input is unused; it's only to force a maximum number of steps"""
img_flat, canvas_flat, what_code, where_code, hidden_state, presence = state
img_inpt = img_flat
img = tf.reshape(img_inpt, (-1,) + tuple(self._img_size))
inpt_encoding = self._input_encoder(img)
with tf.variable_scope('rnn_inpt'):
hidden_output, hidden_state = self._transition(inpt_encoding, hidden_state)
where_param = self._transform_estimator(hidden_output)
where_distrib = NormalWithSoftplusScale(*where_param,
validate_args=self._debug, allow_nan_stats=not self._debug)
where_loc, where_scale = where_distrib.loc, where_distrib.scale
where_code = where_distrib.sample()
cropped = self._spatial_transformer(img, where_code)
with tf.variable_scope('presence'):
presence_prob = self._steps_predictor(hidden_output)
if self._explore_eps is not None:
presence_prob = self._explore_eps / 2 + (1 - self._explore_eps) * presence_prob
if self._sample_presence:
presence_distrib = Bernoulli(probs=presence_prob, dtype=tf.float32,
validate_args=self._debug, allow_nan_stats=not self._debug)
new_presence = presence_distrib.sample()
presence *= new_presence
else:
presence = presence_prob
what_params = self._glimpse_encoder(cropped)
what_distrib = self._what_distrib(what_params)
what_loc, what_scale = what_distrib.loc, what_distrib.scale
what_code = what_distrib.sample()
decoded = self._glimpse_decoder(what_code)
inversed = self._inverse_transformer(decoded, where_code)
with tf.variable_scope('rnn_outputs'):
inversed_flat = tf.reshape(inversed, (-1, self._n_pix))
canvas_flat += presence * inversed_flat
decoded_flat = tf.reshape(decoded, (-1, np.prod(self._crop_size)))
output = [canvas_flat, decoded_flat, what_code, what_loc, what_scale, where_code, where_loc, where_scale,
presence_prob, presence]
state = [img_flat, canvas_flat,
what_code, where_code, hidden_state, presence]
return output, state |
Lt George Schwartz Welch and 2nd Lt Kenneth M Taylor are credited with being the first ‘Aces’ of World War II. Welch and Taylor were both awarded the Distinguished Service Cross.
But boats were not their only targets.
Before the boats, the Japanese attacked Oʻahu’s airfields: Wheeler, Kaneohe, Ewa, Hickam, Ford Island, Bellows and the civilian airport serving Honolulu.
Welch and Taylor were at Wheeler when the attack began; they had previously flown their P-40B fighters over to the small airfield at Haleiwa as part of a plan to disperse the squadron’s planes away from Wheeler.
Lt Welch was able to down the plane following him and they both returned back to Wheeler. Lt Welch was credited with a total of four Japanese planes shot down and Lt Taylor downed two. Just as suddenly as it began, the sky was empty of enemy aircraft.
Welch remained in the Pacific Theater of Operations and went on to score 12 more kills against Japanese aircraft (16 in total).
In the spring of 1944, Welch was approached by North American Aviation to become a test pilot for the P-51 Mustang. He went on to fly the prototypes of the FJ Fury, and when the F-86 Sabre was proposed, Welch was chosen as the chief test pilot.
On October 14, 1947, the same day that Chuck Yeager was to attempt supersonic flight, Welch reputedly performed a supersonic dive. Starting from 37,000 feet, he executed a full-power 4g pullout, greatly increasing the power of his apparent sonic boom. Yeager broke the sound barrier approximately 30 minutes later.
The Pentagon allegedly ordered the results of Welch’s flights classified and did not allow North American to publicly announce that Welch had gone supersonic until almost a year later. The Air Force still officially denies that Welch broke the sound barrier first.
2nd Lt. Kenneth Marlar Taylor was a new second lieutenant on his first assignment, posted in April 1941 to Wheeler Army Airfield in Honolulu.
Born in Enid, Oklahoma, Taylor was raised in Hominy, Oklahoma and entered the University of Oklahoma in 1938. After two years, he quit school to enlist in the Army Air Corps.
After Pearl Harbor, Taylor was sent to the South Pacific, flying out of Guadalcanal, and was credited with downing another Japanese aircraft. During an air raid at the base one day, someone jumped into a trench on top of him and broke his leg, which ended his combat career.
He rose to the rank of colonel during his 27 years of active duty. He became commander of the Alaska Air National Guard and retired as a brigadier general in 1971. He then worked as an insurance underwriter in Alaska, representing Lloyds of London, until 1985.
Taylor split his retirement between Anchorage and Arizona. He died November 25, 2006 at an assisted living residence in Tucson. (Washington Post) The image shows Lt George Welch (L) and Ken Taylor. |
import CustomVLCClass
import serial
import time
import threading
time.sleep(20)
while True:
def inputListener():
        inputdata = input('0 to pause/play the first song, 1 to pause/play the second song, 00/01 to mute/unmute the first song: ')
if(inputdata == '0'):
if a.mediaplayer.is_playing() :
a.pause()
else:
a.play()
print("Quiting 0")
inputListener() #Starting another time the inputListener
elif(inputdata == '1'):
if b.mediaplayer.is_playing():
b.pause()
else:
b.play()
print("Quiting 1")
inputListener() #Starting another time the inputListener
elif(inputdata == '00'):
a.mute()
inputListener()
elif(inputdata == '01'):
a.unmute()
inputListener()
def arduinoListener():
past0 = 0 #For counting the last chip in the field
past1 = 0
past2 = 0
past3 = 0
past4 = 0
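        # Assumed serial protocol (inferred from the branches below): each message from the Arduino
        # is two ASCII digits followed by CRLF. The first digit (0-4) identifies the reader position,
        # the second digit identifies the chip: 1-6 unmute tracks a-f respectively, and 0 means the
        # chip was removed, so the track last seen at that position (past<N>) is muted again.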
while True:
try:
line = ser.readline()
if not line:
continue
x = line.decode('ascii', errors='replace')
if x == '00\r\n':
print("00")
if past0 == 1:
a.mute()
if past0 == 2:
b.mute()
if past0 == 3:
c.mute()
if past0 == 4:
d.mute()
if past0 == 5:
e.mute()
if past0 == 6:
f.mute()
past0 = 0
elif x == '01\r\n':
print("01")
past0 = 1
a.unmute()
elif x == '02\r\n':
print("02")
past0 = 2
b.unmute()
elif x == '03\r\n':
print("03")
past0 = 3
c.unmute()
elif x == '04\r\n':
print("04")
past0 = 4
d.unmute()
elif x == '05\r\n':
print("05")
past0 = 5
e.unmute()
elif x == '06\r\n':
print("06")
past0 = 6
f.unmute()
if x == '10\r\n':
print("10")
if past1 == 1:
a.mute()
if past1 == 2:
b.mute()
if past1 == 3:
c.mute()
if past1 == 4:
d.mute()
if past1 == 5:
e.mute()
if past1 == 6:
f.mute()
past1 = 0
elif x == '11\r\n':
print("11")
past1 = 1
a.unmute()
elif x == '12\r\n':
print("12")
past1 = 2
b.unmute()
elif x == '13\r\n':
print("13")
past1 = 3
c.unmute()
elif x == '14\r\n':
print("14")
past1 = 4
d.unmute()
elif x == '15\r\n':
print("15")
past1 = 5
e.unmute()
elif x == '16\r\n':
print("16")
past1 = 6
f.unmute()
if x == '20\r\n':
print("20")
if past2 == 1:
a.mute()
if past2 == 2:
b.mute()
if past2 == 3:
c.mute()
if past2 == 4:
d.mute()
if past2 == 5:
e.mute()
if past2 == 6:
f.mute()
                    past2 = 0
elif x == '21\r\n':
print("21")
past2 = 1
a.unmute()
elif x == '22\r\n':
print("22")
past2 = 2
b.unmute()
elif x == '23\r\n':
print("23")
past2 = 3
c.unmute()
elif x == '24\r\n':
print("24")
past2 = 4
d.unmute()
elif x == '25\r\n':
print("25")
past2 = 5
e.unmute()
elif x == '26\r\n':
print("26")
past2 = 6
f.unmute()
if x == '30\r\n':
print("30")
if past3 == 1:
a.mute()
if past3 == 2:
b.mute()
if past3 == 3:
c.mute()
if past3 == 4:
d.mute()
if past3 == 5:
e.mute()
if past3 == 6:
f.mute()
past3 = 0
elif x == '31\r\n':
print("31")
past3 = 1
a.unmute()
elif x == '32\r\n':
print("32")
past3 = 2
b.unmute()
elif x == '33\r\n':
print("33")
past3 = 3
c.unmute()
elif x == '34\r\n':
print("34")
past3 = 4
d.unmute()
elif x == '35\r\n':
print("35")
past3 = 5
e.unmute()
elif x == '36\r\n':
print("36")
past3 = 6
f.unmute()
if x == '40\r\n':
print("40")
if past4 == 1:
a.mute()
if past4 == 2:
b.mute()
if past4 == 3:
c.mute()
if past4 == 4:
d.mute()
if past4 == 5:
e.mute()
if past4 == 6:
f.mute()
past4 = 0
elif x == '41\r\n':
print("41")
past4 = 1
a.unmute()
elif x == '42\r\n':
print("42")
past4 = 2
b.unmute()
elif x == '43\r\n':
print("43")
past4 = 3
c.unmute()
elif x == '44\r\n':
print("44")
past4 = 4
d.unmute()
elif x == '45\r\n':
print("45")
past4 = 5
e.unmute()
elif x == '46\r\n':
print("46")
past4 = 6
f.unmute()
except KeyboardInterrupt:
print("exiting")
break
ser = serial.Serial('/dev/ttyAMA0', 9600, timeout=1.0)
ser.setDTR(False)
time.sleep(1)
ser.flushInput()
ser.setDTR(True)
a = CustomVLCClass.CustomVLCClass(filename="/acien101/AudioMixer/audio/1.mp3")
b = CustomVLCClass.CustomVLCClass(filename="/acien101/AudioMixer/audio/2.mp3")
c = CustomVLCClass.CustomVLCClass(filename="/acien101/AudioMixer/audio/3.mp3")
d = CustomVLCClass.CustomVLCClass(filename="/acien101/AudioMixer/audio/4.mp3")
e = CustomVLCClass.CustomVLCClass(filename="/acien101/AudioMixer/audio/5.mp3")
f = CustomVLCClass.CustomVLCClass(filename="/acien101/AudioMixer/audio/6.mp3")
inputArduinoThread = threading.Thread(target=arduinoListener, name="inputAduino")
inputArduinoThread.start()
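    # Note: inputListener above is only defined, not started; to enable the keyboard controls it
    # would also need its own thread, e.g. threading.Thread(target=inputListener).start().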
    while a.mediaplayer.is_playing() and b.mediaplayer.is_playing():
time.sleep(0.1)
|
Valium 10mg prescription assistance program - No prescription required.
It is possible that the dilatation of the heartmay be reduced by the muscular action, as occurs in animal experiments,but here again it is difficult to measure the improvement in man. attempts have been made to show that thevomiting arises from stimulation of the medullary centre, but thesehave not been convincing. the leaves of the coca grown in peru and bolivia containcocaine along with small quantities of other alkaloids, but the indiancoca and still more the java leaves contain a smaller proportion ofcocaine and a larger amount of the want to buy lorazepam tablets online less known alkaloids.cocaine, like atropine, is readily decomposed into several constit-uents. the term hypersen-sitivity usually refers to allergic or other immunologic responses todrugs.) with some drugs, the intensity of response to a given dosemay change during the course of therapy; in these cases, responsive-ness usually valium 10mg prescription assistance program decreases as a consequence of continued drug adminis-tration, producing a state of phentermine 37.5mg prescription dosage relative tolerance to the drugs effects.when responsiveness diminishes rapidly after administration of adrug, the response is said to be subject to tachyphylaxis.even before administering the first dose of a drug, cheapest generic klonopin 1mg in houston the pre-server valium 10mg prescription assistance program should consider factors that may help in predicting thedirection and extent of possible variations in responsiveness. in the rabbit's urine paramidophenol alone is found, whilein Cheapest generic carisoprodol in bangkok the dog this is accompanied by oxycarbanil (ceh4o.nh.co); in each caseit forms a double sulphate or glycuronate. depression ofventilation may be exaggerated when etomidate is combined withinhaled anesthetics or opioids. the cross-linking valium 10mg prescription assistance program of gelatincapsules is an irreversible chemical reaction. kevin as a former enemy, wouldn't you also list sarah as a former enemy? wagner proposed a scheme, depicted in fig-ure 6-1, for the processes involved in the dissolution of soliddosage forms. the official standards valium 10mg prescription assistance program for containers apply to articles packagedby either the pharmaceutical manufacturer or the dispensingpharmacist unless otherwise indicated in a compendial mono-graph. is 20 mg it should be taken at least 16 minutes before the valium 10mg prescription assistance program sexual activity. as part of the drug approval process, a dissolu-tion test procedure is established for all oral solid dosage forms.these dissolution tests are incorporated into the united statespharmacopeia (usp) and apply valium 10mg prescription assistance program both to innovator purchase generic ativan online ireland and genericdrug products. mayo clinic experts, and other useful information that will help you manage your health. in general, patients are sent towatering places and baths, and the success of the treatment is to aconsiderable extent due to the climatic conditions, the change in thehabits of life, the dietic treatment and the rest valium 10mg prescription assistance program from everyday occu-pations. toxicology refers tothe study of adverse effects of drug molecules in vitro and in experimentalanimal models.the objectives of nonclinical development in the development of Klonopin prescription for flying a drug are: in 24 hours. 
the international conference on harmonisation of technical requirementsfor registration of pharmaceuticals for human use (ich) is an organisationthat was founded as a result of an international conference organised in brussels in 1990 and that brought together representatives of the purchase ambien online india regulatory authorities and pharmaceutical purchase ativan mastercard industry in europe, japan and the united states todiscuss scientific and technical aspects of drug registration. each clinical study should ambien prescription class be scientifically sound and describedin a clear and detailed protocol that should be followed meticulously and eachindividual valium 10mg prescription assistance program involved in conducting the study should be adequately qualified,trained and experienced to perform his/her respective task(s). amphiphiliccompounds can be adsorbed onto the double lipidic membrane. — a curious relationship has been shown to existbetween the calcium and potassium salts. acetylcholine is very rapidly hydrolyzed.fused intravenously toachieve concentrations sufficient to produce detectable effects. the discovery process that is described below is the targetbased approachfor small molecules. in addition,the fatigue impact scale has been used to measure thecorrelation between fatigue and markers of disease severity .the aim of this study was to assess the safety and efficacy ofciprofloxacin dpi treatment for 28 days, with 56 days offollow-up, in individuals with non-cf bronchiectasis byexamining changes in bacterial load and where to purchase ativan in florida other importantclinical outcomes, as well as tolerability. the danger ofthe use of the green arsenical dyes, such as scheele's green (arseniteof copper), and schweinfurt's green, or paris green (arsenite andacetate of copper), is now generally recognized, but arsenic is still usedin the preparation of other colors, and these may give rise to poisoningfrom the imperfect removal of the metal. per 100 c.c. for example, theeuropean medicines agency ultram 50mg prescription stolen (ema) regulates most europeanmarkets. the authorisation is only granted when the dossier has been reviewed bythe experts of the competent authorities and no objections are made to proceed with the clinical trial. valium 10mg prescription assistance program if valium 10mg prescription assistance program you have not worked for you in 12 months, is unlikely to work for you. its main applicationis valium 10mg prescription assistance program as an external antiseptic in 5 per cent, solution over wounds, ulcers andso forth, also to disinfect wounds and for antiseptic douches. at any rate,irritation and later acute inflammation are set up at these points.at buy alprazolam 3mg first the irritation excites only diarrhoea and diuresis, but as itgoes on, gastro-enteritis and anuria or hematuria may be produced.the symptoms from the intestine and kidney may not be equally wellmarked; at one time the one valium 10mg prescription assistance program becomes inflamed while the other is onlysubjected to mild stimulation, while at other times both are the seatof acute inflammation. of the candidate drugs that come to clinical research,only 20 percent survive valium 10mg prescription assistance program as safe and efficacious and are addedto the portfolio of therapeutics. 
valium 10mg prescription assistance program polyethylene, polystyrene, polyvinyl chlo-ride, and polypropylene are used to prepare plastic containersof various densities to fit specific formulation needs.factors such as plastic composition, processing and clean-ing procedures, contacting media, inks, adhesives, absorption,adsorption, and permeability of preservatives also affect thesuitability of a plastic for pharmaceutical use. while onthe market, efforts in drug development valium 10mg prescription assistance program continue to refine the manufacturingprocess, to improve pharmaceutical formulations and to explore new routesof administration or new therapeutic indications. both fluoridesand oxalates irritate the stomach and induce nausea and vomiting.the fluorides absorbed from the alimentary canal are excreted by the urine,but this takes place very slowly, and much of the fluoride is stored valium 10mg prescription assistance program up in thebody, some in the liver and skin, but most in the bones in the form of calciumfluoride. an overview of the constituents of a qmsis given in table good practicesthe focus of this valium 10mg prescription assistance program section is on quality requirements regarding the conductof operations and tests in chemical/pharmaceutical, nonclinical and clinicaldevelopment.
Buy cheap alprazolam 1mg online with american express Ambien india Buy lorazepam with mastercard Where to purchase diazepam 5mg tablets Do forces in the tunica albuginea contribute to peyronie's disease? but the hopes which were at first entertainedthat it would prove a cure for the disease are now dissipated; atoxyl appearsto act efficiently on the parasites in the blood, but has less effect on those whichhave infected the lymph glands, and apparently does not reach those in thecentral nervous system in efficient concentrations. a clinical practice guideline from the american buy drug alprazolam 1.5mg in the uk college of physicians. as part of the drug approval process, a dissolu-tion test procedure is established for all oral solid dosage forms.these dissolution tests are incorporated into the united statespharmacopeia (usp) and apply both to innovator and genericdrug products. in order to reach these goals, ich involves six parties during the processof harmonisation: the institutionalreview board (irb)/independent ethics committee (iec), the investigator and the sponsor; guidance on the content and format of the clinical study protocol, theinvestigator brochure, and other socalled essential documents.the guideline should be followed whenever performing clinical studies thatare generating data to be submitted to regulatory authorities, but are recommended in other trials as well.the principles of gcpa good clinical study should be conducted in accordance with current regulations, gcp and the ethical principles laid down in the declaration of helsink.ividual safety, wellbeing and rights ofclinical trial participants. this delivery route is more representative,when compared to intranasal instillation, of the expectedhuman pulmonary administration using a nebulizer or inhaler.in the pneumonic tularemia mouse model, a single dose ofaerosolized ciprofloxacin (1 mg/kg lung dose) provided little Tramadol 200mg usa pharmacy orno protection whereas a single dose of aerosolized liposomalciprofloxacin (1 valium 10mg prescription assistance program mg/kg lung dose) offered 100% protection evenwhen administered as late as 72 h post-challenge (wong et al.,2003) (figure 1 shows 72 h data). thereappears to be a ceiling effect for benzodiazepine-induced Where can i buy alprazolam over the counter decreasesin cmr0 2 as evidenced by midazolams inability to produce anisoelectric eeg. doctor or the want to buy xanax 2mg in thailand medical advice, diagnose or treat any disease and valium 10mg prescription assistance program may not cover all valium 10mg prescription assistance program uses, directions, drug buy drug alprazolam 1.5mg in houston interactions, precautions, or adverse effects of your medication. this opinion is entirely wrong, as camphoricacid is almost devoid of action. in such cases of poisoning in valium 10mg prescription assistance program man the mucousmembrane of the stomach and intestine has been found swollen andloosened, and in animals fatty degeneration of the liver, heart, andkidney has been described.injected into the veins of animals, iodine causes oedema of thelungs, which v. This is widely used for pavement recycling method today. 
ruzicka statesthat the malpighian corpuscles of the spleen are increased in numberafter pilocarpine.the temperature is said to be increased by pilocarpine, althoughonly to valium 10mg prescription assistance program a very small extent, and the carbonic acid excretion is increasedthrough the drug increasing the activity of the glands and other organs.after the perspiration is fully developed the internal temperature isgenerally reduced, especially in fever.some symptoms occur in cases of poisoning which point to someaction of the alkaloids on the central nervous system. ed get and keep an erection when he is sexually excited. thezinc salts seem to be in general much weaker than those of valium 10mg prescription assistance program copper, which theyresemble closely in other respects.zinci sulphas (u.) (zns0 4 + 7h 0), colorless, transparent,odorless crystals, with ambien mexico pharmacy a harsh, astringent, metallic taste, soluble in water,not in alcohol. thus when a weak solution of lead acetate is applied to amucous membrane, a precipitate is formed in, the proteins lying on thesurface, and protects the cells from the action of the very dilute aceticacid which is set at liberty. thedial reading on the viscometer is valium 10mg prescription assistance program a measure of the resistancethat the spindle encounters at various levels of the sedimentedsuspension. valium 10mg prescription assistance program single use equipment is commonly used in31quality assurance and controlthe industry, particularly in manufacture of where to purchase alprazolam online in canada biological products.product contact components are discarded after use. this point was well made byjurgen drewes21when he wrote, “it is, however, by no meanscertain to what extent molecular diversity, as viewed by chem-ists and as calculated by structural descriptors, resembles di-versity as buy cheap tramadol 100mg online india ‘seen’ by a biological target molecule.”collections of valium 10mg prescription assistance program compounds can be designed to be “drug-like”in that they have favorable valium 10mg prescription assistance program physicochemical valium 10mg prescription assistance program properties in com-mon want to buy alprazolam 2mg with paypal with known drugs. this is followedby drowsiness which soon passes into sleep, from which the subject can at first be aroused. the study director is the leader of the studyin the test facility and one of his/her responsibilities is the approval of the studyplan and amendments and to make valium 10mg prescription assistance program sure that they are available to laboratory personnel. ethnic characteristics that might influence drug effects are classified asintrinsic (related to the drug recipient, such as patient, pathogen valium 10mg prescription assistance program or tumourgenotype) or extrinsic (related to the environment or culture). buteven the direct application of adrenaline to a lesion of the lung orbrain has little effect in stopping the bleeding, the vessels in theseorgans not being constricted by adrenaline to the same extent as thoseof other organs.stomach and intestine. the information on this web site is designed for educational purposes only. the exploratory andexperimental nature of a firstinhuman (fih) clinical trial warrants particular attention from nonclinical and clinical experts and a thorough analysisof all the nonclinical data at hand is conducted before the decision is takento start clinical research. 
perchloricof mercury is less liable to induce salivation, but disturbs the digestion more thatother preparations when given internally, while its hypodermic injection i?exceedingly painful. dose hypodermically, 0.3g. each of these consequences is related to valium 10mg prescription assistance program thestructure adopted by the protein in the valium 10mg prescription assistance program interfacial region. the valium 10mg prescription assistance program ventilatory response to carbon dioxideis unchanged. the epileptiform convulsions, sometimes observed after the cheapest generic valium in florida administration of camphor in mammals,recur at varying intervals, and last from five to ten minutes.usually after a few hours they cease and the animal recoverscompletely. the order phentermine 37.5mg in houston nausea is accompanied by theusual concomitant symptoms — salivation, increased secretion of mucus in therespiratory tract, depression and alternately rapid and slow pulse. toprevent excessive drop in bloodpressure, the dosage of other agents should bereduced by at least 50 percent when lasix isadded to the clonazepam 2mg prescription limit regimen. on the other hand it seemsanomalous to employ it in cases of enlargement of the gland (goitre).yet great improvement is seen from thyroid treatment in many ofa case of sporadic cretinism. thus nitroglycerin has been advisedin valium 10mg prescription assistance program heart disease and has accordingly been placed by some among theheterogeneous group of "cardiac tonics or stimulants." its bene-ficial effects are not due to any direct action on the heart, but to itsdecreasing the resistance against which the systole is performed. |
"""Runs the server for backend application `data`."""
import argparse
import json
from paste import httpserver
import webapp2
import status
class MyServer(webapp2.RequestHandler):
"""Abstract request handler, to be subclasses by server hooks."""
def get(self):
"""Implements actual hook logic and responds to requests."""
raise NotImplementedError('Must implement method `get`.')
def returnJSON(self,j):
self.response.headers['Content-Type'] = 'application/json'
self.response.write(json.dumps(j, separators=(',',':')))
class SourceDataInfo(MyServer):
def get(self):
result = status.get_source_data_info()
self.returnJSON(result)
class ProdDataInfo(MyServer):
def get(self):
result = status.get_prod_data_info()
self.returnJSON(result)
class PublicDumpsInfo(MyServer):
def get(self):
result = status.get_public_dumps_info()
self.returnJSON(result)
class ColabsInfo(MyServer):
def get(self):
result = status.get_colabs_info()
self.returnJSON(result)
# Setup the webapp2 WSGI application.
app = webapp2.WSGIApplication([
('/source_data_info', SourceDataInfo),
('/prod_data_info', ProdDataInfo),
('/public_dumps_info', PublicDumpsInfo),
('/colabs_info', ColabsInfo),
], debug=False)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--listen',
help='host:port to listen on',
default='127.0.0.1:8084')
args = parser.parse_args()
host, port = args.listen.split(':')
httpserver.serve(
app,
host=host,
port=port,
request_queue_size=128,
use_threadpool=True,
threadpool_workers=32,
)
if __name__ == '__main__':
main()
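# Example usage (module name is a placeholder):
#   python server.py --listen 127.0.0.1:8084
#   curl http://127.0.0.1:8084/source_data_info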
|
Ariat presents this Cavender's exclusive men's white short sleeve western shirt. Features include an all over turquoise medallion print, short sleeves, button down front and collar, open left chest pocket with logo and a straight back yoke. 97% Cotton 3% Spandex - Machine wash cold. |
#!/usr/bin/env python
"""Data Acquisition Management service to keep track of Data Producers, Data Sources and external data agents
and the relationships between them"""
from pyon.agent.agent import ResourceAgentClient
__author__ = 'Maurice Manning, Michael Meisinger'
from collections import deque
import logging
from copy import deepcopy
from ooi.timer import Timer, Accumulator
from pyon.core.exception import NotFound, BadRequest, ServerError
from pyon.ion.resource import ExtendedResourceContainer
from pyon.public import CFG, IonObject, log, RT, LCS, PRED, OT
from pyon.util.arg_check import validate_is_instance
from ion.services.sa.instrument.agent_configuration_builder import ExternalDatasetAgentConfigurationBuilder
from ion.util.enhanced_resource_registry_client import EnhancedResourceRegistryClient
from ion.util.stored_values import StoredValueManager
from ion.util.agent_launcher import AgentLauncher
from interface.objects import ProcessDefinition, ProcessSchedule, ProcessTarget, ProcessRestartMode
from interface.objects import Parser, DataProducer, InstrumentProducerContext, ExtDatasetProducerContext, DataProcessProducerContext
from interface.objects import AttachmentType
from interface.services.sa.idata_product_management_service import DataProductManagementServiceProcessClient
from interface.services.sa.idata_acquisition_management_service import BaseDataAcquisitionManagementService
stats = Accumulator(persist=True)
class DataAcquisitionManagementService(BaseDataAcquisitionManagementService):
def on_init(self):
self.RR2 = EnhancedResourceRegistryClient(self.clients.resource_registry)
self.DPMS = DataProductManagementServiceProcessClient(self) # TODO: Add to clients
# -----------------
# The following operations register different types of data producers
# -----------------
def register_external_data_set(self, external_dataset_id=''):
"""Register an existing external data set as data producer
@param external_dataset_id str
@retval data_producer_id str
"""
ext_dataset_obj = self.clients.resource_registry.read(external_dataset_id)
if ext_dataset_obj is None:
raise NotFound('External Data Set %s does not exist' % external_dataset_id)
        #create an ExtDatasetProducerContext to hold the state of this producer
producer_context_obj = ExtDatasetProducerContext(configuration=vars(ext_dataset_obj))
#create data producer resource and associate to this data_process_id
data_producer_obj = DataProducer(name=ext_dataset_obj.name,
description='Primary DataProducer for External Dataset %s' % ext_dataset_obj.name,
is_primary=True,
producer_context=producer_context_obj)
data_producer_id, rev = self.clients.resource_registry.create(data_producer_obj)
# Create association
self.clients.resource_registry.create_association(subject=external_dataset_id, predicate=PRED.hasDataProducer, object=data_producer_id)
return data_producer_id
def unregister_external_data_set(self, external_dataset_id=''):
"""
@param external_dataset_id str
@throws NotFound object with specified id does not exist
"""
# Verify that id is valid
external_data_set_obj = self.clients.resource_registry.read(external_dataset_id)
# List all resource ids that are objects for this data_source and has the hasDataProducer link
producers, producer_assns = self.clients.resource_registry.find_objects(
subject=external_dataset_id, predicate=PRED.hasDataProducer, id_only=True)
for producer, producer_assn in zip(producers, producer_assns):
log.debug("DataAcquisitionManagementService:unregister_external_data_set delete association %s", str(producer_assn))
self.clients.resource_registry.delete_association(producer_assn)
log.debug("DataAcquisitionManagementService:unregister_external_data_set delete producer %s", str(producer))
self.clients.resource_registry.delete(producer)
return
def register_process(self, data_process_id=''):
"""
Register an existing data process as data producer
"""
# retrieve the data_process object
data_process_obj = self.clients.resource_registry.read(data_process_id)
if data_process_obj is None:
raise NotFound('Data Process %s does not exist' % data_process_id)
producer_context_obj = DataProcessProducerContext(configuration=data_process_obj.configuration)
#create data producer resource and associate to this data_process_id
data_producer_obj = DataProducer(name=data_process_obj.name,
description='Primary DataProducer for DataProcess %s' % data_process_obj.name,
producer_context=producer_context_obj,
is_primary=True)
data_producer_id, rev = self.clients.resource_registry.create(data_producer_obj)
# Create association
self.clients.resource_registry.create_association(data_process_id, PRED.hasDataProducer, data_producer_id)
return data_producer_id
def register_event_process(self, process_id=''):
"""
Register an existing data process as data producer
"""
        # retrieve the data_process object
data_process_obj = self.clients.resource_registry.read(process_id)
if data_process_obj is None:
raise NotFound('Data Process %s does not exist' % process_id)
producer_context_obj = DataProcessProducerContext(configuration=data_process_obj.process_configuration)
#create data producer resource and associate to this data_process_id
data_producer_obj = DataProducer(name=data_process_obj.name,
description='Primary DataProducer for DataProcess %s' % data_process_obj.name,
producer_context=producer_context_obj,
is_primary=True)
data_producer_id, rev = self.clients.resource_registry.create(data_producer_obj)
# Create association
self.clients.resource_registry.create_association(process_id, PRED.hasDataProducer, data_producer_id)
return data_producer_id
def unregister_process(self, data_process_id=''):
"""
        Remove the associated DataProducer(s) and their associations
"""
# Verify that id is valid
input_process_obj = self.clients.resource_registry.read(data_process_id)
# List all resource ids that are objects for this data_source and has the hasDataProducer link
producers, producer_assns = self.clients.resource_registry.find_objects(subject=data_process_id, predicate=PRED.hasDataProducer, id_only=True)
for producer, producer_assn in zip(producers, producer_assns):
log.debug("DataAcquisitionManagementService:unregister_process delete association %s", str(producer_assn))
self.clients.resource_registry.delete_association(producer_assn)
log.debug("DataAcquisitionManagementService:unregister_process delete producer %s", str(producer))
log.debug("DAMS:unregister_process delete producer: %s ", str(producer) )
self.clients.resource_registry.delete(producer)
def unregister_event_process(self, process_id=''):
"""
        Remove the associated DataProducer(s) and their associations
"""
# Verify that id is valid
input_process_obj = self.clients.resource_registry.read(process_id)
# List all resource ids that are objects for this data_source and has the hasDataProducer link
producers, producer_assns = self.clients.resource_registry.find_objects(subject=process_id, predicate=PRED.hasDataProducer, id_only=True)
for producer, producer_assn in zip(producers, producer_assns):
log.debug("DataAcquisitionManagementService:unregister_process delete association %s", str(producer_assn))
self.clients.resource_registry.delete_association(producer_assn)
log.debug("DataAcquisitionManagementService:unregister_process delete producer %s", str(producer))
log.debug("DAMS:unregister_process delete producer: %s ", str(producer) )
self.clients.resource_registry.delete(producer)
def register_instrument(self, instrument_id=''):
"""
Register an existing instrument as data producer
"""
# retrieve the data_process object
instrument_obj = self.clients.resource_registry.read(instrument_id)
if instrument_obj is None:
raise NotFound('Instrument object %s does not exist' % instrument_id)
        #create an InstrumentProducerContext to hold the state of this producer
producer_context_obj = InstrumentProducerContext(configuration=vars(instrument_obj))
#create data producer resource and associate to this data_process_id
data_producer_obj = DataProducer(name=instrument_obj.name,
                                         description='Primary DataProducer for Instrument %s' % instrument_obj.name,
is_primary=True,
producer_context=producer_context_obj)
data_producer_id, rev = self.clients.resource_registry.create(data_producer_obj)
# Create association
self.clients.resource_registry.create_association(instrument_id, PRED.hasDataProducer, data_producer_id)
return data_producer_id
def unregister_instrument(self, instrument_id=''):
        # Verify that id is valid
input_process_obj = self.clients.resource_registry.read(instrument_id)
# List all resource ids that are objects for this data_source and has the hasDataProducer link
producers, producer_assns = self.clients.resource_registry.find_objects(subject=instrument_id, predicate=PRED.hasDataProducer, id_only=True)
for producer, producer_assn in zip(producers, producer_assns):
log.debug("DataAcquisitionManagementService:unregister_instrument delete association %s", str(producer_assn))
self.clients.resource_registry.delete_association(producer_assn)
log.debug("DataAcquisitionManagementService:unregister_instrument delete producer %s", str(producer))
self.clients.resource_registry.delete(producer)
return
def assign_data_product(self, input_resource_id='', data_product_id=''):
log.debug('assigning data product %s to resource %s', data_product_id, input_resource_id)
#Connect the producer for an existing input resource with a data product
t = Timer() if stats.is_log_enabled() else None
# Verify that both ids are valid
#input_resource_obj = self.clients.resource_registry.read(input_resource_id) #actually, don't need this one unless producer is not found (see if below)
data_product_obj = self.clients.resource_registry.read(data_product_id)
if t:
t.complete_step('dams.assign_data_product.read_dataproduct')
#find the data producer resource associated with the source resource that is creating the data product
primary_producer_ids, _ = self.clients.resource_registry.find_objects(subject=input_resource_id, predicate=PRED.hasDataProducer, object_type=RT.DataProducer, id_only=True)
if t:
t.complete_step('dams.assign_data_product.find_producer')
if not primary_producer_ids:
self.clients.resource_registry.read(input_resource_id) # raise different NotFound if resource didn't exist
raise NotFound("Data Producer for input resource %s does not exist" % input_resource_id)
#connect the producer to the product directly
self.clients.resource_registry.create_association(subject=input_resource_id, predicate=PRED.hasOutputProduct, object=data_product_id)
if t:
t.complete_step('dams.assign_data_product.create_association.hasOutputProduct')
#create data producer resource for this data product
data_producer_obj = DataProducer(name=data_product_obj.name, description=data_product_obj.description)
data_producer_obj.producer_context.configuration = {}
data_producer_id, rev = self.clients.resource_registry.create(data_producer_obj)
if t:
t.complete_step('dams.assign_data_product.create_dataproducer')
attachments = self.clients.resource_registry.find_attachments(data_product_id, include_content=False, id_only=False)
if t:
t.complete_step('dams.assign_data_product.find_attachments')
for attachment in attachments:
if attachment.attachment_type == AttachmentType.REFERENCE:
parser_id = attachment.context.parser_id
if parser_id:
self.register_producer_qc_reference(data_producer_id, parser_id, attachment._id)
if t:
t.complete_step('dams.assign_data_product.register_qc')
# Associate the Product with the Producer
self.clients.resource_registry.create_association(data_product_id, PRED.hasDataProducer, data_producer_id)
if t:
t.complete_step('dams.assign_data_product.create_association.hasDataProducer')
# Associate the Producer with the main Producer
self.clients.resource_registry.create_association(data_producer_id, PRED.hasParent, primary_producer_ids[0])
if t:
t.complete_step('dams.assign_data_product.create_association.hasParent')
stats.add(t)
stats.add_value('dams.assign_data_product.attachment_count', len(attachments))
def unassign_data_product(self, input_resource_id='', data_product_id=''):
"""
Disconnect the Data Product from the Data Producer
@param data_product_id str
@throws NotFound object with specified id does not exist
"""
# Verify that both ids are valid
input_resource_obj = self.clients.resource_registry.read(input_resource_id)
data_product_obj = self.clients.resource_registry.read(data_product_id)
#find the data producer resource associated with the source resource that is creating the data product
primary_producer_ids, _ = self.clients.resource_registry.find_objects(input_resource_id, PRED.hasDataProducer, RT.DataProducer, id_only=True)
if not primary_producer_ids:
raise NotFound("Data Producer for input resource %s does not exist" % input_resource_id)
else:
log.debug("unassign_data_product: primary producer ids %s" % str(primary_producer_ids))
#find the hasDataProduct association between the data product and the input resource
associations = self.clients.resource_registry.find_associations(subject=input_resource_id, predicate=PRED.hasOutputProduct, object=data_product_id, id_only=True)
for association in associations:
log.debug("unassign_data_product: unlink input resource with data product %s" % association)
self.clients.resource_registry.delete_association(association)
#find the data producer resource associated with the source resource that is creating the data product
producers, producer_assns = self.clients.resource_registry.find_objects(data_product_id, PRED.hasDataProducer, RT.DataProducer, True)
for producer, producer_assn in zip(producers, producer_assns):
#remove the link to the data product
self.clients.resource_registry.delete_association(producer_assn)
#remove the link to the parent data producer
associations = self.clients.resource_registry.find_associations(subject=producer, predicate=PRED.hasParent, id_only=True)
for association in associations:
self.clients.resource_registry.delete_association(association)
log.debug("DAMS:unassign_data_product delete producer: %s ", str(producer) )
self.clients.resource_registry.delete(producer)
return
def assign_data_product_source(self, data_product_id='', source_id=''):
# Connect a Data Product to the data source, either a Site or a Device
if source_id:
#connect the producer to the product directly
self.clients.resource_registry.create_association(data_product_id, PRED.hasSource, source_id)
return
def unassign_data_product_source(self, data_product_id='', source_id=''):
# Disconnect the Data Product from the data source
        # Find and break association with either a Site or a Device
assocs = self.clients.resource_registry.find_associations(data_product_id, PRED.hasSource, source_id)
if not assocs or len(assocs) == 0:
raise NotFound("DataProduct to source association for data product id %s to source %s does not exist" % (data_product_id, source_id))
association_id = assocs[0]._id
self.clients.resource_registry.delete_association(association_id)
return
#
# def create_data_producer(name='', description=''):
# """Create a data producer resource, create a stream reource via DM then associate the two resources. Currently, data producers and streams are one-to-one. If the data producer is a process, connect the data producer to any parent data producers.
#
# @param name str
# @param description str
# @retval data_producer_id str
# @throws BadRequest if object passed has _id or _rev attribute
# """
# pass
#
# def update_data_producer(self, data_producer=None):
# '''
# Update an existing data producer.
#
# @param data_producer The data_producer object with updated properties.
# @retval success Boolean to indicate successful update.
# @todo Add logic to validate optional attributes. Is this interface correct?
# '''
# # Return Value
# # ------------
# # {success: true}
# #
# log.debug("Updating data_producer object: %s" % data_producer.name)
# return self.clients.resource_registry.update(data_producer)
#
# def read_data_producer(self, data_producer_id=''):
# '''
# Get an existing data_producer object.
#
# @param data_producer_id The id of the stream.
# @retval data_producer The data_producer object.
# @throws NotFound when data_producer doesn't exist.
# '''
# # Return Value
# # ------------
# # data_producer: {}
# #
# log.debug("Reading data_producer object id: %s" % data_producer_id)
# data_producer_obj = self.clients.resource_registry.read(data_producer_id)
#
# return data_producer_obj
#
# def delete_data_producer(self, data_producer_id=''):
# '''
# Delete an existing data_producer.
#
# @param data_producer_id The id of the stream.
# @retval success Boolean to indicate successful deletion.
# @throws NotFound when data_producer doesn't exist.
# '''
# # Return Value
# # ------------
# # {success: true}
# #
# log.debug("Deleting data_producer id: %s" % data_producer_id)
#
# return self.clients.resource_registry.lcs_delete(data_producer_id)
#
#
# def force_delete_data_producer(self, data_producer_id=''):
# self._remove_associations(data_producer_id)
# self.clients.resource_registry.delete(data_producer_id)
# -----------------
# The following operations manage EOI resources
# -----------------
##########################################################################
#
# External Data Provider
#
##########################################################################
def create_external_data_provider(self, external_data_provider=None):
# Persist ExternalDataProvider object and return object _id as OOI id
return self.RR2.create(external_data_provider, RT.ExternalDataProvider)
def update_external_data_provider(self, external_data_provider=None):
# Overwrite ExternalDataProvider object
self.RR2.update(external_data_provider, RT.ExternalDataProvider)
def read_external_data_provider(self, external_data_provider_id=''):
# Read ExternalDataProvider object with _id matching passed user id
return self.RR2.read(external_data_provider_id, RT.ExternalDataProvider)
def delete_external_data_provider(self, external_data_provider_id=''):
self.RR2.lcs_delete(external_data_provider_id, RT.ExternalDataProvider)
def force_delete_external_data_provider(self, external_data_provider_id=''):
self.RR2.force_delete(external_data_provider_id, RT.ExternalDataProvider)
##########################################################################
#
# Data Source
#
##########################################################################
def create_data_source(self, data_source=None):
# Persist DataSource object and return object _id as OOI id
return self.RR2.create(data_source, RT.DataSource)
def update_data_source(self, data_source=None):
# Overwrite DataSource object
self.RR2.update(data_source, RT.DataSource)
def read_data_source(self, data_source_id=''):
# Read DataSource object with _id matching passed user id
log.debug("Reading DataSource object id: %s" % data_source_id)
data_source_obj = self.RR2.read(data_source_id, RT.DataSource)
return data_source_obj
def delete_data_source(self, data_source_id=''):
# Read and delete specified DataSource object
log.debug("Deleting DataSource id: %s" % data_source_id)
self.RR2.lcs_delete(data_source_id, RT.DataSource)
return
def force_delete_data_source(self, data_source_id=''):
self.RR2.force_delete(data_source_id, RT.DataSource)
def create_data_source_model(self, data_source_model=None):
# Persist DataSourceModel object and return object _id as OOI id
return self.RR2.create(data_source_model, RT.DataSourceModel)
def update_data_source_model(self, data_source_model=None):
# Overwrite DataSourceModel object
self.RR2.update(data_source_model, RT.DataSourceModel)
def read_data_source_model(self, data_source_model_id=''):
# Read DataSourceModel object with _id matching passed user id
return self.RR2.read(data_source_model_id, RT.DataSourceModel)
def delete_data_source_model(self, data_source_model_id=''):
        # Read and delete specified DataSourceModel object
self.RR2.lcs_delete(data_source_model_id, RT.DataSourceModel)
return
def force_delete_data_source_model(self, data_source_model_id=''):
self.RR2.force_delete(data_source_model_id, RT.DataSourceModel)
def create_data_source_agent(self, data_source_agent=None, data_source_model_id='' ):
        # Persist DataSourceAgent object and return object _id as OOI id
data_source_agent_id = self.RR2.create(data_source_agent, RT.DataSourceAgent)
if data_source_model_id:
self.RR2.assign_data_source_model_to_data_source_agent_with_has_model(data_source_model_id, data_source_agent_id)
return data_source_agent_id
def update_data_source_agent(self, data_source_agent=None):
# Overwrite DataSourceAgent object
self.RR2.update(data_source_agent, RT.DataSourceAgent)
def read_data_source_agent(self, data_source_agent_id=''):
# Read DataSourceAgent object with _id matching passed user id
data_source_agent = self.RR2.read(data_source_agent_id, RT.DataSourceAgent)
return data_source_agent
def delete_data_source_agent(self, data_source_agent_id=''):
# Read and delete specified DataSourceAgent object
self.RR2.lcs_delete(data_source_agent_id, RT.DataSourceAgent)
def force_delete_data_source_agent(self, data_source_agent_id=''):
self.RR2.force_delete(data_source_agent_id, RT.DataSourceAgent)
def create_data_source_agent_instance(self, data_source_agent_instance=None, data_source_agent_id='', data_source_id=''):
# Persist DataSourceAgentInstance object and return object _id as OOI id
data_source_agent_instance_id = self.RR2.create(data_source_agent_instance, RT.DataSourceAgentInstance)
if data_source_id:
self.RR2.assign_data_source_agent_instance_to_data_source_with_has_agent_instance(data_source_agent_instance_id, data_source_id)
if data_source_agent_id:
self.RR2.assign_data_source_agent_to_data_source_agent_instance_with_has_agent_definition(data_source_agent_id, data_source_agent_instance_id)
return data_source_agent_instance_id
def update_data_source_agent_instance(self, data_source_agent_instance=None):
# Overwrite DataSourceAgentInstance object
self.RR2.update(data_source_agent_instance, RT.DataSourceAgentInstance)
def read_data_source_agent_instance(self, data_source_agent_instance_id=''):
# Read DataSourceAgentInstance object with _id matching passed user id
data_source_agent_instance = self.RR2.read(data_source_agent_instance_id, RT.DataSourceAgentInstance)
return data_source_agent_instance
def delete_data_source_agent_instance(self, data_source_agent_instance_id=''):
# Read and delete specified DataSourceAgentInstance object
self.RR2.lcs_delete(data_source_agent_instance_id, RT.DataSourceAgentInstance)
def force_delete_data_source_agent_instance(self, data_source_agent_instance_id=''):
self.RR2.force_delete(data_source_agent_instance_id, RT.DataSourceAgentInstance)
def start_data_source_agent_instance(self, data_source_agent_instance_id=''):
"""Launch an data source agent instance process and return its process id. Agent instance resource
must exist and be associated with an external data source
@param data_source_agent_instance_id str
@retval process_id str
@throws NotFound object with specified id does not exist
"""
pass
def stop_data_source_agent_instance(self, data_source_agent_instance_id=''):
"""Deactivate the agent instance process
@param data_source_agent_instance_id str
@throws NotFound object with specified id does not exist
"""
pass
##########################################################################
#
# External Data Set
#
##########################################################################
def create_external_dataset(self, external_dataset=None, external_dataset_model_id=''):
# Persist ExternalDataSet object and return object _id as OOI id
external_dataset_id = self.RR2.create(external_dataset, RT.ExternalDataset)
if external_dataset_model_id:
self.RR2.assign_external_dataset_model_to_external_dataset_with_has_model(external_dataset_model_id, external_dataset_id)
return external_dataset_id
def update_external_dataset(self, external_dataset=None):
# Overwrite ExternalDataSet object
self.RR2.update(external_dataset, RT.ExternalDataset)
def read_external_dataset(self, external_dataset_id=''):
# Read ExternalDataSet object with _id matching passed user id
external_dataset = self.RR2.read(external_dataset_id, RT.ExternalDataset)
return external_dataset
def delete_external_dataset(self, external_dataset_id=''):
# Read and delete specified ExternalDataSet object
self.RR2.lcs_delete(external_dataset_id, RT.ExternalDataset)
def force_delete_external_dataset(self, external_dataset_id=''):
self.RR2.force_delete(external_dataset_id, RT.ExternalDataset)
def create_external_dataset_model(self, external_dataset_model=None):
# Persist ExternalDatasetModel object and return object _id as OOI id
return self.RR2.create(external_dataset_model, RT.ExternalDatasetModel)
def update_external_dataset_model(self, external_dataset_model=None):
# Overwrite ExternalDatasetModel object
self.RR2.update(external_dataset_model, RT.ExternalDatasetModel)
def read_external_dataset_model(self, external_dataset_model_id=''):
# Read ExternalDatasetModel object with _id matching passed user id
external_dataset_model = self.RR2.read(external_dataset_model_id, RT.ExternalDatasetModel)
return external_dataset_model
def delete_external_dataset_model(self, external_dataset_model_id=''):
# Read and delete specified ExternalDatasetModel object
self.RR2.lcs_delete(external_dataset_model_id, RT.ExternalDatasetModel)
def force_delete_external_dataset_model(self, external_dataset_model_id=''):
self.RR2.force_delete(external_dataset_model_id, RT.ExternalDatasetModel)
#
# ExternalDatasetAgent
#
def create_external_dataset_agent(self, external_dataset_agent=None, external_dataset_model_id=''):
# Persist ExternalDatasetAgent object and return object _id as OOI id
agent_id = self.RR2.create(external_dataset_agent, RT.ExternalDatasetAgent)
if external_dataset_model_id:
# NOTE: external_dataset_model_id can be any model type
self.clients.resource_registry.create_association(agent_id, PRED.hasModel, external_dataset_model_id)
# Create the process definition to launch the agent
process_definition = ProcessDefinition()
process_definition.name = "ProcessDefinition for ExternalDatasetAgent %s" % external_dataset_agent.name
process_definition.executable['url'] = external_dataset_agent.agent_uri
process_definition.executable['module'] = external_dataset_agent.agent_module or 'ion.agents.data.dataset_agent'
process_definition.executable['class'] = external_dataset_agent.agent_class or 'DataSetAgent'
process_definition_id = self.clients.process_dispatcher.create_process_definition(process_definition=process_definition)
log.debug("external_dataset_agent has process definition id %s", process_definition_id)
# Associate the agent and the process def
self.RR2.assign_process_definition_to_external_dataset_agent_with_has_process_definition(process_definition_id, agent_id)
return agent_id
def update_external_dataset_agent(self, external_dataset_agent=None):
        # Overwrite ExternalDatasetAgent object
self.RR2.update(external_dataset_agent, RT.ExternalDatasetAgent)
def read_external_dataset_agent(self, external_dataset_agent_id=''):
# Read ExternalDatasetAgent object with _id matching passed user id
external_dataset_agent = self.RR2.read(external_dataset_agent_id, RT.ExternalDatasetAgent)
return external_dataset_agent
def delete_external_dataset_agent(self, external_dataset_agent_id=''):
        # Read and delete specified ExternalDatasetAgent object
self.RR2.lcs_delete(external_dataset_agent_id, RT.ExternalDatasetAgent)
def force_delete_external_dataset_agent(self, external_dataset_agent_id=''):
self.RR2.force_delete(external_dataset_agent_id, RT.ExternalDatasetAgent)
def assign_model_to_external_dataset_agent(self, model_id='', external_dataset_agent_id=''):
self.clients.resource_registry.create_association(external_dataset_agent_id, PRED.hasModel, model_id)
def unassign_model_from_external_dataset_agent(self, model_id='', external_dataset_agent_id=''):
self.clients.resource_registry.delete_association((external_dataset_agent_id, PRED.hasModel, model_id))
def assign_external_data_agent_to_agent_instance(self, external_data_agent_id='', agent_instance_id=''):
#Connect the agent with an agent instance
data_source = self.clients.resource_registry.read(external_data_agent_id)
agent_instance = self.clients.resource_registry.read(agent_instance_id)
log.debug("associating: external dataset agent instance %s hasAgentDefinition %s", agent_instance_id, external_data_agent_id)
# check if the association already exists
associations = self.clients.resource_registry.find_associations(agent_instance_id, PRED.hasAgentDefinition, external_data_agent_id, id_only=True)
log.trace('found associations: %r', associations)
if not associations:
self.clients.resource_registry.create_association(agent_instance_id, PRED.hasAgentDefinition, external_data_agent_id)
def unassign_external_data_agent_from_agent_instance(self, external_data_agent_id='', agent_instance_id=''):
data_source = self.clients.resource_registry.read(external_data_agent_id)
agent_instance = self.clients.resource_registry.read(agent_instance_id)
# delete the associations
self.clients.resource_registry.delete_association((agent_instance_id, PRED.hasAgentDefinition, external_data_agent_id))
def prepare_external_dataset_agent_support(self, external_dataset_agent_id=''):
#TODO - does this have to be filtered by Org ( is an Org parameter needed )
extended_resource_handler = ExtendedResourceContainer(self)
resource_data = extended_resource_handler.create_prepare_resource_support(external_dataset_agent_id, OT.ExternalDatasetAgentPrepareSupport)
        #Fill out service request information for creating an external dataset agent
extended_resource_handler.set_service_requests(resource_data.create_request,
'data_acquisition_management',
'create_external_dataset_agent',
{ "external_dataset_agent": "$(external_dataset_agent)" })
        #Fill out service request information for updating an external dataset agent
extended_resource_handler.set_service_requests(resource_data.update_request,
'data_acquisition_management',
'update_external_dataset_agent',
{ "external_dataset_agent": "$(external_dataset_agent)" })
        #Fill out service request information for assigning an InstrumentModel
extended_resource_handler.set_service_requests(resource_data.associations['InstrumentModel'].assign_request,
'data_acquisition_management',
'assign_model_to_external_dataset_agent',
{"model_id": "$(instrument_model_id)",
"external_dataset_agent_id": external_dataset_agent_id })
        #Fill out service request information for unassigning an InstrumentModel
extended_resource_handler.set_service_requests(resource_data.associations['InstrumentModel'].unassign_request,
'data_acquisition_management',
'unassign_model_from_external_dataset_agent',
{"model_id": "$(instrument_model_id)",
"external_dataset_agent_id": external_dataset_agent_id })
#Fill out service request information for assigning a PlatformModel
extended_resource_handler.set_service_requests(resource_data.associations['PlatformModel'].assign_request,
'data_acquisition_management',
'assign_model_to_external_dataset_agent',
{"model_id": "$(platform_model_id)",
"external_dataset_agent_id": external_dataset_agent_id })
#Fill out service request information for unassigning a PlatformModel
extended_resource_handler.set_service_requests(resource_data.associations['PlatformModel'].unassign_request,
'data_acquisition_management',
'unassign_model_from_external_dataset_agent',
{"model_id": "$(platform_model_id)",
"external_dataset_agent_id": external_dataset_agent_id })
        #Fill out service request information for assigning an ExternalDatasetAgentInstance
extended_resource_handler.set_service_requests(resource_data.associations['ExternalDatasetAgentInstance'].assign_request,
'data_acquisition_management',
'assign_external_data_agent_to_agent_instance',
{"external_data_agent_id": external_dataset_agent_id,
"agent_instance_id": "$(external_dataset_agent_instance_id)" })
        #Fill out service request information for unassigning an ExternalDatasetAgentInstance
extended_resource_handler.set_service_requests(resource_data.associations['ExternalDatasetAgentInstance'].unassign_request,
'data_acquisition_management',
'unassign_external_data_agent_from_agent_instance',
{"external_data_agent_id": external_dataset_agent_id,
"agent_instance_id": "$(external_dataset_agent_instance_id)" })
return resource_data
#
# ExternalDatasetAgentInstance
#
def create_external_dataset_agent_instance(self, external_dataset_agent_instance=None, external_dataset_agent_id='', external_dataset_id=''):
# Persist ExternalDatasetAgentInstance object and return object _id as OOI id
external_dataset_agent_instance_id = self.RR2.create(external_dataset_agent_instance, RT.ExternalDatasetAgentInstance)
if external_dataset_id:
self.RR2.assign_external_dataset_agent_instance_to_external_dataset_with_has_agent_instance(
external_dataset_agent_instance_id, external_dataset_id)
if external_dataset_agent_id:
self.assign_external_data_agent_to_agent_instance(external_dataset_agent_id, external_dataset_agent_instance_id)
log.debug('created dataset agent instance %s, agent id=%s', external_dataset_agent_instance_id, external_dataset_agent_id)
return external_dataset_agent_instance_id
def update_external_dataset_agent_instance(self, external_dataset_agent_instance=None):
        # Overwrite ExternalDatasetAgentInstance object
self.RR2.update(external_dataset_agent_instance, RT.ExternalDatasetAgentInstance)
def read_external_dataset_agent_instance(self, external_dataset_agent_instance_id=''):
# Read ExternalDatasetAgent object with _id matching passed user id
external_dataset_agent_instance = self.RR2.read(external_dataset_agent_instance_id, RT.ExternalDatasetAgentInstance)
return external_dataset_agent_instance
def delete_external_dataset_agent_instance(self, external_dataset_agent_instance_id=''):
self.RR2.lcs_delete(external_dataset_agent_instance_id, RT.ExternalDatasetAgentInstance)
def force_delete_external_dataset_agent_instance(self, external_dataset_agent_instance_id=''):
self.RR2.force_delete(external_dataset_agent_instance_id, RT.ExternalDatasetAgentInstance)
def assign_external_dataset_agent_instance_to_device(self, external_dataset_agent_instance_id='', device_id=''):
self.clients.resource_registry.create_association(device_id, PRED.hasAgentInstance, external_dataset_agent_instance_id)
def unassign_external_dataset_agent_instance_from_device(self, external_dataset_agent_instance_id='', device_id=''):
self.clients.resource_registry.delete_association((device_id, PRED.hasAgentInstance, external_dataset_agent_instance_id))
def _assert_persistence_on(self, config_builder):
if not config_builder or RT.DataProduct not in config_builder.associated_objects:
return
data_products = config_builder.associated_objects[RT.DataProduct]
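        # For a PlatformDevice any one persisted data product is sufficient; for other
        # devices the "Parsed" data product specifically must have persistence activated.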
if config_builder._get_device().type_ == RT.PlatformDevice:
for dp in data_products:
if self.DPMS.is_persisted(dp._id):
return
raise BadRequest("Cannot start agent - data product persistence is not activated!")
else:
parsed_dp_id = None
for dp in data_products:
if dp.processing_level_code == "Parsed":
parsed_dp_id = dp._id
break
if parsed_dp_id:
if not self.DPMS.is_persisted(parsed_dp_id):
raise BadRequest("Cannot start agent - data product persistence is not activated!")
else:
log.warn("Cannot determine if persistence is activated for agent instance=%s", config_builder.agent_instance_obj._id)
def start_external_dataset_agent_instance(self, external_dataset_agent_instance_id=''):
"""Launch an external dataset agent instance process and return its process id.
Agent instance resource must exist and be associated with an external dataset or device and an agent definition
@param external_dataset_agent_instance_id str
@retval process_id str
@throws NotFound object with specified id does not exist
"""
#todo: may want to call retrieve_external_dataset_agent_instance here
#todo: if instance running, then return or throw
        #todo: if instance exists and dataset_agent_instance_obj.dataset_agent_config is completed then just schedule_process
dataset_agent_instance_obj = self.clients.resource_registry.read(external_dataset_agent_instance_id)
# can be a Device or ExternalDataset
source_id = self.clients.resource_registry.read_subject(
predicate=PRED.hasAgentInstance, object=external_dataset_agent_instance_id, id_only=True)
ext_dataset_agent_obj = self.clients.resource_registry.read_object(
object_type=RT.ExternalDatasetAgent, predicate=PRED.hasAgentDefinition, subject=external_dataset_agent_instance_id, id_only=False)
process_definition_id = self.clients.resource_registry.read_object(
subject=ext_dataset_agent_obj._id, predicate=PRED.hasProcessDefinition, object_type=RT.ProcessDefinition, id_only=True)
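        # Lookup chain walked above: source (Device or ExternalDataset) --hasAgentInstance-->
        # agent instance --hasAgentDefinition--> ExternalDatasetAgent --hasProcessDefinition--> ProcessDefinition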
# Agent launch
config_builder = ExternalDatasetAgentConfigurationBuilder(self.clients)
try:
config_builder.set_agent_instance_object(dataset_agent_instance_obj)
config = config_builder.prepare()
log.trace("Using dataset agent configuration: %s", config)
except Exception:
log.error('failed to launch', exc_info=True)
raise ServerError('failed to launch')
# Check that persistence is on
self._assert_persistence_on(config_builder)
# Save the config into an object in the object store which will be passed to the agent by the container.
config_builder.record_launch_parameters(config)
launcher = AgentLauncher(self.clients.process_dispatcher)
process_id = launcher.launch(config, config_builder._get_process_definition()._id)
if not process_id:
raise ServerError("Launched external dataset agent instance but no process_id")
launcher.await_launch(10.0)
return process_id
def stop_external_dataset_agent_instance(self, external_dataset_agent_instance_id=''):
"""
Deactivate the agent instance process
"""
        # This dataset agent instance could be linked to an external dataset or an instrument device; retrieve whichever one is the data producer.
external_dataset_device_ids, _ = self.clients.resource_registry.find_subjects( predicate=PRED.hasAgentInstance, object=external_dataset_agent_instance_id, id_only=True)
if len(external_dataset_device_ids) != 1:
raise NotFound("ExternalDatasetAgentInstance resource is not correctly associated with an ExternalDataset or InstrumentDevice" )
agent_process_id = ResourceAgentClient._get_agent_process_id(external_dataset_device_ids[0])
try:
# Cancels the execution of the given process id.
self.clients.process_dispatcher.cancel_process(agent_process_id)
finally:
# Save the process state
agent_instance_res = self.clients.resource_registry.read(external_dataset_agent_instance_id)
old_state = None
try:
old_state,_ = self.container.state_repository.get_state(agent_process_id)
old_state["_prior_agent_process_id"] = agent_process_id
except NotFound:
log.warn("Could not find process state for agent instance %s", external_dataset_agent_instance_id)
if old_state and isinstance(old_state, dict):
agent_instance_res.saved_agent_state = old_state
else:
agent_instance_res.saved_agent_state = {}
self.clients.resource_registry.update(agent_instance_res)
def prepare_external_dataset_agent_instance_support(self, external_dataset_agent_instance_id=''):
#TODO - does this have to be filtered by Org ( is an Org parameter needed )
extended_resource_handler = ExtendedResourceContainer(self)
resource_data = extended_resource_handler.create_prepare_resource_support(external_dataset_agent_instance_id, OT.ExternalDatasetAgentInstancePrepareSupport)
        #Fill out service request information for creating an external dataset agent instance
extended_resource_handler.set_service_requests(resource_data.create_request,
'data_acquisition_management',
'create_external_dataset_agent_instance',
{"external_dataset_agent_instance": "$(external_dataset_agent_instance)" })
        #Fill out service request information for updating an external dataset agent instance
extended_resource_handler.set_service_requests(resource_data.update_request,
'data_acquisition_management',
'update_external_dataset_agent_instance',
{"external_dataset_agent_instance": "$(external_dataset_agent_instance)" })
        #Fill out service request information for starting an external dataset agent instance
extended_resource_handler.set_service_requests(resource_data.start_request,
'data_acquisition_management',
'start_external_dataset_agent_instance',
{"external_dataset_agent_instance_id": "$(external_dataset_agent_instance_id)" })
        #Fill out service request information for stopping an external dataset agent instance
extended_resource_handler.set_service_requests(resource_data.stop_request,
'data_acquisition_management',
'stop_external_dataset_agent_instance',
{"external_dataset_agent_instance_id": "$(external_dataset_agent_instance_id)" })
        #Fill out service request information for assigning an InstrumentDevice
extended_resource_handler.set_service_requests(resource_data.associations['InstrumentDevice'].assign_request,
'data_acquisition_management',
'assign_external_dataset_agent_instance_to_device',
{"device_id": "$(instrument_device_id)",
"external_dataset_agent_instance_id": external_dataset_agent_instance_id })
        #Fill out service request information for unassigning an InstrumentDevice
extended_resource_handler.set_service_requests(resource_data.associations['InstrumentDevice'].unassign_request,
'data_acquisition_management',
'unassign_external_dataset_agent_instance_from_device',
{"device_id": "$(instrument_device_id)",
"external_dataset_agent_instance_id": external_dataset_agent_instance_id })
#Fill out service request information for assigning a PlatformDevice
extended_resource_handler.set_service_requests(resource_data.associations['PlatformDevice'].assign_request,
'data_acquisition_management',
'assign_external_dataset_agent_instance_to_device',
{"device_id": "$(platform_device_id)",
"external_dataset_agent_instance_id": external_dataset_agent_instance_id })
#Fill out service request information for unassigning a PlatformDevice
extended_resource_handler.set_service_requests(resource_data.associations['PlatformDevice'].unassign_request,
'data_acquisition_management',
'unassign_external_dataset_agent_instance_from_device',
{"device_id": "$(platform_device_id)",
"external_dataset_agent_instance_id": external_dataset_agent_instance_id })
        #Fill out service request information for assigning an ExternalDatasetAgent
extended_resource_handler.set_service_requests(resource_data.associations['ExternalDatasetAgent'].assign_request,
'data_acquisition_management',
'assign_external_data_agent_to_agent_instance',
{"external_data_agent_id": "$(external_dataset_agent_id)",
"agent_instance_id": external_dataset_agent_instance_id })
        #Fill out service request information for unassigning an ExternalDatasetAgent
extended_resource_handler.set_service_requests(resource_data.associations['ExternalDatasetAgent'].unassign_request,
'data_acquisition_management',
'unassign_external_data_agent_from_agent_instance',
{"external_data_agent_id": "$(external_dataset_agent_id)",
"agent_instance_id": external_dataset_agent_instance_id })
return resource_data
def retrieve_external_dataset_agent_instance(self, external_dataset_id=''):
"""
Retrieve the agent instance for an external dataset and check if it is running
"""
        #Verify that the external dataset resource exists
data_set = self.clients.resource_registry.read(external_dataset_id)
# check if the association already exists
ai_ids, _ = self.clients.resource_registry.find_objects(external_dataset_id, PRED.hasAgentInstance, id_only=True)
if len(ai_ids) > 1:
raise NotFound("ExternalDataset resource %s is associated with multiple agent instances" % external_dataset_id)
        if not ai_ids:
return None, None
else:
if not ResourceAgentClient._get_agent_process_id(external_dataset_id):
active = False
else:
active = True
return ai_ids[0], active
##########################################################################
#
# Resource Assign Functions
#
##########################################################################
def assign_data_source_to_external_data_provider(self, data_source_id='', external_data_provider_id=''):
#Connect the data source with an external data provider
data_source = self.clients.resource_registry.read(data_source_id)
agent_instance = self.clients.resource_registry.read(external_data_provider_id)
# check if the association already exists
associations = self.clients.resource_registry.find_associations(data_source_id, PRED.hasProvider, external_data_provider_id, id_only=True)
if not associations:
self.clients.resource_registry.create_association(data_source_id, PRED.hasProvider, external_data_provider_id)
def unassign_data_source_from_external_data_provider(self, data_source_id='', external_data_provider_id=''):
#Disconnect the data source from the external data provider
data_source = self.clients.resource_registry.read(data_source_id)
agent_instance = self.clients.resource_registry.read(external_data_provider_id)
# delete the associations
self.clients.resource_registry.delete_association((data_source_id, PRED.hasProvider, external_data_provider_id))
def assign_data_source_to_data_model(self, data_source_id='', data_source_model_id=''):
#Connect the data source with an external data model
data_source = self.clients.resource_registry.read(data_source_id)
agent_instance = self.clients.resource_registry.read(data_source_model_id)
# check if the association already exists
associations = self.clients.resource_registry.find_associations(data_source_id, PRED.hasModel, data_source_model_id, id_only=True)
if not associations:
self.clients.resource_registry.create_association(data_source_id, PRED.hasModel, data_source_model_id)
def unassign_data_source_from_data_model(self, data_source_id='', data_source_model_id=''):
        #Disconnect the data source from the external data model
data_source = self.clients.resource_registry.read(data_source_id)
agent_instance = self.clients.resource_registry.read(data_source_model_id)
# delete the associations
self.clients.resource_registry.delete_association((data_source_id, PRED.hasModel, data_source_model_id))
def assign_external_dataset_to_agent_instance(self, external_dataset_id='', agent_instance_id=''):
#Connect the agent instance with an external data set
data_source = self.clients.resource_registry.read(external_dataset_id)
agent_instance = self.clients.resource_registry.read(agent_instance_id)
log.debug("associating: external dataset %s hasAgentInstance %s", external_dataset_id, agent_instance_id)
# check if the association already exists
associations = self.clients.resource_registry.find_associations(external_dataset_id, PRED.hasAgentInstance, agent_instance_id, id_only=True)
if not associations:
self.clients.resource_registry.create_association(external_dataset_id, PRED.hasAgentInstance, agent_instance_id)
def unassign_external_dataset_from_agent_instance(self, external_dataset_id='', agent_instance_id=''):
data_source = self.clients.resource_registry.read(external_dataset_id)
agent_instance = self.clients.resource_registry.read(agent_instance_id)
# delete the associations
self.clients.resource_registry.delete_association((external_dataset_id, PRED.hasAgentInstance, agent_instance_id))
def assign_dataset_agent_to_external_dataset_model(self, dataset_agent_id='', external_dataset_model_id=''):
#Connect the external data agent with an external data model
external_data_agent = self.clients.resource_registry.read(dataset_agent_id)
external_dataset_model = self.clients.resource_registry.read(external_dataset_model_id)
# check if the association already exists
associations = self.clients.resource_registry.find_associations(dataset_agent_id, PRED.hasModel, external_dataset_model_id, id_only=True)
if not associations:
self.clients.resource_registry.create_association(dataset_agent_id, PRED.hasModel, external_dataset_model_id)
def unassign_dataset_agent_from_external_dataset_model(self, dataset_agent_id='', external_dataset_model_id=''):
        #Disconnect the external data agent from the external data model
dataset_agent = self.clients.resource_registry.read(dataset_agent_id)
external_dataset_model = self.clients.resource_registry.read(external_dataset_model_id)
# delete the associations
self.clients.resource_registry.delete_association((dataset_agent_id, PRED.hasModel, external_dataset_model_id))
def assign_external_dataset_to_data_source(self, external_dataset_id='', data_source_id=''):
#Connect the external data set to a data source
data_source = self.clients.resource_registry.read(external_dataset_id)
agent_instance = self.clients.resource_registry.read(data_source_id)
# check if the association already exists
        associations = self.clients.resource_registry.find_associations(external_dataset_id, PRED.hasDataSource, data_source_id, id_only=True)
if not associations:
self.clients.resource_registry.create_association(external_dataset_id, PRED.hasDataSource, data_source_id)
def unassign_external_dataset_from_data_source(self, external_dataset_id='', data_source_id=''):
        #Disconnect the external data set from the data source
data_source = self.clients.resource_registry.read(external_dataset_id)
agent_instance = self.clients.resource_registry.read(data_source_id)
# delete the associations
self.clients.resource_registry.delete_association((external_dataset_id, PRED.hasDataSource, data_source_id))
def create_parser(self, parser=None):
parser_id, rev = self.clients.resource_registry.create(parser)
return parser_id
def read_parser(self, parser_id=''):
parser = self.clients.resource_registry.read(parser_id)
validate_is_instance(parser,Parser,'The specified identifier does not correspond to a Parser resource')
return parser
def delete_parser(self, parser_id=''):
self.clients.resource_registry.delete(parser_id)
return True
def update_parser(self, parser=None):
if parser:
self.clients.resource_registry.update(parser)
def register_producer_qc_reference(self, producer_id='', parser_id='', attachment_id=''):
log.debug('register_producer_qc_reference: %s %s %s', producer_id, parser_id, attachment_id)
attachment = self.clients.resource_registry.read_attachment(attachment_id, include_content=True)
document = attachment.content
document_keys = self.parse_qc_reference(parser_id, document) or []
producer_obj = self.clients.resource_registry.read(producer_id)
if 'qc_keys' in producer_obj.producer_context.configuration:
producer_obj.producer_context.configuration['qc_keys'].extend(document_keys)
else:
producer_obj.producer_context.configuration['qc_keys'] = document_keys
self.clients.resource_registry.update(producer_obj)
return True
def parse_qc_reference(self, parser_id='', document=None):
document_keys = []
if document is None:
raise BadRequest('Empty Document')
parser = self.read_parser(parser_id=parser_id)
try:
module = __import__(parser.module, fromlist=[parser.method])
method = getattr(module, parser.method)
except ImportError:
raise BadRequest('No import named {0} found.'.format(parser.module))
except AttributeError:
raise BadRequest('No method named {0} in {1}.'.format(parser.method, parser.module))
except:
log.exception('Failed to parse document')
raise
svm = StoredValueManager(self.container)
for key, doc in method(document):
try:
svm.stored_value_cas(key, doc)
document_keys.append(key)
except:
log.error('Error parsing a row in document.')
return document_keys
def list_qc_references(self, data_product_id=''):
''' Performs a breadth-first traversal of the provenance for a data product in an attempt to collect all the document keys'''
document_keys = []
producer_ids, _ = self.clients.resource_registry.find_objects(subject=data_product_id, predicate=PRED.hasDataProducer, id_only=True)
if not len(producer_ids):
return []
producer_id = producer_ids.pop(0)
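        # Breadth-first walk over hasParent links, starting from the product's primary
        # producer, collecting every reachable producer resource id.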
def traversal(owner_id):
def edges(resource_ids=[]):
retval = []
if not isinstance(resource_ids, list):
resource_ids = list(resource_ids)
for resource_id in resource_ids:
retval.extend(self.clients.resource_registry.find_objects(subject=resource_id, predicate=PRED.hasParent,id_only=True)[0])
return retval
visited_resources = deque([producer_id] + edges([owner_id]))
traversal_queue = deque()
done = False
t = None
while not done:
t = traversal_queue or deque(visited_resources)
traversal_queue = deque()
for e in edges(t):
if not e in visited_resources:
visited_resources.append(e)
traversal_queue.append(e)
if not len(traversal_queue): done = True
return list(visited_resources)
for prod_id in traversal(producer_id):
producer = self.clients.resource_registry.read(prod_id)
if 'qc_keys' in producer.producer_context.configuration:
document_keys.extend(producer.producer_context.configuration['qc_keys'])
return document_keys
|
import csv
import PyPDF2
import re
import datetime
from datetime import date, timedelta
from fitness import Fitness
from dbConnection import DB
import toolBox
def eval(individual):
''' Evaluates best ind timetable'''
with open('timetable.csv', 'w') as csvfile1:
writer = csv.writer(csvfile1)
writer.writerow(['Line', 'Capacity', 'Headway', 'Departure time'])
for trip, item in enumerate(individual):
if trip % 7 == 0:
[writer.writerow(individual[trip+i]) for i in range(7)]
writer.writerow([])
def evaluateTimetable():
''' Evaluates how well a current static timetable does in terms of waiting time. The purpose is to give some kind of
comparison metric with the timetable as generated by GA.
'''
pdfFileObj = open('H1_web.pdf', 'rb')
pdfReader = PyPDF2.PdfFileReader(pdfFileObj)
numPages = pdfReader.numPages
pagesText = []
for i in range(2):
pdfReader = PyPDF2.PdfFileReader(pdfFileObj)
pageObj = pdfReader.getPage(i)
txt = pageObj.extractText()
timeRegex = re.compile(r'\d{2}\.\d{2}') # escape dot to match
mo = timeRegex.findall(txt)
pagesText += mo
departures = list()
for index in range(len(pagesText)):
if index % 4 == 0:
for i in range(4):
departures.append(pagesText[index+i])
departures[:] = ['2015 12 09 ' + x for x in departures]
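    # The fixed '2015 12 09' date prefix added above lets strptime construct full
    # datetime objects for each departure time further down.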
length = len(departures)
items = []
for i in range(8):
item = departures.pop()
items.append(item)
items.reverse()
#departures[:] = items + departures
individual = list()
for t in departures:
individual.append([1, 120, 1, datetime.datetime.strptime(t, '%Y %m %d %H.%M')])
phenotype = []
ind = []
for q in range(len(individual)):
try:
if q % 4 == 0:
ind.append(individual[q])
t = []
for x in range(4):
t.append(individual[q+x])
#phenotype.append(t)
except IndexError, e:
t[:] = []
for x in range(4):
t.append(individual[x+0])
#phenotype.append(t)
print ind
if __name__ == "__main__":
evaluateTimetable()
|
You may only record a name once.
You must talk to each person before you write their name down.
You can use people from within your own class, or outside the class group.
Scoring is based on one point for each name, two bonus points for no repeating of names, five bonus points for the first person to finish, and three bonus points for the second person to finish.
Your students are bound to have fun and learn a lot using this cooperation game. Problem solving activities will give them the skills they need to be successful. |
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Parameters for debugging
res = 1000
# create pipeline
#
pl3d = vtk.vtkMultiBlockPLOT3DReader()
pl3d.SetXYZFileName(VTK_DATA_ROOT + "/Data/combxyz.bin")
pl3d.SetQFileName(VTK_DATA_ROOT + "/Data/combq.bin")
pl3d.SetScalarFunctionNumber(100)
pl3d.SetVectorFunctionNumber(202)
pl3d.Update()
output = pl3d.GetOutput().GetBlock(0)
# Create a probe plane
center = output.GetCenter()
plane = vtk.vtkPlaneSource()
plane.SetResolution(res,res)
plane.SetOrigin(0,0,0)
plane.SetPoint1(10,0,0)
plane.SetPoint2(0,10,0)
plane.SetCenter(center)
plane.SetNormal(0,1,0)
# Reuse the locator
locator = vtk.vtkStaticPointLocator()
locator.SetDataSet(output)
locator.BuildLocator()
# Voronoi kernel------------------------------------------------
voronoiKernel = vtk.vtkVoronoiKernel()
interpolator = vtk.vtkPointInterpolator()
interpolator.SetInputConnection(plane.GetOutputPort())
interpolator.SetSourceData(output)
interpolator.SetKernel(voronoiKernel)
interpolator.SetLocator(locator)
# Time execution
timer = vtk.vtkTimerLog()
timer.StartTimer()
interpolator.Update()
timer.StopTimer()
time = timer.GetElapsedTime()
print("Interpolate Points (Voronoi): {0}".format(time))
intMapper = vtk.vtkPolyDataMapper()
intMapper.SetInputConnection(interpolator.GetOutputPort())
intActor = vtk.vtkActor()
intActor.SetMapper(intMapper)
# Create an outline
outline = vtk.vtkStructuredGridOutlineFilter()
outline.SetInputData(output)
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
# Gaussian kernel-------------------------------------------------------
gaussianKernel = vtk.vtkGaussianKernel()
#gaussianKernel = vtk.vtkEllipsoidalGaussianKernel()
#gaussianKernel.UseScalarsOn()
#gaussianKernel.UseNormalsOn()
gaussianKernel.SetSharpness(4)
gaussianKernel.SetRadius(0.5)
interpolator1 = vtk.vtkPointInterpolator()
interpolator1.SetInputConnection(plane.GetOutputPort())
interpolator1.SetSourceData(output)
interpolator1.SetKernel(gaussianKernel)
interpolator1.SetLocator(locator)
interpolator1.SetNullPointsStrategyToNullValue()
# Time execution
timer.StartTimer()
interpolator1.Update()
timer.StopTimer()
time = timer.GetElapsedTime()
print("Interpolate Points (Gaussian): {0}".format(time))
intMapper1 = vtk.vtkPolyDataMapper()
intMapper1.SetInputConnection(interpolator1.GetOutputPort())
intActor1 = vtk.vtkActor()
intActor1.SetMapper(intMapper1)
# Create an outline
outline1 = vtk.vtkStructuredGridOutlineFilter()
outline1.SetInputData(output)
outlineMapper1 = vtk.vtkPolyDataMapper()
outlineMapper1.SetInputConnection(outline1.GetOutputPort())
outlineActor1 = vtk.vtkActor()
outlineActor1.SetMapper(outlineMapper1)
# Shepard kernel-------------------------------------------------------
shepardKernel = vtk.vtkShepardKernel()
shepardKernel.SetPowerParameter(2)
shepardKernel.SetRadius(0.5)
interpolator2 = vtk.vtkPointInterpolator()
interpolator2.SetInputConnection(plane.GetOutputPort())
interpolator2.SetSourceData(output)
interpolator2.SetKernel(shepardKernel)
interpolator2.SetLocator(locator)
interpolator2.SetNullPointsStrategyToMaskPoints()
# Time execution
timer.StartTimer()
interpolator2.Update()
timer.StopTimer()
time = timer.GetElapsedTime()
print("Interpolate Points (Shepard): {0}".format(time))
intMapper2 = vtk.vtkPolyDataMapper()
intMapper2.SetInputConnection(interpolator2.GetOutputPort())
intActor2 = vtk.vtkActor()
intActor2.SetMapper(intMapper2)
# Create an outline
outline2 = vtk.vtkStructuredGridOutlineFilter()
outline2.SetInputData(output)
outlineMapper2 = vtk.vtkPolyDataMapper()
outlineMapper2.SetInputConnection(outline2.GetOutputPort())
outlineActor2 = vtk.vtkActor()
outlineActor2.SetMapper(outlineMapper2)
# SPH kernel-------------------------------------------------------
SPHKernel = vtk.vtkSPHKernel()
interpolator3 = vtk.vtkPointInterpolator()
interpolator3.SetInputConnection(plane.GetOutputPort())
interpolator3.SetSourceData(output)
interpolator3.SetKernel(voronoiKernel)
#interpolator3.SetKernel(SPHKernel)
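# Note: the SPH kernel created above is not actually used; the Voronoi kernel is set
# instead (see the commented-out SetKernel call), so the timing labeled "SPH" below
# effectively re-measures the Voronoi kernel.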
interpolator3.SetLocator(locator)
# Time execution
timer.StartTimer()
interpolator3.Update()
timer.StopTimer()
time = timer.GetElapsedTime()
print("Interpolate Points (SPH): {0}".format(time))
intMapper3 = vtk.vtkPolyDataMapper()
intMapper3.SetInputConnection(interpolator3.GetOutputPort())
intActor3 = vtk.vtkActor()
intActor3.SetMapper(intMapper3)
# Create an outline
outline3 = vtk.vtkStructuredGridOutlineFilter()
outline3.SetInputData(output)
outlineMapper3 = vtk.vtkPolyDataMapper()
outlineMapper3.SetInputConnection(outline3.GetOutputPort())
outlineActor3 = vtk.vtkActor()
outlineActor3.SetMapper(outlineMapper3)
# Create the RenderWindow, Renderer and both Actors
#
ren0 = vtk.vtkRenderer()
ren0.SetViewport(0,0,.5,.5)
ren1 = vtk.vtkRenderer()
ren1.SetViewport(0.5,0,1,.5)
ren2 = vtk.vtkRenderer()
ren2.SetViewport(0,0.5,.5,1)
ren3 = vtk.vtkRenderer()
ren3.SetViewport(0.5,0.5,1,1)
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren0)
renWin.AddRenderer(ren1)
renWin.AddRenderer(ren2)
renWin.AddRenderer(ren3)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
#
ren0.AddActor(intActor)
ren0.AddActor(outlineActor)
ren0.SetBackground(0.1, 0.2, 0.4)
ren1.AddActor(intActor1)
ren1.AddActor(outlineActor1)
ren1.SetBackground(0.1, 0.2, 0.4)
ren2.AddActor(intActor2)
ren2.AddActor(outlineActor2)
ren2.SetBackground(0.1, 0.2, 0.4)
ren3.AddActor(intActor3)
ren3.AddActor(outlineActor3)
ren3.SetBackground(0.1, 0.2, 0.4)
renWin.SetSize(500, 500)
cam = ren0.GetActiveCamera()
cam.SetClippingRange(3.95297, 50)
cam.SetFocalPoint(8.88908, 0.595038, 29.3342)
cam.SetPosition(-12.3332, 31.7479, 41.2387)
cam.SetViewUp(0.060772, -0.319905, 0.945498)
ren1.SetActiveCamera(cam)
ren2.SetActiveCamera(cam)
ren3.SetActiveCamera(cam)
iren.Initialize()
# render the image
#
renWin.Render()
#iren.Start()
|
In his latest mission, Commander Jeffrey Fuller will go head to head against his most dangerous adversary yet: his own allies. Fuller has a new mission: use his cutting-edge submarine Challenger to recover a German spy claiming to have key information about the Berlin-Boer Axis, information that could be crucial to winning the war. Fuller will have to navigate his super-silent sub through some of the most densely patrolled waters in Europe if he hopes to accomplish his mission. But Fuller knows he can't trust the spy, code-named Zeno, an expert in electronic and information warfare. The man could be a double agent sent by the enemy to compromise Challenger. And when they finally recover the mysterious spy, he reveals that only by helping him infiltrate Israel can he hope to prevent an imminent Axis attack. Fuller is caught in a terrible dilemma: if the man is a double agent, he could be dooming Israel, but if he does nothing, the country could fall to the German assault. To go ahead with the plan would pitch Fuller against the best defences of his own allies, placing his crew in danger and possibly shattering bonds between nations. It is a battle that, if fought, Fuller will have to fight entirely on his own.
Suspenseful. Very interesting book involving submarine warfare. However, too much time is spent on the SEALs and their land mission. If I wanted to read about SEALs I would purchase a book more oriented towards them.
Good book. I would read it again. In a genre with many submarine novels, it does very well. |
#!/usr/bin/env python
"""
"""
import os,sys
import numpy as np
import cPickle as pkl
if __name__ == "__main__":
from optparse import OptionParser
o = OptionParser()
o.set_usage('%prog [options] [pklReduceDict.py DICT]')
o.set_description(__doc__)
o.add_option('--snr',dest='snr',default=100,type='int',
help='SNR value to use (rounds to nearest int value), default: 100')
o.add_option('--info',dest='info',action='store_true',
help='Print parameter information in the dictionary and exit')
o.add_option('--dJ',dest='dJ',default=0.05,type='float',
help='Calibration error to select out, default: 0.05')
o.add_option('-c','--cal',dest='calMode',default='cal',
help='cal mode to use: cal or uncal, default: cal')
o.add_option('-m','--mode',dest='mode',default='rms',
help='Data mode: rms, chi2, sigma ; default: rms')
o.add_option('-r','--rms', dest='rmsMode', default=0, type='int',
        help='Set RMS mode, 0: total intensity, 1: invariant interval, 2: matrix template matching. default: 0')
opts, args = o.parse_args(sys.argv[1:])
print 'Loading PKL file'
reduceDict=pkl.load(open(args[0]))
if opts.info:
snrs=[]
deltaJs=[]
ixrs=[]
for key,val in reduceDict.iteritems():
snrs.append(key[1])
deltaJs.append(key[2]*100.)
ixrs.append(10.*np.log10(1./(key[3]**2)))
snrs=np.array(snrs)
deltaJs=np.array(deltaJs)
ixrs=np.array(ixrs)
print 'SNR:', np.unique(snrs)
        print 'delta J (%):',np.unique(deltaJs)
print 'IXR (dB):', np.unique(ixrs)
exit()
ixrdbs=[]
vals=[]
for key,val in reduceDict.iteritems():
#key: (mode,snr,dJ,IXR,cal/uncal)
#val keys: ['rms', 'chi2', 'avgSigma', 'obsMJD', 'nobs', 'expMJD', 'sigmas']
if key[0]==opts.rmsMode and int(key[1])==opts.snr and key[2]==opts.dJ and key[4].startswith(opts.calMode): #timing mode, snr, dJ, cal mode selection
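            # key[3] holds the IXR value; convert it to dB via 10*log10(1/IXR**2),
            # the same conversion used for the --info listing above.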
ixrdb=10.*np.log10(1./(key[3]**2))
ixrdbs.append(ixrdb)
if opts.mode.startswith('rms'): vals.append(val['rms'])
elif opts.mode.startswith('chi'): vals.append(val['chi2'])
elif opts.mode.startswith('sigma'): vals.append(val['avgSigma'])
ixrdbs=np.array(ixrdbs)
vals=np.array(vals)
idx=np.argsort(ixrdbs)
print 'IXR',ixrdbs[idx]
print 'RMS',vals[idx]
    print 'percent',100.*np.diff(vals[idx])/vals[idx][:-1]
|
These excellent-value rust plant border supports are designed to keep your plants neat and tidy. They can be used at varying heights, singly or in combinations, back to back in lines or in irregular shapes to support your prized or unruly plants.
Please allow up to 10cm/4in to be inserted in the ground; to give sufficient support, the border plant supports should reach halfway up the plant. The supports will achieve their rustic look within about a month.
Made from solid steel, uncoated and ready to rust. A version coated in a durable green finish is also available.
Height 34in (86cm) Width 15in (38cm) Depth 10in (25.5cm).
Delivered within 7 to 10 working days to UK mainland only (Scotland 7 to 12 working days). Unfortunately, we are unable to deliver to Scottish Highlands, or offshore islands (including Isle of Wight and Isle of Man). |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#from math import pi
from assisipy import sim
import argparse
#import random
from assisipy_utils import arena
from assisipy_utils.mgmt import specs
#from assisipy_utils.arena import Transformation
#import yaml
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=
'''
Create a circular wall with some casus outside of the wall,
and spawn bees
''')
parser.add_argument('-n', '--num-bees', type=int, default=0)
parser.add_argument('-ol', '--obj-listing', type=str, default=None)
parser.add_argument('-a', '--arena-file', type=str, default='valid.arena')
parser.add_argument('-l', '--label', type=str, default='popln1-')
parser.add_argument('-e', '--exec-script', type=str, required=True,
help='name of script to execute for each bee in `bee-file`')
args = parser.parse_args()
simctrl = sim.Control()
obj_file = None
if args.obj_listing is not None:
obj_file = open(args.obj_listing, 'w')
specs.write_header(obj_file)
# find out where the bees can go
bl, tr, trans =arena.read_reqs(args.arena_file)
bee_poses = arena.gen_valid_bee_positions((bl, tr), n=args.num_bees, trans=trans)
if args.num_bees > 0:
for i, pts in enumerate(bee_poses):
pose = pts[0].x, pts[0].y, pts[1]
#for i in range(1, args.num_bees+1):
name = '{}-Bee-{:03d}'.format(args.label, i)
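            # Split the population between the two controller configs: the first half
            # of the bees uses gf.conf, the remainder uses wf.conf.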
if i < args.num_bees / 2:
conf = 'gf.conf'
else:
conf = 'wf.conf'
#pose = (random.uniform(-4, 4), random.uniform(-4, 4),
# 2*pi*random.random())
simctrl.spawn('Bee', name, pose)
print 'Spawned bee', name
if obj_file:
s = specs.gen_spec_str(name, 'Bee', pose,
args.exec_script, conf,
'tcp://localhost:5556',
'tcp://localhost:5555',
)
obj_file.write(s + "\n")
if obj_file:
obj_file.close()
print "[I] wrote object listing to {}".format(obj_file.name)
|
We can help you to achieve more for your community by delivering the services local people need and bringing investment and jobs to your local area. We offer discounts on our competitive day rates for Locality members.
Our local team provide support ranging from full governance reviews to tailored action plans for a specific area, with discounts for Locality members.
We can provide support to help you to progress your project and we offer discounts for Locality members. |
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le type Grimoire."""
from bases.objet.attribut import Attribut
from primaires.interpreteur.editeur.uniligne import Uniligne
from primaires.objet.types.base import BaseType
class Grimoire(BaseType):
"""Type d'objet : grimoire.
Ce type d'objet permet d'apprendre un sort, en l'étudiant, si on
est du bon élément. Sinon il se détruit et les points de tribut
du sort sont ajoutés dans les points du lecteur.
"""
nom_type = "grimoire"
def __init__(self, cle=""):
"""Constructeur de l'objet"""
BaseType.__init__(self, cle)
self._cle_sort = ""
self.etendre_editeur("s", "sort", Uniligne, self, "cle_sort")
# Attributes specific to this object (not to the prototype)
self._attributs = {
"proprietaire": Attribut(None),
}
def _get_cle_sort(self):
return self._cle_sort
def _set_cle_sort(self, sort):
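# Only accept keys of spells that already exist in the magic importer.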
sorts = [s.cle for s in type(self).importeur.magie.sorts.values()]
if sort in sorts:
self._cle_sort = sort
cle_sort = property(_get_cle_sort, _set_cle_sort)
@property
def sort(self):
"""Renvoie le sort de ce parchemin."""
if self.cle_sort:
return importeur.magie.sorts[self.cle_sort]
else:
return None
def travailler_enveloppes(self, enveloppes):
"""Travail sur les enveloppes"""
sort = enveloppes["s"]
sort.apercu = "{objet.cle_sort}"
sort.prompt = "Clé du sort : "
sort.aide_courte = \
"Entrez la |ent|clé|ff| du sort appris par ce grimoire. Il " \
"va sans dire que le sort\nen question doit être déjà créé. " \
"Entrez |cmd|/|ff| pour revenir à la fenêtre parente.\n\n" \
"Sort actuel : {objet.cle_sort}"
def acheter(self, quantite, magasin, transaction):
"""Achète le grimoire."""
objets = BaseType.acheter(self, quantite, magasin, transaction)
acheteur = transaction.initiateur
for objet in objets:
objet.proprietaire = acheteur
acheteur.envoyer_tip("Vous êtes propriétaire de ce grimoire. " \
"Utilisez la commande %étudier% pour l'étudier.")
def regarder(self, personnage):
"""Le personnage regarde l'objet."""
sort = self.sort
if sort:
if sort.elements[0] != personnage.element:
return "L'ancre ondule étrangement devant vos yeux... " \
"vous ne pouvez lire ce parchemin."
msg = BaseType.regarder(self, personnage)
points = sort.points_tribut
s = "s" if points > 1 else ""
phrase = "Il vous faut {} point{s} de tribut pour apprendre ce " \
"sort.".format(points, s=s)
msg += "\n\n" + phrase
return msg
|
Refurbished: Very Good condition.
This product is in very good cosmetic condition and although there may be some light signs of use, the item has been fully tested and is in excellent working order. You're getting a great device at a great price!
Samsung Galaxy Note 8 64GB Black T-MOBILE. |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This module's classes provide an interface to mojo modules. Modules are
# collections of interfaces and structs to be used by mojo ipc clients and
# servers.
#
# A simple interface would be created this way:
# module = mojom.generate.module.Module('Foo')
# interface = module.AddInterface('Bar')
# method = interface.AddMethod('Tat', 0)
# method.AddParameter('baz', 0, mojom.INT32)
# We use our own version of __repr__ when displaying the AST, as the
# AST currently doesn't capture which nodes are reference (e.g. to
# types) and which nodes are definitions. This allows us to e.g. print
# the definition of a struct when it's defined inside a module, but
# only print its name when it's referenced in e.g. a method parameter.
def Repr(obj, as_ref=True):
"""A version of __repr__ that can distinguish references.
Sometimes we like to print an object's full representation
(e.g. with its fields) and sometimes we just want to reference an
object that was printed in full elsewhere. This function allows us
to make that distinction.
Args:
obj: The object whose string representation we compute.
as_ref: If True, use the short reference representation.
Returns:
A str representation of |obj|.
"""
if hasattr(obj, 'Repr'):
return obj.Repr(as_ref=as_ref)
# Since we cannot implement Repr for existing container types, we
# handle them here.
elif isinstance(obj, list):
if not obj:
return '[]'
else:
return ('[\n%s\n]' % (',\n'.join(' %s' % Repr(elem, as_ref).replace(
'\n', '\n ') for elem in obj)))
elif isinstance(obj, dict):
if not obj:
return '{}'
else:
return ('{\n%s\n}' % (',\n'.join(' %s: %s' % (
Repr(key, as_ref).replace('\n', '\n '),
Repr(val, as_ref).replace('\n', '\n '))
for key, val in obj.items())))
else:
return repr(obj)
def GenericRepr(obj, names):
"""Compute generic Repr for |obj| based on the attributes in |names|.
Args:
obj: The object to compute a Repr for.
names: A dict from attribute names to include, to booleans
specifying whether those attributes should be shown as
references or not.
Returns:
A str representation of |obj|.
"""
def ReprIndent(name, as_ref):
return ' %s=%s' % (name, Repr(getattr(obj, name), as_ref).replace(
'\n', '\n '))
return '%s(\n%s\n)' % (obj.__class__.__name__, ',\n'.join(
ReprIndent(name, as_ref) for (name, as_ref) in names.items()))
class Kind(object):
"""Kind represents a type (e.g. int8, string).
Attributes:
spec: A string uniquely identifying the type. May be None.
module: {Module} The defining module. Set to None for built-in types.
parent_kind: The enclosing type. For example, an enum defined
inside an interface has that interface as its parent. May be None.
"""
def __init__(self, spec=None, module=None):
self.spec = spec
self.module = module
self.parent_kind = None
def Repr(self, as_ref=True):
return '<%s spec=%r>' % (self.__class__.__name__, self.spec)
def __repr__(self):
# Gives us a decent __repr__ for all kinds.
return self.Repr()
class ReferenceKind(Kind):
"""ReferenceKind represents pointer and handle types.
A type is nullable if null (for pointer types) or invalid handle (for handle
types) is a legal value for the type.
Attributes:
is_nullable: True if the type is nullable.
"""
def __init__(self, spec=None, is_nullable=False, module=None):
assert spec is None or is_nullable == spec.startswith('?')
Kind.__init__(self, spec, module)
self.is_nullable = is_nullable
self.shared_definition = {}
def Repr(self, as_ref=True):
return '<%s spec=%r is_nullable=%r>' % (self.__class__.__name__, self.spec,
self.is_nullable)
def MakeNullableKind(self):
assert not self.is_nullable
if self == STRING:
return NULLABLE_STRING
if self == HANDLE:
return NULLABLE_HANDLE
if self == DCPIPE:
return NULLABLE_DCPIPE
if self == DPPIPE:
return NULLABLE_DPPIPE
if self == MSGPIPE:
return NULLABLE_MSGPIPE
if self == SHAREDBUFFER:
return NULLABLE_SHAREDBUFFER
nullable_kind = type(self)()
nullable_kind.shared_definition = self.shared_definition
if self.spec is not None:
nullable_kind.spec = '?' + self.spec
nullable_kind.is_nullable = True
nullable_kind.parent_kind = self.parent_kind
nullable_kind.module = self.module
return nullable_kind
@classmethod
def AddSharedProperty(cls, name):
"""Adds a property |name| to |cls|, which accesses the corresponding item in
|shared_definition|.
The reason of adding such indirection is to enable sharing definition
between a reference kind and its nullable variation. For example:
a = Struct('test_struct_1')
b = a.MakeNullableKind()
a.name = 'test_struct_2'
print(b.name) # Outputs 'test_struct_2'.
"""
def Get(self):
return self.shared_definition[name]
def Set(self, value):
self.shared_definition[name] = value
setattr(cls, name, property(Get, Set))
# Initialize the set of primitive types. These can be accessed by clients.
BOOL = Kind('b')
INT8 = Kind('i8')
INT16 = Kind('i16')
INT32 = Kind('i32')
INT64 = Kind('i64')
UINT8 = Kind('u8')
UINT16 = Kind('u16')
UINT32 = Kind('u32')
UINT64 = Kind('u64')
FLOAT = Kind('f')
DOUBLE = Kind('d')
STRING = ReferenceKind('s')
HANDLE = ReferenceKind('h')
DCPIPE = ReferenceKind('h:d:c')
DPPIPE = ReferenceKind('h:d:p')
MSGPIPE = ReferenceKind('h:m')
SHAREDBUFFER = ReferenceKind('h:s')
NULLABLE_STRING = ReferenceKind('?s', True)
NULLABLE_HANDLE = ReferenceKind('?h', True)
NULLABLE_DCPIPE = ReferenceKind('?h:d:c', True)
NULLABLE_DPPIPE = ReferenceKind('?h:d:p', True)
NULLABLE_MSGPIPE = ReferenceKind('?h:m', True)
NULLABLE_SHAREDBUFFER = ReferenceKind('?h:s', True)
# Collection of all Primitive types
PRIMITIVES = (
BOOL,
INT8,
INT16,
INT32,
INT64,
UINT8,
UINT16,
UINT32,
UINT64,
FLOAT,
DOUBLE,
STRING,
HANDLE,
DCPIPE,
DPPIPE,
MSGPIPE,
SHAREDBUFFER,
NULLABLE_STRING,
NULLABLE_HANDLE,
NULLABLE_DCPIPE,
NULLABLE_DPPIPE,
NULLABLE_MSGPIPE,
NULLABLE_SHAREDBUFFER
)
ATTRIBUTE_MIN_VERSION = 'MinVersion'
ATTRIBUTE_EXTENSIBLE = 'Extensible'
ATTRIBUTE_SYNC = 'Sync'
class NamedValue(object):
def __init__(self, module, parent_kind, mojom_name):
self.module = module
self.parent_kind = parent_kind
self.mojom_name = mojom_name
def GetSpec(self):
return (self.module.mojom_namespace + '.' +
(self.parent_kind and (self.parent_kind.mojom_name + '.') or "") +
self.mojom_name)
class BuiltinValue(object):
def __init__(self, value):
self.value = value
class ConstantValue(NamedValue):
def __init__(self, module, parent_kind, constant):
NamedValue.__init__(self, module, parent_kind, constant.mojom_name)
self.constant = constant
@property
def name(self):
return self.constant.name
class EnumValue(NamedValue):
def __init__(self, module, enum, field):
NamedValue.__init__(self, module, enum.parent_kind, field.mojom_name)
self.field = field
self.enum = enum
def GetSpec(self):
return (self.module.mojom_namespace + '.' +
(self.parent_kind and (self.parent_kind.mojom_name + '.') or "") +
self.enum.mojom_name + '.' + self.mojom_name)
@property
def name(self):
return self.field.name
class Constant(object):
def __init__(self, mojom_name=None, kind=None, value=None, parent_kind=None):
self.mojom_name = mojom_name
self.kind = kind
self.value = value
self.parent_kind = parent_kind
def Stylize(self, stylizer):
self.name = stylizer.StylizeConstant(self.mojom_name)
class Field(object):
def __init__(self, mojom_name=None, kind=None, ordinal=None, default=None,
attributes=None):
if self.__class__.__name__ == 'Field':
raise Exception()
self.mojom_name = mojom_name
self.kind = kind
self.ordinal = ordinal
self.default = default
self.attributes = attributes
def Repr(self, as_ref=True):
# Fields are only referenced by objects which define them and thus
# they are always displayed as non-references.
return GenericRepr(self, {'mojom_name': False, 'kind': True})
def Stylize(self, stylizer):
self.name = stylizer.StylizeField(self.mojom_name)
@property
def min_version(self):
return self.attributes.get(ATTRIBUTE_MIN_VERSION) \
if self.attributes else None
class StructField(Field): pass
class UnionField(Field): pass
class Struct(ReferenceKind):
"""A struct with typed fields.
Attributes:
mojom_name: {str} The name of the struct type as defined in mojom.
name: {str} The stylized name.
native_only: {bool} Does the struct have a body (i.e. any fields) or is it
purely a native struct.
custom_serializer: {bool} Should we generate a serializer for the struct or
will one be provided by non-generated code.
fields: {List[StructField]} The members of the struct.
enums: {List[Enum]} The enums defined in the struct scope.
constants: {List[Constant]} The constants defined in the struct scope.
attributes: {dict} Additional information about the struct, such as
if it's a native struct.
"""
ReferenceKind.AddSharedProperty('mojom_name')
ReferenceKind.AddSharedProperty('name')
ReferenceKind.AddSharedProperty('native_only')
ReferenceKind.AddSharedProperty('custom_serializer')
ReferenceKind.AddSharedProperty('fields')
ReferenceKind.AddSharedProperty('enums')
ReferenceKind.AddSharedProperty('constants')
ReferenceKind.AddSharedProperty('attributes')
def __init__(self, mojom_name=None, module=None, attributes=None):
if mojom_name is not None:
spec = 'x:' + mojom_name
else:
spec = None
ReferenceKind.__init__(self, spec, False, module)
self.mojom_name = mojom_name
self.native_only = False
self.custom_serializer = False
self.fields = []
self.enums = []
self.constants = []
self.attributes = attributes
def Repr(self, as_ref=True):
if as_ref:
return '<%s mojom_name=%r module=%s>' % (
self.__class__.__name__, self.mojom_name,
Repr(self.module, as_ref=True))
else:
return GenericRepr(self,
{'mojom_name': False, 'fields': False, 'module': True})
def AddField(self, mojom_name, kind, ordinal=None, default=None,
attributes=None):
field = StructField(mojom_name, kind, ordinal, default, attributes)
self.fields.append(field)
return field
def Stylize(self, stylizer):
self.name = stylizer.StylizeStruct(self.mojom_name)
for field in self.fields:
field.Stylize(stylizer)
for enum in self.enums:
enum.Stylize(stylizer)
for constant in self.constants:
constant.Stylize(stylizer)
class Union(ReferenceKind):
"""A union of several kinds.
Attributes:
mojom_name: {str} The name of the union type as defined in mojom.
name: {str} The stylized name.
fields: {List[UnionField]} The members of the union.
attributes: {dict} Additional information about the union, such as
which Java class name to use to represent it in the generated
bindings.
"""
ReferenceKind.AddSharedProperty('mojom_name')
ReferenceKind.AddSharedProperty('name')
ReferenceKind.AddSharedProperty('fields')
ReferenceKind.AddSharedProperty('attributes')
def __init__(self, mojom_name=None, module=None, attributes=None):
if mojom_name is not None:
spec = 'x:' + mojom_name
else:
spec = None
ReferenceKind.__init__(self, spec, False, module)
self.mojom_name = mojom_name
self.fields = []
self.attributes = attributes
def Repr(self, as_ref=True):
if as_ref:
return '<%s spec=%r is_nullable=%r fields=%s>' % (
self.__class__.__name__, self.spec, self.is_nullable,
Repr(self.fields))
else:
return GenericRepr(self, {'fields': True, 'is_nullable': False})
def AddField(self, mojom_name, kind, ordinal=None, attributes=None):
field = UnionField(mojom_name, kind, ordinal, None, attributes)
self.fields.append(field)
return field
def Stylize(self, stylizer):
self.name = stylizer.StylizeUnion(self.mojom_name)
for field in self.fields:
field.Stylize(stylizer)
class Array(ReferenceKind):
"""An array.
Attributes:
kind: {Kind} The type of the elements. May be None.
length: The number of elements. None if unknown.
"""
ReferenceKind.AddSharedProperty('kind')
ReferenceKind.AddSharedProperty('length')
def __init__(self, kind=None, length=None):
if kind is not None:
if length is not None:
spec = 'a%d:%s' % (length, kind.spec)
else:
spec = 'a:%s' % kind.spec
ReferenceKind.__init__(self, spec)
else:
ReferenceKind.__init__(self)
self.kind = kind
self.length = length
def Repr(self, as_ref=True):
if as_ref:
return '<%s spec=%r is_nullable=%r kind=%s length=%r>' % (
self.__class__.__name__, self.spec, self.is_nullable, Repr(self.kind),
self.length)
else:
return GenericRepr(self, {'kind': True, 'length': False,
'is_nullable': False})
class Map(ReferenceKind):
"""A map.
Attributes:
key_kind: {Kind} The type of the keys. May be None.
value_kind: {Kind} The type of the elements. May be None.
"""
ReferenceKind.AddSharedProperty('key_kind')
ReferenceKind.AddSharedProperty('value_kind')
def __init__(self, key_kind=None, value_kind=None):
if (key_kind is not None and value_kind is not None):
ReferenceKind.__init__(self,
'm[' + key_kind.spec + '][' + value_kind.spec +
']')
if IsNullableKind(key_kind):
raise Exception("Nullable kinds cannot be keys in maps.")
if IsAnyHandleKind(key_kind):
raise Exception("Handles cannot be keys in maps.")
if IsAnyInterfaceKind(key_kind):
raise Exception("Interfaces cannot be keys in maps.")
if IsArrayKind(key_kind):
raise Exception("Arrays cannot be keys in maps.")
else:
ReferenceKind.__init__(self)
self.key_kind = key_kind
self.value_kind = value_kind
def Repr(self, as_ref=True):
if as_ref:
return '<%s spec=%r is_nullable=%r key_kind=%s value_kind=%s>' % (
self.__class__.__name__, self.spec, self.is_nullable,
Repr(self.key_kind), Repr(self.value_kind))
else:
return GenericRepr(self, {'key_kind': True, 'value_kind': True})
class PendingRemote(ReferenceKind):
ReferenceKind.AddSharedProperty('kind')
def __init__(self, kind=None):
if kind is not None:
if not isinstance(kind, Interface):
raise Exception(
'pending_remote<T> requires T to be an interface type. Got %r' %
kind.spec)
ReferenceKind.__init__(self, 'rmt:' + kind.spec)
else:
ReferenceKind.__init__(self)
self.kind = kind
class PendingReceiver(ReferenceKind):
ReferenceKind.AddSharedProperty('kind')
def __init__(self, kind=None):
if kind is not None:
if not isinstance(kind, Interface):
raise Exception(
'pending_receiver<T> requires T to be an interface type. Got %r' %
kind.spec)
ReferenceKind.__init__(self, 'rcv:' + kind.spec)
else:
ReferenceKind.__init__(self)
self.kind = kind
class PendingAssociatedRemote(ReferenceKind):
ReferenceKind.AddSharedProperty('kind')
def __init__(self, kind=None):
if kind is not None:
if not isinstance(kind, Interface):
raise Exception(
'pending_associated_remote<T> requires T to be an interface ' +
'type. Got %r' % kind.spec)
ReferenceKind.__init__(self, 'rma:' + kind.spec)
else:
ReferenceKind.__init__(self)
self.kind = kind
class PendingAssociatedReceiver(ReferenceKind):
ReferenceKind.AddSharedProperty('kind')
def __init__(self, kind=None):
if kind is not None:
if not isinstance(kind, Interface):
raise Exception(
'pending_associated_receiver<T> requires T to be an interface ' +
'type. Got %r' % kind.spec)
ReferenceKind.__init__(self, 'rca:' + kind.spec)
else:
ReferenceKind.__init__(self)
self.kind = kind
class InterfaceRequest(ReferenceKind):
ReferenceKind.AddSharedProperty('kind')
def __init__(self, kind=None):
if kind is not None:
if not isinstance(kind, Interface):
raise Exception(
"Interface request requires %r to be an interface." % kind.spec)
ReferenceKind.__init__(self, 'r:' + kind.spec)
else:
ReferenceKind.__init__(self)
self.kind = kind
class AssociatedInterfaceRequest(ReferenceKind):
ReferenceKind.AddSharedProperty('kind')
def __init__(self, kind=None):
if kind is not None:
if not isinstance(kind, InterfaceRequest):
raise Exception(
"Associated interface request requires %r to be an interface "
"request." % kind.spec)
assert not kind.is_nullable
ReferenceKind.__init__(self, 'asso:' + kind.spec)
else:
ReferenceKind.__init__(self)
self.kind = kind.kind if kind is not None else None
class Parameter(object):
def __init__(self, mojom_name=None, kind=None, ordinal=None, default=None,
attributes=None):
self.mojom_name = mojom_name
self.ordinal = ordinal
self.kind = kind
self.default = default
self.attributes = attributes
def Repr(self, as_ref=True):
return '<%s mojom_name=%r kind=%s>' % (
self.__class__.__name__, self.mojom_name, self.kind.Repr(as_ref=True))
def Stylize(self, stylizer):
self.name = stylizer.StylizeParameter(self.mojom_name)
@property
def min_version(self):
return self.attributes.get(ATTRIBUTE_MIN_VERSION) \
if self.attributes else None
class Method(object):
def __init__(self, interface, mojom_name, ordinal=None, attributes=None):
self.interface = interface
self.mojom_name = mojom_name
self.ordinal = ordinal
self.parameters = []
self.param_struct = None
self.response_parameters = None
self.response_param_struct = None
self.attributes = attributes
def Repr(self, as_ref=True):
if as_ref:
return '<%s mojom_name=%r>' % (self.__class__.__name__, self.mojom_name)
else:
return GenericRepr(self, {'mojom_name': False, 'parameters': True,
'response_parameters': True})
def AddParameter(self, mojom_name, kind, ordinal=None, default=None,
attributes=None):
parameter = Parameter(mojom_name, kind, ordinal, default, attributes)
self.parameters.append(parameter)
return parameter
def AddResponseParameter(self, mojom_name, kind, ordinal=None, default=None,
attributes=None):
if self.response_parameters is None:
self.response_parameters = []
parameter = Parameter(mojom_name, kind, ordinal, default, attributes)
self.response_parameters.append(parameter)
return parameter
def Stylize(self, stylizer):
self.name = stylizer.StylizeMethod(self.mojom_name)
for param in self.parameters:
param.Stylize(stylizer)
if self.response_parameters is not None:
for param in self.response_parameters:
param.Stylize(stylizer)
if self.param_struct:
self.param_struct.Stylize(stylizer)
if self.response_param_struct:
self.response_param_struct.Stylize(stylizer)
@property
def min_version(self):
return self.attributes.get(ATTRIBUTE_MIN_VERSION) \
if self.attributes else None
@property
def sync(self):
return self.attributes.get(ATTRIBUTE_SYNC) \
if self.attributes else None
class Interface(ReferenceKind):
ReferenceKind.AddSharedProperty('mojom_name')
ReferenceKind.AddSharedProperty('name')
ReferenceKind.AddSharedProperty('methods')
ReferenceKind.AddSharedProperty('enums')
ReferenceKind.AddSharedProperty('constants')
ReferenceKind.AddSharedProperty('attributes')
def __init__(self, mojom_name=None, module=None, attributes=None):
if mojom_name is not None:
spec = 'x:' + mojom_name
else:
spec = None
ReferenceKind.__init__(self, spec, False, module)
self.mojom_name = mojom_name
self.methods = []
self.enums = []
self.constants = []
self.attributes = attributes
def Repr(self, as_ref=True):
if as_ref:
return '<%s mojom_name=%r>' % (self.__class__.__name__, self.mojom_name)
else:
return GenericRepr(self, {'mojom_name': False, 'attributes': False,
'methods': False})
def AddMethod(self, mojom_name, ordinal=None, attributes=None):
method = Method(self, mojom_name, ordinal, attributes)
self.methods.append(method)
return method
def Stylize(self, stylizer):
self.name = stylizer.StylizeInterface(self.mojom_name)
for method in self.methods:
method.Stylize(stylizer)
for enum in self.enums:
enum.Stylize(stylizer)
for constant in self.constants:
constant.Stylize(stylizer)
class AssociatedInterface(ReferenceKind):
ReferenceKind.AddSharedProperty('kind')
def __init__(self, kind=None):
if kind is not None:
if not isinstance(kind, Interface):
raise Exception(
"Associated interface requires %r to be an interface." % kind.spec)
assert not kind.is_nullable
ReferenceKind.__init__(self, 'asso:' + kind.spec)
else:
ReferenceKind.__init__(self)
self.kind = kind
class EnumField(object):
def __init__(self, mojom_name=None, value=None, attributes=None,
numeric_value=None):
self.mojom_name = mojom_name
self.value = value
self.attributes = attributes
self.numeric_value = numeric_value
def Stylize(self, stylizer):
self.name = stylizer.StylizeEnumField(self.mojom_name)
@property
def min_version(self):
return self.attributes.get(ATTRIBUTE_MIN_VERSION) \
if self.attributes else None
class Enum(Kind):
def __init__(self, mojom_name=None, module=None, attributes=None):
self.mojom_name = mojom_name
self.native_only = False
if mojom_name is not None:
spec = 'x:' + mojom_name
else:
spec = None
Kind.__init__(self, spec, module)
self.fields = []
self.attributes = attributes
self.min_value = None
self.max_value = None
def Repr(self, as_ref=True):
if as_ref:
return '<%s mojom_name=%r>' % (self.__class__.__name__, self.mojom_name)
else:
return GenericRepr(self, {'mojom_name': False, 'fields': False})
def Stylize(self, stylizer):
self.name = stylizer.StylizeEnum(self.mojom_name)
for field in self.fields:
field.Stylize(stylizer)
@property
def extensible(self):
return self.attributes.get(ATTRIBUTE_EXTENSIBLE, False) \
if self.attributes else False
class Module(object):
def __init__(self, path=None, mojom_namespace=None,
attributes=None):
self.path = path
self.mojom_namespace = mojom_namespace
self.structs = []
self.unions = []
self.interfaces = []
self.enums = []
self.constants = []
self.kinds = {}
self.attributes = attributes
self.imports = []
self.imported_kinds = {}
def __repr__(self):
# Gives us a decent __repr__ for modules.
return self.Repr()
def Repr(self, as_ref=True):
if as_ref:
return '<%s path=%r mojom_namespace=%r>' % (
self.__class__.__name__, self.path, self.mojom_namespace)
else:
return GenericRepr(self, {'path': False, 'mojom_namespace': False,
'attributes': False, 'structs': False,
'interfaces': False, 'unions': False})
def AddInterface(self, mojom_name, attributes=None):
interface = Interface(mojom_name, self, attributes)
self.interfaces.append(interface)
return interface
def AddStruct(self, mojom_name, attributes=None):
struct = Struct(mojom_name, self, attributes)
self.structs.append(struct)
return struct
def AddUnion(self, mojom_name, attributes=None):
union = Union(mojom_name, self, attributes)
self.unions.append(union)
return union
def Stylize(self, stylizer):
self.namespace = stylizer.StylizeModule(self.mojom_namespace)
for struct in self.structs:
struct.Stylize(stylizer)
for union in self.unions:
union.Stylize(stylizer)
for interface in self.interfaces:
interface.Stylize(stylizer)
for enum in self.enums:
enum.Stylize(stylizer)
for constant in self.constants:
constant.Stylize(stylizer)
for imported_module in self.imports:
imported_module.Stylize(stylizer)
def IsBoolKind(kind):
return kind.spec == BOOL.spec
def IsFloatKind(kind):
return kind.spec == FLOAT.spec
def IsDoubleKind(kind):
return kind.spec == DOUBLE.spec
def IsIntegralKind(kind):
return (kind.spec == BOOL.spec or
kind.spec == INT8.spec or
kind.spec == INT16.spec or
kind.spec == INT32.spec or
kind.spec == INT64.spec or
kind.spec == UINT8.spec or
kind.spec == UINT16.spec or
kind.spec == UINT32.spec or
kind.spec == UINT64.spec)
def IsStringKind(kind):
return kind.spec == STRING.spec or kind.spec == NULLABLE_STRING.spec
def IsGenericHandleKind(kind):
return kind.spec == HANDLE.spec or kind.spec == NULLABLE_HANDLE.spec
def IsDataPipeConsumerKind(kind):
return kind.spec == DCPIPE.spec or kind.spec == NULLABLE_DCPIPE.spec
def IsDataPipeProducerKind(kind):
return kind.spec == DPPIPE.spec or kind.spec == NULLABLE_DPPIPE.spec
def IsMessagePipeKind(kind):
return kind.spec == MSGPIPE.spec or kind.spec == NULLABLE_MSGPIPE.spec
def IsSharedBufferKind(kind):
return (kind.spec == SHAREDBUFFER.spec or
kind.spec == NULLABLE_SHAREDBUFFER.spec)
def IsStructKind(kind):
return isinstance(kind, Struct)
def IsUnionKind(kind):
return isinstance(kind, Union)
def IsArrayKind(kind):
return isinstance(kind, Array)
def IsInterfaceKind(kind):
return isinstance(kind, Interface)
def IsAssociatedInterfaceKind(kind):
return isinstance(kind, AssociatedInterface)
def IsInterfaceRequestKind(kind):
return isinstance(kind, InterfaceRequest)
def IsAssociatedInterfaceRequestKind(kind):
return isinstance(kind, AssociatedInterfaceRequest)
def IsPendingRemoteKind(kind):
return isinstance(kind, PendingRemote)
def IsPendingReceiverKind(kind):
return isinstance(kind, PendingReceiver)
def IsPendingAssociatedRemoteKind(kind):
return isinstance(kind, PendingAssociatedRemote)
def IsPendingAssociatedReceiverKind(kind):
return isinstance(kind, PendingAssociatedReceiver)
def IsEnumKind(kind):
return isinstance(kind, Enum)
def IsReferenceKind(kind):
return isinstance(kind, ReferenceKind)
def IsNullableKind(kind):
return IsReferenceKind(kind) and kind.is_nullable
def IsMapKind(kind):
return isinstance(kind, Map)
def IsObjectKind(kind):
return IsPointerKind(kind) or IsUnionKind(kind)
def IsPointerKind(kind):
return (IsStructKind(kind) or IsArrayKind(kind) or IsStringKind(kind) or
IsMapKind(kind))
# Please note that it doesn't include any interface kind.
def IsAnyHandleKind(kind):
return (IsGenericHandleKind(kind) or
IsDataPipeConsumerKind(kind) or
IsDataPipeProducerKind(kind) or
IsMessagePipeKind(kind) or
IsSharedBufferKind(kind))
def IsAnyInterfaceKind(kind):
return (IsInterfaceKind(kind) or IsInterfaceRequestKind(kind) or
IsAssociatedKind(kind) or IsPendingRemoteKind(kind) or
IsPendingReceiverKind(kind))
def IsAnyHandleOrInterfaceKind(kind):
return IsAnyHandleKind(kind) or IsAnyInterfaceKind(kind)
def IsAssociatedKind(kind):
return (IsAssociatedInterfaceKind(kind) or
IsAssociatedInterfaceRequestKind(kind) or
IsPendingAssociatedRemoteKind(kind) or
IsPendingAssociatedReceiverKind(kind))
def HasCallbacks(interface):
for method in interface.methods:
if method.response_parameters is not None:
return True
return False
# Finds out whether an interface passes associated interfaces and associated
# interface requests.
def PassesAssociatedKinds(interface):
visited_kinds = set()
for method in interface.methods:
if MethodPassesAssociatedKinds(method, visited_kinds):
return True
return False
def _AnyMethodParameterRecursive(method, predicate, visited_kinds=None):
def _HasProperty(kind):
if kind in visited_kinds:
# No need to examine the kind again.
return False
visited_kinds.add(kind)
if predicate(kind):
return True
if IsArrayKind(kind):
return _HasProperty(kind.kind)
if IsStructKind(kind) or IsUnionKind(kind):
for field in kind.fields:
if _HasProperty(field.kind):
return True
if IsMapKind(kind):
if _HasProperty(kind.key_kind) or _HasProperty(kind.value_kind):
return True
return False
if visited_kinds is None:
visited_kinds = set()
for param in method.parameters:
if _HasProperty(param.kind):
return True
if method.response_parameters is not None:
for param in method.response_parameters:
if _HasProperty(param.kind):
return True
return False
# Finds out whether a method passes associated interfaces and associated
# interface requests.
def MethodPassesAssociatedKinds(method, visited_kinds=None):
return _AnyMethodParameterRecursive(method, IsAssociatedKind,
visited_kinds=visited_kinds)
# Determines whether a method passes interfaces.
def MethodPassesInterfaces(method):
return _AnyMethodParameterRecursive(method, IsInterfaceKind)
def HasSyncMethods(interface):
for method in interface.methods:
if method.sync:
return True
return False
def ContainsHandlesOrInterfaces(kind):
"""Check if the kind contains any handles.
This check is recursive so it checks all struct fields, containers elements,
etc.
Args:
struct: {Kind} The kind to check.
Returns:
{bool}: True if the kind contains handles.
"""
# We remember the types we already checked to avoid infinite recursion when
# checking recursive (or mutually recursive) types:
checked = set()
def Check(kind):
if kind.spec in checked:
return False
checked.add(kind.spec)
if IsStructKind(kind):
return any(Check(field.kind) for field in kind.fields)
elif IsUnionKind(kind):
return any(Check(field.kind) for field in kind.fields)
elif IsAnyHandleKind(kind):
return True
elif IsAnyInterfaceKind(kind):
return True
elif IsArrayKind(kind):
return Check(kind.kind)
elif IsMapKind(kind):
return Check(kind.key_kind) or Check(kind.value_kind)
else:
return False
return Check(kind)
def ContainsNativeTypes(kind):
"""Check if the kind contains any native type (struct or enum).
This check is recursive so it checks all struct fields, scoped interface
enums, etc.
Args:
struct: {Kind} The kind to check.
Returns:
{bool}: True if the kind contains native types.
"""
# We remember the types we already checked to avoid infinite recursion when
# checking recursive (or mutually recursive) types:
checked = set()
def Check(kind):
if kind.spec in checked:
return False
checked.add(kind.spec)
if IsEnumKind(kind):
return kind.native_only
elif IsStructKind(kind):
if kind.native_only:
return True
if any(enum.native_only for enum in kind.enums):
return True
return any(Check(field.kind) for field in kind.fields)
elif IsUnionKind(kind):
return any(Check(field.kind) for field in kind.fields)
elif IsInterfaceKind(kind):
return any(enum.native_only for enum in kind.enums)
elif IsArrayKind(kind):
return Check(kind.kind)
elif IsMapKind(kind):
return Check(kind.key_kind) or Check(kind.value_kind)
else:
return False
return Check(kind)
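# Illustrative usage sketch (not part of the original file, assumed from the
# signatures above): build a tiny module and render its full representation.
def _example_build_module():
    module = Module('foo.mojom', 'foo')
    interface = module.AddInterface('Bar')
    method = interface.AddMethod('Tat', ordinal=0)
    method.AddParameter('baz', INT32, ordinal=0)
    return Repr(module, as_ref=False)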
|
After four-plus years in the making I'm super excited to be launching my debut novel, With the Music. There'll be music, (brief) speeches from awesome people, nibbles, drinks and fun. So come along to the Queensberry Hotel on the 27th of May. For catering purposes, it would be fantastic if you RSVP here. |
import itertools
import re
import os
end_space = re.compile(r"([^\\]\s)*$")
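# fnmatch_translate (below) converts a gitignore-style pattern into a compiled
# regular expression; when path_name is True, "*" and "?" do not match "/".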
def fnmatch_translate(pat, path_name=False):
parts = []
seq = False
i = 0
if pat[0] == "/" or path_name:
parts.append("^")
any_char = "[^/]"
if pat[0] == "/":
pat = pat[1:]
else:
any_char = "."
parts.append("^(?:.*/)?")
if pat[-1] == "/":
# If the last character is / match this directory or any subdirectory
pat = pat[:-1]
suffix = "(?:/|$)"
else:
suffix = "$"
while i < len(pat):
c = pat[i]
if c == "\\":
if i < len(pat) - 1:
i += 1
c = pat[i]
parts.append(re.escape(c))
else:
raise ValueError
elif seq:
if c == "]":
seq = False
# First two cases are to deal with the case where / is the only character
# in the sequence but path_name is True so it shouldn't match anything
if parts[-1] == "[":
parts = parts[:-1]
elif parts[-1] == "^" and parts[-2] == "[":
parts = parts[:-2]
else:
parts.append(c)
elif c == "-":
parts.append(c)
elif not (path_name and c == "/"):
parts += re.escape(c)
elif c == "[":
parts.append("[")
if i < len(pat) - 1 and pat[i+1] in ("!", "^"):
parts.append("^")
i += 1
seq = True
elif c == "*":
if i < len(pat) - 1 and pat[i+1] == "*":
parts.append(any_char + "*")
i += 1
if i < len(pat) - 1 and pat[i+1] == "*":
raise ValueError
else:
parts.append(any_char + "*")
elif c == "?":
parts.append(any_char)
else:
parts.append(re.escape(c))
i += 1
if seq:
raise ValueError
parts.append(suffix)
return re.compile("".join(parts))
def parse_line(line):
line = line.rstrip()
if not line or line[0] == "#":
return
invert = line[0] == "!"
if invert:
line = line[1:]
dir_only = line[-1] == "/"
if dir_only:
line = line[:-1]
return invert, dir_only, fnmatch_translate(line, dir_only)
class PathFilter(object):
def __init__(self, root, extras=None):
if root:
ignore_path = os.path.join(root, ".gitignore")
else:
ignore_path = None
if not ignore_path and not extras:
self.trivial = True
return
self.trivial = False
self.rules_file = []
self.rules_dir = []
if extras is None:
extras = []
if ignore_path and os.path.exists(ignore_path):
self._read_ignore(ignore_path)
for item in extras:
self._read_line(item)
def _read_ignore(self, ignore_path):
with open(ignore_path) as f:
for line in f:
self._read_line(line)
def _read_line(self, line):
parsed = parse_line(line)
if not parsed:
return
invert, dir_only, regexp = parsed
if dir_only:
self.rules_dir.append((regexp, invert))
else:
self.rules_file.append((regexp, invert))
def __call__(self, path):
if os.path.sep != "/":
path = path.replace(os.path.sep, "/")
if self.trivial:
return True
path_is_dir = path[-1] == "/"
if path_is_dir:
path = path[:-1]
rules = self.rules_dir
else:
rules = self.rules_file
include = True
for regexp, invert in rules:
if not include and invert and regexp.match(path):
include = True
elif include and not invert and regexp.match(path):
include = False
return include
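# Illustrative usage sketch (not part of the original module): apply extra
# gitignore-style rules without reading a .gitignore file from disk.
def _example_filter():
    path_filter = PathFilter(None, extras=["*.pyc"])
    # "a.py" is kept; "a.pyc" is excluded by the extra rule.
    return [p for p in ("a.py", "a.pyc") if path_filter(p)]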
|
Parents and policy-makers share two broad concerns when it comes to young children from infancy to age 5. The first is the care and nurturing of the children, ensuring their health and well-being. The second has to do with developing their minds and preparing children for kindergarten and elementary school.
For a variety of reasons, families in the U.S. face a fragmented system of care and an often confusing array of preschool options. Young children from low-income families are at the greatest risk of getting off to a poor educational start, and educational researchers will tell you that we still lack a theory of change for how best to prepare children from impoverished environments for long-term school success.
My colleague, Professor Dale Farran, has studied many different pre-K educational curricula. With her fellow scholars at the Peabody Research Institute, she is currently conducting an evaluation of Tennessee’s Voluntary Pre-K program. According to Dale, policy-makers tend to fall back on concrete skills such as letter recognition, phonemic awareness (understanding letter-sound combinations), or counting as measurements of a given program’s success. While these skills are important, Dale’s research suggests that other qualities, especially a child’s ability to regulate their own behaviors and sustain concentration and focus, may be much more important for long-term success.
Positive teacher affect. For example, does the teacher smile often? Does she or he appear to enjoy the children and take joy in their learning?
Positive reinforcement. Does the teacher spend more time offering positive or negative comments about children’s behavior? When teachers are frequently disapproving, the teacher is the one regulating the child’s behavior, not the child.
Anticipation. Does the teacher foresee classroom problems developing and offer guidance or redirection before things get out of hand?
Rich activities. Does the classroom feature well-stocked centers with a variety of activities, including math activities in addition to storybooks?
Solo time. Are children allowed time for individual, imaginative play? When children play on their own, they build concentration and focus.
While pre-K programs like Waldorf or Montessori have real philosophical differences and their own merits, Dale notes that many pre-K curricula are largely transferable. Families that can be selective in their choices should be sure to gauge factors like those above.
Unfortunately, families that qualify for Head Start or state-funded pre-K may have fewer options from which to choose. Even so, family members should be sure to visit their child’s classroom on a regular basis to monitor quality of care and provide feedback to the teacher. |
"""
davies.pockettopo: Module for parsing and working with exported PocketTopo survey data
"""
from __future__ import division
from __future__ import print_function
import re
import codecs
import logging
from datetime import datetime
from collections import OrderedDict, defaultdict
log = logging.getLogger(__name__)
__all__ = 'TxtFile', 'Survey', 'MergingSurvey', 'Shot', 'PocketTopoTxtParser'
# TODO: properly handle zero-length shots with both from/to (station equivalence)
# TODO: older versions didn't specify units?
class Shot(OrderedDict):
"""
Representation of a single shot in a PocketTopo Survey.
:kwarg FROM: (str) from station
:kwarg TO: (str) optional to station
:kwarg LENGTH: (float) distance
:kwarg AZM: (float) compass
:kwarg INC: (float) inclination
:kwarg COMMENT: (str)
:kwarg declination: (float) optional
:ivar declination: (float) set or get the applied magnetic declination for the shot
"""
def __init__(self, *args, **kwargs):
self.declination = kwargs.pop('declination', 0.0)
OrderedDict.__init__(self, *args, **kwargs)
self.dupe_count = 1 # denotes averaged backsights (2) and triple-shots (3)
@property
def azm(self):
"""Corrected azimuth, taking into account declination."""
return self.get('AZM', -0.0) + self.declination
@property
def inc(self):
"""Corrected inclination."""
return self.get('INC', -0.0)
@property
def length(self):
"""Corrected distance."""
return self.get('LENGTH', -0.0)
@property
def is_splay(self):
"""Is this shot a "splay shot"?"""
return self.get('TO', None) in (None, '')
def __str__(self):
return ', '.join('%s=%s' % (k,v) for (k,v) in self.items())
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self)
class Survey(object):
"""
Representation of a PocketTopo Survey object. A Survey is a container for :class:`Shot` objects.
"""
def __init__(self, name=None, date=None, comment=None, declination=0.0, cave_name=None, length_units='m', angle_units=360, shots=None):
self.name = name
self.date = date
self.comment = comment
self.declination = declination
self.cave_name = cave_name
self.length_units = length_units
self.angle_units = angle_units
self.shots = []
self.splays = defaultdict(list)
if shots:
for shot in shots:
    self.add_shot(shot)
def add_shot(self, shot):
"""Add a Shot to :attr:`shots`, applying our survey's :attr:`declination` to it."""
shot.declination = self.declination
if shot.is_splay:
self.splays[shot['FROM']].append(shot)
self.shots.append(shot)
@property
def length(self):
"""Total surveyed cave length, not including splays."""
return sum([shot.length for shot in self.shots if not shot.is_splay])
@property
def total_length(self):
"""Total surveyed length including splays."""
return sum([shot.length for shot in self.shots])
def __len__(self):
return len(self.shots)
def __iter__(self):
for shot in self.shots:
yield shot
def __contains__(self, item):
for shot in self.shots:
if item in (shot.get('FROM', None), shot.get('TO', None)):
return True
return False
def __str__(self):
return self.name
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self.name)
# def _serialize(self):
# return []
class MergingSurvey(Survey):
"""
Representation of a PocketTopo Survey object. A Survey is a container for :class:`Shot` objects.
This Survey implementation merges "duplicate" shots into a single averaged shot.
PocketTopo (and DistoX) convention is to use triple forward shots for mainline survey. When
adding a new shot to this class with `add_shot()`, if we detect that the previous shot was
between the same two stations, we average values and merge the two together instead of appending
the duplicate shot. We use a "running" mean algorithm, so that this feature works for any number
of subsequent duplicate shots (two, three, four...).
"""
# For performance, we only look backwards at the immediately preceding shots!
def _inverse_azm(self, azm):
"""Convert forward AZM to back AZM and vice versa"""
return (azm + self.angle_units/2) % self.angle_units
def _inverse_inc(self, inc):
"""Convert forward INC to back INC and vice versa"""
return -1 * inc
def add_shot(self, shot):
"""
Add a shot dictionary to :attr:`shots`, applying our survey's :attr:`declination`, and
optionally averaging and merging with duplicate previous shot.
"""
if not self.shots or not shot.get('TO', None) or not self.shots[-1].get('TO', None):
return super(MergingSurvey, self).add_shot(shot)
from_, to = shot['FROM'], shot['TO']
prev_shot = self.shots[-1]
prev_from, prev_to = prev_shot['FROM'], prev_shot['TO']
if from_ == prev_from and to == prev_to:
# dupe shot! calculate iterative "running" mean and merge into the previous shot
total_count = prev_shot.dupe_count + 1
log.debug('Merging %d shots "%s" <- "%s"', total_count, prev_shot, shot)
if abs(shot['AZM'] - prev_shot['AZM']) > 2.0:
log.warning('Merged forward AZM disagreement of %0.1f for "%s" <- "%s"', abs(shot['AZM'] - prev_shot['AZM']), prev_shot, shot)
if abs(shot['INC'] - prev_shot['INC']) > 2.0:
log.warning('Merged forward INC disagreement of %0.1f for "%s" <- "%s"', abs(shot['INC'] - prev_shot['INC']), prev_shot, shot)
if abs(shot['LENGTH'] - prev_shot['LENGTH']) > 1.0:
log.warning('Merged forward LENGTH disagreement of %0.1f for "%s" <- "%s"', abs(shot['LENGTH'] - prev_shot['LENGTH']), prev_shot, shot)
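# Running mean over all merged duplicates: new_avg = (prev_avg * n_prev + new_value) / (n_prev + 1)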
avg_length = (prev_shot['LENGTH'] * prev_shot.dupe_count + shot['LENGTH']) / total_count
avg_azm = (prev_shot['AZM'] * prev_shot.dupe_count + shot['AZM']) / total_count
avg_inc = (prev_shot['INC'] * prev_shot.dupe_count + shot['INC']) / total_count
merged_comments = ('%s %s' % (prev_shot.get('COMMENT', '') or '', shot.get('COMMENT', '') or '')).strip() or None
prev_shot['LENGTH'], prev_shot['AZM'], prev_shot['INC'], prev_shot['COMMENT'] = avg_length, avg_azm, avg_inc, merged_comments
prev_shot.dupe_count += 1
elif from_ == prev_to and to == prev_from:
# backsight! we do the same iterative "running" mean rather than assuming a single forward and single back
total_count = prev_shot.dupe_count + 1
inv_azm, inv_inc = self._inverse_azm(shot['AZM']), self._inverse_inc(shot['INC'])
log.debug('Merging %d backsights "%s" <- "%s"', total_count, prev_shot, shot)
if abs(inv_azm - prev_shot['AZM']) > 2.0:
log.warning('Backsight AZM disagreement of %0.1f for "%s" <- "%s"', abs(inv_azm - prev_shot['AZM']), prev_shot, shot)
if abs(inv_inc - prev_shot['INC']) > 2.0:
log.warning('Backsight INC disagreement of %0.1f for "%s" <- "%s"', abs(inv_inc - prev_shot['INC']), prev_shot, shot)
if abs(shot['LENGTH'] - prev_shot['LENGTH']) > 1.0:
log.warning('Backsight LENGTH disagreement of %0.1f for "%s" <- "%s"', abs(shot['LENGTH'] - prev_shot['LENGTH']), prev_shot, shot)
avg_length = (prev_shot['LENGTH'] * prev_shot.dupe_count + shot['LENGTH']) / total_count
avg_azm = (prev_shot['AZM'] * prev_shot.dupe_count + inv_azm) / total_count
avg_inc = (prev_shot['INC'] * prev_shot.dupe_count + inv_inc) / total_count
merged_comments = ('%s %s' % (prev_shot.get('COMMENT', '') or '', shot.get('COMMENT', '') or '')).strip() or None
prev_shot['LENGTH'], prev_shot['AZM'], prev_shot['INC'], prev_shot['COMMENT'] = avg_length, avg_azm, avg_inc, merged_comments
prev_shot.dupe_count += 1
else:
# a new, different shot; no merge
return super(MergingSurvey, self).add_shot(shot)
class UTMLocation(object):
"""
Represents a UTM-based coordinate for Reference Point.
Note that PocketTopo doesn't support UTM Zones.
:ivar easting: (float)
:ivar northing: (float)
:ivar elevation: (float) meters
:ivar comment: (str)
"""
def __init__(self, easting, northing, elevation=0.0, comment=None):
self.easting = easting
self.northing = northing
self.elevation = elevation
self.altitude = elevation # alias
self.comment = comment
@property
def __geo_interface__(self):
return {'type': 'Point', 'coordinates': (self.easting, self.northing, self.elevation)}
def __str__(self):
return "<UTM %0.1fE %0.1fN %0.1fm>" % (self.easting, self.northing, self.elevation)
class TxtFile(object):
"""
Representation of a PocketTopo .TXT File. A TxtFile is a container for :class:`Survey` objects.
:ivar name: (string) the TxtFile's "name"
:ivar length_units: (string) `m` (default) or `feet`
:ivar angle_units: (int) `360` for degrees (default) or `400` for grads
:ivar surveys: (list of :class:`Survey`)
:ivar reference_points: (dict of :class:`UTMLocation` by station)
"""
def __init__(self, name=None, length_units='m', angle_units=360):
self.name = name
if length_units not in ('m', 'feet'):
raise Exception('Length units must be either \'m\' for meters (default) or \'feet\' for feet')
self.length_units = length_units
if angle_units not in (360, '360', 400, '400'):
raise Exception('Angle units must be either `360` for degrees (default) or `400` for grads')
self.angle_units = int(angle_units)
self.surveys = []
self.reference_points = OrderedDict()
def add_survey(self, survey):
"""Add a :class:`Survey` to :attr:`surveys`."""
survey.length_units = self.length_units
survey.angle_units = self.angle_units
self.surveys.append(survey)
def add_reference_point(self, station, utm_location):
"""Add a :class:`UTMLocation` to :attr:`reference_points`."""
self.reference_points[station] = utm_location
@property
def length(self):
"""Total surveyed length."""
return sum([survey.length for survey in self.surveys])
def __len__(self):
return len(self.surveys)
def __iter__(self):
for survey in self.surveys:
yield survey
def __contains__(self, item):
for survey in self.surveys:
if item == survey.name or item == survey:
return True
return False
def __getitem__(self, item):
for survey in self.surveys:
if item == survey.name or item == survey:
return survey
raise KeyError(item)
@staticmethod
def read(fname, merge_duplicate_shots=False, encoding='windows-1252'):
"""Read a PocketTopo .TXT file and produce a `TxtFile` object which represents it"""
return PocketTopoTxtParser(fname, merge_duplicate_shots, encoding).parse()
# def write(self, outf):
# """Write a `Survey` to the specified .DAT file"""
# with codecs.open(outf, 'wb', 'windows-1252') as outf:
# for survey in self.surveys:
# outf.write('\r\n'.join(survey._serialize()))
class PocketTopoTxtParser(object):
"""Parses the PocketTopo .TXT file format"""
def __init__(self, txtfilename, merge_duplicate_shots=False, encoding='windows-1252'):
self.txtfilename = txtfilename
self.merge_duplicate_shots = merge_duplicate_shots
self.encoding = encoding
def parse(self):
"""Produce a `TxtFile` object from the .TXT file"""
log.debug('Parsing PocketTopo .TXT file %s ...', self.txtfilename)
SurveyClass = MergingSurvey if self.merge_duplicate_shots else Survey
txtobj = None
with codecs.open(self.txtfilename, 'rb', self.encoding) as txtfile:
lines = txtfile.read().splitlines()
# first line is cave name and units
first_line_re = re.compile(r'^([\w\s]*)\(([\w\s]*),([\w\s]*)')
first_line = lines.pop(0)
cave_name, length_units, angle_units = first_line_re.search(first_line).groups()
cave_name, angle_units = cave_name.strip(), int(angle_units)
txtobj = TxtFile(cave_name, length_units, angle_units)
while not lines[0]:
lines.pop(0) # skip blanks
# next block identifies surveys (trip) metadata
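# e.g. an assumed trip header line: [1]: 2016/03/20 2.50 "Entrance series"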
while lines[0].startswith('['):
toks = lines.pop(0).split(None, 3)
id, date, declination = toks[:3]
id = id.strip('[]:')
date = datetime.strptime(date, '%Y/%m/%d').date()
declination = float(declination)
comment = toks[3].strip('"') if len(toks) == 4 else ''
survey = SurveyClass(id, date, comment, declination, cave_name)
txtobj.add_survey(survey)
while not lines[0]:
lines.pop(0) # skip blanks
# finally actual survey data
while lines:
line = lines.pop(0).strip()
if not line:
continue
if '"' in line:
line, comment = line.split('"', 1)
comment = comment.rstrip('"')
else:
comment = None
if '[' not in line:
# this is either a Reference Point or a zero-length fake shot
toks = line.split()
if len(toks) != 4: # ??
log.debug('Skipping unrecognized shot: %s %s', line, '"%s"' % comment if comment else '')
continue
station, vals = toks[0], list(map(float, toks[1:]))
if vals[0] == 0.0: # fake shot
log.debug('Skipping zero-length shot: %s %s', line, '"%s"' % comment if comment else '')
else: # reference point
easting, northing, altitude = vals
reference_point = UTMLocation(easting, northing, altitude, comment)
log.debug('Reference point: %s', reference_point)
txtobj.add_reference_point(station, reference_point)
continue
line, survey_id = line.split('[')
survey_id = survey_id.rstrip().rstrip(']')
toks = line.split()
from_to, (length, azm, inc) = toks[:-3], (float(tok) for tok in toks[-3:])
if len(from_to) == 2:
from_, to = tuple(from_to) # shot
elif len(from_to) == 1:
from_, to = from_to[0], None # splay
elif not from_to and length == 0.0:
continue # skip junk zero-length placeholder shots
else:
raise Exception()
shot = Shot([('FROM',from_), ('TO',to), ('LENGTH',length), ('AZM',azm), ('INC',inc), ('COMMENT',comment)])
txtobj[survey_id].add_shot(shot)
return txtobj
if __name__ == '__main__':
import sys
logging.basicConfig(level=logging.DEBUG)
for fname in sys.argv[1:]:
txtfile = PocketTopoTxtParser(fname, merge_duplicate_shots=True).parse()
print('%s (%s, %d)' % (txtfile.name, txtfile.length_units, txtfile.angle_units))
for survey in txtfile:
print('\t', '[%s] %s (%0.1f %s)' % (survey.name, survey.comment, survey.length, txtfile.length_units))
for shot in survey:
print('\t\t', shot)
|
Do You Know the Sign?
I received an interesting email from Worthy News. George Whitten put together an excellent devotional on the letter tav. Enjoy!
Ezekiel 9:4-6 and the Lord said to him, “Go through the midst of the city, through the midst of Jerusalem, and put a mark (Tav in Hebrew) on the foreheads of the men who sigh and cry over all the abominations that are done within it.” To the others He said in my hearing, “Go after him through the city and kill; do not let your eye spare, nor have any pity. Utterly slay old and young men, maidens and little children and women; but do not come near anyone on whom is the mark (Tav in Hebrew); and begin at My sanctuary.” So they began with the elders who were before the temple.
Yesterday, we began identifying the ancient Hebrew alphabet and exploring the potential symbolic meanings of its letters. The last letter, “Tav”, as we saw, strongly resembles a cross. Today, we’re going to look at how “Tav” is spelled in ancient Hebrew. The phonetic spelling of “Tav” is Tav (T)-Vav (V). Now the ancient letter “Vav” strongly resembled a commonly used tent-peg, and then, later, a common nail. So the spelling of “Tav” contains a cross and a nail.
The Ancient Word Tav is illustrated to the left, which is also the spelling of the Hebrew Letter Tav translated as mark in Ezekiel 9:4,6. The Vav is the image to the left of the cross which means in Ancient Hebrew a nail or a wooden peg. To the right of the Vav is the image of the cross, which was the way the Hebrew Letter Tav was originally written. If you cannot view the image to the left, please visit Worthy Devotions.
This word in Hebrew is translated, “mark“, in the verses above; so, this ancient Hebrew “mark” that was on the foreheads of righteous men who grieved over abominations in the Lord’s house, was identified by a word containing a cross and a nail. Interesting…since the prophet is told to pass over all those that have that mark, beginning at the Lord’s sanctuary, we have an Old Testament picture of the protection of the Lord’s cross, expressed by ancient Hebrew letters. Perhaps this is just another way in which the Lord has expressed His sovereignty, weaving His revelation into the entire fabric of His Word, revealing the protection of the cross in both the Old and New Testaments.
Another interesting aspect of this verse is the location where the Lord’s judgment begins: the Lord’s sanctuary. 1 Peter 4:17 states that “it is time for judgment to begin with the house of God.” I understand this to mean that those who are most accountable, who stand, as it were, closest to the place where God dwells, will come under His judgment first. For us who believe, this judgment will be purification, possibly through suffering, as Peter suggests. For the wicked, who are increasingly distant from God, His judgment means eternal condemnation.
The Lord is returning for a bride who is prepared to meet Him. She will be pure and spotless, made white by His cross and purified through her submission to His careful preparation. Illustrated by the letter “Tav” in ancient Hebrew, her identifying mark will be that very cross.
Are you prepared as a bride should be for her husband? Your sorrow and weeping over the abominations in our Lord’s world, and even in His body, confirm the mark which is upon you. Wear the sign with honor — glorying in it, and carry your cross faithfully to the end, protected from the eternal judgment which awaits all men at His coming.
A Remnant Is Rising In The Land!
This is a repost from an email I received. I pass it along for those of you who are praying, watching and preparing for the next move of the Lord. This word is from Dr. Robert Stearns the executive director of Eagles’ Wings.
I was in Israel on Yom Kippur, preparing to enter the Holy Days of the Biblical calendar, when I felt the Lord impressing this word on my heart… I pray it is an encouragement to you.
A Remnant is Rising in the Land.
Unseen, unrecognized, they know Whose they are and have died to this present world’s ambitions. They want nothing of a consumer Christianity, where God exists for their earthly pleasure. They have caught a whiff of Heaven’s fragrant food, and just the smell has satisfied them more than the sweetest delicacies of this passing age, and motivates them to remember that they are just passing through.
They will not be bought or sold with position or money. They are remembering why they “got into this” in the first place – for the Lamb. For Jesus Christ’s honor, for the sake of His worthy name. Their backbones are being steeled in the furnace of affliction, and purified of contemporary dross. They are old-school, hard-core Jesus freaks, and they are not apologizing for it. The remnant is not ashamed!
They are blood-bought and fiercely committed to the Word – the Logos and the Rhema.
Something within them stirs because they know they are here on a kairos assignment. They realize that they may have forgotten that for a moment, been diverted off the path for some years, but like soldiers who remember drills from long ago, they are coming to attention and position and awareness.
They are here on purpose. They are necessary to Heaven’s narrative. They are not bystanders. They are the players on the stage of His-story.
This army is realizing they never should have eaten Babylon’s food in the first place, even if it was served in halls decorated to look like a cheap imitation of Zion. They would rather be pilgrims than settle. They would rather be fools for Christ than wise in the world’s eyes. They would rather be known in Heaven and feared in Hell than recognized on earth.
Whatever happens to them in the world really doesn’t matter. They have locked gazes with the Lamb’s eyes, and locked step with the cadence of Heaven’s march.
His remnant understand that this is the Final Act and there is no turning back now.
They are nowhere to be seen and everywhere to be found. There are very few of them chosen, and a great host of them who will emerge.
It’s going to be very, very intense, for the sword they carry is stained with the blood of their lesser desires and their crucified carnality. They’ve had their battles, faced their own personal Valleys of Decision, and made their choice.
I have Decided. To Follow Jesus. No Turning Back. We have decided. Have you decided?
The background of Jesus’ teaching on end-time events surrounds the temple. When Jesus foretold the imminent destruction of the temple, the disciples’ ears were pricked. They wanted to know more. Most people want to know more about futuristic events. I would assume you are no different. But Jesus, in His wisdom, didn’t tell the disciples what they wanted to hear. He related information they needed to know.
The disciples, like many of us today, were concerned about the wrong things. They wanted to hear information that would be of no real value to them, other than to satisfy their own curiosity. Jesus was more interested in affecting their conduct and character rather than satisfying their curiosity.
Jesus, however, knew that if the disciples looked for signs they would be susceptible to deception. He virtually avoided their question and focused instead on character issues.
In the midst of all these tumultuous end times events Jesus specifically warns the believer about three very real dangers in Luke 21.
Deception (8) – With all the information we have been given about end-time events by Jesus, the apostles, and others… you would think deception would not be an issue. Jesus specifically warned us that there will be an increase in deception and the tremendous potential for people to be deceived. If Jesus says deception will be our number one problem, then most assuredly it will. Unfortunately, the group who will mislead the greatest number of people will be Christians. They are the ones who love making predictions about the date and time of Jesus’ return.
Fear (9) – How many times did Jesus tell His disciples not to be afraid? Evidently fear will run rampant in the end times. It is a mistake to take any one prophecy of Jesus or those in Revelation as infallible proof that the end of the age is immediately at hand. As troubles do intensify and increase, the true believer is told to “look up” for their redemption. They are not encouraged to “look at” troubles, hardships, and difficulties.
Distractions (34-38) – This point is difficult to make because every generation since the time of Christ has been distracted to some degree. But evidently the distractions will be greater as we head toward the end of days. To combat this very issue Jesus told several parables in a row to communicate the need for everyone to be watching, waiting, and ready for His return.
Let me tell you a modern story that mimics Jesus’ parables.
Have you ever heard the story of Jeff and Janell’s first date?
As someone once said, “God may not be early, but He is never late.” Put your wedding clothes back on and be ready!
Are You Expecting a Visitation?
Ever had someone show up unexpectedly? We commonly say things like, “Well, if I had known you were coming, I would have...” The reason we say these types of things is because we were not expecting company.
The Jews of Jesus’ day were expecting a political Messiah, not a spiritual leader. Much like our day, we are looking for a political leader to help solve our economic problems. Therefore, the people of Jesus’ day missed a real visitation from God. They missed out on real deliverance, real salvation, and real peace. How can anyone miss the time of God’s visitation?
Evidently, if we hold fast to false expectations of how God moves or should move, we can miss His visitation. Let’s face it, none of us knows the Lord as well as we think we do. The result of missing God’s visitation is that real peace is now hidden from our eyes!
As we roll further down the path towards the end of days, it is imperative we get a clear picture of how the Lord moves and how to walk in His ways. Lay aside useless information that doesn’t benefit your walk with God. The good news is we can know the Lord and be a part of His return! Are you ready to meet Him this time around?
Don’t Believe in a Kingdom Far, Far, Away!
What’s your belief about the return of Christ? Everybody believes something. Even before I committed my life to Christ, I believed Christ’s return to be imminent. But that was a long time ago, in a galaxy far, far, away. Ok, it was 35 years ago.
There is nothing wrong with having expectations. Jesus told everyone to “be ready” and expect His return. His instructions were to live as though His return will be tomorrow, but plan as though it will be a lifetime away.
The parable Jesus employs here in Luke 19 corrects this type of faulty thinking. Evidently many people believed "the Kingdom of God was going to appear immediately." The Kingdom does manifest today, just not yet in its full manifestation; that fullness awaits Jesus returning to Jerusalem to reign supreme.
Why does Jesus mention this issue? Because we have a tendency to waste the real investment opportunities the Lord has made in us. We believe that in the future God will make us complete, perfect and whole just like Jesus. The truth … God is actually working in our “now”, making us in the image of His Son.
God expects us to be investors like Him. Those who invest wisely are given more investment capital. Those who neglect their opportunities, lose their entire investment. See, if you believe Kingdom work and Kingdom realities are a “future” event you will miss God’s best now. The Kingdom is now and exists within you. The Lord has placed you here to conduct Kingdom business until He returns.
Will you miss the investment opportunity of a lifetime? Don’t believe in a Kingdom far, far, away! Believe in a Kingdom right in your heart. God does.
Your Wedding Clothes Are Coming!
The Lord has told me numerous times in the past two years to “get ready for the wedding!” I have heard this so many times, I believe it to be imminent. The Lord is shaking and waking anyone “who has ears to hear” to look forward to a greater reality that is about to happen. The church, of course, is the bride of Christ. The Father’s call is to ready ourselves now by putting on our wedding clothes.
Remember, the Lord is calling for His Bride to ready herself for the wedding. He wants all believers to be dressed for that day. Your wedding clothes are on their way this very moment. Be ready for them!
One of the things that helps keep true believers alert is the knowledge of the Lord’s return. Jesus spoke on numerous occasions about the need for everyone to be ready and to be watching and waiting for His imminent return. Two thousand years seems to be a long time to wait, but as each day passes we are one day closer.
With this in mind, would you be ready to meet the Lord today? It is easy to “fake believe” that you and the Lord are close. No one knows the actual truth, but Jesus knows! The truth is you can be just as close to the Lord right now as any other point in the past, present or future. We all determine each day how close we desire to be to the Lord and how relevant He is to our lives. I am reminded of a story by Max Lucado that illustrates the point that one day we will all give account of our lives.
It is true, we all eventually “face the music.” We may “fake the music” for a while, but the truth comes out in the end when we have to perform a solo. Be ready for Jesus’ return now, so that whenever He appears, you are prepared.
I had a dream about these verses. My wife and I were dressed in wedding clothes waiting for a wedding ceremony. The Lord has spoken to both of us numerous times to prepare ourselves. This call was not simply to be ready for the return of the Lord, but for the marriage of the church and Jesus Christ. If you prepare yourself for the latter, you will also be ready for His return.
Make sure your personal inward lamp is lit and you are on fire for the Lord. Your heart must always be warm and soft towards Him in every way. In addition, be filled with the Holy Spirit. This assures your lamp will not go out before He returns.
Watch and Wait for the Lord’s return. Know the Lord’s voice and presence now so that when He appears you will not be surprised or caught unaware.
Plan now to be ready so that you will not be ashamed when He appears. |
import sys
import pytest
from errata_tool.cli import main
import errata_tool.cli.release
from errata_tool.connector import ErrataConnector
class CallRecorder(object):
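    """Records the positional arguments it was called with, so tests can
    assert that the CLI dispatched to the expected handler."""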
def __call__(self, *args):
self.args = args
def test_short_help(monkeypatch):
monkeypatch.setattr(sys, 'argv', ['errata-tool', '-h'])
with pytest.raises(SystemExit):
main.main()
def test_help(monkeypatch):
monkeypatch.setattr(sys, 'argv', ['errata-tool', '--help'])
with pytest.raises(SystemExit):
main.main()
def test_prod_connector(monkeypatch):
argv = ['errata-tool', 'release', 'get', 'rhceph-2.4']
monkeypatch.setattr(sys, 'argv', argv)
monkeypatch.setattr(errata_tool.cli.release, 'get', lambda x: None)
main.main()
expected = 'https://errata.devel.redhat.com'
assert ErrataConnector._url == expected
def test_staging_connector(monkeypatch):
argv = ['errata-tool', '--stage', 'release', 'get', 'rhceph-2.4']
monkeypatch.setattr(sys, 'argv', argv)
monkeypatch.setattr(errata_tool.cli.release, 'get', lambda x: None)
main.main()
expected = 'https://errata.stage.engineering.redhat.com'
assert ErrataConnector._url == expected
def test_dispatch(monkeypatch):
argv = ['errata-tool', 'release', 'get', 'rhceph-2.4']
monkeypatch.setattr(sys, 'argv', argv)
recorder = CallRecorder()
monkeypatch.setattr(errata_tool.cli.release, 'get', recorder)
main.main()
assert recorder.args
|
Math is a subject that a child either loves immensely or really fears. Websites now offer free and enjoyable ways for your child to learn subjects, including math. Isn't that wonderful! As a parent, one is concerned about a child's education, and DominoQQ serves parents by providing math puzzles. While surfing the internet, one can come across innumerable puzzle games that engage the child and help the child learn.
A child's development through puzzle games: is it plausible?
If your kid is afraid of math or has difficulty with the subject, these puzzle games are the medium for you.
Improvement in problem-solving skills: Math and puzzles both call for a problem-solving approach to reach a solution. Toddlers and children think, plan, and then solve the math puzzles; this strategy development will help the child throughout life.
Game-based learning and a boon to the child's brain: Puzzles let the child learn while playing, and children love working toward a result with interest and pleasure. A kid's concentration and brain power increase with solving math puzzles, and the time constraints improve overall memory performance.
Portable fun: In the digital age, these fun math puzzles can be played on any device and at any time. Kids can be engaged on PCs or mobile devices with this visual cognition method.
Motivation: As the student keeps solving math problems, he or she becomes inspired to solve more, and the fear of math turns into a passion for it. The sense of achievement boosts the child's self-esteem.
This online gaming is an educational tool delivered in an entertaining way. Many free websites are available, so let your child begin the challenge.
import os
import shutil
import random
from datetime import datetime
from Plugins.Extensions.archivCZSK import log
from Plugins.Extensions.archivCZSK.engine.tools import util
from Plugins.Extensions.archivCZSK.compat import eConnectCallback
from Components.AVSwitch import AVSwitch
from Tools.LoadPixmap import LoadPixmap
from enigma import eTimer, ePicLoad
class PosterProcessing:
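    """Downloads poster images into a local cache directory, keeping at most
    ``poster_limit`` files on disk and invoking ``got_image_callback`` once a
    download has finished."""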
def __init__(self, poster_limit, poster_dir):
self.poster_limit = poster_limit
self.poster_dir = poster_dir
self.got_image_callback = None
self.poster_files = []
self._init_poster_dir()
def _init_poster_dir(self):
if not os.path.isdir(self.poster_dir):
            try:
                os.makedirs(self.poster_dir)
            except Exception as e:
                log.error('Failed to create poster directory %s. Reason: %s' % (self.poster_dir, e))
for filename in os.listdir(self.poster_dir):
file_path = os.path.join(self.poster_dir, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
log.error('Failed to delete %s. Reason: %s' % (file_path, e))
def _remove_oldest_poster_file(self):
_, path = self.poster_files.pop(0)
log.debug("PosterProcessing._remove_oldest_poster_file: {0}".format(path))
try:
os.unlink(path)
except Exception as e:
log.error("PosterProcessing._remove_oldest_poster_file: {0}".format(str(e)))
def _create_poster_path(self):
dt = datetime.now()
filename = datetime.strftime(dt, "poster_%y_%m_%d__%H_%M_%S")
filename += "_"+ str(random.randint(1,9)) + ".jpg"
dest = os.path.join(self.poster_dir, filename)
return dest
def _image_downloaded(self, url, path):
if path is None:
return
if len(self.poster_files) == self.poster_limit:
log.debug("PosterProcessing._image_downloaded: download limit reached({0})".format(self.poster_limit))
self._remove_oldest_poster_file()
log.debug("PosterProcessing._image_downloaded: {0}".format(path))
self.poster_files.append((url, path))
self.got_image_callback(url, path)
def get_image_file(self, poster_url):
if os.path.isfile(poster_url):
log.debug("PosterProcessing.get_image_file: found poster path (local)")
return poster_url
for idx, (url, path) in enumerate(self.poster_files):
if (url == poster_url):
                log.debug("PosterProcessing.get_image_file: found poster path on position {0}/{1}".format(idx, self.poster_limit))
return path
from Plugins.Extensions.archivCZSK.settings import USER_AGENT
headers = {"User-Agent": USER_AGENT }
util.download_to_file_async(util.toString(poster_url), self._create_poster_path(), self._image_downloaded, headers=headers, timeout=3)
return None
class PosterPixmapHandler:
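    """Resolves a poster URL to a local file via PosterProcessing and decodes it
    with ePicLoad into ``poster_widget``, retrying a few times and falling back
    to ``no_image_path`` if decoding keeps failing."""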
def __init__(self, poster_widget, poster_processing, no_image_path):
self.poster_widget = poster_widget
self.poster_processing = poster_processing
self.poster_processing.got_image_callback = self._got_image_data
self.no_image_path = no_image_path
self._decoding_url = None
self._decoding_path = None
self.last_decoded_url = None
self.last_selected_url = None
self.picload = ePicLoad()
self.picload_conn = eConnectCallback(self.picload.PictureData, self._got_picture_data)
self.retry_timer = eTimer()
self.retry_timer_conn = eConnectCallback(self.retry_timer.timeout, self._decode_current_image)
self._max_retry_times = 3
self._retry_times = 0
def __del__(self):
log.debug("PosterImageHandler.__del__")
self.retry_timer.stop()
del self.retry_timer_conn
del self.retry_timer
del self.picload_conn
del self.picload
def _got_image_data(self, url, path):
self._start_decode_image(url, path)
def _decode_current_image(self):
if self._retry_times < self._max_retry_times:
self._retry_times += 1
self._start_decode_image(self.last_selected_url, self._decoding_path)
else:
self._start_decode_image(None, self.no_image_path)
self._retry_times = 0
self.retry_timer.stop()
def _start_decode_image(self, url, path):
log.debug("PosterImageHandler._start_decode_image: {0}".format(path))
if self._decode_image(path):
log.debug("PosterImageHandler._start_decode_image: started...")
self.retry_timer.stop()
self._decoding_path = None
self._decoding_url = url
else:
log.debug("PosterImageHandler._start_decode_image: failed...")
self._decoding_path = path
self.retry_timer.start(200)
def _decode_image(self, path):
wsize = self.poster_widget.instance.size()
sc = AVSwitch().getFramebufferScale()
self.picload.setPara((wsize.width(), wsize.height(),
sc[0], sc[1], False, 1, "#ff000000"))
self.last_decoded_url = None
return 0 == self.picload.startDecode(util.toString(path))
def _got_picture_data(self, picInfo=None):
picPtr = self.picload.getData()
if picPtr is not None:
log.debug("PosterImageHandler._got_picture_data, success")
self.poster_widget.instance.setPixmap(picPtr)
self.last_decoded_url = self._decoding_url
else:
log.error("PosterImageHandler._got_picture_data, failed")
self.last_decoded_url = None
self._decoding_url = None
def set_image(self, url):
log.debug("PosterImageHandler.set_image: {0}".format(url))
if self.last_selected_url:
if self.last_selected_url == url:
log.debug("PosterImageHandler.set_image: same url as before")
return
self.last_selected_url = url
if self.last_decoded_url:
if self.last_decoded_url == url:
log.debug("PosterImageHandler.set_image: same decoded url as before")
return
self.retry_timer.stop()
if url is None:
imgPtr = LoadPixmap(path=self.no_image_path, cached=True)
if imgPtr:
self.poster_widget.instance.setPixmap(imgPtr)
else:
path = self.poster_processing.get_image_file(url)
log.debug("PosterImageHandler.set_image: path={0}".format(path))
self.poster_widget.instance.setPixmap(None)
self.last_decoded_url = None
            # poster is already cached locally - decode it right away (synchronous path)
if path is not None:
self._start_decode_image(url, path)
|
Get out of town with these interesting Mexico City side-trips: Cuernavaca (Cuernavaca Country Club, ASLI-Spanish Language Institute, & more) and Zona Arqueologica Teotihuacan (in San Juan Teotihuacan). There's much more to do: see the interesting displays at National Museum of Anthropology (Museo Nacional de Antropologia), ponder the world of politics at National Palace (Palacio Nacional), get to know the fascinating history of Historic Center (Centro Historico), and admire the masterpieces at Palacio de Bellas Artes.
To see ratings, photos, maps, and tourist information, refer to the Mexico City trip itinerary planning tool.
In March in Mexico City, expect temperatures between 28°C during the day and 11°C at night. Finish your sightseeing early on the 29th (Thu) to allow enough time to travel back home. |
from datetime import datetime, timedelta
import time
from cloudshell.cp.azure.common.exceptions.quali_timeout_exception import QualiTimeoutException
class TaskWaiterService(object):
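    """Polls Azure operation pollers until they complete, honoring cancellation
    requests and an optional timeout."""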
def __init__(self, cancellation_service):
"""
:param cancellation_service: cloudshell.cp.azure.domain.services.command_cancellation.CommandCancellationService
"""
self.cancellation_service = cancellation_service
def wait_for_task(self, operation_poller, cancellation_context, wait_time=30, logger=None):
"""Wait for Azure operation end
:param operation_poller: msrestazure.azure_operation.AzureOperationPoller instance
:param cancellation_context cloudshell.shell.core.driver_context.CancellationContext instance
        :param wait_time: (int) seconds to wait between polling requests
        :param logger: optional logger for progress messages
        :return: Azure Operation Poller result
"""
while not operation_poller.done():
if logger:
logger.info('Waiting for poller, current status is {0}'.format(operation_poller.status()))
self.cancellation_service.check_if_cancelled(cancellation_context)
time.sleep(wait_time)
return operation_poller.result()
def wait_for_task_with_timeout(self, operation_poller, cancellation_context, wait_time=30, timeout=1800,
logger=None):
"""Wait for Azure operation end
        :param timeout: (int) maximum number of seconds to wait before raising QualiTimeoutException
:param operation_poller: msrestazure.azure_operation.AzureOperationPoller instance
:param cancellation_context cloudshell.shell.core.driver_context.CancellationContext instance
        :param wait_time: (int) seconds to wait between polling requests
        :param logger: optional logger for progress messages
        :return: Azure Operation Poller result
"""
datetime_now = datetime.now()
next_time = datetime_now + timedelta(seconds=timeout)
while not operation_poller.done() and (datetime_now < next_time):
self.cancellation_service.check_if_cancelled(cancellation_context)
if logger:
logger.info('Waiting for poller, current status is {0}'.format(operation_poller.status()))
time.sleep(wait_time)
datetime_now = datetime.now()
if not operation_poller.done() and (datetime_now > next_time):
if logger:
logger.warn('Had a timeout, current status in poller is: {0}'.format(operation_poller.status()))
raise QualiTimeoutException()
return operation_poller.result()
|
75. min 0:0 Mahamoudou Kéré was cautioned with a yellow card.
70. min 0:0 Aliou Cissé was cautioned with a yellow card.
6. min 0:0 Saïdou Panandétiguiri was cautioned with a yellow card. |
#!/usr/bin/env python
"""This is a script that ensures the specified CropObjectList files are
formatted up-to-date:
* Uses ``<Top>`` and ``<Left>``, not ``<X>`` and ``<Y>``
* Does not use ``<Selected>``
* Does not use ``<MLClassId>``
You can either provide a root directory, individual files, and ``--outdir``,
which takes the files together with their filenames and creates the re-coded
copies in the output directory (including paths), or you can provide
``--inplace`` and the script modifies the file in-place.
Example::
recode_xy_to_topleft.py -r /my/data/cropobjects -i /my/data/cropobjects/*.xml
-o /my/data/recoded-cropobjects
"""
from __future__ import print_function, unicode_literals
from builtins import zip
from builtins import str
import argparse
import copy
import logging
import os
import time
from muscima.io import parse_cropobject_list, export_cropobject_list
__version__ = "0.0.1"
__author__ = "Jan Hajic jr."
##############################################################################
def get_document_namespace(filename, root=None, output_dir=None):
"""Derives the document namespace for a CropObjectList file
with the given filename, optionally with a given root
and output dir.
In fact, only takes ``os.path.splitext(os.path.basename(filename))[0]``.
"""
return os.path.splitext(os.path.basename(filename))[0]
def recode_ids(cropobjects,
document_namespace,
dataset_namespace):
"""Recode all IDs of the given CropObjects, so that they are (hopefully)
globally unique. That is, from e.g. ``611``, we get
    ``MUSCIMA++_1.0::CVC-MUSCIMA_W-35_N-08_D-ideal::611``.
Creates new CropObjects, does *not* modify the input in-place.
:param cropobjects: A list of CropObject instances.
:param document_namespace: An identifier of the given
CropObjectList. It should be unique for each dataset,
i.e. ``absolute_dataset_namespace``.
:param dataset_namespace: An identifier of the given
dataset. It should be globally unique (which is impossible
to guarantee, but at least within further versions of MUSCIMA++,
it should hold).
"""
output_cropobjects = []
for c in cropobjects:
c_out = copy.deepcopy(c)
uid = c.UID_DELIMITER.join([dataset_namespace,
document_namespace,
str(c.objid)])
c_out.set_uid(uid)
output_cropobjects.append(c_out)
return output_cropobjects
##############################################################################
def build_argument_parser():
parser = argparse.ArgumentParser(description=__doc__, add_help=True,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-r', '--root', action='store', default=None,
help='Root directory of the CropObjectList files.'
' The paths of the input files will be interpreted'
' relative to this directory in order to place'
' the output files, unless \'--inplace\' is given.'
' If \'--output_dir\' is given but \'--root\''
' is not, the ')
parser.add_argument('-o', '--output_dir', action='store',
help='Output directory for the recoded files.'
' Equivalent role to the \'--root\': if you'
' supply a file /my/root/subdir/somefile.xml,'
' root /my/root/, and output dir /my/output, the'
' output file will be /my/output/subdir/somefile.xml.'
' If the output dir does not exist, it will be'
' created')
parser.add_argument('-i', '--input_files', action='store', nargs='+',
help='Input files. Full paths, *including* root dir:'
' the root is only there for retaining directory'
' structure, if applicable. (This is because you'
' will probably want to use shell wildcards, and'
' it would not work if you did not supply the'
' full paths to the input directory/directories.)')
parser.add_argument('--inplace', action='store_true',
help='Modify input files in-place.')
parser.add_argument('--recode_uids', action='store_true',
help='Add UIDs to CropObjects. The dataset namespace'
' is given by \'--uid_global_namespace\', the'
' document namespace is derived from filenames'
' (as basename, without filetype extension).')
parser.add_argument('--uid_dataset_namespace', action='store',
default='MUSCIMA-pp_1.0',
help='If UIDs are getting added, this is their global'
' namespace.')
parser.add_argument('-v', '--verbose', action='store_true',
help='Turn on INFO messages.')
parser.add_argument('--debug', action='store_true',
help='Turn on DEBUG messages.')
return parser
def main(args):
logging.info('Starting main...')
    _start_time = time.time()
##########################################################################
logging.info('Converting to absolute paths...')
root = None
if args.root is not None:
root = os.path.abspath(args.root)
    output_dir = None
    if args.output_dir is not None:
        output_dir = os.path.abspath(args.output_dir)
input_files = [os.path.abspath(f) for f in args.input_files]
logging.info('Root: {0}'.format(root))
logging.info('Output: {0}'.format(output_dir))
logging.info('Example input: {0}'.format(input_files[0]))
##########################################################################
# Get output filenames,
# fail on non-corresponding input file and root.
logging.info('Inferring output pathnames...')
if args.inplace:
output_files = input_files
else:
if args.root is None:
relative_files = input_files
else:
len_root = len(root)
relative_files = []
for f in input_files:
if not os.path.samefile(os.path.commonpath([f, root]),
root):
raise ValueError('Input file {0} does not seem to'
' come from the root directory {1}.'
''.format(f, root))
relative_files.append(f[len_root+1:])
# Ensure output dir exists
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
logging.debug('Making output file names. Output dir: {0}'.format(output_dir))
logging.debug('Example rel file: {0}'.format(relative_files[0]))
logging.debug('Ex. output: {0}'.format(os.path.join(output_dir, relative_files[0])))
output_files = [os.path.join(output_dir, f)
for f in relative_files]
logging.debug('Local Example output file: {0}'.format(output_files[0]))
logging.info('Example output file: {0}'.format(output_files[0]))
##########################################################################
# Parse cropobjects
logging.info('Parsing cropobject files ({0} total)...'.format(len(input_files)))
cropobjects_for_files = []
for i, f in enumerate(input_files):
cropobjects_for_files.append(parse_cropobject_list(f))
if (i > 0) and (i % 10 == 0):
logging.info('Parsed {0} files.'.format(i))
if args.recode_uids:
dataset_namespace = args.uid_dataset_namespace
document_namespace = get_document_namespace(filename=f,
root=root,
output_dir=output_dir)
recoded_cropobjects = recode_ids(cropobjects_for_files[-1],
document_namespace=document_namespace,
dataset_namespace=dataset_namespace)
cropobjects_for_files[-1] = recoded_cropobjects
##########################################################################
logging.info('Exporting cropobjects...')
_i = 0
for output_file, c in zip(output_files, cropobjects_for_files):
s = export_cropobject_list(c)
with open(output_file, 'w') as hdl:
hdl.write(s)
hdl.write('\n')
_i += 1
if (_i % 10) == 0:
logging.info('Done: {0} files'.format(_i))
    _end_time = time.time()
logging.info('recode_xy_to_topleft.py done in {0:.3f} s'.format(_end_time - _start_time))
if __name__ == '__main__':
parser = build_argument_parser()
args = parser.parse_args()
if args.verbose:
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
if args.debug:
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)
main(args)
|
This compatible HP 2000-299WM keyboard is a genuine, new keyboard manufactured by HP, with a 6-month warranty against manufacturer defects and a 30-day money-back guarantee on all products.
HP 2000-299WM Keyboard Fits Part Nos.
HP 2000-299WM Keyboard Fits Laptop Models.
Why Buy HP 2000-299WM Keyboard here?
We focus on bringing the best HP 2000-299WM keyboards to our customers and strongly desire that every customer is satisfied with the HP 2000-299WM keyboard they buy. We are committed to providing customers with high-quality, high-capacity, low-priced replacement laptop keyboards.