#encoding: UTF-8
# Copyright (C) 2016 Mauro Palumbo
# This file is distributed under the terms of the MIT License.
# See the file `License' in the root directory of the present distribution.
"""
A collection of wrappers for the *matplotlib* functions.
.. Note::
All functions return a *matplotlib* figure which can be modified by the user.
"""
try:
import wx
from matplotlib import use
use('WXAgg')
except:
pass
import matplotlib.pyplot as plt
import numpy as np
from .eos import calculate_fitted_points
def simple_plot_xy(x,y,xlabel="",ylabel=""):
"""
This function generates a simple xy plot with matplotlib.
"""
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1) # create an axes object in the figure
ax.plot(x, y, 'r')
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
plt.show()
return fig
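# A minimal usage sketch, kept as a comment so importing this module has no
# side effects; the data values below are made up for illustration only:
#
#   import numpy as np
#   x = np.linspace(0.0, 10.0, 200)
#   fig = simple_plot_xy(x, np.sin(x), xlabel="x", ylabel="sin(x)")
#   fig.axes[0].set_title("the returned figure can still be modified")
#   fig.savefig("sinx.png")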
def multiple_plot_xy(x,y,xlabel="",ylabel="",labels=""):
"""
This function generates a simple xy plot with matplotlib, overlapping several
lines given in the matrix y. The second index of y identifies a line in the
plot; the first index runs over the data points of each line.
"""
if (len(y[0,:])>7):
print ("Too many data on y axis!")
return
colors = ['k','r','b','g','c','m','y']
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1) # create an axes object in the figure
if (labels==""):
try: # try if there are multiple data on x axis
for i in range(0,len(y[0,:])):
ax.plot(x[:,i], y[:,i], colors[i])
except: # if not use a single x axis
for i in range(0,len(y[0,:])):
ax.plot(x, y[:,i], colors[i])
else:
try: # try if there are multiple data on x axis
for i in range(0,len(y[0,:])):
ax.plot(x[:,i], y[:,i], colors[i],label=labels[i])
except: # if not use a single x axis
for i in range(0,len(y[0,:])):
ax.plot(x, y[:,i], colors[i],label=labels[i])
ax.legend()
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
plt.show()
return fig
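# A small sketch of the y-matrix convention described in the docstring above
# (comment only, made-up data): each column of y becomes one line in the plot.
#
#   import numpy as np
#   x = np.linspace(0.0, 5.0, 100)
#   y = np.column_stack((np.sin(x), np.cos(x)))  # two columns -> two lines
#   fig = multiple_plot_xy(x, y, xlabel="x", ylabel="f(x)", labels=["sin", "cos"])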
def plot_EV(V,E,a=None,labely="Etot"):
"""
This function plots the E(V) data with matplotlib and, if a is given, also
plots the fitted results.
"""
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1) # create an axes object in the figure
ax.plot(V, E, 'o', label=labely+" data", markersize=10)
if a is not None:
Vdense, Edensefitted = calculate_fitted_points(V,a)
ax.plot(Vdense, Edensefitted, 'r', label='Fitted EOS')
ax.legend()
ax.set_xlabel('V (a.u.^3)')
ax.set_ylabel('E (Ry)')
plt.show()
return fig
def plot_Etot(celldmsx,Ex,n,nmesh=(50,50,50),fittype="quadratic",ibrav=4,a=None):
"""
This function makes a 3D plot with matplotlib of the Ex(celldmsx) data and, if
a is given, also plots the fitted results. The plot type depends on ibrav.
"""
from mpl_toolkits.mplot3d import axes3d
from matplotlib import cm
from .minutils import calculate_fitted_points_anis
if (Ex is None) and (a is None):
return
fig = plt.figure()
if (ibrav==4): # hex case
ax = fig.gca(projection='3d')
if Ex is not None:
na=n[0]
nc=n[2]
X = np.zeros((na,nc))
Y = np.zeros((na,nc))
Z = np.zeros((na,nc))
for j in range(0,nc):
for i in range(0,na):
index = j*na+i
X[i,j] = celldmsx[index,0]
Y[i,j] = celldmsx[index,2]
Z[i,j] = Ex[index]
#print (index,X[i,j],Y[i,j],Z[i,j])
ax.set_xlim(X.min(),X.max())
ax.set_ylim(Y.min(),Y.max())
ax.set_zlim(Z.min(),Z.max())
ax.scatter(X,Y,Z,c='r',marker='o')
if a is not None:
celldmsxdense, Edensefitted = calculate_fitted_points_anis(celldmsx,nmesh,fittype,ibrav,a)
Xd = np.zeros((nmesh[0],nmesh[2]))
Yd = np.zeros((nmesh[0],nmesh[2]))
Zd = np.zeros((nmesh[0],nmesh[2]))
for i in range(0,nmesh[0]):
for j in range(0,nmesh[2]):
index = i*nmesh[0]+j
Xd[i,j] = celldmsxdense[index,0]
Yd[i,j] = celldmsxdense[index,2]
Zd[i,j] = Edensefitted[index]
ax.set_xlim(Xd.min(),Xd.max())
ax.set_ylim(Yd.min(),Yd.max())
ax.set_zlim(Zd.min(),Zd.max())
ax.plot_surface(Xd, Yd, Zd, rstride=1, cstride=1, alpha=0.3)
cset = ax.contour(Xd, Yd, Zd, zdir='z', offset=Zd.min(), cmap=cm.coolwarm)
ax.set_xlabel("a (a.u.)")
ax.set_ylabel("c (a.u.)")
ax.set_zlabel("Etot (Ry)")
plt.show()
return fig
def plot_Etot_contour(celldmsx,nmesh=(50,50,50),fittype="quadratic",ibrav=4,a=None):
"""
This function makes a contour plot with matplotlib of the Ex(celldmsx) fitted results.
The plot type depends on ibrav.
"""
from .minutils import calculate_fitted_points_anis
if a is None:
return
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1) # create an axes object in the figure
if (ibrav==4):
celldmsxdense, Edensefitted = calculate_fitted_points_anis(celldmsx,nmesh,fittype,ibrav,a)
Xd = np.zeros((nmesh[0],nmesh[2]))
Yd = np.zeros((nmesh[0],nmesh[2]))
Zd = np.zeros((nmesh[0],nmesh[2]))
for i in range(0,nmesh[0]):
for j in range(0,nmesh[2]):
index = i*nmesh[0]+j
Xd[i,j] = celldmsxdense[index,0]
Yd[i,j] = celldmsxdense[index,2]
Zd[i,j] = Edensefitted[index]
CS = ax.contour(Xd, Yd, Zd)
plt.clabel(CS, inline=1, fontsize=10)
CS.ax.set_xlabel("a (a.u.)")
CS.ax.set_ylabel("c (a.u.)")
plt.show()
return fig
|
Refurbishment and modernization of the existing hospital along with the development of two new wards either side of the current building. The current building (6,240 m2) has a total surface of 5,240 m2. The total extension measured 18,350 m2, of which 7,240 m2 involved a three-storey underground parking area with 232 spaces. The remaining 11,110 m2 area was used for the construction of the hospital extensions. The landscaping of the surrounding area was also included in the project. Capacity: 48 beds, 17 operating theatres. |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016-2017 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ansible.module_utils.basic import * # noqa
from dciclient.v1.api import context as dci_context
from dciclient.v1.api import file as dci_file
import mimetypes
import os
DOCUMENTATION = '''
---
module: dci_upload
short_description: upload logs from local directory to the dci control server
'''
EXAMPLES = '''
- name: upload local logs
dci_upload:
src_dir: <path-logs-directory>
dci_login: <dci login>
dci_password: <dci password>
dci_cs_url: <dci cs url>
dci_status: <dci status>
job_id: <job id>
'''
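# In addition to uploading a whole directory, the module accepts a single file
# through the "file" parameter (see the argument_spec below); a hypothetical
# playbook snippet would look like:
#
#   - name: upload a single log file
#     dci_upload:
#       file: <path-to-log-file>
#       dci_login: <dci login>
#       dci_password: <dci password>
#       dci_cs_url: <dci cs url>
#       dci_status: <dci status>
#       job_id: <job id>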
def upload(context, dci_status, job_id, src_dir=None, file_path=None):
"""This function upload logs to dci control server from local logs
directory.
"""
def upload_directory():
status_dir = '%s/%s' % (src_dir, dci_status)
jobstate_id = open('%s/jobstate_id' % status_dir).read()
logs_files = os.listdir(status_dir)
# sort the file by date in order to upload the files in order
logs_files_paths = [os.path.join(status_dir, f) for f in logs_files]
logs_files_paths.sort(key=lambda x: os.path.getmtime(x))
for log_file_path in logs_files_paths:
name = os.path.basename(log_file_path)
if not log_file_path.endswith('jobstate_id'):
# junit files are associated directly with the job
if log_file_path.endswith('.junit'):
dci_file.create_with_stream(context, name=name,
file_path=log_file_path,
mime='application/junit',
job_id=job_id)
else:
dci_file.create_with_stream(context, name=name,
file_path=log_file_path,
jobstate_id=jobstate_id)
os.remove(log_file_path)
return {'uploaded': logs_files}
def upload_file():
if not os.path.exists(file_path):
return {'failed': True,
'msg': 'file %s does not exist' % file_path}
mimetype, _ = mimetypes.guess_type(file_path)
mimetype = mimetype or 'text/plain'
name = os.path.basename(file_path)
dci_file.create_with_stream(context, name=name,
file_path=file_path,
mime=mimetype,
job_id=job_id)
return {'uploaded': file_path}
if src_dir is not None:
return upload_directory()
else:
return upload_file()
def main():
fields = {
"src_dir": {"required": False, "type": "str"},
"file": {"required": False, "type": "str"},
"dci_status": {"required": True, "type": "str"},
"dci_login": {"required": True, "type": "str"},
"dci_password": {"required": True, "type": "str"},
"dci_cs_url": {"required": True, "type": "str"},
"job_id": {"required": True, "type": "str"}
}
module = AnsibleModule(argument_spec=fields)
src_dir = module.params.get('src_dir')
file = module.params.get('file')
dci_status = module.params['dci_status']
dci_login = module.params['dci_login']
dci_password = module.params['dci_password']
dci_cs_url = module.params['dci_cs_url']
job_id = module.params['job_id']
_dci_context = dci_context.build_dci_context(
dci_cs_url,
dci_login,
dci_password
)
response = upload(_dci_context, dci_status, job_id, src_dir, file)
module.exit_json(changed=True, meta=response)
if __name__ == '__main__':
main()
|
Stardrome is the first web community where members, who take the name of Starlaners, can compare the performances they have achieved on real tracks throughout the world, across the various categories of motor sport.
All you need is a latest-generation STARLANE GPS device to understand your errors, refine your driving technique and, why not, see yourself placed in a global ranking alongside your friends and sportsmen from all over the world.
In Stardrome you can also leave comments on your own laps or on others' performances, making it a real teaching tool for setting up the vehicle or choosing the best components.
» Follow the GUIDED PROCEDURE to register on STARDROME and UPLOAD YOUR LAPS! |
import hashlib
import hmac
from errbot.templating import tenv
GITHUB_EVENTS = ['commit_comment', 'create', 'delete', 'deployment',
'deployment_status', 'fork', 'gollum', 'issue_comment',
'issues', 'member', 'page_build', 'public',
'pull_request_review_comment', 'pull_request', 'push',
'release', 'status', 'team_add', 'watch', '*']
GITLAB_EVENTS = ['push_hook', 'tag_push_hook', 'issue_hook', 'note_hook', 'merge_request_hook']
SUPPORTED_EVENTS = GITHUB_EVENTS + GITLAB_EVENTS
DEFAULT_EVENTS = ['commit_comment', 'issue_comment', 'issues', 'pull_request_review_comment',
'pull_request', 'push', 'push_hook', 'tag_push_hook', 'issue_hook',
'note_hook', 'merge_request_hook']
class CommonGitWebProvider(object):
def create_message(self, body, event_type, repo):
"""
Dispatch the message. Check explicitly with hasattr first. When
using a try/catch with AttributeError errors in the
message_function which result in an AttributeError would cause
us to call msg_generic, which is not what we want.
"""
message_function = 'msg_{0}'.format(event_type)
if hasattr(self, message_function):
message = getattr(self, message_function)(body, repo)
else:
message = self.msg_generic(body, repo, event_type)
return message
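# For example, an incoming "push" event is dispatched to msg_push if that
# method exists on the provider; unknown event types fall back to msg_generic.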
def render_template(self, template='generic', **kwargs):
kwargs['repo_name'] = kwargs.get('repo_name') or self.name
return tenv().get_template('{0}.html'.format(template)).render(**kwargs)
def msg_generic(self, body, repo, event_type):
return self.render_template(
template='generic', body=body, repo=repo, event_type=event_type)
class GithubHandlers(CommonGitWebProvider):
name = 'Github'
@staticmethod
def valid_message(request, token):
"""Validate the signature of the incoming payload.
The header received from Github is in the form of algorithm=hash.
"""
# TODO: Fix GitLab token validation:
# https://docs.gitlab.com/ce/web_hooks/web_hooks.html#secret-token
signature = request.get_header('X-Hub-Signature')
if signature is None:
return False
try:
alg, sig = signature.split('=')
except ValueError:
return False
if alg != 'sha1':
return False
message = request.body.read()
mac = hmac.new(token.encode(), msg=message, digestmod=hashlib.sha1).hexdigest()
return hmac.compare_digest(mac, sig)
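# A minimal sketch of how the sending side produces this signature, useful for
# testing the handler; the token and payload below are made-up values:
#
#   import hashlib, hmac
#   token = 'webhook-secret'
#   payload = b'{"action": "opened"}'
#   digest = hmac.new(token.encode(), msg=payload, digestmod=hashlib.sha1).hexdigest()
#   header_value = 'sha1=' + digest  # sent by GitHub as X-Hub-Signature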
def get_repo(self, body):
return body['repository']['full_name']
def msg_issues(self, body, repo):
return self.render_template(
template='issues', body=body, repo=repo,
action=body['action'],
number=body['issue']['number'],
title=body['issue']['title'],
user=body['issue']['user']['login'],
url=body['issue']['url'],
is_assigned=body['issue']['assignee'],
assignee=body['issue']['assignee']['login'] if body['issue']['assignee'] else None
)
def msg_pull_request(self, body, repo):
action = body['action']
user = body['pull_request']['user']['login']
if action == 'closed' and body['pull_request']['merged']:
user = body['pull_request']['merged_by']['login']
action = 'merged'
if action == 'synchronize':
action = 'updated'
return self.render_template(
template='pull_request', body=body, repo=repo,
action=action, user=user,
number=body['pull_request']['number'],
url=body['pull_request']['html_url'],
merged=body['pull_request']['merged'],
)
def msg_pull_request_review_comment(self, body, repo):
return self.render_template(
template='pull_request_review_comment', body=body, repo=repo,
action='commented' if body['action'] == 'created' else body['action'],
user=body['comment']['user']['login'],
line=body['comment']['position'],
l_url=body['comment']['html_url'],
pr=body['pull_request']['number'],
url=body['pull_request']['html_url'],
)
def msg_push(self, body, repo):
return self.render_template(
template='push', body=body, repo=repo,
user=body['pusher']['name'],
commits=len(body['commits']),
branch=body['ref'].split('/')[-1],
url=body['compare'],
)
def msg_status(*args):
"""Status events are crazy and free form. There's no sane, consistent
or logical way to deal with them."""
return None
def msg_issue_comment(self, body, repo):
return self.render_template(
template='issue_comment', body=body, repo=repo,
action='commented' if body['action'] == 'created' else body['action'],
user=body['comment']['user']['login'],
number=body['issue']['number'],
title=body['issue']['title'],
url=body['issue']['html_url'],
)
def msg_commit_comment(self, body, repo):
return self.render_template(
template='commit_comment', body=body, repo=repo,
user=body['comment']['user']['login'],
url=body['comment']['html_url'],
line=body['comment']['line'],
sha=body['comment']['commit_id'],
)
class GitLabHandlers(CommonGitWebProvider):
name = 'GitLab'
@staticmethod
def valid_message(request, token):
"""Validate the signature of the incoming payload.
The header received from GitLab is in the form of algorithm=hash.
# TODO: Fix GitLab token validation:
# https://docs.gitlab.com/ce/web_hooks/web_hooks.html#secret-token
"""
signature = request.get_header('X-Gitlab-Token')
return True
def get_repo(self, body):
return body['project']['name']
def map_event_type(self, event_type):
return {
'push_hook': 'push',
'issue_hook': 'issue',
'note_hook': 'comment',
}.get(event_type)
def create_message(self, body, event_type, repo):
mapped_event_type = self.map_event_type(event_type)
return super(GitLabHandlers, self).create_message(body, mapped_event_type, repo)
def msg_push(self, body, repo):
if body['commits']:
last_commit_url = body['commits'][-1]['url']
commit_messages = [
dict(msg=c['message'][:80].split('\n')[0], hash=c['id'][:8],
url=c['url']) for c in body['commits']
]
else:
last_commit_url = body['project']['web_url']
commit_messages = []
return self.render_template(
template='push', body=body, repo=repo,
user=body['user_name'],
commits=len(body['commits']),
branch='/'.join(body['ref'].split('/')[2:]),
url=last_commit_url,
commit_messages=commit_messages,
)
def msg_issue(self, body, repo):
action = {'reopen': 'reopened', 'close': 'closed', 'open': 'opened'}.get(body['object_attributes']['action'])
if action:
return self.render_template(
template='issues', body=body, repo=repo,
action=action,
title=body['object_attributes']['title'],
user=body['user']['name'],
url=body['object_attributes']['url']
)
def msg_comment(self, body, repo):
noteable = body['object_attributes']['noteable_type'].lower()
if noteable == "issue":
return self.render_template(
template='issue_comment', body=body, repo=repo,
user=body['user']['name'],
url=body['object_attributes']['url'],
action='commented',
title=body['issue']['title']
)
elif noteable == "commit":
return self.render_template(
template='commit_comment', body=body, repo=repo,
user=body['user']['name'],
url=body['object_attributes']['url'],
line=None,
)
elif noteable == "mergerequest":
return self.render_template(
template='merge_request_comment', body=body, repo=repo,
user=body['user']['name'],
url=body['object_attributes']['url'],
)
|
Stage 2's Undisciplined Pursuit of More by the hybrid regime is hands down the most evident to peasants on this tiny island. The chapter that shocked me the most is Stage 2.
The chart shows a steep rising tide for Stage 2 but we already know SG is in decline. This type of growth is akin to cancerous growth.
While there are other factors that lead to a company's fall, Jim Collins' research reveals that, for the majority of fallen companies, it is the undisciplined pursuit of more.
Key points by Jim Collins are in blue.
What is Undisciplined Pursuit of More (growth) according to Jim Collins?
Discontinuous leaps into arenas for which you have no burning passion.
Taking action inconsistent with your core values.
Investing heavily in new arenas where you cannot attain distinctive capability, better than your competitors.
Launching headlong into activities that do not fit with your economic or resource engine.
Neglecting core business while leaping after exciting new adventures.
Using the organization primarily as a vehicle to increase one's own personal success - more $$$, more fame, more power - at the expense of the organization's long-term success.
Compromising on values or losing sight of core purpose in pursuit of growth & expansion.
Packard's law: No company can consistently grow revenues faster than its ability to get enough of the right people to implement that growth & still become a great company.
The hybrid regime's revenues are growing consistently, for example via the exorbitant World Cup subscription, regular public transport fare hikes despite declining service, more & more road tolls etc.
However, the hybrid regime is no longer a great political party as it can no longer get enough of the right people. It is scraping the bottom of the barrel in terms of personnel when Kate Spade girl Tin Pei Ling is put up as an election candidate. And not forgetting the Yesmen, aka the military generals.
You break Packard's law & begin to fill key seats with the wrong people; to compensate for the wrong people's inadequacies, you institute bureaucratic procedures; this, in turn, drives away the right people (because they chafe under the bureaucracy or cannot tolerate working with less competent people or both); this then invites more bureaucracy to compensate for having more of the wrong people, which then drives away more of the right people, & a culture of bureaucratic mediocrity gradually replaces a culture of disciplined excellence. When bureaucratic rules erode an ethic of freedom & responsibility within a framework of core values & demanding standards, you've become infected with a disease of mediocrity.
An excellent explanation by Jim Collins. It clearly explains why better candidates flocked to the alternative political parties in the last general election. Look at the disease of mediocrity infecting the hybrid regime, which spreads the disease down to the populace via their undisciplined pursuit of more.
If a great company consistently grows revenues faster than its ability to get enough of the right people to implement that growth, it will not simply stagnate; it will fall.
Remember the golden parachutes of the top management, which we don't have. Rather than letting the top management bring us down with their misguided policies, I would say at least 50% of them need to be voted out in the next election so as to have a buffer of 33% of parliamentary seats in alternative parties' hands.
Any exceptional enterprise depends first & foremost upon having self-managed & self-motivated people - the #1 ingredient for a culture of discipline.
Self-motivated? No self-motivated person is joining the hybrid regime, forcing it to resort to paying the world's highest political salaries. And the type of people they attract? --> Yesmen.
Stage 2 overreaching tends to increase after a legendary leader steps away. Perhaps those who assume power next feel extra pressure to be bold, visionary & aggressive, to live up to the implicit expectations of their predecessors or the irrational expectations of Wall Street, which accentuates Stage 2. Or perhaps legendary leaders pick successors less capable in a subconscious (or maybe even conscious) strategy to increase their own status by comparison. But whatever the underlying dynamic, when companies engage in Stage 2 overreaching & bungle the transfer of power, they tend to hurtle downward toward Stage 3 & beyond.
Best leaders we've studied had a peculiar genius for seeing themselves as not all that important, recognizing the need to build an executive team & to craft a culture based on core values that do not depend upon a single heroic leader. But in cases of decline, we find a more pronounced role for the powerful individual, & not for the better.
I reckon Jim Collins would consider this the greatest failing of the old man. It is only 5 days away from National Day & people are wondering about the condition of the old man when he shows up. According to Jim Collins' definition of a great leader in his book 'Good to Great', the old man is considered a Level 4 leader. He hasn't reached the pinnacle of a Level 5 leader.
Working folks or those who have been through national service might have experienced the 'Super On' new boss who wants to outshine his predecessor. The 'Super On' new boss then implements audacious new initiatives. It might be good or it might be bad. How do you know it is bad? Well, that is what Jim Collins set out to research: its warning signs.
While no leader can single-handedly build an enduring great company, the wrong leader vested with power can almost single-handedly bring a company down.
Again, Jim Collins does not believe in a lone heroic leader. Many peasants are brainwashed into thinking the old man was instrumental in building SG Inc up, with heaps of credit due to him.
The more warning signs, the higher the likelihood of disease. It might be a normal fever, but if there are other symptoms associated with malaria, then malaria is highly likely.
Success creates pressure for more growth, setting up a vicious cycle of expectations; this strains people, the culture & systems to breaking point; unable to deliver consistent tactical excellence, the institution frays at the edges.
Institutions are fraying, with the recent spate of corruption scandals in SG Inc's much vaunted civil service. The police were caught totally unprepared by the Little India riots & the escape of a limping terrorist.
1) Do they ignite passion & fit with the company's core values?
2) Can the organization be the best in the world at these activities or in these arenas?
3) Will these activities help drive the organization's economic or resource engine?
The people joining the hybrid regime are not passionate about serving the country. The hybrid regime's core values have mutated beyond recognition. Its core value is now $$$ & more $$$.
Losing the right people and/or growing beyond the organization's ability to get enough people to execute on that growth with excellence.
It is cancerous growth we are now experiencing. Gone are the days of growth with excellence.
Organization responds to increasing costs by increasing prices & revenues rather than increasing discipline.
Seeing peasants as cows to be milked dry for lavish, wasteful projects such as Bay Gardens & the shopping mall at the airport.
System of bureaucratic rules subverts the ethic of freedom & responsibility that marks a culture of discipline; people increasingly think in terms of 'jobs' rather than responsibilities.
'This is outside of my job scope, hence it is not my business.' Little wonder our taxpayers' $$$ go into feeding those overbloated bureaucracies with their multiple permanent secretaries, deputy directors etc.
The organization experiences leadership-transition difficulties, be they in the form of poor succession planning, failure to groom leaders from within, political turmoil, bad luck or an unwise selection of successors.
As explained earlier, the old man is considered a Level 4 leader.
People in power allocate more for themselves or their constituents - more $$$, more privileges, more fame, more of the spoils of success - seeking to capitalize as much as possible in the short term, rather than investing primarily in building for greatness decades into the future.
Check out the book "Collapse" by Jared Diamond too if you haven't done so. |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: See license.txt
from __future__ import unicode_literals
import frappe
import frappe.defaults
import frappe.permissions
from frappe.model.document import Document
from frappe.utils import get_fullname
from frappe import _
exclude_from_linked_with = True
class Feed(Document):
no_feed_on_delete = True
def validate(self):
if not (self.reference_doctype and self.reference_name):
# reset both if even one is missing
self.reference_doctype = self.reference_name = None
def on_doctype_update():
if not frappe.db.sql("""show index from `tabFeed`
where Key_name="feed_doctype_docname_index" """):
frappe.db.commit()
frappe.db.sql("""alter table `tabFeed`
add index feed_doctype_docname_index(doc_type, doc_name)""")
def get_permission_query_conditions(user):
if not user: user = frappe.session.user
use_user_permissions = frappe.permissions.apply_user_permissions("Feed", "read", user)
if not use_user_permissions:
return ""
conditions = ['`tabFeed`.owner="{user}" or `tabFeed`.doc_owner="{user}"'.format(user=frappe.db.escape(user))]
user_permissions = frappe.defaults.get_user_permissions(user)
can_read = frappe.get_user().get_can_read()
can_read_doctypes = ['"{}"'.format(doctype) for doctype in
list(set(can_read) - set(user_permissions.keys()))]
if can_read_doctypes:
conditions += ["tabFeed.doc_type in ({})".format(", ".join(can_read_doctypes))]
if user_permissions:
can_read_docs = []
for doctype, names in user_permissions.items():
for n in names:
can_read_docs.append('"{}|{}"'.format(doctype, n))
if can_read_docs:
conditions.append("concat_ws('|', tabFeed.doc_type, tabFeed.doc_name) in ({})".format(
", ".join(can_read_docs)))
return "(" + " or ".join(conditions) + ")"
def has_permission(doc, user):
return frappe.has_permission(doc.doc_type, "read", doc.doc_name, user=user)
def update_feed(doc, method=None):
"adds a new feed"
if frappe.flags.in_patch or frappe.flags.in_install or frappe.flags.in_import:
return
if doc.doctype == "Feed" or doc.meta.issingle:
return
if hasattr(doc, "get_feed"):
feed = doc.get_feed()
if feed:
if isinstance(feed, basestring):
feed = {"subject": feed}
feed = frappe._dict(feed)
doctype = feed.doctype or doc.doctype
name = feed.name or doc.name
# delete earlier feed
frappe.db.sql("""delete from tabFeed
where doc_type=%s and doc_name=%s
and ifnull(feed_type,'')=''""", (doctype, name))
frappe.get_doc({
"doctype": "Feed",
"feed_type": feed.feed_type or "",
"doc_type": doctype,
"doc_name": name,
"subject": feed.subject,
"full_name": get_fullname(doc.owner),
"doc_owner": frappe.db.get_value(doctype, name, "owner"),
"reference_doctype": feed.reference_doctype,
"reference_name": feed.reference_name
}).insert(ignore_permissions=True)
def login_feed(login_manager):
frappe.get_doc({
"doctype": "Feed",
"feed_type": "Login",
"subject": _("{0} logged in").format(get_fullname(login_manager.user)),
"full_name": get_fullname(login_manager.user)
}).insert(ignore_permissions=True)
|
Mahindra XUV500 price in Jajpur (Orissa) starts at ₹ 18.82 Lakhs and goes up to ₹ 22.98 Lakhs. Petrol XUV500 price in Jajpur (Orissa) starts at ₹ 18.82 Lakhs. Diesel XUV500 price in Jajpur (Orissa) starts at ₹ 15.17 Lakhs. |
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Utilities for requesting information for a gerrit server via https.
https://gerrit-review.googlesource.com/Documentation/rest-api.html
"""
import base64
import contextlib
import cookielib
import httplib # Still used for its constants.
import json
import logging
import netrc
import os
import re
import socket
import stat
import sys
import tempfile
import time
import urllib
import urlparse
from cStringIO import StringIO
import gclient_utils
import subprocess2
from third_party import httplib2
LOGGER = logging.getLogger()
# With a starting sleep time of 1 second, 2^n exponential backoff, and six
# total tries, the sleep time between the first and last tries will be 31s.
TRY_LIMIT = 6
# Controls the transport protocol used to communicate with gerrit.
# This is parameterized primarily to enable GerritTestCase.
GERRIT_PROTOCOL = 'https'
class GerritError(Exception):
"""Exception class for errors commuicating with the gerrit-on-borg service."""
def __init__(self, http_status, *args, **kwargs):
super(GerritError, self).__init__(*args, **kwargs)
self.http_status = http_status
self.message = '(%d) %s' % (self.http_status, self.message)
class GerritAuthenticationError(GerritError):
"""Exception class for authentication errors during Gerrit communication."""
def _QueryString(params, first_param=None):
"""Encodes query parameters in the key:val[+key:val...] format specified here:
https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#list-changes
"""
q = [urllib.quote(first_param)] if first_param else []
q.extend(['%s:%s' % (key, val) for key, val in params])
return '+'.join(q)
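# For example (illustrative values only):
#   _QueryString([('is', 'owner'), ('status', 'open')], first_param='12345')
# returns '12345+is:owner+status:open', the key:val[+key:val...] form that the
# Gerrit changes endpoint expects.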
def GetConnectionObject(protocol=None):
if protocol is None:
protocol = GERRIT_PROTOCOL
if protocol in ('http', 'https'):
return httplib2.Http()
else:
raise RuntimeError(
"Don't know how to work with protocol '%s'" % protocol)
class Authenticator(object):
"""Base authenticator class for authenticator implementations to subclass."""
def get_auth_header(self, host):
raise NotImplementedError()
@staticmethod
def get():
"""Returns: (Authenticator) The identified Authenticator to use.
Probes the local system and its environment and identifies the
Authenticator instance to use.
"""
if GceAuthenticator.is_gce():
return GceAuthenticator()
return CookiesAuthenticator()
class CookiesAuthenticator(Authenticator):
"""Authenticator implementation that uses ".netrc" or ".gitcookies" for token.
Expected case for developer workstations.
"""
def __init__(self):
self.netrc = self._get_netrc()
self.gitcookies = self._get_gitcookies()
@classmethod
def get_new_password_url(cls, host):
assert not host.startswith('http')
# Assume *.googlesource.com pattern.
parts = host.split('.')
if not parts[0].endswith('-review'):
parts[0] += '-review'
return 'https://%s/new-password' % ('.'.join(parts))
@classmethod
def get_new_password_message(cls, host):
assert not host.startswith('http')
# Assume *.googlesource.com pattern.
parts = host.split('.')
if not parts[0].endswith('-review'):
parts[0] += '-review'
url = 'https://%s/new-password' % ('.'.join(parts))
return 'You can (re)generate your credentials by visiting %s' % url
@classmethod
def get_netrc_path(cls):
path = '_netrc' if sys.platform.startswith('win') else '.netrc'
return os.path.expanduser(os.path.join('~', path))
@classmethod
def _get_netrc(cls):
# Buffer the '.netrc' path. Use an empty file if it doesn't exist.
path = cls.get_netrc_path()
content = ''
if os.path.exists(path):
st = os.stat(path)
if st.st_mode & (stat.S_IRWXG | stat.S_IRWXO):
print >> sys.stderr, (
'WARNING: netrc file %s cannot be used because its file '
'permissions are insecure. netrc file permissions should be '
'600.' % path)
with open(path) as fd:
content = fd.read()
# Load the '.netrc' file. We strip comments from it because processing them
# can trigger a bug in Windows. See crbug.com/664664.
content = '\n'.join(l for l in content.splitlines()
if l.strip() and not l.strip().startswith('#'))
with tempdir() as tdir:
netrc_path = os.path.join(tdir, 'netrc')
with open(netrc_path, 'w') as fd:
fd.write(content)
os.chmod(netrc_path, (stat.S_IRUSR | stat.S_IWUSR))
return cls._get_netrc_from_path(netrc_path)
@classmethod
def _get_netrc_from_path(cls, path):
try:
return netrc.netrc(path)
except IOError:
print >> sys.stderr, 'WARNING: Could not read netrc file %s' % path
return netrc.netrc(os.devnull)
except netrc.NetrcParseError as e:
print >> sys.stderr, ('ERROR: Cannot use netrc file %s due to a '
'parsing error: %s' % (path, e))
return netrc.netrc(os.devnull)
@classmethod
def get_gitcookies_path(cls):
if os.getenv('GIT_COOKIES_PATH'):
return os.getenv('GIT_COOKIES_PATH')
try:
return subprocess2.check_output(
['git', 'config', '--path', 'http.cookiefile']).strip()
except subprocess2.CalledProcessError:
return os.path.join(os.environ['HOME'], '.gitcookies')
@classmethod
def _get_gitcookies(cls):
gitcookies = {}
path = cls.get_gitcookies_path()
if not os.path.exists(path):
return gitcookies
try:
f = open(path, 'rb')
except IOError:
return gitcookies
with f:
for line in f:
try:
fields = line.strip().split('\t')
if line.strip().startswith('#') or len(fields) != 7:
continue
domain, xpath, key, value = fields[0], fields[2], fields[5], fields[6]
if xpath == '/' and key == 'o':
login, secret_token = value.split('=', 1)
gitcookies[domain] = (login, secret_token)
except (IndexError, ValueError, TypeError) as exc:
LOGGER.warning(exc)
return gitcookies
def _get_auth_for_host(self, host):
for domain, creds in self.gitcookies.iteritems():
if cookielib.domain_match(host, domain):
return (creds[0], None, creds[1])
return self.netrc.authenticators(host)
def get_auth_header(self, host):
auth = self._get_auth_for_host(host)
if auth:
return 'Basic %s' % (base64.b64encode('%s:%s' % (auth[0], auth[2])))
return None
def get_auth_email(self, host):
"""Best effort parsing of email to be used for auth for the given host."""
auth = self._get_auth_for_host(host)
if not auth:
return None
login = auth[0]
# login typically looks like 'git-xxx.example.com'
if not login.startswith('git-') or '.' not in login:
return None
username, domain = login[len('git-'):].split('.', 1)
return '%s@%s' % (username, domain)
# Backwards compatibility just in case somebody imports this outside of
# depot_tools.
NetrcAuthenticator = CookiesAuthenticator
class GceAuthenticator(Authenticator):
"""Authenticator implementation that uses GCE metadata service for token.
"""
_INFO_URL = 'http://metadata.google.internal'
_ACQUIRE_URL = ('%s/computeMetadata/v1/instance/'
'service-accounts/default/token' % _INFO_URL)
_ACQUIRE_HEADERS = {"Metadata-Flavor": "Google"}
_cache_is_gce = None
_token_cache = None
_token_expiration = None
@classmethod
def is_gce(cls):
if os.getenv('SKIP_GCE_AUTH_FOR_GIT'):
return False
if cls._cache_is_gce is None:
cls._cache_is_gce = cls._test_is_gce()
return cls._cache_is_gce
@classmethod
def _test_is_gce(cls):
# Based on https://cloud.google.com/compute/docs/metadata#runninggce
try:
resp, _ = cls._get(cls._INFO_URL)
except (socket.error, httplib2.ServerNotFoundError):
# Could not resolve URL.
return False
return resp.get('metadata-flavor') == 'Google'
@staticmethod
def _get(url, **kwargs):
next_delay_sec = 1
for i in xrange(TRY_LIMIT):
p = urlparse.urlparse(url)
c = GetConnectionObject(protocol=p.scheme)
resp, contents = c.request(url, 'GET', **kwargs)
LOGGER.debug('GET [%s] #%d/%d (%d)', url, i+1, TRY_LIMIT, resp.status)
if resp.status < httplib.INTERNAL_SERVER_ERROR:
return (resp, contents)
# Retry server error status codes.
LOGGER.warn('Encountered server error')
if TRY_LIMIT - i > 1:
LOGGER.info('Will retry in %d seconds (%d more times)...',
next_delay_sec, TRY_LIMIT - i - 1)
time.sleep(next_delay_sec)
next_delay_sec *= 2
@classmethod
def _get_token_dict(cls):
if cls._token_cache:
# Return the cached token unless it expires within 25 seconds, in which
# case fall through and refresh it.
if cls._token_expiration >= time.time() + 25:
return cls._token_cache
resp, contents = cls._get(cls._ACQUIRE_URL, headers=cls._ACQUIRE_HEADERS)
if resp.status != httplib.OK:
return None
cls._token_cache = json.loads(contents)
cls._token_expiration = cls._token_cache['expires_in'] + time.time()
return cls._token_cache
def get_auth_header(self, _host):
token_dict = self._get_token_dict()
if not token_dict:
return None
return '%(token_type)s %(access_token)s' % token_dict
def CreateHttpConn(host, path, reqtype='GET', headers=None, body=None):
"""Opens an https connection to a gerrit service, and sends a request."""
headers = headers or {}
bare_host = host.partition(':')[0]
auth = Authenticator.get().get_auth_header(bare_host)
if auth:
headers.setdefault('Authorization', auth)
else:
LOGGER.debug('No authorization found for %s.' % bare_host)
url = path
if not url.startswith('/'):
url = '/' + url
if 'Authorization' in headers and not url.startswith('/a/'):
url = '/a%s' % url
if body:
body = json.JSONEncoder().encode(body)
headers.setdefault('Content-Type', 'application/json')
if LOGGER.isEnabledFor(logging.DEBUG):
LOGGER.debug('%s %s://%s%s' % (reqtype, GERRIT_PROTOCOL, host, url))
for key, val in headers.iteritems():
if key == 'Authorization':
val = 'HIDDEN'
LOGGER.debug('%s: %s' % (key, val))
if body:
LOGGER.debug(body)
conn = GetConnectionObject()
conn.req_host = host
conn.req_params = {
'uri': urlparse.urljoin('%s://%s' % (GERRIT_PROTOCOL, host), url),
'method': reqtype,
'headers': headers,
'body': body,
}
return conn
def ReadHttpResponse(conn, accept_statuses=frozenset([200])):
"""Reads an http response from a connection into a string buffer.
Args:
conn: An Http object created by CreateHttpConn above.
accept_statuses: Treat any of these statuses as success. Default: [200]
Common additions include 204, 400, and 404.
Returns: A string buffer containing the connection's reply.
"""
sleep_time = 1
for idx in range(TRY_LIMIT):
response, contents = conn.request(**conn.req_params)
# Check if this is an authentication issue.
www_authenticate = response.get('www-authenticate')
if (response.status in (httplib.UNAUTHORIZED, httplib.FOUND) and
www_authenticate):
auth_match = re.search('realm="([^"]+)"', www_authenticate, re.I)
host = auth_match.group(1) if auth_match else conn.req_host
reason = ('Authentication failed. Please make sure your .gitcookies file '
'has credentials for %s' % host)
raise GerritAuthenticationError(response.status, reason)
# If response.status < 500 then the result is final; break retry loop.
# If the response is 404, it might be because of replication lag, so
# keep trying anyway.
if ((response.status < 500 and response.status != 404)
or response.status in accept_statuses):
LOGGER.debug('got response %d for %s %s', response.status,
conn.req_params['method'], conn.req_params['uri'])
# If 404 was in accept_statuses, then it's expected that the file might
# not exist, so don't return the gitiles error page because that's not the
# "content" that was actually requested.
if response.status == 404:
contents = ''
break
# A status >=500 is assumed to be a possible transient error; retry.
http_version = 'HTTP/%s' % ('1.1' if response.version == 11 else '1.0')
LOGGER.warn('A transient error occurred while querying %s:\n'
'%s %s %s\n'
'%s %d %s',
conn.req_host, conn.req_params['method'],
conn.req_params['uri'],
http_version, http_version, response.status, response.reason)
if TRY_LIMIT - idx > 1:
LOGGER.info('Will retry in %d seconds (%d more times)...',
sleep_time, TRY_LIMIT - idx - 1)
time.sleep(sleep_time)
sleep_time = sleep_time * 2
if response.status not in accept_statuses:
if response.status in (401, 403):
print('Your Gerrit credentials might be misconfigured. Try: \n'
' git cl creds-check')
reason = '%s: %s' % (response.reason, contents)
raise GerritError(response.status, reason)
return StringIO(contents)
def ReadHttpJsonResponse(conn, accept_statuses=frozenset([200])):
"""Parses an https response as json."""
fh = ReadHttpResponse(conn, accept_statuses)
# The first line of the response should always be: )]}'
s = fh.readline()
if s and s.rstrip() != ")]}'":
raise GerritError(200, 'Unexpected json output: %s' % s)
s = fh.read()
if not s:
return None
return json.loads(s)
def QueryChanges(host, params, first_param=None, limit=None, o_params=None,
start=None):
"""
Queries a gerrit-on-borg server for changes matching query terms.
Args:
params: A list of key:value pairs for search parameters, as documented
here (e.g. ('is', 'owner') for a parameter 'is:owner'):
https://gerrit-review.googlesource.com/Documentation/user-search.html#search-operators
first_param: A change identifier
limit: Maximum number of results to return.
start: how many changes to skip (starting with the most recent)
o_params: A list of additional output specifiers, as documented here:
https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#list-changes
Returns:
A list of json-decoded query results.
"""
# Note that no attempt is made to escape special characters; YMMV.
if not params and not first_param:
raise RuntimeError('QueryChanges requires search parameters')
path = 'changes/?q=%s' % _QueryString(params, first_param)
if start:
path = '%s&start=%s' % (path, start)
if limit:
path = '%s&n=%d' % (path, limit)
if o_params:
path = '%s&%s' % (path, '&'.join(['o=%s' % p for p in o_params]))
return ReadHttpJsonResponse(CreateHttpConn(host, path))
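# A minimal usage sketch; the host and search terms are made-up examples and
# '_number'/'subject' are standard fields of the returned change dicts:
#
#   changes = QueryChanges('example-review.googlesource.com',
#                          [('status', 'open'), ('owner', 'self')],
#                          limit=10, o_params=['CURRENT_REVISION'])
#   for cl in changes:
#     print cl['_number'], cl['subject']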
def GenerateAllChanges(host, params, first_param=None, limit=500,
o_params=None, start=None):
"""
Queries a gerrit-on-borg server for all the changes matching the query terms.
WARNING: this is unreliable if a change matching the query is modified while
this function is being called.
A single query to gerrit-on-borg is limited on the number of results by the
limit parameter on the request (see QueryChanges) and the server maximum
limit.
Args:
params, first_param: Refer to QueryChanges().
limit: Maximum number of requested changes per query.
o_params: Refer to QueryChanges().
start: Refer to QueryChanges().
Returns:
A generator object to the list of returned changes.
"""
already_returned = set()
def at_most_once(cls):
for cl in cls:
if cl['_number'] not in already_returned:
already_returned.add(cl['_number'])
yield cl
start = start or 0
cur_start = start
more_changes = True
while more_changes:
# This will fetch changes[start..start+limit] sorted by most recently
# updated. Since the rank of any change in this list can be changed any time
# (say user posting comment), subsequent calls may overlap like this:
# > initial order ABCDEFGH
# query[0..3] => ABC
# > E gets updated. New order: EABCDFGH
# query[3..6] => CDF # C is a dup
# query[6..9] => GH # E is missed.
page = QueryChanges(host, params, first_param, limit, o_params,
cur_start)
for cl in at_most_once(page):
yield cl
more_changes = [cl for cl in page if '_more_changes' in cl]
if len(more_changes) > 1:
raise GerritError(
200,
'Received %d changes with a _more_changes attribute set but should '
'receive at most one.' % len(more_changes))
if more_changes:
cur_start += len(page)
# If we paged through, query again the first page which in most circumstances
# will fetch all changes that were modified while this function was run.
if start != cur_start:
page = QueryChanges(host, params, first_param, limit, o_params, start)
for cl in at_most_once(page):
yield cl
def MultiQueryChanges(host, params, change_list, limit=None, o_params=None,
start=None):
"""Initiate a query composed of multiple sets of query parameters."""
if not change_list:
raise RuntimeError(
"MultiQueryChanges requires a list of change numbers/id's")
q = ['q=%s' % '+OR+'.join([urllib.quote(str(x)) for x in change_list])]
if params:
q.append(_QueryString(params))
if limit:
q.append('n=%d' % limit)
if start:
q.append('S=%s' % start)
if o_params:
q.extend(['o=%s' % p for p in o_params])
path = 'changes/?%s' % '&'.join(q)
try:
result = ReadHttpJsonResponse(CreateHttpConn(host, path))
except GerritError as e:
msg = '%s:\n%s' % (e.message, path)
raise GerritError(e.http_status, msg)
return result
def GetGerritFetchUrl(host):
"""Given a gerrit host name returns URL of a gerrit instance to fetch from."""
return '%s://%s/' % (GERRIT_PROTOCOL, host)
def GetChangePageUrl(host, change_number):
"""Given a gerrit host name and change number, return change page url."""
return '%s://%s/#/c/%d/' % (GERRIT_PROTOCOL, host, change_number)
def GetChangeUrl(host, change):
"""Given a gerrit host name and change id, return an url for the change."""
return '%s://%s/a/changes/%s' % (GERRIT_PROTOCOL, host, change)
def GetChange(host, change):
"""Query a gerrit server for information about a single change."""
path = 'changes/%s' % change
return ReadHttpJsonResponse(CreateHttpConn(host, path))
def GetChangeDetail(host, change, o_params=None):
"""Query a gerrit server for extended information about a single change."""
path = 'changes/%s/detail' % change
if o_params:
path += '?%s' % '&'.join(['o=%s' % p for p in o_params])
return ReadHttpJsonResponse(CreateHttpConn(host, path))
def GetChangeCommit(host, change, revision='current'):
"""Query a gerrit server for a revision associated with a change."""
path = 'changes/%s/revisions/%s/commit?links' % (change, revision)
return ReadHttpJsonResponse(CreateHttpConn(host, path))
def GetChangeCurrentRevision(host, change):
"""Get information about the latest revision for a given change."""
return QueryChanges(host, [], change, o_params=('CURRENT_REVISION',))
def GetChangeRevisions(host, change):
"""Get information about all revisions associated with a change."""
return QueryChanges(host, [], change, o_params=('ALL_REVISIONS',))
def GetChangeReview(host, change, revision=None):
"""Get the current review information for a change."""
if not revision:
jmsg = GetChangeRevisions(host, change)
if not jmsg:
return None
elif len(jmsg) > 1:
raise GerritError(200, 'Multiple changes found for ChangeId %s.' % change)
revision = jmsg[0]['current_revision']
path = 'changes/%s/revisions/%s/review' % (change, revision)
return ReadHttpJsonResponse(CreateHttpConn(host, path))
def GetChangeComments(host, change):
"""Get the line- and file-level comments on a change."""
path = 'changes/%s/comments' % change
return ReadHttpJsonResponse(CreateHttpConn(host, path))
def AbandonChange(host, change, msg=''):
"""Abandon a gerrit change."""
path = 'changes/%s/abandon' % change
body = {'message': msg} if msg else {}
conn = CreateHttpConn(host, path, reqtype='POST', body=body)
return ReadHttpJsonResponse(conn)
def RestoreChange(host, change, msg=''):
"""Restore a previously abandoned change."""
path = 'changes/%s/restore' % change
body = {'message': msg} if msg else {}
conn = CreateHttpConn(host, path, reqtype='POST', body=body)
return ReadHttpJsonResponse(conn)
def SubmitChange(host, change, wait_for_merge=True):
"""Submits a gerrit change via Gerrit."""
path = 'changes/%s/submit' % change
body = {'wait_for_merge': wait_for_merge}
conn = CreateHttpConn(host, path, reqtype='POST', body=body)
return ReadHttpJsonResponse(conn)
def HasPendingChangeEdit(host, change):
conn = CreateHttpConn(host, 'changes/%s/edit' % change)
try:
ReadHttpResponse(conn)
except GerritError as e:
# 204 No Content means no pending change.
if e.http_status == 204:
return False
raise
return True
def DeletePendingChangeEdit(host, change):
conn = CreateHttpConn(host, 'changes/%s/edit' % change, reqtype='DELETE')
# On success, gerrit returns status 204; if the edit was already deleted it
# returns 404. Anything else is an error.
ReadHttpResponse(conn, accept_statuses=[204, 404])
def SetCommitMessage(host, change, description, notify='ALL'):
"""Updates a commit message."""
assert notify in ('ALL', 'NONE')
path = 'changes/%s/message' % change
body = {'message': description, 'notify': notify}
conn = CreateHttpConn(host, path, reqtype='PUT', body=body)
try:
ReadHttpResponse(conn, accept_statuses=[200, 204])
except GerritError as e:
raise GerritError(
e.http_status,
'Received unexpected http status while editing message '
'in change %s' % change)
def GetReviewers(host, change):
"""Get information about all reviewers attached to a change."""
path = 'changes/%s/reviewers' % change
return ReadHttpJsonResponse(CreateHttpConn(host, path))
def GetReview(host, change, revision):
"""Get review information about a specific revision of a change."""
path = 'changes/%s/revisions/%s/review' % (change, revision)
return ReadHttpJsonResponse(CreateHttpConn(host, path))
def AddReviewers(host, change, reviewers=None, ccs=None, notify=True,
accept_statuses=frozenset([200, 400, 422])):
"""Add reviewers to a change."""
if not reviewers and not ccs:
return None
if not change:
return None
reviewers = frozenset(reviewers or [])
ccs = frozenset(ccs or [])
path = 'changes/%s/revisions/current/review' % change
body = {
'drafts': 'KEEP',
'reviewers': [],
'notify': 'ALL' if notify else 'NONE',
}
for r in sorted(reviewers | ccs):
state = 'REVIEWER' if r in reviewers else 'CC'
body['reviewers'].append({
'reviewer': r,
'state': state,
'notify': 'NONE', # We handled `notify` argument above.
})
conn = CreateHttpConn(host, path, reqtype='POST', body=body)
# Gerrit will return 400 if one or more of the requested reviewers are
# unprocessable. We read the response object to see which were rejected,
# warn about them, and retry with the remainder.
resp = ReadHttpJsonResponse(conn, accept_statuses=accept_statuses)
errored = set()
for result in resp.get('reviewers', {}).itervalues():
r = result.get('input')
state = 'REVIEWER' if r in reviewers else 'CC'
if result.get('error'):
errored.add(r)
LOGGER.warn('Note: "%s" not added as a %s' % (r, state.lower()))
if errored:
# Try again, adding only those that didn't fail, and only accepting 200.
AddReviewers(host, change, reviewers=(reviewers-errored),
ccs=(ccs-errored), notify=notify, accept_statuses=[200])
def RemoveReviewers(host, change, remove=None):
"""Remove reveiewers from a change."""
if not remove:
return
if isinstance(remove, basestring):
remove = (remove,)
for r in remove:
path = 'changes/%s/reviewers/%s' % (change, r)
conn = CreateHttpConn(host, path, reqtype='DELETE')
try:
ReadHttpResponse(conn, accept_statuses=[204])
except GerritError as e:
raise GerritError(
e.http_status,
'Received unexpected http status while deleting reviewer "%s" '
'from change %s' % (r, change))
def SetReview(host, change, msg=None, labels=None, notify=None, ready=None):
"""Set labels and/or add a message to a code review."""
if not msg and not labels:
return
path = 'changes/%s/revisions/current/review' % change
body = {'drafts': 'KEEP'}
if msg:
body['message'] = msg
if labels:
body['labels'] = labels
if notify is not None:
body['notify'] = 'ALL' if notify else 'NONE'
if ready:
body['ready'] = True
conn = CreateHttpConn(host, path, reqtype='POST', body=body)
response = ReadHttpJsonResponse(conn)
if labels:
for key, val in labels.iteritems():
if ('labels' not in response or key not in response['labels'] or
int(response['labels'][key]) != int(val)):
raise GerritError(200, 'Unable to set "%s" label on change %s.' % (
key, change))
def ResetReviewLabels(host, change, label, value='0', message=None,
notify=None):
"""Reset the value of a given label for all reviewers on a change."""
# This is tricky, because we want to work on the "current revision", but
# there's always the risk that "current revision" will change in between
# API calls. So, we check "current revision" at the beginning and end; if
# it has changed, raise an exception.
jmsg = GetChangeCurrentRevision(host, change)
if not jmsg:
raise GerritError(
200, 'Could not get review information for change "%s"' % change)
value = str(value)
revision = jmsg[0]['current_revision']
path = 'changes/%s/revisions/%s/review' % (change, revision)
message = message or (
'%s label set to %s programmatically.' % (label, value))
jmsg = GetReview(host, change, revision)
if not jmsg:
raise GerritError(200, 'Could not get review information for revision %s '
'of change %s' % (revision, change))
for review in jmsg.get('labels', {}).get(label, {}).get('all', []):
if str(review.get('value', value)) != value:
body = {
'drafts': 'KEEP',
'message': message,
'labels': {label: value},
'on_behalf_of': review['_account_id'],
}
if notify:
body['notify'] = notify
conn = CreateHttpConn(
host, path, reqtype='POST', body=body)
response = ReadHttpJsonResponse(conn)
if str(response['labels'][label]) != value:
username = review.get('email', jmsg.get('name', ''))
raise GerritError(200, 'Unable to set %s label for user "%s"'
' on change %s.' % (label, username, change))
jmsg = GetChangeCurrentRevision(host, change)
if not jmsg:
raise GerritError(
200, 'Could not get review information for change "%s"' % change)
elif jmsg[0]['current_revision'] != revision:
raise GerritError(200, 'While resetting labels on change "%s", '
'a new patchset was uploaded.' % change)
def CreateGerritBranch(host, project, branch, commit):
"""
Create a new branch from given project and commit
https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#create-branch
Returns:
A JSON with 'ref' key
"""
path = 'projects/%s/branches/%s' % (project, branch)
body = {'revision': commit}
conn = CreateHttpConn(host, path, reqtype='PUT', body=body)
response = ReadHttpJsonResponse(conn, accept_statuses=[201])
if response:
return response
raise GerritError(200, 'Unable to create gerrit branch')
def GetGerritBranch(host, project, branch):
"""
Get a branch from given project and commit
https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#get-branch
Returns:
A JSON object with 'revision' key
"""
path = 'projects/%s/branches/%s' % (project, branch)
conn = CreateHttpConn(host, path, reqtype='GET')
response = ReadHttpJsonResponse(conn)
if response:
return response
raise GerritError(200, 'Unable to get gerrit branch')
def GetAccountDetails(host, account_id='self'):
"""Returns details of the account.
If account_id is not given, uses magic value 'self' which corresponds to
whichever account user is authenticating as.
Documentation:
https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html#get-account
"""
if account_id != 'self':
account_id = int(account_id)
conn = CreateHttpConn(host, '/accounts/%s' % account_id)
return ReadHttpJsonResponse(conn)
def PercentEncodeForGitRef(original):
"""Apply percent-encoding for strings sent to gerrit via git ref metadata.
The encoding used is based on but stricter than URL encoding (Section 2.1
of RFC 3986). The only non-escaped characters are alphanumerics, and
'SPACE' (U+0020) can be represented as 'LOW LINE' (U+005F) or
'PLUS SIGN' (U+002B).
For more information, see the Gerrit docs here:
https://gerrit-review.googlesource.com/Documentation/user-upload.html#message
"""
safe = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 '
encoded = ''.join(c if c in safe else '%%%02X' % ord(c) for c in original)
# spaces are not allowed in git refs; gerrit will interpret either '_' or
# '+' (or '%20') as space. Use '_' since that has been supported the longest.
return encoded.replace(' ', '_')
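# For example (illustrative input): PercentEncodeForGitRef('Fix bug?') returns
# 'Fix_bug%3F', since the space becomes '_' and '?' is percent-encoded.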
@contextlib.contextmanager
def tempdir():
tdir = None
try:
tdir = tempfile.mkdtemp(suffix='gerrit_util')
yield tdir
finally:
if tdir:
gclient_utils.rmtree(tdir)
|
Paul VALENSI is Professor of Nutrition, diabetologist and endocrinologist, Head of the departments of Endocrinology-Diabetology-Nutrition of Jean Verdier Hospital (Bondy), René Muret hospital (Sevran) and Avicenne hospital (Bobigny), Paris-Nord University, France, and co-Chair of the Centre Intégré Nord Francilien Obésité Adultes et Enfants (CINFO).
He served as Chairman of Neurodiab, the Study Group of the EASD on diabetic neuropathy (2002-2005), as Chairman of the French Study Group on Heart and diabetes (2002-2005) and of the French section of the Diabetes Education Study Group (DESG) (1994-1997), and was a co-organiser of the IDF meeting in Paris, August 2004.
He is currently President of the French Group on the prevention of type 2 diabetes, a member of the Executive Committee of the Diabetes and Cardiovascular disease EASD Study Group, a member of the R3I international Consortium on the reduction of residual risk, and associate Editor of Diabetes Metabolism.
He is co-author of several French guidelines on silent myocardial ischemia, on the anti-aggregants, on peripheral and autonomic neuropathy in diabetes, on Care of the hyperglycaemic patient during acute coronary syndrome, and co-author of the European IMAGE guidelines on the prevention of type 2 diabetes, of the Toronto consensus on diabetic neuropathy (co-Chair) and of the ESC Guidelines on diabetes, pre-diabetes, and cardiovascular diseases developed in collaboration with the EASD (2013).
He has given lectures to several international meetings (ADA, European Society of Cardiology, Neurodiab, Diabetes&Cardiovascular EASD study Group, Journées Européennes de la Société Française de Cardiologie, Société Francophone de Diabète...).
His main research topics are macro- and microvascular complications of diabetes and obesity, neuropathic disorders in diabetes and obesity, and prevention of diabetes and its complications. He is the author of 310 papers referenced in PubMed and about 40 chapters in scientific books including “Type 2 diabetes” (Ellipses 2004), “Le diabète. Une épidémie silencieuse” (Le bord de l’eau 2013), “Diabetes in cardiovascular disease” (Elsevier 2014), and Editor or co-editor of books: “Heart and diabetes” (Ed. Frison-Roche, 1999, 470 pages), “All in One. Diabetes and the Heart” (Merck Santé 2004). |
#!/usr/bin/env python
#
# @file MetaField.py
# @brief A proxy for another field.
# @author Aleix Conchillo Flaque <[email protected]>
# @date Fri Jan 15, 2010 10:22
#
# Copyright (C) 2010, 2011, 2012 Aleix Conchillo Flaque
#
# This file is part of BitPacket.
#
# BitPacket is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BitPacket is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BitPacket. If not, see <http://www.gnu.org/licenses/>.
#
__doc__ = '''
MetaField field
===============
**API reference**: :class:`MetaField`
'''
from BitPacket.Field import Field
class MetaField(Field):
@staticmethod
def _raise_error(instance):
raise TypeError("No field created for MetaField '%s'" % instance.name())
@staticmethod
def _non_proxyable():
return ["_field", "_fieldfunc", "_create_field",
"_encode", "_decode", "_set_name", "write"]
def __init__(self, name, fieldfunc):
Field.__init__(self, name)
self._fieldfunc = fieldfunc
self._field = None
def type(self):
if self._field:
return type(self._field)
else:
return type(self._create_field())
def _encode(self, stream):
if self._field:
self._field._encode(stream)
else:
self._raise_error(self)
def _decode(self, stream):
self._field = self._create_field()
self._field._decode(stream)
def _create_field(self):
# Call name(), root() and parent() before proxy is
# available.
name = self.name()
root = self.root()
parent = self.parent()
field = self._fieldfunc(root)
field._set_name(name)
field._set_root(root)
field._set_parent(parent)
return field
def __len__(self):
if self._field:
return len(self._field)
else:
self._raise_error(self)
def __repr__(self):
if self._field:
return repr(self._field)
else:
self._raise_error(self)
def __getitem__(self, name):
if self._field:
return self._field[name]
else:
self._raise_error(self)
def __setitem__(self, name, value):
if self._field:
self._field[name] = value
else:
self._raise_error(self)
def __getattribute__(self, name):
try:
# We'll get an exception due to _field access in __init__,
# as _field attribute does not exist yet.
field = object.__getattribute__(self, "_field")
except AttributeError:
field = None
# Get the list of methods that should not be forwarded.
non_proxyable = object.__getattribute__(self, "_non_proxyable")()
# If _field is created and we are accessing a proxyable
# attribute, then forward it to _field.
if field and name not in non_proxyable:
return object.__getattribute__(field, name)
else:
return object.__getattribute__(self, name)
# import array
# from BitPacket.Structure import Structure
# from BitPacket.Integer import *
# class Test(Structure):
# def __init__(self):
# Structure.__init__(self, "tesbabat")
# self.append(UInt8("value1"))
# self.append(UInt8("value2"))
# s = Structure("metastruct")
# ss = Structure("substruct")
# s.append(ss)
# f = MetaField("test", lambda ctx: Test())
# ss.append(f)
# s.set_array(array.array("B", [123, 124]))
# print s
|
President Abdel Fattah El Sisi attended on Sunday 29/07/2018 a session on the national project of the state information database at the 6th National Youth Conference, held at Cairo University.
The session was also attended by Prime Minister Mostafa Madbouli, Assistant President for National and Strategic Projects Sherif Ismail, House Speaker Ali Abdel-Aal and Chairman of the Suez Canal Authority Mohab Mamish.
A host of ministers and senior state officials were also present. |
# Copyright (C) 2010 by Kevin Saff
# This file is part of the CA scanner.
# The CA scanner is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# The CA scanner is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with the CA scanner. If not, see <http://www.gnu.org/licenses/>.
"""
This algorithm handles 256 states, packed at 1 cell per byte.
A cell is considered 'active' if its low bit is 1. Otherwise, it is inactive.
The state of a cell after evolution depends on its complete 8-bit state
and on the activity of the 8 cells in its Moore(1) neighborhood.
The bits for the lookup table are as follows:
[ 0][ 1][ 2]
[ 3][*4][ 5]
[ 6][ 7][ 8]
Bits 9-15 are the remainder of the state, minus the activity bit.
"""
import generate
def evolve(input, output, lookup):
"""Evolve it."""
return generate.inline("""
PyArrayObject *input;
PyArrayObject *output;
PyArrayObject *lookup;
char h = 0;
unsigned xstride, ystride;
unsigned xa, x0, x1, xM;
unsigned /*ya,*/ y0, y1, yM;
unsigned z;
char c, n, r;
if (!PyArg_ParseTuple(args, "O!O!O!",
&PyArray_Type, &input,
&PyArray_Type, &output,
&PyArray_Type, &lookup
))
return NULL;
xstride = input-> strides[0];
ystride = input-> strides[1];
xM = (input-> dimensions[0] - 1) * xstride;
yM = (input-> dimensions[1] - 1) * ystride;
xa = 0;
for (x0 = xstride; x0 < xM; x0 += xstride)
{
xa = x0 - xstride;
x1 = x0 + xstride;
n = input-> data[x0 + 1 * ystride];
z = ((input-> data[xa + 0 * ystride] & 1) << 0x3) |
((input-> data[x0 + 0 * ystride] & 1) << 0x4) |
((input-> data[x1 + 0 * ystride] & 1) << 0x5) |
((input-> data[xa + 1 * ystride] & 1) << 0x6) |
((n & 1) << 0x7) |
((input-> data[x1 + 1 * ystride] & 1) << 0x8) ;
for (y0 = ystride; y0 < yM; y0 += ystride)
{
z >>= 3;
y1 = y0 + ystride;
c = n;
n = input-> data[x0 + y1];
z |=((input-> data[xa + y1] & 1) << 0x6) |
((n & 1) << 0x7) |
((input-> data[x1 + y1] & 1) << 0x8) ;
r = lookup-> data[z | ((c & 0xFE) << 8)];
output-> data[x0 + y0] = (char)r;
}
}
return PyFloat_FromDouble(1.0);
""")(input, output, lookup)
import numpy
from _util import bit_count, register
from _algorithm import algorithm
@register('compile_rule', type='life', quality=1.0)
def _life(X):
lookup0 = []
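    # Bit 4 of the index is the cell's own activity: dead cells consult
    # X.birth, live cells consult X.survival, counting set bits among the other
    # eight neighbourhood bits (mask 0x1EF drops the centre bit).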
for i in range(0x200):
if bit_count[i & 0x1EF] in (X.birth, X.survival)[(i & 0x10) and 1]:
lookup0.append(1)
else:
lookup0.append(0)
return algorithm('bytescan',
evolve=evolve,
table=numpy.tile(numpy.asarray(lookup0, dtype = numpy.uint8), 0x80),
states=(0,1))
@register('compile_rule', type='brain', quality=0.9)
def _brain(X):
lookup = numpy.ndarray(shape=0x10000, dtype=numpy.uint8)
mdecay = 2 * (X.decay + 1)
if mdecay > 256:
mdecay = 256
for i in range(0x10000):
if i & 0x10: #alive
if bit_count[i & 0x1EF] in X.survival:
lookup[i] = 1
else:
lookup[i] = 2 % mdecay
elif i < 0x200: #dead
lookup[i] = bit_count[i & 0x1FF] in X.birth and 1 or 0
else: #dying
lookup[i] = ((i >> 9) * 2 + 2) % mdecay
return algorithm('bytescan',
evolve=evolve,
table=lookup,
states=range(2) + range(2, (X.decay + 1) * 2, 2),
)
|
On Friday evening, April 29th, 2016, Portland was invaded by “Star Wars” and other music of John Williams, including material from “Harry Potter” and “Indiana Jones.”
Until the very last minute there was an open seat beside mine. As the lights dimmed a young woman slipped into the open seat. I think her arrival made it a full house of 2,176 people.
Tremendous enthusiasm and loud shout-outs filled the place. If it has a memory the 1920s building must have relived its early decades as Oregon’s biggest movie theater.
Musically, Williams’ scores drew on a fantastically wide variety of instruments. Some musicians (especially the cymbal player and the kettle drummer) worked far harder than required in classical concerts.
All ages audience. Bottom row: daughter Christina, wife Lucy and me. Upper row, granddaughter, her friend, grandson, his friend. Image by Christina.
Finally, during “Main Title,” not just one, but two, R2-D2s appeared in mini-sized stages on the back wall above the orchestra. More shouts from the patrons soared.
Mr. Kim, the Resident Conductor, proved to be a stand-up comedian, throwing off ad-lib humor throughout the concert. After “Main Title,” the audience, on its feet, demanded an encore. He bargained us down to one, but we weren’t satisfied. He yielded and granted a second. People screamed, “More.” He appeared a bit confused and then folded his hands in a pillow pose, rested his head on the “pillow,” and dismissed the orchestra. Reluctantly, the audience left the hall for the ice cream parlors and frozen yogurt spots in the downtown around the Hall.
During one number I hauled out my phone but it wouldn’t yield a decent photo in the darkened hall. Worse, even though I had the flash turned off, it operated on its own. People seated in front of me turned around to see if something was wrong and I worried that an usher would take my phone away.
However, the young woman next to me—the one who arrived at the last minute to take the last open seat—grabbed my camera and filmed a wonderful 22-second video on my behalf. No flash and lovely lighting. How did she do it?
The values topic? Classical values exuded from both music and the accompanying antics. The eternal conflict of good versus evil filled the evening. At one point, “Darth Vader” entered from the wings and stole the baton from conductor Paul Ghun Kim. Kim exited the stage and soon reappeared, costumed as Obi-Wan Kenobi. He exiled Darth Vader to the wings, and finished conducting “Yoda’s Theme.” Good prevailed. Hard to imagine a better way to present Good over Bad to a youthful audience.
Luke Skywalker? He was a no-show last night. Maybe next time. |
"""
Utilities for working with github/licensed cached license data
"""
import pathlib
import yaml
import csv
from six import StringIO
_LICENSE_URLS = {'apache-2.0': 'https://www.apache.org/licenses/LICENSE-2.0',
'mit': 'https://opensource.org/licenses/MIT',
'gpl2': 'https://www.gnu.org/licenses/old-licenses/gpl-2.0.txt',
'gpl3': 'https://www.gnu.org/licenses/gpl-3.0.txt',
'gpl-3.0': 'https://www.gnu.org/licenses/gpl-3.0.txt',
'bsd-2-clause': 'https://opensource.org/licenses/BSD-2-Clause',
'bsd-3-clause': 'https://opensource.org/licenses/BSD-3-Clause',
'other': 'unknown'}
_LICENSE_NAMES = {'apache-2.0': 'Apache License Version 2.0',
'mit': 'MIT',
'gpl2': 'GNU General Public License Version 2',
'gpl3': 'GNU General Public License Version 3',
'gpl-3.0': 'GNU General Public License Version 3',
'bsd-2-clause': 'Simplified BSD License',
'bsd-3-clause': 'BSD License'}
def cached_license_dict(dep_path: pathlib.Path) -> dict:
with dep_path.open('r') as f:
dep = yaml.safe_load(f)
dep['URL'] = _LICENSE_URLS.get(dep['license'], 'unknown')
dep['license'] = _LICENSE_NAMES.get(dep['license'], dep['license'])
    return {(k.capitalize() if k != "URL" else k): dep[k] for k in ['name', 'version', 'license', 'URL']}
def cached_licenses(cache_path: pathlib.Path) -> [dict]:
return [cached_license_dict(p) for p in cache_path.glob("**/*.dep.yml")]
def cached_license_csv(cache_path: pathlib.Path, output: pathlib.Path = None) -> str:
licenses = cached_licenses(cache_path)
if output is None:
with StringIO() as f:
_write_licenses_csv(f, licenses)
return f.getvalue()
else:
with output.open('w', newline='') as f:
_write_licenses_csv(f, licenses)
def _write_licenses_csv(f, licenses):
fieldnames = ['Name', 'Version', 'License', 'URL']
writer = csv.DictWriter(f, fieldnames)
writer.writeheader()
writer.writerows(licenses)
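# A minimal usage sketch (the cache path and output file are hypothetical):
#
#     cached_license_csv(pathlib.Path(".licenses"),
#                        output=pathlib.Path("third_party_licenses.csv"))
#
# With output=None the CSV text is returned as a string instead of written out.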
|
How To Adjust A Craftsman Belt Drive Garage Door Opener – A Craftsman belt-drive garage door opener enables remote access to the garage door. The opener sends an electronic signal to the garage door to engage the motor to close or open the door. Unfortunately, adjustment of the garage door opening mechanism is required when the opener does not respond and the garage door is stuck.
You can manually adjust your Craftsman belt-drive garage door opener regardless of your familiarity with troubleshooting garage doors. Find the opener’s emergency release rope, the red rope dangling from the garage door installation above you. Pull the rope down to disengage the opener’s motor control, which allows manual adjustment of the garage door. This will also place the release arm in a vertical orientation.
Find the garage door handle; it will be near the bottom of the inside of the garage door. Hold the handle and slowly “walk” the door down manually, now that the door has been released from the motor via the emergency release rope. To re-engage, grasp the emergency release rope and pull it at about a 45-degree angle. This will activate the motor on the garage door and set the release lever back into place. Attempt to use the door opener. It should be functioning properly.
Tags: adjust craftsman garage door opener, adjusting craftsman garage door opener, adjusting sears garage door opener, how to adjust craftsman garage door, craftsman garage door opener adjustment, craftsman garage door opener travel adjustment, garage door opener. |
#! /usr/bin/env python
# -*- coding:utf-8 -*-
def printlist(head):
print head.val
while head.next:
head = head.next
print head.val
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
import heapq
class Solution(object):
def mergeKLists(self, lists):
"""
:type lists: List[ListNode]
:rtype: ListNode
"""
if len(lists) == 0: return []
heap = []
td = {}
ml = ListNode(0)
tml = ml
while True:
if len(lists) == lists.count(None): break
for i in range(len(lists)):
node = lists[i]
if not node: continue
heapq.heappush(heap, node.val)
td.setdefault(node.val, []).append(node)
lists[i] = lists[i].next
if not heap: return []
val = heapq.heappop(heap)
tml.next = ListNode(val)
tml = tml.next
while heap:
val = heapq.heappop(heap)
tml.next = ListNode(val)
tml = tml.next
return ml.next
if __name__ == "__main__":
l1 = ListNode(1)
l1.next = ListNode(2)
l1.next.next = ListNode(3)
l2 = ListNode(6)
l2.next = ListNode(7)
l2.next.next = ListNode(8)
l3 = ListNode(9)
l3.next = ListNode(10)
l3.next.next = ListNode(11)
lists = [l1, l2, l3]
lists = [[]]
l = Solution().mergeKLists(lists)
# printlist(l)
|
Points2Shop Forums » Referral Talk » Easy way to get referrals ?
Referrals aren't easy to get, but once you get the hang of things, they do become easy depending on WHERE and HOW you get them.
It really depends, I've tried creating blogs to get referrals but it hasn't worked very well. I'd try making youtube videos, they honestly seem the best way.
It's actually really hard. Those who work hard at earning them are able to get a lot of refs. I'm starting now to put effort into making refs by making my own website. Try doing that. Try promoting the website rather than making a site solely for P2S.
I use YouTube. That's all. So I have to do no hard work to get those 70 refs a month.
Read the official points2shop referral guide.... I have over 11,000 referrals. |
import json
import logging
from connexion import problem, request
from flask import Response, jsonify
from auslib.global_state import dbo
from auslib.web.common.csrf import get_csrf_headers
log = logging.getLogger(__name__)
def strip_data(release):
"""Return a release with all the fields except data.
This is used in multiple APIs to save bandwidth and present a
simplified view of the release by removing its largest field,
data, which is of no use except when serving clients.
"""
return dict((k, v) for (k, v) in release.items() if k != "data")
def release_list(request):
kwargs = {}
if request.args.get("product"):
kwargs["product"] = request.args.get("product")
if request.args.get("name_prefix"):
kwargs["name_prefix"] = request.args.get("name_prefix")
if request.args.get("names_only"):
kwargs["nameOnly"] = True
return dbo.releases.getReleaseInfo(**kwargs)
def serialize_releases(request, releases):
if request.args.get("names_only"):
names = []
for release in releases:
names.append(release["name"])
data = {"names": names}
else:
data = {"releases": [strip_data(release) for release in releases]}
return jsonify(data)
def get_releases():
releases = release_list(request)
return serialize_releases(request, releases)
def _get_release(release):
releases = dbo.releases.getReleases(name=release, limit=1)
return releases[0] if releases else None
def get_release(release, with_csrf_header=False):
    release_name = release
    release = _get_release(release_name)
    if not release:
        return problem(404, "Not Found", "Release name: %s not found" % release_name)
headers = {"X-Data-Version": release["data_version"]}
if with_csrf_header:
headers.update(get_csrf_headers())
if request.args.get("pretty"):
indent = 4
separators = (",", ": ")
else:
indent = None
separators = None
# separators set manually due to https://bugs.python.org/issue16333 affecting Python 2
return Response(response=json.dumps(release["data"], indent=indent, separators=separators, sort_keys=True), mimetype="application/json", headers=headers)
def get_release_with_csrf_header(release):
return get_release(release, with_csrf_header=True)
def get_release_single_locale(release, platform, locale, with_csrf_header=False):
try:
locale = dbo.releases.getLocale(release, platform, locale)
except KeyError as e:
return problem(404, "Not Found", json.dumps(e.args))
data_version = dbo.releases.getReleases(name=release)[0]["data_version"]
headers = {"X-Data-Version": data_version}
if with_csrf_header:
headers.update(get_csrf_headers())
return Response(response=json.dumps(locale), mimetype="application/json", headers=headers)
def get_release_single_locale_with_csrf_header(release, platform, locale):
return get_release_single_locale(release, platform, locale, with_csrf_header=True)
|
All QX Products have been certified for sale and use around the globe. The products are compact, fully rack-mountable and contained in a metal enclosure. They are meant to be mixed and matched for a fully customizable system. With the purchase of a rack-mounting kit, the units also come with two DC power cables for power redundancy.
Please select a product below for more details.
GET a FREE ecQX Demo Today! |
import sqlite3
from flask import Flask, request, session, g, redirect, url_for, abort, jsonify,render_template, flash
from contextlib import closing
DATABASE = '/tmp/vikas.db'
DEBUG = True
SECRET_KEY = 'vikas123'
USERNAME = 'admin'
PASSWORD = 'admin'
app = Flask(__name__)
app.config.from_object(__name__)
def connect_db():
return sqlite3.connect(app.config['DATABASE'])
def init_db():
with closing(connect_db()) as db:
with app.open_resource('schema.sql', mode='r') as f:
db.cursor().executescript(f.read())
db.commit()
@app.before_request
def before_request():
g.db = connect_db()
@app.teardown_request
def teardown_request(exception):
db = getattr(g, 'db', None)
if db is not None:
db.close()
@app.route('/')
def show_entries():
cur = g.db.execute('select title, text from entries order by id desc')
entries = [dict(title=row[0], text=row[1]) for row in cur.fetchall()]
return render_template('show_entries.html', entries=entries)
@app.route('/add', methods=['POST'])
def add_entry():
if not session.get('logged_in'):
abort(401)
g.db.execute('insert into entries (title, text) values (?, ?)',
[request.form['title'], request.form['text']])
g.db.commit()
flash('New entry was successfully posted')
return redirect(url_for('show_entries'))
@app.route('/login', methods=['GET', 'POST'])
def login():
error = None
if request.method == 'POST':
if request.form['username'] != app.config['USERNAME']:
error = 'Invalid username'
elif request.form['password'] != app.config['PASSWORD']:
error = 'Invalid password'
else:
session['logged_in'] = True
flash('You were logged in')
return redirect(url_for('show_entries'))
return render_template('login.html', error=error)
@app.route('/logout')
def logout():
session.pop('logged_in', None)
flash('You were logged out')
return redirect(url_for('show_entries'))
if __name__ == '__main__':
app.run()
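# Note (an assumption about intended usage, not part of the original snippet):
# the database schema must be created once before the first run, e.g. by
# importing this module in an interactive shell and calling init_db().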
|
Dealer - Forex - Limassol - Cyprus. On behalf of our client, a large Forex company, GRS are looking for a Dealer to work within their Dealing department.
Limassol Forex Brokers showcases Brokers based in Limassol, offering services, such as stocks, commodities and bonds trading via online trading platforms.
Cyprus’s capital is Nicosia, and other major cities are Limassol, Larnaca, Kyrenia and Paphos.
Easy Forex Limassol, placement through Arbeitsvermittlung24 GmbH. Institut Für Handelsmanagement! |
# -*- coding:utf-8 -*-
"""
Django settings for RSSReader project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '&t_1&t$fh5(+e%fb-boxy9q-#s##e*@aa1p23z*@#iqbypoq%)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
DESENV = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rss_reader',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'RSSReader.urls'
WSGI_APPLICATION = 'RSSReader.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(BASE_DIR, "static"),
)
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
) |
The Fabulous Mary Stewart & Giveaway!
Do you remember Mary Stewart? An even more important question would be, of course, did you ever read a Mary Stewart novel?
Those of us who are of a certain age – I would say over thirty – probably did read her, since her books were among the most popular of the romance genre in the late 50’s and then the 60’s and 70’s. In fact, many critics would say she’s one of the creators of the modern romantic suspense novel, that unique form of storytelling that blends a thrilling mystery with a solid love story. I discovered Mary Stewart’s books in my teens, when my big sister handed me a whole pile of them to read. It was love at first sight and I tore through them. Along with Victoria Holt and Georgette Heyer, Mrs. Stewart’s books inspired in me a love for the romance genre that ultimately prompted me to pick up pen and begin writing romance novels myself.
And her books are beautifully written. One of Mrs. Stewart’s many strengths was her ability to incorporate setting into her books as a key element of story. Her settings are exotic and a reflection of her many travels around the world with her husband, Frederick Stewart. For instance, The Gabriel Hounds is set in Lebanon, This Rough Magic is set in Corfu, Greece, and the book I just finished, Madam Will You Talk? is set in the south of France. Her descriptions are so evocative and compelling that they blend seamlessly into the story, and enhance plot and character in a way that is unique to Mary Stewart novels.
But why am I talking about Mary Stewart today? Several weeks ago, I received an email from a woman who works in the marketing department of Hodder and Stoughton, the venerable English publisher who has published everyone from Winston Churchill to Stephen King. Recently, they decided to reissue the books of Mary Stewart, who began her career with H & S and remains with them to this day. Since I had written a few posts about Mary Stewart, the marketing person with H & S asked if I would be interested in reading the reissues and posting some reviews. As you can imagine I was eager to say yes, and thanks to the generosity of H & S I now have several of the beautiful reissues of classic Mary Stewart novels to give away. And the books are gorgeous, too, lovely trade paperbacks with really cool retro covers.
That’s the cover for Madam Will You Talk?, Mrs. Stewart’s first book, published in 1955. It was one of the few MS books I hadn’t yet read and I finished it last night. It blew me away with the quality of the writing, the intensity of the suspense, and the deeply realized character of the heroine, a young war widow who, while on vacation in Provence, stumbles upon a kidnapping and a murder plot. Madam also features two incredible car chases through the French countryside – the heroine learned to drive race cars from her husband – that were incredibly exciting and suspenseful. And how many car chases do you actually find in novels these days? MS totally pulls it off.
Anyway, I’m thrilled to have the opportunity to introduce a new generation of readers to Mary Stewart’s classic romantic suspense novels. Today, I’m giving away a copy of one of my very favorite MS books, This Rough Magic. It takes place on the island of Corfu and the heroine, a British actress fleeing her failed career on the London stage, stumbles into the middle of a smuggling ring and lots of danger and adventure. Oh, and along the way she rescues a stranded dolphin and finds the man of her dreams. It’s an absolutely awesome book.
H & S also sent me several sets of beautiful postcards of these reissues, so I’ll also give away two copies of those to readers who comment. Just tell me who was the first romance author you ever read and you’ll have a chance to win the book or one of these great postcard sets. And if you have read Mary Stewart, tell me which book is your favorite! |
from zeit.cms.checkout.interfaces import ICheckoutManager
import gocept.testing.mock
import lxml.etree
import mock
import zeit.cms.content.interfaces
import zeit.cms.testing
import zeit.content.article.edit.author
import zeit.content.author.author
import zeit.content.author.testing
import zope.component
class AuthorshipXMLReferenceUpdater(zeit.cms.testing.FunctionalTestCase):
layer = zeit.content.author.testing.ZCML_LAYER
def setUp(self):
super(AuthorshipXMLReferenceUpdater, self).setUp()
self.shakespeare = zeit.content.author.author.Author()
self.shakespeare.firstname = 'William'
self.shakespeare.lastname = 'Shakespeare'
self.repository['shakespeare'] = self.shakespeare
def test_location_is_copied(self):
content = self.repository['testcontent']
content.authorships = (content.authorships.create(self.shakespeare),)
content.authorships[0].location = 'London'
reference = zope.component.getAdapter(
content, zeit.cms.content.interfaces.IXMLReference, name='related')
self.assertEllipsis("""\
<reference...>
...
<title xsi:nil="true"/>
...
<author href="http://xml.zeit.de/shakespeare"...>
<display_name...>William Shakespeare</display_name>
<location>London</location>
</author>
</reference> """, lxml.etree.tostring(reference, pretty_print=True))
def test_old_author_nodes_are_removed(self):
andersen = zeit.content.author.author.Author()
andersen.firstname = 'Hans Christian'
andersen.lastname = 'Andersen'
self.repository['andersen'] = andersen
content = self.repository['testcontent']
content.authorships = (content.authorships.create(self.shakespeare),)
reference = zope.component.getAdapter(
content, zeit.cms.content.interfaces.IXMLReference, name='related')
content.authorships = (content.authorships.create(andersen),)
zeit.cms.content.interfaces.IXMLReferenceUpdater(
content).update(reference)
reference = lxml.etree.tostring(reference, pretty_print=True)
self.assertEllipsis(
'...<author href="http://xml.zeit.de/andersen"...', reference)
self.assertNotIn('shakespeare', reference)
def test_works_with_security(self):
with zeit.cms.checkout.helper.checked_out(
self.repository['testcontent'], temporary=False) as co:
co = zope.security.proxy.ProxyFactory(co)
co.authorships = (co.authorships.create(self.shakespeare),)
co.authorships[0].location = 'London'
reference = zope.component.getAdapter(
co, zeit.cms.content.interfaces.IXMLReference, name='related')
self.assertIn(
'William Shakespeare',
lxml.etree.tostring(reference, pretty_print=True))
def test_fills_in_bbb_author_attribute(self):
andersen = zeit.content.author.author.Author()
andersen.firstname = 'Hans Christian'
andersen.lastname = 'Andersen'
self.repository['andersen'] = andersen
content = self.repository['testcontent']
reference = zope.component.getAdapter(
content, zeit.cms.content.interfaces.IXMLReference, name='related')
self.assertNotIn(
'author=""', lxml.etree.tostring(reference, pretty_print=True))
content.authorships = (
content.authorships.create(self.shakespeare),
content.authorships.create(andersen))
reference = zope.component.getAdapter(
content, zeit.cms.content.interfaces.IXMLReference, name='related')
self.assertEllipsis(
"""<reference...
author="William Shakespeare;Hans Christian Andersen"...""",
lxml.etree.tostring(reference, pretty_print=True))
def test_updater_suppress_errors(self):
content = ICheckoutManager(self.repository['testcontent']).checkout()
content.authorships = (content.authorships.create(self.shakespeare),)
# This error condition cannot be synthesized easily (would need to make
# an Author lose its metadata so it's treated as
# PersistentUnknownResource).
with mock.patch('zeit.content.author.author.Author.display_name',
gocept.testing.mock.Property()) as display_name:
display_name.side_effect = AttributeError()
with self.assertNothingRaised():
updater = zeit.cms.content.interfaces.IXMLReferenceUpdater(
content)
updater.update(content.xml, suppress_errors=True)
class RelatedReferenceTest(zeit.cms.testing.FunctionalTestCase):
layer = zeit.content.author.testing.ZCML_LAYER
def setUp(self):
super(RelatedReferenceTest, self).setUp()
self.repository['testauthor'] = zeit.content.author.author.Author()
self.author = self.repository['testauthor']
self.reference_container = zeit.content.article.edit.author.Author(
self.author, self.author.xml)
def test_author_can_be_adapted_to_IXMLReference(self):
result = zope.component.getAdapter(
self.author,
zeit.cms.content.interfaces.IXMLReference,
name='related')
self.assertEqual('author', result.tag)
self.assertEqual(self.author.uniqueId, result.get('href'))
def test_author_can_be_adapted_to_IReference(self):
from zeit.content.author.interfaces import IAuthorBioReference
result = zope.component.getMultiAdapter(
(self.reference_container, self.author.xml),
zeit.cms.content.interfaces.IReference, name='related')
result.biography = 'bio'
self.assertEqual(True, IAuthorBioReference.providedBy(result))
self.assertEqual('bio', result.xml.biography.text)
|
Posted: Mon Apr 21, 2014 1:09 pm Post subject: One book to rule them all.
As you may or may not have noticed, Mongoose is no longer carrying the Wayfarers print. They sold a short run, but determined that sales were not fast enough to justify a second run.
In some ways, it is an unfortunate development. However, we didn't set out with dreams of becoming a major RPG publisher, and in some regards I am happier to get things back in house.
With that in mind, I have begun on a new project.
It will be a hefty book, but should encompass all one needs to play Wayfarers at a price significantly less than what it would cost to acquire the same today.
I have some ideas on where we will go from there, but one thing at a time.
That being said, if anyone has feedback/suggestions, now would be the time.
YES! You can count me in.
The compiled works including errata would make me a happy camper. It can get tricky to get all cross-references to other books, pages, and tables right.
I would not go so far as to suggest another revision but it might be a good opportunity to introduce a few minor tweaks where you feel they would be warranted.
You have my full confidence on this. As it currently stands, the game is about as close to what I'm looking for in a D&D-like RPG as it is likely to get.
I sincerely believe that Wayfarers will be better served back in the hands of its creators. You have proven it in the past with the awesome gaming package that was the original edition.
I appreciate the confidence, and I will definitely take advantage of the offer, Meta. This won't go to print until no one here can find an error. I will likely make some tweaks, but I also don't feel that the game needs much more than that.
Of course, any feedback you all have in that respect is valuable.
Alright, an extensive treatise on mules.
Huzzah!!! My money is yours!
Edit/Addendum: My absence from these forums has been due to enormous amounts of work we're putting into getting my wife's business going, raising kids, and keeping up with my studies so I can eventually get that PhD.
My hopes of creating a campaign setting for Wayfarers are nowhere near dead, but I haven't even had time to play any RPGs this year.
Glad to hear it's back in house!
Any kind of update on this? Timeframe, delivery method... Is it going to be a print-on-demand type of deal, or are you going to use public backing (i.e. Kickstarter) to garner enough cash to do a print run?
Expect the fall/winter. My summer is very fragmented by work and travel so work has not been fast, but I tend to get it done in chunks.
I doubt we can get enough kickstarted for a print run, so it might be POD. That said, I really want a matte finish on this hardcover. I am going to work on that.
I'm still down for this!
An alphabetical index of monsters should definitely be a requirement!
Something going in Optional Rules. Not fully edited.
Thanks to Moth for much of the inspiration.
Always good to see more awesome stuff happening here!
Some folks in the Facebook D&D 5e group are looking for a way to make characters without classes and I've pointed them this direction. |
"""Execution environment for events that synchronizes passing of time
with the real-time (aka *wall-clock time*).
"""
try:
# Python >= 3.3
from time import monotonic as time, sleep
except ImportError:
# Python < 3.3
from time import time, sleep
from simpy.core import Environment, EmptySchedule, Infinity
class RealtimeEnvironment(Environment):
"""Execution environment for an event-based simulation which is
synchronized with the real-time (also known as wall-clock time). A time
step will take *factor* seconds of real time (one second by default).
    A step from ``0`` to ``3`` with a ``factor=0.5`` will, for example, take at
    least 1.5 seconds.
The :meth:`step()` method will raise a :exc:`RuntimeError` if a time step
took too long to compute. This behaviour can be disabled by setting
*strict* to ``False``.
"""
def __init__(self, initial_time=0, factor=1.0, strict=True):
Environment.__init__(self, initial_time)
self.env_start = initial_time
self.real_start = time()
self._factor = factor
self._strict = strict
@property
def factor(self):
"""Scaling factor of the real-time."""
return self._factor
@property
def strict(self):
"""Running mode of the environment. :meth:`step()` will raise a
:exc:`RuntimeError` if this is set to ``True`` and the processing of
events takes too long."""
return self._strict
def sync(self):
"""Synchronize the internal time with the current wall-clock time.
This can be useful to prevent :meth:`step()` from raising an error if
a lot of time passes between creating the RealtimeEnvironment and
calling :meth:`run()` or :meth:`step()`.
"""
self.real_start = time()
def step(self):
"""Process the next event after enough real-time has passed for the
event to happen.
The delay is scaled according to the real-time :attr:`factor`. With
:attr:`strict` mode enabled, a :exc:`RuntimeError` will be raised, if
the event is processed too slowly.
"""
evt_time = self.peek()
if evt_time is Infinity:
raise EmptySchedule()
real_time = self.real_start + (evt_time - self.env_start) * self.factor
if self.strict and time() - real_time > self.factor:
# Events scheduled for time *t* may take just up to *t+1*
# for their computation, before an error is raised.
raise RuntimeError('Simulation too slow for real time (%.3fs).' % (
time() - real_time))
# Sleep in a loop to fix inaccuracies of windows (see
# http://stackoverflow.com/a/15967564 for details) and to ignore
# interrupts.
while True:
delta = real_time - time()
if delta <= 0:
break
sleep(delta)
return Environment.step(self)
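# A minimal usage sketch (assumes the standard SimPy API; the clock process is
# illustrative only):
#
#     def clock(env):
#         while True:
#             print('tick at', env.now)
#             yield env.timeout(1)
#
#     env = RealtimeEnvironment(factor=0.1)
#     env.process(clock(env))
#     env.run(until=3)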
|
After breakfasts go for Thimphu sightseeing covering – Memorial Chorten, Tashichho Dzong, Buddha Point (Kuensel Phodong),Changangkha Lhakhang (Monastery), Motithang Takin Preservation Centre, Drupthob Goemba / Zilukha Nunnery, National Library (Closed on Saturday , Sunday & National Holiday), Institute for Zorig Chusum (Traditional Art & Craft School-Closed on Saturday , Sunday, National Holiday & During Summer & Winter Vacations), Textiles Museum (Handicrafts Shops - Closed on Saturday, Sunday & National Holiday), Zangthopelri Lhakhang (Monastery), Afternoon drive to Wangdi (Wangdiphodrang) (1350Mts / 4430Fts, 70 Kms / 03 to 3½ Hrs). Wangdi is the last town on the highway before entering Central Bhutan. The drive is over Dochu La pass (3080Mts / 10102Fts) which is very scenic with fascinating view of mountains of Bhutan. Stopping briefly here to take in the view and admire the Chorten, Mani wall, and Prayer flags which decorate the highest point on the road. If skies are clear, the following peaks can be seen from this Pass (Left to Right): Mt. Masagang (7,158Mts / 23478Fts), Mt. Tsendagang (6,960Mts / 22829Fts), Mt. Terigang (7,060Mts / 23157Fts), Mt. Jejegangphugang (7,158Mts / 23478Fts), Mt. Kangphugang (7,170Mts / 23518Fts), Mt. Zongphugang (7,060Mts / 23157Fts), a table mountain that dominates the isolated region of Lunana - Finally Mt.Gangkar Puensum (7,497Mts / 24590Fts), the highest peak in Bhutan. On reaching Wangdi, Check in to the Hotel. Overnight at Hotel.
After breakfast transfer to Paro (2134Mts / 7000Fts, 135 Kms / 05 Hrs). Enroute visit Punakha Dzong; if time permits, visit the Suspension Bridge and then proceed to Chimi Lhakhang (Monastery). On arrival at Paro, check in to the hotel. Overnight at Hotel.
After breakfast go for Paro Sightseeing covering Drukgyel Dzong(Presently closed for tourist), Ta Dzong (National Museum-Closed on Saturday, Sunday & National Holiday), Rinpung Dzong, Kyichu Lhakhang (Monastery), Back to the hotel. Overnight at Hotel. OR PARO - FULL DAY HIKE TO TAKTSANG LHAKHANG (TIGER`S NEST) Refer to GENERAL POINTS ON BHUTAN OPERATION for Pink Permit Start the day early for a day hike to Taktsang Lhakhang (Tiger's Nest Monastery)(Tuesday Closed) - The hike which is all the way uphill takes about 2 /3 hours through pine forests. The monastery clings to a huge granite cliff 800 meters from the Paro valley. It is one of the most famous of Bhutan's monasteries, perched on the side of a cliff 900m above the Paro valley floor. It is said that Guru Rinpoche arrived here on the back of a tigress and meditated at this monastery and hence it is called ""Tiger's Nest"". This site has been recognized as a most sacred place and visited by Shabdrung Ngawang Namgyal in 1646 and now visited by all Bhutanese at least once in their lifetime. On 19 April, 1998, a fire severely damaged the main structure of building but now this Bhutanese jewel has been restored to its original splendour. Afternoon at leisure for go for shopping in the market. Overnight at Hotel.
• GST (Government Service Tax as applicable) (as on date 01/07/2017 it is 05.00%)
• Cost for supplementary services, optional tours, up-gradation charges, sightseeing entrance fees.
• For hotels marked with *, a mandatory additional surcharge on the normal rate applies for travel on 24th Dec & 31st Dec 2017 only (kindly check the rates before quoting).
• Cost for airfare, train fare, insurance premiums, rafting charges.
• Cost for services provided on a personal request.
• Cost for personal expenses such as laundry, bottled water, soft drinks, incidentals, porter charges, tips etc.
• Cost for any other service not mentioned under the “Cost Includes” head.
• Difference in cost arising due to extra usage of vehicle, other than scheduled & mentioned in the itinerary.
• Difference in cost arising due to mishaps, political unrest, natural calamities like landslides, road blockage, etc. In such cases the extra will have to be paid on the spot by the guest directly.
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.engine.internals.native_engine_pyo3 import PyExecutor, PyStubCAS
from pants.option.global_options import RemoteCacheWarningsBehavior
from pants.option.scope import GLOBAL_SCOPE_CONFIG_SECTION
from pants.testutil.pants_integration_test import run_pants
def test_warns_on_remote_cache_errors():
executor = PyExecutor(core_threads=2, max_threads=4)
cas = PyStubCAS.builder().always_errors().build(executor)
def run(behavior: RemoteCacheWarningsBehavior) -> str:
pants_run = run_pants(
[
"--backend-packages=['pants.backend.python']",
"--no-dynamic-ui",
"package",
"testprojects/src/python/hello/main:main",
],
use_pantsd=False,
config={
GLOBAL_SCOPE_CONFIG_SECTION: {
"remote_cache_read": True,
"remote_cache_write": True,
"remote_cache_warnings": behavior.value,
# NB: Our options code expects `grpc://`, which it will then convert back to
# `http://` before sending over FFI.
"remote_store_address": cas.address.replace("http://", "grpc://"),
}
},
)
pants_run.assert_success()
return pants_run.stderr
def read_err(i: int) -> str:
return f"Failed to read from remote cache ({i} occurrences so far): Unimplemented"
def write_err(i: int) -> str:
return (
f'Failed to write to remote cache ({i} occurrences so far): Internal: "StubCAS is '
f'configured to always fail"'
)
first_read_err = read_err(1)
first_write_err = write_err(1)
third_read_err = read_err(3)
third_write_err = write_err(3)
fourth_read_err = read_err(4)
fourth_write_err = write_err(4)
ignore_result = run(RemoteCacheWarningsBehavior.ignore)
for err in [
first_read_err,
first_write_err,
third_read_err,
third_write_err,
fourth_read_err,
fourth_write_err,
]:
assert err not in ignore_result
first_only_result = run(RemoteCacheWarningsBehavior.first_only)
for err in [first_read_err, first_write_err]:
assert err in first_only_result
for err in [third_read_err, third_write_err, fourth_read_err, fourth_write_err]:
assert err not in first_only_result
backoff_result = run(RemoteCacheWarningsBehavior.backoff)
for err in [first_read_err, first_write_err, fourth_read_err, fourth_write_err]:
assert err in backoff_result
for err in [third_read_err, third_write_err]:
assert err not in backoff_result
|
During a construction or a demolition, it is impossible to avoid littering the entire site with debris. Building materials such as insulation, electrical wiring, roofing and shingle will be lying all over the place. Wastes originating from site preparation such as dredging materials, tree stumps and rubble will still be at the site.
Construction waste may include harmful or hazardous materials like asbestos and lead. Most of the construction waste is made of brick fragments, concrete and damaged wood. Some of these materials may not have been used during the construction for various reasons.
Construction waste management can be a costly process, however, it saves a lot of time and is done with efficiency to meet the standards of the client. With proper waste management though, contractors and site owners are destined to experience substantial benefits. Managing the waste from your construction project will benefit your business in a number of ways. Builders waste removal from Vonvil Junk.
Meeting legal and other requirements; proper management of your construction waste should make it easier for you to comply with waste legislation such as the duty of care and hazardous waste controls. Other obligations that it should help you meet more easily are planning and building regulations, and internal targets and policies, such as the ones in the environmental management system or corporate social responsibility policy in London.
Putting proper waste management practices in place might also help you stay ahead of legal requirements and of your competitors.
Minimized costs and more business opportunities; more effective use of materials can help in cutting your costs. Fewer materials will need to be purchased and the waste requiring disposal is reduced. The outlay for the project is reduced and this will increase your profit margins. You will spend less on professional management services, hence improving the efficiency of your staff and contactor.
Increased Competitive Difference; besides the monetary advantages, reducing waste from the construction site actively will elevate you above your competitors. Clients who are aware of the costs associated with waste in London prefer hiring services of companies that are ‘green’ builders. They are after a reputable public image and seek to use contractors that understand and pursue waste reduction. Your business will be rated as the most economically friendly in town.
Public Policy; by making your waste management well known by developers, contractors and clients, you can enjoy a wide range of benefits associated with eco-friendliness. If your company may have experienced negative publicity in the past, this will be the best time to redeem your brand. You will get a great deal of goodwill by effectively managing the waste you produce.
With the implementation of sound construction waste management alongside effective waste-minimization policies, you can transform your entire company into an efficient service provider. You will be in a position to utilize natural resources without compromising cost, quality or construction programs.
Have you been struggling to send money to a stranger in London and you don’t want your identity revealed? Do you need to receive money from someone you are afraid to meet because you don’t trust him? Bitcoin offers a secure platform through the hardware wallet, and you can efficiently transact currencies without any physical contact or even worrying about who you transacted with.
Bitcoin in London has been in operation since 2009, and during this time it keeps updating its security features. Through the hardware wallet, a sophisticated security system, a smart card has been successfully implemented which allows you to make or receive payments offline, and many people prefer it to the paper wallet.
In recent times, computer viruses have been a nuisance to those using Bitcoin paper and Software wallet in London. The introduction of the hardware wallet has come handy to give a lasting solution to the problem, as it ensures that it safely stores your private keys in a hardware device, on an offline module.
When using a hardware wallet for the first time, it will generate a 256-bit seed. The seed will be used to calculate 24 words that you must keep safely for future use.
These wallets will require you to set up a PIN consisting of between 4 and 8 digits. They also provide for a passphrase on the wallet. A secondary PIN maximizes the security of login details.
These devices also have a security grid card installed in them which allows you to verify any transactions going on through the wallet. We ensure prompt responses to the verification process.
The hardware wallet has a unique feature that helps in processing the transactions. If you are in London, you can purchase the “Ledger Nano S,” “Trezor,” or “Keepkey” which have two buttons which you equally have to press to confirm any payment.
Unlike other wallets, when you carry out a transaction and the verification process is complete, it is saved both on the network and computer where you can easily refer to it, and when you lose your device, you will be able to recover the transaction.
The use of Pi Wallets in London has helped many secure their Bitcoin. Since it comes as a fully installed gadget, you don’t need any internet connection. It equally comes with a backup card that allows you to keep your transactions safely. The Armory feature found in Pi-Wallets creates the leverage of receiving addresses and will enable you to confirm your activities without revealing your private keys.
It is necessary to use a system that will save you costs for professional training, internet connection and more and can give you a longer and popular service with maximized security. Securing your Bitcoin in London using hardware will not disappoint concerning your identification and the safety of your coins. Don’t look any further as Bitcoin hardware wallet will sort your problem today.
How rubbish collection in London is performed by Vonvil Junk team. Hire us for best and most affordable waste collection in London. We have over a decade of experience in the industry and we can professionally clean the waste from your location in London. |
#!/usr/bin/env python
#
"""Modify an open office document.
Specifically designed to modify an OpenOffice Presentation.
"""
__author__ = '[email protected] (scottkirkwood)'
import sys
import zipfile
import copy
try:
import xml.etree.ElementTree as ET # Python 2.5
except:
try:
import cElementTree as ET # Faster
except:
import elementtree.ElementTree as ET
NS_OFFICE = '{urn:oasis:names:tc:opendocument:xmlns:office:1.0}'
NS_DRAW = '{urn:oasis:names:tc:opendocument:xmlns:drawing:1.0}'
NS_TEXT = '{urn:oasis:names:tc:opendocument:xmlns:text:1.0}'
def dump(elems):
lines = []
for elem in elems.getiterator():
lines.append(str(elem))
return '\n'.join(lines)
class UpdateOdp:
def __init__(self, meta, pageitems):
"""ctor
Args:
pageitems: list of [(slide-title, [listitems, ...]), ...]
"""
self.meta = meta
self.pages = pageitems
def ReadWrite(self, infname, outfname):
"""Read in in fname, and write out to out fname.
infname and outfname must be different, the idea is that infname is a
'template' and outfname is the final version.
"""
z_in = zipfile.ZipFile(infname, 'r')
z_out = zipfile.ZipFile(outfname, 'w')
for info in z_in.infolist():
text = z_in.read(info.filename)
text = self.CallBack(info, text)
z_out.writestr(info, text)
z_out.close()
z_in.close()
def CallBack(self, zipinfo, text):
if zipinfo.filename == 'content.xml':
et = ET.fromstring(text)
et = self.UpdateContent(et)
text = ET.tostring(et)
text = text.encode('utf-8', 'xmlcharrefreplace')
return text
def UpdateContent(self, et):
"""Update content.xml.
Pure function, no side effects.
Args:
text: text representation of content.xml.
Returns:
Elementtree
"""
presentation = et.find('.//%sbody/%spresentation' % (NS_OFFICE, NS_OFFICE))
page_copy = copy.deepcopy(presentation[1])
del presentation[1:]
texts = presentation[0].findall('.//%sp' % (NS_TEXT))
for index, text in enumerate(texts):
if index == 0:
text.text = self.meta['title']
elif index == 1:
text.text = self.meta['subtitle']
elif index == 2:
text.text = self.meta['author']
for page in self.pages:
title = page[0]
items = page[1]
page_copycopy = copy.deepcopy(page_copy)
text_boxes = page_copycopy.findall('.//%stext-box' % NS_DRAW)
textp = text_boxes[0].findall('.//%sp' % NS_TEXT)
textp[0].text = title
list_copy = copy.deepcopy(text_boxes[1].find('.//%slist' % NS_TEXT))
del text_boxes[1][0:]
self._recurse_items(items, list_copy, text_boxes[1], 1)
presentation.append(page_copycopy)
return et
def _recurse_items(self, items, list_copy, text_box, depth):
"""Search for line items of a certain depth.
"""
findp = './/%sp' % NS_TEXT
for item in items:
if isinstance(item, list):
tofind = '%slist-item' % NS_TEXT
stylename = '%sstyle-name' % NS_TEXT
list_copycopy = copy.deepcopy(list_copy)
for node in list_copycopy.getiterator():
if node.tag == tofind:
if len(node.getchildren()):
node.remove(node.getchildren()[0])
else:
                            raise ValueError('Unable to find any child nodes for "%s"' % node)
newsubnode = copy.deepcopy(list_copy)
del newsubnode.attrib[stylename]
textp = newsubnode.findall(findp)
if textp:
textp = textp[0]
text = textp.attrib[stylename]
text = text[0:-1] + str(int(text[-1]) + 1)
textp.attrib[stylename] = text
textp.text = ''
node.append(newsubnode)
else:
                            raise ValueError('Unable to find2 "%s" in "%s"'
                                             % (findp, dump(newsubnode)))
break
self._recurse_items(item, list_copycopy, text_box, depth + 1)
else:
list_copycopy = copy.deepcopy(list_copy)
textp = list_copycopy.findall(findp)
if textp:
textp = textp[0]
textp.text = item
else:
raise 'Unable to find "%s" in "%s"' % (findp,
dump(list_copycopy))
text_box.append(list_copycopy)
def main(argv):
import optparse
parser = optparse.OptionParser()
options, args = parser.parse_args()
meta = {
'title' : 'My title',
}
pages = [
('Page 1', ['Line 1', 'Line 2']),
('Page 2', ['Line 3', 'Line 4']),
]
update_odf = UpdateOdp(meta, pages)
update_odf.ReadWrite('template.odp', 'sample-out.odp')
if __name__ == '__main__':
main(sys.argv)
|
The Petroleum Corporation of Jamaica (PCJ) says solar panels have been installed in 12 schools as part of a project to cut electricity costs to the Government.
The schools include Ardenne High, Clarendon College, Glenmuir High, Hampton School for Girls, and Munro College.
The PCJ says it anticipates that the systems should be fully operational later this month.
The state-run agency says three more schools and select government offices are to be outfitted with solar panels under phase two of the project.
The institutions are the Kingston High School, Rose Hill Primary and St Hugh’s High School, and the main offices of the Scientific Research Council, the Office of Disaster Preparedness and Emergency Management and the PCJ.
The PCJ says all 18 systems are expected to generate 344,000 kilowatt hours of energy per year.
It says after all the systems are operationalised the government expects to save an estimated $16 million annually on electricity bills. |
STOP = "stop"
PLAY = "play"
PAUSE = "pause"
class Song:
"""
Simple structure over currentsong dict returned by MPDClient
"""
def __init__(self, song: dict):
"""
Create instance from dictionary returned by MPDClient
"""
# primary
self.title = song.get('title')
self.artist = song.get('artist')
self.album = song.get('album')
self.file = song.get('file')
# derived
self.isstream = self.file.startswith('http')
if not self.isstream:
self.length = int(song['time']) * 1000
def __repr__(self):
nstr = lambda s: s if s else "<empty>"
source = "[http]" if self.isstream else "[file]"
return "{:s} {:s} - {:s}".format(source, nstr(self.artist), nstr(self.title))
class Status:
"""
Simple structure over status dict returned by MPDClient
"""
def __init__(self, status: dict):
self.state = status['state']
self.elapsed = status['elapsed'] if self.state != STOP else None
def __str__(self):
return "{:s}({:s})".format(self.state, self.elapsed if self.elapsed else "")
|
We provide personalized and independent advice.
As fiduciaries, we put our clients’ interests first.
As fee-only advisors, we charge 0.6% of the assets we manage.
With over 200 years of combined investment experience, our team of 10 investment professionals includes 7 CFA® Charterholders, 2 CFP® professionals, and 2 CPAs.
NSAM is the 2nd largest independent SEC Registered Investment Advisory firm in Wisconsin, with over $1.5 billion of assets under management.
As fiduciaries, we provide families, corporations and endowments with customized investment management and retirement planning services.
We build customized equity and fixed income portfolios by performing proprietary fundamental analysis, including company visits and communication with Wall Street analysts.
Our team of 11 investment professionals, including 7 CFA charterholders, 2 CFP professionals and 2 CPAs, has over 200 years of combined investment experience.
Founded in 1996 to ensure the continuity of our team and investment approach as well as our commitment to personalized client service, we have grown assets under management to well over $1 B.
Access our views on current economic and investment trends.
Read frequently asked questions and related answers pertaining to North Star Asset Management. |
# coding=utf-8
from sqlalchemy import create_engine, Column, Integer, String, Sequence
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import and_, or_
from sqlalchemy.orm import sessionmaker
from consts import DB_URI
eng = create_engine(DB_URI)
Base = declarative_base()
class User(Base):
__tablename__ = 'users'
id = Column(Integer, Sequence('user_id_seq'),
primary_key=True, autoincrement=True)
name = Column(String(50))
Base.metadata.drop_all(bind=eng)
Base.metadata.create_all(bind=eng)
Session = sessionmaker(bind=eng)
session = Session()
session.add_all([User(name=username)
for username in ('xiaoming', 'wanglang', 'lilei')])
session.commit()
def get_result(rs):
    print('-' * 20)
    for user in rs:
        print(user.name)
rs = session.query(User).all()
get_result(rs)
rs = session.query(User).filter(User.id.in_([2, ]))
get_result(rs)
rs = session.query(User).filter(and_(User.id > 2, User.id < 4))
get_result(rs)
rs = session.query(User).filter(or_(User.id == 2, User.id == 4))
get_result(rs)
rs = session.query(User).filter(User.name.like('%min%'))
get_result(rs)
user = session.query(User).filter_by(name='xiaoming').first()
get_result([user])
|
Get the party started on board our Union City Party Bus!
Think Escape Union City Party Bus is the number one provider of limo bus and limousine services in the Union City Bay Area. We have one of the largest fleets of limousine buses in the SF Bay Area and a wide array of limo rentals, including Hummer Limousine, Escalade Limo, Excursion Limo and more! Our staff is top notch and will provide you with the best service the Union City area has to offer. Our party buses are comfortable, stylish and come fully equipped with state-of-the-art sound systems, iPod hookups, DVD / Blu-ray players, wet bars, fiber optic lighting and more. Some of our limo buses even come with dancing poles for those looking for some extra fun on the bus. We also provide the top Union City Party Buses for corporate events throughout the Bay Area. Our fully licensed and trained chauffeurs will make sure you and your team have an unforgettable experience.
Union City Limo Bus has Flat Wide Screen Television, DVD, CD player, plush leather couch seating, unbelievable state of the art 18 Speaker Dynamic Surround Sound System and custom specialty lighting.
Reserve a Union City Party Bus Today for the Best Price!
Enjoy all the luxuries of the number one Union City Party Bus and Limo Bus provider at exceptional rates! We also offer amazing Union City Prom Party Bus packages, along with wedding limousine service, night club limo bus tours, concert packages, corporate transportation and quinceanera party bus. You can even make your way on our limo buses from Union City to Napa Valley Wineries for a wine tour. Our drivers and staff are seasoned veterans when it comes to party bus wine tours in the Napa Valley and Sonoma area.
If you’re looking for a Union City Party Bus company that holds its clients as its number one priority, and has been voted the best party bus tour company on City Search – then Think Escape SF Party Bus is for you! Give us a call at (800) 823-7249 for more details. |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# Copyright (C) 2006-2007 Alec Thomas <[email protected]>
# Copyright (C) 2007 Christian Boos <[email protected]>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christian Boos <[email protected]>
# Alec Thomas <[email protected]>
from trac.core import *
from trac.util.translation import _
class ResourceNotFound(TracError):
"""Thrown when a non-existent resource is requested"""
class IResourceManager(Interface):
def get_resource_realms():
"""Return resource realms managed by the component.
:rtype: `basestring` generator
"""
def get_resource_url(resource, href, **kwargs):
"""Return the canonical URL for displaying the given resource.
:param resource: a `Resource`
:param href: an `Href` used for creating the URL
Note that if there's no special rule associated to this realm for
creating URLs (i.e. the standard convention of using realm/id applies),
then it's OK to not define this method.
"""
def get_resource_description(resource, format='default', context=None,
**kwargs):
"""Return a string representation of the resource, according to the
`format`.
:param resource: the `Resource` to describe
:param format: the kind of description wanted. Typical formats are:
`'default'`, `'compact'` or `'summary'`.
:param context: an optional rendering context to allow rendering rich
output (like markup containing links)
:type context: `Context`
Additional keyword arguments can be given as extra information for
some formats.
For example, the ticket with the id 123 is represented as:
- `'#123'` in `'compact'` format,
- `'Ticket #123'` for the `default` format.
- `'Ticket #123 (closed defect): This is the summary'` for the
`'summary'` format
Note that it is also OK to not define this method if there's no
special way to represent the resource, in which case the standard
representations 'realm:id' (in compact mode) or 'Realm id' (in
default mode) will be used.
"""
class Resource(object):
"""Resource identifier.
This specifies as precisely as possible *which* resource from a Trac
environment is manipulated.
A resource is identified by:
(- a `project` identifier) 0.12?
- a `realm` (a string like `'wiki'` or `'ticket'`)
- an `id`, which uniquely identifies a resource within its realm.
If the `id` information is not set, then the resource represents
the realm as a whole.
- an optional `version` information.
If `version` is `None`, this refers by convention to the latest
version of the resource.
Some generic and commonly used rendering methods are associated as well
to the Resource object. Those properties and methods actually delegate
the real work to the Resource's manager.
"""
__slots__ = ('realm', 'id', 'version', 'parent')
def __repr__(self):
path = []
r = self
while r:
name = r.realm
if r.id:
name += ':' + unicode(r.id) # id can be numerical
if r.version is not None:
name += '@' + unicode(r.version)
path.append(name or '')
r = r.parent
return '<Resource %r>' % (', '.join(reversed(path)))
def __eq__(self, other):
return self.realm == other.realm and \
self.id == other.id and \
self.version == other.version and \
self.parent == other.parent
def __hash__(self):
"""Hash this resource descriptor, including its hierarchy."""
path = ()
current = self
while current:
            path += (current.realm, current.id, current.version)
current = current.parent
return hash(path)
# -- methods for creating other Resource identifiers
def __new__(cls, resource_or_realm=None, id=False, version=False,
parent=False):
"""Create a new Resource object from a specification.
:param resource_or_realm: this can be either:
- a `Resource`, which is then used as a base for making a copy
- a `basestring`, used to specify a `realm`
:param id: the resource identifier
:param version: the version or `None` for indicating the latest version
>>> main = Resource('wiki', 'WikiStart')
>>> repr(main)
"<Resource u'wiki:WikiStart'>"
>>> Resource(main) is main
True
>>> main3 = Resource(main, version=3)
>>> repr(main3)
"<Resource u'wiki:WikiStart@3'>"
>>> main0 = main3(version=0)
>>> repr(main0)
"<Resource u'wiki:WikiStart@0'>"
        In a copy, if `id` is overridden, then the original `version` value
will not be reused.
>>> repr(Resource(main3, id="WikiEnd"))
"<Resource u'wiki:WikiEnd'>"
>>> repr(Resource(None))
"<Resource ''>"
"""
realm = resource_or_realm
if isinstance(resource_or_realm, Resource):
if id is False and version is False and parent is False:
return resource_or_realm
else: # copy and override
realm = resource_or_realm.realm
if id is False:
id = resource_or_realm.id
if version is False:
if id == resource_or_realm.id:
version = resource_or_realm.version # could be 0...
else:
version = None
if parent is False:
parent = resource_or_realm.parent
else:
if id is False:
id = None
if version is False:
version = None
if parent is False:
parent = None
resource = super(Resource, cls).__new__(cls)
resource.realm = realm
resource.id = id
resource.version = version
resource.parent = parent
return resource
def __call__(self, realm=False, id=False, version=False, parent=False):
"""Create a new Resource using the current resource as a template.
Optional keyword arguments can be given to override `id` and
`version`.
"""
return Resource(realm is False and self or realm, id, version, parent)
# -- methods for retrieving children Resource identifiers
def child(self, realm, id=False, version=False):
"""Retrieve a child resource for a secondary `realm`.
Same as `__call__`, except that this one sets the parent to `self`.
>>> repr(Resource(None).child('attachment', 'file.txt'))
"<Resource u', attachment:file.txt'>"
"""
return Resource(realm, id, version, self)
class ResourceSystem(Component):
"""Resource identification and description manager.
This component makes the link between `Resource` identifiers and their
corresponding manager `Component`.
"""
resource_managers = ExtensionPoint(IResourceManager)
def __init__(self):
self._resource_managers_map = None
# Public methods
def get_resource_manager(self, realm):
"""Return the component responsible for resources in the given `realm`
:param realm: the realm name
:return: a `Component` implementing `IResourceManager` or `None`
"""
# build a dict of realm keys to IResourceManager implementations
if not self._resource_managers_map:
map = {}
for manager in self.resource_managers:
for manager_realm in manager.get_resource_realms():
map[manager_realm] = manager
self._resource_managers_map = map
return self._resource_managers_map.get(realm)
def get_known_realms(self):
"""Return a list of all the realm names of resource managers."""
realms = []
for manager in self.resource_managers:
for realm in manager.get_resource_realms():
realms.append(realm)
return realms
# -- Utilities for manipulating resources in a generic way
def get_resource_url(env, resource, href, **kwargs):
"""Retrieve the canonical URL for the given resource.
This function delegates the work to the resource manager for that
resource if it implements a `get_resource_url` method, otherwise
reverts to simple '/realm/identifier' style URLs.
:param env: the `Environment` where `IResourceManager` components live
:param resource: the `Resource` object specifying the Trac resource
:param href: an `Href` object used for building the URL
    Additional keyword arguments are translated as query parameters in the URL.
>>> from trac.test import EnvironmentStub
>>> from trac.web.href import Href
>>> env = EnvironmentStub()
>>> href = Href('/trac.cgi')
>>> main = Resource('generic', 'Main')
>>> get_resource_url(env, main, href)
'/trac.cgi/generic/Main'
>>> get_resource_url(env, main(version=3), href)
'/trac.cgi/generic/Main?version=3'
>>> get_resource_url(env, main(version=3), href)
'/trac.cgi/generic/Main?version=3'
>>> get_resource_url(env, main(version=3), href, action='diff')
'/trac.cgi/generic/Main?action=diff&version=3'
>>> get_resource_url(env, main(version=3), href, action='diff', version=5)
'/trac.cgi/generic/Main?action=diff&version=5'
"""
manager = ResourceSystem(env).get_resource_manager(resource.realm)
if not manager or not hasattr(manager, 'get_resource_url'):
args = {'version': resource.version}
args.update(kwargs)
return href(resource.realm, resource.id, **args)
else:
return manager.get_resource_url(resource, href, **kwargs)
def get_resource_description(env, resource, format='default', **kwargs):
"""Retrieve a standardized description for the given resource.
This function delegates the work to the resource manager for that
resource if it implements a `get_resource_description` method,
otherwise reverts to simple presentation of the realm and identifier
information.
:param env: the `Environment` where `IResourceManager` components live
:param resource: the `Resource` object specifying the Trac resource
:param format: which formats to use for the description
Additional keyword arguments can be provided and will be propagated
to resource manager that might make use of them (typically, a `context`
parameter for creating context dependent output).
>>> from trac.test import EnvironmentStub
>>> env = EnvironmentStub()
>>> main = Resource('generic', 'Main')
>>> get_resource_description(env, main)
u'generic:Main'
>>> get_resource_description(env, main(version=3))
u'generic:Main'
>>> get_resource_description(env, main(version=3), format='summary')
u'generic:Main at version 3'
"""
manager = ResourceSystem(env).get_resource_manager(resource.realm)
if not manager or not hasattr(manager, 'get_resource_description'):
name = u'%s:%s' % (resource.realm, resource.id)
if format == 'summary':
name += _(' at version %(version)s', version=resource.version)
return name
else:
return manager.get_resource_description(resource, format, **kwargs)
def get_resource_name(env, resource):
return get_resource_description(env, resource)
def get_resource_shortname(env, resource):
return get_resource_description(env, resource, 'compact')
def get_resource_summary(env, resource):
return get_resource_description(env, resource, 'summary')
def get_relative_resource(resource, path=''):
"""Build a Resource relative to a reference resource.
:param path: path leading to another resource within the same realm.
"""
if path in (None, '', '.'):
return resource
else:
base = unicode(path[0] != '/' and resource.id or '').split('/')
for comp in path.split('/'):
if comp == '..':
if base:
base.pop()
elif comp and comp != '.':
base.append(comp)
return resource(id=base and '/'.join(base) or None)
def get_relative_url(env, resource, href, path='', **kwargs):
"""Build an URL relative to a resource given as reference.
:param path: path leading to another resource within the same realm.
>>> from trac.test import EnvironmentStub
>>> env = EnvironmentStub()
>>> from trac.web.href import Href
>>> href = Href('/trac.cgi')
>>> main = Resource('wiki', 'Main', version=3)
Without parameters, return the canonical URL for the resource, like
`get_resource_url` does.
>>> get_relative_url(env, main, href)
'/trac.cgi/wiki/Main?version=3'
Paths are relative to the given resource:
>>> get_relative_url(env, main, href, '.')
'/trac.cgi/wiki/Main?version=3'
>>> get_relative_url(env, main, href, './Sub')
'/trac.cgi/wiki/Main/Sub'
>>> get_relative_url(env, main, href, './Sub/Infra')
'/trac.cgi/wiki/Main/Sub/Infra'
>>> get_relative_url(env, main, href, './Sub/')
'/trac.cgi/wiki/Main/Sub'
>>> mainsub = main(id='Main/Sub')
>>> get_relative_url(env, mainsub, href, '..')
'/trac.cgi/wiki/Main'
>>> get_relative_url(env, main, href, '../Other')
'/trac.cgi/wiki/Other'
References always stay within the current resource realm:
>>> get_relative_url(env, mainsub, href, '../..')
'/trac.cgi/wiki'
>>> get_relative_url(env, mainsub, href, '../../..')
'/trac.cgi/wiki'
>>> get_relative_url(env, mainsub, href, '/toplevel')
'/trac.cgi/wiki/toplevel'
Extra keyword arguments are forwarded as query parameters:
>>> get_relative_url(env, main, href, action='diff')
'/trac.cgi/wiki/Main?action=diff&version=3'
"""
return get_resource_url(env, get_relative_resource(resource, path),
href, **kwargs)
def render_resource_link(env, context, resource, format='default'):
"""Utility for generating a link `Element` to the given resource.
Some component manager may directly use an extra `context` parameter
in order to directly generate rich content. Otherwise, the textual output
is wrapped in a link to the resource.
"""
from genshi.builder import Element, tag
link = get_resource_description(env, resource, format, context=context)
if not isinstance(link, Element):
link = tag.a(link, href=get_resource_url(env, resource, context.href))
return link
|
A server that receives requests.
A GraphQL schema that's attached to the server.
A client-side mechanism for communicating with a GraphQL schema, via a server.
In Pup, all three of these are provided for you, ready to use out-of-the-box. In order to start customizing Pup for your application, it's important to understand what "schema" refers to in GraphQL as that is where you will define what data is available in your application and how to access it.
All of the available Types of data in your application.
All of the Queries that can be performed in your application.
All of the Mutations that can be performed in your application.
All of the Subscriptions that can be used in your application.
In addition to these core conventions, your GraphQL schema can also be used to define "helper" types of data like Fragments and Enums.
Your schema is best thought of as the "registration" mechanism that defines all of the possible ways to consume data in your application. When a request is made to your GraphQL server, it's passed to your schema and then your schema decides how to respond or resolve that request.
When we think about the traditional CRUD (create, read, update, delete) pattern for an application, your queries handle the "R" or "read" part and your mutations handle the "CUD" or "create, update, delete" part.
When a query request is made from the client of our application, it's done so by specifying a query field which maps to a resolver. A resolver is a function that—like the name implies—resolves the request (i.e., it returns the requested data).
When a mutation request is made from the client of our application, it's done so by specifying a mutation field which maps to a resolver. Again, just like with queries, a mutation resolver is a function that resolves the request (i.e., mutates or changes the data).
Whenever a resolver function—query or mutation—returns some data, it's passed or filtered through a Type before being returned to the client/request.
How a request flows in GraphQL.
Here, in our Document type we've omitted the revisions field visible on our example document in our database. Assuming we performed a query request from the client requesting a document with the _id of 123, when it's returned, even though revisions is defined on that document, it will be "scrubbed" by our GraphQL Type because it's not defined as a field supported by the Document data type.
Here, our query is asking for a document with the _id 123 and specifies that when that document is returned, it should only include the _id and title fields. This is unique to GraphQL, even though our Document type defines three possible fields that can be returned, from the client, we can request only the specific fields we need for our UI.
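To make that concrete, a request like the one just described might be written something like this from the client. This is only a sketch: the gql tag comes from the graphql-tag package, and the field and argument names simply mirror the example in this paragraph rather than Pup's actual files.

import gql from 'graphql-tag';

// Ask for one document by _id, selecting only the _id and title fields.
// Even if the Document type defines more fields (body, comments, ...),
// only the fields requested here come back.
const getDocument = gql`
  query document($_id: String!) {
    document(_id: $_id) {
      _id
      title
    }
  }
`;

// A client would run this with variables like { _id: '123' }.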
Schema sees a query for the document field defined under the type Query block (expecting it to return some data matching a required _id argument as a String that itself matches the Document type).
Schema locates the matching resolver function with the same name under the resolvers.Query object and calls the corresponding function.
Data is passed through the type for validation/filtering.
Data is returned to client.
Types are the essential building block of your schema. Types define the shape of the data that's coming in and out of your schema. Though the type keyword is most common, there are several other types of...types like input and enum.
In your schema, there are three "root" types type Query, type Mutation, and type Subscription along with your custom types. These three root types can be seen in the example above and are responsible for defining the query, mutation, and subscription fields that can be accessed from the client along with their expected return values.
The idea here is that when you define a field (e.g., documents: [Document]), you're telling GraphQL "I want users to be able to type in a query that looks like this and call to a resolver function with the same name:"
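For example, assuming a documents: [Document] field on type Query, the query a user types might look like this (a hypothetical sketch, not copied from Pup's files):

// The field name in the query ("documents") is what GraphQL uses to look up
// the resolver function of the same name on resolvers.Query.
const listDocumentsQuery = `
  query documents {
    documents {
      _id
      title
    }
  }
`;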
That resolver function, then, is defined in the resolvers.Query object on your schema.
An example of a basic type is displayed on the right. This represents the Document type in Pup. Types are simple. They define the fields that a piece of data can have and the scalar types those fields are expected to contain.
Notice that types can be combined. In our example Document type, the comments field is saying that it expects an array of the Comment type to be returned for that field. What's neat about this is that it can be resolved as a nested query, meaning we don't have to store the comments on the document itself. Instead, we can retrieve the comments for the document separately and combine them with the document before sending the response back to the client.
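Since the side-by-side code isn't reproduced here, here is a rough sketch of what such a type might look like; the field names are assumptions based on the description, not a verbatim copy of Pup's Document type.

const typeDefs = `
  type Comment {
    _id: String
    comment: String
  }

  type Document {
    _id: String
    title: String
    body: String
    comments: [Comment]  # resolved separately and merged in (see below)
  }
`;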
inputs define the shape of input data or arguments passed to queries and mutations. Instead of specifying argument fields individually when defining a query or mutation, you can pass them as an object, defining its type as an input.
enums are pre-defined lists of values that are allowed by a given field on a type.
Example UserInput type taken from /api/Users/types.js.
key: String # What is the key value you'll access this setting with?
label: String # The user-facing label for the setting.
Notice that the AllowedSettingType enum is used to specify the type of data expected by the type field on the UserSetting type. This means only a value of 'boolean', 'string', or 'number' can be passed for this field's value. Example taken from /api/UserSettings/types.js.
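Pieced together from the descriptions above, a sketch of how the enum, the type and an input relate (the exact fields shown are illustrative, not the verbatim contents of those files):

const typeDefs = `
  enum AllowedSettingType {
    boolean
    string
    number
  }

  type UserSetting {
    key: String            # What is the key value you'll access this setting with?
    label: String          # The user-facing label for the setting.
    type: AllowedSettingType
  }

  # Arguments for a query or mutation can be grouped into a single input.
  input UserInput {
    email: String
    name: String
  }
`;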
A type for the data being returned needs to be added to the typeDefs object.
A field needs to be defined on the root Query type.
A resolver function needs to be defined on the resolvers.Query object.
We covered types above, but need to discuss defining a field on the root Query type along with a resolver function. On the right, we can see the schema from Pup displayed, simplified to show the code for querying a list of documents.
The example here is purposefully simplified to remove imports so that you can see how things wire together. Keep in mind that in Pup, parts of the schema like types and resolvers are stored in their own files and imported into /startup/server/api.js.
Here, we've defined all three parts outlined above. Pay close attention to the structure and where things are being placed.
The important thing to note here is the connection between the documents nested inside of type Query and the documents nested inside of resolvers.Query. The former defines that field as something we can query against and the resolver function is how we resolve that query.
Though it may seem like we're doing work twice here, one part is typing our query and one part is actually handling it. It is a bit more work than some data systems, but it gives us 100% clarity over what is and isn't happening.
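Wired together, the three parts look roughly like this. This is a simplified sketch in the spirit of /startup/server/api.js, not the full file; makeExecutableSchema here comes from graphql-tools.

import { makeExecutableSchema } from 'graphql-tools';

const typeDefs = `
  type Document {
    _id: String
    title: String
    body: String
  }

  type Query {
    documents: [Document]
  }
`;

const resolvers = {
  Query: {
    // The field name under type Query and this resolver name must match.
    documents: (parent, args, context) => {
      // ...return an array of objects shaped like the Document type...
      return [];
    },
  },
};

const schema = makeExecutableSchema({ typeDefs, resolvers });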
parent the parent query, if one exists.
args any arguments passed to the query.
context a miscellaneous context object. In Pup, this contains a context.user property containing the logged in user if one exists.
Here, in response to our documents resolver function, we're saying "if there's a user with an _id, find all of the documents owned by that user and .fetch() them as an array. If there's not a user, return an empty array."
It's important to note that "the GraphQL part" stops once we're inside of our function. At that point, we can run whatever code we'd like to resolve the query—that's up to us. Here, we're using the Documents collection that's built-in to Pup and uses Meteor's MongoDB implementation.
Just the same, we could make an API call to a third-party service here and GraphQL couldn't care less. Its only concern is that the data returned matches the type specified for the field on the type Query. In this case, an array of objects resembling the Document type.
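In code, the resolver body just described might look something like this. It's a sketch: Documents stands in for Pup's built-in Meteor collection, and the owner field used for the lookup is an assumption.

const resolvers = {
  Query: {
    documents: (parent, args, context) => {
      // If a logged-in user is on the context, return their documents...
      if (context.user && context.user._id) {
        return Documents.find({ owner: context.user._id }).fetch();
      }
      // ...otherwise, return an empty array.
      return [];
    },
  },
};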
We've purposefully ignored something here. On our Document type, we include a comments property set equal to an array of the Comment type defined just above it. Here, we're telling GraphQL that when documents are returned to us, an array of comments might be included.
To cut to the chase: we do not nest comments directly on documents in the database. Instead, we rely on GraphQL's ability to perform nested queries to do that work for us. Think of this as a sort of database join (if you're familiar with SQL).
The "magic" at play here takes place down in our resolvers object. At this point, we've told GraphQL to expect a possible comments array, but we haven't defined how to return or resolve those comments.
If we look close, our resolvers object has a Document property in addition to the Query property. On that Document property, we've defined a function called comments. Just like we see a few lines up, this is a resolver function. The difference is that here, we're telling GraphQL how to resolve a query for the comments field on the Document type.
This may seem confusing, but consider this: Query is a type defined in our schema and so is Document. On the resolvers object in our schema, then, we're telling GraphQL how to resolve for those types. Query is a type and so is Document. The functions we define within them resolve the fields we've defined on them.
Focusing back on our comments resolver function under Document, we can see that the first argument parent is actually being utilized here. In this context, because comments is being resolved as a field on a document, we expect parent to represent the document being returned.
Because we're querying for documents (plural), for each document returned, this comments resolver function will fire, passing the current document as the parent.
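A sketch of that nested resolver follows; the Comments collection and the documentId field are assumptions based on the description above.

const resolvers = {
  Query: { /* ...documents resolver from above... */ },
  Document: {
    // Fires once per document returned; parent is the document being resolved.
    comments: (parent) => {
      return Comments.find({ documentId: parent._id }).fetch();
    },
  },
};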
That's it! Now, we have a queryable field added to our schema, complete with a nested field.
This example has been adapted from /startup/server/api.js.
As we mentioned earlier, mutations are the CUD or "create, update, delete" part of GraphQL. While there's technically no limit on what code you can call with a mutation (at their core, mutations are known as RPCs or remote procedure calls which are just a means for invoking code on the server from a client), traditionally they're used for these purposes.
Like we saw above with queries, a similar process is followed for mutations. In order to fully implement a mutation, we need to have 3-4 things: a type of data being returned from the mutation, a field on the root Mutation type, and a resolver function for the mutation. Optionally, if your mutation accepts arguments, you may also need to define an input type.
Again, on the right, we've defined a simplified example of the schema in Pup. This time, we're focusing on the addDocument mutation.
This should look familiar. In fact, mutations are nearly identical in terms of the "parts" involved. The big difference here is the way we've defined our field on our root Mutation: addDocument(title: String, body: String): Document.
Notice that we define a mutation with a set of parentheses after its name which contains a set of arguments. Here, we expect title and body to be passed as String values from the client. Notice, too, that instead of expecting an array of documents to be returned from our resolver, we expect a single document.
This needs some clarification. Even though our intent with a mutation is to mutate or change some data, it's still good practice to return some value back to the client once that mutation is finished. The why of this depends on your product, but primarily, it's helpful for updating the client side cache with the new or changed data resulting from the mutation.
If we look at our resolvers.Mutation resolver function, we can see a similar idea being implemented to our query resolver function. Inside, we write the code—again, whatever we'd like—to resolve our mutation. In this case, our intent is to add a new document, so we call to the Documents.insert() method to create a new document in our MongoDB collection. Notice, too, that we put our .insert() call into a variable and expect back the _id of the new document.
This is important. Remember, we want to return our new document from our mutation, so at the bottom of our resolver function, we retrieve that new document using documentId and then return it from our Mutation's resolver function.
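Sketched out, the mutation might be wired up like this. The field definition matches the one quoted above; the collection calls and the owner field are illustrative, not Pup's exact code.

const typeDefs = `
  type Mutation {
    addDocument(title: String, body: String): Document
  }
`;

const resolvers = {
  Mutation: {
    addDocument: (parent, args, context) => {
      // Insert the new document and capture the _id it was given...
      const documentId = Documents.insert({
        title: args.title,
        body: args.body,
        owner: context.user && context.user._id, // assumed ownership field
      });
      // ...then return the full document so the client cache can update.
      return Documents.findOne(documentId);
    },
  },
};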
That's it! We've added a mutation to our schema and are ready to add new documents.
Subscriptions are a different beast compared to queries and mutations, but they needn't be confusing. In simple terms, subscriptions are a way for a client to "subscribe" to some event on the server; when that event occurs, a message is published back to that client.
In GraphQL, a subscription is exactly this: a means for us to communicate something happening in our schema back to clients who've subscribed to that particular "something happening."
In Pup, an example is implemented using subscriptions to tell clients when a new comment is added to a document they're viewing. Because GraphQL is a pull system and not push, by default if a user is viewing a document and a comment is posted, they'd need to refresh to see the new comment. With subscriptions, we can enable the missing push functionality and send new comments to connected clients as they're created.
To illustrate this, we're going to use the same technique as above, but juxtapose a mutation along with a subscription. We want to do this because subscriptions are typically used in relation to data being mutated or changed in the app.
If we start by looking at our addComment mutation's resolver function, if we look close we can spot a call to context.pubsub.publish() immediately after Comments.insert(). This is where our subscriptions start to take shape.
Here, we take in the pubsub connection—provided for you on the context object by Pup and PupQL—and call the .publish() method, passing two things: the name of the event that's occurring and a payload of data to send along with the event. This is the pub or "publish" part of pubsub.
When this is called, we're sending a message over the pubsub connection. Pay close attention to the name we're using here commentAdded. This is the name of the event that we need to tell our subscription to listen for.
To define our subscription, next, down in the resolvers.Subscription block, we add a property commentAdded (this pairs with the name we used in our pubsub.publish() call) and assign it a call to a function withFilter() imported from the apollo-server package. The idea here is that when a comment is added, we only want to notify clients who are subscribed to changes for a specific document.
As the first argument to withFilter() we pass a function which returns a call to context.pubsub.asyncIterator(), passing the name of the event we want to listen for. Here, the .asyncIterator() part is listening for events being published with the name commentAdded. When it sees an event matching that name, it returns that event's payload from this function (again, the function passed as the first argument to withFilter()).
This is where withFilter() actually comes into play. The first function is saying "hey! we saw an event with the name commentAdded" while the second function is saying "easy on the gas, nerd, let's make sure that's something we care about." The second function then takes in the payload which is the data published via the pubsub.publish() call above and compares it to the variables it's received from the client (more on this in the Using Subscriptions docs).
If the documentId on the comment being added matches the documentId passed from the client (i.e., the one the user is currently subscribing to new comments on) then the filter returns true and lets the event pass through to the client.
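Putting the publish and subscribe halves side by side, the flow described above might look roughly like this. The event name and the withFilter import follow the text; the collection calls and payload shape are assumptions.

import { withFilter } from 'apollo-server';

const resolvers = {
  Mutation: {
    addComment: (parent, args, context) => {
      const commentId = Comments.insert({ ...args });
      const comment = Comments.findOne(commentId);
      // Publish immediately after the insert, naming the event and
      // attaching the new comment as the payload.
      context.pubsub.publish('commentAdded', { commentAdded: comment });
      return comment;
    },
  },
  Subscription: {
    commentAdded: {
      // graphql-tools expects subscriptions to provide a subscribe function.
      subscribe: withFilter(
        // Listen for commentAdded events on the pubsub connection.
        (parent, args, context) => context.pubsub.asyncIterator('commentAdded'),
        // Only let through events for the document this client subscribed to.
        (payload, variables) =>
          payload.commentAdded.documentId === variables.documentId
      ),
    },
  },
};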
Boom! Notice that this is super granular. We're not just willy nilly opening up a WebSocket and pushing data all over the place. We're only sending what's asked for. This means that we can take advantage of real-time data without bogging down clients. S-C-O-R-E. |
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pure Python crypto-related routines for oauth2client.
Uses the ``rsa``, ``pyasn1`` and ``pyasn1_modules`` packages
to parse PEM files storing PKCS#1 or PKCS#8 keys as well as
certificates.
"""
from pyasn1.codec.der import decoder
from pyasn1_modules import pem
from pyasn1_modules.rfc2459 import Certificate
from pyasn1_modules.rfc5208 import PrivateKeyInfo
import rsa
import six
from oauth2client import _helpers
_PKCS12_ERROR = r"""\
PKCS12 format is not supported by the RSA library.
Either install PyOpenSSL, or please convert .p12 format
to .pem format:
$ cat key.p12 | \
> openssl pkcs12 -nodes -nocerts -passin pass:notasecret | \
> openssl rsa > key.pem
"""
_POW2 = (128, 64, 32, 16, 8, 4, 2, 1)
_PKCS1_MARKER = ('-----BEGIN RSA PRIVATE KEY-----',
'-----END RSA PRIVATE KEY-----')
_PKCS8_MARKER = ('-----BEGIN PRIVATE KEY-----',
'-----END PRIVATE KEY-----')
_PKCS8_SPEC = PrivateKeyInfo()
def _bit_list_to_bytes(bit_list):
"""Converts an iterable of 1's and 0's to bytes.
Combines the list 8 at a time, treating each group of 8 bits
as a single byte.
"""
num_bits = len(bit_list)
byte_vals = bytearray()
for start in six.moves.xrange(0, num_bits, 8):
curr_bits = bit_list[start:start + 8]
char_val = sum(val * digit
for val, digit in zip(_POW2, curr_bits))
byte_vals.append(char_val)
return bytes(byte_vals)
class RsaVerifier(object):
"""Verifies the signature on a message.
Args:
pubkey: rsa.key.PublicKey (or equiv), The public key to verify with.
"""
def __init__(self, pubkey):
self._pubkey = pubkey
def verify(self, message, signature):
"""Verifies a message against a signature.
Args:
message: string or bytes, The message to verify. If string, will be
encoded to bytes as utf-8.
signature: string or bytes, The signature on the message. If
string, will be encoded to bytes as utf-8.
Returns:
True if message was signed by the private key associated with the
public key that this object was constructed with.
"""
message = _helpers._to_bytes(message, encoding='utf-8')
try:
return rsa.pkcs1.verify(message, signature, self._pubkey)
except (ValueError, rsa.pkcs1.VerificationError):
return False
@classmethod
def from_string(cls, key_pem, is_x509_cert):
"""Construct an RsaVerifier instance from a string.
Args:
key_pem: string, public key in PEM format.
is_x509_cert: bool, True if key_pem is an X509 cert, otherwise it
is expected to be an RSA key in PEM format.
Returns:
RsaVerifier instance.
Raises:
ValueError: if the key_pem can't be parsed. In either case, error
will begin with 'No PEM start marker'. If
``is_x509_cert`` is True, will fail to find the
"-----BEGIN CERTIFICATE-----" error, otherwise fails
to find "-----BEGIN RSA PUBLIC KEY-----".
"""
key_pem = _helpers._to_bytes(key_pem)
if is_x509_cert:
der = rsa.pem.load_pem(key_pem, 'CERTIFICATE')
asn1_cert, remaining = decoder.decode(der, asn1Spec=Certificate())
if remaining != b'':
raise ValueError('Unused bytes', remaining)
cert_info = asn1_cert['tbsCertificate']['subjectPublicKeyInfo']
key_bytes = _bit_list_to_bytes(cert_info['subjectPublicKey'])
pubkey = rsa.PublicKey.load_pkcs1(key_bytes, 'DER')
else:
pubkey = rsa.PublicKey.load_pkcs1(key_pem, 'PEM')
return cls(pubkey)
class RsaSigner(object):
"""Signs messages with a private key.
Args:
pkey: rsa.key.PrivateKey (or equiv), The private key to sign with.
"""
def __init__(self, pkey):
self._key = pkey
def sign(self, message):
"""Signs a message.
Args:
message: bytes, Message to be signed.
Returns:
string, The signature of the message for the given key.
"""
message = _helpers._to_bytes(message, encoding='utf-8')
return rsa.pkcs1.sign(message, self._key, 'SHA-256')
@classmethod
def from_string(cls, key, password='notasecret'):
"""Construct an RsaSigner instance from a string.
Args:
key: string, private key in PEM format.
password: string, password for private key file. Unused for PEM
files.
Returns:
RsaSigner instance.
Raises:
ValueError if the key cannot be parsed as PKCS#1 or PKCS#8 in
PEM format.
"""
key = _helpers._from_bytes(key) # pem expects str in Py3
marker_id, key_bytes = pem.readPemBlocksFromFile(
six.StringIO(key), _PKCS1_MARKER, _PKCS8_MARKER)
if marker_id == 0:
pkey = rsa.key.PrivateKey.load_pkcs1(key_bytes,
format='DER')
elif marker_id == 1:
key_info, remaining = decoder.decode(
key_bytes, asn1Spec=_PKCS8_SPEC)
if remaining != b'':
raise ValueError('Unused bytes', remaining)
pkey_info = key_info.getComponentByName('privateKey')
pkey = rsa.key.PrivateKey.load_pkcs1(pkey_info.asOctets(),
format='DER')
else:
raise ValueError('No key could be detected.')
return cls(pkey)
|
Jazz and Pizza with Morphology Take some cool jazz from 1950s California, mix it with latin and calypso beats, add some proper rustic Italian Pizza, then host it all in an intimate studio theatre with great sightlines. It’s Jazz and Pizza night at the Red Rose Chain. Perfect for a night out with friends or a fun alternative date night! Tickets £8 (pizza sold separately).
With highly acclaimed albums, a host of top festivals in the UK and overseas, and numerous international tours under her belt, Clare Free is a force to be reckoned with. She plays her uniquely cool brand of bluesy based music solo tonight.
The Blue Dahlia trio performs live at historic St. Peter's!
The Blue Dahlia trio opens for Eyal Lovett Quartet at the Stoke-by-Nayland Resort! Brief Bio: "Chanson from a cabaret on Mars"! An international project, based in NYC and Paris, led by chanteuse/uke player from NYC, Dahlia Dumont. Edgy, smart, lyrically fueled, jazz-infused tunes with classic chanson and Caribbean influences, with lyrics in both French and English. A joyous and groovy experience, then warm and nostalgic; an organic sound with an electric energy.
With highly acclaimed albums, a host of top Blues Festivals in the UK and overseas, and numerous international tours under her belt, Clare Free is a force to be reckoned with. She plays her uniquely cool brand of bluesy based music solo tonight.
With highly acclaimed albums, a host of top festivals in the UK and overseas, and numerous international tours under her belt, Clare Free is a force to be reckoned with. She plays her uniquely cool brand of bluesy based modern rock n roll solo tonight.
With highly acclaimed albums, a host of top Blues Festivals in the UK and overseas, and numerous international tours under her belt, Clare Free is a force to be reckoned with. She plays her uniquely cool brand of bluesy based music solo today.
With highly acclaimed albums, a host of top festivals in the UK and overseas, and numerous international tours under her belt, Clare Free is a force to be reckoned with. She plays her uniquely cool brand of bluesy based modern rock n roll solo today.
Back again for another Sunday afternoon of music.
With highly acclaimed albums, a host of top festivals in the UK and overseas, and numerous international tours under her belt, Clare Free is a force to be reckoned with. She plays her uniquely cool brand of bluesy based music solo this afternoon. |
# coding=utf-8
from ._commandbase import RadianceCommand
from ..datatype import RadiancePath, RadianceValue, RadianceNumber
from ..datatype import RadianceBoolFlag
from ..parameters.rfluxmtx import RfluxmtxParameters
import os
# TODO (sarith and mostapha): move parameters such as output_data_format to command
# parameters. They are not command inputs.
class Rfluxmtx(RadianceCommand):
"""Radiance Rfluxmtx matrix."""
ground_string = """
void glow ground_glow
0
0
4 1 1 1 0
ground_glow source ground
0
0
4 0 0 -1 180
"""
sky_string = """
void glow sky_glow
0
0
4 1 1 1 0
sky_glow source sky
0
0
4 0 0 1 180
"""
@staticmethod
def control_parameters(hemi_type='u', hemi_up_direction='Y', output_file=''):
"""Rfluxmtx ControlParameters."""
return RfluxmtxControlParameters(hemi_type, hemi_up_direction, output_file)
@staticmethod
def default_sky_ground(file_name, sky_type=None, sky_file_format=None,
ground_file_format=None):
"""
Args:
            file_name: The name of the file to which the sky definition
                should be written.
sky_type:The acceptable inputs for hemisphere type are:
u for uniform.(Usually applicable for ground).\n
kf for klems full.\n
kh for klems half.\n
kq for klems quarter.\n
rN for Reinhart - Tregenza type skies. N stands for
subdivisions and defaults to 1.\n
scN for shirley-chiu subdivisions.
Returns:
file_name: Passes back the same file_name that was provided as input.
"""
sky_param = Rfluxmtx.control_parameters(hemi_type=sky_type or 'r',
output_file=sky_file_format)
ground_param = Rfluxmtx.control_parameters(hemi_type='u',
output_file=ground_file_format)
ground_string = Rfluxmtx.add_control_parameters(Rfluxmtx.ground_string,
{'ground_glow': ground_param})
sky_string = Rfluxmtx.add_control_parameters(Rfluxmtx.sky_string,
{'sky_glow': sky_param})
with open(file_name, 'w')as skyFile:
skyFile.write(ground_string + '\n' + sky_string)
return file_name
@staticmethod
def add_control_parameters(input_string, modifier_dict):
if os.path.exists(input_string):
with open(input_string)as fileString:
file_data = fileString.read()
else:
file_data = input_string
output_string = ''
check_dict = dict.fromkeys(modifier_dict.keys(), None)
for lines in file_data.split('\n'):
for key, value in modifier_dict.items():
if key in lines and not check_dict[key] and \
not lines.strip().startswith('#'):
output_string += str(value) + '\n'
check_dict[key] = True
else:
output_string += lines.strip() + '\n'
for key, value in check_dict.items():
assert value, "The modifier %s was not found in the string specified" % key
if os.path.exists(input_string):
new_output_file = input_string[:-4] + '_cp_added' + input_string[-4:]
with open(new_output_file, 'w') as newoutput:
newoutput.write(output_string)
output_string = new_output_file
return output_string
@staticmethod
def check_for_rflux_parameters(file_val):
with open(file_val)as rfluxFile:
rflux_string = rfluxFile.read()
        assert '#@rfluxmtx' in rflux_string, \
            "The file %s does not have any rfluxmtx control parameters." % file_val
return True
# sender = RadiancePath('sender','sender file')
receiver_file = RadiancePath('receiver', 'receiver file')
octree_file = RadiancePath('octree', 'octree file', extension='.oct')
output_matrix = RadiancePath('output_matrix', 'output Matrix File')
view_rays_file = RadiancePath('view_rays_file',
'file containing ray samples generated by vwrays')
output_data_format = RadianceValue('f', 'output data format', is_joined=True)
verbose = RadianceBoolFlag('v', 'verbose commands in stdout')
num_processors = RadianceNumber('n', 'number of processors', num_type=int)
# TODO: This method misses RfluxmtxParameters as an input.
def __init__(self, sender=None, receiver_file=None, octree_file=None,
rad_files=None, points_file=None, output_matrix=None,
view_rays_file=None, view_info_file=None, output_filename_format=None,
num_processors=None):
RadianceCommand.__init__(self)
self.sender = sender
"""Sender file will be either a rad file containing rfluxmtx variables
or just a - """
self.receiver_file = receiver_file
"""Receiver file will usually be the sky file containing rfluxmtx
variables"""
self.octree_file = octree_file
"""Octree file containing the other rad file in the scene."""
self.rad_files = rad_files
"""Rad files other than the sender and receiver that are a part of the
scene."""
self.points_file = points_file
"""The points file or input vwrays for which the illuminance/luminance
value are to be calculated."""
self.number_of_points = 0
"""Number of test points. Initially set to 0."""
self.output_matrix = output_matrix
"""The flux matrix file that will be created by rfluxmtx."""
self.view_rays_file = view_rays_file
"""File containing ray samples generated from vwrays"""
self.view_info_file = view_info_file
"""File containing view dimensions calculated from vwrays."""
self.output_filename_format = output_filename_format
"""Filename format"""
self.num_processors = num_processors
"""Number of processors"""
@property
def output_filename_format(self):
return self._output_filename_format
@output_filename_format.setter
def output_filename_format(self, value):
# TODO: Add testing logic for this !
self._output_filename_format = value or None
@property
def view_info_file(self):
return self._view_info_file
@view_info_file.setter
def view_info_file(self, file_name):
"""
The input for this setter is a file containing the view dimensions
calculated through the -d option in rfluxmtx.
"""
if file_name:
assert os.path.exists(file_name),\
"The file %s specified as view_info_file does not exist." % file_name
self._view_info_file = file_name
with open(file_name) as view_fileName:
self._view_fileDimensions = view_fileName.read().strip()
else:
self._view_info_file = ''
self._view_fileDimensions = ''
@property
def points_file(self):
return self._points_file
@points_file.setter
def points_file(self, value):
if value:
if os.path.exists(value):
with open(value, 'rb') as pfile:
self.number_of_points = sum(1 for line in pfile if line.strip())
elif self.number_of_points == 0:
                print('Warning: Failed to find the points_file at "{}". '
                      'Use the number_of_points attribute to set the number '
                      'of points separately.'.format(value))
self._points_file = value
else:
self._points_file = ''
@property
def rad_files(self):
"""Get and set scene files."""
return self.__rad_files
@rad_files.setter
def rad_files(self, files):
if files:
self.__rad_files = [os.path.normpath(f) for f in files]
else:
self.__rad_files = []
@property
def rfluxmtx_parameters(self):
return self.__rfluxmtx_parameters
@rfluxmtx_parameters.setter
def rfluxmtx_parameters(self, parameters):
self.__rfluxmtx_parameters = parameters or RfluxmtxParameters()
assert hasattr(self.rfluxmtx_parameters, "isRfluxmtxParameters"), \
"input rfluxmtx_parameters is not a valid parameters type."
def to_rad_string(self, relative_path=False):
octree = self.octree_file.to_rad_string()
octree = '-i %s' % self.normspace(octree) if octree else ''
output_data_format = self.output_data_format.to_rad_string()
verbose = self.verbose.to_rad_string()
number_of_processors = self.num_processors.to_rad_string()
number_of_points = '-y %s' % self.number_of_points \
if self.number_of_points > 0 else ''
points_file = self.normspace(self.points_file)
points_file = '< %s' % points_file if points_file else ''
view_file_samples = self.normspace(self.view_rays_file.to_rad_string())
view_file_samples = '< %s' % view_file_samples if view_file_samples else ''
assert not (points_file and view_file_samples),\
'View file and points file cannot be specified at the same time!'
input_rays = points_file or view_file_samples
output_matrix = self.normspace(self.output_matrix.to_rad_string())
output_matrix = "> %s" % output_matrix if output_matrix else ''
output_filename_format = self.output_filename_format
output_filename_format = "-o %s" % output_filename_format if \
output_filename_format else ''
# method for adding an input or nothing to the command
def add_to_str(val):
return "%s " % val if val else ''
# Creating the string this way because it might change again in the
# future.
rad_string = ["%s " % self.normspace(os.path.join(self.radbin_path, 'rfluxmtx'))]
rad_string.append(add_to_str(output_data_format))
rad_string.append(add_to_str(verbose))
rad_string.append(add_to_str(number_of_processors))
rad_string.append(add_to_str(number_of_points))
rad_string.append(add_to_str(self._view_fileDimensions))
if str(self.sender).strip() == '-':
rad_string.append(add_to_str(self.rfluxmtx_parameters.to_rad_string()))
else:
# -I and -i options are only valid for pass-through cases
rflux_par = add_to_str(
self.rfluxmtx_parameters.to_rad_string()).replace(
'-I', '')
rad_string.append(rflux_par)
rad_string.append(add_to_str(output_filename_format))
rad_string.append(add_to_str(self.sender))
rad_string.append(add_to_str(self.normspace(self.receiver_file.to_rad_string())))
rad_string.append(add_to_str(" ".join(self.rad_files)))
rad_string.append(add_to_str(octree))
rad_string.append(add_to_str(input_rays))
rad_string.append(add_to_str(output_matrix))
return ''.join(rad_string)
@property
def input_files(self):
return [self.receiver_file] + self.rad_files
class RfluxmtxControlParameters(object):
"""Rfluxmtx ControlParameters.
Set the values for hemispheretype, hemisphere up direction and output file
location (optional).
"""
def __init__(self, hemi_type='u', hemi_up_direction='Y', output_file=''):
"""Init class."""
self.hemisphere_type = hemi_type
"""
The acceptable inputs for hemisphere type are:
u for uniform.(Usually applicable for ground).
kf for klems full.
kh for klems half.
kq for klems quarter.
rN for Reinhart - Tregenza type skies. N stands for subdivisions
and defaults to 1.
scN for shirley-chiu subdivisions."""
self.hemisphere_up_direction = hemi_up_direction
"""The acceptable inputs for hemisphere direction are %s""" % \
(",".join(('X', 'Y', 'Z', 'x', 'y', 'z', '-X', '-Y',
'-Z', '-x', '-y', '-z')))
self.output_file = output_file
@property
def hemisphere_type(self):
return self._hemisphereType
@hemisphere_type.setter
def hemisphere_type(self, value):
"""Hemisphere type.
The acceptable inputs for hemisphere type are:
u for uniform.(Usually applicable for ground).
kf for klems full.
kh for klems half.
kq for klems quarter.
rN for Reinhart - Tregenza type skies. N stands for subdivisions and
defaults to 1.
scN for shirley-chiu subdivisions.
"""
if value:
if value in ('u', 'kf', 'kh', 'kq'):
self._hemisphereType = value
return
elif value.startswith('r'):
if len(value) > 1:
try:
num = int(value[1:])
except ValueError:
                        raise Exception(
                            "The format for Reinhart-Tregenza type skies is rN. "
                            "The value entered was %s" % value)
else:
num = ''
self._hemisphereType = 'r' + str(num)
elif value.startswith('sc'):
if len(value) > 2:
try:
num = int(value[2:])
except ValueError:
raise Exception(
"The format for ShirleyChiu type values is scN."
"The value entered was %s" % value)
else:
raise Exception(
"The format for ShirleyChiu type values is scN."
"The value entered was %s" % value)
self._hemisphereType = 'sc' + str(num)
else:
except_str = """
The acceptable inputs for hemisphere type are:
u for uniform.(Usually applicable for ground).
kf for klems full.
kh for klems half.
kq for klems quarter.
rN for Reinhart - Tregenza type skies. N stands for
subdivisions and defaults to 1.
scN for shirley-chiu subdivisions.
The value entered was %s
""" % (value)
raise Exception(except_str)
@property
def hemisphere_up_direction(self):
return self._hemisphere_upDirection
@hemisphere_up_direction.setter
def hemisphere_up_direction(self, value):
"""hemisphere direction.
The acceptable inputs for hemisphere direction are a tuple with 3 values
or 'X', 'Y', 'Z', 'x', 'y', 'z', '-X', '-Y','-Z', '-x', '-y','-z'.
"""
allowed_values = ('X', 'Y', 'Z', 'x', 'y', 'z', '-X', '-Y',
'-Z', '-x', '-y', '-z', "+X", "+Y", "+Z",
'+x', "+y", "+z")
if isinstance(value, (tuple, list)):
assert len(value) == 3, \
                'Length of hemisphere_up_direction vector should be 3.'
self._hemisphere_upDirection = ','.join((str(v) for v in value))
elif value:
assert value in allowed_values, "The value for hemisphere_upDirection" \
"should be one of the following: %s" \
% (','.join(allowed_values))
self._hemisphere_upDirection = value
else:
self._hemisphere_upDirection = '+Z'
def __str__(self):
output_file_spec = "o=%s" % self.output_file if self.output_file else ''
return "#@rfluxmtx h=%s u=%s %s" % (self.hemisphere_type,
self.hemisphere_up_direction,
output_file_spec)
|
105 / 30, Ghagare Residency, Lane No. 14, Prabhat Road, Near Income Tax Office, Beside Sakas Food Products, Erandwane, Pune - 411004, Maharastra, India.
Prasad Lifespaces LLP is a leading player in the Pune real estate industry. Everyone dreams of having their own home, and we help many of them make that dream come true. We build each home painstakingly, with a focus on quality and useful detailing, and we ensure value for money. We strive to earn people's trust and confidence whenever we launch a new product or service. |
# -*- coding: utf-8 -*-
# Copyright (c) 2012 - 2015 Detlev Offenbach <[email protected]>
#
"""
Module implementing a clickable label.
"""
from __future__ import unicode_literals
from PyQt5.QtCore import pyqtSignal, Qt, QPoint
from PyQt5.QtWidgets import QLabel
class E5ClickableLabel(QLabel):
"""
Class implementing a clickable label.
@signal clicked(QPoint) emitted upon a click on the label
with the left button
@signal middleClicked(QPoint) emitted upon a click on the label
with the middle button or CTRL and left button
"""
clicked = pyqtSignal(QPoint)
middleClicked = pyqtSignal(QPoint)
def __init__(self, parent=None):
"""
Constructor
@param parent reference to the parent widget (QWidget)
"""
super(E5ClickableLabel, self).__init__(parent)
def mouseReleaseEvent(self, evt):
"""
Protected method handling mouse release events.
@param evt mouse event (QMouseEvent)
"""
if evt.button() == Qt.LeftButton and self.rect().contains(evt.pos()):
if evt.modifiers() == Qt.ControlModifier:
self.middleClicked.emit(evt.globalPos())
else:
self.clicked.emit(evt.globalPos())
elif evt.button() == Qt.MidButton and \
self.rect().contains(evt.pos()):
self.middleClicked.emit(evt.globalPos())
else:
super(E5ClickableLabel, self).mouseReleaseEvent(evt)
|
The Wishing Well Inn Peckham SE15 gives you the chance to experience this vibrant part of South London. A chance to experience the real London after a day of taking in all the historic sights the great city has to offer. As well as comfortable and modern rooms you have the choice of two bars in which to enjoy an evening drink. Then there are the superb Escorts near the Wishing Well Inn Peckham, make sure you indulge.
Wishing Well Inn, 79 Choumert Road, Peckham, London SE15 4AR, England. |
#! /usr/bin/env python
'''
Python to javascript compiler.
Do not confuse this pyjs module with the python dialect at pyjs.org.
Currently compiles RapydScript, a dialect of python designed to
compile to javascript.
Copyright 2013-2016 GoodCrypto
Last modified: 2016-04-20
This file is open source, licensed under GPLv3 <http://www.gnu.org/licenses/>.
'''
from __future__ import unicode_literals
assert False, 'Untested'
import sys
IS_PY2 = sys.version_info[0] == 2
import os
from tempfile import mkstemp
if IS_PY2:
from cStringIO import StringIO
else:
from io import StringIO
from rapydscript.compiler import parse_file, finalize_source
def compile(py):
''' Compile python to javascript.
As of 2012-01 RapydScript does not have a module function that
compiles. Compiling requires a source file, not a stream or string.
'''
# save py code to file
rsfile, rsfilename = mkstemp()
    # os.write needs bytes; encode if we were given text
    if not isinstance(py, bytes):
        py = py.encode('utf-8')
    os.write(rsfile, py)
os.close(rsfile)
parse_output = StringIO()
handler = parse_file(rsfilename, parse_output)
os.unlink(rsfilename)
js = finalize_source(parse_output.getvalue(), handler)
parse_output.close()
return js
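# --- Usage sketch (not part of the original module) ---
# Illustrative only: the module-level ``assert False, 'Untested'`` above would
# stop this module from importing in practice, and the RapydScript source
# below is a made-up example.
if __name__ == '__main__':
    source = 'def greet(name):\n    print("hello", name)\n'
    print(compile(source))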
|
If you have a New Year's shindig in the works or need a rush delivery for Aunt Gladys, here are some great options that don't just sparkle, they help women excel.
Investor, speaker and writer dedicated to educating women who want to fund women-owned companies and navigate the world of entrepreneurship.
At the holidays, I show love with food. But not necessarily my food.
Wherever I can, I buy from new companies that are owned by women or mentor young women. It's a natural outgrowth of my work as an investor of woman-owned startups. Not only do I help support these businesses, but I also end up serving some of the best holiday treats around.
Imagine yourself swirling a glass of Merlot, a cheeky glint in your eye, "Why yes, I did support emerging vintner talent in South Africa when I bought this wine. Not just me of course, some folks in Napa were involved too. So glad you came to my fete!"
Then prepare to stand back as your friends break into giggles and Merlot shoots out their noses.
When I entertain this season, I'm ordering from Wine for the World. The company pairs undiscovered winemakers from developing countries with Food & Wine Magazine Winemakers of the Year, and the result is a fantastic selection of reds and whites.
I've been a fan of Wine for the World since it rocked out its Indiegogo crowdfunding campaign in 2013, exceeding its fundraising goal and appearing on the startup scene in 2014.
"We're at a really exciting time as a company," says founder Mika Bulmash. "We're in the market with outstanding wines, connecting with enthusiastic consumers who love our story, building great partnerships and scouting for new talent as we expand into new markets and grow our team."
And who doesn't like handy gift sets? Wine for the World has two- and three-bottle sets ready to go for the holidays. You can also buy in bulk and stock up for your holiday party. With wines running under $25, it's a deal that gets you sophisticated hostess cred.
I like the idea of cooking and I like the idea of giving, but when I try to put them together, it's not pretty.
Neighbors give me pitying looks when I present them with cookies that are both lopsided and maybe extra crispy. Does anyone really like fruit cake? No. Does anyone want to see me try to bake a fruit cake? Definitely not.
So how about I skip the whole mess and order goodies like pink peppermint marshmallows and holiday macarons from a New Orleans sweet boutique, oui? Oui!
Sucre caught my attention when I discovered they support the young men and women who graduate from Venture for America, a group that trains ridiculously smart college graduates to enter early-stage companies across the country.
Elizabeth Nicholas is a Venture For America fellow and after receiving her training, Sucre offered her the position of Digital Retail Manager. "Everyday I get to see powerful women excel in various roles, from sales and business development to culinary innovation to graphic design and e-commerce."
She says the co-owners, restaurateur Joel Dondis and executive chef Tariq Hanna, are supportive and challenging leaders. "They expect you to continually build and improve upon your initial idea and then waste no time in implementing it."
When the time comes for Elizabeth to start her own company, she will be ready. "What I've most benefited from is the exposure to so many different industries: the restaurant industry, the world of wholesale and national expansion, haute confectionary cuisine, e-commerce, accounting, etc. We even have an in-house sommelier (female, of course!)."
As an investor, I support Venture for America's work and have had the pleasure of connecting with their hard working corps of fellows. I don't mind sending a little love New Orleans way as a thank you for training people like Elizabeth who I will no doubt see in investor boardrooms sooner rather than later.
Maybe you want to be that person who arrives at the door bearing gifts that are kind to the hostess's waistline. Good thought, but how do you avoid predictability and re-gifting?
It would be so handy if someone could create an algorithm around a product's buzz, sales, user reviews and editor's picks, and then compile it into easy gift guides. Oh wait! That's a thing. It's called Rank & Style, and it was created by three women.
"Lists make everything better," says founder Sarika Doshi, "especially around the holidays."
At least a dozen gift guides can be found on the site, including Deluxe Beauty Delights, For the Preppy Girl and Tech Cases & Accessories.
"We believe it's the thought that counts, not our personal opinions." Doshi explains. "Our gift guides do all the homework for you by objectively ranking the best gifts to give and get in every category, for every personality type. [We] take into account hundreds of inputs from trusted sources."
Rank & Style isn't just making gift giving easy. Want to make algebra glamorous to young girls? Start a company that uses metrics behind pretty things to maximize the value behind the customer's dollar. It's brains and beauty at every level.
Stay safe and warm this season, and in all your party hopping, may the holidays bring you fine wine, tasty treats and sparkly boxes from these fabulous women. |
# coding: utf-8
"""
Operations on real-time messaging (RTM) messages.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from datetime import datetime
from typing import Any
from typing import Dict
from typing import Generator
from typing import List
from typing import Optional
from typing import Union
import six
from leancloud import client
class Message(object):
def __init__(self):
self.bin = None # type: bool
self.conversation_id = None # type: str
self.data = None # type: str
self.from_client = None # type: str
self.from_ip = None # type: str
self.is_conversation = None # type: bool
self.is_room = None # type: bool
self.message_id = None # type: str
self.timestamp = None # type: float
self.to = None # type: str
@classmethod
def _find(cls, query_params): # type: (dict) -> Generator[Message, None, None]
content = client.get('/rtm/messages/history', params=query_params).json()
for data in content:
msg = cls()
msg._update_data(data)
yield msg
@classmethod
def find_by_conversation(cls, conversation_id, limit=None, reversed=None, after_time=None, after_message_id=None):
# type: (str, Optional[int], Optional[bool], Optional[Union[datetime, float]], Optional[str]) -> List[Message]
"""获取某个对话中的聊天记录
:param conversation_id: 对话 id
:param limit: 返回条数限制,可选,服务端默认 100 条,最大 1000 条
:param reversed: 以默认排序相反的方向返回结果,服务端默认为 False
:param after_time: 查询起始的时间戳,返回小于这个时间(不包含)的记录,服务端默认是当前时间
:param after_message_id: 起始的消息 id,使用时必须加上对应消息的时间 after_time 参数,一起作为查询的起点
:return: 符合条件的聊天记录
"""
query_params = {} # type: Dict[str, Any]
query_params['convid'] = conversation_id
if limit is not None:
query_params['limit'] = limit
if reversed is not None:
query_params['reversed'] = reversed
if isinstance(after_time, datetime):
query_params['max_ts'] = after_time.timestamp() * 1000
elif isinstance(after_time, six.integer_types) or isinstance(after_time, float):
query_params['max_ts'] = after_time * 1000
if after_message_id is not None:
query_params['msgid'] = after_message_id
return list(cls._find(query_params))
@classmethod
def find_by_client(cls, from_client, limit=None, after_time=None, after_message_id=None):
# type: (str, Optional[int], Optional[Union[datetime, float]], Optional[str]) -> List[Message]
"""获取某个 client 的聊天记录
:param from_client: 要获取聊天记录的 client id
:param limit: 返回条数限制,可选,服务端默认 100 条,最大 1000 条
:param after_time: 查询起始的时间戳,返回小于这个时间(不包含)的记录,服务端默认是当前时间
:param after_message_id: 起始的消息 id,使用时必须加上对应消息的时间 after_time 参数,一起作为查询的起点
:return: 符合条件的聊天记录
"""
query_params = {} # type: Dict[str, Any]
query_params['from'] = from_client
if limit is not None:
query_params['limit'] = limit
if isinstance(after_time, datetime):
query_params['max_ts'] = after_time.timestamp() * 1000
elif isinstance(after_time, six.integer_types) or isinstance(after_time, float):
query_params['max_ts'] = after_time * 1000
if after_message_id is not None:
query_params['msgid'] = after_message_id
return list(cls._find(query_params))
@classmethod
def find_all(cls, limit=None, after_time=None, after_message_id=None):
# type: (Optional[int], Optional[Union[datetime, float]], Optional[str]) -> List[Message]
"""获取应用全部聊天记录
:param limit: 返回条数限制,可选,服务端默认 100 条,最大 1000 条
:param after_time: 查询起始的时间戳,返回小于这个时间(不包含)的记录,服务端默认是当前时间
:param after_message_id: 起始的消息 id,使用时必须加上对应消息的时间 after_time 参数,一起作为查询的起点
:return: 符合条件的聊天记录
"""
query_params = {} # type: Dict[str, Any]
if limit is not None:
query_params['limit'] = limit
if isinstance(after_time, datetime):
query_params['max_ts'] = after_time.timestamp() * 1000
elif isinstance(after_time, six.integer_types) or isinstance(after_time, float):
query_params['max_ts'] = after_time * 1000
if after_message_id is not None:
query_params['msgid'] = after_message_id
return list(cls._find(query_params))
def _update_data(self, server_data): # type: (dict) -> None
self.bin = server_data.get('bin')
self.conversation_id = server_data.get('conv-id')
self.data = server_data.get('data')
self.from_client = server_data.get('from')
self.from_ip = server_data.get('from-ip')
self.is_conversation = server_data.get('is-conv')
self.is_room = server_data.get('is-room')
self.message_id = server_data.get('msg-id')
self.timestamp = server_data.get('timestamp', 0) / 1000
self.to = server_data.get('to')
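# --- Usage sketch (not part of the original module) ---
# Fetching the most recent messages of a conversation. The conversation id is
# illustrative, and leancloud must already be initialised with real app
# credentials before the REST call behind Message._find() can succeed.
if __name__ == '__main__':
    recent = Message.find_by_conversation('CONVERSATION_ID', limit=10)
    for msg in recent:
        print(msg.message_id, msg.from_client, msg.data)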
|
A powerful storm system — including three tornadoes — swept through North Georgia late Monday, sending trees into roads, damaging homes and businesses, knocking out power to thousands and leaving behind a path of incredible devastation in one south Fulton County neighborhood.
More than 174 severe weather reports of large hail, damaging wind and tornadoes came in Monday, according to Channel 2 Action News. The National Weather Service confirmed three tornadoes — one in south Fulton County and two in Haralson County — touched down late Monday evening.
An EF-2 tornado, which has potential winds between 111 and 135 mph, touched down between Fairburn and Campbellton, according to the NWS. The storm damaged 50 homes with winds reaching 120 mph.
In Cobb County, a tree crashed into a home on Glenroy Place. Lightning caused two house fires in Gwinnett County. And in Clayton County, a fire damaged an eight-unit apartment in the 7200 block of Tara Boulevard.
Food, shelter and other essentials were provided for 17 people affected by the apartment fire, American Red Cross of Georgia spokeswoman Sherry Nicholson said.
But the most severe damage was reported in south Fulton, which saw “unbelievable” destruction, according to state Insurance Commissioner Ralph Hudgens.
A tornado tore through the Jumpers Trail neighborhood off Campbellton Fairburn Road in Fairburn, blowing off the side of Thomas Correa’s daughter’s bedroom. Correa’s daughter is a student at Kennesaw State University and was not home at the time.
While there were no reports of injuries, the storm ripped roofs off houses, knocked homes off their foundations and damaged cars.
Hudgens said he was amazed no one was injured. He was on the scene to help residents connect with their insurance agents and the commissioner’s office.
To protect the family, he dragged a mattress into a bathroom, where everyone hid to avoid the storm’s path.
Ashli Andrews said she tracked the weather all day and lined a bathtub with a blanket in preparation for a possible tornado.
After about 11 p.m., Andrews made use of that planning, ushering her 2-year-old daughter, mother and sister inside the bathroom.
The storm shifted her home off its foundation in what sounded like a roar, Andrews said. Parts of the second-floor ceiling caved in and water damage was reported.
In Haralson County, officials received multiple reports of downed trees and power outages as winds intensified about 10 p.m. Monday.
Fire Chief Brian Walker said trees fell on about 25 homes, including five at a trailer park on Riverside Lane.
One home caught fire when a family lit a candle for light. Damage was minor and no one was injured, Walker said.
In another incident, a woman and her father were taken to a local hospital after a tree crashed into their Tallapoosa Street home in Buchanan. Although the daughter has been released from the hospital, the father is still there, Walker said. His injuries are critical.
It was also a close call for “American Idol” hopeful Andrew Weaver, whose home was caught in Monday’s storm. Weaver made it to Hollywood in the March 12 episode, so he was home watching other contestants try their luck.
Once Burns announced that Haralson County was under a tornado warning, Weaver’s family took cover. They were safe, but their neighbors’ home was struck by lightning.
Haralson County and Bremen City district officials canceled school and activities Tuesday.
Earlier, more than 3,000 Georgia Power customers in Fulton and Haralson counties and 13,000 Georgia EMC customers were without power. By 9 p.m., those numbers were down to 10 and zero, respectively.
Traffic lights were out and roads in parts of Atlanta were blocked early Tuesday, including Northside Drive at Peachtree Battle Avenue, Defoors Ferry Road at Bohler Road and Hollowell Parkway at Hollywood Road.
South Fulton Parkway was closed at Winstar Lane as crews worked to clear trees, according to the Georgia Department of Transportation.
Downed trees and power lines also affected MARTA bus routes Tuesday, according to the agency.
— Staff writer Raisa Habersham contributed to this article.
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handles all requests relating to volumes.
"""
import collections
import functools
from oslo.config import cfg
from cinder import context
from cinder.db import base
from cinder import exception
from cinder.image import glance
from cinder import keymgr
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from cinder.openstack.common import uuidutils
import cinder.policy
from cinder import quota
from cinder import quota_utils
from cinder.scheduler import rpcapi as scheduler_rpcapi
from cinder import utils
from cinder.volume.flows.api import create_volume
from cinder.volume import qos_specs
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as volume_utils
from cinder.volume import volume_types
volume_host_opt = cfg.BoolOpt('snapshot_same_host',
default=True,
help='Create volume from snapshot at the host '
'where snapshot resides')
volume_same_az_opt = cfg.BoolOpt('cloned_volume_same_az',
default=True,
help='Ensure that the new volumes are the '
'same AZ as snapshot or source volume')
CONF = cfg.CONF
CONF.register_opt(volume_host_opt)
CONF.register_opt(volume_same_az_opt)
CONF.import_opt('storage_availability_zone', 'cinder.volume.manager')
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
def wrap_check_policy(func):
"""Check policy corresponding to the wrapped methods prior to execution
This decorator requires the first 3 args of the wrapped function
to be (self, context, volume)
"""
@functools.wraps(func)
def wrapped(self, context, target_obj, *args, **kwargs):
check_policy(context, func.__name__, target_obj)
return func(self, context, target_obj, *args, **kwargs)
return wrapped
def check_policy(context, action, target_obj=None):
target = {
'project_id': context.project_id,
'user_id': context.user_id,
}
target.update(target_obj or {})
_action = 'volume:%s' % action
cinder.policy.enforce(context, _action, target)
class API(base.Base):
"""API for interacting with the volume manager."""
def __init__(self, db_driver=None, image_service=None):
self.image_service = (image_service or
glance.get_default_image_service())
self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
self.volume_rpcapi = volume_rpcapi.VolumeAPI()
self.availability_zone_names = ()
self.key_manager = keymgr.API()
super(API, self).__init__(db_driver)
def _valid_availability_zone(self, availability_zone):
#NOTE(bcwaldon): This approach to caching fails to handle the case
# that an availability zone is disabled/removed.
if availability_zone in self.availability_zone_names:
return True
if CONF.storage_availability_zone == availability_zone:
return True
azs = self.list_availability_zones()
self.availability_zone_names = [az['name'] for az in azs]
return availability_zone in self.availability_zone_names
def list_availability_zones(self):
"""Describe the known availability zones
:retval list of dicts, each with a 'name' and 'available' key
"""
topic = CONF.volume_topic
ctxt = context.get_admin_context()
services = self.db.service_get_all_by_topic(ctxt, topic)
az_data = [(s['availability_zone'], s['disabled']) for s in services]
disabled_map = {}
for (az_name, disabled) in az_data:
tracked_disabled = disabled_map.get(az_name, True)
disabled_map[az_name] = tracked_disabled and disabled
azs = [{'name': name, 'available': not disabled}
for (name, disabled) in disabled_map.items()]
return tuple(azs)
def create(self, context, size, name, description, snapshot=None,
image_id=None, volume_type=None, metadata=None,
availability_zone=None, source_volume=None,
scheduler_hints=None, backup_source_volume=None):
if source_volume and volume_type:
if volume_type['id'] != source_volume['volume_type_id']:
msg = _("Invalid volume_type provided (requested type "
"must match source volume, or be omitted). "
"You should omit the argument.")
raise exception.InvalidInput(reason=msg)
if snapshot and volume_type:
if volume_type['id'] != snapshot['volume_type_id']:
msg = _("Invalid volume_type provided (requested type "
"must match source snapshot, or be omitted). "
"You should omit the argument.")
raise exception.InvalidInput(reason=msg)
def check_volume_az_zone(availability_zone):
try:
return self._valid_availability_zone(availability_zone)
except exception.CinderException:
LOG.exception(_("Unable to query if %s is in the "
"availability zone set"), availability_zone)
return False
create_what = {
'context': context,
'raw_size': size,
'name': name,
'description': description,
'snapshot': snapshot,
'image_id': image_id,
'raw_volume_type': volume_type,
'metadata': metadata,
'raw_availability_zone': availability_zone,
'source_volume': source_volume,
'scheduler_hints': scheduler_hints,
'key_manager': self.key_manager,
'backup_source_volume': backup_source_volume,
}
try:
flow_engine = create_volume.get_flow(self.scheduler_rpcapi,
self.volume_rpcapi,
self.db,
self.image_service,
check_volume_az_zone,
create_what)
except Exception:
LOG.exception(_("Failed to create api volume flow"))
raise exception.CinderException(
_("Failed to create api volume flow"))
flow_engine.run()
volume = flow_engine.storage.fetch('volume')
return volume
@wrap_check_policy
def delete(self, context, volume, force=False, unmanage_only=False):
if context.is_admin and context.project_id != volume['project_id']:
project_id = volume['project_id']
else:
project_id = context.project_id
volume_id = volume['id']
if not volume['host']:
volume_utils.notify_about_volume_usage(context,
volume, "delete.start")
# NOTE(vish): scheduling failed, so delete it
# Note(zhiteng): update volume quota reservation
try:
reserve_opts = {'volumes': -1, 'gigabytes': -volume['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume['volume_type_id'])
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_("Failed to update quota for deleting volume"))
self.db.volume_destroy(context.elevated(), volume_id)
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
volume_utils.notify_about_volume_usage(context,
volume, "delete.end")
return
if not force and volume['status'] not in ["available", "error",
"error_restoring",
"error_extending"]:
msg = _("Volume status must be available or error, "
"but current status is: %s") % volume['status']
raise exception.InvalidVolume(reason=msg)
if volume['attach_status'] == "attached":
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume_id)
if volume['migration_status'] is not None:
# Volume is migrating, wait until done
msg = _("Volume cannot be deleted while migrating")
raise exception.InvalidVolume(reason=msg)
snapshots = self.db.snapshot_get_all_for_volume(context, volume_id)
if len(snapshots):
msg = _("Volume still has %d dependent snapshots") % len(snapshots)
raise exception.InvalidVolume(reason=msg)
# If the volume is encrypted, delete its encryption key from the key
# manager. This operation makes volume deletion an irreversible process
# because the volume cannot be decrypted without its key.
encryption_key_id = volume.get('encryption_key_id', None)
if encryption_key_id is not None:
self.key_manager.delete_key(context, encryption_key_id)
now = timeutils.utcnow()
self.db.volume_update(context, volume_id, {'status': 'deleting',
'terminated_at': now})
self.volume_rpcapi.delete_volume(context, volume, unmanage_only)
@wrap_check_policy
def update(self, context, volume, fields):
self.db.volume_update(context, volume['id'], fields)
def get(self, context, volume_id):
rv = self.db.volume_get(context, volume_id)
volume = dict(rv.iteritems())
check_policy(context, 'get', volume)
return volume
def get_all(self, context, marker=None, limit=None, sort_key='created_at',
sort_dir='desc', filters=None):
check_policy(context, 'get_all')
        if filters is None:
filters = {}
try:
if limit is not None:
limit = int(limit)
if limit < 0:
msg = _('limit param must be positive')
raise exception.InvalidInput(reason=msg)
except ValueError:
msg = _('limit param must be an integer')
raise exception.InvalidInput(reason=msg)
# Non-admin shouldn't see temporary target of a volume migration, add
# unique filter data to reflect that only volumes with a NULL
# 'migration_status' or a 'migration_status' that does not start with
# 'target:' should be returned (processed in db/sqlalchemy/api.py)
if not context.is_admin:
filters['no_migration_targets'] = True
if filters:
LOG.debug(_("Searching by: %s") % str(filters))
if (context.is_admin and 'all_tenants' in filters):
# Need to remove all_tenants to pass the filtering below.
del filters['all_tenants']
volumes = self.db.volume_get_all(context, marker, limit, sort_key,
sort_dir, filters=filters)
else:
volumes = self.db.volume_get_all_by_project(context,
context.project_id,
marker, limit,
sort_key, sort_dir,
filters=filters)
return volumes
def get_snapshot(self, context, snapshot_id):
check_policy(context, 'get_snapshot')
rv = self.db.snapshot_get(context, snapshot_id)
return dict(rv.iteritems())
def get_volume(self, context, volume_id):
check_policy(context, 'get_volume')
rv = self.db.volume_get(context, volume_id)
return dict(rv.iteritems())
def get_all_snapshots(self, context, search_opts=None):
check_policy(context, 'get_all_snapshots')
search_opts = search_opts or {}
if (context.is_admin and 'all_tenants' in search_opts):
# Need to remove all_tenants to pass the filtering below.
del search_opts['all_tenants']
snapshots = self.db.snapshot_get_all(context)
else:
snapshots = self.db.snapshot_get_all_by_project(
context, context.project_id)
if search_opts:
LOG.debug(_("Searching by: %s") % search_opts)
results = []
not_found = object()
for snapshot in snapshots:
for opt, value in search_opts.iteritems():
if snapshot.get(opt, not_found) != value:
break
else:
results.append(snapshot)
snapshots = results
return snapshots
@wrap_check_policy
def check_attach(self, volume):
# TODO(vish): abstract status checking?
if volume['status'] != "available":
msg = _("status must be available")
raise exception.InvalidVolume(reason=msg)
if volume['attach_status'] == "attached":
msg = _("already attached")
raise exception.InvalidVolume(reason=msg)
@wrap_check_policy
def check_detach(self, volume):
# TODO(vish): abstract status checking?
if volume['status'] != "in-use":
msg = _("status must be in-use to detach")
raise exception.InvalidVolume(reason=msg)
@wrap_check_policy
def reserve_volume(self, context, volume):
#NOTE(jdg): check for Race condition bug 1096983
#explicitly get updated ref and check
volume = self.db.volume_get(context, volume['id'])
if volume['status'] == 'available':
self.update(context, volume, {"status": "attaching"})
else:
msg = _("Volume status must be available to reserve")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
@wrap_check_policy
def unreserve_volume(self, context, volume):
if volume['status'] == "attaching":
self.update(context, volume, {"status": "available"})
@wrap_check_policy
def begin_detaching(self, context, volume):
# If we are in the middle of a volume migration, we don't want the user
# to see that the volume is 'detaching'. Having 'migration_status' set
# will have the same effect internally.
if not volume['migration_status']:
self.update(context, volume, {"status": "detaching"})
@wrap_check_policy
def roll_detaching(self, context, volume):
if volume['status'] == "detaching":
self.update(context, volume, {"status": "in-use"})
@wrap_check_policy
def attach(self, context, volume, instance_uuid, host_name,
mountpoint, mode):
volume_metadata = self.get_volume_admin_metadata(context.elevated(),
volume)
if 'readonly' not in volume_metadata:
# NOTE(zhiyan): set a default value for read-only flag to metadata.
self.update_volume_admin_metadata(context.elevated(), volume,
{'readonly': 'False'})
volume_metadata['readonly'] = 'False'
if volume_metadata['readonly'] == 'True' and mode != 'ro':
raise exception.InvalidVolumeAttachMode(mode=mode,
volume_id=volume['id'])
return self.volume_rpcapi.attach_volume(context,
volume,
instance_uuid,
host_name,
mountpoint,
mode)
@wrap_check_policy
def detach(self, context, volume):
return self.volume_rpcapi.detach_volume(context, volume)
@wrap_check_policy
def initialize_connection(self, context, volume, connector):
return self.volume_rpcapi.initialize_connection(context,
volume,
connector)
@wrap_check_policy
def terminate_connection(self, context, volume, connector, force=False):
self.unreserve_volume(context, volume)
return self.volume_rpcapi.terminate_connection(context,
volume,
connector,
force)
@wrap_check_policy
def accept_transfer(self, context, volume, new_user, new_project):
return self.volume_rpcapi.accept_transfer(context,
volume,
new_user,
new_project)
def _create_snapshot(self, context,
volume, name, description,
force=False, metadata=None):
check_policy(context, 'create_snapshot', volume)
if volume['migration_status'] is not None:
# Volume is migrating, wait until done
msg = _("Snapshot cannot be created while volume is migrating")
raise exception.InvalidVolume(reason=msg)
if ((not force) and (volume['status'] != "available")):
msg = _("must be available")
raise exception.InvalidVolume(reason=msg)
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': 1}
else:
reserve_opts = {'snapshots': 1, 'gigabytes': volume['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.get('volume_type_id'))
reservations = QUOTAS.reserve(context, **reserve_opts)
except exception.OverQuota as e:
overs = e.kwargs['overs']
usages = e.kwargs['usages']
quotas = e.kwargs['quotas']
def _consumed(name):
return (usages[name]['reserved'] + usages[name]['in_use'])
for over in overs:
if 'gigabytes' in over:
msg = _("Quota exceeded for %(s_pid)s, tried to create "
"%(s_size)sG snapshot (%(d_consumed)dG of "
"%(d_quota)dG already consumed)")
LOG.warn(msg % {'s_pid': context.project_id,
's_size': volume['size'],
'd_consumed': _consumed(over),
'd_quota': quotas[over]})
raise exception.VolumeSizeExceedsAvailableQuota(
requested=volume['size'],
consumed=_consumed('gigabytes'),
quota=quotas['gigabytes'])
elif 'snapshots' in over:
msg = _("Quota exceeded for %(s_pid)s, tried to create "
"snapshot (%(d_consumed)d snapshots "
"already consumed)")
LOG.warn(msg % {'s_pid': context.project_id,
'd_consumed': _consumed(over)})
raise exception.SnapshotLimitExceeded(
allowed=quotas[over])
self._check_metadata_properties(metadata)
options = {'volume_id': volume['id'],
'user_id': context.user_id,
'project_id': context.project_id,
'status': "creating",
'progress': '0%',
'volume_size': volume['size'],
'display_name': name,
'display_description': description,
'volume_type_id': volume['volume_type_id'],
'encryption_key_id': volume['encryption_key_id'],
'metadata': metadata}
try:
snapshot = self.db.snapshot_create(context, options)
QUOTAS.commit(context, reservations)
except Exception:
with excutils.save_and_reraise_exception():
try:
self.db.snapshot_destroy(context, volume['id'])
finally:
QUOTAS.rollback(context, reservations)
self.volume_rpcapi.create_snapshot(context, volume, snapshot)
return snapshot
def create_snapshot(self, context,
volume, name,
description, metadata=None):
return self._create_snapshot(context, volume, name, description,
False, metadata)
def create_snapshot_force(self, context,
volume, name,
description, metadata=None):
return self._create_snapshot(context, volume, name, description,
True, metadata)
@wrap_check_policy
def delete_snapshot(self, context, snapshot, force=False):
if not force and snapshot['status'] not in ["available", "error"]:
msg = _("Volume Snapshot status must be available or error")
raise exception.InvalidSnapshot(reason=msg)
self.db.snapshot_update(context, snapshot['id'],
{'status': 'deleting'})
volume = self.db.volume_get(context, snapshot['volume_id'])
self.volume_rpcapi.delete_snapshot(context, snapshot, volume['host'])
@wrap_check_policy
def update_snapshot(self, context, snapshot, fields):
self.db.snapshot_update(context, snapshot['id'], fields)
@wrap_check_policy
def get_volume_metadata(self, context, volume):
"""Get all metadata associated with a volume."""
rv = self.db.volume_metadata_get(context, volume['id'])
return dict(rv.iteritems())
@wrap_check_policy
def delete_volume_metadata(self, context, volume, key):
"""Delete the given metadata item from a volume."""
self.db.volume_metadata_delete(context, volume['id'], key)
def _check_metadata_properties(self, metadata=None):
if not metadata:
metadata = {}
for k, v in metadata.iteritems():
if len(k) == 0:
msg = _("Metadata property key blank")
LOG.warn(msg)
raise exception.InvalidVolumeMetadata(reason=msg)
if len(k) > 255:
msg = _("Metadata property key greater than 255 characters")
LOG.warn(msg)
raise exception.InvalidVolumeMetadataSize(reason=msg)
if len(v) > 255:
msg = _("Metadata property value greater than 255 characters")
LOG.warn(msg)
raise exception.InvalidVolumeMetadataSize(reason=msg)
@wrap_check_policy
def update_volume_metadata(self, context, volume, metadata, delete=False):
"""Updates or creates volume metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
"""
if delete:
_metadata = metadata
else:
orig_meta = self.get_volume_metadata(context, volume)
_metadata = orig_meta.copy()
_metadata.update(metadata)
self._check_metadata_properties(_metadata)
db_meta = self.db.volume_metadata_update(context, volume['id'],
_metadata, delete)
# TODO(jdg): Implement an RPC call for drivers that may use this info
return db_meta
def get_volume_metadata_value(self, volume, key):
"""Get value of particular metadata key."""
metadata = volume.get('volume_metadata')
if metadata:
for i in volume['volume_metadata']:
if i['key'] == key:
return i['value']
return None
@wrap_check_policy
def get_volume_admin_metadata(self, context, volume):
"""Get all administration metadata associated with a volume."""
rv = self.db.volume_admin_metadata_get(context, volume['id'])
return dict(rv.iteritems())
@wrap_check_policy
def delete_volume_admin_metadata(self, context, volume, key):
"""Delete the given administration metadata item from a volume."""
self.db.volume_admin_metadata_delete(context, volume['id'], key)
@wrap_check_policy
def update_volume_admin_metadata(self, context, volume, metadata,
delete=False):
"""Updates or creates volume administration metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
"""
if delete:
_metadata = metadata
else:
orig_meta = self.get_volume_admin_metadata(context, volume)
_metadata = orig_meta.copy()
_metadata.update(metadata)
self._check_metadata_properties(_metadata)
self.db.volume_admin_metadata_update(context, volume['id'],
_metadata, delete)
# TODO(jdg): Implement an RPC call for drivers that may use this info
return _metadata
def get_snapshot_metadata(self, context, snapshot):
"""Get all metadata associated with a snapshot."""
rv = self.db.snapshot_metadata_get(context, snapshot['id'])
return dict(rv.iteritems())
def delete_snapshot_metadata(self, context, snapshot, key):
"""Delete the given metadata item from a snapshot."""
self.db.snapshot_metadata_delete(context, snapshot['id'], key)
def update_snapshot_metadata(self, context,
snapshot, metadata,
delete=False):
"""Updates or creates snapshot metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
"""
if delete:
_metadata = metadata
else:
orig_meta = self.get_snapshot_metadata(context, snapshot)
_metadata = orig_meta.copy()
_metadata.update(metadata)
self._check_metadata_properties(_metadata)
db_meta = self.db.snapshot_metadata_update(context,
snapshot['id'],
_metadata,
True)
# TODO(jdg): Implement an RPC call for drivers that may use this info
return db_meta
def get_snapshot_metadata_value(self, snapshot, key):
pass
def get_volumes_image_metadata(self, context):
check_policy(context, 'get_volumes_image_metadata')
db_data = self.db.volume_glance_metadata_get_all(context)
results = collections.defaultdict(dict)
for meta_entry in db_data:
results[meta_entry['volume_id']].update({meta_entry['key']:
meta_entry['value']})
return results
@wrap_check_policy
def get_volume_image_metadata(self, context, volume):
db_data = self.db.volume_glance_metadata_get(context, volume['id'])
return dict(
(meta_entry.key, meta_entry.value) for meta_entry in db_data
)
def _check_volume_availability(self, volume, force):
"""Check if the volume can be used."""
if volume['status'] not in ['available', 'in-use']:
msg = _('Volume status must be available/in-use.')
raise exception.InvalidVolume(reason=msg)
if not force and 'in-use' == volume['status']:
msg = _('Volume status is in-use.')
raise exception.InvalidVolume(reason=msg)
@wrap_check_policy
def copy_volume_to_image(self, context, volume, metadata, force):
"""Create a new image from the specified volume."""
self._check_volume_availability(volume, force)
recv_metadata = self.image_service.create(context, metadata)
self.update(context, volume, {'status': 'uploading'})
self.volume_rpcapi.copy_volume_to_image(context,
volume,
recv_metadata)
response = {"id": volume['id'],
"updated_at": volume['updated_at'],
"status": 'uploading',
"display_description": volume['display_description'],
"size": volume['size'],
"volume_type": volume['volume_type'],
"image_id": recv_metadata['id'],
"container_format": recv_metadata['container_format'],
"disk_format": recv_metadata['disk_format'],
"image_name": recv_metadata.get('name', None)}
return response
@wrap_check_policy
def extend(self, context, volume, new_size):
if volume['status'] != 'available':
msg = _('Volume status must be available to extend.')
raise exception.InvalidVolume(reason=msg)
size_increase = (int(new_size)) - volume['size']
if size_increase <= 0:
msg = (_("New size for extend must be greater "
"than current size. (current: %(size)s, "
"extended: %(new_size)s)") % {'new_size': new_size,
'size': volume['size']})
raise exception.InvalidInput(reason=msg)
try:
reservations = QUOTAS.reserve(context, gigabytes=+size_increase)
except exception.OverQuota as exc:
usages = exc.kwargs['usages']
quotas = exc.kwargs['quotas']
def _consumed(name):
return (usages[name]['reserved'] + usages[name]['in_use'])
msg = _("Quota exceeded for %(s_pid)s, tried to extend volume by "
"%(s_size)sG, (%(d_consumed)dG of %(d_quota)dG already "
"consumed).")
LOG.error(msg % {'s_pid': context.project_id,
's_size': size_increase,
'd_consumed': _consumed('gigabytes'),
'd_quota': quotas['gigabytes']})
raise exception.VolumeSizeExceedsAvailableQuota(
requested=size_increase,
consumed=_consumed('gigabytes'),
quota=quotas['gigabytes'])
self.update(context, volume, {'status': 'extending'})
self.volume_rpcapi.extend_volume(context, volume, new_size,
reservations)
@wrap_check_policy
def migrate_volume(self, context, volume, host, force_host_copy):
"""Migrate the volume to the specified host."""
# We only handle "available" volumes for now
if volume['status'] not in ['available', 'in-use']:
msg = _('Volume status must be available/in-use.')
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Make sure volume is not part of a migration
if volume['migration_status'] is not None:
msg = _("Volume is already part of an active migration")
raise exception.InvalidVolume(reason=msg)
# We only handle volumes without snapshots for now
snaps = self.db.snapshot_get_all_for_volume(context, volume['id'])
if snaps:
msg = _("volume must not have snapshots")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Make sure the host is in the list of available hosts
elevated = context.elevated()
topic = CONF.volume_topic
services = self.db.service_get_all_by_topic(elevated,
topic,
disabled=False)
found = False
for service in services:
if utils.service_is_up(service) and service['host'] == host:
found = True
if not found:
msg = (_('No available service named %s') % host)
LOG.error(msg)
raise exception.InvalidHost(reason=msg)
# Make sure the destination host is different than the current one
if host == volume['host']:
msg = _('Destination host must be different than current host')
LOG.error(msg)
raise exception.InvalidHost(reason=msg)
self.update(context, volume, {'migration_status': 'starting'})
# Call the scheduler to ensure that the host exists and that it can
# accept the volume
volume_type = {}
volume_type_id = volume['volume_type_id']
if volume_type_id:
volume_type = volume_types.get_volume_type(context, volume_type_id)
request_spec = {'volume_properties': volume,
'volume_type': volume_type,
'volume_id': volume['id']}
self.scheduler_rpcapi.migrate_volume_to_host(context,
CONF.volume_topic,
volume['id'],
host,
force_host_copy,
request_spec)
@wrap_check_policy
def migrate_volume_completion(self, context, volume, new_volume, error):
# This is a volume swap initiated by Nova, not Cinder. Nova expects
# us to return the new_volume_id.
if not (volume['migration_status'] or new_volume['migration_status']):
return new_volume['id']
if not volume['migration_status']:
msg = _('Source volume not mid-migration.')
raise exception.InvalidVolume(reason=msg)
if not new_volume['migration_status']:
msg = _('Destination volume not mid-migration.')
raise exception.InvalidVolume(reason=msg)
expected_status = 'target:%s' % volume['id']
if not new_volume['migration_status'] == expected_status:
msg = (_('Destination has migration_status %(stat)s, expected '
'%(exp)s.') % {'stat': new_volume['migration_status'],
'exp': expected_status})
raise exception.InvalidVolume(reason=msg)
return self.volume_rpcapi.migrate_volume_completion(context, volume,
new_volume, error)
@wrap_check_policy
def update_readonly_flag(self, context, volume, flag):
if volume['status'] != 'available':
msg = _('Volume status must be available to update readonly flag.')
raise exception.InvalidVolume(reason=msg)
self.update_volume_admin_metadata(context.elevated(), volume,
{'readonly': str(flag)})
@wrap_check_policy
def retype(self, context, volume, new_type, migration_policy=None):
"""Attempt to modify the type associated with an existing volume."""
if volume['status'] not in ['available', 'in-use']:
msg = _('Unable to update type due to incorrect status '
'on volume: %s') % volume['id']
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
if volume['migration_status'] is not None:
msg = (_("Volume %s is already part of an active migration.")
% volume['id'])
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
if migration_policy and migration_policy not in ['on-demand', 'never']:
msg = _('migration_policy must be \'on-demand\' or \'never\', '
'passed: %s') % new_type
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
# Support specifying volume type by ID or name
try:
if uuidutils.is_uuid_like(new_type):
vol_type = volume_types.get_volume_type(context, new_type)
else:
vol_type = volume_types.get_volume_type_by_name(context,
new_type)
except exception.InvalidVolumeType:
msg = _('Invalid volume_type passed: %s') % new_type
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
vol_type_id = vol_type['id']
vol_type_qos_id = vol_type['qos_specs_id']
old_vol_type = None
old_vol_type_id = volume['volume_type_id']
old_vol_type_qos_id = None
# Error if the original and new type are the same
if volume['volume_type_id'] == vol_type_id:
msg = (_('New volume_type same as original: %s') % new_type)
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
if volume['volume_type_id']:
old_vol_type = volume_types.get_volume_type(
context, old_vol_type_id)
old_vol_type_qos_id = old_vol_type['qos_specs_id']
# We don't support changing encryption requirements yet
old_enc = volume_types.get_volume_type_encryption(context,
old_vol_type_id)
new_enc = volume_types.get_volume_type_encryption(context,
vol_type_id)
if old_enc != new_enc:
msg = _('Retype cannot change encryption requirements')
raise exception.InvalidInput(reason=msg)
# We don't support changing QoS at the front-end yet for in-use volumes
# TODO(avishay): Call Nova to change QoS setting (libvirt has support
# - virDomainSetBlockIoTune() - Nova does not have support yet).
if (volume['status'] != 'available' and
old_vol_type_qos_id != vol_type_qos_id):
for qos_id in [old_vol_type_qos_id, vol_type_qos_id]:
if qos_id:
specs = qos_specs.get_qos_specs(context.elevated(), qos_id)
if specs['qos_specs']['consumer'] != 'back-end':
msg = _('Retype cannot change front-end qos specs for '
'in-use volumes')
raise exception.InvalidInput(reason=msg)
# We're checking here in so that we can report any quota issues as
# early as possible, but won't commit until we change the type. We
# pass the reservations onward in case we need to roll back.
reservations = quota_utils.get_volume_type_reservation(context, volume,
vol_type_id)
self.update(context, volume, {'status': 'retyping'})
request_spec = {'volume_properties': volume,
'volume_id': volume['id'],
'volume_type': vol_type,
'migration_policy': migration_policy,
'quota_reservations': reservations}
self.scheduler_rpcapi.retype(context, CONF.volume_topic, volume['id'],
request_spec=request_spec,
filter_properties={})
def manage_existing(self, context, host, ref, name=None, description=None,
volume_type=None, metadata=None,
availability_zone=None):
if availability_zone is None:
elevated = context.elevated()
try:
service = self.db.service_get_by_host_and_topic(
elevated, host, CONF.volume_topic)
except exception.ServiceNotFound:
with excutils.save_and_reraise_exception():
LOG.error(_('Unable to find service for given host.'))
availability_zone = service.get('availability_zone')
volume_type_id = volume_type['id'] if volume_type else None
volume_properties = {
'size': 0,
'user_id': context.user_id,
'project_id': context.project_id,
'status': 'creating',
'attach_status': 'detached',
# Rename these to the internal name.
'display_description': description,
'display_name': name,
'host': host,
'availability_zone': availability_zone,
'volume_type_id': volume_type_id,
'metadata': metadata
}
# Call the scheduler to ensure that the host exists and that it can
# accept the volume
volume = self.db.volume_create(context, volume_properties)
request_spec = {'volume_properties': volume,
'volume_type': volume_type,
'volume_id': volume['id'],
'ref': ref}
self.scheduler_rpcapi.manage_existing(context, CONF.volume_topic,
volume['id'],
request_spec=request_spec)
return volume
class HostAPI(base.Base):
    """Sub-set of the Volume Manager API for managing host operations."""
    def __init__(self):
        super(HostAPI, self).__init__()
def set_host_enabled(self, context, host, enabled):
"""Sets the specified host's ability to accept new volumes."""
raise NotImplementedError()
def get_host_uptime(self, context, host):
"""Returns the result of calling "uptime" on the target host."""
raise NotImplementedError()
def host_power_action(self, context, host, action):
raise NotImplementedError()
def set_host_maintenance(self, context, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
volume evacuation.
"""
raise NotImplementedError()
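# --- Illustration (not part of cinder) ---
# A stripped-down sketch of the wrap_check_policy pattern used above: the name
# of the wrapped method becomes the policy action ("volume:<method>") that is
# enforced against the request context before the method body runs. The
# _demo_* names and the toy enforce() below are stand-ins, not cinder APIs.
def _demo_enforce(context, action, target):
    # toy stand-in for cinder.policy.enforce(); real enforcement reads policy.json
    if not context.get('is_admin') and action == 'volume:delete':
        raise RuntimeError('policy forbids %s' % action)


def _demo_wrap_check_policy(func):
    @functools.wraps(func)
    def wrapped(self, context, target_obj, *args, **kwargs):
        _demo_enforce(context, 'volume:%s' % func.__name__, target_obj)
        return func(self, context, target_obj, *args, **kwargs)
    return wrapped


class _DemoVolumeAPI(object):
    @_demo_wrap_check_policy
    def delete(self, context, volume):
        return 'deleted %s' % volume['id']

# _DemoVolumeAPI().delete({'is_admin': True}, {'id': 'vol-1'}) -> 'deleted vol-1'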
|
We make money not art has a stimulating brief overview of the Yokohama Triennale of Contemporary Art (what is it with pretentious names today?) currently going down in, well, Yokohama.
Flat-panel reseller Eizo has just reopened its Tokyo showroom over by Hibiya Station grandly known as Eizo Galleria Tokyo.
Those lovely people at SPOnG have been boasting of their superior fitness again, this time with a view to prodding us in the direction of the local gymnasium.
Folk who really dig their home theater will already be HDMI-ed to their eyeballs, we’re sure, but for those big spenders who want to push that high-def goodness a little further, may we suggest splashing up to $2k on Ophit’s new HDMI booster?
If you’re sick of the fuss about Apple’s newest iPod (and no, we don’t feel that compelling urge to cover them here at DWT), then Korean firm i-BEAD’s latest DAP, the i-BEAD700, may soothe you somewhat.
The much-hyped Touch! Generations range of educational/slightly-more-thoughtful-than-point-and-frag games on the Nintendo DS has apparently had the knock-on effect of encouraging folk to open their wallets for more good stuff.
PSP Updates has been quick off the mark in pointing out that Sony has quietly released the 2.50 firmware update via the handheld’s Network Update function.
Looks like the Koreans just beat everyone else to the punch when it comes to wireless iPod nano headphones. C’mon: you know you want ’em. Asiana IDT has been demonstrating the abilities of its new SH-N10s to our intrepid reporter in Seoul, who confirms that said cans have magical powers, including the ability to control the nano directly.
Quick heads up on a really useful little publication laboring under the clumsy moniker of Akibatica.
Just a quickie to point out that any PSP (non-homebrew) lovers with firmware version 2.0 installed can get themselves some, er, street cred courtesy of Curtis “50 Cent” Jackson and Paramount Pictures’ promo for the new Jim Sheridan movie Get Rich or Die Tryin’.
Where does Sega come from?
Lest it slip your mind, and because it gives us an excuse to run that photo, here’s a brief reminder that Sega’s follow up to Feel the Magic, called Where do Babies Come From? (or Akachan wa doko kara kuru no? in Japanese), hits the shelves of Japan next Thursday to titillate all Nintendo DS owners.
To mark yesterday’s Health and Sports Day public holiday, Takara subsidiary A.G. held the 12th Annual All Japan Paper Airplane Championships, also known as the 2005 Japan Cup.
If ever proof were needed that the world is a crazy, mixed-up place, the fact that Toyota and Hitachi have hopped into bed together may just be it.
Ceramicist yosoh brings us this elegant comment on how many of us tend to spend our mealtimes in front of computers, focusing more on bytes than bites (sorry, but we all know which is more important, right?). |
import datetime
class Interval(object):
"Base date interval class"
@classmethod
def range(cls, start, end):
"""
Return a range of datetime objects between start and end
"""
r = []
while start <= end:
r.append(start)
start += cls.delta
return r
@classmethod
def pack(cls, value):
"""
Pack a datetime object into a string
"""
return value.strftime(cls.pack_format_string)
@classmethod
def unpack(cls, value):
"""
Unpack a string into a datetime object
"""
return datetime.datetime.strptime(value, cls.pack_format_string)
@classmethod
def display_format(cls, rng):
return [d.strftime(cls.display_format_string) for d in rng]
@classmethod
def pack_format(cls, rng):
return [d.strftime(cls.pack_format_string) for d in rng]
class Hour(Interval):
display_name = 'Hour'
divides_into = None
pack_format_string = '%Y%m%d%H'
delta = datetime.timedelta(hours=1)
display_format_string = "%H"
class Day(Interval):
display_name = 'Day'
divides_into = Hour
pack_format_string = '%Y%m%d'
delta = datetime.timedelta(days=1)
display_format_string = "%m-%d-%y"
class Month(Interval):
display_name = 'Month'
divides_into = Day
pack_format_string = '%Y%m'
display_format_string = "%M %y"
@classmethod
def range(cls, start, end):
# there's no "months" arg for timedelta.
r = []
# reset the start date to the beginning of the month
start = datetime.datetime(year=start.year, month=start.month, day=1)
while start <= end:
r.append(start)
if start.month == 12:
start = datetime.datetime(year=start.year+1, month=1, day=1)
else:
start = datetime.datetime(year=start.year, month=start.month+1, day=1)
return r
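# --- Usage sketch (not part of the original module) ---
# Building a month range and round-tripping a value through pack()/unpack().
# The concrete dates are illustrative only.
if __name__ == '__main__':
    start = datetime.datetime(2015, 11, 15)
    end = datetime.datetime(2016, 2, 1)
    months = Month.range(start, end)           # 2015-11-01 ... 2016-02-01
    print(Month.display_format(months))        # ['11-15', '12-15', '01-16', '02-16']
    packed = Month.pack(months[0])             # '201511'
    print(Month.unpack(packed))                # 2015-11-01 00:00:00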
|
In Finnish, Pikku Joulua means “Little Christmas”. Held the first Thursday in December every year, all are welcome to join the Finland, MN community for this potluck of traditional Finnish foods (bring your favorite dish to share), fun, and a few Christmas “traditions”.
Following the candlelit evening meal you may be entertained with live musicians, “Star Boys”, Christmas caroling, live reindeer, and of course Joulupukki (our own Finnish Santa), who has even brought his goat “Pukki” and gifts for the children.
In Finland, Europe, the tradition was to visit the cemetery, where families lit luminaries. To reflect this tradition we have had luminaries lining our walkway and on the tables.
Everyone is welcome; come join us for a traditional Finnish Christmas with a social hour from 5:00-6:00pm, dinner from 6:00-7:00pm and entertainment from 7:00-8:00pm. Bring the kids and a dish to share. |
import gnomedvb
import unittest
import sys
import random
import datetime
import time
import re
from gi.repository import GLib
class DVBTestCase(unittest.TestCase):
def assertSuccessAndType(self, data, objtype):
self.assertType(data[1], bool)
self.assertTrue(data[1])
self.assertType(data[0], objtype)
def assertTypeAll(self, objlist, objtype):
for obj in objlist:
self.assertType(obj, objtype)
def assertType(self, obj, objtype):
if not isinstance(obj, objtype):
raise self.failureException, \
"%r is not %r" % (obj, objtype)
class TestManager(DVBTestCase):
def setUp(self):
self.manager = gnomedvb.DVBManagerClient()
def testGetChannelGroups(self):
data = self.manager.get_channel_groups()
self.assertType(data, list)
for cid, name in data:
self.assertType(cid, int)
self.assertType(name, str)
def testAddDeleteChannelGroup(self):
name = "Test Group %f" % random.random()
data = self.manager.add_channel_group(name)
self.assertSuccessAndType(data, int)
has_group = False
for gid, gname in self.manager.get_channel_groups():
if gid == data[0]:
self.assertEqual(name, gname)
has_group = True
break
self.assertTrue(has_group)
self.assertTrue(self.manager.remove_channel_group(data[0]))
def testAddDeviceNotExists(self):
adapter = 9
frontend = 0
self.assertFalse(self.manager.add_device_to_new_group (
adapter, frontend,
"channels.conf", "Recordings", "Test Group"))
class DeviceGroupTestCase(DVBTestCase):
def setUp(self):
self.manager = gnomedvb.DVBManagerClient()
self.devgroups = self.manager.get_registered_device_groups()
self.assertTypeAll(self.devgroups, gnomedvb.DVBDeviceGroupClient)
class TestDeviceGroup(DeviceGroupTestCase):
def testGetSetType(self):
for dg in self.devgroups:
dtype = dg.get_type()
self.assertType(dtype, str)
self.assert_(dtype in ("DVB-C", "DVB-S", "DVB-T"))
name_before = dg.get_name()
self.assertType(name_before, str)
new_name = "%s %f" % (name_before, random.random())
self.assertTrue(dg.set_name(new_name))
self.assertEqual(dg.get_name(), new_name)
self.assertTrue(dg.set_name(name_before))
def testGetMembers(self):
for dg in self.devgroups:
for member in dg.get_members():
self.assert_(member.startswith("/dev/dvb/adapter"))
def testGetRecordingsDirectory(self):
for dg in self.devgroups:
self.assertType(dg.get_recordings_directory(), str)
class TestScanner(DeviceGroupTestCase):
def setUp(self):
DeviceGroupTestCase.setUp(self)
self.path_regex = re.compile(r"/dev/dvb/adapter(\d+)/frontend(\d+)")
def testGetScanner(self):
for dg in self.devgroups:
for member in dg.get_members():
match = self.path_regex.search(member)
self.assertNotEqual(match, None)
adapter, frontend = match.group(1, 2)
scanner = self.manager.get_scanner_for_device(int(adapter),
int(frontend))
self.assertType(scanner, gnomedvb.DVBScannerClient)
data = {"frequency": GLib.Variant('u', 738000000),
"hierarchy": GLib.Variant('u', 0), # NONE
"bandwidth": GLib.Variant('u', 8), # 8MHz
"transmission-mode": GLib.Variant('s', "8k"),
"code-rate-hp": GLib.Variant('s', "2/3"),
"code-rate-lp": GLib.Variant('s', "NONE"),
"constellation": GLib.Variant('s', "QAM16"),
"guard-interval": GLib.Variant('u', 4),} # 1/4
success = scanner.add_scanning_data(data)
self.assertTrue(success)
self.assertType(success, bool)
scanner.run()
time.sleep(15)
scanner.destroy()
class TestChannelList(DeviceGroupTestCase):
def setUp(self):
DeviceGroupTestCase.setUp(self)
self.chanlists = []
for dg in self.devgroups:
self.chanlists.append(dg.get_channel_list())
self.assertTypeAll(self.chanlists, gnomedvb.DVBChannelListClient)
self.changroups = [data[0] for data in self.manager.get_channel_groups()]
def testGetChannels(self):
for cl in self.chanlists:
ids = cl.get_channels()
self.assertTypeAll(ids, long)
for cid in ids:
self.assertSuccessAndType(cl.get_channel_name(cid),
str)
self.assertSuccessAndType(cl.get_channel_network(cid),
str)
self.assertSuccessAndType(cl.get_channel_url(cid),
str)
def testGetChannelInfos(self):
for cl in self.chanlists:
for cid, name, is_radio in cl.get_channel_infos():
self.assertType(cid, long)
self.assertType(name, str)
self.assertType(is_radio, bool)
def testGetTVChannels(self):
for cl in self.chanlists:
ids = cl.get_tv_channels()
self.assertTypeAll(ids, long)
for cid in ids:
data = cl.is_radio_channel(cid)
self.assertSuccessAndType(data, bool)
self.assertFalse(data[0])
def testGetRadioChannels(self):
for cl in self.chanlists:
ids = cl.get_radio_channels()
self.assertTypeAll(ids, long)
for cid in ids:
data = cl.is_radio_channel(cid)
self.assertSuccessAndType(data, bool)
self.assertTrue(data[0])
def testGetChannelsOfGroup(self):
for cl in self.chanlists:
all_channels = set(cl.get_channels())
for gid in self.changroups:
data = cl.get_channels_of_group(gid)
self.assertTrue(data[1])
self.assertTypeAll(data[0], long)
group_chans = set(data[0])
other_chans = all_channels - group_chans
for chan in other_chans:
self.assertTrue(cl.add_channel_to_group(chan, gid))
data = cl.get_channels_of_group(gid)
self.assertTrue(chan in data[0])
self.assertTrue(cl.remove_channel_from_group(chan, gid))
def testChannelNotExists(self):
cid = 1000
for cl in self.chanlists:
self.assertFalse(cl.get_channel_name(cid)[1])
self.assertFalse(cl.get_channel_network(cid)[1])
self.assertFalse(cl.get_channel_url(cid)[1])
self.assertFalse(cl.is_radio_channel(cid)[1])
self.assertFalse(cl.add_channel_to_group(cid, 1000))
self.assertFalse(cl.remove_channel_from_group(cid, 1000))
class TestRecorder(DeviceGroupTestCase):
DURATION = 2
def _get_time_now(self):
nowt = datetime.datetime.now()
# We don't want (micro)seconds
now = datetime.datetime(nowt.year, nowt.month,
nowt.day, nowt.hour, nowt.minute)
return now
def setUp(self):
DeviceGroupTestCase.setUp(self)
self.recorder = []
self.channels = []
for dg in self.devgroups:
chanlist = dg.get_channel_list()
self.channels.append(chanlist.get_tv_channels()[0])
self.recorder.append(dg.get_recorder())
self.assertTypeAll(self.recorder, gnomedvb.DVBRecorderClient)
def _assert_time_equals(self, expected, actual):
self.assertTypeAll(actual, long)
self.assertEqual(len(actual), 5)
self.assertEqual(expected.year, actual[0])
self.assertEqual(expected.month, actual[1])
self.assertEqual(expected.day, actual[2])
self.assertEqual(expected.hour, actual[3])
self.assertEqual(expected.minute, actual[4])
def testAddTimer(self):
for i, rec in enumerate(self.recorder):
now = self._get_time_now()
delay = datetime.timedelta(hours=2)
delayed = now + delay
chan = self.channels[i]
data = rec.add_timer(chan, delayed.year, delayed.month,
delayed.day, delayed.hour, delayed.minute, self.DURATION * 2)
self.assertSuccessAndType(data, long)
rec_id = data[0]
data = rec.get_start_time(rec_id)
self.assertSuccessAndType(data, list)
start = data[0]
self._assert_time_equals(delayed, start)
data = rec.get_duration(rec_id)
self.assertSuccessAndType(data, long)
self.assertEqual(data[0], self.DURATION * 2)
self.assertTrue(rec.set_start_time(rec_id, now.year, now.month,
now.day, now.hour, now.minute))
data = rec.get_start_time(rec_id)
self.assertSuccessAndType(data, list)
start = data[0]
self._assert_time_equals(now, start)
self.assertTrue(rec.set_duration(rec_id, self.DURATION))
data = rec.get_duration(rec_id)
self.assertSuccessAndType(data, long)
self.assertEqual(data[0], self.DURATION)
time.sleep(10)
            self.assertTrue(rec_id in rec.get_active_timers())
self.assertTrue(rec.is_timer_active(rec_id))
self.assertTrue(rec.has_timer(now.year, now.month, now.day,
now.hour, now.minute, self.DURATION))
data = rec.get_end_time(rec_id)
self.assertSuccessAndType(data, list)
end = data[0]
self.assertTypeAll(end, long)
self.assertEqual(len(end), 5)
endt = datetime.datetime(*end)
self.assertEqual(endt - now,
datetime.timedelta(minutes=self.DURATION))
self.assertSuccessAndType(rec.get_channel_name(rec_id),
str)
self.assertSuccessAndType(rec.get_title(rec_id), str)
data = rec.get_all_informations(rec_id)
self.assertSuccessAndType(data, tuple)
rid, duration, active, channel, title = data[0]
self.assertEqual(rid, rec_id)
self.assertEqual(duration, self.DURATION)
self.assertTrue(active)
self.assertType(channel, str)
self.assertType(title, str)
self.assertFalse(rec.set_start_time(rec_id, delayed.year,
delayed.month, delayed.day, delayed.hour, delayed.minute))
time.sleep(20)
self.assertTrue(rec.delete_timer(rec_id))
self.assertFalse(rec.has_timer(now.year, now.month, now.day,
now.hour, now.minute, self.DURATION))
def testTimerNotExists(self):
rec_id = 1000
for rec in self.recorder:
self.assertFalse(rec.delete_timer(rec_id))
self.assertFalse(rec.get_start_time(rec_id)[1])
self.assertFalse(rec.get_end_time(rec_id)[1])
self.assertFalse(rec.get_duration(rec_id)[1])
self.assertFalse(rec.get_channel_name(rec_id)[1])
self.assertFalse(rec.get_title(rec_id)[1])
self.assertFalse(rec.is_timer_active(rec_id))
self.assertFalse(rec.get_all_informations(rec_id)[1])
self.assertFalse(rec.set_start_time(rec_id, 2010, 1, 5, 15, 0))
class TestSchedule(DeviceGroupTestCase):
def setUp(self):
DeviceGroupTestCase.setUp(self)
self.schedules = []
for dg in self.devgroups:
chanlist = dg.get_channel_list()
for chan in chanlist.get_channels():
self.schedules.append(dg.get_schedule(chan))
self.assertTypeAll(self.schedules, gnomedvb.DVBScheduleClient)
def testGetAllEvents(self):
for sched in self.schedules:
for eid in sched.get_all_events():
self._get_event_details(sched, eid)
def _get_event_details(self, sched, eid):
self.assertSuccessAndType(sched.get_name(eid), str)
self.assertSuccessAndType(sched.get_short_description(eid),
str)
self.assertSuccessAndType(sched.get_extended_description(eid),
str)
self.assertSuccessAndType(sched.get_duration(eid), long)
data = sched.get_local_start_time(eid)
self.assertSuccessAndType(data, list)
self.assertTypeAll(data[0], long)
self.assertSuccessAndType(sched.get_local_start_timestamp(eid),
long)
self.assertSuccessAndType(sched.is_running(eid), bool)
self.assertSuccessAndType(sched.is_scrambled(eid), bool)
data = sched.get_informations(eid)
self.assertSuccessAndType(data, tuple)
eeid, next, name, duration, desc = data[0]
self.assertEqual(eeid, eid)
self.assertType(next, long)
self.assertType(name, str)
self.assertType(duration, long)
self.assertType(desc, str)
def testNowPlaying(self):
for sched in self.schedules:
eid = sched.now_playing()
self.assertType(eid, long)
if eid != 0:
self._get_event_details(sched, eid)
def testNext(self):
for sched in self.schedules:
eid = sched.now_playing()
while eid != 0:
eid = sched.next(eid)
self.assertType(eid, long)
def testEventNotExists(self):
eid = 1
for sched in self.schedules:
self.assertFalse(sched.get_name(eid)[1])
self.assertFalse(sched.get_short_description(eid)[1])
self.assertFalse(sched.get_extended_description(eid)[1])
self.assertFalse(sched.get_duration(eid)[1])
self.assertFalse(sched.get_local_start_time(eid)[1])
self.assertFalse(sched.get_local_start_timestamp(eid)[1])
self.assertFalse(sched.is_running(eid)[1])
self.assertFalse(sched.is_scrambled(eid)[1])
self.assertFalse(sched.get_informations(eid)[1])
class TestRecordingsStore(DVBTestCase):
def setUp(self):
self.recstore = gnomedvb.DVBRecordingsStoreClient()
def testGetRecordings(self):
rec_ids = self.recstore.get_recordings()
for rid in rec_ids:
self.assertSuccessAndType(self.recstore.get_channel_name(rid),
str)
self.assertSuccessAndType(self.recstore.get_location(rid),
str)
start_data = self.recstore.get_start_time(rid)
self.assertSuccessAndType(start_data, list)
start = start_data[0]
self.assertEqual(len(start), 5)
self.assertTypeAll(start, long)
self.assertSuccessAndType(self.recstore.get_start_timestamp(rid),
long)
self.assertSuccessAndType(self.recstore.get_length(rid),
long)
            self.assertSuccessAndType(self.recstore.get_name(rid),
str)
self.assertSuccessAndType(self.recstore.get_description(rid),
str)
def testGetRecordingsNotExists(self):
rid = 1000
self.assertFalse(self.recstore.get_channel_name(rid)[1])
self.assertFalse(self.recstore.get_location(rid)[1])
self.assertFalse(self.recstore.get_start_time(rid)[1])
self.assertFalse(self.recstore.get_start_timestamp(rid)[1])
self.assertFalse(self.recstore.get_length(rid)[1])
        self.assertFalse(self.recstore.get_name(rid)[1])
self.assertFalse(self.recstore.get_description(rid)[1])
def testGetAllInformations(self):
rec_ids = self.recstore.get_recordings()
for rid in rec_ids:
data = self.recstore.get_all_informations(rid)
self.assertType(data[1], bool)
self.assertTrue(data[1])
self.assertType(data[0], tuple)
rrid, name, desc, length, ts, chan, loc = data[0]
self.assertType(rrid, long)
self.assertEqual(rrid, rid)
self.assertType(name, str)
self.assertType(desc, str)
self.assertType(length, long)
self.assertType(ts, long)
self.assertType(chan, str)
self.assertType(loc, str)
def testGetAllInformationsNotExists(self):
rid = 1000
data = self.recstore.get_all_informations(rid)
self.assertType(data[1], bool)
self.assertFalse(data[1])
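
# Note: these are integration tests rather than unit tests. They assume a
# running GNOME DVB Daemon reachable over D-Bus with at least one configured
# device group; the scanner test additionally tunes real hardware and sleeps
# for roughly 15 seconds per device.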
if __name__ == '__main__':
loop = GLib.MainLoop()
unittest.main()
loop.run()
|
Superior piece woven in macramé, with a greater proportion of the weave on the front and tubes intertwined within the weave along the armhole contour.
The weave is gathered into a thick cord so the strips are not left hanging. It has an anchor-type clasp on the back.
OROPENDALO products are handmade by Colombian artisans. If your product is not in stock, production of your piece will begin once your purchase is complete. Production and delivery times vary according to the piece. Production time: 12 days. |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'RepositoryUser'
db.create_table('repositories_repositoryuser', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('repo', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['repositories.Repository'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
))
db.send_create_signal('repositories', ['RepositoryUser'])
# Adding unique constraint on 'RepositoryUser', fields ['repo', 'user']
db.create_unique('repositories_repositoryuser', ['repo_id', 'user_id'])
def backwards(self, orm):
# Removing unique constraint on 'RepositoryUser', fields ['repo', 'user']
db.delete_unique('repositories_repositoryuser', ['repo_id', 'user_id'])
# Deleting model 'RepositoryUser'
db.delete_table('repositories_repositoryuser')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'repositories.repository': {
'Meta': {'unique_together': "(('user', 'slug'),)", 'object_name': 'Repository'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'repositories.repositoryuser': {
'Meta': {'unique_together': "(('repo', 'user'),)", 'object_name': 'RepositoryUser'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'repo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['repositories.Repository']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['repositories']
|
Perfect for containing and preventing spillage!
WashroomAccessories is a 100% Australian-owned online wholesaler, specialising in supplying washroom and bathroom supplies for both commercial and domestic toilets.
We have the largest range of washroom accessories including toilet roll dispensers, grab rails, soap dispensers, mirrors, tapware and hand towels ... along with all the consumables you need to keep them filled up and functioning. From commercial to domestic, we have it... at the best prices as well.
Can’t find what you want? Looking for the right toilet paper to fit that old dispenser?
Contact us and we will find it for you!
Whether you are thinking about buying, browsing or you have already made a purchase, our customer service staff is only a phone call or email away. We’re here to serve your needs.
Submit your query online and our staff will get back to you as soon as they can to assist you.
"We aim for your absolute satisfaction – 100% of the time."
Please contact us and we will do everything in our power to help. |
try:
import keystone
except ImportError:
keystone = None
try:
import capstone
except ImportError:
capstone = None
from .__base__ import AsmBase
class DeenPluginAsmMips(AsmBase):
name = 'assembly_mips'
display_name = 'MIPS'
aliases = ['asm_mips',
'mips32',
'mips']
cmd_name = 'assembly_mips'
cmd_help='Assemble/Disassemble for the MIPS architecture'
keystone_arch = keystone.KS_ARCH_MIPS \
if (keystone and hasattr(keystone, 'KS_ARCH_MIPS')) else None
keystone_mode = keystone.KS_MODE_MIPS32 \
if (keystone and hasattr(keystone, 'KS_MODE_MIPS32')) else None
capstone_arch = capstone.CS_ARCH_MIPS \
if (capstone and hasattr(capstone, 'CS_ARCH_MIPS')) else None
capstone_mode = capstone.CS_MODE_MIPS32 \
if (capstone and hasattr(capstone, 'CS_MODE_MIPS32')) else None
@staticmethod
def add_argparser(argparser, plugin_class, *args, **kwargs):
# Add an additional argument for big endian mode.
parser = AsmBase.add_argparser(argparser, plugin_class)
parser.add_argument('-e', '--big-endian', dest='bigendian',
default=False, help='use big endian',
action='store_true')
class DeenPluginAsmMips64(DeenPluginAsmMips):
name = 'assembly_mips64'
display_name = 'MIPS64'
aliases = ['asm_mips64',
'mips64']
cmd_name = 'assembly_mips64'
cmd_help='Assemble/Disassemble for the MIPS64 architecture'
keystone_arch = keystone.KS_ARCH_MIPS \
if (keystone and hasattr(keystone, 'KS_ARCH_MIPS')) else None
keystone_mode = keystone.KS_MODE_MIPS64 \
if (keystone and hasattr(keystone, 'KS_MODE_MIPS64')) else None
capstone_arch = capstone.CS_ARCH_MIPS \
if (capstone and hasattr(capstone, 'CS_ARCH_MIPS')) else None
capstone_mode = capstone.CS_MODE_MIPS64 \
if (capstone and hasattr(capstone, 'CS_MODE_MIPS64')) else None
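
# Illustrative sketch (not part of the plugin): the keystone/capstone constants
# selected above can also be exercised directly, assuming both libraries are
# installed. The instruction string below is only an example.
#
#   ks = keystone.Ks(keystone.KS_ARCH_MIPS, keystone.KS_MODE_MIPS32)
#   encoding, count = ks.asm('addiu $t0, $zero, 1')
#   cs = capstone.Cs(capstone.CS_ARCH_MIPS, capstone.CS_MODE_MIPS32)
#   for insn in cs.disasm(bytes(bytearray(encoding)), 0):
#       print(insn.mnemonic, insn.op_str)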
|
Helping the planet with style, these tees are made from 50% recycled plastic bottles and 50% organic cotton. They are very soft and durable. You won't believe they were made from recycled plastic. |
# -*- coding: utf-8 -*-
import sys
import HTMLParser
import os
import urllib2
import tweepy
import json
from time import gmtime, strftime
from secrets import *
reload(sys)
sys.setdefaultencoding('utf-8')
__location__ = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
hparser = HTMLParser.HTMLParser()
auth = tweepy.OAuthHandler(C_KEY, C_SECRET)
auth.set_access_token(A_TOKEN, A_TOKEN_SECRET)
api = tweepy.API(auth)
tweets = api.user_timeline('IranNewsBot')
# ====== Individual bot configuration ==========================
bot_username = 'IranNewsBot'
logfile_name = bot_username + ".log"
# ==============================================================
def get():
# Get the headlines, iterate through them to try to find a suitable one
page = 1
while page <= 3:
try:
request = urllib2.Request(
"http://content.guardianapis.com/search?format=json&page-size=50&page=" +
str(page) + "&api-key=" + GUARDIAN_KEY)
response = urllib2.urlopen(request)
        except urllib2.URLError as e:
            print(e.reason)
            # Move on rather than retrying the same page forever on a persistent error
            page += 1
else:
            items = json.loads(response.read())
for item in items['response']['results']:
headline = item['webTitle'].encode('utf-8', 'ignore')
h_split = headline.split()
# We don't want to use incomplete headlines
if "..." in headline:
continue
# Try to weed out all-caps headlines
if count_caps(h_split) >= len(h_split) - 3:
continue
# Remove attribution string
if "-" in headline:
headline = headline.split("-")[:-1]
headline = ' '.join(headline).strip()
                if process(headline):
                    return
            # No suitable headline on this page; move on to the next one
            page += 1
# Log that no tweet could be made
f = open(os.path.join(__location__, "IranNewsBot.log"), 'a')
t = strftime("%d %b %Y %H:%M:%S", gmtime())
f.write("\n" + t + " No possible tweet.")
f.close()
def process(headline):
# Don't tweet anything that's too long
if len(headline) > 140:
return False
# only tweet if Iran is mentioned
if "Iran" in headline:
return tweet(headline)
else:
return False
def tweet(headline):
# Check that we haven't tweeted this before
for tweet in tweets:
if headline == tweet.text:
return False
# Log tweet to file
f = open(os.path.join(__location__, "IranNewsBot.log"), 'a')
t = strftime("%d %b %Y %H:%M:%S", gmtime())
f.write(("\n" + t + " " + headline).encode('utf-8', 'ignore'))
f.close()
# Post tweet
api.update_status(status=headline)
return True
def count_caps(headline):
count = 0
for word in headline:
if word[0].isupper():
count += 1
return count
def log(message):
"""Log message to logfile."""
path = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
with open(os.path.join(path, logfile_name), 'a+') as f:
t = strftime("%d %b %Y %H:%M:%S", gmtime())
f.write("\n" + t + " " + message)
if __name__ == "__main__":
get()
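
# This bot is meant to be run periodically (e.g. from cron); each run posts at
# most one new Guardian headline mentioning Iran. The schedule and path below
# are only an example:
#   */30 * * * * python /path/to/irannewsbot.py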
|
Thank you so much for the reply. I went through your blog and all the questions. Yet I have a few more questions and I think only you can answer them.
1. How did you get your first order for designer merchandise? I am very much interested in starting my own online store business and selling designer merchandise and accessories, but I don't know how to start or whom to approach. I make illustrations, but I don't know how to earn a living out of them.
2. How do you collaborate with big brands like Lenovo, Tinder, etc.? I have no idea how to approach well-known companies. Do they get in touch with you, or do you contact them?
3. Do you manufacture the cute illustrated products or do you get them printed? Whom should I contact, and how do I collaborate with product manufacturers?
4. Where do you store all your products before shipping? What is the process when you get an order? For example, if you get an order for a designer bag, is it ready to be shipped beforehand, or do you get your artwork printed on the bag later? Can you please guide me on how to approach manufacturers and collaborate with them?
Your inputs will be of great help as I am very much interested in designer merchandise.
Thank you so much for your guidance and help. You are an inspiration.
Thank you soooo much for your mail!
1) I think I was a bit lucky on that front. I had a few people ask for merchandise before I even considered it. But I took a risk, thanks to the help of a friend, of making a few products for a flea market event (the Sunday Soul Sante) and we ended up selling out and having people mail with queries for months after. That was when I realised there was a need; though it was a long time later that I started the online store, stopped and started again!
When it comes to merchandise, I think the thing to realise is that it’s an entirely different ballgame. Handling the manufacturing, stocking, logistics and then sales is a full-time job (depending on how profitable you want it to be). But getting your products manufactured is as simple as going to the nearest/biggest print shop around and getting your work printed on the merchandise that’s easily available to be customised. This could include t-shirts, mugs, badges, prints etc. Basically, things you can make in small quantities without worrying that you’ll be stuck with a whole lot of stock. I would say look at it as a testing project first and then go in all guns blazing after doing the math of the business. It’s a little less risky and grey-hair-inducing.
2) I think I’ve been lucky enough to be contacted by every client who I’ve ever worked with. It’s also because I’m rather public with my work (though I rarely actually showcase client work). I would recommend getting a portfolio of your work online first, because when you approach a potential client, you want to show them your capabilities. Then, go ahead and approach them! We live in such a beautiful age wherein most people are just an email away! And that too, an email address that can be scouted with just a bit of research! Mail and mail some more. Showcase your work. Also pitch some ideas! The sky is the limit!
3) Yes, we manufacture each product. We work with manufacturers (sometimes two and even three on a single product). I started off, as I mentioned, at the local print shop. That worked for a while, and then I took a break because running a product business meant that I had to be doing it full time. Which also meant that if I was single-handedly doing the packaging and shipping, I had to be in town almost all the time, which is not possible with my schedule. It wasn’t until I met Saurabh, my current partner, that we started the online store in full swing.
Finding manufacturers is a task in itself, and to be honest, I don’t have a knack for it, nor do I enjoy it. Figuring out cost prices along with follow ups for timelines and MOQs (minimum order quantities) is something that can take more time than I have to offer. So my partner handles that side of the business while I do what I love, i.e., the drawing bit!
Back to my beginnings, I think the local print shop should do just fine initially and if it’s something you want to pursue after the first few lots, then I would suggest getting someone to help with the business side of things, including finding manufacturers (this is sometimes asking current manufacturers and just plain ol’ google searches!).
4) When I started, I only stocked smaller products under my bed in boxes. Also under my housemate’s bed. It was a mess. Every time there was an order, I’d have to pull them out, pack them and call the courier guy who would send it across. It took a LOT of time in my day even if the number of orders wasn’t too many. At this point, we have a little office with a store room for all our products. I still recommend going through the hassle of the home storage before investing in a storage room because the number of orders will dictate if the business is worth growing and investing your time and money. Almost every order is pre-manufactured, which means we have stock of products. But we do have a few products (like our custom stamps) that we manufacture on demand. That’s only something we’ve been able to do with a team and process in place, because it takes longer and also involves working with a manufacturer every time an order comes in. Custom orders tend to be a lot more expensive (depending on the product) because you manufacture single units, which means it’s a lot more effort from the manufacturer’s side and a lot of time from yours too! So unless it’s a high-value product, like say a bag as opposed to a badge, I wouldn’t think it would be worth getting into.
A shirt I mocked when I was in university. I used to draw more skulls back then haha!
Lastly, you should also start mocking your illustrations onto products. I did this from my university days, which was when I knew I would love to see my work on merchandise. Mocking is still something we do because I cannot get everything I envision manufactured, so I mock it instead!
Hope this helps! All the best and don’t be afraid to take a risk!
You have no idea what you have done here. |
# Copyright (C) 2011 Red Hat, Inc.
# Written by Zane Bitter <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import uuid
class Host(object):
def __init__(self, state):
self.manager = state.manager
def __call__(self, param):
for h in self.manager.hosts():
if str(h) == param:
return h
raise ValueError('Host "%s" not found' % param)
def complete(self, param):
return filter(lambda s: s.startswith(param),
map(str, self.manager.hosts()))
class SharedInfo(object):
def __init__(self, default=None):
self.last = self.default = default
def set(self, last):
self.last = last
def setdefault(self):
self.last = self.default
def get(self):
last, self.last = self.last, self.default
return last
class Package(object):
def __init__(self, state, info):
self.state = state
self.info = info
self.info.setdefault()
def __call__(self, param):
if param not in self.state.list_packages():
self.info.setdefault()
raise ValueError('Package "%s" not found' % param)
self.info.set(param)
return param
def default(self):
self.info.setdefault()
return self.info.default
def complete(self, param):
packages = self.state.list_packages()
if param in packages:
self.info.set(param)
else:
self.info.setdefault()
return [p + ' ' for p in packages if p.startswith(param)]
class Class(object):
def __init__(self, state, pkginfo):
self.state = state
self.pkginfo = pkginfo
def __call__(self, param):
package = self.pkginfo.get()
if param not in self.state.list_classnames(package):
fq = package is not None and ('%s:%s' % (package, param)) or param
raise ValueError('Class "%s" not found' % fq)
return param
def complete(self, param):
classnames = self.state.list_classnames(self.pkginfo.get())
return [c + ' ' for c in classnames if c.startswith(param)]
QMF_TYPES = (U8, U16, U32, U64,
_UNUSED,
SSTR, LSTR,
ABSTIME, DELTATIME,
REF,
BOOL,
FLOAT, DOUBLE,
UUID,
FTABLE,
S8, S16, S32, S64,
OBJECT, LIST, ARRAY) = range(1, 23)
class QMFProperty(object):
"""CLI verification class for a QMF Property argument"""
def __new__(cls, arg):
"""Create a CLI argument of the correct type from a SchemaArg."""
if cls == QMFProperty:
return _qmfPropertyTypes.get(arg.type, String)(arg)
else:
return super(QMFProperty, cls).__new__(cls, arg)
def __init__(self, arg):
self.arg = arg
self.__name__ = str(self)
def __repr__(self):
return self.__class__.__name__.lower()
def __str__(self):
return self.arg.name
def help(self):
return ('%9s %-18s %s' % (repr(self),
str(self),
self.arg.desc or '')).rstrip()
def default(self):
return self.arg.default
class String(QMFProperty):
"""A QMF String property argument"""
def __repr__(self):
if self.arg.type not in _qmfPropertyTypes:
return str(self)
return self.arg.type == SSTR and 'sstr' or 'lstr'
def __call__(self, param):
maxlen = self.arg.maxlen or (self.arg.type == SSTR and 255) or 65535
if len(param) > maxlen:
raise ValueError('Parameter is too long')
return param
class Bool(QMFProperty):
"""A QMF Boolean property argument"""
TRUTHY = ('true', 't', '1', 'y', 'yes')
FALSEY = ('false', 'f', '0', 'n', 'no')
def __call__(self, param):
lc = param.lower()
if lc in self.TRUTHY:
return True
if lc in self.FALSEY:
return False
try:
value = eval(param, {}, {})
except (NameError, SyntaxError):
raise ValueError('"%s" is not a boolean' % (param,))
if not isinstance(value, (int, bool)):
raise ValueError('"%s" is not a boolean' % (param,))
return bool(value)
def complete(self, param):
if not param:
return map(lambda l: l[0].capitalize(), (self.TRUTHY, self.FALSEY))
lc = param.lower()
matches = []
for v in self.TRUTHY + self.FALSEY:
if v == lc:
return [param + ' ']
if v.startswith(lc):
return [param + v[len(param):] + ' ']
return []
class Int(QMFProperty):
"""A QMF Integer property argument"""
NAMES = {U8: 'u8', U16: 'u16', U32: 'u32', U64: 'u64',
S8: 's8', S16: 's16', S32: 's32', S64: 's64'}
MINIMUM = {U8: 0, U16: 0,
U32: 0, U64: 0,
S8: -(1 << 7), S16: -(1 << 15),
S32: -(1 << 31), S64: -(1 << 63)}
MAXIMUM = {U8: (1 << 8) - 1, U16: (1 << 16) - 1,
U32: (1 << 32) - 1, U64: (1 << 64) - 1,
S8: (1 << 7) - 1, S16: (1 << 15) - 1,
S32: (1 << 31) - 1, S64: (1 << 63) - 1}
def __str__(self):
if self.arg.min is not None or self.arg.max is not None:
return '<%d-%d>' % (self._min(), self._max())
return QMFProperty.__str__(self)
def __repr__(self):
try:
return self.NAMES[self.arg.type]
except KeyError:
return QMFProperty.__repr__(self)
def _min(self):
"""Get the minimum allowed value"""
if self.arg.min is not None:
return self.arg.min
try:
return self.MINIMUM[self.arg.type]
except KeyError:
return -(1 << 31)
def _max(self):
"""Get the maximum allowed value"""
if self.arg.max is not None:
return self.arg.max
try:
return self.MAXIMUM[self.arg.type]
except KeyError:
return (1 << 31) - 1
def __call__(self, param):
value = int(param)
if value < self._min():
raise ValueError('Value %d underflows minimum of range (%d)' %
(value, self._min()))
if value > self._max():
raise ValueError('Value %d overflows maximum of range (%d)' %
(value, self._max()))
return value
class Float(QMFProperty):
"""A QMF Floating Point property argument"""
def __repr__(self):
return self.arg.type == FLOAT and 'float' or 'double'
def __call__(self, param):
return float(param)
class Uuid(QMFProperty):
"""A QMF UUID property argument"""
LENGTH = 32
def __call__(self, param):
return uuid.UUID(param)
def complete(self, param):
raw = param.replace('-', '')
        try:
            int(raw, 16)
        except ValueError:
            return []
if len(raw) in (8, 12, 16, 20):
return [param + '-']
if len(raw) == self.LENGTH:
return [param + ' ']
return ['']
class List(QMFProperty):
"""A QMF List property argument"""
def __call__(self, param):
try:
l = eval(param, {}, {})
except (NameError, SyntaxError):
raise ValueError('"%s" is not a valid list' % (param,))
if not isinstance(l, list):
raise ValueError('"%s" is not a list' % (param,))
return l
def complete(self, param):
if not param:
return ['[']
if not param.startswith('['):
return []
if param.endswith(']'):
try:
self(param)
except ValueError:
return []
return [param + ' ']
return ['']
class Map(QMFProperty):
"""A QMF Map property argument"""
def __call__(self, param):
try:
l = eval(param, {}, {})
except (NameError, SyntaxError):
raise ValueError('"%s" is not a valid map' % (param,))
if not isinstance(l, dict):
raise ValueError('"%s" is not a map' % (param,))
return dict((unicode(k), v) for k, v in l.items())
def complete(self, param):
if not param:
return ['{']
if not param.startswith('{'):
return []
if param.endswith('}'):
try:
self(param)
except ValueError:
return []
return [param + ' ']
return ['']
_qmfPropertyTypes = {
U8: Int,
U16: Int,
U32: Int,
U64: Int,
SSTR: String,
LSTR: String,
BOOL: Bool,
FLOAT: Float,
DOUBLE: Float,
UUID: Uuid,
FTABLE: Map,
S8: Int,
S16: Int,
S32: Int,
S64: Int,
LIST: List,
ARRAY: List,
}
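
# Illustrative sketch (not part of the original module): QMFProperty.__new__
# dispatches on the schema argument's QMF type code. The Arg class below is a
# hypothetical stand-in for the real schema argument object, which must expose
# at least type, name, desc, default, maxlen, min and max.
#
#   class Arg(object):
#       def __init__(self, name, type):
#           self.name, self.type = name, type
#           self.desc = self.default = self.maxlen = self.min = self.max = None
#
#   prop = QMFProperty(Arg('timeout', U32))   # dispatches to Int
#   prop('30')                                # -> 30
#   prop('-1')                                # -> ValueError (underflows U32)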
|
A (physics) teacher education programme is typically focused on the development of requisite dispositions, knowledge, and skills needed for effective teaching. However, even if these are developed, the professional demands on a teacher's time are so great outside of class time, and so complex during it, that if every decision requires multiple considerations and deliberations with oneself, the productive decisions might not materialise. I will argue that the link between intentional decision making and actual teaching practice is teachers' habits. I will provide a theoretical framework for the determinative role that habits of mind, habits of practice, and habits of maintenance can play in physics teacher formation and professional growth. The indispensable context for habit formation is apprenticeship in a community that shares a common vision for effective teaching. To illustrate the framework I have developed together with Eugenia Etkina (Rutgers University) and Stamatis Vokos (California Polytechnic State University), I will provide some concrete examples of the habits of physicists and habits of physics teachers that a pre-service teacher training programme should aim to develop in prospective physics teachers. |
#! /usr/bin/env python
"""
Parses the NetHMC output into TSV sorted by affinity.
"""
import argparse
import itertools
import re
import sys
from operator import itemgetter
def parse_fasta_for_names(filename):
"""
"""
name_dict = {}
with open(filename, 'rU') as handle:
for line in handle:
if not re.match(">", line):
continue
name = line.strip('\n')[1:]
name_sub = name[:15]
name_dict[name_sub] = name
return name_dict
def parse_tsv(filename, name_dict):
"""
"""
output_matrix = []
with open(filename, 'rU') as handle:
curr_protein = []
for line in handle:
if line[0] == "#" or line[0] == "-" or len(line.strip('\n')) < 1:
continue
if re.match("Protein", line):
continue
arow = line.strip('\n').split()
if arow[0] == "pos":
continue
arow[12] = float(arow[12])
if len(arow[10].split('-')) == 3:
#arow = arow[:10] + arow[10].split('_') + arow[11:]
arow = arow[:10] + name_dict[arow[10]].split('-') + arow[11:]
#print arow
output_matrix.append(arow)
return output_matrix
def main():
"""
Arg parsing and central dispatch.
"""
# arg parsing
parser = argparse.ArgumentParser(description="Parse NetHMC output to TSV.")
parser.add_argument("-f", "--fasta", metavar="FASTA",
help="fasta file for epitopes input into nethmc")
parser.add_argument("nethmc", metavar="NETHMC_OUTPUT",
nargs='+',
help="Output file from NetHMC")
args = parser.parse_args()
# Central dispatch
name_dict = parse_fasta_for_names(args.fasta)
lom = list(itertools.chain.from_iterable([parse_tsv(nethmc, name_dict)
for nethmc in args.nethmc]))
#print set([len(v) for v in list(itertools.chain.from_iterable(lom))])
nethmc_matrix = sorted([v for v in lom if len(v) >= 18 and v[16] <= 50],
key=itemgetter(14))
sys.stdout.write('\t'.join(["pos", "HLA", "peptide", "Core Offset", "I_pos",
"I_len", "D_pos", "D_len", "iCore",
"identity", "chrom", "genomic_pos",
"gene_symbol", "ref_allele", "alt_allele",
"1-log50k(aff)", "Affinity(nM)",
"%Rank BindLevel"]) + '\n')
for v in nethmc_matrix:
sys.stdout.write('\t'.join([str(i) for i in v[:18]]) + '\n')
#nethmc_matrix
if __name__ == "__main__":
main()
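
# Example invocation (the script and file names here are hypothetical):
#   python parse_nethmc.py --fasta epitopes.fa nethmc_run1.out nethmc_run2.out > binders.tsv
# The script writes the header defined in main() followed by the filtered,
# affinity-sorted rows to stdout.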
|
MANCHESTER, N.H. – Junior Ken Morrisino (E. Longmeadow, Mass.) and freshman Shaine Carpenter (Florham Park, N.J.) each collected two hits and combined to drive in five runs, but the Saint Anselm College baseball team closed out its season with a 15-9 loss to third-ranked Franklin Pierce University on Tuesday evening at Sullivan Park.
MANCHESTER, N.H. – Senior Dave Kent (N. Falmouth, Mass.) was dominant on the mound in game one and classmates Mike Hayden (Cranston, R.I.) and Joe Morin (Salem, N.H.) put together great days at the plate as the Saint Anselm College baseball team earned a Senior Day sweep of Bentley University on Saturday afternoon at Sullivan Park. Saint Anselm posted a 7-2 victory in game one and then held on for an 8-7 win in game two.
MANCHESTER, N.H. – Senior Mike Hayden (Cranston, R.I.) tied the game with an RBI double in the third inning, but Bentley University plated a pair in the seventh to beat the Saint Anselm College baseball team, 5-3, Friday afternoon at Sullivan Park.
COLCHESTER, Vt. – What a difference two weeks makes – sort of.
MANCHESTER, N.H. – Senior Mike Hayden (Cranston, R.I.) drove in three runs, but the Saint Anselm College baseball team fell to 20th-ranked Southern New Hampshire University, 10-5, Tuesday evening at Penmen Field.
EASTON, Mass. – Sophomore Joseph Levasseur (Rowley, Mass.) submitted a quality start, but the Saint Anselm College baseball team closed out its three-game weekend set at Stonehill College with a 5-0 loss Sunday afternoon at Lou Gorman Field.
EASTON, Mass. – Senior Tom Hudon (Merrimack, N.H.) did not allow an earned run over 8.0 innings, but was the tough-luck loser on the backend of a doubleheader Saturday as the Saint Anselm College baseball team was swept by Stonehill College at Lou Gorman Field. Stonehill posted a 12-1 victory in game one before eking out a 2-0 win in the second game.
MANCHESTER, N.H. – Junior Ken Morrisino (E. Longmeadow, Mass.) and freshman John Marculitis (Shrewsbury, Mass.) each drove in a run, but the Saint Anselm College baseball team only mustered three hits as Cole Warren tossed five no-hit innings to lead Southern New Hampshire University to a 14-3 victory Thursday evening at Penmen Field.
WORCESTER, Mass. – Sophomore Nick Bragole (Newburyport, Mass.) and freshman John Marculitis (Shrewsbury, Mass.) each knocked in two runs and sophomore Joseph Levasseur (Rowley, Mass.) allowed just two earned runs in 7.2 innings as the Saint Anselm College baseball team posted an 8-5 victory against Assumption College on Wednesday afternoon at Rocheleau Field.
RINDGE, N.H. – Senior Mike Hayden (Cranston, R.I.) went 3-for-5 and knocked in a pair of runs, but it was not nearly enough as the Saint Anselm College baseball team was handed a 17-4 drubbing by third-ranked Franklin Pierce University on Tuesday evening at Pappas Field.
MANCHESTER, N.H. – Senior Dave Kent (N. Falmouth, Mass.) turned in a complete-game quality start in game one and junior Ken Morrisino (E. Longmeadow, Mass.) hit a big three-run homer and made a trio of stellar defensive plays in game two as the Saint Anselm College baseball team swept a pair of cross-divisional games from Adelphi University on Sunday afternoon at Sullivan Park. The Hawks earned a 5-3 win in game one and rallied for a 4-3 victory in the second contest.
MANCHESTER, N.H. – Junior Tom Hudon (Merrimack, N.H.) kept the game tied into the fourth inning and allowed three runs over 5.2 frames, but the Saint Anselm College baseball team was held off by Southern New Hampshire University in a 4-1 defeat Saturday night at Penmen Field.
MANCHESTER, N.H. – The winter that just won’t quit. Mother Nature, which has wreaked havoc on athletic schedules this spring after dumping record snow during the winter, had one final (we hope) trick up her sleeve Wednesday.
MANCHESTER, N.H. – The Saint Anselm College baseball team's game against Saint Michael's College, originally scheduled for Wednesday, April 8 (3:30 p.m.) at home and then moved to Southern New Hampshire University's Penmen Field at 7 p.m., will now return home to Sullivan Park and be played at its original time of 3:30 p.m.
HAVERHILL, Mass. – Junior Ken Morrisino (Longmeadow, Mass.) tied game two in the bottom of the ninth with an RBI single and then scored on a wild pitch to win it, while classmate Tom Hudon (Merrimack, N.H.) allowed just one unearned run over nine innings, as the Saint Anselm College baseball team walked off with a 2-1 victory to earn a Monday matinee split with Merrimack College at Trinity Stadium. Saint Anselm also rallied to tie game one with two runs in the ninth, but fell, 7-3, in 10 innings.
HAVERHILL, Mass. – Sophomore Jon McQuarrie (Salem, N.H.) went 3-for-4 and drove in a run and freshman Shaine Carpenter (Florham Park, N.J.) recorded a pair of RBI, but the Saint Anselm College baseball team dropped Saturday evening’s 8-3 decision against Merrimack College at Trinity Stadium.
NORTHBOROUGH, Mass. – Sophomore Jon McQuarrie (Salem, N.H.) and junior Ken Morrisino (E. Longmeadow, Mass.) each knocked in two runs and junior Tom Hudon (Merrimack, N.H.) recorded his second straight victory in game one as the Saint Anselm College baseball team halved a Thursday doubleheader with Assumption College at the New England Baseball Complex. Saint Anselm took the first game, 10-4, before falling in game two, 12-4.
RINDGE, N.H. – Freshman Max Wadington (Burlington, Conn.) and senior Mike Hayden (Cranston, R.I.) each drove in a run, but the Saint Anselm College baseball team dropped an 8-2 decision to fifth-ranked Franklin Pierce University on Tuesday afternoon at Pappas Field.
DANBURY, Conn. – Junior Tom Hudon (Merrimack, N.H.) of the Saint Anselm College baseball team was named the Eastern College Athletic Conference (ECAC) Pitcher of the Week for the period ending March 29, the conference office announced Tuesday evening.
MANSFIELD, Mass. – Junior Tom Hudon (Merrimack, N.H.) of the Saint Anselm College baseball team was named the Northeast-10 Pitcher of the Week for the period ending March 29, the league office announced Monday.
RINDGE, N.H. – Junior Tom Hudon (Merrimack, N.H.) came out on the better end of a fantastic pitcher’s duel Wednesday night as he hurled a complete-game shutout and struck out a career-high 12 batters to lead the Saint Anselm College baseball team to a 1-0 victory against Saint Michael’s College at Franklin Pierce University’s Pappas Field.
MANCHESTER, N.H. – The Saint Anselm College baseball team’s contest against Saint Michael’s College, originally scheduled to be played at home Wednesday, March 25 (3:30 p.m.) and then moved to Franklin Pierce University’s Pappas Field on Monday, March 23 (3 p.m.) before being postponed, has a “new” make-up date.
HAVERHILL, Mass. – Junior Ken Morrisino (E. Longmeadow, Mass.) came through with a two-out, two-run single to left to finish off a three-run ninth that powered the Saint Anselm College baseball team to a 5-4 come-from-behind victory against American International College on a frigid Sunday afternoon at Trinity Stadium. |
# -*- coding: utf-8 -*-
from cached_property import cached_property
from django.core.exceptions import ObjectDoesNotExist
from django.views.generic import (
ListView, UpdateView, TemplateView, DetailView, CreateView
)
from django.core.urlresolvers import reverse
from braces.views import LoginRequiredMixin
from store.exceptions import OutOfStock
from .models import Flavor, Tasting
from .tasks import update_users_who_favorited
def list_flavor_line_item(sku):
try:
return Flavor.objects.get(sku=sku, quantity__gt=0)
except Flavor.DoesNotExist:
msg = "We are out of {0}".format(sku)
raise OutOfStock(msg)
def list_any_line_item(model, sku):
try:
return model.objects.get(sku=sku, quantity__gt=0)
except ObjectDoesNotExist:
msg = "We are out of {0}".format(sku)
raise OutOfStock(msg)
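
# Illustrative calls (the SKU value is hypothetical); both helpers raise
# OutOfStock when no matching product has stock left:
#   flavor = list_flavor_line_item('van-choc-001')
#   flavor = list_any_line_item(Flavor, 'van-choc-001')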
class TasteListView(ListView):
model = Tasting
class TasteDetailView(DetailView):
model = Tasting
class TasteResultView(TasteDetailView):
template_name = "taste/results.html"
class TasteUpdateView(UpdateView):
model = Tasting
def get_success_url(self):
return reverse("taste:detail", kwargs={"pk": self.object.pk})
class FreshFruitMixin(object):
@cached_property
def likes_and_favorites(self):
likes = self.object.likes()
favorites = self.object.favorites()
return {
"likes": likes,
"favorites": favorites,
"favorites_count": favorites.count(),
}
def get_context_data(self, **kwargs):
context = super(FreshFruitMixin, self).get_context_data(**kwargs)
context["has_fresh_fruit"] = True
return context
class FruityFlavorView(FreshFruitMixin, TemplateView):
template_name = "taste/fruity_flavor.html"
class FlavorDetailView(LoginRequiredMixin, DetailView):
model = Flavor
class FlavorCreateView(LoginRequiredMixin, CreateView):
model = Flavor
fields = ('title', 'slug', 'scoops_remaining')
def form_valid(self, form):
update_users_who_favorited(
instances=self.object,
favorites=self.likes_and_favorites['favorites']
)
return super(FlavorCreateView, self).form_valid(form)
|
The Lincoln perpetual plaque features a 12 plate design and headplate. The plaque itself measures 9" x 12" with individual plates that measure 7/8" x 2 3/4". All plates are laser engravable. |
from string import Template
from collections import deque
from decimal import Decimal
import pandas as pd
from spartan.utils.errors import *
from spartan.utils.misc import Bunch,fold_seq
def meme_minimal2transfac(meme_path,out_path):
"""
"""
meme_deck = deque(open(meme_path,'rU'))
#raise Exception
transfac_out = open(out_path,'w')
try:
while meme_deck:
motif = Bunch()
try:
motif.names = get_next_names(meme_deck)
motif.matrix = get_next_matrix(meme_deck)
motif.url = get_next_url(meme_deck)
write_next_transfac_motif(motif,transfac_out)
except StopIteration:
raise
except Exception as exc:
if len(meme_deck) == 0:
pass
else:
raise exc
finally:
transfac_out.close()
def get_next_names(meme_deck):
while meme_deck:
line = meme_deck.popleft()
if line.startswith('MOTIF'):
return line.strip().split()[1:]
else:
# chew through lines until we find the next MOTIF
pass
def get_next_matrix(meme_deck):
matrix = []
mat_info = Bunch()
# collect mat_info
while meme_deck:
line = meme_deck.popleft()
if line.startswith('letter-probability matrix:'):
line = line.strip().replace('letter-probability matrix:','').replace('= ','=').split()
for attr in line:
attr = attr.split('=')
mat_info[attr[0]] = attr[1]
break
else:
# chew through lines until we find the next matrix data
pass
# collect matrix data
while meme_deck:
line = meme_deck.popleft()
if line.startswith('\n'):
break
else:
position = pd.Series([Decimal(i) for i in line.strip().split()],index=['A','C','G','T'])
matrix.append(position)
# confirm correct length
if len(matrix) == int(mat_info.w):
matrix = pd.DataFrame(matrix)
else:
raise SanityCheckError('length of matrix (%s) does not equal "w" attribute (%s) from "letter-probability matrix" line.'
% (len(matrix),mat_info.w))
# convert probabilities into counts
matrix = (matrix.applymap(lambda x: round(x,5)) * int(mat_info.nsites)).applymap(int)
# confirm all positions sum to the same value
#if len(set(matrix.sum(1))) == 1:
#pass
#else:
#raise SanityCheckError('all positions in matrix should sum to the same value. Encountered:\n%s' % (str(matrix.sum(1))))
return matrix
def get_next_url(meme_deck):
while meme_deck:
line = meme_deck.popleft()
if line.startswith('URL'):
return line.strip().split()[-1]
else:
# chew through lines till we get to 'URL'
pass
def write_next_transfac_motif(motif,transfac_out):
"""
AC accession number
ID any_old_name_for_motif_1
BF species_name_for_motif_1
P0 A C G T
01 1 2 2 0 S
02 2 1 2 0 R
03 3 0 1 1 A
04 0 5 0 0 C
05 5 0 0 0 A
06 0 0 4 1 G
07 0 1 4 0 G
08 0 0 0 5 T
09 0 0 5 0 G
10 0 1 2 2 K
11 0 2 0 3 Y
12 1 0 3 1 G
XX
//
"""
name = motif.names[1]
ac = '_'.join(motif.names)
species = 'none_listed' #TODO: handle species field
#TODO: write a REAL consensus function that uses IUPAC degen code
matrix_line = Template('MA\t$pos\t$A\t$C\t$G\t$T\t$major_nuc\n')
#transfac_out.write('AC %s\n' % (ac))
#transfac_out.write('XX\n')
transfac_out.write('NA\t%s\n' % (name))
#transfac_out.write('XX\n')
transfac_out.write('BF\t%s\n' % (species))
#transfac_out.write('P0\tA\tC\tG\tT\n')
transfac_out.write('XX\n')
for i in list(motif.matrix.index):
m = motif.matrix
fields = dict(pos='%02d' % (i+1),
A=m.ix[i,'A'],
C=m.ix[i,'C'],
G=m.ix[i,'G'],
T=m.ix[i,'T'],
major_nuc=m.ix[i].idxmax())
transfac_out.write(matrix_line.substitute(fields))
#transfac_out.write('XX\n')
#transfac_out.write('CC %s\n' % (motif.url))
transfac_out.write('XX\n//\n')
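
# Illustrative usage (paths are hypothetical):
#   meme_minimal2transfac('meme_out/meme.txt', 'motifs.transfac')
# Each MOTIF block in the MEME minimal file becomes one NA/BF/MA record in the
# TRANSFAC-style output, with probabilities rescaled to counts via nsites.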
|
Ward, Michael, "Advantages and Limitations of Different Resolution Force Fields to Study Membrane Active Peptides" (2017). Master's Theses. 1107. |
"""Exceptions and error messages used by the ecommerce API."""
from django.utils.translation import ugettext_lazy as _
from rest_framework import status
from rest_framework.exceptions import APIException
PRODUCT_OBJECTS_MISSING_DEVELOPER_MESSAGE = u"No product objects could be found in the request body"
PRODUCT_OBJECTS_MISSING_USER_MESSAGE = _("You can't check out with an empty basket.")
SKU_NOT_FOUND_DEVELOPER_MESSAGE = u"SKU missing from a requested product object"
SKU_NOT_FOUND_USER_MESSAGE = _("We couldn't locate the identification code necessary to find one of your products.")
PRODUCT_NOT_FOUND_DEVELOPER_MESSAGE = u"Catalog does not contain a product with SKU [{sku}]"
PRODUCT_NOT_FOUND_USER_MESSAGE = _("We couldn't find one of the products you're looking for.")
PRODUCT_UNAVAILABLE_DEVELOPER_MESSAGE = u"Product with SKU [{sku}] is [{availability}]"
PRODUCT_UNAVAILABLE_USER_MESSAGE = _("One of the products you're trying to order is unavailable.")
class ApiError(Exception):
"""Standard error raised by the API."""
pass
class ProductNotFoundError(ApiError):
"""Raised when the provided SKU does not correspond to a product in the catalog."""
pass
class BadRequestException(APIException):
status_code = status.HTTP_400_BAD_REQUEST
|
Had the previous version of this case (different looking camera hole, pull tab didn't snap back automatically) for over a year and I was IN LOVE. I never changed my case ever. After a while I decided it was time to get a new one since it had gotten pretty dirty over the year. So I got the new version of it and after a few months (got it Mid December) I'm a bit disappointed. First off, the rim of the case seems to be completely impossible to clean and my case looks like it could be a year old already! (See photos) Another thing is that whenever I take the case off, I notice new markings in the back of my phone - as if tiny rocks wedged themselves between the case and phone. BUT. there's no trace of anything in the case except for a few flat markings that look like pencil marks (see photo). I have no clue what it could be but it's really annoying since the marks are really visible on the phone (see photos). Anyways. Overall the case does its job, holds 3 cards, can squeeze 4 depending on the thickness of the cards.
I dropped my phone from 4 feet off the ground it landed on the back flat on the ground and the front screen shattered really bad. The case will protect the phone really well if it lands on the edges!!! Review by Linh D. |
# access to the beer similarity table
# cli: truncate beersimilarity, load unique feature set
# and recompute all beer similarities
from datetime import datetime as dt
from tableacc import TableAcc
class BeerSimilarity(TableAcc):
def __init__(self):
super(BeerSimilarity, self).__init__(
table_name='beersimilarity',
cols=['beer_id_ref', 'beer_id_comp', 'similarity'],
upsert_proc='similarityupsert')
def similarity(self, beer_id_ref, beer_id_comp):
return self._select(
cols=["similarity"],
where="beer_id_ref = %s and beer_id_comp = %s",
params=(beer_id_ref,beer_id_comp) )
def similar_beers(self, beer_id, ordered=True, top=0):
return self._select(
cols=["beer_id_comp","similarity"],
where="beer_id_ref = %s",
order_by="similarity desc",
limit=top,
params=(beer_id,) )
def smooth_similarity(self, similarity_records):
self._exec_many_procs("similaritysmooth", similarity_records)
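
# Illustrative usage (the beer ID is hypothetical). Assuming the inherited
# _select helper yields (beer_id_comp, similarity) rows, the five most similar
# beers to a reference beer can be fetched like this:
#   bs = BeerSimilarity()
#   for comp_id, sim in bs.similar_beers(1234, top=5):
#       print comp_id, sim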
def __asyncable_similarity(tup):
# bs, beer_id_ref, ref_vect, s_ids, b_ids, X_t, top = tup
# bs: beer similarity object for db commit
# ref_vects from one style
# ref_b_ids: beer ids for ref vecs
# s_ids, b_ids: style and beer indices of X_t
# X_t for beers in other styles to be compared to
# keep top similarities by style
bs, b_refs, X_t_ref, b_comps, X_t_comp, top = tup
start = dt.now()
print 'Beer ct %s vs ct %s: Compute Similarity' % (len(b_refs),len(b_comps))
try:
for i in xrange(len(b_refs)):
# compute similarity between beer_ref[i] and all b_comps
lk = linear_kernel(X_t_ref.getrow(i), X_t_comp).flatten()
# take #top of largest similarities
n = len(lk)
kp = min(top, n)
m_ixs = lk.argsort()[-kp:]
sims = [ (b_refs[i], b_comps[j], lk[j]) for j in m_ixs if b_refs[i] != b_comps[j] ]
#bs.smooth_similarity(sims)
bs.add_many(sims)
print 'Comparison Complete: %s' % (dt.now() - start)
return (b_refs, None)
except Exception as e:
return (b_refs, e)
def __asyncable_transform(tup):
vectorizer, style_id, X = tup
print 'Vectorize %s: start' % style_id
start = dt.now()
X_t = vectorizer.transform(X['review'])
print 'Vectorize %s: done %s' % (style_id, (dt.now()-start))
return (style_id, X['beer_id'].values, X_t)
if __name__ == "__main__":
import pickle
import numpy as np
import pandas as pd
from multiprocessing import Pool
from datetime import datetime as dt
# get access to custom vectorizer
import sys
sys.path.append('src/')
from reviewvectorizer import ReviewTfidf
from styles import Styles
from basewordcts import expanded_stop_words
from reviewfeatures import ReviewFeatures
# same as cosine_similarity for normalized vectors
from sklearn.metrics.pairwise import linear_kernel
# pickling helper func
def pkl_l(src):
with open(src, 'rb') as f:
res = pickle.loads(f.read())
return res
# loading/building vectorizer
def load_vec(vec_pkl):
try:
# load pickled vectorizer if available
return True, pkl_l(vec_pkl)
except Exception as e:
print "Pickled vectorizer not found."
print "Must run styletfidfnb.py to build model"
return False, None
def recompute_and_populate():
"""
- load pickled vectorizer
- transform docs
- compute cosine similarity for all vector pairs
- data is retrieved at rev_rollup_ct = 1 (beer level)
"""
vec_pkl = "src/vocab/review_vectorizer.p"
was_pkl, vec = load_vec(vec_pkl)
# load data for styles with feature sets
# overridden until full feature table is populated
styles = Styles()
top_sy = [159, 84, 157, 56, 58, 9, 128, 97, 116, 140]
print 'Comparing the top %s styles: %s' % (len(top_sy), ', '.join(str(s) for s in top_sy))
X = styles.beer_reviews_rollup(top_sy, limit=0, rev_rollup_ct=1, shuffle=False)
if was_pkl:
print "Loaded pickled vectorizer."
print "Feature count: %s" % len(vec.get_feature_names())
print "Transforming reviews"
trans_pool = Pool(min(10,len(top_sy)))
res_t = trans_pool.map(__asyncable_transform,
[ (vec, sy, X[ X['style_id'] == sy ]) for sy in top_sy])
# as style keyed dict
res_t = {
r[0]: {
'beer_ids': r[1],
'X_t': r[2]
} for r in res_t
}
else:
# exit program
return 0
print 'Truncating similarity table'
bs = BeerSimilarity()
# bs.remove_all()
dim1 = sum(v['X_t'].shape[0] for k,v in res_t.iteritems())
dim2 = sum(len(v['X_t'].data) for k,v in res_t.iteritems())
print 'Computing similarities and saving to db %s' % dim1
print 'Nonzero elements %s' % dim2
# set style RU
# will account for symmetry in the database
# ru_sids = [ (top_sy[i], top_sy[j]) for i in xrange(len(top_sy)) for j in xrange(i,len(top_sy)) ]
ru_sids = [ (top_sy[i], top_sy[i]) for i in xrange(len(top_sy)) ]
pool_inp = []
for ruc in ru_sids:
X_t_ref = res_t[ruc[0]]['X_t']
b_id_ref = res_t[ruc[0]]['beer_ids']
X_t_comp = res_t[ruc[1]]['X_t']
b_id_comp = res_t[ruc[1]]['beer_ids']
pool_inp.append((bs, b_id_ref, X_t_ref, b_id_comp, X_t_comp, 100))
p = Pool(min(10,len(top_sy)))
b_id_res = p.map(__asyncable_similarity, pool_inp)
for res in b_id_res:
if res[1] is not None:
print '%s %s' % (', '.join(str(r) for r in res[0]), res[1])
# start main
while (True):
inp = 'y' #raw_input("Are you sure you want to overwrite beerad.beersimilarity? [y/n] ")
inp = inp.strip().lower()
if inp == 'n':
break
elif inp == 'y':
recompute_and_populate()
break |
Best Quality Blogs That Accept Guest Posts.
Hey friends, wassup!! If you are searching for the best quality blogs that accept guest posts, then you are in the perfect place. I have made a list of the most recent high-PR blogs which accept guest posts.
In an earlier post I discussed what a backlink is and how important backlinks are to rank your blog in the Google search engine. There are many methods to make backlinks, and the easiest, which I also discussed in an earlier post, is commenting on dofollow CommentLuv-enabled blogs.
There are many advantages of guest posting, like getting backlinks from high-PR blogs. When you make a post on these guest-post-accepting blogs, their huge readership will read your guest post, want to know more about you, and then come to your blog. Guest posting will definitely help you increase your Alexa rank.
You might also like this "Tips and tricks to improve Alexa rank of blog".
I'm making the list of blogs on the basis of their categories, which accept guest posts on a particular niche or topic like blogging, SEO, designing, etc., and I'm also giving a direct link to each blog's submit-guest-post page.
Shoutmeloud.com: submit a guest post now directly.
Now you have got a list of high quality blogs that accept guest posts.
We also accept guest posts! |
#!/usr/bin/env python3
"""latex2svg
Read LaTeX code from stdin and render a SVG using LaTeX + dvisvgm.
"""
__version__ = '0.1.0'
__author__ = 'Tino Wagner'
__email__ = '[email protected]'
__license__ = 'MIT'
__copyright__ = '(c) 2017, Tino Wagner'
import os
import sys
import subprocess
import shlex
import re
from tempfile import TemporaryDirectory
from ctypes.util import find_library
default_template = r"""
\documentclass[{{ fontsize }}pt,preview]{standalone}
{{ preamble }}
\begin{document}
\begin{preview}
{{ code }}
\end{preview}
\end{document}
"""
default_preamble = r"""
\usepackage[utf8x]{inputenc}
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{newtxtext}
\usepackage[libertine]{newtxmath}
"""
latex_cmd = 'latex -interaction nonstopmode -halt-on-error'
dvisvgm_cmd = 'dvisvgm --no-fonts'
default_params = {
'fontsize': 12, # pt
'template': default_template,
'preamble': default_preamble,
'latex_cmd': latex_cmd,
'dvisvgm_cmd': dvisvgm_cmd,
'libgs': None,
}
if 'LIBGS' not in os.environ and not find_library('gs'):
if sys.platform == 'darwin':
# Fallback to homebrew Ghostscript on macOS
homebrew_libgs = '/usr/local/opt/ghostscript/lib/libgs.dylib'
if os.path.exists(homebrew_libgs):
default_params['libgs'] = homebrew_libgs
if not default_params['libgs']:
print('Warning: libgs not found')
def latex2svg(code, params=default_params, working_directory=None):
"""Convert LaTeX to SVG using dvisvgm.
Parameters
----------
code : str
LaTeX code to render.
params : dict
Conversion parameters.
working_directory : str or None
Working directory for external commands and place for temporary files.
Returns
-------
dict
Dictionary of SVG output and output information:
* `svg`: SVG data
* `width`: image width in *em*
* `height`: image height in *em*
* `depth`: baseline position in *em*
"""
if working_directory is None:
with TemporaryDirectory() as tmpdir:
return latex2svg(code, params, working_directory=tmpdir)
fontsize = params['fontsize']
document = (params['template']
.replace('{{ preamble }}', params['preamble'])
.replace('{{ fontsize }}', str(fontsize))
.replace('{{ code }}', code))
with open(os.path.join(working_directory, 'code.tex'), 'w') as f:
f.write(document)
# Run LaTeX and create DVI file
try:
ret = subprocess.run(shlex.split(params['latex_cmd']+' code.tex'),
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=working_directory)
# LaTeX prints errors on stdout instead of stderr (stderr is empty),
# so print stdout instead
if ret.returncode: print(ret.stdout.decode('utf-8'))
ret.check_returncode()
except FileNotFoundError:
raise RuntimeError('latex not found')
# Add LIBGS to environment if supplied
env = os.environ.copy()
if params['libgs']:
env['LIBGS'] = params['libgs']
# Convert DVI to SVG
try:
ret = subprocess.run(shlex.split(params['dvisvgm_cmd']+' code.dvi'),
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=working_directory, env=env)
if ret.returncode: print(ret.stderr.decode('utf-8'))
ret.check_returncode()
except FileNotFoundError:
raise RuntimeError('dvisvgm not found')
with open(os.path.join(working_directory, 'code.svg'), 'r') as f:
svg = f.read()
# Parse dvisvgm output for size and alignment
def get_size(output):
regex = r'\b([0-9.]+)pt x ([0-9.]+)pt'
match = re.search(regex, output)
if match:
return (float(match.group(1)) / fontsize,
float(match.group(2)) / fontsize)
else:
return None, None
def get_measure(output, name):
regex = r'\b%s=([0-9.e-]+)pt' % name
match = re.search(regex, output)
if match:
return float(match.group(1)) / fontsize
else:
return None
output = ret.stderr.decode('utf-8')
width, height = get_size(output)
depth = get_measure(output, 'depth')
return {'svg': svg, 'depth': depth, 'width': width, 'height': height}
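# Hypothetical usage sketch (not part of the original module): rendering a single
# formula programmatically rather than through the command-line interface below.
# The output file name 'formula.svg' is an assumption, used for illustration only.
def _example_render_formula():
    out = latex2svg(r'\( \int_0^\infty e^{-x^2}\,dx = \frac{\sqrt{\pi}}{2} \)')
    with open('formula.svg', 'w') as f:
        f.write(out['svg'])
    # width, height and depth are reported in em units relative to the font size
    return out['width'], out['height'], out['depth']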
def main():
"""Simple command line interface to latex2svg.
- Read from `stdin`.
- Write SVG to `stdout`.
- Write metadata as JSON to `stderr`.
- On error: write error messages to `stdout` and return with error code.
"""
import json
import argparse
parser = argparse.ArgumentParser(description="""
Render LaTeX code from stdin as SVG to stdout. Writes metadata (baseline
position, width, height in em units) as JSON to stderr.
""")
parser.add_argument('--preamble',
help="LaTeX preamble code to read from file")
args = parser.parse_args()
preamble = default_preamble
if args.preamble is not None:
with open(args.preamble) as f:
preamble = f.read()
latex = sys.stdin.read()
try:
params = default_params.copy()
params['preamble'] = preamble
out = latex2svg(latex, params)
sys.stdout.write(out['svg'])
meta = {key: out[key] for key in out if key != 'svg'}
sys.stderr.write(json.dumps(meta))
except subprocess.CalledProcessError as exc:
print(exc.output.decode('utf-8'))
sys.exit(exc.returncode)
if __name__ == '__main__':
main()
|
Excel VBA 24-Hour Trainer, 2nd Edition is the quick-start guide to getting more out of Excel, using Visual Basic for Applications. This unique book/video package has been updated with fifteen new advanced video lessons, providing a total of eleven hours of video training and 45 total lessons to teach you the basics and beyond. This self-paced tutorial explains Excel VBA from the ground up, demonstrating with each advancing lesson how you can increase your productivity. Clear, concise, step-by-step instructions are combined with illustrations, code examples, and downloadable workbooks to give you a practical, in-depth learning experience and results that apply to real-world scenarios.
This is your comprehensive guide to becoming a true Excel power user, with multimedia instruction and plenty of hands-on practice.
If you're ready to get more out of this incredibly functional program, Excel VBA 24-Hour Trainer, 2nd Edition provides the expert instruction and fast, hands-on learning you need. |
#!/usr/bin/env python
"""
demo of measuring FPS performance with Matplotlib and OpenCV
i.e. how fast can I update an image plot
Example:
$ python FPS_matplotlib_image.py
matplotlib 3.0.2 imshow average FPS 27.66 over 100 frames.
matplotlib 3.0.2 pcolormesh average FPS 6.76 over 100 frames.
OpenCV 3.4.3 average FPS 226.59 over 100 frames.
Caveats:
1) I compiled OpenCV with OpenCL--it's possible cv2.imshow is using the GPU on my laptop (I haven't verified whether it does).
2) This is an average measurement, so it doesn't capture momentary slowdowns in the frame rate.
3) you must normalize your data on a [0,255] range for cv2.imshow
It's just a very simple comparison, showing OpenCV's huge FPS advantage
NOTE: we use pause(1e-3) as pause(1e-6) yields the same FPS, but doesn't give visible updates. A race condition in Matplotlib?
"""
import numpy as np
from numpy.random import rand
import matplotlib
from matplotlib.pyplot import figure, draw, pause, close
from time import time
from typing import Tuple
try:
import cv2
except ImportError:
cv2 = None
#
Nfps = 100
def randomimg(xy: Tuple[int, int]) -> np.ndarray:
"""
generate two image frames to toggle between
"""
return (rand(2, xy[0], xy[1]) * 255).astype(np.uint8)
def fpsmatplotlib_imshow(dat: np.ndarray):
fg = figure()
ax = fg.gca()
h = ax.imshow(dat[0, ...])
ax.set_title('imshow')
tic = time()
for i in range(Nfps):
h.set_data(dat[i % 2, ...])
draw(), pause(1e-3)
close(fg)
return Nfps / (time() - tic)
def fpsmatplotlib_pcolor(dat: np.ndarray):
fg = figure()
ax = fg.gca()
h = ax.pcolormesh(dat[0, ...])
ax.set_title('pcolormesh')
ax.autoscale(True, tight=True)
tic = time()
for i in range(Nfps):
h.set_array(dat[i % 2, ...].ravel())
draw(), pause(1e-3)
close(fg)
return Nfps / (time() - tic)
def fpsopencv(dat: np.ndarray):
tic = time()
for i in range(Nfps):
cv2.imshow('fpstest', dat[i % 2, ...])
cv2.waitKey(1) # integer milliseconds, 0 makes wait forever
cv2.destroyAllWindows()
return Nfps / (time() - tic)
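# Hypothetical helper (not part of the original demo) illustrating caveat 3 above:
# cv2.imshow expects uint8 data on a [0, 255] range, so float images should be
# rescaled before display. The min/max scaling choice here is an assumption.
def normalize_for_cv2(img: np.ndarray) -> np.ndarray:
    lo, hi = float(img.min()), float(img.max())
    scaled = (img - lo) / (hi - lo) if hi > lo else np.zeros_like(img, dtype=float)
    return (scaled * 255).astype(np.uint8)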
if __name__ == '__main__':
from argparse import ArgumentParser
p = ArgumentParser(description='measure FPS for rapidly updating plot with Matplotlib vs. OpenCV')
    p.add_argument('-p', '--xypixels', help='number of pixels for x and y', type=int, nargs=2, default=(512, 512))
P = p.parse_args()
dat = randomimg(P.xypixels)
fpsmat = fpsmatplotlib_imshow(dat)
print(f'matplotlib {matplotlib.__version__} imshow average FPS {fpsmat:.2f} over {Nfps} frames.')
fpsmat = fpsmatplotlib_pcolor(dat)
print(f'matplotlib {matplotlib.__version__} pcolormesh average FPS {fpsmat:.2f} over {Nfps} frames.')
if cv2:
fpscv = fpsopencv(dat)
print(f'OpenCV {cv2.__version__} average FPS {fpscv:.2f} over {Nfps} frames.')
|
The public shows “considerable lack of knowledge” about the risk associated with different types of tobacco products, researchers say. What people can benefit from is knowing the varying levels of risk associated with different tobacco products, according to public health researchers, who found that a large number of people aren’t aware of the differences.
A researcher is working to make everyday objects easier to maintain so they last longer and don’t end up in a landfill. His first such creation is an easy-to-clean hairbrush.
Physicists have used an X-ray free-electron laser — one of two in the world — to induce two X-ray photons to simultaneously collide with an atom, converting them into a single, higher-energy photon.
Research into 430,000-year-old fossils collected in northern Spain found that the evolution of the human body’s size and shape has gone through four main stages.
Men with relatively unaggressive prostate tumors and whose disease is carefully monitored by urologists are unlikely to develop metastatic prostate cancer or die of their cancers, according to results of a study that analyzed survival statistics up to 15 years.
Researchers from CSIRO and Imperial College London have assessed how widespread the threat of plastic is for the world’s seabirds, including albatrosses, shearwaters and penguins, and found the majority of seabird species have plastic in their gut. |
from contextlib import contextmanager
import linecache
import os
from io import StringIO
import sys
import unittest
import subprocess
from test import support
from test.script_helper import assert_python_ok
from test import warning_tests
import warnings as original_warnings
py_warnings = support.import_fresh_module('warnings', blocked=['_warnings'])
c_warnings = support.import_fresh_module('warnings', fresh=['_warnings'])
@contextmanager
def warnings_state(module):
"""Use a specific warnings implementation in warning_tests."""
global __warningregistry__
for to_clear in (sys, warning_tests):
try:
to_clear.__warningregistry__.clear()
except AttributeError:
pass
try:
__warningregistry__.clear()
except NameError:
pass
original_warnings = warning_tests.warnings
original_filters = module.filters
try:
module.filters = original_filters[:]
module.simplefilter("once")
warning_tests.warnings = module
yield
finally:
warning_tests.warnings = original_warnings
module.filters = original_filters
class BaseTest:
"""Basic bookkeeping required for testing."""
def setUp(self):
# The __warningregistry__ needs to be in a pristine state for tests
# to work properly.
if '__warningregistry__' in globals():
del globals()['__warningregistry__']
if hasattr(warning_tests, '__warningregistry__'):
del warning_tests.__warningregistry__
if hasattr(sys, '__warningregistry__'):
del sys.__warningregistry__
# The 'warnings' module must be explicitly set so that the proper
# interaction between _warnings and 'warnings' can be controlled.
sys.modules['warnings'] = self.module
super(BaseTest, self).setUp()
def tearDown(self):
sys.modules['warnings'] = original_warnings
super(BaseTest, self).tearDown()
class FilterTests(BaseTest):
"""Testing the filtering functionality."""
def test_error(self):
with original_warnings.catch_warnings(module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("error", category=UserWarning)
self.assertRaises(UserWarning, self.module.warn,
"FilterTests.test_error")
def test_ignore(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("ignore", category=UserWarning)
self.module.warn("FilterTests.test_ignore", UserWarning)
self.assertEqual(len(w), 0)
def test_always(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("always", category=UserWarning)
message = "FilterTests.test_always"
self.module.warn(message, UserWarning)
            self.assertEqual(str(w[-1].message), message)
            self.module.warn(message, UserWarning)
            self.assertEqual(str(w[-1].message), message)
def test_default(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("default", category=UserWarning)
message = UserWarning("FilterTests.test_default")
for x in range(2):
self.module.warn(message, UserWarning)
if x == 0:
self.assertEqual(w[-1].message, message)
del w[:]
elif x == 1:
self.assertEqual(len(w), 0)
else:
raise ValueError("loop variant unhandled")
def test_module(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("module", category=UserWarning)
message = UserWarning("FilterTests.test_module")
self.module.warn(message, UserWarning)
self.assertEqual(w[-1].message, message)
del w[:]
self.module.warn(message, UserWarning)
self.assertEqual(len(w), 0)
def test_once(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("once", category=UserWarning)
message = UserWarning("FilterTests.test_once")
self.module.warn_explicit(message, UserWarning, "test_warnings.py",
42)
self.assertEqual(w[-1].message, message)
del w[:]
self.module.warn_explicit(message, UserWarning, "test_warnings.py",
13)
self.assertEqual(len(w), 0)
self.module.warn_explicit(message, UserWarning, "test_warnings2.py",
42)
self.assertEqual(len(w), 0)
def test_inheritance(self):
with original_warnings.catch_warnings(module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("error", category=Warning)
self.assertRaises(UserWarning, self.module.warn,
"FilterTests.test_inheritance", UserWarning)
def test_ordering(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("ignore", category=UserWarning)
self.module.filterwarnings("error", category=UserWarning,
append=True)
del w[:]
try:
self.module.warn("FilterTests.test_ordering", UserWarning)
except UserWarning:
self.fail("order handling for actions failed")
self.assertEqual(len(w), 0)
def test_filterwarnings(self):
# Test filterwarnings().
# Implicitly also tests resetwarnings().
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.filterwarnings("error", "", Warning, "", 0)
self.assertRaises(UserWarning, self.module.warn, 'convert to error')
self.module.resetwarnings()
text = 'handle normally'
self.module.warn(text)
self.assertEqual(str(w[-1].message), text)
self.assertTrue(w[-1].category is UserWarning)
self.module.filterwarnings("ignore", "", Warning, "", 0)
text = 'filtered out'
self.module.warn(text)
self.assertNotEqual(str(w[-1].message), text)
self.module.resetwarnings()
self.module.filterwarnings("error", "hex*", Warning, "", 0)
self.assertRaises(UserWarning, self.module.warn, 'hex/oct')
text = 'nonmatching text'
self.module.warn(text)
self.assertEqual(str(w[-1].message), text)
self.assertTrue(w[-1].category is UserWarning)
class CFilterTests(FilterTests, unittest.TestCase):
module = c_warnings
class PyFilterTests(FilterTests, unittest.TestCase):
module = py_warnings
class WarnTests(BaseTest):
"""Test warnings.warn() and warnings.warn_explicit()."""
def test_message(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.simplefilter("once")
for i in range(4):
text = 'multi %d' %i # Different text on each call.
self.module.warn(text)
self.assertEqual(str(w[-1].message), text)
self.assertTrue(w[-1].category is UserWarning)
# Issue 3639
def test_warn_nonstandard_types(self):
# warn() should handle non-standard types without issue.
for ob in (Warning, None, 42):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.simplefilter("once")
self.module.warn(ob)
# Don't directly compare objects since
# ``Warning() != Warning()``.
self.assertEqual(str(w[-1].message), str(UserWarning(ob)))
def test_filename(self):
with warnings_state(self.module):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
warning_tests.inner("spam1")
self.assertEqual(os.path.basename(w[-1].filename),
"warning_tests.py")
warning_tests.outer("spam2")
self.assertEqual(os.path.basename(w[-1].filename),
"warning_tests.py")
def test_stacklevel(self):
# Test stacklevel argument
# make sure all messages are different, so the warning won't be skipped
with warnings_state(self.module):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
warning_tests.inner("spam3", stacklevel=1)
self.assertEqual(os.path.basename(w[-1].filename),
"warning_tests.py")
warning_tests.outer("spam4", stacklevel=1)
self.assertEqual(os.path.basename(w[-1].filename),
"warning_tests.py")
warning_tests.inner("spam5", stacklevel=2)
self.assertEqual(os.path.basename(w[-1].filename),
"test_warnings.py")
warning_tests.outer("spam6", stacklevel=2)
self.assertEqual(os.path.basename(w[-1].filename),
"warning_tests.py")
warning_tests.outer("spam6.5", stacklevel=3)
self.assertEqual(os.path.basename(w[-1].filename),
"test_warnings.py")
warning_tests.inner("spam7", stacklevel=9999)
self.assertEqual(os.path.basename(w[-1].filename),
"sys")
def test_missing_filename_not_main(self):
# If __file__ is not specified and __main__ is not the module name,
# then __file__ should be set to the module name.
filename = warning_tests.__file__
try:
del warning_tests.__file__
with warnings_state(self.module):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
warning_tests.inner("spam8", stacklevel=1)
self.assertEqual(w[-1].filename, warning_tests.__name__)
finally:
warning_tests.__file__ = filename
def test_missing_filename_main_with_argv(self):
# If __file__ is not specified and the caller is __main__ and sys.argv
# exists, then use sys.argv[0] as the file.
if not hasattr(sys, 'argv'):
return
filename = warning_tests.__file__
module_name = warning_tests.__name__
try:
del warning_tests.__file__
warning_tests.__name__ = '__main__'
with warnings_state(self.module):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
warning_tests.inner('spam9', stacklevel=1)
self.assertEqual(w[-1].filename, sys.argv[0])
finally:
warning_tests.__file__ = filename
warning_tests.__name__ = module_name
def test_missing_filename_main_without_argv(self):
# If __file__ is not specified, the caller is __main__, and sys.argv
# is not set, then '__main__' is the file name.
filename = warning_tests.__file__
module_name = warning_tests.__name__
argv = sys.argv
try:
del warning_tests.__file__
warning_tests.__name__ = '__main__'
del sys.argv
with warnings_state(self.module):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
warning_tests.inner('spam10', stacklevel=1)
self.assertEqual(w[-1].filename, '__main__')
finally:
warning_tests.__file__ = filename
warning_tests.__name__ = module_name
sys.argv = argv
def test_missing_filename_main_with_argv_empty_string(self):
# If __file__ is not specified, the caller is __main__, and sys.argv[0]
        # is the empty string, then '__main__' is the file name.
# Tests issue 2743.
file_name = warning_tests.__file__
module_name = warning_tests.__name__
argv = sys.argv
try:
del warning_tests.__file__
warning_tests.__name__ = '__main__'
sys.argv = ['']
with warnings_state(self.module):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
warning_tests.inner('spam11', stacklevel=1)
self.assertEqual(w[-1].filename, '__main__')
finally:
warning_tests.__file__ = file_name
warning_tests.__name__ = module_name
sys.argv = argv
def test_warn_explicit_type_errors(self):
# warn_explicit() should error out gracefully if it is given objects
# of the wrong types.
# lineno is expected to be an integer.
self.assertRaises(TypeError, self.module.warn_explicit,
None, UserWarning, None, None)
# Either 'message' needs to be an instance of Warning or 'category'
# needs to be a subclass.
self.assertRaises(TypeError, self.module.warn_explicit,
None, None, None, 1)
# 'registry' must be a dict or None.
self.assertRaises((TypeError, AttributeError),
self.module.warn_explicit,
None, Warning, None, 1, registry=42)
def test_bad_str(self):
# issue 6415
# Warnings instance with a bad format string for __str__ should not
# trigger a bus error.
class BadStrWarning(Warning):
"""Warning with a bad format string for __str__."""
def __str__(self):
return ("A bad formatted string %(err)" %
{"err" : "there is no %(err)s"})
with self.assertRaises(ValueError):
self.module.warn(BadStrWarning())
class CWarnTests(WarnTests, unittest.TestCase):
module = c_warnings
# As an early adopter, we sanity check the
# test.support.import_fresh_module utility function
def test_accelerated(self):
self.assertFalse(original_warnings is self.module)
self.assertFalse(hasattr(self.module.warn, '__code__'))
class PyWarnTests(WarnTests, unittest.TestCase):
module = py_warnings
# As an early adopter, we sanity check the
# test.support.import_fresh_module utility function
def test_pure_python(self):
self.assertFalse(original_warnings is self.module)
self.assertTrue(hasattr(self.module.warn, '__code__'))
class WCmdLineTests(BaseTest):
def test_improper_input(self):
# Uses the private _setoption() function to test the parsing
# of command-line warning arguments
with original_warnings.catch_warnings(module=self.module):
self.assertRaises(self.module._OptionError,
self.module._setoption, '1:2:3:4:5:6')
self.assertRaises(self.module._OptionError,
self.module._setoption, 'bogus::Warning')
self.assertRaises(self.module._OptionError,
self.module._setoption, 'ignore:2::4:-5')
self.module._setoption('error::Warning::0')
self.assertRaises(UserWarning, self.module.warn, 'convert to error')
def test_improper_option(self):
# Same as above, but check that the message is printed out when
# the interpreter is executed. This also checks that options are
# actually parsed at all.
rc, out, err = assert_python_ok("-Wxxx", "-c", "pass")
self.assertIn(b"Invalid -W option ignored: invalid action: 'xxx'", err)
def test_warnings_bootstrap(self):
# Check that the warnings module does get loaded when -W<some option>
# is used (see issue #10372 for an example of silent bootstrap failure).
rc, out, err = assert_python_ok("-Wi", "-c",
"import sys; sys.modules['warnings'].warn('foo', RuntimeWarning)")
# '-Wi' was observed
self.assertFalse(out.strip())
self.assertNotIn(b'RuntimeWarning', err)
class CWCmdLineTests(WCmdLineTests, unittest.TestCase):
module = c_warnings
class PyWCmdLineTests(WCmdLineTests, unittest.TestCase):
module = py_warnings
class _WarningsTests(BaseTest, unittest.TestCase):
"""Tests specific to the _warnings module."""
module = c_warnings
def test_filter(self):
# Everything should function even if 'filters' is not in warnings.
with original_warnings.catch_warnings(module=self.module) as w:
self.module.filterwarnings("error", "", Warning, "", 0)
self.assertRaises(UserWarning, self.module.warn,
'convert to error')
del self.module.filters
self.assertRaises(UserWarning, self.module.warn,
'convert to error')
def test_onceregistry(self):
# Replacing or removing the onceregistry should be okay.
global __warningregistry__
message = UserWarning('onceregistry test')
try:
original_registry = self.module.onceregistry
__warningregistry__ = {}
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("once", category=UserWarning)
self.module.warn_explicit(message, UserWarning, "file", 42)
self.assertEqual(w[-1].message, message)
del w[:]
self.module.warn_explicit(message, UserWarning, "file", 42)
self.assertEqual(len(w), 0)
# Test the resetting of onceregistry.
self.module.onceregistry = {}
__warningregistry__ = {}
self.module.warn('onceregistry test')
self.assertEqual(w[-1].message.args, message.args)
# Removal of onceregistry is okay.
del w[:]
del self.module.onceregistry
__warningregistry__ = {}
self.module.warn_explicit(message, UserWarning, "file", 42)
self.assertEqual(len(w), 0)
finally:
self.module.onceregistry = original_registry
def test_default_action(self):
# Replacing or removing defaultaction should be okay.
message = UserWarning("defaultaction test")
original = self.module.defaultaction
try:
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
registry = {}
self.module.warn_explicit(message, UserWarning, "<test>", 42,
registry=registry)
self.assertEqual(w[-1].message, message)
self.assertEqual(len(w), 1)
self.assertEqual(len(registry), 1)
del w[:]
# Test removal.
del self.module.defaultaction
__warningregistry__ = {}
registry = {}
self.module.warn_explicit(message, UserWarning, "<test>", 43,
registry=registry)
self.assertEqual(w[-1].message, message)
self.assertEqual(len(w), 1)
self.assertEqual(len(registry), 1)
del w[:]
# Test setting.
self.module.defaultaction = "ignore"
__warningregistry__ = {}
registry = {}
self.module.warn_explicit(message, UserWarning, "<test>", 44,
registry=registry)
self.assertEqual(len(w), 0)
finally:
self.module.defaultaction = original
def test_showwarning_missing(self):
# Test that showwarning() missing is okay.
text = 'del showwarning test'
with original_warnings.catch_warnings(module=self.module):
self.module.filterwarnings("always", category=UserWarning)
del self.module.showwarning
with support.captured_output('stderr') as stream:
self.module.warn(text)
result = stream.getvalue()
self.assertIn(text, result)
def test_showwarning_not_callable(self):
with original_warnings.catch_warnings(module=self.module):
self.module.filterwarnings("always", category=UserWarning)
self.module.showwarning = print
with support.captured_output('stdout'):
self.module.warn('Warning!')
self.module.showwarning = 23
self.assertRaises(TypeError, self.module.warn, "Warning!")
def test_show_warning_output(self):
        # With showwarning() missing, make sure that output is okay.
text = 'test show_warning'
with original_warnings.catch_warnings(module=self.module):
self.module.filterwarnings("always", category=UserWarning)
del self.module.showwarning
with support.captured_output('stderr') as stream:
warning_tests.inner(text)
result = stream.getvalue()
self.assertEqual(result.count('\n'), 2,
"Too many newlines in %r" % result)
first_line, second_line = result.split('\n', 1)
expected_file = os.path.splitext(warning_tests.__file__)[0] + '.py'
first_line_parts = first_line.rsplit(':', 3)
path, line, warning_class, message = first_line_parts
line = int(line)
self.assertEqual(expected_file, path)
self.assertEqual(warning_class, ' ' + UserWarning.__name__)
self.assertEqual(message, ' ' + text)
expected_line = ' ' + linecache.getline(path, line).strip() + '\n'
assert expected_line
self.assertEqual(second_line, expected_line)
def test_filename_none(self):
# issue #12467: race condition if a warning is emitted at shutdown
globals_dict = globals()
oldfile = globals_dict['__file__']
try:
catch = original_warnings.catch_warnings(record=True,
module=self.module)
with catch as w:
self.module.filterwarnings("always", category=UserWarning)
globals_dict['__file__'] = None
original_warnings.warn('test', UserWarning)
self.assertTrue(len(w))
finally:
globals_dict['__file__'] = oldfile
class WarningsDisplayTests(BaseTest):
"""Test the displaying of warnings and the ability to overload functions
related to displaying warnings."""
def test_formatwarning(self):
message = "msg"
category = Warning
file_name = os.path.splitext(warning_tests.__file__)[0] + '.py'
line_num = 3
file_line = linecache.getline(file_name, line_num).strip()
format = "%s:%s: %s: %s\n %s\n"
expect = format % (file_name, line_num, category.__name__, message,
file_line)
self.assertEqual(expect, self.module.formatwarning(message,
category, file_name, line_num))
# Test the 'line' argument.
file_line += " for the win!"
expect = format % (file_name, line_num, category.__name__, message,
file_line)
self.assertEqual(expect, self.module.formatwarning(message,
category, file_name, line_num, file_line))
def test_showwarning(self):
file_name = os.path.splitext(warning_tests.__file__)[0] + '.py'
line_num = 3
expected_file_line = linecache.getline(file_name, line_num).strip()
message = 'msg'
category = Warning
file_object = StringIO()
expect = self.module.formatwarning(message, category, file_name,
line_num)
self.module.showwarning(message, category, file_name, line_num,
file_object)
self.assertEqual(file_object.getvalue(), expect)
# Test 'line' argument.
expected_file_line += "for the win!"
expect = self.module.formatwarning(message, category, file_name,
line_num, expected_file_line)
file_object = StringIO()
self.module.showwarning(message, category, file_name, line_num,
file_object, expected_file_line)
self.assertEqual(expect, file_object.getvalue())
class CWarningsDisplayTests(WarningsDisplayTests, unittest.TestCase):
module = c_warnings
class PyWarningsDisplayTests(WarningsDisplayTests, unittest.TestCase):
module = py_warnings
class CatchWarningTests(BaseTest):
"""Test catch_warnings()."""
def test_catch_warnings_restore(self):
wmod = self.module
orig_filters = wmod.filters
orig_showwarning = wmod.showwarning
# Ensure both showwarning and filters are restored when recording
with wmod.catch_warnings(module=wmod, record=True):
wmod.filters = wmod.showwarning = object()
self.assertTrue(wmod.filters is orig_filters)
self.assertTrue(wmod.showwarning is orig_showwarning)
# Same test, but with recording disabled
with wmod.catch_warnings(module=wmod, record=False):
wmod.filters = wmod.showwarning = object()
self.assertTrue(wmod.filters is orig_filters)
self.assertTrue(wmod.showwarning is orig_showwarning)
def test_catch_warnings_recording(self):
wmod = self.module
# Ensure warnings are recorded when requested
with wmod.catch_warnings(module=wmod, record=True) as w:
self.assertEqual(w, [])
self.assertTrue(type(w) is list)
wmod.simplefilter("always")
wmod.warn("foo")
self.assertEqual(str(w[-1].message), "foo")
wmod.warn("bar")
self.assertEqual(str(w[-1].message), "bar")
self.assertEqual(str(w[0].message), "foo")
self.assertEqual(str(w[1].message), "bar")
del w[:]
self.assertEqual(w, [])
# Ensure warnings are not recorded when not requested
orig_showwarning = wmod.showwarning
with wmod.catch_warnings(module=wmod, record=False) as w:
self.assertTrue(w is None)
self.assertTrue(wmod.showwarning is orig_showwarning)
def test_catch_warnings_reentry_guard(self):
wmod = self.module
# Ensure catch_warnings is protected against incorrect usage
x = wmod.catch_warnings(module=wmod, record=True)
self.assertRaises(RuntimeError, x.__exit__)
with x:
self.assertRaises(RuntimeError, x.__enter__)
# Same test, but with recording disabled
x = wmod.catch_warnings(module=wmod, record=False)
self.assertRaises(RuntimeError, x.__exit__)
with x:
self.assertRaises(RuntimeError, x.__enter__)
def test_catch_warnings_defaults(self):
wmod = self.module
orig_filters = wmod.filters
orig_showwarning = wmod.showwarning
# Ensure default behaviour is not to record warnings
with wmod.catch_warnings(module=wmod) as w:
self.assertTrue(w is None)
self.assertTrue(wmod.showwarning is orig_showwarning)
self.assertTrue(wmod.filters is not orig_filters)
self.assertTrue(wmod.filters is orig_filters)
if wmod is sys.modules['warnings']:
# Ensure the default module is this one
with wmod.catch_warnings() as w:
self.assertTrue(w is None)
self.assertTrue(wmod.showwarning is orig_showwarning)
self.assertTrue(wmod.filters is not orig_filters)
self.assertTrue(wmod.filters is orig_filters)
def test_check_warnings(self):
# Explicit tests for the test.support convenience wrapper
wmod = self.module
if wmod is not sys.modules['warnings']:
return
with support.check_warnings(quiet=False) as w:
self.assertEqual(w.warnings, [])
wmod.simplefilter("always")
wmod.warn("foo")
self.assertEqual(str(w.message), "foo")
wmod.warn("bar")
self.assertEqual(str(w.message), "bar")
self.assertEqual(str(w.warnings[0].message), "foo")
self.assertEqual(str(w.warnings[1].message), "bar")
w.reset()
self.assertEqual(w.warnings, [])
with support.check_warnings():
# defaults to quiet=True without argument
pass
with support.check_warnings(('foo', UserWarning)):
wmod.warn("foo")
with self.assertRaises(AssertionError):
with support.check_warnings(('', RuntimeWarning)):
# defaults to quiet=False with argument
pass
with self.assertRaises(AssertionError):
with support.check_warnings(('foo', RuntimeWarning)):
wmod.warn("foo")
class CCatchWarningTests(CatchWarningTests, unittest.TestCase):
module = c_warnings
class PyCatchWarningTests(CatchWarningTests, unittest.TestCase):
module = py_warnings
class EnvironmentVariableTests(BaseTest):
def test_single_warning(self):
newenv = os.environ.copy()
newenv["PYTHONWARNINGS"] = "ignore::DeprecationWarning"
p = subprocess.Popen([sys.executable,
"-c", "import sys; sys.stdout.write(str(sys.warnoptions))"],
stdout=subprocess.PIPE, env=newenv)
self.assertEqual(p.communicate()[0], b"['ignore::DeprecationWarning']")
self.assertEqual(p.wait(), 0)
def test_comma_separated_warnings(self):
newenv = os.environ.copy()
newenv["PYTHONWARNINGS"] = ("ignore::DeprecationWarning,"
"ignore::UnicodeWarning")
p = subprocess.Popen([sys.executable,
"-c", "import sys; sys.stdout.write(str(sys.warnoptions))"],
stdout=subprocess.PIPE, env=newenv)
self.assertEqual(p.communicate()[0],
b"['ignore::DeprecationWarning', 'ignore::UnicodeWarning']")
self.assertEqual(p.wait(), 0)
def test_envvar_and_command_line(self):
newenv = os.environ.copy()
newenv["PYTHONWARNINGS"] = "ignore::DeprecationWarning"
p = subprocess.Popen([sys.executable, "-W" "ignore::UnicodeWarning",
"-c", "import sys; sys.stdout.write(str(sys.warnoptions))"],
stdout=subprocess.PIPE, env=newenv)
self.assertEqual(p.communicate()[0],
b"['ignore::UnicodeWarning', 'ignore::DeprecationWarning']")
self.assertEqual(p.wait(), 0)
@unittest.skipUnless(sys.getfilesystemencoding() != 'ascii',
'requires non-ascii filesystemencoding')
def test_nonascii(self):
newenv = os.environ.copy()
newenv["PYTHONWARNINGS"] = "ignore:DeprecaciónWarning"
newenv["PYTHONIOENCODING"] = "utf-8"
p = subprocess.Popen([sys.executable,
"-c", "import sys; sys.stdout.write(str(sys.warnoptions))"],
stdout=subprocess.PIPE, env=newenv)
self.assertEqual(p.communicate()[0],
"['ignore:DeprecaciónWarning']".encode('utf-8'))
self.assertEqual(p.wait(), 0)
class CEnvironmentVariableTests(EnvironmentVariableTests, unittest.TestCase):
module = c_warnings
class PyEnvironmentVariableTests(EnvironmentVariableTests, unittest.TestCase):
module = py_warnings
class BootstrapTest(unittest.TestCase):
def test_issue_8766(self):
# "import encodings" emits a warning whereas the warnings is not loaded
# or not completely loaded (warnings imports indirectly encodings by
# importing linecache) yet
with support.temp_cwd() as cwd, support.temp_cwd('encodings'):
env = os.environ.copy()
env['PYTHONPATH'] = cwd
# encodings loaded by initfsencoding()
retcode = subprocess.call([sys.executable, '-c', 'pass'], env=env)
self.assertEqual(retcode, 0)
# Use -W to load warnings module at startup
retcode = subprocess.call(
[sys.executable, '-c', 'pass', '-W', 'always'],
env=env)
self.assertEqual(retcode, 0)
def setUpModule():
py_warnings.onceregistry.clear()
c_warnings.onceregistry.clear()
tearDownModule = setUpModule
if __name__ == "__main__":
unittest.main()
|
Note: If you’re one of the 2 remaining human beings who don’t know that Spock died at the end of Star Trek II: The Wrath of Khan, I make no apologies for revealing the ending, as you shouldn’t be watching the sequel first anyway.
Picking up where part II left off, the crew of the Starship Enterprise has left the body of Spock on the regenerative planet Genesis, and returned home in their battle-scarred ship. On Earth, Spock’s father informs Admiral Kirk (William Shatner) that as a Vulcan, Spock transferred his soul into the mind of a crew-member prior to putting himself into the situation that could kill him. They soon find Spock put his soul inside the ship’s Dr. McCoy (Deforest Kelley).
Kirk, McCoy, and the rest of the command crew defy orders and hijack the Enterprise in a bid to retrieve Spock's body and transfer his soul back into it.
Christopher Lloyd managed a level of sadistic villainy equal to that of Khan, if not necessarily as intellectual. There is a good deal of pathos in the story of Spock’s resurrection, which addresses themes on the nature of mortality, and whether the life of one man is worth risking the lives of others.
By themselves, either of the film’s chief stories might have made for an interesting episode of the series. However, there is a drastic contrast between the Klingon subplot and the story of saving Spock. While it is admirable that they tried to merge the commercial and artier aspects of the series, the two just didn’t fit very well together. What is ultimately left over is a series of passionless space battles and a two hour long bromance.
There is a distinctly dark feel to Star Trek III. This hateful tone is a direct contrast with the rest of the Trek series, which has always aspired to be hopeful. The film seems like it was supposed to end on a positive note, but it makes clear that the efforts of Kirk and the crew to bring Spock back resulted in the deaths of multiple people and the destruction of the Enterprise itself. Indeed, the depressing mood this creates feels like it crippled the momentum of the series, and it would take a sequel verging on parody to compensate.
A movie stuck between wanting to tell a new story, and tie up the loose ends of the prior film.
Take a Drink: for each stage of life soulless Spock goes through before they get him off the Genesis Planet.
That pic you posted with the caption “Hello computer” is from Star Trek IV. |
#! /usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import fcntl
import os
import sys
class FileLockAcquireException(Exception):
pass
class FileLockReleaseException(Exception):
pass
class FileLock(object):
""" This class implements a global file lock that can be used as a
a mutex between cooperating processes.
NOTE: the present implementation's behavior is undefined when multiple
  threads may try to acquire a lock on the same file.
"""
def __init__(self, filePath):
"""
Parameters:
------------------------------------------------------------------------
filePath: Path to a file to be used for locking; The file MUST already exist.
"""
assert os.path.isabs(filePath), "not absolute path: %r" % filePath
assert os.path.isfile(filePath), (
"not a file or doesn't exist: %r" % filePath)
self.__filePath = filePath
self.__fp = open(self.__filePath, "r")
self.__fd = self.__fp.fileno()
return
def __enter__(self):
""" Context Manager protocol method. Allows a FileLock instance to be
used in a "with" statement for automatic acquire/release
Parameters:
------------------------------------------------------------------------
retval: self.
"""
self.acquire()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
""" Context Manager protocol method. Allows a FileLock instance to be
used in a "with" statement for automatic acquire/release
"""
self.release()
return False
def acquire(self):
""" Acquire global lock
exception: FileLockAcquireException on failure
"""
try:
fcntl.flock(self.__fd, fcntl.LOCK_EX)
except Exception, e:
e = FileLockAcquireException(
"FileLock acquire failed on %r" % (self.__filePath), e)
raise e, None, sys.exc_info()[2]
return
def release(self):
""" Release global lock
"""
try:
fcntl.flock(self.__fd, fcntl.LOCK_UN)
except Exception, e:
e = FileLockReleaseException(
"FileLock release failed on %r" % (self.__filePath), e)
raise e, None, sys.exc_info()[2]
return |
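# Hypothetical usage sketch (not part of the original module): serializing a
# critical section between cooperating processes. The lock file path below is an
# assumption; per FileLock.__init__, the file must already exist.
def _exampleUsage():
  with FileLock("/tmp/example.lock"):
    # Only one process at a time executes this block; the lock is released
    # automatically when the "with" block exits.
    pass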
A Beautiful Vintage County map of Derbyshire. A reproduction of the hand drawn map by the 17th Century cartographer John Speed. Set within a decorative border and surrounded by intricate illustrations including a plan of Derby. |
import urllib.request
import zipfile
import os
import time
def download_file(src_file_url, dst_file_path, max_retry=10):
print("Downloading file %s..." % dst_file_path)
def progress_reporter(num_chunks_read, chunk_size, total_size):
read_so_far = num_chunks_read * chunk_size
print("\r - Downloaded: %d MB -" % (read_so_far / (1 << 20)), flush=True, end="")
num_retry = 0
while True:
try:
urllib.request.urlretrieve(src_file_url, dst_file_path, progress_reporter)
except Exception as e:
print(e)
if num_retry < max_retry:
print("Error downloading file, remaining trials: %d" % (max_retry - num_retry))
print("Download will restart after 30 secs")
time.sleep(30)
num_retry = num_retry + 1
continue
else:
print("Download failed.")
break
else:
print("\nDownload completed.")
break
def download_zipfile_and_extract(src_zipfile_url, dst_directory):
temp_dst_file_path = os.path.join(dst_directory, "__temp__" + str(time.time()))
download_file(src_zipfile_url, temp_dst_file_path)
print("Extracting file...")
# extract zipped resource folder
zip_file = zipfile.ZipFile(temp_dst_file_path, "r")
zip_file.extractall(dst_directory)
zip_file.close()
print("Deleting temporary file %s." % temp_dst_file_path)
# delete zipped resource file
os.remove(temp_dst_file_path)
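# Hypothetical usage sketch (not part of the original module): fetching a zipped
# resource and unpacking it into a local "data" directory. The URL and target
# directory are assumptions, used for illustration only.
def _example_download():
    os.makedirs("data", exist_ok=True)
    download_zipfile_and_extract("https://example.com/resource.zip", "data")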
|
With spirituality placed at the centre of the person, the Canadian Model of Occupational Performance has been a source of debate on the definition of spirituality and the centrality of the concept (CAOT, 2002). This research used a qualitative research approach, the grounded theory method of Glaser and Strauss (1967) and Strauss (1987), to explore the process by which meaning is given to occupation by community-dwelling older individuals with loss of physical autonomy. The concept of spirituality from their perspective was also explored. Eight study participants, selected for their level of loss of autonomy as well as the absence of cognitive deficits, were recruited from the three publicly-funded community support service providers in the Eastern Townships of Quebec. Data, in the form of verbatim transcripts, were gathered using in-depth, one-on-one, semi-directed interviews. This study also proposes a definition of spirituality that reflects belief in a higher power and the close cultural link between spirituality and religion, as seen from the perspective of community-dwelling older adults with loss of autonomy."--Résumé abrégé par UMI. |
import json
import time
import unittest
import mock
NOW = time.time()
NOW2 = time.time() + 42 * 1000
TABLE_NAME = 'Table-1'
TABLE_RT = 45
TABLE_WT = 123
TABLE_RT2 = 10
TABLE_WT2 = 10
TABLE_HK_NAME = u'hash_key'
TABLE_HK_TYPE = u'N'
TABLE_RK_NAME = u'range_key'
TABLE_RK_TYPE = u'S'
HEADERS = {
'x-amz-target': 'dynamodb_20111205.UpdateTable',
'content-type': 'application/x-amz-json-1.0',
}
# Goal here is not to test the full API, this is done by the Boto tests
class TestUpdateTable(unittest.TestCase):
@mock.patch("ddbmock.database.table.time") # Brrr
def setUp(self, m_time):
from ddbmock.database.db import dynamodb
from ddbmock.database.table import Table
from ddbmock.database.key import PrimaryKey
from ddbmock import main
app = main({})
from webtest import TestApp
self.app = TestApp(app)
m_time.time.return_value = NOW
dynamodb.hard_reset()
hash_key = PrimaryKey(TABLE_HK_NAME, TABLE_HK_TYPE)
range_key = PrimaryKey(TABLE_RK_NAME, TABLE_RK_TYPE)
t1 = Table(TABLE_NAME, TABLE_RT, TABLE_WT, hash_key, range_key,
status="ACTIVE")
dynamodb.data[TABLE_NAME] = t1
def tearDown(self):
from ddbmock.database.db import dynamodb
dynamodb.hard_reset()
@mock.patch("ddbmock.database.table.time")
def test_update(self, m_time):
from ddbmock.database.db import dynamodb
m_time.time.return_value = NOW2
self.maxDiff = None
request = {
"TableName": TABLE_NAME,
"ProvisionedThroughput": {
"ReadCapacityUnits": TABLE_RT2,
"WriteCapacityUnits": TABLE_WT2,
},
}
expected = {
u'TableDescription': {
u'CreationDateTime': NOW,
u'ItemCount': 0,
u'KeySchema': {
u'HashKeyElement': {
u'AttributeName': u'hash_key',
u'AttributeType': u'N',
},
u'RangeKeyElement': {
u'AttributeName': u'range_key',
u'AttributeType': u'S',
},
},
u'ProvisionedThroughput': {
u'LastDecreaseDateTime': NOW2,
u'ReadCapacityUnits': TABLE_RT2,
u'WriteCapacityUnits': TABLE_WT2,
},
u'TableName': TABLE_NAME,
u'TableSizeBytes': 0,
u'TableStatus': u'UPDATING'
}
}
# Protocol check
res = self.app.post_json('/', request, headers=HEADERS, status=200)
self.assertEqual(expected, json.loads(res.body))
self.assertEqual('application/x-amz-json-1.0; charset=UTF-8',
res.headers['Content-Type'])
# Live data check
data = dynamodb.data
assert TABLE_NAME in data
table = data[TABLE_NAME]
self.assertEqual(TABLE_RT2, table.rt)
self.assertEqual(TABLE_WT2, table.wt)
|
When you talk about justice you should talk about Haiti first, starting with Jean Claude Duvalier, instead of going to faraway places such as Egypt, Tunisia and Liberia. But with my superior intelligence I decode your thought processes: I think you mean Libya, Yemen, Syria, Iraq and Sierra Leone, which are far removed from Haitian realities.
I know you have Attention Deficit Disorder with Hyperactivity, because Micky Candy Baby's cracks burned the cells of your brain, but I have the necessary skills to keep you focused.
Jean Claude Duvalier had severe learning disabilities, according to his teachers at St Louis de Gonzague and College Bird, and to his classmates, associates, friends and informal teachers.
Jean Claude's passions were fast cars, women and music.
He tried to learn music without success because of his learning disabilities.
While he was at the national palace, he purchased several expensive cars at the taxpayers' expense.
He used to close the schools whenever it was the anniversary of his Jaguar, Cadillac, Mercedes Benz, Bentley, Ferrari and his red Corvair.
When he left Haiti, he carried over 800,000,000 in cash in his suitcase, plus one billion he spread among his friends in Haiti to manage for him. One of his associates, Luckner Cambronne, boasted that he was rich for the next 10 generations because he used to sell Haitian blood and cadavers to American medical schools, and to sell Haitians as slaves to the Dominican Republic (refer to Haiti Observateur for more zin). Little is known about how much money his sisters stole from the Haitian treasury and how much they generously gave to the military men there while they were sampling or trying their koks. We know Michelle Bennet was doing the same thing, and sometimes they had the same men (refer to Haiti Observateur).
So if you want to start with justice, start with the Duvalier regime and its 38,000 miliciens, who committed atrocities never before seen in the entire Caribbean region.
When you finish judging the macoutes, start with the corps of leopards.
I am sure you will not contemplate such action, because your family members used to commit atrocities in the above organizations; you would rather play the game of reconciliation when it comes to the Duvaliers.
The Duvalier atrocities have resonated and been recorded throughout the world.
I am asking all the people that live in Haiti and around the world to submit their stories to commandante 12 at yahoo.
Aristide and Preval committed no known crimes.
Please leave Aristide and Preval alone or we will get you for the crimes you committed during the Duvalier era. |
# -*- coding: utf-8 -*-
import posixpath
import urllib
from .compat import urlparse
from .six import text_type, u
class Root(object):
"""A descriptor which always returns the root path."""
def __get__(self, instance, cls):
return cls('/')
class URLPath(text_type):
root = Root()
def __repr__(self):
return u('URLPath(%r)') % (text_type(self),)
@classmethod
def join_segments(cls, segments, absolute=True):
"""Create a :class:`URLPath` from an iterable of segments."""
if absolute:
path = cls('/')
else:
path = cls('')
for segment in segments:
path = path.add_segment(segment)
return path
@property
def segments(self):
"""
Split this path into (decoded) segments.
>>> URLPath('/a/b/c').segments
('a', 'b', 'c')
Non-leaf nodes will have a trailing empty string, and percent encodes
will be decoded:
>>> URLPath('/a%20b/c%20d/').segments
('a b', 'c d', '')
"""
segments = tuple(map(path_decode, self.split('/')))
if segments[0] == '':
return segments[1:]
return segments
@property
def parent(self):
"""
The parent of this node.
>>> URLPath('/a/b/c').parent
URLPath('/a/b/')
>>> URLPath('/foo/bar/').parent
URLPath('/foo/')
"""
if self.is_leaf:
return self.relative('.')
return self.relative('..')
@property
def is_leaf(self):
"""
Is this path a leaf node?
>>> URLPath('/a/b/c').is_leaf
True
>>> URLPath('/a/b/').is_leaf
False
"""
return self and self.segments[-1] != '' or False
@property
def is_relative(self):
"""
Is this path relative?
>>> URLPath('a/b/c').is_relative
True
>>> URLPath('/a/b/c').is_relative
False
"""
return self[0] != '/'
@property
def is_absolute(self):
"""
Is this path absolute?
>>> URLPath('a/b/c').is_absolute
False
>>> URLPath('/a/b/c').is_absolute
True
"""
return self[0] == '/'
def relative(self, rel_path):
"""
Resolve a relative path against this one.
>>> URLPath('/a/b/c').relative('.')
URLPath('/a/b/')
>>> URLPath('/a/b/c').relative('d')
URLPath('/a/b/d')
>>> URLPath('/a/b/c').relative('../d')
URLPath('/a/d')
"""
return type(self)(urlparse.urljoin(self, rel_path))
def add_segment(self, segment):
"""
Add a segment to this path.
>>> URLPath('/a/b/').add_segment('c')
URLPath('/a/b/c')
Non-ASCII and reserved characters (including slashes) will be encoded:
>>> URLPath('/a/b/').add_segment('dé/f')
URLPath('/a/b/d%C3%A9%2Ff')
"""
return type(self)(posixpath.join(self, path_encode(segment)))
def add(self, path):
"""
Add a partial path to this one.
The only difference between this and :meth:`add_segment` is that slash
characters will not be encoded, making it suitable for adding more than
one path segment at a time:
>>> URLPath('/a/b/').add('dé/f/g')
URLPath('/a/b/d%C3%A9/f/g')
"""
return type(self)(posixpath.join(self, path_encode(path, safe='/')))
def _path_encode_py2(s, safe=''):
"""Quote unicode or str using path rules."""
if isinstance(s, unicode):
s = s.encode('utf-8')
if isinstance(safe, unicode):
safe = safe.encode('utf-8')
return urllib.quote(s, safe=safe).decode('utf-8')
def _path_encode_py3(s, safe=''):
"""Quote str or bytes using path rules."""
# s can be bytes or unicode, urllib.parse.quote() assumes
# utf-8 if encoding is necessary.
return urlparse.quote(s, safe=safe)
def _path_decode_py2(s):
"""Unquote unicode or str using path rules."""
if isinstance(s, unicode):
s = s.encode('utf-8')
return urllib.unquote(s).decode('utf-8')
def _path_decode_py3(s):
"""Unquote str or bytes using path rules."""
if isinstance(s, bytes):
s = s.decode('utf-8')
return urlparse.unquote(s)
if hasattr(urllib, 'quote'):
path_encode = _path_encode_py2
path_decode = _path_decode_py2
del _path_encode_py3
del _path_decode_py3
else:
path_encode = _path_encode_py3
path_decode = _path_decode_py3
del _path_encode_py2
del _path_decode_py2
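# Hypothetical usage sketch (not part of the original module): building a path
# from segments and navigating it. The values in the comments follow the
# behaviour documented in the docstrings above.
def _example_urlpath():
    path = URLPath.join_segments(['a', 'b'])   # URLPath('/a/b')
    child = path.add_segment('c d')            # URLPath('/a/b/c%20d')
    return child.parent                        # URLPath('/a/b/')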
|
Carrie Sheffield is National Editor for Accuracy in Media.
3/26/2019 - Smollett Walks Free – Will Media Ask the Real Questions Now?
12/3/2018 - NPR Story Forced to Issue Correction After Publishing False Information on Trump Jr.
7/25/2018 - Trump’s Hollywood Walk of Fame Star Destroyed Again — Will Media Hold the Culprit Accountable? |
from django.db import models
from django.contrib.auth.models import User
from consts import KEY_SIZE, SECRET_SIZE
# lou w - removed any references to Resource
class ConsumerManager(models.Manager):
def create_consumer(self, name, user=None):
"""Shortcut to create a consumer with random key/secret."""
consumer, created = self.get_or_create(name=name)
if user is not None:
consumer.user = user
if created:
consumer.generate_random_codes()
return consumer
_default_consumer = None
def get_default_consumer(self, name):
"""Add cache if you use a default consumer."""
if self._default_consumer is None:
self._default_consumer = self.get(name=name)
return self._default_consumer
# lou w -renamed resource to scope
class TokenManager(models.Manager):
def create_token(self, consumer, token_type, timestamp, scope,
user=None, callback=None, callback_confirmed=False, lrs_auth_id=None):
"""Shortcut to create a token with random key/secret."""
token, created = self.get_or_create(consumer=consumer,
token_type=token_type,
timestamp=timestamp,
scope=scope,
user=user,
callback=callback,
callback_confirmed=callback_confirmed,
lrs_auth_id=lrs_auth_id)
if created:
token.generate_random_codes()
return token
|
New South Wales special commissioner Peter Garling has prescribed a "one-off injection" to remedy a system bogged down by, among other things, paper and technical inadequacies.
New South Wales special commissioner Peter Garling has prescribed an aggressive A$704 million (US$461 million) investment strategy to cure NSW Health's sick information technology systems, in a landmark review published late yesterday.
"What currently exists is a largely paper-based system with significant variation from clinician to clinician, ward to ward and hospital to hospital," Garling said in his comprehensive three-volume report on NSW's health system released yesterday.
"Whilst much of the work undertaken in NSW public hospitals is "high tech", its record-keeping system is a relic of the pre-computer age," he said. NSW Health's IT operations are in general led by departmental chief information officer Mike Rillstone, although each area's health service also has its own CIO.
The commissioner has recommended a "one-off injection" of A$704 million (US$461 million) to remedy a system that had been bogged down not just by paper, inconsistent documentation and illegible handwriting, but also substandard hardware, incompatible software and inadequate broadband connectivity.
"In my view, insufficient funds are allocated to information technology to get it up to the standard needed in an acceptable time frame," Garling said.
"Far more aggressive targets are necessary and must be set than those currently in place," he said.
The proposed four-year investment strategy, on top of the A$315.5 million (US$206 million) already planned, which he hopes will be rolled out by 2016, includes amongst other things an additional A$144 million (US$94 million) for a statewide rollout of an e-health record system; A$155 million (US$101 million) to boost network connectivity; A$85 million (US$55 million) on an automated rostering system; and A$112 million (US$73 million) on a medication management system.
Garling highlighted the cost of poor technology to patients' health. Following the analysis of one patient's brain injury, it was found that staff failed to monitor the patient's sodium levels. Garling said this was likely avoidable if the hospital had consistent documentation practices.
"The entry of the sodium results in the notes was not immediately obvious," Garling noted.
Dr McGlynn, who was interviewed by Garling for the inquiry, had said that the most commonly reported incidents at his hospital were prescription mistakes, because staff often failed to relate the patient's weight and age to the dosage.
NSW Health's configuration of the CERNER electronic medical record system, currently being implemented, also came under fire. NSW Health said it could not afford to include a "to do" list within the system and said that automated warnings to flag a patient's changed status during treatment weren't necessary.
"In my view, medical alerts can play an important role in ensuring that optimal patient care is delivered," Garling said, however added that they should not be overused.
The National E-Health Transition Authority's (NEHTA) submission to the inquiry for clinicians highlighted that paper-based clinical reporting systems were not suited to current acute health care procedures, which often required a patient to move between several specialist facilities for treatment. NEHTA is the nation's peak e-health body.
Echoing a view by some clinicians, Garling brushed aside privacy concerns associated with implementing a state-wide electronic record system and has recommended one that can be "readily accessed by all health professionals". He noted that banks had overcome privacy obstacles similar to those faced in health care.
"Many privacy concerns are raised in the health sector which prove, on closer examination, to be based on unnecessary fears. Unwarranted privacy concerns may become an obstacle to much-needed reform," he said. |
from __future__ import annotations  # Enables postponed evaluation of type annotations (PEP 563)
from typing import Any, List, Optional
from flask import g
from flask_login import current_user
from psycopg2.extras import NamedTupleCursor
from openatlas import app
from openatlas.util.util import is_float
from openatlas.util.display import sanitize, uc_first
class Project:
def __init__(self, row: NamedTupleCursor.Record) -> None:
self.id = row.id
self.name = row.name
self.count = row.count
self.description = row.description if row.description else ''
self.created = row.created
self.modified = row.modified
class Import:
sql = """
SELECT p.id, p.name, p.description, p.created, p.modified, COUNT(e.id) AS count
FROM import.project p LEFT JOIN import.entity e ON p.id = e.project_id """
@staticmethod
def insert_project(name: str, description: Optional[str] = None) -> int:  # returns the id of the new project
description = description.strip() if description else None
sql = """
INSERT INTO import.project (name, description) VALUES (%(name)s, %(description)s)
RETURNING id;"""
g.execute(sql, {'name': name, 'description': description})
return g.cursor.fetchone()[0]
@staticmethod
def get_all_projects() -> List[Project]:
g.execute(Import.sql + ' GROUP by p.id ORDER BY name;')
return [Project(row) for row in g.cursor.fetchall()]
@staticmethod
def get_project_by_id(id_: int) -> Project:
g.execute(Import.sql + ' WHERE p.id = %(id)s GROUP by p.id;', {'id': id_})
return Project(g.cursor.fetchone())
@staticmethod
def get_project_by_name(name: str) -> Optional[Project]:
g.execute(Import.sql + ' WHERE p.name = %(name)s GROUP by p.id;', {'name': name})
return Project(g.cursor.fetchone()) if g.cursor.rowcount == 1 else None
@staticmethod
def delete_project(id_: int) -> None:
g.execute('DELETE FROM import.project WHERE id = %(id)s;', {'id': id_})
@staticmethod
def check_origin_ids(project: Project, origin_ids: List[str]) -> List[str]:
""" Check if origin ids already in database"""
sql = """
SELECT origin_id FROM import.entity
WHERE project_id = %(project_id)s AND origin_id IN %(ids)s;"""
g.execute(sql, {'project_id': project.id, 'ids': tuple(set(origin_ids))})
return [row.origin_id for row in g.cursor.fetchall()]
@staticmethod
def check_duplicates(class_code: str, names: List[str]) -> List[str]:
sql = """
SELECT DISTINCT name FROM model.entity
WHERE class_code = %(class_code)s AND LOWER(name) IN %(names)s;"""
g.execute(sql, {'class_code': class_code, 'names': tuple(names)})
return [row.name for row in g.cursor.fetchall()]
@staticmethod
def update_project(project: Project) -> None:
sql = """
UPDATE import.project SET (name, description) = (%(name)s, %(description)s)
WHERE id = %(id)s;"""
g.execute(sql, {'id': project.id,
'name': project.name,
'description': sanitize(project.description, 'text')})
@staticmethod
def check_type_id(type_id: str, class_code: str) -> bool: # pragma: no cover
if not type_id.isdigit():
return False
elif int(type_id) not in g.nodes:
return False
else:
# Check if type is allowed (for corresponding form)
valid_type = False
root = g.nodes[g.nodes[int(type_id)].root[0]]
for form_id, form_object in root.forms.items():
if form_object['name'] == uc_first(app.config['CODE_CLASS'][class_code]):
valid_type = True
if not valid_type:
return False
return True
@staticmethod
def import_data(project: 'Project', class_code: str, data: List[Any]) -> None:
from openatlas.models.entity import Entity
from openatlas.models.gis import Gis
for row in data:
system_type = None
if class_code == 'E33': # pragma: no cover
system_type = 'source content'
elif class_code == 'E18':
system_type = 'place'
desc = row['description'] if 'description' in row and row['description'] else None
entity = Entity.insert(class_code, row['name'], system_type, desc)
sql = """
INSERT INTO import.entity (project_id, origin_id, entity_id, user_id)
VALUES (%(project_id)s, %(origin_id)s, %(entity_id)s, %(user_id)s);"""
g.execute(sql, {'project_id': project.id,
'entity_id': entity.id,
'user_id': current_user.id,
'origin_id': row['id'] if 'id' in row and row['id'] else None})
# Dates
if 'begin_from' in row and row['begin_from']:
entity.begin_from = row['begin_from']
if 'begin_to' in row and row['begin_to']:
entity.begin_to = row['begin_to']
if 'begin_comment' in row and row['begin_comment']:
entity.begin_comment = row['begin_comment']
if 'end_from' in row and row['end_from']:
entity.end_from = row['end_from']
if 'end_to' in row and row['end_to']:
entity.end_to = row['end_to']
if 'end_comment' in row and row['end_comment']:
entity.end_comment = row['end_comment']
entity.update()
# Types
if 'type_ids' in row and row['type_ids']: # pragma: no cover
for type_id in row['type_ids'].split():
if not Import.check_type_id(type_id, class_code):
continue
sql = """
INSERT INTO model.link (property_code, domain_id, range_id)
VALUES ('P2', %(domain_id)s, %(type_id)s);"""
g.execute(sql, {'domain_id': entity.id, 'type_id': int(type_id)})
# GIS
if class_code == 'E18':
location = Entity.insert('E53', 'Location of ' + row['name'], 'place location')
entity.link('P53', location)
if 'easting' in row and is_float(row['easting']):
if 'northing' in row and is_float(row['northing']):
Gis.insert_import(entity=entity,
location=location,
project=project,
easting=row['easting'],
northing=row['northing'])
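# Hypothetical usage sketch (not part of the original module), assuming a Flask
# request context in which g.execute/g.cursor are bound and a parsed CSV row is
# available as a dict. The project name and row values below are illustrative only;
# the keys mirror those read by import_data above.
#
#   project_id = Import.insert_project('example project', 'Rows from a legacy list')
#   project = Import.get_project_by_id(project_id)
#   row = {'id': 'legacy-1', 'name': 'Vienna', 'description': 'A place entity',
#          'begin_from': '1850-01-01', 'easting': '16.37', 'northing': '48.21'}
#   Import.import_data(project, 'E18', [row])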
|
How does winning the lottery change your life?
The best part of winning a lottery jackpot is being able to take care of loved ones.
That’s according to two lucky West Yorkshire couples who scooped a combined £7.9m.
The Huddersfield Examiner grilled them years after their big money wins and asked how it had changed their lives.
Graham and Amanda Nield and Michael and Susan Crossland all agreed the extra cash made their marriages better and brought their families closer together.
Unlike some big winners, they didn’t turn to lives of self-seeking luxury, private jets and flash cars. Although both cheeky couples admit they still do the lottery in the hope lightning will strike twice in the same place!
The Neilds bagged £6,676,215 on the National Lottery four years ago.
Lottery winners, Graham and Amanda Nield.
The Wakefield couple, who worked together in a textiles factory, had been together for 10 years, but had never gotten married. Five minutes after winning, Graham popped the question to Amanda and a month later, they tied the knot.
Insisting they aren’t materialistic, they said Amanda didn’t even get an engagement ring, although she did get several new rings in the following years.
They bought themselves a purpose-built bungalow with living quarters for Amanda’s parents, as well as a house and a car for each of their five children.
Amanda said: “[My dad] had Alzheimer’s for 11 years. I kept him at home and looked after him, kept his dignity and he died peacefully and that’s what I wanted.
And the Crosslands bagged £1,218,618 nearly 10 years ago.
The Mirfield couple bought a house for £170,000 for Susan’s three siblings, all of whom have profound learning difficulties, and her mum.
But sadly, her mum died six months after moving in and her dad had already passed away too. So they sold it and bought a five-storey, seven-bedroom purpose-built house and moved Susan’s brother and two sisters in with them instead.
Lottery winners, Michael and Susan Crossland.
Now, the couple are able to care for them full-time without the help of carers.
The National Lottery changes the lives of winners like the Nields and Crosslands, as well as communities across the UK. On average, players raise over £30m for National Lottery-funded projects every week.
In Kirklees, National Lottery funding has helped support over 1,000 projects totalling more than £76m.
How much did you win and when?
Graham: £6.6m on 17 August 2013.
Lotto winners Graham Nield and Amanda Vickers.
What were your numbers and did they have any significance?
Graham: Can’t remember the numbers off the top of my head and no, they were just random numbers that I picked.
Amanda: Run around room with me nightie over my head.
Graham: The first thing I bought was a new car - a Nissan.
What is the most expensive thing you’ve bought?
Amanda: Our house. We couldn’t tell you [how much it cost]. We had it built right from scratch.
Graham: We didn’t add it up as such. We were in a position where you didn’t even bother, you just had it built sort of thing.
Lottery winners, Graham and Amanda Nield (left) with fellow winners, Susan and Michael Crossland.
Amanda: We’re simple people. We still live the simple way. We don’t go out buying extravagant [things].
Graham: I’m still known as the tight one.
Amanda: If I pick up a dear loaf of bread in the supermarket, I get told to put it back and get a cheaper one. He’s always been the same, I wouldn’t change him.
How long was it before you gave up work?
Amanda: I gave a months’ notice and he gave three months’ notice.
How many requests for money have you had?
Graham: Nobody’s really come out and asked for it. We’ve given to people and things and charities. Nobody’s bothered us like that.
Amanda: I donate to a charity every month. There’s a few - sometimes a few a month or I give one big donation.
How much have you given away?
Amanda: We’ve got 5 children between us and we bought them a house and a car each.
Graham: For the better. Financially-wise, you don’t have to worry. You don’t have to get up and go to work.
Amanda: The big impact it had on my life is I was able to care for my dad 24 hours a day. He had Alzheimer’s, I lost him last year.
He had Alzheimer’s for 11 years. It was nice to be able to keep him at home, kept him at home and looked after him, kept his dignity and he died peacefully and that’s what I wanted.
As he got worse, I ended up hiring a carer. I wouldn’t have been able to afford that or pack my job in.
That’s the big impact and I will always be grateful to the lottery for that.
Graham: More time together, do things that you wanna do when you wanna do, you’re not restricted.
Has it made your marriage better?
Amanda: We won in August and got married in September. We had been together 10 years that week he asked me.
Lotto winners, Graham Nield & Amanda Vickers on their wedding day. Saturday 28th Sept 2013. ©Victor de Jesus.
Apparently, he had been planning on asking me [to marry him] for a while and he didn’t have the guts to ask me. It just came out when we found out we won and I was all giddy. He just came straight out with it and asked me about 5 minutes [after we won].
We just had a simple register office ceremony and then just a party at night. We didn’t want a big thing.
Do you still do the lottery?
Graham: Most definitely - every week without fail.
Amanda: Both of us - I do it online and he goes and buys his tickets.
Susan: The numbers were 6, 9, 13, 18, 29 and 30 and they were all birthdays.
Susan: We didn’t actually check it until the morning after because he had done a long shift and work and he was fast asleep.
We checked it the morning after when we used to have Ceefax on TV. It was half past 6 in the morning and we checked it like we always checked it. He used to say ‘I’ll call the numbers out and you count on your fingers’.
When he said every number, I was like, ‘no, don’t be stupid’. He said ‘honestly’ and he gave me the ticket and then I checked them, threw it up in the air, ran outside in my pyjamas at half 6 in the morning, screamed in the middle of the road and ran back in the house.
Mirfield Lottery winners Susan and Michael Crossland join hunt for mystery winner - Kingsgate Centre, Huddersfield.
Susan: Bought my mum a house for £170k. She was 68. It was quite sad because she died six months after she moved in.
There’s a bit of a story that goes into it. My brother and sisters have all got learning difficulties - my sister’s got brain damage. So we bought them the house so that they could all be together and my mum died six months later. So now they all live with us.
The numbers that we had that won the lottery were my dad’s old numbers. When he died, we did them and on the anniversary of my dad’s death, we won the lottery on his numbers.
A bit heaven sent really. It still gives me goosebumps.
We part-exchanged that [house]. We had a big seven bedroom house built so my brother and sisters could live with us.
Susan: The house that we’re in now. The builder had just started building it and we dropped on it really. We said if you build it how we need it inside because my sister’s in a wheelchair we’ll buy it so that’s what we did.
Michael: It was about 2 years after I finished milking cows. I don’t drive so Susan used to take me to work at 2am and she got a bit fed up of that. So I actually went to go work at another farm which had better hours three days a week. I more or less packed it in about a year ago.
Susan: We gave our kids a bit of money each, we’ve bought things for them, taken them on holidays, taken our family on holidays. We do a lot of fundraising for different stuff, we did a lot more at the beginning but we still do do stuff.
Susan: We haven’t got a mortgage. Knowing that the house we’ve got is ours and that all our family is together in one house, that’s the nicest thing for us. But it hasn’t changed us as people.
Both: We’re still the same.
Susan: We’re still the same in happiness, the more happiness it’s brought us is not having to worry about bills and paying the mortage. And we’ve got a lot more time together - although I don’t know whether that’s a good thing or a bad thing. We’re as happy as we ever were before but we’re more happy because we don’t have the worry either. Once you take the worry away, obviously it’s bound to make you more happy.
Michael: No - she still annoys me, she’s still the boss.
Susan and Michael Crossland, who won the National Lottery in 2008, have helped organise Christmas lunch for volunteers.
Susan: We’re just the same as we were before, but obviously Michael doesn’t have to go out to work now so we spend more of the day together. We’re like best friends. We’ve always been big kids and best friends but we see more of each other so we do more things together which is nicer.
Michael: The worst thing is - like you said - because you spend a lot of time together, obviously when I went to work, you’d come home and you always had a tale to tell each other. I think it’s that that you miss out on sometimes.
Susan: Yeah. Still do the birthdays, same numbers.
And now we’ve won the lottery and we’ve got a bit more money, we do the EuroMillions as well - stretched it a bit further.
Plus, it’s nice to know that yeah, you do the lottery for yourself to win money, but it’s nice to know that the money that you put on the lottery also goes to good causes as well. |
import sys
import os.path
import urllib2
import re
from pyquery import PyQuery as pq
import common


def getId(url):
    # Extract the numeric listing id from a URL such as "...-1234.html".
    print("=====> ", url)
    arr = url.split("-")
    arr1 = arr[1].split(".")
    id = arr1[0]
    return id


def getSiteUrl(id, monitor, rcbUrl):
    # Follow the monitor's redirect link and return the bare domain of the target site.
    result = ""
    urlRequest = "http://www.{0}/{1}".format(monitor, id)
    print("REQUEST: {0}".format(urlRequest))
    try:
        req = urllib2.urlopen(urlRequest, timeout=30)
        url = req.geturl()
        arr = url.split("/?")
        arr1 = arr[0].split("//")
        result = arr1[1].replace("www.", "")
        result = result.split("/")[0]
    except:
        print("========== ERROR ===========")
        # common.insertUnknowSite(rcbUrl, monitor)
    return result


def getRcb(monitor):
    # Scrape the monitor's "newadd" page and record every listed site.
    print("invest_tracing.getRcb()")
    rcb_url = "http://{0}/newadd.html".format(monitor)
    d = pq(url=rcb_url)
    tables = d(".listbody tr td[width='28%'] .pro")
    siteList = []
    for index, item in enumerate(tables):
        try:
            obj = {}
            obj['id'] = getId(item.get("href"))
            obj['siteRCBUrl'] = "http://{0}/rcb-{1}.html".format(monitor, obj['id'])
            obj['url'] = getSiteUrl(item.get("href"), monitor, obj['siteRCBUrl'])
            obj['siteId'] = ""
            if obj['url'] != '':
                siteId = common.insertSite(obj)
                obj['siteId'] = siteId
                siteList.append(obj)
                print("{0} - {1} - {2}".format(obj['id'], obj['url'], obj['siteId']))
        except:
            pass
    for item in siteList:
        common.insertSiteMonitor(item, monitor)


def run():
    MONITOR = "invest-tracing.com"
    getRcb(MONITOR)
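# A minimal entry-point sketch (not in the original script), assuming the module is
# meant to be run directly; run() is defined above.
if __name__ == "__main__":
    run()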
|
> the external module is infrequently updated.
best and which one we can build out-of-tree.
> * rt2500 - had hardware for this, but it's bust now.
now, but I will maybe use it as a server soon.
> * rt2x00-* - will be in 2.6.22 from wireless-git?
out of date afaik, but the author suggested to me to use it anyway.
Maybe we can pull the code directly from the -git tree.
actively use the first one right now. |
import sys
if '' not in sys.path:
    sys.path.append('')

import pywsinfo
import unittest


class WebSiteUtilsTests(unittest.TestCase):

    def test_parse_url(self):
        url = 'http://www.example.com'
        self.assertEqual(
            pywsinfo.parse_url(url),
            {'source_url': 'http://www.example.com', 'host': 'www.example.com'}
        )
        url = 'http://www.example.com/path'
        self.assertEqual(
            pywsinfo.parse_url(url),
            {'source_url': 'http://www.example.com', 'host': 'www.example.com'}
        )
        url = 'http://www.example.com/path?12&12'
        self.assertEqual(
            pywsinfo.parse_url(url),
            {'source_url': 'http://www.example.com', 'host': 'www.example.com'}
        )
        url = 'https://www.example.com'
        self.assertEqual(
            pywsinfo.parse_url(url),
            {'source_url': 'https://www.example.com', 'host': 'www.example.com'}
        )
        url = 'http://localhost:8080'
        self.assertEqual(
            pywsinfo.parse_url(url),
            {'source_url': 'http://localhost:8080', 'host': 'localhost'}
        )

    def test_nslookup(self):
        self.assertGreater(len(pywsinfo.nslookup('google.com')), 1)
        self.assertGreater(len(pywsinfo.nslookup('www.google.com')), 1)
        self.assertEqual(pywsinfo.nslookup('www.google.com2'), [])

    def test_parse_html_head(self):
        html = '''<head>
        <meta name="Keywords" content="keyword1,keyword2">
        <meta name="Description" content="description">
        </head>'''
        self.assertEqual(pywsinfo.parse_html_head(html),
                         {'keywords': ['keyword1', 'keyword2'], 'description': 'description'})
        html = '''<head>
        <meta name="keywords" content="keyword1,keyword2">
        <meta name="description" content="description">
        </head>'''
        self.assertEqual(pywsinfo.parse_html_head(html),
                         {'keywords': ['keyword1', 'keyword2'], 'description': 'description'})
        html = '''<head>
        <meta name="keywords" content="">
        <meta name="description" content="">
        </head>'''
        self.assertEqual(pywsinfo.parse_html_head(html), {})
        html = '''<head></head>'''
        self.assertEqual(pywsinfo.parse_html_head(html), {})


if __name__ == '__main__':
    unittest.main()
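# For reference, a minimal sketch (hypothetical, not the actual pywsinfo code) of a
# parse_url implementation that would satisfy test_parse_url above:
#
#   from urlparse import urlparse  # urllib.parse on Python 3
#
#   def parse_url(url):
#       parts = urlparse(url)
#       return {'source_url': '%s://%s' % (parts.scheme, parts.netloc),
#               'host': parts.netloc.split(':')[0]}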
|
Lighting Design Company starts in Tampere, Finland.
The first Valolaterna Factory opens in Kuopio, Finland.
Theatrical Design Company begins in Kuopio, Finland.
Fantasiarakenne Ltd & Valolaterna Ltd.
A merger of two companies took place.
The company was chosen for the theme design and implementation of the Santa Claus Residence at Rovaniemi, Finnish Lapland.
Delivery of Themed Props to the world's first Angry Birds Land in Tampere, Finland.
Start of Themed Attractions Delivery to one of the biggest activity park chains in the Nordics - Superpark Ltd.
Fantasia Works was chosen among TOP 20 companies in Finland with highest potential for international growth.
Going global with a new brand - Fantasia Works.
Today, we are proud to be an international company working with inspiring theme parks, family entertainment centers and other interesting projects around the globe.
We are a Finnish entertainment design & build company that has been designing, manufacturing and assembling projects for Theme Parks, Science Centres and Visitor Attractions for over 17 years.
Groundbreaking expertise in the entertainment business with over 17 years of design & realisation experience.
Planning & Building a Theme Park can be easy when you have a partner with a range of Themed Attractions manufactured in Finland.
this game will make you sweat!
and accurate stars of NBA.
A Themed Attraction for the youngest players, who are ready to score points by throwing balls into challenging targets.
Learn more about our attractions designed and manufactured only in Finland.
make your story real with us!
©2018 Mattel. All rights reserved. ©2018 HIT Entertainment Limited. Bob the Builder: © 2018 Hit Entertainment Limited. Thomas & Friends: © 2018 Gullane (Thomas) Limited. Fireman Sam: © 2018 Prism Art & Design Limited. |
#!/usr/bin/python
# Copyright 2012 James McCauley, William Yu
# [email protected]
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
This is a demonstration file that has various switch implementations.
The first example is a basic "all match" switch followed by a
destination match, pair match then finally a more ideal pair match
switch.
Mininet Command: sudo mn --topo single,3 --mac
--switch ovsk
--controller remote
Command Line: ./pox.py py --completion
log.level --DEBUG
samples.of_sw_tutorial_oo
THIS VERSION SUPPORT resend() functionality in the betta branch POX.
Object-oriented version that allows user to switch switches via the
command line interface.
"""
# These next two imports are common POX convention
from pox.core import core
import pox.openflow.libopenflow_01 as of
from pox.lib.util import dpidToStr
# Even a simple usage of the logger is much nicer than print!
log = core.getLogger()
# Create the class to hold the switch tutorial implementations
class SwitchTutorial (object):
# This table maps (switch,MAC-addr) pairs to the port on 'switch' at
# which we last saw a packet *from* 'MAC-addr'.
# (In this case, we use a Connection object for the switch.)
table = {}
# Holds the object with the default switch
handlerName = 'SW_IDEALPAIRSWITCH'
# Holds the current active PacketIn listener object
listeners = None
# Constructor and sets default handler to Ideal Pair Switch
def __init__(self, handlerName='SW_IDEALPAIRSWITCH'):
log.debug("Initializing switch %s." % handlerName)
# Method for just sending a packet to any port (broadcast by default)
def send_packet(self, event, dst_port=of.OFPP_ALL):
msg = of.ofp_packet_out(in_port=event.ofp.in_port)
if event.ofp.buffer_id != -1 and event.ofp.buffer_id is not None:
msg.buffer_id = event.ofp.buffer_id
else:
if event.ofp.data is None:
return
msg.data = event.ofp.data
msg.actions.append(of.ofp_action_output(port=dst_port))
event.connection.send(msg)
# Optimal method for resending a packet
def resend_packet(self, event, dst_port=of.OFPP_ALL):
msg = of.ofp_packet_out(data=event.ofp)
msg.actions.append(of.ofp_action_output(port=dst_port))
event.connection.send(msg)
# DUMB HUB Implementation
# This is an implementation of a broadcast hub but all packets go
# to the controller since no flows are installed.
def _handle_dumbhub_packetin(self, event):
# Just send an instruction to the switch to send packet to all ports
packet = event.parsed
self.resend_packet(event, of.OFPP_ALL)
log.debug("Broadcasting %s.%i -> %s.%i" %
(packet.src, event.ofp.in_port, packet.dst, of.OFPP_ALL))
# PAIR-WISE MATCHING HUB Implementation
# This is an implementation of a broadcast hub with flows installed.
def _handle_pairhub_packetin(self, event):
packet = event.parsed
# Create flow that simply broadcasts any packet received
msg = of.ofp_flow_mod()
msg.data = event.ofp
msg.idle_timeout = 10
msg.hard_timeout = 30
msg.match.dl_src = packet.src
msg.match.dl_dst = packet.dst
msg.actions.append(of.ofp_action_output(port=of.OFPP_ALL))
event.connection.send(msg)
log.debug("Installing %s.%i -> %s.%i" %
(packet.src, event.ofp.in_port, packet.dst, of.OFPP_ALL))
# LAZY HUB Implementation (How hubs typically are)
# This is an implementation of a broadcast hub with flows installed.
def _handle_lazyhub_packetin(self, event):
packet = event.parsed
# Create flow that simply broadcasts any packet received
msg = of.ofp_flow_mod()
msg.data = event.ofp
msg.idle_timeout = 10
msg.hard_timeout = 30
msg.actions.append(of.ofp_action_output(port=of.OFPP_ALL))
event.connection.send(msg)
log.debug("Installing %s.%i -> %s.%i" %
("ff:ff:ff:ff:ff:ff", event.ofp.in_port, "ff:ff:ff:ff:ff:ff",
of.OFPP_ALL))
# BAD SWITCH Implementation
# This is an obvious but problematic implementation of switch that
# routes based on destination MAC addresses.
def _handle_badswitch_packetin(self, event):
packet = event.parsed
# Learn the source and fill up routing table
self.table[(event.connection, packet.src)] = event.port
# install appropriate flow rule when learned
msg = of.ofp_flow_mod()
msg.idle_timeout = 10
msg.hard_timeout = 30
msg.match.dl_dst = packet.src
msg.actions.append(of.ofp_action_output(port=event.port))
event.connection.send(msg)
log.debug("Installing %s.%i -> %s.%i" %
("ff:ff:ff:ff:ff:ff", event.ofp.in_port, packet.src, event.port))
# determine if appropriate destination route is available
dst_port = self.table.get((event.connection, packet.dst))
if dst_port is None:
# We don't know where the destination is yet. So, we'll just
# send the packet out all ports (except the one it came in on!)
# and hope the destination is out there somewhere. :)
# To send out all ports, we can use either of the special ports
# OFPP_FLOOD or OFPP_ALL. We'd like to just use OFPP_FLOOD,
# but it's not clear if all switches support this. :(
self.resend_packet(event, of.OFPP_ALL)
log.debug("Broadcasting %s.%i -> %s.%i" %
(packet.src, event.ofp.in_port, packet.dst, of.OFPP_ALL))
else:
# This is the packet that just came in -- we want send the packet
# if we know the destination.
self.resend_packet(event, dst_port)
log.debug("Sending %s.%i -> %s.%i" %
(packet.src, event.ofp.in_port, packet.dst, dst_port))
# PAIR-WISE MATCH SWITCH Implementation
# This is an implementation of a pair match switch. This only matches
# source and destination MAC addresses. Whenever a new source
# destination MAC address is detected it then add a new flow
# identifying the source destination pair. The routing table is updated
# using the detected destination MAC address to the destination port.
def _handle_pairswitch_packetin (self, event):
packet = event.parsed
# Learn the source and fill up routing table
self.table[(event.connection, packet.src)] = event.port
dst_port = self.table.get((event.connection, packet.dst))
if dst_port is None:
# We don't know where the destination is yet. So, we'll just
# send the packet out all ports (except the one it came in on!)
# and hope the destination is out there somewhere. :)
# To send out all ports, we can use either of the special ports
# OFPP_FLOOD or OFPP_ALL. We'd like to just use OFPP_FLOOD,
# but it's not clear if all switches support this. :(
self.resend_packet(event, of.OFPP_ALL)
log.debug("Broadcasting %s.%i -> %s.%i" %
(packet.src, event.ofp.in_port, packet.dst, of.OFPP_ALL))
else:
# This is the packet that just came in -- we want to
# install the rule and also resend the packet.
msg = of.ofp_flow_mod()
msg.data = event.ofp
msg.idle_timeout = 10
msg.hard_timeout = 30
msg.match.dl_src = packet.src
msg.match.dl_dst = packet.dst
msg.actions.append(of.ofp_action_output(port=dst_port))
event.connection.send(msg)
log.debug("Installing %s.%i -> %s.%i" %
(packet.src, event.ofp.in_port, packet.dst, dst_port))
# SMARTER PAIR-WISE MATCH SWITCH Implementation
# This is an implementation of an ideal pair switch. This optimizes the
# previous example by adding both direction in one entry.
def _handle_idealpairswitch_packetin(self, event):
packet = event.parsed
# Learn the source and fill up routing table
self.table[(event.connection, packet.src)] = event.port
dst_port = self.table.get((event.connection, packet.dst))
if dst_port is None:
# We don't know where the destination is yet. So, we'll just
# send the packet out all ports (except the one it came in on!)
# and hope the destination is out there somewhere. :)
# To send out all ports, we can use either of the special ports
# OFPP_FLOOD or OFPP_ALL. We'd like to just use OFPP_FLOOD,
# but it's not clear if all switches support this. :(
self.resend_packet(event, of.OFPP_ALL)
log.debug("Broadcasting %s.%i -> %s.%i" %
(packet.src, event.ofp.in_port, packet.dst, of.OFPP_ALL))
else:
# Since we know the switch ports for both the source and dest
# MACs, we can install rules for both directions.
msg = of.ofp_flow_mod()
msg.idle_timeout = 10
msg.hard_timeout = 30
msg.match.dl_dst = packet.src
msg.match.dl_src = packet.dst
msg.actions.append(of.ofp_action_output(port=event.port))
event.connection.send(msg)
# This is the packet that just came in -- we want to
# install the rule and also resend the packet.
msg = of.ofp_flow_mod()
msg.data = event.ofp
msg.idle_timeout = 10
msg.hard_timeout = 30
msg.match.dl_src = packet.src
msg.match.dl_dst = packet.dst
msg.actions.append(of.ofp_action_output(port=dst_port))
event.connection.send(msg)
log.debug("Installing %s.%i -> %s.%i AND %s.%i -> %s.%i" %
(packet.dst, dst_port, packet.src, event.ofp.in_port,
packet.src, event.ofp.in_port, packet.dst, dst_port))
# Define the proper handler
def _set_handler_name (self, handlerName='SW_IDEALPAIRSWITCH'):
self.handlerName = handlerName
# Function to grab the appropriate handler
def _get_handler (self, event):
return self.swMap[self.handlerName](self, event)
""" Here are functions that are meant to be called directly """
# Here is a function to list all possible switches
def list_available_listeners(self):
for key in self.swMap.iterkeys():
log.info("%s" % key)
# Here is a function to display possible methods
def help(self):
log.info("Methods available: %s %s %s %s %s" %
('list_available_listeners()',
'attach_packetin_listener(handlerName = \'SW_IDEALPAIRSWITCH\'',
'detach_packetin_listener()',
'clear_all_flows()',
'clear_flows(connection)'))
# Here is a function to attach the listener given the default handlerName
def attach_packetin_listener (self, handlerName='SW_IDEALPAIRSWITCH'):
self._set_handler_name(handlerName)
self.listeners = core.openflow.addListenerByName("PacketIn",
self._get_handler)
log.debug("Attach switch %s." % handlerName)
# Here is a function to remove the listener
def detach_packetin_listener (self):
core.openflow.removeListener(self.listeners)
log.debug("Detaching switch %s." % self.handlerName)
# Function to clear all flows from a specified switch given
# a connection object
def clear_flows (self, connection):
msg = of.ofp_flow_mod(match=of.ofp_match(), command=of.OFPFC_DELETE)
connection.send(msg)
log.debug("Clearing all flows from %s." %
dpidToStr(connection.dpid))
# Function to clear all flows from all switches
def clear_all_flows (self):
msg = of.ofp_flow_mod(match=of.ofp_match(), command=of.OFPFC_DELETE)
for connection in core.openflow._connections.values():
connection.send(msg)
log.debug("Clearing all flows from %s." %
dpidToStr(connection.dpid))
# Define various switch handlers
swMap = {
'SW_DUMBHUB' : _handle_dumbhub_packetin,
'SW_PAIRHUB' : _handle_pairhub_packetin,
'SW_LAZYHUB' : _handle_lazyhub_packetin,
'SW_BADSWITCH' : _handle_badswitch_packetin,
'SW_PAIRSWITCH' : _handle_pairswitch_packetin,
'SW_IDEALPAIRSWITCH' : _handle_idealpairswitch_packetin,
}
# Function that is invoked upon load to ensure that listeners are
# registered appropriately. The ideal pair switch is attached by default;
# use the MySwitch object in the POX CLI to swap implementations at runtime.
def launch ():
# create new tutorial class object using the IDEAL PAIR SWITCH as default
MySwitch = SwitchTutorial('SW_IDEALPAIRSWITCH')
# add this class into core.Interactive.variables to ensure we can access
# it in the CLI.
core.Interactive.variables['MySwitch'] = MySwitch
# attach the corresponding default listener
MySwitch.attach_packetin_listener()
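# Example (hypothetical session): once POX is running with the 'py' interactive
# component, the switch implementation can be swapped from the CLI using the
# methods defined above, e.g.:
#
#   POX> MySwitch.list_available_listeners()
#   POX> MySwitch.detach_packetin_listener()
#   POX> MySwitch.attach_packetin_listener('SW_LAZYHUB')
#   POX> MySwitch.clear_all_flows()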
|
My late teacher Tiantong [Master Rujing] said, “Breath enters and reaches the tanden, and yet there is no place from which it comes. (息入り来たりて、丹田に至る。雖然、従り来る処無し) Therefore it is neither long nor short. Breath emerges from the tanden, and yet there is nowhere it goes. Therefore it is neither short nor long.” (所以に、長からず、短かからず。息、丹田を出で去く。雖然、去き得る処無し。所以に、短かからず、長からず。) My late teacher said it like that. Suppose someone were to ask Eihei, “Master, how do you regulate your breath?” I would simply say to him: Although it is not the great vehicle, it differs from the lesser vehicle. Although it is not the lesser vehicle, it differs from the great vehicle. Suppose that person inquired again, “Ultimately, what is it?” I would say to him: Exhale and inhale are neither long nor short.
the eaves" without seeking enlightenment, without trying to to get rid of illusions, without aversion to the rising of thoughts, and yet without fondly continuing thoughts. If you do not continue thoughts, thoughts cannot arise by themselves. Like an empty space, like a mass of fire, letting your breathing flow naturally out and in, sit decisively without getting involved in anything at all.
At various times in the history of Soto Zen in later centuries, some have sought to introduce artificial practices to manipulate the breath in various ways, probably due to the influence of various esoteric (Tendai/Shingon) Buddhist practices, Rinzai/Obaku Zen contacts or Daoist/Nativist beliefs, and Chinese traditional medicine concepts which were introduced into Japanese Soto in later centuries, an age when the Teachings of the early founders in Japan were neglected if not forgotten. Such special techniques of the age of “Kirigami” (secret esoteric practices introduced into late medieval Soto from such outside influences) represent the infiltration of “gaining mind” into Shikantaza Zazen Practice, turning it into a form of meditation aimed at realizing special states and mystical sensations and interpretations. These ideas carried over into later centuries as well, even into the modern day, resulting in even some modern teachers who confuse these later infiltrations of ideas and practices with original Soto Practice. One example is the rather eccentric, yet brilliant, 19th Century Soto Zen priest Hara Tanzan, who was also a practitioner of traditional Chinese medicine, and an advocate of a rather unusual theory of bodily systems (“He claimed both bodily illness and mental suffering were the product of a kind of mucus called Dana which he thought was running up from the hipbone through the spinal column up to the brain. According to his theory, if this flow of mucus were to be shut up by the power of Zen, brain would be cleared away and complete health would be gained.” https://ci.nii.ac.jp/naid/110007154619) It is true that he recommended various special breathing practices as part of his quaint theory of “Dana” mucus.
These days, modern students sometimes come seeking advice on breathing in Shikantaza and the Soto tradition, and are not told the full history. Unfortunately, sometimes the advice given them is based on limited information which confuses various esoteric, Rinzai Zen, Chinese medicine and other mystical “Kirigami” interpretations with traditional Soto ways as advocated by Dogen and the early founders. (At the other extreme, they are often presented with nothing more than following or counting the breath to become centered, or as a relaxation technique, not true Shikantaza). That is a shame, and anyone who fails to present the complete picture, and wider information, to the student may be merely introducing their own narrow biases favoring their own teachers and traditions, and may be guilty of intentional manipulation themselves. It denies the student the right to all relevant information.
Last edited by jundocohen on Tue Apr 03, 2018 5:39 am, edited 11 times in total.
I agree and think that it should be stressed more often that Silent Illumination and Shikantaza are the fruit or the realization of the practice, i.e. the actual awakening or insight into our true nature, and not a method of practice.
Jundo wrote: (I would simply add one final note: In this Shikantaza Practice, we sit radically with "what is," dropping all resistance to how we would change and manipulate life to be "as we think it should be." By doing so, we realize the peace of the dropping of all division and friction with life conditions, thus the dropping of desire, anger, divided thinking in ignorance. Thus, in those cases where the body does not allow ... for example, for someone with pulmonary issues such as asthma or lung cancer ... one then sits Zazen just breathing as one can breathe, all "should be's" dropped. Then, even one's huffing and wheezing from high up in the chest, if that is all one can muster, is beyond deep or shallow, good or bad, and from the heart of the universe's own breath).
Thanks Jundo, yes too much manipulation of the breath or posture and all "should be's" etc. can result in separating life from practice.
... I am sorry Jundo, but your posts have all the hallmarks of someone describing a state, not a method (IMO).
I apologize. This is just due to my poor abilities to explain. Let me try again.
In Shikantaza, one just sits, putting aside all judgments such as good vs. bad (besides trust in the goodness of sitting), and thoughts of some other place in need of going or act to do (besides sitting), or concerns about before and after (this moment which is just this time of sitting is the only time of concern). One just sits in which the sitting itself is all good, and there is not one other place in need of going, not one other act in need of doing in the whole universe besides the sacred act of sitting. One puts aside judgments of fullness or lack, past and future, and simply trusts as an act of faith in one's heart that sitting itself is the fullness which never lacks, and has no before or after to the time of sitting. There is no goal apart from sitting; there is no prize or place to go besides here and now. There is nothing to achieve, because simply sitting is the one thing to achieve that is needed in the whole world just then. There is only sitting.
As one sits with such trust and conviction, let thoughts come and go ... of good and bad, things to do or places to see ... but simply do not grab on. Let them come but seek not to be tangled in them. Have a mind like the mirror, in which things appear and nothing is judged or rejected, nor particularly thought about. The mirror contains whatever appears before it. A clarity will manifest, but it is neither something we seek nor long for. Let all things be just as they are ... sickness and health, youth and old age, birth and death ... as just things which appear and disappear in the mirror. All is just "as it is," nothing to reject, run toward or away. This is the wonder of Just Sitting that Ejo and Keizan describe.
Why do we do this? Because we human beings do not know how to be fulfilled fully in just the doing of one thing, thus the source of Dukkha as we run around chasing this and that, filled with feelings of lack and things left undone.
Assume a balanced and stable posture as best one can, then forget about it. (If the body's physical condition is not cooperating, assume the most comfortable position one can ... and just accept one's discomfort as best one can, as just one more thing appearing in the equanimity of the mirror).
As to breathing, let breathing just be breathing in the manner Okumura Roshi describes in my post above.
Yes, this is a "method." But, because it is a method without a goal (besides the goal of sitting, which is a goal totally fulfilled just by sitting which is the one thing to do in the whole universe in that time of sitting) and there is no place to go and nothing lacking, we call it the ""method of no method" or a "non-method method."
I hope that is a bit easier to understand. Those are very specific steps and practices to realize just this, just sitting as the one thing which needs to be done in the time of sitting. Is something still unclear?
As to counting or following breaths, I also have very new people follow the breath until they develop some modicum of ability to not be tangled in thoughts, and to let the mind settle and judgments drop away. I also say that anyone can have days of disturbance and distraction when they may need to follow the breath for awhile to settle down a bit. However, as soon as one has, I encourage them to try to transition to "open spacious awareness." I just find that open awareness makes it easier to take this off the cushion, bringing the equanimity and "no place to go, nothing to do" off the cushion into this busy life of places to go and problems to solve and things to do.
Following the breath and being pleased when one attains feelings of peace, concentration and calm is nice, but is not really Shikantaza. Why? Because one seems to have a goal, and to be pleased, by attaining peace, concentration and calm. It may be a little hard to fathom, but the true Big P Peace of Shikantaza transcends small human peace or disturbance, concentration or distraction, calm or chaos. Those are just passing thoughts and emotions themselves, more objects in the mirror. In fact, we are the mirror, whole and at Peace, which encounters within peace or disturbance, concentration or distraction, calm or chaos with equanimity. We do not try to attain the goal of peace, concentration and calm, nor flee from disturbance, distraction and chaos. As strange as it sounds, such total acceptance and equanimity leads to Big P Peace. This is what separates Shikantaza from most ordinary types of meditation which seek to induce peace and be rid of disturbance, concentration without distraction, and feelings of calm instead of worry etc. The real Freedom comes in being the mirror, not running toward or away from any of that. If one uses the breath to bring about such peace, concentration and calm, one is not really breathing in a goalless way.
If you have pain as you describe, and you need to focus on the breath to deal with the pain ... just do that. That is also just what is. Bring the mirror mind to the pain as much as you can, but also accept one's gripes and complaints about the pain as well (because, when seen correctly, the gripes and complaints themselves are also just two more things in the mirror not to reject).
I hope that is a bit easier to understand now. I am sorry for being a poor explainer of the "method-non-method" which is Shikantaza. If there is something unclear above, just ask and I will do my best to do better.
Those are very specific steps and practices to realize just this, just sitting as the one thing which needs to be done in the time of sitting.
It is interesting after our recent discussions in the Forum of Shikantaza as without stages and aims.
You draw no nearer, progress no farther.
Is mountains and rivers away.
So, in our Practice, we walk forward and walk forward in daily life, avoiding the traps of excess desire, anger and divided thinking in ignorance ... all while knowing that there is just here with nothing lacking and no other place one can ever go. The Zafu is the unmoving staircase of the stepless step. You cannot step off this step, nor go up or down, even as we get up to walk through this life of many ups and downs. Thus we speak of the "method of no method."
To practice with an aim or destination in mind is a fruitless pursuit.
seek to not eat 4 a month.
d delirium ull experience will make u reconsider.
seek to not breath 4 10 minutes.
from between our mothers legs.
who is the teacher that told u so..
Actually, to practice with koans is also a "goallessness" and "non-attaining" practice. To practice with an aim or destination in mind is a fruitless pursuit.
Perhaps for you, and as your way of Koan Practice. That is lovely.
However, would you tell me how expressions such as "whether we go by the ladder or in one jump, everything depends on one's determination to reach the top. Unfortunately, the human will is so weak that unless it is forced to make desperate efforts it is often too lazy to use all its strength and courage to reach the peak" represent a "goalless" and "non-attaining" practice? Thank you for helping me understand.
In any case, the topic here is breath and Zazen in the Soto way. What Mr. Sekida describes, no matter its value as a method, is not that.
Last edited by jundocohen on Fri Apr 06, 2018 3:19 pm, edited 6 times in total.
ok, got your non seeking message.
However, just a reminder that this section of the forum is the Soto section and the topic here is specifically breathing and zazen in Soto. No imperialism here!
Being that this is a Soto thread, I will trouble you no more, except to suggest that there is no difference, other than sectarian, between the two practices. Whether koans or shikantaza, just practice and see what happens.
I must say, I am an inveterate subscriber to the Keith-ian view and reality, there. (gee, now where is that old emoticon of a person prostrating, such as we used to have on the board at ZFI? Dang... ).
Of course, lineages and their traditions are to be respected and encouraged, because they are the (only) vessels of skilful-means, and the avenues of Salvation, carrying true Wisdom and true Compassion and Dharma into the Present, and into the Future. Without them, there is only "mush" ("moosh"). Just look at "New-Age" stuff: "mush", I call it. And still-born.
Oh, but thanks for being here too, Jundo!
Last edited by desert_woodworker on Fri Apr 06, 2018 7:24 pm, edited 1 time in total.
Yeah, a snip from Kodera's (1980) book (the book based on his PhD dissertation at my Alma Mater) about Zen Master Dogen's formative years in China.
p.s. Hmm-m. "Age of Uncertainty"... . A bit like ours, now? Thus, are Rinzai methods soon to be preferred by the masses over Soto method(s)?, for "good reason"?
I've been enjoying a copy of the Schlutter as well for a few months.
The Kodera is a fine book, and I can recommend it.
This thread seems to have come to its natural end. We are well off topic, breathing in zazen in Soto, into new territory. Unless someone has something to add on point, shall we start new threads rather than stuff too much up into this?
why, please, cao, highly interesting topic,thread, y y y would u think of closing it??? |
import itertools
import pandas as pd
import numpy as np
from zipline.data.fx import DEFAULT_FX_RATE
from zipline.testing.predicates import assert_equal
import zipline.testing.fixtures as zp_fixtures
class _FXReaderTestCase(zp_fixtures.WithFXRates,
zp_fixtures.ZiplineTestCase):
"""
Base class for testing FXRateReader implementations.
To test a new FXRateReader implementation, subclass from this base class
and implement the ``reader`` property, returning an FXRateReader that uses
the data stored in ``cls.fx_rates``.
"""
FX_RATES_START_DATE = pd.Timestamp('2014-01-01', tz='UTC')
FX_RATES_END_DATE = pd.Timestamp('2014-01-31', tz='UTC')
# Calendar to which exchange rates data is aligned.
FX_RATES_CALENDAR = '24/5'
# Currencies between which exchange rates can be calculated.
FX_RATES_CURRENCIES = ["USD", "CAD", "GBP", "EUR"]
# Fields for which exchange rate data is present.
FX_RATES_RATE_NAMES = ["london_mid", "tokyo_mid"]
# Field to be used on a lookup of `'default'`.
FX_RATES_DEFAULT_RATE = 'london_mid'
# Used by WithFXRates.
@classmethod
def make_fx_rates(cls, fields, currencies, sessions):
ndays = len(sessions)
# Give each currency a timeseries of "true" values, and compute fx
# rates as ratios between true values.
reference = pd.DataFrame({
'USD': np.linspace(1.0, 2.0, num=ndays),
'CAD': np.linspace(2.0, 3.0, num=ndays),
'GBP': np.linspace(3.0, 4.0, num=ndays),
'EUR': np.linspace(4.0, 5.0, num=ndays),
}, index=sessions, columns=currencies)
cls.tokyo_mid_rates = cls.make_fx_rates_from_reference(reference)
# Make london_mid different by adding +1 to reference values.
cls.london_mid_rates = cls.make_fx_rates_from_reference(reference + 1)
# This will be set as cls.fx_rates by WithFXRates.
return {
'london_mid': cls.london_mid_rates,
'tokyo_mid': cls.tokyo_mid_rates,
}
@property
def reader(self):
raise NotImplementedError("Must be implemented by test suite.")
def test_scalar_lookup(self):
reader = self.reader
rates = self.FX_RATES_RATE_NAMES
quotes = self.FX_RATES_CURRENCIES
bases = self.FX_RATES_CURRENCIES + [None]
dates = pd.date_range(
self.FX_RATES_START_DATE - pd.Timedelta('1 day'),
self.FX_RATES_END_DATE + pd.Timedelta('1 day'),
)
cases = itertools.product(rates, quotes, bases, dates)
for rate, quote, base, dt in cases:
dts = pd.DatetimeIndex([dt], tz='UTC')
bases = np.array([base], dtype=object)
result = reader.get_rates(rate, quote, bases, dts)
assert_equal(result.shape, (1, 1))
result_scalar = result[0, 0]
if dt >= self.FX_RATES_START_DATE and quote == base:
assert_equal(result_scalar, 1.0)
expected = self.get_expected_fx_rate_scalar(rate, quote, base, dt)
assert_equal(result_scalar, expected)
col_result = reader.get_rates_columnar(rate, quote, bases, dts)
assert_equal(col_result, result.ravel())
alt_result_scalar = reader.get_rate_scalar(rate, quote, base, dt)
assert_equal(result_scalar, alt_result_scalar)
def test_2d_lookup(self):
rand = np.random.RandomState(42)
dates = pd.date_range(
self.FX_RATES_START_DATE - pd.Timedelta('2 days'),
self.FX_RATES_END_DATE + pd.Timedelta('2 days'),
)
rates = self.FX_RATES_RATE_NAMES + [DEFAULT_FX_RATE]
possible_quotes = self.FX_RATES_CURRENCIES
possible_bases = self.FX_RATES_CURRENCIES + [None]
# For every combination of rate name and quote currency...
for rate, quote in itertools.product(rates, possible_quotes):
# Choose N random distinct days...
for ndays in 1, 2, 7, 20:
dts_raw = rand.choice(dates, ndays, replace=False)
dts = pd.DatetimeIndex(dts_raw, tz='utc').sort_values()
# Choose M random possibly-non-distinct currencies...
for nbases in 1, 2, 10, 200:
bases = (
rand.choice(possible_bases, nbases, replace=True)
.astype(object)
)
# ...And check that we get the expected result when querying
# for those dates/currencies.
result = self.reader.get_rates(rate, quote, bases, dts)
expected = self.get_expected_fx_rates(rate, quote, bases, dts)
assert_equal(result, expected)
def test_columnar_lookup(self):
rand = np.random.RandomState(42)
dates = pd.date_range(
self.FX_RATES_START_DATE - pd.Timedelta('2 days'),
self.FX_RATES_END_DATE + pd.Timedelta('2 days'),
)
rates = self.FX_RATES_RATE_NAMES + [DEFAULT_FX_RATE]
possible_quotes = self.FX_RATES_CURRENCIES
possible_bases = self.FX_RATES_CURRENCIES + [None]
reader = self.reader
# For every combination of rate name and quote currency...
for rate, quote in itertools.product(rates, possible_quotes):
for N in 1, 2, 10, 200:
# Choose N (date, base) pairs randomly with replacement.
dts_raw = rand.choice(dates, N, replace=True)
dts = pd.DatetimeIndex(dts_raw, tz='utc')
bases = (
rand.choice(possible_bases, N, replace=True)
.astype(object)
)
# ... And check that we get the expected result when querying
# for those dates/currencies.
result = reader.get_rates_columnar(rate, quote, bases, dts)
expected = self.get_expected_fx_rates_columnar(
rate,
quote,
bases,
dts,
)
assert_equal(result, expected)
def test_load_everything(self):
# Sanity check for the randomized tests above: check that we get
# exactly the rates we set up in make_fx_rates if we query for their
# indices.
for currency in self.FX_RATES_CURRENCIES:
tokyo_rates = self.tokyo_mid_rates[currency]
tokyo_result = self.reader.get_rates(
'tokyo_mid',
currency,
tokyo_rates.columns,
tokyo_rates.index,
)
assert_equal(tokyo_result, tokyo_rates.values)
london_rates = self.london_mid_rates[currency]
london_result = self.reader.get_rates(
'london_mid',
currency,
london_rates.columns,
london_rates.index,
)
default_result = self.reader.get_rates(
DEFAULT_FX_RATE,
currency,
london_rates.columns,
london_rates.index,
)
assert_equal(london_result, default_result)
assert_equal(london_result, london_rates.values)
def test_read_before_start_date(self):
# Reads from before the start of our data should emit NaN. We do this
# because, for some Pipeline loaders, it's hard to put a lower bound on
# input asof dates, so we end up making queries for asof_dates that
# might be before the start of FX data. When that happens, we want to
# emit NaN, but we don't want to fail.
for bad_date in (self.FX_RATES_START_DATE - pd.Timedelta('1 day'),
self.FX_RATES_START_DATE - pd.Timedelta('1000 days')):
for rate in self.FX_RATES_RATE_NAMES:
quote = 'USD'
bases = np.array(['CAD'], dtype=object)
dts = pd.DatetimeIndex([bad_date])
result = self.reader.get_rates(rate, quote, bases, dts)
assert_equal(result.shape, (1, 1))
assert_equal(np.nan, result[0, 0])
def test_read_after_end_date(self):
# Reads from **after** the end of our data, on the other hand, should
# fail. We can always upper bound the relevant asofs that we're
# interested in, and having fx rates forward-fill past the end of data
# is confusing and takes a while to debug.
for bad_date in (self.FX_RATES_END_DATE + pd.Timedelta('1 day'),
self.FX_RATES_END_DATE + pd.Timedelta('1000 days')):
for rate in self.FX_RATES_RATE_NAMES:
quote = 'USD'
bases = np.array(['CAD'], dtype=object)
dts = pd.DatetimeIndex([bad_date])
result = self.reader.get_rates(rate, quote, bases, dts)
assert_equal(result.shape, (1, 1))
expected = self.get_expected_fx_rate_scalar(
rate,
quote,
'CAD',
self.FX_RATES_END_DATE,
)
assert_equal(expected, result[0, 0])
def test_read_unknown_base(self):
for rate in self.FX_RATES_RATE_NAMES:
quote = 'USD'
for unknown_base in 'XXX', None:
bases = np.array([unknown_base], dtype=object)
dts = pd.DatetimeIndex([self.FX_RATES_START_DATE])
result = self.reader.get_rates(rate, quote, bases, dts)[0, 0]
assert_equal(result, np.nan)
class InMemoryFXReaderTestCase(_FXReaderTestCase):
@property
def reader(self):
return self.in_memory_fx_rate_reader
class HDF5FXReaderTestCase(zp_fixtures.WithTmpDir,
_FXReaderTestCase):
@classmethod
def init_class_fixtures(cls):
super(HDF5FXReaderTestCase, cls).init_class_fixtures()
path = cls.tmpdir.getpath('fx_rates.h5')
cls.h5_fx_reader = cls.write_h5_fx_rates(path)
@property
def reader(self):
return self.h5_fx_reader
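# Hypothetical sketch: per the _FXReaderTestCase docstring, a further FXRateReader
# implementation could reuse the whole suite by subclassing and exposing `reader`.
# `MyFXReaderTestCase` and `build_my_reader` below are illustrative names only.
#
# class MyFXReaderTestCase(_FXReaderTestCase):
#     @property
#     def reader(self):
#         return build_my_reader(self.fx_rates)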
class FastGetLocTestCase(zp_fixtures.ZiplineTestCase):
def test_fast_get_loc_ffilled(self):
dts = pd.to_datetime([
'2014-01-02',
'2014-01-03',
# Skip 2014-01-04
'2014-01-05',
'2014-01-06',
])
for dt in pd.date_range('2014-01-02', '2014-01-08'):
result = zp_fixtures.fast_get_loc_ffilled(dts.values, dt.asm8)
expected = dts.get_loc(dt, method='ffill')
assert_equal(result, expected)
with self.assertRaises(KeyError):
dts.get_loc(pd.Timestamp('2014-01-01'), method='ffill')
with self.assertRaises(KeyError):
zp_fixtures.fast_get_loc_ffilled(dts, pd.Timestamp('2014-01-01'))
|
Discusses the prevalence and nature of bacteremias and fungemias in patients with AIDS. Results of blood colony counts using the lysis-centrifugation technique; measurement of neutrophils from patients; implications of the severity of the T-cell defect for the infections.
A definition of the term "lysis-centrifugation" which refers to a technique for detecting microorganisms in a specimen of body fluid, in which the cells in the fluid are mixed in a tube, and then allowed to stand to allow its cellular components to break down is presented. |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class ipset_binding(base_resource):
""" Binding class showing the resources that can be bound to ipset_binding.
"""
def __init__(self) :
self._name = ""
self.ipset_nsip_binding = []
self.ipset_nsip6_binding = []
@property
def name(self) :
"""Name of the IP set whose details you want to display.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Name of the IP set whose details you want to display.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def ipset_nsip_bindings(self) :
"""nsip that can be bound to ipset.
"""
try :
return self._ipset_nsip_binding
except Exception as e:
raise e
@property
def ipset_nsip6_bindings(self) :
"""nsip6 that can be bound to ipset.
"""
try :
return self._ipset_nsip6_binding
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(ipset_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.ipset_binding
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.name) :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def get(self, service, name) :
""" Use this API to fetch ipset_binding resource.
"""
try :
if type(name) is not list :
obj = ipset_binding()
obj.name = name
response = obj.get_resource(service)
else :
if name and len(name) > 0 :
# Pre-size the response list so each per-name result can be assigned by index.
response = [None] * len(name)
obj = [ipset_binding() for _ in range(len(name))]
for i in range(len(name)) :
obj[i].name = name[i]
response[i] = obj[i].get_resource(service)
return response
except Exception as e:
raise e
class ipset_binding_response(base_response) :
def __init__(self, length=1) :
self.ipset_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.ipset_binding = [ipset_binding() for _ in range(length)]
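# Hypothetical usage sketch (not part of the generated SDK file): fetch the
# bindings for an existing IP set. The address, credentials and IP set name
# below are placeholders only.
#
#   from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
#   client = nitro_service("10.0.0.1", "http")
#   client.login("nsroot", "nsroot")
#   bindings = ipset_binding.get(client, "my_ipset")
#   client.logout()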
|
We specialize in manufacturing and supplying door controller operator service tools in China. Realever Enterprise Limited, one of the well-known brands in China, wholesales high-quality door controller operator service tools at low prices.
Wholesale door controller operator service tools from China: if you need inexpensive door controller operator service tools from leading manufacturers, you can find high-quality brands from our factory. You can also send us feedback about what you want, and we will reply as quickly as possible.