# -*- coding: utf-8 -*-
import os, sys, codecs
import json
from pydash import py_ as _

ENCODING = 'utf-8'
ENCODING1 = 'gb18030'

def dec(aStr):
    # Decode a byte string as UTF-8, replacing undecodable bytes.
    encode, decode, reader, writer = codecs.lookup(ENCODING)
    text, length = decode(aStr, 'replace')
    return text

def enc(aStr):
    # Encode a unicode string as UTF-8, replacing unencodable characters.
    encode, decode, reader, writer = codecs.lookup(ENCODING)
    text, length = encode(aStr, 'replace')
    return text

def dec1(aStr):
    # Decode a byte string as GB18030.
    encode, decode, reader, writer = codecs.lookup(ENCODING1)
    text, length = decode(aStr, 'replace')
    return text

def enc1(aStr):
    # Encode a unicode string as GB18030.
    encode, decode, reader, writer = codecs.lookup(ENCODING1)
    text, length = encode(aStr, 'replace')
    return text

VCFPATH = ur'd:\联系人_002.vcf'

def test():
    # Parse the vCard file and dump name/telephone pairs to a JSON file.
    lines = []
    contacts = []
    with open(VCFPATH) as f:
        lines = f.readlines()
    begin = False
    o = None
    for line in lines:
        line = line.strip()
        if line == 'BEGIN:VCARD':
            begin = True
            o = {}
            continue
        if line == 'END:VCARD':
            begin = False
            if o and o.has_key('tel') and o.has_key('name'):
                contacts.append(o)
            continue
        if begin:
            if _.starts_with(line, 'N;'):
                # Name components are quoted-printable encoded and ';'-separated.
                o['name'] = line[line.index(':') + 1:]
                o['name'] = o['name'].split(';')
                o['name'] = filter(lambda x: len(x) > 0, o['name'])
                o['name'] = map(convert, o['name'])
                o['name'] = ''.join(o['name'])
            if _.starts_with(line, 'TEL;'):
                if not o.has_key('tel'):
                    o['tel'] = []
                o['tel'].append(line[line.index(':') + 1:])
    # print(contacts)
    s = json.dumps(contacts, ensure_ascii=False, indent=4)
    with codecs.open(ur'd:\contacts.json', 'w', 'utf-8-sig') as f:
        f.write(s)

def convert(s):
    # Turn quoted-printable '=E4=BD...' runs into '\xE4\xBD...' escapes,
    # then decode the resulting bytes as UTF-8.
    s = s.replace('=', '\\x')
    return dec(codecs.escape_decode(s)[0])

def printf():
    contacts = []
    with codecs.open(ur'd:\contacts.json', 'r', 'utf-8-sig') as f:
        contacts = json.loads(f.read())
    for contact in contacts:
        print('%s:%s' % (contact['name'], ','.join(contact['tel'])))

if __name__ == "__main__":
    # run test() first to generate d:\contacts.json, then printf() to show it
    printf()
|
In this week's comics, Doctor Who and Star Trek cross universes!
What's coming into comic shops tomorrow? A clash of two venerable television shows, Batman battling architecture-themed supervillains, and a new issue of Jeff Smith's Rasl. Set sonic screwdrivers to stun.
When the Federation's most terrifying enemy strikes an unholy alliance with one of the Doctor's most hated antagonists, the result is devastation on a cosmic scale! Spanning the ends of space and time itself, Captain Jean-Luc Picard and the crew of the USS Enterprise find themselves joining forces with the Doctor and his companions, with the fate of the galaxy hanging in the balance.
Other debuts this week include the Teen Titans spin-off series The Ravagers, Franco and Art Baltazar's Superman Family Adventures, and the Mike Mignola-penned one-shot BPRD - Hell On Earth: Transformation Of J.H. O'Donnell.
As for other issues, there's a new installment of Jeff Smith's fabulous dimension-hopping book Rasl. Other sundry reads in the grab bag include Angel and Faith, Wolverine And The X-Men, Walking Dead, Incredible Hulk, American Vampire, and Teenage Mutant Ninja Turtles (preview here). Two consistently enjoyable DC titles — Batman and Animal Man — are also releasing annuals this week.
As chairman of the Gotham Landmarks Commission, Bruce Wayne has been a key part of this boom, which signals a golden age of architectural ingenuity for the city. And then, the explosions begin. All manner of design-related malfunctions - faulty crane calculations, sturdy materials suddenly collapsing, software glitches, walkways giving way and more - cause casualties across the city.
Other graphic novels that may be worth your gander include a new volume of Adam Warren's Empowered, the first Starman omnibus on trade paperback, the first volume of the New 52 Wonder Woman, and a new Avatar: The Last Airbender paperback. And as usual, here's the list of everything being released to comic stores tomorrow, and you can find your nearest comic retailer here. Happy reading, gang!
|
#! /usr/bin/env python
import random
import sys
import syslog
import socket
import threading
import time

import agoclient

client = agoclient.AgoConnection("simulator")

def messageHandler(internalid, content):
    if "command" in content:
        if content["command"] == "on":
            print "switching on: " + internalid
            client.emitEvent(internalid, "event.device.statechanged", "255", "")
        if content["command"] == "off":
            print "switching off: " + internalid
            client.emitEvent(internalid, "event.device.statechanged", "0", "")
        if content["command"] == "push":
            print "push button: " + internalid
        if content['command'] == 'setlevel':
            if 'level' in content:
                print "device level changed", content["level"]
                client.emitEvent(internalid, "event.device.statechanged", content["level"], "")

client.addHandler(messageHandler)

client.addDevice("123", "dimmer")
client.addDevice("124", "switch")
client.addDevice("125", "binarysensor")
client.addDevice("126", "multilevelsensor")
client.addDevice("127", "pushbutton")

class testEvent(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)

    def run(self):
        level = 0
        counter = 0
        while True:
            counter = counter + 1
            if counter > 3:
                counter = 0
                # 10.0 forces float division under Python 2, so the simulated
                # temperature keeps its fractional degree (5.0 to 30.0 degC).
                temp = random.randint(50, 300) / 10.0
                client.emitEvent("126", "event.environment.temperaturechanged", temp, "degC")
                client.emitEvent("126", "event.environment.humiditychanged", random.randint(20, 75), "percent")
            # toggle the binary sensor every pass through the loop
            client.emitEvent("125", "event.security.sensortriggered", level, "")
            if level == 0:
                level = 255
            else:
                level = 0
            time.sleep(5)

background = testEvent()
background.setDaemon(True)
background.start()
syslog.syslog(syslog.LOG_NOTICE, "agosimulator.py startup")
client.run()
|
Separate loop handling from internal code.
Fix race while allocating a free loop device.
Use loop functions even in the API test.
Cryptsetup resize will now also try to resize the underlying device.
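For context on the race mentioned above, here is a minimal Python sketch (not cryptsetup's actual implementation): LOOP_CTL_GET_FREE hands out a free index, but another process can claim that loop device before LOOP_SET_FD runs, so the attach step must be retried on EBUSY.

import errno
import fcntl
import os

LOOP_CTL_GET_FREE = 0x4C82  # from <linux/loop.h>
LOOP_SET_FD = 0x4C00

def attach_loop(backing_path, retries=5):
    """Attach backing_path to a free loop device, retrying the lost race."""
    backing = os.open(backing_path, os.O_RDWR)
    try:
        for _ in range(retries):
            with open("/dev/loop-control", "rb") as ctl:
                index = fcntl.ioctl(ctl, LOOP_CTL_GET_FREE)
            loop = os.open("/dev/loop%d" % index, os.O_RDWR)
            try:
                fcntl.ioctl(loop, LOOP_SET_FD, backing)
                return "/dev/loop%d" % index  # attached; kernel keeps a reference
            except OSError as e:
                if e.errno != errno.EBUSY:
                    raise  # only the lost-the-race case is retried
            finally:
                os.close(loop)
        raise RuntimeError("no free loop device after %d attempts" % retries)
    finally:
        os.close(backing)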
|
#!/usr/bin/env python
# This example script was ported from Perl Spreadsheet::WriteExcel module.
# The author of the Spreadsheet::WriteExcel module is John McNamara
# <[email protected]>
__revision__ = """$Id: simple.py,v 1.9 2004/01/31 18:56:07 fufff Exp $"""
#######################################################################
#
# Example of how to use the WriteExcel module to write text and numbers
# to an Excel binary file.
#
# reverse('(c)'), March 2001, John McNamara, [email protected]
#
import pyXLWriter as xl
# Create a new workbook called simple.xls and add a worksheet
workbook = xl.Writer("simple.xls")
worksheet = workbook.add_worksheet()
# The general syntax is write(row, column, token). Note that row and
# column are zero indexed
# Write some text
worksheet.write([0, 0], "Hi Excel!")
# Write some numbers
worksheet.write([2, 0], 3) # Writes 3
worksheet.write([3, 0], 3.00000) # Writes 3
worksheet.write([4, 0], 3.00001) # Writes 3.00001
worksheet.write([5, 0], 3.14159) # TeX revision no.?
# Write some formulas
worksheet.write([7, 0], '=A3 + A6')
worksheet.write([8, 0], '=IF(A5>3,"Yes", "No")')
# Write a hyperlink
worksheet.write([10, 0], 'http://www.perl.com/')
workbook.close()
|
Adds a "Delete All" button in the Comment Moderation page that deletes ALL the commentary from ALL the chat rooms.
Hi, I was wondering if there is any way to delete all comments except the superuser grotto, and still add the deleted comments to the audit section so I know who deleted them?
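A minimal sketch of one way to do this, assuming a Django-style setup; the Comment and AuditEntry models and their field names below are hypothetical, not the plugin's actual API:

from myplugin.models import Comment, AuditEntry  # hypothetical models

def delete_all_comments_except_superusers(actor):
    doomed = Comment.objects.exclude(author__is_superuser=True)
    for comment in doomed.iterator():
        # record each deletion so the audit trail shows who removed what
        AuditEntry.objects.create(
            actor=actor,
            action="comment_deleted",
            detail="room=%s author=%s" % (comment.room_id, comment.author_id),
        )
    doomed.delete()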
Moved to its own thread that can be found here.
|
from core.himesis import Himesis

class HeclassOUTeStructuralFeaturesSolveRefEClassEStructuralFeatureEClassEStructuralFeature(Himesis):
    def __init__(self):
        """
        Creates the himesis graph representing the AToM3 model HeclassOUTeStructuralFeaturesSolveRefEClassEStructuralFeatureEClassEStructuralFeature.
        """
        # Flag this instance as compiled now
        self.is_compiled = True

        super(HeclassOUTeStructuralFeaturesSolveRefEClassEStructuralFeatureEClassEStructuralFeature, self).__init__(name='HeclassOUTeStructuralFeaturesSolveRefEClassEStructuralFeatureEClassEStructuralFeature', num_nodes=27, edges=[])

        # Add the edges
        self.add_edges([[0, 6], [6, 5], [0, 8], [8, 7], [1, 10], [10, 9], [1, 12], [12, 11], [5, 3], [3, 7], [9, 4], [4, 11], [9, 13], [13, 5], [11, 14], [14, 7], [9, 15], [15, 16], [17, 18], [18, 16], [17, 19], [19, 20], [11, 21], [21, 22], [23, 24], [24, 22], [23, 25], [25, 26], [0, 2], [2, 1]])

        # Set the graph attributes
        self["mm__"] = ['HimesisMM']
        self["name"] = """eclassOUTeStructuralFeaturesSolveRefEClassEStructuralFeatureEClassEStructuralFeature"""
        self["GUID__"] = 2496964863449983084

        # Set the node attributes
        self.vs[0]["mm__"] = """MatchModel"""
        self.vs[0]["GUID__"] = 7878357618895443413
        self.vs[1]["mm__"] = """ApplyModel"""
        self.vs[1]["GUID__"] = 7103443176050273994
        self.vs[2]["mm__"] = """paired_with"""
        self.vs[2]["GUID__"] = 4075457791616116234
        self.vs[3]["associationType"] = """eStructuralFeatures"""
        self.vs[3]["mm__"] = """directLink_S"""
        self.vs[3]["GUID__"] = 82093709888820896
        self.vs[4]["associationType"] = """eStructuralFeatures"""
        self.vs[4]["mm__"] = """directLink_T"""
        self.vs[4]["GUID__"] = 7701273925430543805
        self.vs[5]["name"] = """"""
        self.vs[5]["classtype"] = """EClass"""
        self.vs[5]["mm__"] = """EClass"""
        self.vs[5]["cardinality"] = """+"""
        self.vs[5]["GUID__"] = 7983479623067543816
        self.vs[6]["mm__"] = """match_contains"""
        self.vs[6]["GUID__"] = 2490505581444583091
        self.vs[7]["name"] = """"""
        self.vs[7]["classtype"] = """EStructuralFeature"""
        self.vs[7]["mm__"] = """EStructuralFeature"""
        self.vs[7]["cardinality"] = """+"""
        self.vs[7]["GUID__"] = 3944901957551387274
        self.vs[8]["mm__"] = """match_contains"""
        self.vs[8]["GUID__"] = 6559812938848820978
        self.vs[9]["name"] = """"""
        self.vs[9]["classtype"] = """EClass"""
        self.vs[9]["mm__"] = """EClass"""
        self.vs[9]["cardinality"] = """1"""
        self.vs[9]["GUID__"] = 1448143835880876879
        self.vs[10]["mm__"] = """apply_contains"""
        self.vs[10]["GUID__"] = 122443353551202861
        self.vs[11]["name"] = """"""
        self.vs[11]["classtype"] = """EStructuralFeature"""
        self.vs[11]["mm__"] = """EStructuralFeature"""
        self.vs[11]["cardinality"] = """1"""
        self.vs[11]["GUID__"] = 4455295229616770163
        self.vs[12]["mm__"] = """apply_contains"""
        self.vs[12]["GUID__"] = 866889581155583712
        self.vs[13]["mm__"] = """backward_link"""
        self.vs[13]["type"] = """ruleDef"""
        self.vs[13]["GUID__"] = 1191675117595068943
        self.vs[14]["mm__"] = """backward_link"""
        self.vs[14]["type"] = """ruleDef"""
        self.vs[14]["GUID__"] = 2798140781034193118
        self.vs[15]["mm__"] = """hasAttribute_T"""
        self.vs[15]["GUID__"] = 7657253808431851836
        self.vs[16]["name"] = """ApplyAttribute"""
        self.vs[16]["Type"] = """'String'"""
        self.vs[16]["mm__"] = """Attribute"""
        self.vs[16]["GUID__"] = 8743892830884456720
        self.vs[17]["name"] = """eq_"""
        self.vs[17]["mm__"] = """Equation"""
        self.vs[17]["GUID__"] = 3267268965152823955
        self.vs[18]["mm__"] = """leftExpr"""
        self.vs[18]["GUID__"] = 445142670763407592
        self.vs[19]["mm__"] = """rightExpr"""
        self.vs[19]["GUID__"] = 3054582158653006612
        self.vs[20]["name"] = """solveRef"""
        self.vs[20]["Type"] = """'String'"""
        self.vs[20]["mm__"] = """Constant"""
        self.vs[20]["GUID__"] = 4367346554362163209
        self.vs[21]["mm__"] = """hasAttribute_T"""
        self.vs[21]["GUID__"] = 5673578323192681610
        self.vs[22]["name"] = """ApplyAttribute"""
        self.vs[22]["Type"] = """'String'"""
        self.vs[22]["mm__"] = """Attribute"""
        self.vs[22]["GUID__"] = 4531990103416906788
        self.vs[23]["name"] = """eq_"""
        self.vs[23]["mm__"] = """Equation"""
        self.vs[23]["GUID__"] = 870047253623542103
        self.vs[24]["mm__"] = """leftExpr"""
        self.vs[24]["GUID__"] = 3086761965923965550
        self.vs[25]["mm__"] = """rightExpr"""
        self.vs[25]["GUID__"] = 2044225800229322622
        self.vs[26]["name"] = """solveRef"""
        self.vs[26]["Type"] = """'String'"""
        self.vs[26]["mm__"] = """Constant"""
        self.vs[26]["GUID__"] = 7763793050620366314
|
Are Continuous or Cyclic Oral Contraceptives Better Post-Surgery?
Ludovico Muzii, MD, from the Sapienza University of Rome, and colleagues conducted a systematic review and meta-analysis to compare a continuous versus a cyclic oral contraceptive schedule after surgical excision of ovarian endometriomas. Data were included from three randomized clinical trials and one prospective controlled study, with 557 patients with endometriosis, of whom 343 had ovarian endometriomas and completed the assigned treatment and follow-up.
The researchers found that a continuous schedule correlated with lower recurrence rates for dysmenorrhea (risk ratio, 0.24; 95% confidence interval, 0.06 to 0.91; P = 0.04). There were nonsignificant between-group differences for chronic pelvic pain and dyspareunia. Compared with a cyclic schedule, a continuous oral contraceptive schedule correlated with a nonsignificant reduction of cyst recurrence rates (risk ratio, 0.54; 95% confidence interval, 0.28 to 1.05; P = 0.07).
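As a quick illustration of the statistic being reported, here is a minimal sketch of how a risk ratio and its 95% confidence interval are computed on the log scale; the counts below are hypothetical and are not the study's data.

import math

def risk_ratio(events_a, total_a, events_b, total_b):
    # RR is the ratio of the two event proportions
    rr = (events_a / total_a) / (events_b / total_b)
    # standard error of log(RR), then a Wald interval exponentiated back
    se = math.sqrt(1 / events_a - 1 / total_a + 1 / events_b - 1 / total_b)
    lo = math.exp(math.log(rr) - 1.96 * se)
    hi = math.exp(math.log(rr) + 1.96 * se)
    return rr, (lo, hi)

# hypothetical: 3/100 recurrences on a continuous schedule vs 12/100 cyclic
print(risk_ratio(3, 100, 12, 100))  # RR = 0.25 with a wide interval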
|
import numpy as np
import re

from treegp.experiments.code.datasets import predict_results_fname, timing_results_fname, gp_fname
from treegp.gp import GP

import scipy.sparse

model_list_fname = "models"
basedir = "experiments/models/"

class NoResultsError(Exception):
    pass

def extract_results(txt, prefix, require_times=True):
    for line in txt:
        if line.startswith(prefix):
            if (not require_times) or "times" in line:
                d = dict()
                for w in ("mean", "std", "min", "10th", "50th", "90th", "max"):
                    d[w] = float(re.search(r"%s ([\.\d]+)" % w, line).group(1))
                return d
    raise NoResultsError("could not find line with prefix %s" % prefix)

def parse_timings(timings_lines):
    sparse = extract_results(timings_lines, "sparse covar")
    hybrid = extract_results(timings_lines, "sparse covar spkernel")
    tree = extract_results(timings_lines, "tree: eps_abs")
    return sparse, hybrid, tree

with open(model_list_fname, 'r') as f:
    model_lines = f.readlines()

print_timings = False
print_fullness = True

for line in model_lines:
    dataset, model, tag = line.strip().split()
    accuracy_fname = predict_results_fname(dataset, model, tag)
    timing_fname = timing_results_fname(dataset, model, tag)
    trained_fname = gp_fname(dataset, model, tag=tag)
    if print_timings:
        try:
            with open(timing_fname, 'r') as f:
                timings_lines = f.readlines()
        except IOError:
            continue
        sparse, hybrid, tree = parse_timings(timings_lines)
        print dataset, model, sparse['mean'] * 1000, sparse['std'] * 1000, hybrid['mean'] * 1000, hybrid['std'] * 1000, tree['mean'] * 1000, tree['std'] * 1000
    else:
        with open(accuracy_fname, 'r') as f:
            acc_lines = f.readlines()
        msll = float(acc_lines[0].split()[1])
        smse = float(acc_lines[1].split()[1])
        if print_fullness:
            sgp = GP(fname=trained_fname, build_tree=False)
            if scipy.sparse.issparse(sgp.Kinv):
                fullness = float(len(sgp.Kinv.nonzero()[0])) / sgp.Kinv.shape[0]**2
            else:
                fullness = float(np.sum(np.abs(sgp.Kinv) > sgp.sparse_threshold)) / sgp.Kinv.shape[0]**2
            fullness *= 100.0
        else:
            fullness = -1
        print dataset, model, fullness, msll, smse
|
Rose Beef Tripe is sourced exclusively from USA raised beef, and harvested from the first stomach of the steer. The tripe is scalded, and then hand cut and packed in each can with whole milk. Beef Tripe is enjoyed by multiple generations in a variety of recipes. Enjoy it cut into strips and lightly fried with a spicy tomato sauce. Rose Beef Tripe is a Boone Brands product made in Sanford, NC.
Rose Beef Tripe is used by many to make Menudo, a hearty stew. One recipe for Menudo includes the following ingredients and cooking directions: Ingredients – 1 can of Rose Beef Tripe, 3 fresh cloves garlic (minced), 3 teaspoons salt, ½ cup red chili powder, 2 teaspoons oregano leaves, 1 tablespoon coarse black pepper, 1 teaspoon cumin powder, ½ small onion chopped, and 1 can hominy, yellow or white. Directions – Place Rose Beef Tripe into a pot of boiling water, just enough to cover the tripe. Add garlic and onions and salt. Cover and cook for about 1 hour. When the tripe is tender add the remaining ingredients including the hominy and cook about 15 to 20 minutes longer. Add more salt if needed. This is a spicy dish and it goes well with cilantro and a twist of lime. Corn tortillas are a good complement. Yield – 4 servings.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for Paxo."""
import unittest

from clint import arguments

from paxo.core import Paxo
from paxo.command import Command, cmd, define_command, Collection
from paxo.util import ExitStatus, is_win, is_lin, is_osx, args

class AbstractTestCase(unittest.TestCase):
    pass

class GeneralTestCase(AbstractTestCase):
    """Important things that shouldn't change across versions"""

    def test_exit_status(self):
        self.assertEqual(ExitStatus.OK, 0)
        self.assertEqual(ExitStatus.ERROR, 1)
        self.assertEqual(ExitStatus.ABORT, 2)
        self.assertEqual(ExitStatus.HELP, 3)
        self.assertEqual(ExitStatus.VERSION, 4)
        self.assertEqual(ExitStatus.UNSUPPORTED, 5)

    def test_operating_system(self):
        def fn(c):
            if c:
                return 1
            return 0
        self.assertTrue(sum(map(fn, [is_win, is_lin, is_osx])) <= 1)

    def test_arguments(self):
        self.assertTrue(isinstance(args, arguments.Args))

class PaxoTestCase(AbstractTestCase):
    """Paxo test cases."""

    def setUp(self):
        self.paxo = Paxo('paxo', 'a test paxo', '<do this>', '0.1')

    def tearDown(self):
        """Teardown."""
        del self.paxo

    def test_init(self):
        self.assertEqual(self.paxo.name, 'paxo')
        self.assertEqual(self.paxo.description, 'a test paxo')
        self.assertEqual(self.paxo.command_info, '<do this>')
        self.assertEqual(self.paxo.version, '0.1')
        # self.assertEqual(self.paxo.__class__, '')  # verify this later with Juan

    def test_info(self):
        pass

    def test_help(self):
        pass

class CommandTestCase(AbstractTestCase):
    def tearDown(self):
        Collection.clear_commands()

    def test_define_command(self):
        ret = define_command(name='test', fn=len, usage='test (<test_arg>)',
                             help='testing stuff')
        self.assertTrue(isinstance(ret, Command))
        self.assertEqual(len(Collection.list_commands()), 1)

class CommandManagerTestCase(AbstractTestCase):
    def setUp(self):
        self.testCommand = define_command(name='test', fn=len, usage='test (<test_arg>)',
                                          help='testing stuff')

    def tearDown(self):
        Collection.clear_commands()

    def test_cmd_decorator_command(self):
        @cmd()
        def hello(args):
            print('Hello World!')
        self.assertEqual(hello, Collection.lookup_command('hello').fn)

    def test_list_commands(self):
        self.assertEqual(len(Collection.list_commands()), len(Collection.COMMANDS))
        self.assertEqual(len(Collection.list_commands()), 1)

    def test_lookup_command(self):
        self.assertTrue(isinstance(Collection.lookup_command('test'), Command))
        self.assertEqual(Collection.lookup_command('test'), self.testCommand)

    def test_register_command(self):
        test = Command(name='test1', short=None, fn=len,
                       usage='test1 hi', help="testing stuff 1")
        Collection.register_command(test)
        self.assertEqual(test, Collection.lookup_command('test1'))

    def test_double_command(self):
        test = Command(name='test', short=None, fn=len,
                       usage='test1 hi', help="testing stuff 1")
        self.assertFalse(Collection.register_command(test))

class CrossPlatformTestCase(AbstractTestCase):
    pass

class ExecuteTestCase(AbstractTestCase):
    def setUp(self):
        self.paxo = Paxo('paxo', 'a test paxo', '<do this>', '0.1')

    def tearDown(self):
        del self.paxo

class TextTestCase(AbstractTestCase):
    pass

class StorageTestCase(AbstractTestCase):
    pass

class AutoStartTestCase(AbstractTestCase):
    pass

if __name__ == '__main__':
    unittest.main()
|
Hi, I'm new here, and I am jumping with eagerness to dive into the world of fall making.
I need to know the best ways to mount tubular crin to hair falls. I need them to be professional quality; I can't risk them sliding off.
I need to decide whether I keep tying them to hairbands, or start sewing them into strips of fabric.
My scalp is too sensitive for heavy stuff, so no rexlace for me, but it might work on the ones I want to sell.
Any suggestions what kind of knot to use?
Off to read tut now, thanks girls!
Rexlace doesn't actually add that much weight to it to be honest. I only ever use about 10-15 pieces per fall (30 strands total).
Just a simple knot does the trick!
Thanks for the knotting info sweetie!
That tutorial from Doctored Locks was really useful, actually. In case you didn't check it out, you should. I liked it. Thanks for posting it!
|
"""empty message
Revision ID: 3f82af559c8a
Revises: None
Create Date: 2015-10-16 03:37:42.100489
"""
# revision identifiers, used by Alembic.
revision = '3f82af559c8a'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('crew_battle',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('team1_name', sa.String(length=64), nullable=False),
sa.Column('team2_name', sa.String(length=64), nullable=False),
sa.Column('total_stocks', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table('crew_match',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('battle_id', sa.Integer(), nullable=False),
sa.Column('battle_match_index', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['battle_id'], ['crew_battle.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('match_result',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('match_id', sa.Integer(), nullable=False),
sa.Column('match_player_index', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=64), nullable=False),
sa.Column('character', sa.String(length=32), nullable=False),
sa.Column('initial_stocks', sa.Integer(), nullable=False),
sa.Column('final_stocks', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['match_id'], ['crew_match.id'], ),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('match_result')
op.drop_table('crew_match')
op.drop_table('crew_battle')
### end Alembic commands ###
|
Sixteen and seventeen year old high school baseball players who want to play summer baseball have to make a choice between American Legion baseball and travel baseball. And quite often there's some heavy recruiting from the American Legion coaches. Over the last few years legion coaches have seen a drop in the number of players interested in playing American Legion baseball. They now have to actively pursue players. Let's take a look at why American Legion baseball numbers are dwindling.
The little league baseball scene exploded about fifteen years ago for ten through fourteen year olds. World Series tournaments went from just a few organizations with eight or ten teams to many organizations with tournaments of 100 or more teams. And over the last six years this growth of teams and tournaments has carried over to the fifteen through eighteen year old age groups.
Previously the baseball choices for these high school age players were limited to a few AAU teams and American Legion teams. Now there are many teams called travel baseball teams. These teams do just what their name implies; they travel around the country playing in tournaments or showcases. Some sporting goods manufacturers help sponsor quite a few of these traveling teams.
With this growth of travel teams, college baseball coaches have found a new way to scout and recruit high school players. Instead of traveling all around the country to visit summer teams, they now have these traveling teams come play at their stadium. Many college coaches will set up a couple of tournaments every summer. They make a little money on the event and get to see hundreds of players without needing to travel.
For the players the exposure to college coaches is valuable. And they get to play on college baseball fields and visit college campuses.
Another reason behind the drop in players interested in playing legion baseball is other sports. It seems like every high school sport has a summer-long training program or other activity forcing young people to play only one sport. High school football and basketball players do not want to go lift weights and work out every morning, and then go play a baseball doubleheader that doesn't end until eleven pm. It's sad but true that many high school players are forced to pick one sport.
A few years back American Legion raised the age limit of legion players from eighteen to nineteen. This increase has helped teams keep their numbers up. Many freshmen in college like coming home and playing another year of baseball. And more recently American Legion has started promoting its Jr. Legion baseball program. They feel that the earlier they get players into the legion program the better. It's too early to know whether this feeder program will be a success or not.
So what should a high school baseball player do? Should he play American Legion baseball with his high school friends, or find a travel team to showcase his skills to many college coaches?
I can speak on this subject from both sides of the fence. I have a son who played four years of legion baseball and another who is playing travel baseball. American Legion baseball is an old and proud program. Players on the team all come from the same city or town. You wear an American Legion patch on your shoulder. Usually legion coaches stick with the program for a number of years. You play lots and lots of games, and your home games are played close by. And in general the cost is cheaper. But from my experience not many college coaches or recruiters come to legion games.
Travel teams are usually made up of players from many different high schools. They may practice a lot, and practice may be far away. Quite often travel teams have hired coaches. They may also travel three out of every four weekends. Many tournaments start on Wednesday or Thursday during the day, so a lot of car pooling becomes necessary. There may be many college coaches and professional scouts at their games. The cost of travel teams can be very high.
I think both kinds of teams are needed and should continue to exist. Travel baseball is not for everybody, nor can everybody afford the cost. On the other hand, if you or your son is good enough, there can usually be something worked out. Travel team coaches want to win, so they will find a way for good players to be on the team. If you are a good player it is worth your effort to find a travel team. The college and professional baseball exposure is substantially higher with travel teams.
But the American Legion program will continue to be an excellent choice for many high school players. Many professional and Division I players have come out of the legion program. There are just a little fewer coming from legion baseball now.
|
#!/usr/bin/python
import numpy as np
import argparse

parser = argparse.ArgumentParser(add_help=True)
parser.register('type', 'bool',
                lambda v: v.lower() in ("yes", "true", "t", "1"))
parser.add_argument("-db", type="bool", action="store", default=False)
parser.add_argument("-qoi_dim", type=int, action="store", default=10)
parser.add_argument("-qoi_func", type=int, action="store", default=1)
args, unknowns = parser.parse_known_args()

if args.qoi_dim:
    # TOL is left as a placeholder so it can be filled in per command below
    base = "\
mimc_run.py -mimc_TOL {TOL} -qoi_seed 0 -mimc_min_dim {qoi_dim} -qoi_dim {qoi_dim} \
-mimc_M0 1 -mimc_moments 1 -mimc_bayesian False -qoi_func {qoi_func} \
".format(TOL="{TOL}",
         qoi_dim=args.qoi_dim,
         qoi_func=args.qoi_func)
else:
    assert False

base += " ".join(unknowns)

if not args.db:
    cmd_single = "python " + base + " -mimc_verbose 10 -db False "
    print(cmd_single.format(TOL=0.001))
else:
    cmd_multi = "python " + base + " -mimc_verbose 0 -db True -db_tag {tag} "
    print(cmd_multi.format(tag="sc_d{:d}_fn{:.2g}".format(args.qoi_dim, args.qoi_func), TOL=1e-10))
|
The retail event of the year is here again!
Start off 2015 by visiting Prism in Booth #2675 at the Big Show. If you’re interested in retail analytics, visual merchandising, or simply better managing your brick-and-mortar business, we’ll be there to show you how. You can also find us in Axis Communications booth #3133, or shoot us a note and we can set up a time to chat.
Thought leaders from Cisco will examine how the next generation of retail technologies (like Prism!) will deliver brand new analytics and in-store insights.
The movement to improve physical stores’ competitive stance against online retail is looking more and more like an overall offline transformation. Executives from Levi’s, Walmart, and more will examine how brick-and-mortar is being repositioned at the center of the customer experience.
Luxury brands often serve as trendsetters — not only for their customers, but for their industry. This session takes a look at where luxury retail is thriving, where it’s adapting, and what new changes mean in the long run.
And of course, even more than the presentations, we’re looking forward to seeing you there!
|
#!/usr/bin/env python3
# author: @netmanchris
"""
Copyright 2016 Hewlett Packard Enterprise Development LP.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

"""This file will GET the contents of the HPE IMC Network Assets module and dump them into a CSV file called
icmp_devs.csv, where each line of the CSV file represents one physical or logical asset as discovered by the HPE IMC
platform.

This library uses the pyhpeimc python wrapper around the IMC RESTful API to automatically push the new performance tasks
with minimal effort on the part of the user."""

import csv

from pyhpeimc.auth import *
from pyhpeimc.plat.device import *
from pyhpeimc.plat.termaccess import *

auth = IMCAuth("http://", "10.196.252.1", "8080", "admin", "admin")

all_devs = get_all_devs(auth.creds, auth.url)

def filter_icmp(all_devs):
    # Keep only devices in the ICMP-managed category (categoryId '9').
    icmp_devs = []
    for dev in all_devs:
        if dev['categoryId'] == '9':
            icmp_devs.append(dev)
    return icmp_devs

icmp_devs = filter_icmp(all_devs)

for host in icmp_devs:
    locate = get_real_time_locate(host['ip'], auth.creds, auth.url)
    if type(locate) is list:
        if 'deviceIp' in locate[0]:
            int_details = get_interface_details(locate[0]['deviceId'], locate[0]['ifIndex'], auth.creds, auth.url)
            dev_details = get_dev_details(locate[0]['deviceIp'], auth.creds, auth.url)
            host['SwitchIp'] = locate[0]['deviceIp']
            host['SwitchInt'] = locate[0]['ifDesc']
            host['intDescription'] = int_details['ifAlias']
            host['SwitchName'] = dev_details['label']
            host['SwitchType'] = dev_details['typeName']
            host['SwitchLocation'] = dev_details['location']
            host['SwitchContact'] = dev_details['contact']
        else:
            host['SwitchIp'] = 'Unknown'
            host['SwitchInt'] = 'Unknown'
    # default any fields the location lookup did not fill in, so the
    # comprehension below cannot raise a KeyError
    if 'SwitchIp' not in host:
        host['SwitchIp'] = "Unknown"
    if 'SwitchInt' not in host:
        host['SwitchInt'] = "Unknown"
    if 'mac' not in host:
        host['mac'] = "Unknown"
    if 'intDescription' not in host:
        host['intDescription'] = "Unknown"
    if 'SwitchName' not in host:
        host['SwitchName'] = "Unknown"
    if 'SwitchType' not in host:
        host['SwitchType'] = "Unknown"
    if 'SwitchLocation' not in host:
        host['SwitchLocation'] = "Unknown"
    if 'SwitchContact' not in host:
        host['SwitchContact'] = "Unknown"

final_list = [{'hostLabel': i['label'],
               'hostIp': i['ip'],
               'hostMask': i['mask'],
               'SwitchIntDesc': i['intDescription'],
               'SwitchName': i['SwitchName'],
               'SwitchType': i['SwitchType'],
               'SwitchLocation': i['SwitchLocation'],
               'SwitchContact': i['SwitchContact'],
               'hostMac': i['mac'],
               'SwitchIp': i['SwitchIp'],
               'SwitchInt': i['SwitchInt']} for i in icmp_devs]

# use the row with the most fields to build the CSV header
keys = final_list[0].keys()
for i in final_list:
    if len(i) >= len(final_list[0].keys()):
        keys = i.keys()

with open('icmp_devs.csv', 'w') as file:
    dict_writer = csv.DictWriter(file, keys)
    dict_writer.writeheader()
    dict_writer.writerows(final_list)
|
This report provides a government-specific perspective on Deloitte’s 2018 Technology Trends report. Our aim is to provide a government lens on eight trends that are shaping strategic and operational transformations and redefining IT’s role within the enterprise.
We acknowledge that government organizations are different, broad, and complex. Our scoring of organizational readiness and trend relevance is designed to represent overall patterns. We also include real-world examples and key considerations that organizations can use to incorporate these technologies into their enterprise. As always, we hope that this perspective provides a better understanding of these technological forces so that you can build a symphonic enterprise of your own.
We looked at each trend and assigned a value from one (low) to five (high) based on the trend's relevance to government and the government's readiness to adopt it.
Relevance: How impactful would it be if the government adopted the trend?
Readiness: How ready is the government to adopt the trend?
Incremental adaptation to technical change and disruption is no longer enough. Organizations must take positive steps to keep pace with innovation. To do so, first consider modernizing IT infrastructure to improve efficiency and deliver services in new ways. Next, streamline the processes of IT budgeting, organization, and delivery to help drive mission success.
To meet increasing mission needs, US Customs and Border Protection (CBP) is radically changing. To innovate, they have streamlined processes to identify opportunities, generate ideas, and incubate and pilot new solutions. To support rapid, incremental modernization, they are adopting cloud-based platforms, agile/DevOps, and a "fail fast" mentality.
Modernize IT infrastructure. To support the transition to future innovation, begin by making sure your systems can support it.
Update your talent. New technology and processes may require existing workers to upskill if they want to thrive. Offer training to existing workers, and consider supplementing them with outside talent.
Automation, artificial intelligence (AI), and cognitive technologies are changing the way work gets done. Organizations should redesign systems and talent to accommodate the increased use of cognitive agents, bots, and other AI-driven technologies—the no-collar workforce. Instead of machines replacing people, they will work together as an augmented workforce. Organizations must plan through the management of this new model.
Ohio, Texas, and other states are looking at robotic process automation to develop "bots." These new "employees," powered by AI technology, will take on repetitive administrative tasks, simplify work, and allow staff to devote brainpower to higher-level, strategic activities.
Pilot automation today. Which repetitive activities contain no uniquely human work and can be automated?
Partner with others. Others within your organization or related organizations may already be up to speed with "bots."
A combination of automation and machine learning is making it possible to "free" data from the siloed systems that create and hold it. New technologies give organizations new ways to manage interrelationships, storage, and security of enterprise data, while dramatically improving both availability and security.
The US Census Bureau is looking to revolutionize its management of data by moving to a modern “enterprise data lake.” The technology will support the Bureau's goal of secure, scalable data science, and will help enable survey processing, survey analysis, research, data linking and predictive analytics.
Pay data debt. How much money and time are you spending on data—and where?
Digital innovation has redefined customer experiences—and today, leading organizations are expanding those innovations into back-office and mid-office systems. Starting with areas like finance and supply chain, technologies such as blockchain, machine intelligence, and the Internet of Things are presenting opportunities to modernize the back office and support better constituent-facing innovation and growth.
Informed Delivery® by USPS® creates a digital reflection of the supply chain, allowing users to digitally preview their mail and manage their packages via email notification, online dashboard, or mobile app. The service provides consumers with the convenience of seeing what is coming to their physical mailbox through digital channels.
Learn from others. Other leading organizations may already have a better understanding of the trend's potential. You can learn from their successes and failures.
Plan it out. Create a transformation roadmap that starts with use cases that have proven successful in other organizations.
The combination of augmented reality (AR) and virtual reality (VR) is beginning to move beyond proofs of concept to enterprise implementation. Government use cases might include training for complex tasks and group collaboration. Organizations should consider how to integrate these technologies into their existing infrastructures and get advice on navigating evolving technology and governance standards.
New York City is developing a virtual/augmented reality (AR/VR) hub that will prototype innovative solutions to help solve the city’s issues, develop a citywide AR/VR talent pipeline, and foster job growth. The goal is to create new startups and help existing companies grow.
Take a look around. Consider the use cases other organizations are piloting or moving to production.
Get started. Don’t hold out for "perfect." The imperfect technology available right now still has plenty of value to deliver.
Blockchain technologies are moving past the exploration stage and finding real-world adoption. Organizations are increasing the scope, scale, and complexity of their blockchain applications, and some organizations with multiple blockchains are piloting the most effective ways to integrate them. Organizations should be identifying use cases to pilot so they can learn about the new technologies and their implications.
The US Department of Treasury is investigating the use of blockchain to digitally trace the movement of physical assets such as smartphones and computers. This replaces a manual, months-long process and will enable them to have an instantaneous view of their inventory, improving efficiency, traceability, and auditability in asset management.
Get started. Evaluate potential use cases and get started with proofs of concept to understand the implications of the technology.
Look beyond the technology. Blockchain has operational, governance, and talent implications that grow much more complex as organizations move from pilots to commercialization.
Application programming interfaces (APIs) have long been key building blocks to system and application integration, interoperability, and modularity. Now organizations are making data more accessible by treating APIs not as data integration mechanisms, but as products. Moving to a product mentality requires new technology and talent, but it can lead to improved agility, scalability, and speed.
Michigan's Department of Technology, Management, and Budget piloted an API layer on top of existing enterprise services to improve data accessibility and integration across its Health and Human Services organizations.
Create a culture. Reward those who embrace the reuse of APIs, and celebrate the wins within the organization that drive value.
Develop KPIs. Use performance indicators to determine how APIs support your organization's goals to improve the overall API impact.
Artificial intelligence, quantum encryption, and other exponential technologies may seem to be years away, but technology moves fast. Organizations should start developing partnerships and capabilities to research, vet, incubate, and scale these technologies for when they arrive. Interim steps taken now can lay the groundwork for additional measures and help prepare for potential risks as the technologies emerge.
The Department of Homeland Security Science & Technology Directorate conducts technology scouting, horizon scanning, and market analysis to identify, recommend, and report on emerging technologies and start-ups in the marketplace that may apply to the department’s mission.
Prepare now. Given the vulnerabilities to quantum hacking, consider bolstering encryption and other technologies to mitigate risks.
Collaborate with partners. Seek opportunities to collaborate with government and private entities to explore both the potential and risks exponential technologies present.
As with each edition of our annual Deloitte Insights Tech Trends report, this is part of an ongoing discussion in an ever-evolving field. Our goal is to provide you with pointers to better engage with constituents, make informed decisions, and do more with less. We hope these ideas will help inform and guide your thinking as you explore opportunities to innovate and improve.
|
#!/usr/bin/env python3
# flake8: noqa

import os
import sys
import time
import random
import threading

sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), ".."))
from panda import Panda, PandaSerial  # noqa: E402

INIT_GPS_BAUD = 9600
GPS_BAUD = 460800

def connect():
    pandas = Panda.list()
    print(pandas)

    # make sure two pandas are connected
    if len(pandas) != 2:
        print("Connect white and grey/black panda to run this test!")
        assert False

    # connect
    pandas[0] = Panda(pandas[0])
    pandas[1] = Panda(pandas[1])

    white_panda = None
    gps_panda = None

    # find out which one is white (for spamming the CAN buses)
    if pandas[0].is_white() and not pandas[1].is_white():
        white_panda = pandas[0]
        gps_panda = pandas[1]
    elif not pandas[0].is_white() and pandas[1].is_white():
        white_panda = pandas[1]
        gps_panda = pandas[0]
    else:
        print("Connect white and grey/black panda to run this test!")
        assert False
    return white_panda, gps_panda

def spam_buses_thread(panda):
    try:
        panda.set_safety_mode(Panda.SAFETY_ALLOUTPUT)
        while True:
            at = random.randint(1, 2000)
            st = (b"test" + os.urandom(10))[0:8]
            bus = random.randint(0, 2)
            panda.can_send(at, st, bus)
    except Exception as e:
        print(e)

def read_can_thread(panda):
    try:
        while True:
            panda.can_recv()
    except Exception as e:
        print(e)

def init_gps(panda):
    def add_nmea_checksum(msg):
        # NMEA checksum: XOR of every character after the leading '$'
        d = msg[1:]
        cs = 0
        for i in d:
            cs ^= ord(i)
        return msg + "*%02X" % cs

    ser = PandaSerial(panda, 1, INIT_GPS_BAUD)

    # Power cycle the gps by toggling reset
    print("Resetting GPS")
    panda.set_esp_power(0)
    time.sleep(0.5)
    panda.set_esp_power(1)
    time.sleep(0.5)

    # Upping baud rate
    print("Upping GPS baud rate")
    msg = str.encode(add_nmea_checksum("$PUBX,41,1,0007,0003,%d,0" % GPS_BAUD) + "\r\n")
    ser.write(msg)
    time.sleep(1)  # needs a wait for it to actually send

    # Reconnecting with the correct baud
    ser = PandaSerial(panda, 1, GPS_BAUD)

    # Sending all config messages boardd sends
    print("Sending config")
    ser.write(b"\xB5\x62\x06\x00\x14\x00\x03\xFF\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x01\x00\x00\x00\x00\x00\x1E\x7F")
    ser.write(b"\xB5\x62\x06\x3E\x00\x00\x44\xD2")
    ser.write(b"\xB5\x62\x06\x00\x14\x00\x00\xFF\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x19\x35")
    ser.write(b"\xB5\x62\x06\x00\x14\x00\x01\x00\x00\x00\xC0\x08\x00\x00\x00\x08\x07\x00\x01\x00\x01\x00\x00\x00\x00\x00\xF4\x80")
    ser.write(b"\xB5\x62\x06\x00\x14\x00\x04\xFF\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1D\x85")
    ser.write(b"\xB5\x62\x06\x00\x00\x00\x06\x18")
    ser.write(b"\xB5\x62\x06\x00\x01\x00\x01\x08\x22")
    ser.write(b"\xB5\x62\x06\x00\x01\x00\x02\x09\x23")
    ser.write(b"\xB5\x62\x06\x00\x01\x00\x03\x0A\x24")
    ser.write(b"\xB5\x62\x06\x08\x06\x00\x64\x00\x01\x00\x00\x00\x79\x10")
    ser.write(b"\xB5\x62\x06\x24\x24\x00\x05\x00\x04\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x5A\x63")
    ser.write(b"\xB5\x62\x06\x1E\x14\x00\x00\x00\x00\x00\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3C\x37")
    ser.write(b"\xB5\x62\x06\x24\x00\x00\x2A\x84")
    ser.write(b"\xB5\x62\x06\x23\x00\x00\x29\x81")
    ser.write(b"\xB5\x62\x06\x1E\x00\x00\x24\x72")
    ser.write(b"\xB5\x62\x06\x01\x03\x00\x01\x07\x01\x13\x51")
    ser.write(b"\xB5\x62\x06\x01\x03\x00\x02\x15\x01\x22\x70")
    ser.write(b"\xB5\x62\x06\x01\x03\x00\x02\x13\x01\x20\x6C")

    print("Initialized GPS")

received_messages = 0
received_bytes = 0
send_something = False

def gps_read_thread(panda):
    global received_messages, received_bytes, send_something
    ser = PandaSerial(panda, 1, GPS_BAUD)
    while True:
        ret = ser.read(1024)
        time.sleep(0.001)
        if len(ret):
            received_messages += 1
            received_bytes += len(ret)
        if send_something:
            ser.write(b"test")  # bytes, not str, to match the serial interface
            send_something = False

CHECK_PERIOD = 5
MIN_BYTES = 10000
MAX_BYTES = 50000

min_failures = 0
max_failures = 0

if __name__ == "__main__":
    white_panda, gps_panda = connect()

    # Start spamming the CAN buses with the white panda. Also read the messages to add load on the GPS panda
    threading.Thread(target=spam_buses_thread, args=(white_panda,)).start()
    threading.Thread(target=read_can_thread, args=(gps_panda,)).start()

    # Start GPS checking
    init_gps(gps_panda)
    read_thread = threading.Thread(target=gps_read_thread, args=(gps_panda,))
    read_thread.start()

    while True:
        time.sleep(CHECK_PERIOD)
        if received_bytes < MIN_BYTES:
            print("Panda is not sending out enough data! Got " + str(received_messages) + " (" + str(received_bytes) + "B) in the last " + str(CHECK_PERIOD) + " seconds")
            send_something = True
            min_failures += 1
        elif received_bytes > MAX_BYTES:
            print("Panda is sending out too much data! Got " + str(received_messages) + " (" + str(received_bytes) + "B) in the last " + str(CHECK_PERIOD) + " seconds")
            print("Probably not on the right baud rate, got reset somehow? Resetting...")
            max_failures += 1
            init_gps(gps_panda)
        else:
            print("Got " + str(received_messages) + " (" + str(received_bytes) + "B) messages in the last " + str(CHECK_PERIOD) + " seconds.")
        if min_failures > 0:
            print("Total min failures: ", min_failures)
        if max_failures > 0:
            print("Total max failures: ", max_failures)
        received_messages = 0
        received_bytes = 0
|
Volume II covers: JG 53 Pik-As, JG 54 Grünherz, JG 77 Herz-As, JG 300, JG 301, JG 302 Wilde Sau, and JG 400.
I was stationed at Ft. Hood with B Co., 2/158th Aviation, 6th Cav Bde (AC) while the photographers took the close-up pictures for this book. It was fun escorting them around for the photographs, and it's a great stroll down memory lane to flip through them nearly twenty years later.
Some of the diagrams pulled from the operator's manual are small and have TINY text. The photos are great, and the level of detail is such that you may want a magnifying glass to take it all in. Overall, a great book.
After the dust of World War II had settled, the military position of the United Kingdom was far from simple. It was of course allied to the USA and part of NATO, but it was at odds with the former in preserving an Empire, and the two nations also had competing oil interests in the Middle East. The UK's engagement in war after 1945 was therefore a strange mix, ranging from homeland defense, through insular actions in the colonies or protectorates to maintain empire, to playing a major role in confronting the USSR.
The stage was now set for the soon-to-be titanic struggle for air superiority over the Yalu. THE COMBATANTS: USAF FIGHTER PILOT TRAINING. The majority of American pilots making up the 4th FIW were combat-experienced World War II veterans who had flown P-38s, P-47s and P-51s against Hitler's Luftwaffe in Europe and/or the Imperial Japanese air arms in the Pacific only five years before. However, the young lieutenants in the squadrons – as well as those who would be replacing them as the conflict dragged on – were recent graduates of the USAF's new post-World War II pilot training program.
[Extraction residue from a cockpit instrument legend: warning light, oxygen pressure gauge, emergency flap air pressure gauge, ultra-violet light rheostats (x2), emergency landing gear pressure gauge, gun cocking buttons for the inner and outer 23mm cannon and the 37mm cannon, ultra-violet cockpit lights (both sides of cockpit), air emergency valve, EKSR-46 signal discharger firing buttons, cockpit pressurization selector, aileron booster system pressure gauge.] ...cripple, and to clear the runway and alert the meat wagon and fire trucks.
Instruction manual he wrote as the F-86 training squadron (3596th Combat Crew Training Squadron) commander at the Nellis “Fighter School” in 1955. For attacking an enemy formation with a four-ship, “the lead F-86 element should attack the enemy element farthest back [in their formation]. As the second enemy element breaks into the attack, they will probably go down so that they can get help from the lead enemy element. The lead F-86 element then switches the attack, if possible, to the lead enemy element, in which case the second F-86 element stays high and fast and watches for the possible return of the enemy second element.
|
import gym
import random
import unittest
import ray
from ray.rllib.agents.pg import PGTrainer
from ray.rllib.agents.pg.pg_tf_policy import PGTFPolicy
from ray.rllib.agents.dqn.dqn_policy import DQNTFPolicy
from ray.rllib.optimizers import (SyncSamplesOptimizer, SyncReplayOptimizer,
AsyncGradientsOptimizer)
from ray.rllib.tests.test_rollout_worker import (MockEnv, MockEnv2, MockPolicy)
from ray.rllib.evaluation.rollout_worker import RolloutWorker
from ray.rllib.policy.tests.test_policy import TestPolicy
from ray.rllib.evaluation.metrics import collect_metrics
from ray.rllib.evaluation.worker_set import WorkerSet
from ray.rllib.env.base_env import _MultiAgentEnvToBaseEnv
from ray.rllib.env.multi_agent_env import MultiAgentEnv
from ray.tune.registry import register_env
def one_hot(i, n):
out = [0.0] * n
out[i] = 1.0
return out
class BasicMultiAgent(MultiAgentEnv):
"""Env of N independent agents, each of which exits after 25 steps."""
def __init__(self, num):
self.agents = [MockEnv(25) for _ in range(num)]
self.dones = set()
self.observation_space = gym.spaces.Discrete(2)
self.action_space = gym.spaces.Discrete(2)
self.resetted = False
def reset(self):
self.resetted = True
self.dones = set()
return {i: a.reset() for i, a in enumerate(self.agents)}
def step(self, action_dict):
obs, rew, done, info = {}, {}, {}, {}
for i, action in action_dict.items():
obs[i], rew[i], done[i], info[i] = self.agents[i].step(action)
if done[i]:
self.dones.add(i)
done["__all__"] = len(self.dones) == len(self.agents)
return obs, rew, done, info
class EarlyDoneMultiAgent(MultiAgentEnv):
"""Env for testing when the env terminates (after agent 0 does)."""
def __init__(self):
self.agents = [MockEnv(3), MockEnv(5)]
self.dones = set()
self.last_obs = {}
self.last_rew = {}
self.last_done = {}
self.last_info = {}
self.i = 0
self.observation_space = gym.spaces.Discrete(10)
self.action_space = gym.spaces.Discrete(2)
def reset(self):
self.dones = set()
self.last_obs = {}
self.last_rew = {}
self.last_done = {}
self.last_info = {}
self.i = 0
for i, a in enumerate(self.agents):
self.last_obs[i] = a.reset()
self.last_rew[i] = None
self.last_done[i] = False
self.last_info[i] = {}
obs_dict = {self.i: self.last_obs[self.i]}
self.i = (self.i + 1) % len(self.agents)
return obs_dict
def step(self, action_dict):
assert len(self.dones) != len(self.agents)
for i, action in action_dict.items():
(self.last_obs[i], self.last_rew[i], self.last_done[i],
self.last_info[i]) = self.agents[i].step(action)
obs = {self.i: self.last_obs[self.i]}
rew = {self.i: self.last_rew[self.i]}
done = {self.i: self.last_done[self.i]}
info = {self.i: self.last_info[self.i]}
if done[self.i]:
rew[self.i] = 0
self.dones.add(self.i)
self.i = (self.i + 1) % len(self.agents)
done["__all__"] = len(self.dones) == len(self.agents) - 1
return obs, rew, done, info
class RoundRobinMultiAgent(MultiAgentEnv):
"""Env of N independent agents, each of which exits after 5 steps.
On each step() of the env, only one agent takes an action."""
def __init__(self, num, increment_obs=False):
if increment_obs:
# Observations are 0, 1, 2, 3... etc. as time advances
self.agents = [MockEnv2(5) for _ in range(num)]
else:
# Observations are all zeros
self.agents = [MockEnv(5) for _ in range(num)]
self.dones = set()
self.last_obs = {}
self.last_rew = {}
self.last_done = {}
self.last_info = {}
self.i = 0
self.num = num
self.observation_space = gym.spaces.Discrete(10)
self.action_space = gym.spaces.Discrete(2)
def reset(self):
self.dones = set()
self.last_obs = {}
self.last_rew = {}
self.last_done = {}
self.last_info = {}
self.i = 0
for i, a in enumerate(self.agents):
self.last_obs[i] = a.reset()
self.last_rew[i] = None
self.last_done[i] = False
self.last_info[i] = {}
obs_dict = {self.i: self.last_obs[self.i]}
self.i = (self.i + 1) % self.num
return obs_dict
def step(self, action_dict):
assert len(self.dones) != len(self.agents)
for i, action in action_dict.items():
(self.last_obs[i], self.last_rew[i], self.last_done[i],
self.last_info[i]) = self.agents[i].step(action)
obs = {self.i: self.last_obs[self.i]}
rew = {self.i: self.last_rew[self.i]}
done = {self.i: self.last_done[self.i]}
info = {self.i: self.last_info[self.i]}
if done[self.i]:
rew[self.i] = 0
self.dones.add(self.i)
self.i = (self.i + 1) % self.num
done["__all__"] = len(self.dones) == len(self.agents)
return obs, rew, done, info
def make_multiagent(env_name):
class MultiEnv(MultiAgentEnv):
def __init__(self, num):
self.agents = [gym.make(env_name) for _ in range(num)]
self.dones = set()
self.observation_space = self.agents[0].observation_space
self.action_space = self.agents[0].action_space
def reset(self):
self.dones = set()
return {i: a.reset() for i, a in enumerate(self.agents)}
def step(self, action_dict):
obs, rew, done, info = {}, {}, {}, {}
for i, action in action_dict.items():
obs[i], rew[i], done[i], info[i] = self.agents[i].step(action)
if done[i]:
self.dones.add(i)
done["__all__"] = len(self.dones) == len(self.agents)
return obs, rew, done, info
return MultiEnv
MultiCartpole = make_multiagent("CartPole-v0")
MultiMountainCar = make_multiagent("MountainCarContinuous-v0")
class TestMultiAgentEnv(unittest.TestCase):
def testBasicMock(self):
env = BasicMultiAgent(4)
obs = env.reset()
self.assertEqual(obs, {0: 0, 1: 0, 2: 0, 3: 0})
for _ in range(24):
obs, rew, done, info = env.step({0: 0, 1: 0, 2: 0, 3: 0})
self.assertEqual(obs, {0: 0, 1: 0, 2: 0, 3: 0})
self.assertEqual(rew, {0: 1, 1: 1, 2: 1, 3: 1})
self.assertEqual(done, {
0: False,
1: False,
2: False,
3: False,
"__all__": False
})
obs, rew, done, info = env.step({0: 0, 1: 0, 2: 0, 3: 0})
self.assertEqual(done, {
0: True,
1: True,
2: True,
3: True,
"__all__": True
})
def testRoundRobinMock(self):
env = RoundRobinMultiAgent(2)
obs = env.reset()
self.assertEqual(obs, {0: 0})
for _ in range(5):
obs, rew, done, info = env.step({0: 0})
self.assertEqual(obs, {1: 0})
self.assertEqual(done["__all__"], False)
obs, rew, done, info = env.step({1: 0})
self.assertEqual(obs, {0: 0})
self.assertEqual(done["__all__"], False)
obs, rew, done, info = env.step({0: 0})
self.assertEqual(done["__all__"], True)
def testNoResetUntilPoll(self):
env = _MultiAgentEnvToBaseEnv(lambda v: BasicMultiAgent(2), [], 1)
self.assertFalse(env.get_unwrapped()[0].resetted)
env.poll()
self.assertTrue(env.get_unwrapped()[0].resetted)
def testVectorizeBasic(self):
env = _MultiAgentEnvToBaseEnv(lambda v: BasicMultiAgent(2), [], 2)
obs, rew, dones, _, _ = env.poll()
self.assertEqual(obs, {0: {0: 0, 1: 0}, 1: {0: 0, 1: 0}})
self.assertEqual(rew, {0: {0: None, 1: None}, 1: {0: None, 1: None}})
self.assertEqual(
dones, {
0: {
0: False,
1: False,
"__all__": False
},
1: {
0: False,
1: False,
"__all__": False
}
})
for _ in range(24):
env.send_actions({0: {0: 0, 1: 0}, 1: {0: 0, 1: 0}})
obs, rew, dones, _, _ = env.poll()
self.assertEqual(obs, {0: {0: 0, 1: 0}, 1: {0: 0, 1: 0}})
self.assertEqual(rew, {0: {0: 1, 1: 1}, 1: {0: 1, 1: 1}})
self.assertEqual(
dones, {
0: {
0: False,
1: False,
"__all__": False
},
1: {
0: False,
1: False,
"__all__": False
}
})
env.send_actions({0: {0: 0, 1: 0}, 1: {0: 0, 1: 0}})
obs, rew, dones, _, _ = env.poll()
self.assertEqual(
dones, {
0: {
0: True,
1: True,
"__all__": True
},
1: {
0: True,
1: True,
"__all__": True
}
})
# Reset processing
self.assertRaises(
ValueError, lambda: env.send_actions({
0: {
0: 0,
1: 0
},
1: {
0: 0,
1: 0
}
}))
self.assertEqual(env.try_reset(0), {0: 0, 1: 0})
self.assertEqual(env.try_reset(1), {0: 0, 1: 0})
env.send_actions({0: {0: 0, 1: 0}, 1: {0: 0, 1: 0}})
obs, rew, dones, _, _ = env.poll()
self.assertEqual(obs, {0: {0: 0, 1: 0}, 1: {0: 0, 1: 0}})
self.assertEqual(rew, {0: {0: 1, 1: 1}, 1: {0: 1, 1: 1}})
self.assertEqual(
dones, {
0: {
0: False,
1: False,
"__all__": False
},
1: {
0: False,
1: False,
"__all__": False
}
})
def testVectorizeRoundRobin(self):
env = _MultiAgentEnvToBaseEnv(lambda v: RoundRobinMultiAgent(2), [], 2)
obs, rew, dones, _, _ = env.poll()
self.assertEqual(obs, {0: {0: 0}, 1: {0: 0}})
self.assertEqual(rew, {0: {0: None}, 1: {0: None}})
env.send_actions({0: {0: 0}, 1: {0: 0}})
obs, rew, dones, _, _ = env.poll()
self.assertEqual(obs, {0: {1: 0}, 1: {1: 0}})
env.send_actions({0: {1: 0}, 1: {1: 0}})
obs, rew, dones, _, _ = env.poll()
self.assertEqual(obs, {0: {0: 0}, 1: {0: 0}})
def testMultiAgentSample(self):
act_space = gym.spaces.Discrete(2)
obs_space = gym.spaces.Discrete(2)
ev = RolloutWorker(
env_creator=lambda _: BasicMultiAgent(5),
policy={
"p0": (MockPolicy, obs_space, act_space, {}),
"p1": (MockPolicy, obs_space, act_space, {}),
},
policy_mapping_fn=lambda agent_id: "p{}".format(agent_id % 2),
batch_steps=50)
batch = ev.sample()
self.assertEqual(batch.count, 50)
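        # With policy_mapping_fn mapping agent_id % 2, agents 0/2/4 feed "p0"
        # and agents 1/3 feed "p1", so 50 env steps over 5 agents yield 150
        # and 100 per-policy transitions respectively.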
self.assertEqual(batch.policy_batches["p0"].count, 150)
self.assertEqual(batch.policy_batches["p1"].count, 100)
self.assertEqual(batch.policy_batches["p0"]["t"].tolist(),
list(range(25)) * 6)
def testMultiAgentSampleSyncRemote(self):
act_space = gym.spaces.Discrete(2)
obs_space = gym.spaces.Discrete(2)
ev = RolloutWorker(
env_creator=lambda _: BasicMultiAgent(5),
policy={
"p0": (MockPolicy, obs_space, act_space, {}),
"p1": (MockPolicy, obs_space, act_space, {}),
},
policy_mapping_fn=lambda agent_id: "p{}".format(agent_id % 2),
batch_steps=50,
num_envs=4,
remote_worker_envs=True,
remote_env_batch_wait_ms=99999999)
batch = ev.sample()
self.assertEqual(batch.count, 200)
def testMultiAgentSampleAsyncRemote(self):
act_space = gym.spaces.Discrete(2)
obs_space = gym.spaces.Discrete(2)
ev = RolloutWorker(
env_creator=lambda _: BasicMultiAgent(5),
policy={
"p0": (MockPolicy, obs_space, act_space, {}),
"p1": (MockPolicy, obs_space, act_space, {}),
},
policy_mapping_fn=lambda agent_id: "p{}".format(agent_id % 2),
batch_steps=50,
num_envs=4,
remote_worker_envs=True)
batch = ev.sample()
self.assertEqual(batch.count, 200)
def testMultiAgentSampleWithHorizon(self):
act_space = gym.spaces.Discrete(2)
obs_space = gym.spaces.Discrete(2)
ev = RolloutWorker(
env_creator=lambda _: BasicMultiAgent(5),
policy={
"p0": (MockPolicy, obs_space, act_space, {}),
"p1": (MockPolicy, obs_space, act_space, {}),
},
policy_mapping_fn=lambda agent_id: "p{}".format(agent_id % 2),
episode_horizon=10, # test with episode horizon set
batch_steps=50)
batch = ev.sample()
self.assertEqual(batch.count, 50)
def testSampleFromEarlyDoneEnv(self):
act_space = gym.spaces.Discrete(2)
obs_space = gym.spaces.Discrete(2)
ev = RolloutWorker(
env_creator=lambda _: EarlyDoneMultiAgent(),
policy={
"p0": (MockPolicy, obs_space, act_space, {}),
"p1": (MockPolicy, obs_space, act_space, {}),
},
policy_mapping_fn=lambda agent_id: "p{}".format(agent_id % 2),
batch_mode="complete_episodes",
batch_steps=1)
self.assertRaisesRegexp(ValueError,
".*don't have a last observation.*",
lambda: ev.sample())
def testMultiAgentSampleRoundRobin(self):
act_space = gym.spaces.Discrete(2)
obs_space = gym.spaces.Discrete(10)
ev = RolloutWorker(
env_creator=lambda _: RoundRobinMultiAgent(5, increment_obs=True),
policy={
"p0": (MockPolicy, obs_space, act_space, {}),
},
policy_mapping_fn=lambda agent_id: "p0",
batch_steps=50)
batch = ev.sample()
self.assertEqual(batch.count, 50)
        # since agents are introduced into the env in round-robin fashion,
        # some of the env steps don't count as proper transitions
self.assertEqual(batch.policy_batches["p0"].count, 42)
self.assertEqual(batch.policy_batches["p0"]["obs"].tolist()[:10], [
one_hot(0, 10),
one_hot(1, 10),
one_hot(2, 10),
one_hot(3, 10),
one_hot(4, 10),
] * 2)
self.assertEqual(batch.policy_batches["p0"]["new_obs"].tolist()[:10], [
one_hot(1, 10),
one_hot(2, 10),
one_hot(3, 10),
one_hot(4, 10),
one_hot(5, 10),
] * 2)
self.assertEqual(batch.policy_batches["p0"]["rewards"].tolist()[:10],
[100, 100, 100, 100, 0] * 2)
self.assertEqual(batch.policy_batches["p0"]["dones"].tolist()[:10],
[False, False, False, False, True] * 2)
self.assertEqual(batch.policy_batches["p0"]["t"].tolist()[:10],
[4, 9, 14, 19, 24, 5, 10, 15, 20, 25])
def test_custom_rnn_state_values(self):
h = {"some": {"arbitrary": "structure", "here": [1, 2, 3]}}
class StatefulPolicy(TestPolicy):
def compute_actions(self,
obs_batch,
state_batches=None,
prev_action_batch=None,
prev_reward_batch=None,
episodes=None,
**kwargs):
return [0] * len(obs_batch), [[h] * len(obs_batch)], {}
def get_initial_state(self):
return [{}] # empty dict
ev = RolloutWorker(
env_creator=lambda _: gym.make("CartPole-v0"),
policy=StatefulPolicy,
batch_steps=5)
batch = ev.sample()
self.assertEqual(batch.count, 5)
self.assertEqual(batch["state_in_0"][0], {})
self.assertEqual(batch["state_out_0"][0], h)
self.assertEqual(batch["state_in_0"][1], h)
self.assertEqual(batch["state_out_0"][1], h)
def test_returning_model_based_rollouts_data(self):
class ModelBasedPolicy(PGTFPolicy):
def compute_actions(self,
obs_batch,
state_batches,
prev_action_batch=None,
prev_reward_batch=None,
episodes=None,
**kwargs):
# Pretend we did a model-based rollout and want to return
# the extra trajectory.
builder = episodes[0].new_batch_builder()
rollout_id = random.randint(0, 10000)
for t in range(5):
builder.add_values(
agent_id="extra_0",
policy_id="p1", # use p1 so we can easily check it
t=t,
eps_id=rollout_id, # new id for each rollout
obs=obs_batch[0],
actions=0,
rewards=0,
dones=t == 4,
infos={},
new_obs=obs_batch[0])
batch = builder.build_and_reset(episode=None)
episodes[0].add_extra_batch(batch)
# Just return zeros for actions
return [0] * len(obs_batch), [], {}
single_env = gym.make("CartPole-v0")
obs_space = single_env.observation_space
act_space = single_env.action_space
ev = RolloutWorker(
env_creator=lambda _: MultiCartpole(2),
policy={
"p0": (ModelBasedPolicy, obs_space, act_space, {}),
"p1": (ModelBasedPolicy, obs_space, act_space, {}),
},
policy_mapping_fn=lambda agent_id: "p0",
batch_steps=5)
batch = ev.sample()
self.assertEqual(batch.count, 5)
self.assertEqual(batch.policy_batches["p0"].count, 10)
self.assertEqual(batch.policy_batches["p1"].count, 25)
def test_train_multi_cartpole_single_policy(self):
n = 10
register_env("multi_cartpole", lambda _: MultiCartpole(n))
pg = PGTrainer(env="multi_cartpole", config={"num_workers": 0})
for i in range(100):
result = pg.train()
print("Iteration {}, reward {}, timesteps {}".format(
i, result["episode_reward_mean"], result["timesteps_total"]))
if result["episode_reward_mean"] >= 50 * n:
return
raise Exception("failed to improve reward")
def test_train_multi_cartpole_multi_policy(self):
n = 10
register_env("multi_cartpole", lambda _: MultiCartpole(n))
single_env = gym.make("CartPole-v0")
def gen_policy():
config = {
"gamma": random.choice([0.5, 0.8, 0.9, 0.95, 0.99]),
"n_step": random.choice([1, 2, 3, 4, 5]),
}
obs_space = single_env.observation_space
act_space = single_env.action_space
return (None, obs_space, act_space, config)
pg = PGTrainer(
env="multi_cartpole",
config={
"num_workers": 0,
"multiagent": {
"policies": {
"policy_1": gen_policy(),
"policy_2": gen_policy(),
},
"policy_mapping_fn": lambda agent_id: "policy_1",
},
})
# Just check that it runs without crashing
for i in range(10):
result = pg.train()
print("Iteration {}, reward {}, timesteps {}".format(
i, result["episode_reward_mean"], result["timesteps_total"]))
self.assertTrue(
pg.compute_action([0, 0, 0, 0], policy_id="policy_1") in [0, 1])
self.assertTrue(
pg.compute_action([0, 0, 0, 0], policy_id="policy_2") in [0, 1])
self.assertRaises(
KeyError,
lambda: pg.compute_action([0, 0, 0, 0], policy_id="policy_3"))
def _testWithOptimizer(self, optimizer_cls):
n = 3
env = gym.make("CartPole-v0")
act_space = env.action_space
obs_space = env.observation_space
dqn_config = {"gamma": 0.95, "n_step": 3}
if optimizer_cls == SyncReplayOptimizer:
# TODO: support replay with non-DQN graphs. Currently this can't
# happen since the replay buffer doesn't encode extra fields like
# "advantages" that PG uses.
policies = {
"p1": (DQNTFPolicy, obs_space, act_space, dqn_config),
"p2": (DQNTFPolicy, obs_space, act_space, dqn_config),
}
else:
policies = {
"p1": (PGTFPolicy, obs_space, act_space, {}),
"p2": (DQNTFPolicy, obs_space, act_space, dqn_config),
}
worker = RolloutWorker(
env_creator=lambda _: MultiCartpole(n),
policy=policies,
policy_mapping_fn=lambda agent_id: ["p1", "p2"][agent_id % 2],
batch_steps=50)
if optimizer_cls == AsyncGradientsOptimizer:
def policy_mapper(agent_id):
return ["p1", "p2"][agent_id % 2]
remote_workers = [
RolloutWorker.as_remote().remote(
env_creator=lambda _: MultiCartpole(n),
policy=policies,
policy_mapping_fn=policy_mapper,
batch_steps=50)
]
else:
remote_workers = []
workers = WorkerSet._from_existing(worker, remote_workers)
optimizer = optimizer_cls(workers)
for i in range(200):
worker.foreach_policy(lambda p, _: p.set_epsilon(
max(0.02, 1 - i * .02))
if isinstance(p, DQNTFPolicy) else None)
optimizer.step()
result = collect_metrics(worker, remote_workers)
if i % 20 == 0:
def do_update(p):
if isinstance(p, DQNTFPolicy):
p.update_target()
worker.foreach_policy(lambda p, _: do_update(p))
print("Iter {}, rew {}".format(i,
result["policy_reward_mean"]))
print("Total reward", result["episode_reward_mean"])
if result["episode_reward_mean"] >= 25 * n:
return
print(result)
raise Exception("failed to improve reward")
def test_multi_agent_sync_optimizer(self):
self._testWithOptimizer(SyncSamplesOptimizer)
def test_multi_agent_async_gradients_optimizer(self):
self._testWithOptimizer(AsyncGradientsOptimizer)
def test_multi_agent_replay_optimizer(self):
self._testWithOptimizer(SyncReplayOptimizer)
def test_train_multi_cartpole_many_policies(self):
n = 20
env = gym.make("CartPole-v0")
act_space = env.action_space
obs_space = env.observation_space
policies = {}
for i in range(20):
policies["pg_{}".format(i)] = (PGTFPolicy, obs_space, act_space,
{})
policy_ids = list(policies.keys())
worker = RolloutWorker(
env_creator=lambda _: MultiCartpole(n),
policy=policies,
policy_mapping_fn=lambda agent_id: random.choice(policy_ids),
batch_steps=100)
workers = WorkerSet._from_existing(worker, [])
optimizer = SyncSamplesOptimizer(workers)
for i in range(100):
optimizer.step()
result = collect_metrics(worker)
print("Iteration {}, rew {}".format(i,
result["policy_reward_mean"]))
print("Total reward", result["episode_reward_mean"])
if result["episode_reward_mean"] >= 25 * n:
return
raise Exception("failed to improve reward")
if __name__ == "__main__":
ray.init(num_cpus=4)
unittest.main(verbosity=2)
|
From Louis C.K.’s It Gets Better. Hypergamy explained. Women maximize both short-term and long-term options simultaneously. Men don’t need a long-term option because we are the long-term prize. Invest in yourself and see your options multiply over time while hers wither away exponentially.
|
# ===============================================================================
# Copyright 2017 dgketchum
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
"""
The purpose of this module is to provide some simple tools needed for raster processing.
"""
import os
from numpy import array, asarray
from numpy.ma import masked_where, nomask
from osgeo import gdal, ogr
import spatial_reference_tools as srt
def raster_to_array(input_raster_path, raster=None, band=1):
    """
    Convert a .tif raster into a numpy numerical array.
    :param input_raster_path: Path to the raster, or to its directory if the
        file name is passed separately via raster.
    :param raster: Raster file name, ending in *.tif.
    :param band: Band of raster sought.
    :return: Numpy array.
    """
    try:
        raster_open = gdal.Open(os.path.join(input_raster_path, raster))
    except (TypeError, AttributeError):
        # raster was None or not path-like; treat input_raster_path as the full path
        raster_open = gdal.Open(input_raster_path)
    ras = array(raster_open.GetRasterBand(band).ReadAsArray(), dtype=float)
    return ras
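# Usage sketch (hypothetical paths): pass a directory plus a file name,
#   arr = raster_to_array('/data/rasters', raster='dem.tif')
# or the full path alone,
#   arr = raster_to_array('/data/rasters/dem.tif')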
def get_polygon_from_raster(raster):
tile_id = os.path.basename(raster)
# print 'tile number: {}'.format(tile_id)
# print 'get poly tile: {}'.format(tile_id)
# get raster geometry
tile = gdal.Open(raster)
# print 'tile is type: {}'.format(tile)
transform = tile.GetGeoTransform()
pixel_width = transform[1]
    pixel_height = transform[5]  # note: transform[5] is negative for north-up rasters
cols = tile.RasterXSize
rows = tile.RasterYSize
x_left = transform[0]
y_top = transform[3]
x_right = x_left + cols * pixel_width
    y_bottom = y_top + rows * pixel_height  # pixel_height is negative, so this lands below y_top
    ring = ogr.Geometry(ogr.wkbLinearRing)
    # trace the tile boundary corner by corner and close the ring
    ring.AddPoint(x_left, y_top)
    ring.AddPoint(x_left, y_bottom)
    ring.AddPoint(x_right, y_bottom)
    ring.AddPoint(x_right, y_top)
    ring.AddPoint(x_left, y_top)
raster_geo = ogr.Geometry(ogr.wkbPolygon)
raster_geo.AddGeometry(ring)
# print 'found poly tile geo: {}'.format(raster_geo)
return raster_geo
def find_poly_ras_intersect(shape, raster_dir, extension='.tif'):
    """ Finds all the raster tiles intersecting the given shapefile polygon.
    The shape-geometry fetch should be separated from the intersect check;
    currently this causes an exit code 139 on a unix box.
    :param shape: Path to the shapefile.
    :param raster_dir: Directory holding the raster tiles.
    :param extension: Raster file extension to match.
    """
print 'starting shape: {}'.format(shape)
# get vector geometry
if not os.path.isfile(shape):
raise NotImplementedError('Shapefile not found')
polygon = ogr.Open(shape)
layer = polygon.GetLayer()
feature = layer.GetFeature(0)
vector_geo = feature.GetGeometryRef()
# print 'vector geometry: {}'.format(vector_geo)
tiles = [os.path.join(raster_dir, x) for x in
os.listdir(os.path.join(raster_dir)) if x.endswith(extension)]
raster_list = []
for tile in tiles:
print tile, srt.tif_proj4_spatial_reference(tile)
if srt.check_same_reference_system(shape, tile):
raster_geo = get_polygon_from_raster(tile)
if raster_geo.Intersect(vector_geo):
print 'tile: {} intersects {}'.format(os.path.basename(tile), os.path.basename(shape))
raster_list.append(tile)
return raster_list
def apply_mask(mask_path, arr):
out = None
file_name = next((fn for fn in os.listdir(mask_path) if fn.endswith('.tif')), None)
if file_name is not None:
mask = raster_to_array(mask_path, file_name)
idxs = asarray(mask, dtype=bool)
out = arr[idxs].flatten()
return out
def remake_array(mask_path, arr):
out = None
file_name = next((filename for filename in os.listdir(mask_path) if filename.endswith('.tif')), None)
if file_name is not None:
mask_array = raster_to_array(mask_path, file_name)
masked_arr = masked_where(mask_array == 0, mask_array)
masked_arr[~masked_arr.mask] = arr.ravel()
masked_arr.mask = nomask
arr = masked_arr.filled(0)
out = arr
return out
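# Round-trip sketch (assuming a single 0/1 mask .tif in mask_path):
#   flat = apply_mask(mask_path, full_arr)   # 1-D values where the mask is nonzero
#   full = remake_array(mask_path, flat)     # back to 2-D, zeros elsewhere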
def array_to_raster(save_array, out_path, geo):
    """Write a numpy array to a single-band GTiff described by the geo dict."""
    driver = gdal.GetDriverByName('GTiff')
    out_data_set = driver.Create(out_path, geo['cols'], geo['rows'],
                                 geo['bands'], geo['data_type'])
    out_data_set.SetGeoTransform(geo['geotransform'])
    out_data_set.SetProjection(geo['projection'])
    output_band = out_data_set.GetRasterBand(1)
    output_band.WriteArray(save_array, 0, 0)
    print 'written array to {}, mean value: {}'.format(out_path, save_array.mean())
    return None
if __name__ == '__main__':
pass
# home = os.path.expanduser('~')
# terrain = os.path.join(home, 'images', 'terrain', 'ned_tiles', 'dem')
# shape = os.path.join(home, 'images', 'vector_data', 'wrs2_descending',
# 'wrs2_036029_Z12.shp')
# find_poly_ras_intersect(shape, terrain)
# =================================== EOF =========================
|
"I am not an unemployed alcoholic, I am a bartender who works from home” is a jocular line that was cited on Twitter by comedy wirter Richard Day on January 27, 2010.
“I’m not an alcoholic; I’m a bartender who works from home” was cited on Twitter by Adel Alizedeh, a Persian stand-up comedian from Brooklyn, on March 21, 2016.
Bartender working from home, I’m doing something wrong.
I’m not an alcoholic; I’m a bartender who works from home.
|
import Gears as gears
from .. import *
class Erf(Component) :
def applyWithArgs(
self,
stimulus,
*,
toneRangeMean = 0.5,
toneRangeVar = 0.3,
dynamic = False
) :
        self.stimulus = stimulus
        try:
            trmean = toneRangeMean.value
        except AttributeError:
            trmean = toneRangeMean
        try:
            trvar = toneRangeVar.value
        except AttributeError:
            trvar = toneRangeVar
stimulus.setToneMappingErf(trmean, trvar, dynamic)
self.registerInteractiveControls(
None, stimulus,
"",
toneRangeMean = toneRangeMean ,
toneRangeVar = toneRangeVar ,
)
def update(self, **kwargs):
for key, control in self.interactiveControls.items() :
            if key == 'toneRangeMean' :
                try:
                    self.stimulus.toneRangeMean = control.value
                except AttributeError:
                    self.stimulus.toneRangeMean = control
            if key == 'toneRangeVar' :
                try:
                    self.stimulus.toneRangeVar = control.value
                except AttributeError:
                    self.stimulus.toneRangeVar = control
|
This song is unusual because when I first listened to the rhythm I thought it was going to be a happy song, but after I listened to the lyrics I realised that it was the complete opposite.
This song for me represents happy because the strumming of the guitar seems very calming, and the voice is really cheerful. The lyrics also make me think of the bright side of life and to never give up.
The tone of her voice is the reason why I chose this song as calm, because Priscilla’s voice is so soft and gentle that it almost made me go to sleep!
The lyrics of the song help me get through rough times, and they have also helped thousands of people; that’s why I think this song represents caring in my playlist.
The guitar and both of their voices were the reason why I chose this for peaceful, and the title says “Peace sign”. The guitar strumming pattern also makes the song peaceful.
I can see that most of the Chinese instruments are similar to Japanese ones, maybe because some of the instruments from China were imported to Japan a long time ago. The most similar ones were the Koto (from Japan) and the Zhen (from China), and the Biwa (from Japan) and the Pipa (from China). I watched some video clips of the Zhen to see if it had a similar sound to the Koto, and they sounded exactly the same. The Chinese Zhen has 21 strings, though, and the Koto has 13 strings. That’s quite a difference!
I also looked at western orchestra instruments, and what I realised was that they looked a bit like the string instruments such as the violin, cello, etc. The Japanese instruments the Shinobue (bamboo flute) and the Shakuhachi were very similar to woodwind instruments such as the flute, clarinet, etc., though the Japanese ones are made of wood.
There is not much difference between the families and the looks, but how they are used makes them special and belong to their own country.
For the first part of the unit, we visited the Japanese music class, where they teach koto instead of western music like blues, rock, etc. Mr. Patterson played the whole grade wonderful Koto music using different techniques, which were Ato-oshi (play the koto, and press), Shan-shan (play two strings two times), Kororin (onomatopoeia for the notes you play on the koto), and Shan (play two strings one time). Our task for the unit was to create a 16-measure Japanese melody using one of two pentatonic scales. One is called the “Hirajoshi” and the other the “Insen”. Out of those two scales, I used the “Insen” scale because I really liked the sound of it. The techniques that I included in my melody were “Kororin” and “Shan”. The notes that are used in the scale are D-E♭-G-A-C.
Our unit question was “What makes music sound like it belongs to a particular part of the world?”. For Japanese music, I think the different composition elements help listeners recognise the music, because you don’t really hear “Kororin” or “Ato-oshi” in western music, so you know that it’s Japanese music. And I also think that the scales used in Japanese music make it special, because they are different from other countries’.
This is the melody that I created during class. One thing that I realised after creating the piece was that the key changes towards the end, because the melody is supposed to end on C, but instead it ended on F. What I like about my melody is that the notes connect well together and are not all over the place. What I’d like to improve on next time is to use more techniques and to challenge myself with more complex melodies.
|
import random
def write_stages( stages_l, fname ):
f = open( fname, "w" )
for (i, stage) in enumerate( stages_l ):
for (addr, data) in enumerate( stage ):
wr_str = str(i) + " " + str( addr )
for d in data:
wr_str = wr_str + " " + str( d )
f.write("%s\n" % wr_str)
f.close()
def write_table( table_l, fname ):
f = open( fname, "w" )
for (addr, row) in enumerate( table_l ):
for (i, d) in enumerate( row ):
(val, en) = d
if en == True:
wr_str = str(addr) + " " + str(i) + " " + str(val)
f.write("%s\n" % wr_str)
f.close()
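# Output format sketch: each line of the "tree" file is
# "<stage> <addr> <d0> <d1> ..." (three values per row in this script), and
# each line of the "table" file is "<addr> <slot> <value>" for enabled entries.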
if __name__ == "__main__":
STAGES = 4
D_CNT = 4
MAX_NUM = 255
TOTAL_NUMS = 101
LAST_TABLE_ADDR_CNT = pow(4, STAGES)
MAX_HOLD_DATA_CNT = LAST_TABLE_ADDR_CNT * 4
print "maximum data %d" % ( MAX_HOLD_DATA_CNT )
all_nums_s = set()
all_nums_l = list()
while ( len( all_nums_s ) < TOTAL_NUMS ) and ( len( all_nums_s ) < MAX_HOLD_DATA_CNT ):
r = random.randint(0, MAX_NUM)
all_nums_s.add( r )
all_nums_l = list( sorted( all_nums_s ) )
print all_nums_l
match_table = list()
for i in xrange( LAST_TABLE_ADDR_CNT ):
match_table.append( list() )
for j in xrange( D_CNT ):
match_table[i].append( ( MAX_NUM, False ) )
for (i, n) in enumerate( all_nums_l ):
addr = i / D_CNT
pos = i % D_CNT
match_table[addr][pos] = ( n, True )
for i in match_table:
print i
stages_l = list()
for i in xrange( STAGES ):
stages_l.append( list() )
for j in xrange( pow(4, i ) ):
stages_l[i].append( [MAX_NUM, MAX_NUM, MAX_NUM] )
print stages_l
for stage in reversed( xrange( STAGES ) ):
if stage == ( STAGES - 1 ):
for (i, row) in enumerate( match_table ):
if i % 4 != 3:
# max value in last bucket
(m, en) = row[D_CNT-1]
#print stage, i/4, i%4
stages_l[ stage ][i/4][i%4] = m
else:
for (i, row) in enumerate( stages_l[ stage + 1 ] ):
if i % 4 != 3:
m = row[2]
stages_l[ stage ][ i / 4 ][ i % 4 ] = m
write_stages( stages_l, "tree" )
write_table( match_table, "table" )
|
Little-d distributed systems: the accidental sort. You built a program, it ran on one server. Then you added a database, some caches, perhaps a job worker somewhere. Whoops, you made a distributed system! Almost everything works this way now.
Big-D distributed systems: you read the Dynamo paper, maybe some Lamport papers too, and you set out to build on the principles set forth by those who have researched the topic. This is mostly open source distributed databases, but other systems surely fall under this category.
Ph.D distributed systems: you went to a top CS school, you ended up working with a distributed systems professor, and you wrote a system. You then graduated, ended up at Google, Facebook, Amazon, etc. and ended up writing more distributed systems, on a team of even more Ph.D’s.
If you’re building a little-d distributed system, study the patterns in the Big-D distributed systems. If you’re building a Big-D distributed system, study what the Ph.D guys are writing. If you’re a Ph.D distributed systems guy, please, write in clear and concise language! No one knows or cares what all the little Greek symbols are; they just want to know what works, what doesn’t work, and why.
|
# -*- coding: iso-8859-1 -*-
"""
MoinMoin - twikidraw
This action is used to call twikidraw
@copyright: 2001 by Ken Sugino ([email protected]),
2001-2004 by Juergen Hermann <[email protected]>,
2005 MoinMoin:AlexanderSchremmer,
2005 DiegoOngaro at ETSZONE ([email protected]),
2007-2008 MoinMoin:ThomasWaldmann,
2005-2009 MoinMoin:ReimarBauer,
@license: GNU GPL, see COPYING for details.
"""
import os, re
from MoinMoin import log
logging = log.getLogger(__name__)
from MoinMoin import wikiutil, config
from MoinMoin.action import AttachFile, do_show
from MoinMoin.action.AttachFile import _write_stream
from MoinMoin.security.textcha import TextCha
action_name = __name__.split('.')[-1]
def gedit_drawing(self, url, text, **kw):
# This is called for displaying a drawing image by gui editor.
_ = self.request.getText
# TODO: this 'text' argument is kind of superfluous, replace by using alt=... kw arg
# ToDo: make this clickable for the gui editor
if 'alt' not in kw or not kw['alt']:
kw['alt'] = text
# we force the title here, needed later for html>wiki converter
kw['title'] = "drawing:%s" % wikiutil.quoteWikinameURL(url)
pagename, drawing = AttachFile.absoluteName(url, self.page.page_name)
containername = wikiutil.taintfilename(drawing)
drawing_url = AttachFile.getAttachUrl(pagename, containername, self.request)
ci = AttachFile.ContainerItem(self.request, pagename, containername)
if not ci.exists():
title = _('Create new drawing "%(filename)s (opens in new window)"') % {'filename': self.text(containername)}
img = self.icon('attachimg') # TODO: we need a new "drawimg" in similar grey style and size
css = 'nonexistent'
return self.url(1, drawing_url, css=css, title=title) + img + self.url(0)
kw['src'] = ci.member_url('drawing.png')
return self.image(**kw)
def attachment_drawing(self, url, text, **kw):
# This is called for displaying a clickable drawing image by text_html formatter.
# XXX text arg is unused!
_ = self.request.getText
pagename, drawing = AttachFile.absoluteName(url, self.page.page_name)
containername = wikiutil.taintfilename(drawing)
drawing_url = AttachFile.getAttachUrl(pagename, containername, self.request, do='modify')
ci = AttachFile.ContainerItem(self.request, pagename, containername)
if not ci.exists():
title = _('Create new drawing "%(filename)s (opens in new window)"') % {'filename': self.text(containername)}
img = self.icon('attachimg') # TODO: we need a new "drawimg" in similar grey style and size
css = 'nonexistent'
return self.url(1, drawing_url, css=css, title=title) + img + self.url(0)
title = _('Edit drawing %(filename)s (opens in new window)') % {'filename': self.text(containername)}
kw['src'] = src = ci.member_url('drawing.png')
kw['css'] = 'drawing'
try:
mapfile = ci.get('drawing.map')
map = mapfile.read()
mapfile.close()
map = map.decode(config.charset)
except (KeyError, IOError, OSError):
map = u''
if map:
# we have a image map. inline it and add a map ref to the img tag
# we have also to set a unique ID
mapid = u'ImageMapOf%s%s' % (self.request.uid_generator(pagename), drawing)
map = map.replace(u'%MAPNAME%', mapid)
# add alt and title tags to areas
map = re.sub(ur'href\s*=\s*"((?!%TWIKIDRAW%).+?)"', ur'href="\1" alt="\1" title="\1"', map)
map = map.replace(u'%TWIKIDRAW%"', u'%s" alt="%s" title="%s"' % (
wikiutil.escape(drawing_url, 1), title, title))
# unxml, because 4.01 concrete will not validate />
map = map.replace(u'/>', u'>')
title = _('Clickable drawing: %(filename)s') % {'filename': self.text(containername)}
if 'title' not in kw:
kw['title'] = title
if 'alt' not in kw:
kw['alt'] = kw['title']
kw['usemap'] = '#'+mapid
return self.url(1, drawing_url) + map + self.image(**kw) + self.url(0)
else:
if 'title' not in kw:
kw['title'] = title
if 'alt' not in kw:
kw['alt'] = kw['title']
return self.url(1, drawing_url) + self.image(**kw) + self.url(0)
class TwikiDraw(object):
""" twikidraw action """
def __init__(self, request, pagename, target):
self.request = request
self.pagename = pagename
self.target = target
def save(self):
request = self.request
_ = request.getText
if not wikiutil.checkTicket(request, request.args.get('ticket', '')):
return _('Please use the interactive user interface to use action %(actionname)s!') % {'actionname': 'twikidraw.save' }
pagename = self.pagename
target = self.target
if not request.user.may.write(pagename):
return _('You are not allowed to save a drawing on this page.')
if not target:
return _("Empty target name given.")
file_upload = request.files.get('filepath')
if not file_upload:
# This might happen when trying to upload file names
# with non-ascii characters on Safari.
return _("No file content. Delete non ASCII characters from the file name and try again.")
filename = request.form['filename']
basepath, basename = os.path.split(filename)
basename, ext = os.path.splitext(basename)
ci = AttachFile.ContainerItem(request, pagename, target)
filecontent = file_upload.stream
content_length = None
if ext == '.draw': # TWikiDraw POSTs this first
AttachFile._addLogEntry(request, 'ATTDRW', pagename, target)
ci.truncate()
filecontent = filecontent.read() # read file completely into memory
filecontent = filecontent.replace("\r", "")
elif ext == '.map':
# touch attachment directory to invalidate cache if new map is saved
attach_dir = AttachFile.getAttachDir(request, pagename)
os.utime(attach_dir, None)
filecontent = filecontent.read() # read file completely into memory
filecontent = filecontent.strip()
else:
#content_length = file_upload.content_length
# XXX gives -1 for wsgiref :( If this is fixed, we could use the file obj,
# without reading it into memory completely:
filecontent = filecontent.read()
ci.put('drawing' + ext, filecontent, content_length)
def render(self):
request = self.request
_ = request.getText
pagename = self.pagename
target = self.target
if not request.user.may.read(pagename):
return _('You are not allowed to view attachments of this page.')
if not target:
return _("Empty target name given.")
ci = AttachFile.ContainerItem(request, pagename, target)
if ci.exists():
drawurl = ci.member_url('drawing.draw')
pngurl = ci.member_url('drawing.png')
else:
drawurl = 'drawing.draw'
pngurl = 'drawing.png'
pageurl = request.href(pagename)
saveurl = request.href(pagename, action=action_name, do='save', target=target,
ticket=wikiutil.createTicket(request))
helpurl = request.href("HelpOnActions/AttachFile")
html = """
<p>
<applet code="CH.ifa.draw.twiki.TWikiDraw.class"
archive="%(htdocs)s/applets/TWikiDrawPlugin/twikidraw.jar" width="640" height="480">
<param name="drawpath" value="%(drawurl)s">
<param name="pngpath" value="%(pngurl)s">
<param name="savepath" value="%(saveurl)s">
<param name="basename" value="%(basename)s">
<param name="viewpath" value="%(pageurl)s">
<param name="helppath" value="%(helpurl)s">
<strong>NOTE:</strong> You need a Java enabled browser to edit the drawing.
</applet>
</p>
""" % dict(
htdocs=request.cfg.url_prefix_static,
basename=wikiutil.escape(target, 1),
drawurl=wikiutil.escape(drawurl, 1),
pngurl=wikiutil.escape(pngurl, 1),
pageurl=wikiutil.escape(pageurl, 1),
saveurl=wikiutil.escape(saveurl, 1),
helpurl=wikiutil.escape(helpurl, 1),
)
title = "%s %s:%s" % (_("Edit drawing"), pagename, target)
request.theme.send_title(title, page=request.page, pagename=pagename)
request.write(request.formatter.startContent("content"))
request.write(request.formatter.rawHTML(html))
request.write(request.formatter.endContent())
request.theme.send_footer(pagename)
request.theme.send_closing_html()
def execute(pagename, request):
target = request.values.get('target')
target = wikiutil.taintfilename(target)
twd = TwikiDraw(request, pagename, target)
do = request.values.get('do')
if do == 'save':
msg = twd.save()
else:
msg = twd.render()
if msg:
request.theme.add_msg(msg, 'error')
do_show(pagename, request)
|
In the wake of a convulsion of violence against foreigners, international relief officials said Wednesday that South African authorities are planning to establish refugee camps to house tens of thousands of displaced people who had fled their homes in impoverished squatter areas, but the government contended that no decision had been made yet.
The Cabinet's decision on whether to set up the camps will be announced today, according to a statement issued by the government.
Initially, the camps would house 11,000 people who are now being sheltered near police stations scattered around South Africa.
Muriel Cornelis, a representative in Johannesburg for Doctors Without Borders, said the authorities had promised a decision by Wednesday night on the proposal, which could eventually provide shelter for as many as 70,000 people.
More than 50 people were killed in the attacks, which began near Johannesburg earlier this month and spread to other places including Cape Town. The authorities in neighboring Mozambique declared a state of emergency to cope with thousands of their citizens returning unexpectedly.
Even during the apartheid era, South Africa's mineral wealth attracted thousands of migrants from black-ruled countries like Malawi and Mozambique to work in the mines. But, since the dawn of majority rule in the 1990s, the country has been a magnet for foreigners from as far away as Somalia, drawn to South Africa as their own countries descend into chaos.
In addition, as many as 3 million Zimbabweans have fled their country to look for work in South Africa, provoking the resentment of some South Africans.
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gsutil.py."""
from unittest import mock
import pytest
from common import gsutil
from test_libs import utils as test_utils
def test_gsutil_command():
"""Tests gsutil_command works as expected."""
arguments = ['hello']
with test_utils.mock_popen_ctx_mgr() as mocked_popen:
gsutil.gsutil_command(arguments)
assert mocked_popen.commands == [['gsutil'] + arguments]
@pytest.mark.parametrize(('must_exist'), [True, False])
def test_ls_must_exist(must_exist):
"""Tests that ls makes a correct call to new_process.execute when
must_exist is specified."""
with mock.patch('common.new_process.execute') as mocked_execute:
gsutil.ls('gs://hello', must_exist=must_exist)
mocked_execute.assert_called_with(['gsutil', 'ls', 'gs://hello'],
expect_zero=must_exist)
class TestGsutilRsync:
    """Tests that gsutil.rsync works as expected."""
SRC = '/src'
DST = 'gs://dst'
def test_rsync(self):
"""Tests that rsync works as intended."""
with mock.patch(
'common.gsutil.gsutil_command') as mocked_gsutil_command:
gsutil.rsync(self.SRC, self.DST)
mocked_gsutil_command.assert_called_with(
['rsync', '-d', '-r', '/src', 'gs://dst'], parallel=False)
def test_gsutil_options(self):
"""Tests that rsync works as intended when supplied a gsutil_options
argument."""
flag = '-flag'
with mock.patch(
'common.gsutil.gsutil_command') as mocked_gsutil_command:
gsutil.rsync(self.SRC, self.DST, gsutil_options=[flag])
assert flag == mocked_gsutil_command.call_args_list[0][0][0][0]
    def test_options(self):
        """Tests that rsync works as intended when supplied an options
        argument."""
flag = '-flag'
with mock.patch(
'common.gsutil.gsutil_command') as mocked_gsutil_command:
gsutil.rsync(self.SRC, self.DST, options=[flag])
assert flag in mocked_gsutil_command.call_args_list[0][0][0]
@pytest.mark.parametrize(('kwarg_for_rsync', 'flag'), [('delete', '-d'),
('recursive', '-r')])
def test_no_flag(self, kwarg_for_rsync, flag):
"""Tests that rsync works as intended when caller specifies not
to use specific flags."""
kwargs_for_rsync = {}
kwargs_for_rsync[kwarg_for_rsync] = False
with mock.patch(
'common.gsutil.gsutil_command') as mocked_gsutil_command:
gsutil.rsync(self.SRC, self.DST, **kwargs_for_rsync)
assert flag not in mocked_gsutil_command.call_args_list[0][0][0]
|
The federal Liberal government is projecting a deficit of at least $18.4 billion next year, nearly five times its $3.9-billion projection just three months ago, it was revealed today.
That means the shortfall could well exceed $20 billion once a number of big-ticket Liberal campaign promises are factored in.
The estimates released Monday do not include billions of dollars in Liberal spending commitments that are expected in the upcoming federal budget, such as infrastructure investments.
Canadian Finance Minister Morneau announced that the Federal budget will be tabled March 22nd, and provided an updated look at the fiscal situation. Ottawa is now looking at an $18.4 billion deficit for the upcoming fiscal year (FY16/17), but that could reach upwards of $30 billion on budget day. Note that this includes an even larger contingency than Ottawa was using in past projections, of roughly $6 billion per year (previously $3 billion). Also, we’ve assumed additional stimulus of roughly $10 billion in year 1, as in the election platform, will make its way into the fiscal year starting April 1st. The updated outlook now also points to a deficit of roughly $26 billion in FY17/18, but the underlying shortfall in each of the two upcoming years could be $6 billion smaller if the economy performs as expected, and that is allowed to flow to the bottom line. We fully agree that a moderate dose of stimulus is an entirely appropriate response to current economic realities, but we would counsel caution in minding the dosage, for two distinct reasons: 1) the growth restraints on Canada look to be structural in nature (a reset lower on commodity prices), not a short-term cyclical phenomenon that can be countered with a quick fiscal boost, and 2) an overly aggressive fiscal boost could do lasting damage to Canada’s finances, casting doubt on the country’s hard-won triple-A credit rating.
Finance Minister Morneau’s updated picture of the economic backdrop heading into this year’s budget isn’t pretty, but that’s not news to anyone who has followed the deterioration in private sector forecasts. The 2016 real GDP forecast sits at 1.4%, nearly in line with our projection, but that’s down from 2% assumed in the November 2015 mid-year update, and nominal GDP has seen a deterioration due to the lower track for oil. The result is that before applying new measures in the budget, the status-quo fiscal outlook would be for a deficit of $18.4 bn, compared with a November estimate of $3.9 bn (and a surplus of $2.7 bn assumed for the coming year in the spring 2015 Budget). Note that the Liberals’ campaign pledge to run a $10 bn deficit was keyed off of that $2.7 bn surplus estimate for the status quo. Since that base case has deteriorated by a total of $21 bn, keeping the planned stimulus in place would now mean a deficit of roughly $30 bn. That’s still only 1 ½% of GDP, hardly a blow-up. The only question is whether the modest dose of stimulus pledged in the campaign (roughly a half-percent of GDP) is enough to counter the drag on the economy from low commodity prices.
With Morneau stating that it’s “difficult” to balance the budget by 2019-2020, we expect an extended period of large deficits. The main driver of the budget is a weaker profile for growth, with GDP growth slashed to 1.4% this year from 2.0% assumed previously. The deficit shortfall includes the impact of new tax measures announced in December, although infrastructure stimulus impacts are not factored in. New infrastructure spending, which could total $5B in 2016, could trigger an even larger deficit. As we have argued, the government has room for roughly $30B in deficit spending this year if they wish to hold the net-debt-to-GDP ratio constant. Thus, although the government has less room for additional stimulus measures, given the weaker deficit starting point, we think there is still some room to surprise to the upside relative to campaign promises. For Bank of Canada policy, the implication is clear: a flexible, stimulus-ready Federal government is taking the pressure off of monetary policy as the sole driver of growth. Thus, we reiterate our view that the BoC will stay on hold in 2016 despite weak economic growth. We see no rate hikes or cuts over our forecast horizon.
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import sys
if sys.version_info >= (3,):
from io import BytesIO
else:
try:
from cStringIO import StringIO as BytesIO
except:
from StringIO import StringIO as BytesIO
try:
from xml.etree import cElementTree as ETree
except ImportError:
from xml.etree import ElementTree as ETree
from azure.storage.common._common_conversion import (
_str,
)
from ._encryption import (
_encrypt_queue_message,
)
def _get_path(queue_name=None, include_messages=None, message_id=None):
'''
Creates the path to access a queue resource.
queue_name:
Name of queue.
include_messages:
Whether or not to include messages.
message_id:
Message id.
'''
if queue_name and include_messages and message_id:
return '/{0}/messages/{1}'.format(_str(queue_name), message_id)
if queue_name and include_messages:
return '/{0}/messages'.format(_str(queue_name))
elif queue_name:
return '/{0}'.format(_str(queue_name))
else:
return '/'
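# Example (hypothetical queue name): _get_path('jobs', True, 'msg1') returns
# '/jobs/messages/msg1', while _get_path('jobs') returns '/jobs'.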
def _convert_queue_message_xml(message_text, encode_function, key_encryption_key):
'''
<?xml version="1.0" encoding="utf-8"?>
<QueueMessage>
<MessageText></MessageText>
</QueueMessage>
'''
queue_message_element = ETree.Element('QueueMessage')
    # encode the message text (and optionally encrypt it below)
    message_text = encode_function(message_text)
if key_encryption_key is not None:
message_text = _encrypt_queue_message(message_text, key_encryption_key)
ETree.SubElement(queue_message_element, 'MessageText').text = message_text
# Add xml declaration and serialize
try:
stream = BytesIO()
ETree.ElementTree(queue_message_element).write(stream, xml_declaration=True, encoding='utf-8', method='xml')
output = stream.getvalue()
finally:
stream.close()
return output
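# Usage sketch (identity encoding, no encryption):
#   xml_bytes = _convert_queue_message_xml(u'hello', lambda t: t, None)
# which yields b"<?xml version='1.0' encoding='utf-8'?><QueueMessage>..." bytes.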
|
There’s this friend of mine, Ganesh (name changed for anonymity’s sake). He’s a school-college-buddy and lives near my place. We’ve known each other for over sixteen years now. He’s a guy I adore and admire a lot, mainly for some of his principles which he holds strong. Even though fate’s played some nasty games with him, he’s come out of all adversities bearing a characteristic smile on his face (and a tika on his forehead – our friend’s a devout ‘Shiv-bhakt’).
Like me, Ganesh always depended on KSRTC for his transport needs. That is, until he secured a well-paying job. He decided to put an end to the qualms of daily-commute by buying himself a good motorcycle. And he had no second thoughts about the model – he went for one among the best bikes in the market – The Yamaha R15 Limited Edition. Now, there are only a thousand of such bikes in the market which upped the oomph factor of the bike.
The sudden step-up from mundane commuting to stylish biking was a shot in the arm for Ganesh. He would flash his new toy, zipping through the crowded streets, showing off his beauty. He was too humble to flaunt his bike. Yet, his babe was the object of our collective envy. Ganesh handled his ‘babe’ with utmost care. He would wash and clean it every day, following every rule in the owners manual down to the last dot. In fact, he was obsessed about the R15, albeit in a healthy way.
One fine morning, Ganesh was all set to leave for office. He had an early appointment that morning, hence he’d woken up early to give his bike its daily wash. Making sure that every part of the macho bike gleamed like a new pin, Ganesh mounted his stallion. It was time to hit the road. Turning on the ignition, he pressed the start button. The familiar ignition rattle was music to his ears.
The bike did not start.
His faithful warrior always responded to the first attempt. Ganesh tried again. The ignition-noise emanated again and died down. He tried again. And again. And again.
One of the few cons of the R15 is that it lacks kick start. Ganesh remembered his friend recommending him Pulsar 220 because of the same reason. He’d then decided to go against his friend’s advice. Ganesh wasn’t worried. It must be a temporary problem, he decided. He thought he’d wait for a while and try again.
He waited, and tried again, to no avail. No matter how hard he tried, the bike failed to respond.
Beads of sweat started pouring down from Ganesh’s forehead. He was running late for his appointment. After a few more tries, Ganesh threw up his hands in despair. He kept his bike back into the shed and took a bus to office. He was fifteen minutes late for his meeting, and his boss was certainly not impressed. After an abnormally-long day, Ganesh reached home, tired and panting. Before he retired to his bedroom, he pulled the bike out of his shed and tried another attempt, in vain. Dejected, Ganesh decided to call it a day. Bikes always have starting problems, he reassured himself. It’d be alright by tomorrow.
For the next two days, Ganesh switched back to KSRTC for his daily commute. Day-in and day-out, he would try starting his bike, only to stand dejected and depressed. How could his brand new bike fall ill despite his careful attention? Machines have the same indiscretions as do humans, he realized.
The very next day, he decided enough was enough. Ganesh called the nearest Yamaha service center. The mechanic said he’d drop by that evening. Ganesh was relieved. His baby’d be back in action within no time, he told himself.
The mechanic promptly arrived, that evening (on a Yamaha RXG, nothing less). Brash and young, he was a Rajnikanth-worshiping chap, oozing ‘style’ in every movement. Humming a Rajni song, he gingerly unveiled an array of spanners and started work on the bike. He examined every part possible, trying to start the bike every two minutes. The ignition would sputter, and then stop. For a brief instant, the bike made a slight ‘vroom’ sound, much to Ganesh’s excitement. But then it died down, as soon as it started.
Fifteen minutes later, the mechanic stood up and took a stretch. He took a casual glance at the bike’s right side. Suddenly, he fixed his gaze on one point. He beckoned Ganesh towards him. His right index finger pointed towards the side of the bike. Ganesh saw it for himself. No sooner did he see what the mechanic was pointing at than a smile, or rather a sheepish grin, developed on his face.
Ganesh looked at the mechanic, who was now grinning back at him.
hari mone so u dont buy bike. ok.
Hehe . It happens everywhere . normally we wish to be on the observer side rather than the victim . Any way u explained it on a cherishful manner . Seems u r a perfect blogger . Well done !!
|
# -*- coding: utf-8 -*-
'''
Rickshaw
-------
Python Pandas + Rickshaw.js
'''
from __future__ import print_function
from __future__ import division
import time
import json
import os
from collections import defaultdict
from pkg_resources import resource_string
import pandas as pd
import numpy as np
from jinja2 import Environment, PackageLoader
class Chart(object):
'''Visualize Pandas Timeseries with Rickshaw.js'''
def __init__(self, data=None, width=750, height=400, plt_type='line',
colors=None, x_time=True, y_zero=False, **kwargs):
'''Generate a Rickshaw time series visualization with Pandas
Series and DataFrames.
The bearcart Chart generates the Rickshaw visualization of a Pandas
timeseries Series or DataFrame. The only required parameters are
        data, width, height, and plt_type. Colors is an optional parameter;
bearcart will default to the Rickshaw spectrum14 color palette if
none are passed. Keyword arguments can be passed to disable the
following components:
- x_axis
- y_axis
- hover
- legend
Parameters
----------
data: Pandas Series or DataFrame, default None
The Series or Dataframe must have a Datetime index.
        width: int, default 750
Width of the chart in pixels
        height: int, default 400
Height of the chart in pixels
plt_type: string, default 'line'
Must be one of 'line', 'area', 'scatterplot' or 'bar'
colors: dict, default None
Dict with keys matching DataFrame or Series column names, and hex
strings for colors
x_time: boolean, default True
If passed as False, the x-axis will have non-time values
y_zero: boolean, default False
The y-axis defaults to auto scaling. Pass True to set the min
y-axis value to 0.
kwargs:
Keyword arguments that, if passed as False, will disable the
following components: x_axis, y_axis, hover, legend
Returns
-------
Bearcart object
Examples
--------
        >>>vis = bearcart.Chart(data=df, width=800, height=300, plt_type='area')
>>>vis = bearcart.Chart(data=series,type='scatterplot',
colors={'Data 1': '#25aeb0',
'Data 2': '#114e4f'})
#Disable x_axis and legend
>>>vis = bearcart.Chart(data=df, x_axis=False, legend=False)
'''
self.defaults = {'x_axis': True, 'y_axis': True, 'hover': True,
'legend': True}
self.env = Environment(loader=PackageLoader('bearcart', 'templates'))
#Colors need to be js strings
if colors:
self.colors = {key: "'{0}'".format(value)
for key, value in colors.iteritems()}
else:
self.colors = None
self.x_axis_time = x_time
self.renderer = plt_type
self.width = width
self.height = height
self.y_zero = y_zero
self.template_vars = {}
#Update defaults for passed kwargs
for key, value in kwargs.iteritems():
self.defaults[key] = value
#Get templates for graph elements
for att, val in self.defaults.iteritems():
render_vars = {}
if val:
if not self.x_axis_time:
if att == 'x_axis':
att = 'x_axis_num'
elif att == 'hover':
render_vars = {'x_hover': 'xFormatter: function(x)'
'{return Math.floor(x / 10) * 10}'}
temp = self.env.get_template(att + '.js')
self.template_vars.update({att: temp.render(render_vars)})
#Transform data into Rickshaw-happy JSON format
if data is not None:
self.transform_data(data)
def transform_data(self, data):
'''Transform Pandas Timeseries into JSON format
Parameters
----------
data: DataFrame or Series
Pandas DataFrame or Series must have datetime index
Returns
-------
JSON to object.json_data
Example
-------
>>>vis.transform_data(df)
>>>vis.json_data
'''
def type_check(value):
'''Type check values for JSON serialization. Native Python JSON
serialization will not recognize some Numpy data types properly,
so they must be explictly converted.'''
if pd.isnull(value):
return None
elif (isinstance(value, pd.tslib.Timestamp) or
isinstance(value, pd.Period)):
return time.mktime(value.timetuple())
elif isinstance(value, (int, np.integer)):
return int(value)
elif isinstance(value, (float, np.float_)):
return float(value)
elif isinstance(value, str):
return str(value)
else:
return value
objectify = lambda dat: [{"x": type_check(x), "y": type_check(y)}
for x, y in dat.iteritems()]
self.raw_data = data
if isinstance(data, pd.Series):
data.name = data.name or 'data'
self.json_data = [{'name': data.name, 'data': objectify(data)}]
elif isinstance(data, pd.DataFrame):
self.json_data = [{'name': x[0], 'data': objectify(x[1])}
for x in data.iteritems()]
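    #Resulting structure sketch: one series entry per column, e.g.
    #[{'name': 'data', 'data': [{'x': 1325376000.0, 'y': 0.5}, ...]}]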
def _build_graph(self):
'''Build Rickshaw graph syntax with all data'''
#Set palette colors if necessary
if not self.colors:
self.palette = self.env.get_template('palette.js')
self.template_vars.update({'palette': self.palette.render()})
self.colors = {x['name']: 'palette.color()' for x in self.json_data}
template_vars = []
for index, dataset in enumerate(self.json_data):
group = 'datagroup' + str(index)
template_vars.append({'name': str(dataset['name']),
'color': self.colors[dataset['name']],
'data': 'json[{0}].data'.format(index)})
variables = {'dataset': template_vars, 'width': self.width,
'height': self.height, 'render': self.renderer}
if not self.y_zero:
variables.update({'min': "min: 'auto',"})
graph = self.env.get_template('graph.js')
self.template_vars.update({'graph': graph.render(variables)})
def create_chart(self, html_path='index.html', data_path='data.json',
js_path='rickshaw.min.js', css_path='rickshaw.min.css',
html_prefix=''):
'''Save bearcart output to HTML and JSON.
Parameters
----------
html_path: string, default 'index.html'
Path for html output
data_path: string, default 'data.json'
Path for data JSON output
js_path: string, default 'rickshaw.min.js'
If passed, the Rickshaw javascript library will be saved to the
path. The file must be named "rickshaw.min.js"
css_path: string, default 'rickshaw.min.css'
If passed, the Rickshaw css library will be saved to the
path. The file must be named "rickshaw.min.css"
html_prefix: Prefix path to be appended to all the other paths for file
creation, but not in the generated html file. This is needed if the
html file does not live in the same folder as the running python
script.
Returns
-------
HTML, JSON, JS, and CSS
Example
--------
        >>>vis.create_chart(html_path='myvis.html', data_path='visdata.json',
                            js_path='rickshaw.min.js',
                            css_path='rickshaw.min.css')
'''
self.template_vars.update({'data_path': str(data_path),
'js_path': js_path,
'css_path': css_path})
self._build_graph()
html = self.env.get_template('bcart_template.html')
self.HTML = html.render(self.template_vars)
with open(os.path.join(html_prefix, html_path), 'w') as f:
f.write(self.HTML)
with open(os.path.join(html_prefix, data_path), 'w') as f:
json.dump(self.json_data, f, sort_keys=True, indent=4,
separators=(',', ': '))
if js_path:
js = resource_string('bearcart', 'rickshaw.min.js')
with open(os.path.join(html_prefix, js_path), 'w') as f:
f.write(js)
if css_path:
css = resource_string('bearcart', 'rickshaw.min.css')
with open(os.path.join(html_prefix, css_path), 'w') as f:
f.write(css)
|
Callum Rudge reflects on a memorable night as Kell Brook's brave battle to conquer unbeaten middleweight ogre Gennady Golovkin ended in noble failure.
Luke G. Williams looks ahead to Saturday’s Swedish super-fight between bitter rivals Mikaela Laurén and Klara Svensson which headlines a big night of boxing in Stockholm.
No fairytale as teenager Riku Kano failed in his attempt to become Japan's youngest ever world champion and much more, including useful video links.
|
"""
Stack-In-A-Box: Python Responses Support
"""
from __future__ import absolute_import
import logging
import re
import responses
from stackinabox.stack import StackInABox
from stackinabox.util import deprecator
from stackinabox.util.tools import CaseInsensitiveDict
logger = logging.getLogger(__name__)
def responses_callback(request):
"""Responses Request Handler.
Converts a call intercepted by Responses to
the Stack-In-A-Box infrastructure
:param request: request object
:returns: tuple - (int, dict, string) containing:
int - the HTTP response status code
dict - the headers for the HTTP response
string - HTTP string response
"""
method = request.method
headers = CaseInsensitiveDict()
request_headers = CaseInsensitiveDict()
request_headers.update(request.headers)
request.headers = request_headers
uri = request.url
return StackInABox.call_into(method,
request,
uri,
headers)
def registration(uri):
"""Responses handler registration.
Registers a handler for a given URI with Responses
so that it can be intercepted and handed to
Stack-In-A-Box.
:param uri: URI used for the base of the HTTP requests
:returns: n/a
"""
# log the URI that is used to access the Stack-In-A-Box services
logger.debug('Registering Stack-In-A-Box at {0} under Python Responses'
.format(uri))
# tell Stack-In-A-Box what URI to match with
StackInABox.update_uri(uri)
# Build the regex for the URI and register all HTTP verbs
# with Responses
regex = re.compile(r'(http)?s?(://)?{0}:?(\d+)?/'.format(uri),
re.I)
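    # e.g. with uri='localhost' the pattern matches requests such as
    # 'http://localhost/', 'https://localhost/' and 'http://localhost:8080/...'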
METHODS = [
responses.DELETE,
responses.GET,
responses.HEAD,
responses.OPTIONS,
responses.PATCH,
responses.POST,
responses.PUT
]
for method in METHODS:
responses.add_callback(method,
regex,
callback=responses_callback)
@deprecator.DeprecatedInterface("responses_registration", "registration")
def responses_registration(uri):
return registration(uri)
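# Usage sketch (assuming the standard responses activation flow):
#
#   import responses
#
#   @responses.activate
#   def test_hello():
#       registration('localhost')
#       # HTTP calls to http(s)://localhost/... now route into StackInABox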
|
If You Love Pani Puri, Then Must Read.
There is something comfortingly familiar about eating a humble pani puri – a crispy hollow ball made of semolina or wheat, filled with spicy potatoes and topped with tangy, spicy tamarind water made fragrant by mint leaves and black salt.
It may sound like culinary chaos, but this spicy, crunchy wonder is absolutely delicious.
In the epic Mahabharata, a newly-wedded Draupadi returns home to be given a task by her mother-in-law Kunti. The Pandavas were in exile and Kunti wanted to test if her new daughter-in-law would be able to manage with the scarce resources.
So she gave Draupadi some leftover potato sabzi and just enough wheat dough to make one puri, instructing her to make food that would satisfy the hunger of all five of her sons. It is believed that this was when the new bride invented pani puri. Impressed with her daughter-in-law’s ingenuity, Kunti blessed the dish with immortality.
While the origins of this delicious snack are yet to be pinpointed with historical accuracy, the one thing that is clear is that pani puri traveled across India and made the country fall head over heels in love with it. Over the years, the combinations underwent many changes as each region developed its own version according to its preferences.
As a result, pani puri today has almost a dozen different names that change from region to region. In most parts of central and southern India, it is called pani puri but the recipes have subtle variations. While in Maharashtra, hot ragda (white peas curry) is added to the potato mash, in Gujarat, it is boiled moong and in Karnataka, it is chopped onions.
In north India, Pani puri is called gol gappe, gup chup, pani ke pataashe or phulkis. The signature element of this recipe is a spicy stuffing made out of a potato-chickpea mash and really tangy water, liberally infused with mint leaves. Interestingly, in Hoshangabad in Madhya Pradesh, Pani puri is called Tikki, which is usually used to denote crispy potato patties in north India!
In West Bengal, pani puri is called phuchka, probably due to the ‘phuch’ sound it makes when you take a bite. The unique feature of the phuchka is that it is made of whole wheat, unlike the others, which are usually made of flour or semolina. The phuchka water is also a little spicier and tangier than that used in the rest of the country.
|
#!/usr/bin/env python
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
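"""Filter a subunit v2 stream, reclassifying tests listed in shouldfail
files: their failures are converted to xfail and their successes to
uxsuccess. (This docstring is also used as the optparse description below.)
"""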
import optparse
import sys
from subunit import v2 as subunit_v2
from subunit.v2 import ByteStreamToStreamResult
from testtools import StreamResult
import yaml
def make_options():
parser = optparse.OptionParser(description=__doc__)
parser.add_option(
"--shouldfail-file",
type=str,
help="File with list of test ids that are expected to fail; "
"on failure their result will be changed to xfail; on success "
"they will be changed to error.",
dest="shouldfail_file",
action="append")
return parser
class ProcessedStreamResult(StreamResult):
def __init__(self, output, shouldfail):
self.output = output
self.shouldfail = shouldfail
def startTestRun(self):
self.output.startTestRun()
def stopTestRun(self):
self.output.stopTestRun()
def status(self, test_id=None, test_status=None, test_tags=None,
runnable=True, file_name=None, file_bytes=None, eof=False,
mime_type=None, route_code=None, timestamp=None):
if ((test_status in ['fail', 'success', 'xfail', 'uxsuccess', 'skip'])
and (test_id in self.shouldfail)):
if test_status == 'fail':
test_status = 'xfail'
elif test_status == 'success':
test_status = 'uxsuccess'
if self.shouldfail[test_id]:
self.output.status(test_id=test_id,
test_tags=test_tags,
file_name='shouldfail-info',
mime_type='text/plain; charset="utf8"',
file_bytes=self.shouldfail[test_id],
route_code=route_code,
timestamp=timestamp)
self.output.status(test_id=test_id, test_status=test_status,
test_tags=test_tags, runnable=runnable,
file_name=file_name, file_bytes=file_bytes,
mime_type=mime_type, route_code=route_code,
timestamp=timestamp)
def read_shouldfail_file(options):
shouldfail = {}
for path in options.shouldfail_file or ():
f = open(path, 'rb')
try:
content = yaml.safe_load(f)
for item in content:
if not isinstance(item, dict):
shouldfail[item] = None
else:
shouldfail.update(item)
finally:
f.close()
return shouldfail
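# A sketch of the shouldfail file format implied by read_shouldfail_file
# (the test ids below are illustrative, not from any real suite):
#
#     - tests.test_foo.TestFoo.test_flaky
#     - tests.test_bar.TestBar.test_known_bug: "tracked in bug 123"
#
# yaml.safe_load yields the first entry as a bare string (stored with a None
# reason) and the second as a one-item dict mapping id to reason, matching
# the isinstance(item, dict) branch above.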
def main():
parser = make_options()
(options, args) = parser.parse_args()
output = subunit_v2.StreamResultToBytes(sys.stdout)
shouldfail = read_shouldfail_file(options)
result = ProcessedStreamResult(output, shouldfail)
converter = ByteStreamToStreamResult(source=sys.stdin,
non_subunit_name='process-stderr')
result.startTestRun()
converter.run(result)
result.stopTestRun()
if __name__ == '__main__':
main()
|
There are growing concerns about local and regional ecosystems and their vulnerability in relation to human activities.
This case study evaluates 10 Integrated Land Management (ILM) projects from Canada, the U.S. and Europe to provide information that will help promote better awareness of potential environmental and cumulative impacts due to development priorities and choices. ILM builds on a spectrum of approaches including integrated resource management, integrated watershed management, comprehensive regional land use planning and ecosystem-based management. The study found that ILM approaches could provide significant benefits for local and regional decision-makers by helping them understand the linkages between environment and humans, and by providing opportunities to explore potential future development pathways and policies.
|
from .adb import adb
_LOG_TAG_PROPERTY = 'log.tag.{tag}'
LOG_LEVELS = ('VERBOSE', 'DEBUG', 'INFO', 'WARN', 'ERROR', 'ASSERT')
def is_valid_log_level(level):
return level.upper() in LOG_LEVELS
def set_loggable_level_for_tag(tag, level='VERBOSE'):
"""
Set the minimum loggable level for a tag.
:param tag: TAG name
:param level: Log level.
"""
level = level.upper()
if not is_valid_log_level(level):
raise ValueError("Unknown log level %s" % level)
return adb.set_property(_LOG_TAG_PROPERTY.format(tag=tag), level)
def set_loggable_level_for_tags(tags, default_level='VERBOSE'):
"""
Set the minimum log level for a set of tags.
:param tags: A mapping of tags and their minimum loggable level.
:param default_level: If `tags` is a list use this level as the default.
"""
try:
for tag, level in tags.iteritems():
set_loggable_level_for_tag(tag, level)
except AttributeError:
for tag in tags:
set_loggable_level_for_tag(tag, default_level)
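# A minimal usage sketch (assumes a device is attached and that adb.set_property
# behaves as imported above; the tag names are illustrative):
#
#     set_loggable_level_for_tag('MyActivity', 'DEBUG')
#     set_loggable_level_for_tags({'MyActivity': 'DEBUG', 'Sync': 'WARN'})
#     set_loggable_level_for_tags(['MyActivity', 'Sync'], default_level='INFO')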
|
Louisiana dancehalls and festivals intrigue everyone from shy bystanders to outgoing dancers. Regardless of your dance skill level, Moriah will take you to the next one. She teaches the leader and follower roles for a variety of dances including Zydeco, Cajun Two Step, Waltz, Jitterbug, Whiskey River, and East Coast Swing, to name a few.
Moriah has been featured in several music videos, often serving as casting director. She has performed on stages for crowds of thousands, created line dances for country artists, and choreographed dance moves for entertainers in the hip hop industry. For many years, Moriah has conducted private lessons and instructed groups ranging from 5 to 500 people. Her award-winning dance skills and engaging personality will set you at ease and give you the confidence you need to step out onto any dance floor in Louisiana!
The Louisiana Boot Troupe features some of Louisiana’s finest and most versatile dancers who will have you moving and grooving right along with them during their impressive performance.
Entertaining and interactive, the Boot Troupe invites you to learn and enjoy Louisiana’s amazing range of diverse dance styles including traditional Cajun Two Steps and lovely Waltzes alongside innovative Zydeco and soulful Swingout steps.
Their high-energy repertoire also involves original Line Dances and moves from the brand new ZydeFit routine. Get those boots shined and ready to step out for a wonderful dance experience with the Louisiana Boot Troupe!
|
# encoding: utf-8
# Copyright 2013 maker
# License
"""
Project management: admin page
"""
from maker.projects.models import Project, Milestone, Task, TaskTimeSlot, TaskStatus
from django.contrib import admin
class ProjectAdmin(admin.ModelAdmin):
""" Project admin """
list_display = ('name', 'details', 'parent', 'manager', 'client')
search_fields = ['name']
class MilestoneAdmin(admin.ModelAdmin):
""" Milestone admin """
list_display = ('name', 'details', 'project')
search_fields = ['name']
class TaskAdmin(admin.ModelAdmin):
""" Task admin """
list_display = ('name', 'details', 'project', 'priority', 'parent', 'milestone', 'caller')
search_fields = ['name']
class TaskStatusAdmin(admin.ModelAdmin):
""" Task status admin """
list_display = ('name', 'details')
search_fields = ['name']
class TaskTimeSlotAdmin(admin.ModelAdmin):
""" Task time slot admin """
list_display = ('task', 'time_from', 'time_to', 'timezone', 'details')
date_hierarchy = 'time_from'
    search_fields = ['task__name']  # search by the related Task's name; a bare FK is not searchable
class TaskRecordAdmin(admin.ModelAdmin):
""" Task record admin """
list_display = ('task', 'record_type')
list_filter = ['record_type']
admin.site.register(Project, ProjectAdmin)
admin.site.register(Milestone, MilestoneAdmin)
admin.site.register(Task, TaskAdmin)
admin.site.register(TaskStatus, TaskStatusAdmin)
admin.site.register(TaskTimeSlot, TaskTimeSlotAdmin)
|
We rightly ask who are these 'least of these my brethren' with whom the King identifies? There surely is an ethical application of meaning to this passage and we do well to take it to heart. There is, however, a oneness of the King with those who suffer for the sake of his name. In my preparation for this reflection I read that scholars see these brethren of Christ as those who are witnesses of Christ.
It is therefore appropriate and pleasing to the Holy Spirit that we should see how closely Christ is to those who bring his light to the darkness in our world. In the news in recent days are reports of those who claim the name of Christ being chased from their ancestral lands of family and faith. Pursued like Israel of old, the new Pharaonic despots of our day pillage the ancient homes and Churches of Christians and then behead those whose only crime is that they love Jesus. Fathers and mothers and their children in Syria and Iraq fleeing these ghastly atrocities are grossly missing from the radar screens of democratically elected officials including our own government.
These are our brothers and sisters in Christ. Many of our kinsmen, with whom we are united in baptism, languish in prisons far from their family and friends. In scripture we read: Remember those who are in prison, as though in prison with them; and those who are ill-treated, since you also are in the body. (Hebrews 13:3) In Revelation 12:11, the visionary John explains that the dark work and resistance of Satan's kingdom is under attack. How? By those whose hearts are drenched by the blood of the Lamb, and who in their witness to Christ love not their lives even unto death.
Will knowing and sharing Christ make you popular? Sometimes maybe not. Jesus said, "Whoever listens to you listens to me, and whoever rejects you rejects me, and whoever rejects me rejects the one who sent me." Do not be dismayed: if they persecuted Christ, they will persecute you who carry his cross.
May we hear the King say to us on Judgement Day, "Truly, I say to you, as you did it to one of the least of these my brethren, you did it to me." Amen.
|
#!/usr/bin/env python
# Copyright 2014 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Generates the file /etc/udev/rules.d/android_swarming_bot.rules to enable
automatic Swarming bot to be fired up when an Android device with USB debugging
is connected.
"""
__version__ = '0.1'
import getpass
import optparse
import os
import string
import subprocess
import sys
import tempfile
THIS_FILE = os.path.abspath(__file__)
ROOT_DIR = os.path.dirname(THIS_FILE)
HEADER = '# This file was AUTOMATICALLY GENERATED with %s\n' % THIS_FILE
RULE_FILE = '/etc/udev/rules.d/android_swarming_bot.rules'
LETTERS_AND_DIGITS = frozenset(string.ascii_letters + string.digits)
def gen_udev_rule(user, dev_filters):
"""Generates the content of the udev .rules file."""
# The command executed must exit immediately.
script = os.path.join(ROOT_DIR, 'udev_start_bot_deferred.sh')
items = [
'ACTION=="add"',
'SUBSYSTEM=="usb"',
]
items.extend(dev_filters)
# - sudo -u <user> is important otherwise a user writeable script would be run
# as root.
# - -H makes it easier to find the user's local files.
# - -E is important, otherwise the necessary udev environment variables won't
# be set. Also we don't want to run the script as root.
items.append('RUN+="/usr/bin/sudo -H -E -u %s %s"' % (user, script))
line = ', '.join(items)
# https://code.google.com/p/swarming/issues/detail?id=127
# TODO(maruel): Create rule for ACTION=="remove" which would send a signal to
# the currently running process.
# TODO(maruel): The add rule should try to find a currently running bot first.
return HEADER + line + '\n'
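# For reference, the rule line assembled above has this shape (the user name
# and script path are illustrative):
#
#     ACTION=="add", SUBSYSTEM=="usb", ATTR{idVendor}=="18d1",
#         RUN+="/usr/bin/sudo -H -E -u bot_user /path/to/udev_start_bot_deferred.sh"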
def write_udev_rule(filepath):
"""Writes the udev rules file in /etc/udev/rules.d when run as root."""
with open(filepath, 'rb') as f:
content = f.read()
if os.path.isfile(RULE_FILE):
    print('Overwriting existing file')
with open(RULE_FILE, 'w+b') as f:
f.write(content)
print('Wrote %d bytes successfully to %s' % (len(content), RULE_FILE))
def work(user, dev_filters):
"""The guts of this script."""
content = gen_udev_rule(user, dev_filters)
print('WARNING: About to write in %s:' % RULE_FILE)
print('***')
sys.stdout.write(content)
print('***')
raw_input('Press enter to continue or Ctrl-C to cancel.')
handle, filepath = tempfile.mkstemp(
prefix='swarming_bot_udev', suffix='.rules')
os.close(handle)
try:
with open(filepath, 'w+') as f:
f.write(content)
command = ['sudo', sys.executable, THIS_FILE, '--file', filepath]
print('Running: %s' % ' '.join(command))
return subprocess.call(command)
finally:
os.remove(filepath)
def test_device_rule(device):
# To find your device:
# unbuffer udevadm monitor --environment --udev --subsystem-match=usb
# | grep DEVNAME
# udevadm info -a -n <value from DEVNAME>
#
# sudo udevadm control --log-priority=debug
# udevadm info --query all --export-db | less
cmd = ['sudo', 'udevadm', 'test', '--action=add', device]
print('Running: %s' % ' '.join(cmd))
return subprocess.call(cmd)
def main():
if not sys.platform.startswith('linux'):
print('Only tested on linux')
return 1
parser = optparse.OptionParser(
description=sys.modules[__name__].__doc__, version=__version__)
parser.add_option('--file', help=optparse.SUPPRESS_HELP)
parser.add_option(
'-d',
'--dev_filters',
default=[],
action='append',
help='udev filters to use; get device id with "lsusb" then udev details '
'with "udevadm info -a -n /dev/bus/usb/002/001"')
parser.add_option(
'--user',
default=getpass.getuser(),
help='User account to start the bot with')
parser.add_option('--test', help='Tests the rule for a device')
options, args = parser.parse_args()
if args:
parser.error('Unsupported arguments %s' % args)
if options.test:
return test_device_rule(options.test)
if options.file:
if options.user != 'root':
parser.error('When --file is used, expected to be run as root')
else:
if options.user == 'root':
parser.error('Run as the user that will be used to run the bot')
if not LETTERS_AND_DIGITS.issuperset(options.user):
parser.error('User must be [a-zA-Z0-9]+')
os.chdir(ROOT_DIR)
if not os.path.isfile(os.path.join(ROOT_DIR, 'swarming_bot.zip')):
    print('First download swarming_bot.zip alongside this script')
return 1
if options.file:
write_udev_rule(options.file)
return 0
# 18d1==Google Inc. but we'd likely want to filter more broadly.
options.dev_filters = options.dev_filters or ['ATTR{idVendor}=="18d1"']
work(options.user, options.dev_filters)
return 0
if __name__ == '__main__':
sys.exit(main())
|
Need another great resource to use for research? Use the WorldBook Encyclopedia online! See one of the library staff for the username and password to use at home.
The Florida Electronic Library is an Internet resource that offers access to comprehensive, accurate, and reliable information. Available resources include electronic magazines, newspapers, almanacs, encyclopedias, and books.
|
# -*- coding: UTF-8 -*-
__revision__ = '$Id$'
# Copyright (c) 2005-2009 Vasco Nunes, Piotr Ożarowski
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# You may use and distribute this software under the terms of the
# GNU General Public License, version 2 or later
import gtk
import os
import pango
import string
import sys
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import letter, A4
from reportlab.lib.units import mm
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.platypus import Image
from reportlab.lib import colors
import db
import gutils
import version
exec_location = os.path.abspath(os.path.dirname(sys.argv[0]))
def cover_image(self,number):
filename = gutils.file_chooser(_("Select image"), \
action=gtk.FILE_CHOOSER_ACTION_OPEN, \
buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, \
gtk.STOCK_OPEN, gtk.RESPONSE_OK))
if filename[0]:
cover_image_process(self, filename[0], number)
def cover_image_process(self, filename, number):
size = self.widgets['print_cover']['ci_size'].get_active()
print_number = self.widgets['print_cover']['ci_number'].get_active()
if self.config.get('font', '') != '':
fontName = "custom_font"
pdfmetrics.registerFont(TTFont(fontName,self.config.get('font', '')))
else:
fontName = "Helvetica"
if size == 0:
#standard
cover_x=774
cover_y=518
elif size == 1:
#slim
        cover_x=757
cover_y=518
else:
#double slim
cover_x=757
cover_y=518
# A4 landscape definition
pageWidth = 842
pageHeight = 595
# hardcoded to A4
    pos_x=(pageWidth-cover_x)/2
    pos_y=(pageHeight-cover_y)/2
# make a pdf
# using a truetype font with unicode support
c = canvas.Canvas(os.path.join(self.griffith_dir, "cover.pdf"), \
(pageWidth, pageHeight))
c.setFont(fontName, 8)
# copyright line
c.drawString(20, 20 ,_("Cover generated by Griffith v").encode('utf-8') + \
version.pversion+" (C) 2004-2009 Vasco Nunes/Piotr Ozarowski - "+ \
_("Released Under the GNU/GPL License").encode('utf-8'))
# get movie information from db
movie = self.db.session.query(db.Movie).filter_by(number=number).first()
if movie is not None:
c.drawImage(filename, pos_x, pos_y, cover_x, cover_y)
if print_number:
c.setFillColor(colors.white)
c.rect((pageWidth/2)-13, 520, 26, 70, fill=1, stroke=0)
c.setFillColor(colors.black)
c.setFont(fontName, 10)
c.drawCentredString(pageWidth/2, 530, number)
# draw cover area
c.rect(pos_x, pos_y, cover_x, cover_y)
c.showPage()
c.save()
self.widgets['print_cover']['window_simple'].hide()
cover_file = os.path.join(self.griffith_dir, "cover.pdf")
if self.windows:
os.popen3("\"" + cover_file + "\"")
else:
os.popen3(self.pdf_reader + " " + cover_file)
def cover_simple(self, number):
size = self.widgets['print_cover']['cs_size'].get_active()
print_number = self.widgets['print_cover']['cs_include_movie_number'].get_active()
poster = self.widgets['print_cover']['cs_include_poster'].get_active()
if self.config.get('font', '')!='':
fontName = "custom_font"
pdfmetrics.registerFont(TTFont(fontName,self.config.get('font', '')))
else:
fontName = "Helvetica"
if size == 0:
#standard
cover_x=774
cover_y=518
elif size == 1:
#slim
        cover_x=757
cover_y=518
else:
#double slim
cover_x=757
cover_y=518
# A4 landscape definition
pageWidth = 842
pageHeight = 595
# hardcoded to A4
    pos_x=(pageWidth-cover_x)/2
    pos_y=(pageHeight-cover_y)/2
# make a pdf
c = canvas.Canvas(os.path.join(self.griffith_dir, "cover.pdf"), (pageWidth, pageHeight))
c.setFont(fontName,8)
# copyright line
c.drawString(20,20,_("Cover generated by Griffith v").encode('utf-8') + \
version.pversion+" (C) 2004-2009 Vasco Nunes/Piotr Ozarowski - "+ \
_("Released Under the GNU/GPL License").encode('utf-8'))
# draw cover area
c.rect(pos_x, pos_y, cover_x, cover_y)
# get movie information from db
movie = self.db.session.query(db.Movie).filter_by(number=number).first()
if movie is not None:
if print_number:
c.setFont(fontName, 10)
c.drawCentredString(pageWidth/2, 530, number)
c.setFont(fontName, 16)
c.rotate(90)
c.drawString(60, (-pageWidth/2)-8, movie.o_title.encode('utf-8'))
c.rotate(-90)
if movie.poster_md5:
filename = gutils.get_image_fname(movie.poster_md5, self.db)
if filename:
c.drawImage(filename, x=(pageWidth-30)/2, y=470, width=30, height=50)
# print movie info
c.setFont(fontName, 8)
textObject = c.beginText()
textObject.setTextOrigin(pageWidth-cover_x, 300)
textObject.setFont(fontName, 8)
textObject.textLine("%s: %s" % (_('Original Title'), movie.o_title))
textObject.textLine("%s: %s" % (_('Title'), movie.title))
textObject.textLine('')
textObject.textLine("%s: %s" % (_('Director'), movie.director))
textObject.textLine('')
textObject.textLine("%s: %s %s" % (_('Running Time'), movie.runtime, _(' min')))
textObject.textLine("%s: %s" % (_('Country'), movie.country))
textObject.textLine("%s: %s" % (_('Genre'), movie.genre))
textObject.textLine('')
c.drawText(textObject)
# draw bigger poster image
if poster and movie.poster_md5 and filename:
c.drawImage(filename, x=(pageWidth-(pageWidth-cover_x)-235), y=(pageHeight/2)-125, width=180, height=250)
c.showPage()
c.save()
self.widgets['print_cover']['window_simple'].hide()
cover_file = os.path.join(self.griffith_dir, 'cover.pdf')
if self.windows:
os.popen3("\"" + cover_file + "\"")
elif self.mac:
os.popen3("open -a Preview" + " " + cover_file)
else:
os.popen3(self.pdf_reader + " " + cover_file)
|
# coding=utf-8
from typing import List
import aiohttp
from bot.sections.base import BaseSection
from bot.utils import line_splitter
__author__ = "Gareth Coles"
class URLSection(BaseSection):
    _type = "url"

    def __init__(self, name, url=None, header="", footer=""):
        super().__init__(name, header=header, footer=footer)
        self.url = url or ""
        self.cached_lines = []  # per-instance cache; a class-level list would be shared
async def process_command(self, command, data, data_string, client, message) -> str:
if command == "set":
if len(data) < 1:
return "Usage: `set \"<url>\"`"
url = data[0]
if not url:
return "Please supply a URL to retrieve text from"
            while len(url) > 1 and ((url[0] == url[-1] == "`") or
                                    (url[0] == "<" and url[-1] == ">")):
                url = url[1:-1]
session = aiohttp.ClientSession()
try:
async with session.get(url, timeout=30) as resp:
text = await resp.text()
self.cached_lines = self.split_paragraphs(text)
except Exception as e:
return "Failed to retrieve URL: `{}`".format(e)
else:
self.url = url
client.sections_updated(message.server)
return "URL set; retrieved `{}` messages' worth of text".format(len(self.cached_lines))
finally:
session.close()
if command == "get":
if not self.url:
return "No URL has been set."
return "Current URL: `{}`".format(self.url)
return "Unknown command: `{}`\n\nAvailable command: `set`, `get`".format(command)
def split_paragraphs(self, text):
parts = text.split("\n\n")
done = []
for i, part in enumerate(parts):
if i < len(parts) - 1:
done.append(part + "\n\u200b")
else:
done.append(part)
return line_splitter(done, 2000, split_only=True)
async def render(self) -> List[str]:
if not self.url:
return ["**A URL has not been set for this section**"]
session = aiohttp.ClientSession()
try:
async with session.get(self.url, timeout=30) as resp:
text = await resp.text()
self.cached_lines = self.split_paragraphs(text)
except Exception as e:
return ["**ERROR**: Failed to retrieve URL: `{}`".format(self.url, e)]
else:
return self.cached_lines
finally:
session.close()
async def show(self) -> List[str]:
return [
"section \"{}\" set \"{}\"".format(self.name, self.url)
]
def to_dict(self) -> dict:
return {
"url": self.url,
"header": self.header,
"footer": self.footer
}
@staticmethod
def from_dict(name, data) -> "URLSection":
return URLSection(name, **data)
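# A minimal usage sketch (URL and text illustrative). split_paragraphs joins
# paragraphs with a trailing zero-width space ("\u200b") so blank lines
# survive Discord's rendering, then line_splitter packs them into messages
# of at most 2000 characters:
#
#     section = URLSection("rules", url="https://example.com/rules.txt")
#     messages = section.split_paragraphs("First paragraph.\n\nSecond one.")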
|
2. The latest technology is applied in the production of the Smart Weigh packing machine. Smart Weigh believes that meeting customer expectations will enhance customer satisfaction.
3. Smart Weigh pouches help products maintain their properties.
2. Smart Weigh is committed to winning a wide market with its core competitiveness. Ask online!
|
# pylint: disable=W0511
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
""" Copyright (c) 2000-2010 LOGILAB S.A. (Paris, FRANCE).
http://www.logilab.fr/ -- mailto:[email protected]
Check source code is ascii only or has an encoding declaration (PEP 263)
"""
import re
from pylint.interfaces import IRawChecker
from pylint.checkers import BaseChecker
MSGS = {
'W0511': ('%s',
'fixme',
'Used when a warning note as FIXME or XXX is detected.'),
'W0512': ('Cannot decode using encoding "%s", unexpected byte at position %d',
'invalid-encoded-data',
'Used when a source line cannot be decoded using the specified '
'source file encoding.',
{'maxversion': (3, 0)}),
}
class EncodingChecker(BaseChecker):
"""checks for:
* warning notes in the code like FIXME, XXX
* encoding issues.
"""
__implements__ = IRawChecker
# configuration section name
name = 'miscellaneous'
msgs = MSGS
options = (('notes',
{'type' : 'csv', 'metavar' : '<comma separated values>',
'default' : ('FIXME', 'XXX', 'TODO'),
'help' : 'List of note tags to take in consideration, \
separated by a comma.'
}),
)
def _check_note(self, notes, lineno, line):
match = notes.search(line)
if match:
self.add_message('fixme', args=line[match.start():-1], line=lineno)
def _check_encoding(self, lineno, line, file_encoding):
try:
return unicode(line, file_encoding)
except UnicodeDecodeError, ex:
self.add_message('invalid-encoded-data', line=lineno,
args=(file_encoding, ex.args[2]))
def process_module(self, module):
"""inspect the source file to find encoding problem or fixmes like
notes
"""
stream = module.file_stream
stream.seek(0) # XXX may be removed with astroid > 0.23
if self.config.notes:
notes = re.compile('|'.join(self.config.notes))
else:
notes = None
if module.file_encoding:
encoding = module.file_encoding
else:
encoding = 'ascii'
for lineno, line in enumerate(stream):
line = self._check_encoding(lineno+1, line, encoding)
if line is not None and notes:
self._check_note(notes, lineno+1, line)
def register(linter):
"""required method to auto register this checker"""
linter.register_checker(EncodingChecker(linter))
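# Example: with the default notes ('FIXME', 'XXX', 'TODO'), a source line such
# as
#
#     x = compute()  # TODO: cache this result
#
# is reported as W0511 (fixme), with everything from the note tag to the end
# of the line used as the message text; lines that cannot be decoded with the
# declared file encoding are reported as W0512 (invalid-encoded-data).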
|
William (Bill) W. Koehnlein, life-long peace and social justice activist, organizer and educator, died in Manhattan on November 19, 2017, after a long, courageous struggle with colon cancer.
A native New Yorker, Bill was born May 10, 1949, and spent the first six years of his life in the Payson Avenue section of Manhattan (1949-51) and in Elmhurst, Queens (1951-55), where he attended kindergarten at P.S. 89. For the next twelve years, he lived in Huntington Station, Long Island, subsequently moving to the Lower East Side of Manhattan, his main residence until his death.
He is survived by his wife of 37 years, Marie-Claire Picher; his son, Lyle Koehnlein, and daughter-in-law, Jessica Weiser; his mother, Joyce Johnson Koehnlein; two sisters—Janice Van Horne and Margaret Guiliano; his brother, John Koehnlein; two sisters-in-law—Patricia Di Grado Koehnlein and Annette Picher; three brothers-in-law—Philip Guiliano, Harry Van Horne and Edward Blechner; three nieces—Keely Guiliano, Zoey Guiliano and Selina Koehnlein; six nephews—Ry Guiliano, Zachary Guiliano, Delaney Guiliano, Kieran Koehnlein, Matthew Van Horne, Daniel Picher Fisher; his ex-wife, Sandra Gallagher Kynes; and many cousins and friends. He was pre-deceased by his father, William V. Koehnlein, and grandparents—Karl Koehnlein, Kristina Piskla Koehnlein, Viora Andren and Wayne Johnson.
1) Bill demonstrated with SDS at the 1968 Democratic Convention in Chicago.
2) From the 1960s to the late 1980s, he was an active member of the anarchist community. During this period of time, a) he participated in the Free School movement as a member of the collective that ran Free Space/Alternate University (in the A. J. Muste building and on 14th Street); b) he was a member of the Libertarian Book Club; and c) in the early 1980s, he co-founded the Anarchist Switchboard (East 9th Street/Second Avenue in Manhattan), a three-year project partly modeled on the original Free Space.
3) Since the 1960s, he actively supported the War Resisters League.
4) From the late 1980s until his death, Bill was an active member and constant supporter of the Brecht Forum/New York Marxist School, and subsequently, the Marxist Education Project.
5) He was the administrator of the Theater of the Oppressed Laboratory (TOPLAB) (late 1990s – 2017).
6) For three years, he volunteered weekly at the Earth Matter NY Compost Learning Center, on Governors Island, caring for the chickens.
7) He was a working member of the Fourth Street Food Co-op in Manhattan.
Bill lived a full life with integrity. He also touched hearts and minds with his encyclopedic knowledge and wit. He did extensive research on many subjects, including organic farming and sustainable agriculture, edible mushrooms, alternative medicine and the politics of food. He was also an animal whisperer and defender of animal rights who vigorously promoted the health and ecological benefits of the vegan diet. He fed birds and squirrels, loved crows, brushed his cat, gazed at the moon, climbed rocks, and felt at home in the woods and on mountain hiking trails. Bill will be lovingly remembered by his family and friends as an engaging raconteur; a writer of both incisive political commentary and whimsical fiction; a disseminator of information—news, films, music, recipes; an accomplished photographer; a lover of music ranging from Wagnerian operas to John Coltrane and Sun Ra; and a creative vegan cook.
Bill's family would like to thank the Visiting Nurse Service of New York Hospice and Palliative Care Program for their invaluable support. We request that any monetary contributions in memory of Bill be sent to The War Resisters League, The Marxist Education Project or The Catholic Worker.
TOPLAB's Winter 2018 activities and workshops will begin in mid-December; we will send the schedule to our announcement list. Many thanks for being such loyal supporters for all these years.
|
from trac import perm
from trac.core import *
from trac.test import EnvironmentStub
import unittest
class DefaultPermissionStoreTestCase(unittest.TestCase):
def setUp(self):
self.env = EnvironmentStub(enable=[perm.DefaultPermissionStore,
perm.DefaultPermissionGroupProvider])
self.store = perm.DefaultPermissionStore(self.env)
def tearDown(self):
self.env.reset_db()
def test_simple_actions(self):
self.env.db_transaction.executemany(
"INSERT INTO permission VALUES (%s,%s)",
[('john', 'WIKI_MODIFY'),
('john', 'REPORT_ADMIN'),
('kate', 'TICKET_CREATE')])
self.assertEquals(['REPORT_ADMIN', 'WIKI_MODIFY'],
sorted(self.store.get_user_permissions('john')))
self.assertEquals(['TICKET_CREATE'],
self.store.get_user_permissions('kate'))
def test_simple_group(self):
self.env.db_transaction.executemany(
"INSERT INTO permission VALUES (%s,%s)",
[('dev', 'WIKI_MODIFY'),
('dev', 'REPORT_ADMIN'),
('john', 'dev')])
self.assertEquals(['REPORT_ADMIN', 'WIKI_MODIFY'],
sorted(self.store.get_user_permissions('john')))
def test_nested_groups(self):
self.env.db_transaction.executemany(
"INSERT INTO permission VALUES (%s,%s)",
[('dev', 'WIKI_MODIFY'),
('dev', 'REPORT_ADMIN'),
('admin', 'dev'),
('john', 'admin')])
self.assertEquals(['REPORT_ADMIN', 'WIKI_MODIFY'],
sorted(self.store.get_user_permissions('john')))
def test_mixed_case_group(self):
self.env.db_transaction.executemany(
"INSERT INTO permission VALUES (%s,%s)",
[('Dev', 'WIKI_MODIFY'),
('Dev', 'REPORT_ADMIN'),
('Admin', 'Dev'),
('john', 'Admin')])
self.assertEquals(['REPORT_ADMIN', 'WIKI_MODIFY'],
sorted(self.store.get_user_permissions('john')))
def test_builtin_groups(self):
self.env.db_transaction.executemany(
"INSERT INTO permission VALUES (%s,%s)",
[('authenticated', 'WIKI_MODIFY'),
('authenticated', 'REPORT_ADMIN'),
('anonymous', 'TICKET_CREATE')])
self.assertEquals(['REPORT_ADMIN', 'TICKET_CREATE', 'WIKI_MODIFY'],
sorted(self.store.get_user_permissions('john')))
self.assertEquals(['TICKET_CREATE'],
self.store.get_user_permissions('anonymous'))
def test_get_all_permissions(self):
self.env.db_transaction.executemany(
"INSERT INTO permission VALUES (%s,%s)",
[('dev', 'WIKI_MODIFY'),
('dev', 'REPORT_ADMIN'),
('john', 'dev')])
expected = [('dev', 'WIKI_MODIFY'),
('dev', 'REPORT_ADMIN'),
('john', 'dev')]
for res in self.store.get_all_permissions():
            self.assertIn(res, expected)
class TestPermissionRequestor(Component):
implements(perm.IPermissionRequestor)
def get_permission_actions(self):
return ['TEST_CREATE', 'TEST_DELETE', 'TEST_MODIFY',
('TEST_CREATE', []),
('TEST_ADMIN', ['TEST_CREATE', 'TEST_DELETE']),
('TEST_ADMIN', ['TEST_MODIFY'])]
class PermissionSystemTestCase(unittest.TestCase):
def setUp(self):
self.env = EnvironmentStub(enable=[perm.PermissionSystem,
perm.DefaultPermissionStore,
TestPermissionRequestor])
self.perm = perm.PermissionSystem(self.env)
def tearDown(self):
self.env.reset_db()
def test_all_permissions(self):
self.assertEqual({'EMAIL_VIEW': True, 'TRAC_ADMIN': True,
'TEST_CREATE': True, 'TEST_DELETE': True,
'TEST_MODIFY': True, 'TEST_ADMIN': True},
self.perm.get_user_permissions())
def test_simple_permissions(self):
self.perm.grant_permission('bob', 'TEST_CREATE')
self.perm.grant_permission('jane', 'TEST_DELETE')
self.perm.grant_permission('jane', 'TEST_MODIFY')
self.assertEqual({'TEST_CREATE': True},
self.perm.get_user_permissions('bob'))
self.assertEqual({'TEST_DELETE': True, 'TEST_MODIFY': True},
self.perm.get_user_permissions('jane'))
def test_meta_permissions(self):
self.perm.grant_permission('bob', 'TEST_CREATE')
self.perm.grant_permission('jane', 'TEST_ADMIN')
self.assertEqual({'TEST_CREATE': True},
self.perm.get_user_permissions('bob'))
self.assertEqual({'TEST_CREATE': True, 'TEST_DELETE': True,
'TEST_MODIFY': True, 'TEST_ADMIN': True},
self.perm.get_user_permissions('jane'))
def test_get_all_permissions(self):
self.perm.grant_permission('bob', 'TEST_CREATE')
self.perm.grant_permission('jane', 'TEST_ADMIN')
expected = [('bob', 'TEST_CREATE'),
('jane', 'TEST_ADMIN')]
for res in self.perm.get_all_permissions():
            self.assertIn(res, expected)
def test_expand_actions_iter_7467(self):
# Check that expand_actions works with iterators (#7467)
perms = set(['EMAIL_VIEW', 'TRAC_ADMIN', 'TEST_DELETE', 'TEST_MODIFY',
'TEST_CREATE', 'TEST_ADMIN'])
self.assertEqual(perms, self.perm.expand_actions(['TRAC_ADMIN']))
self.assertEqual(perms, self.perm.expand_actions(iter(['TRAC_ADMIN'])))
class PermissionCacheTestCase(unittest.TestCase):
def setUp(self):
self.env = EnvironmentStub(enable=[perm.DefaultPermissionStore,
perm.DefaultPermissionPolicy,
TestPermissionRequestor])
self.perm_system = perm.PermissionSystem(self.env)
# by-pass DefaultPermissionPolicy cache:
perm.DefaultPermissionPolicy.CACHE_EXPIRY = -1
self.perm_system.grant_permission('testuser', 'TEST_MODIFY')
self.perm_system.grant_permission('testuser', 'TEST_ADMIN')
self.perm = perm.PermissionCache(self.env, 'testuser')
def tearDown(self):
self.env.reset_db()
def test_contains(self):
self.assertEqual(True, 'TEST_MODIFY' in self.perm)
self.assertEqual(True, 'TEST_ADMIN' in self.perm)
self.assertEqual(False, 'TRAC_ADMIN' in self.perm)
def test_has_permission(self):
self.assertEqual(True, self.perm.has_permission('TEST_MODIFY'))
self.assertEqual(True, self.perm.has_permission('TEST_ADMIN'))
self.assertEqual(False, self.perm.has_permission('TRAC_ADMIN'))
def test_require(self):
self.perm.require('TEST_MODIFY')
self.perm.require('TEST_ADMIN')
self.assertRaises(perm.PermissionError, self.perm.require, 'TRAC_ADMIN')
def test_assert_permission(self):
self.perm.assert_permission('TEST_MODIFY')
self.perm.assert_permission('TEST_ADMIN')
self.assertRaises(perm.PermissionError,
self.perm.assert_permission, 'TRAC_ADMIN')
def test_cache(self):
self.perm.assert_permission('TEST_MODIFY')
self.perm.assert_permission('TEST_ADMIN')
self.perm_system.revoke_permission('testuser', 'TEST_ADMIN')
# Using cached GRANT here
self.perm.assert_permission('TEST_ADMIN')
def test_cache_shared(self):
# we need to start with an empty cache here (#7201)
perm1 = perm.PermissionCache(self.env, 'testcache')
perm1 = perm1('ticket', 1)
perm2 = perm1('ticket', 1) # share internal cache
self.perm_system.grant_permission('testcache', 'TEST_ADMIN')
perm1.assert_permission('TEST_ADMIN')
self.perm_system.revoke_permission('testcache', 'TEST_ADMIN')
# Using cached GRANT here (from shared cache)
perm2.assert_permission('TEST_ADMIN')
class TestPermissionPolicy(Component):
implements(perm.IPermissionPolicy)
def __init__(self):
self.allowed = {}
self.results = {}
def grant(self, username, permissions):
self.allowed.setdefault(username, set()).update(permissions)
def revoke(self, username, permissions):
self.allowed.setdefault(username, set()).difference_update(permissions)
def check_permission(self, action, username, resource, perm):
result = action in self.allowed.get(username, set()) or None
self.results[(username, action)] = result
return result
class PermissionPolicyTestCase(unittest.TestCase):
def setUp(self):
self.env = EnvironmentStub(enable=[perm.DefaultPermissionStore,
perm.DefaultPermissionPolicy,
TestPermissionPolicy,
TestPermissionRequestor])
self.env.config.set('trac', 'permission_policies', 'TestPermissionPolicy')
self.policy = TestPermissionPolicy(self.env)
self.perm = perm.PermissionCache(self.env, 'testuser')
def tearDown(self):
self.env.reset_db()
def test_no_permissions(self):
self.assertRaises(perm.PermissionError,
self.perm.assert_permission, 'TEST_MODIFY')
self.assertRaises(perm.PermissionError,
self.perm.assert_permission, 'TEST_ADMIN')
self.assertEqual(self.policy.results,
{('testuser', 'TEST_MODIFY'): None,
('testuser', 'TEST_ADMIN'): None})
def test_grant_revoke_permissions(self):
self.policy.grant('testuser', ['TEST_MODIFY', 'TEST_ADMIN'])
self.assertEqual('TEST_MODIFY' in self.perm, True)
self.assertEqual('TEST_ADMIN' in self.perm, True)
self.assertEqual(self.policy.results,
{('testuser', 'TEST_MODIFY'): True,
('testuser', 'TEST_ADMIN'): True})
def test_policy_chaining(self):
self.env.config.set('trac', 'permission_policies', 'TestPermissionPolicy,DefaultPermissionPolicy')
self.policy.grant('testuser', ['TEST_MODIFY'])
system = perm.PermissionSystem(self.env)
system.grant_permission('testuser', 'TEST_ADMIN')
self.assertEqual(list(system.policies),
[self.policy,
perm.DefaultPermissionPolicy(self.env)])
self.assertEqual('TEST_MODIFY' in self.perm, True)
self.assertEqual('TEST_ADMIN' in self.perm, True)
self.assertEqual(self.policy.results,
{('testuser', 'TEST_MODIFY'): True,
('testuser', 'TEST_ADMIN'): None})
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(DefaultPermissionStoreTestCase, 'test'))
suite.addTest(unittest.makeSuite(PermissionSystemTestCase, 'test'))
suite.addTest(unittest.makeSuite(PermissionCacheTestCase, 'test'))
suite.addTest(unittest.makeSuite(PermissionPolicyTestCase, 'test'))
return suite
if __name__ == '__main__':
unittest.main()
|
An Anthology of Way-Back-When Futures. US edition: St. Martin's 1977.
Also in The Penguin Science Fiction Omnibus.
Also in pb (Avon Feb 80).
1 Uninhabited Planets "...Because They're There"
Also in hc (Weidenfeld & Nicolson Mar 75). Pagination taken from US edition (Doubleday 1976).
Differs from Weidenfeld & Nicolson edition.
The Paradox Men · Charles L. Harness · ex Bouregy & Curl 1953; the novel The Paradox Men is an expanded version of Flight Into Yesterday, Startling Stories May 1949.
Contents differ from the earlier Futura edition. US editions: Doubleday 1975 (hc) and Berkley Mar 77 (pb).
248 · The Paradox Men · Charles L. Harness · ex Bouregy & Curl 1953; the novel The Paradox Men is an expanded version of Flight Into Yesterday, Startling Stories May 1949.
|
"""Package for parsing and generating FASTA files of biological sequences.
Use the :class:`tinyfasta.FastaParser` class to parse FASTA files.
To generate FASTA files use the :func:`tinyfasta.FastaRecord.create` static
method to create :class:`tinyfasta.FastaRecord` instances, which can be written
to file.
"""
__version__ = "0.1.0"
class _FastaRecordComponent(object):
"""Component of a FastaRecort."""
def contains(self, search_term):
"""Return True if the component contains the search term.
:param search_term: string or compiled regular expression to search for
:returns: bool
"""
if hasattr(search_term, "search"):
return search_term.search(self._content) is not None
return self._content.find(search_term) != -1
class Sequence(_FastaRecordComponent):
"""Class representing a biological sequence."""
def __init__(self):
self._sequences = []
def __str__(self):
return self._content
def __len__(self):
"""Return the length of the biological sequence."""
return sum(len(s) for s in self._sequences)
@property
def _content(self):
"""Return the sequence as a string.
:returns: str
"""
return ''.join(self._sequences)
def add_sequence_line(self, sequence_line):
"""
Add a sequence line to the :class:`tinyfasta.Sequence` instance.
This function can be called more than once. Each time the function is
called the :class:`tinyfasta.Sequence` is extended by the sequence line
provided.
:param sequence_line: string representing (part of) a sequence
"""
self._sequences.append( sequence_line.strip() )
def format_line_length(self, line_length=80):
"""Format line length used to represent the sequence.
The full sequence is stored as list of shorter sequences. These shorter
sequences are used verbatim when writing out the
:class:`tinyfasta.FastaRecord` over several lines.
:param line_length: length of the sequences used to make up the full
sequence
"""
def string_to_list(seq, n):
"""Return list of strings of length n."""
return [seq[i:i+n] for i in range(0, len(seq), n)]
self._sequences = string_to_list(self._content, line_length)
class FastaRecord(object):
"""Class representing a FASTA record."""
class Description(_FastaRecordComponent):
"""Description line in a :class:`tinyfasta.FastaRecord`."""
def __init__(self, description):
self.update(description)
def __str__(self):
return self._content
def update(self, description):
"""Update the content of the description.
This function can be used to replace the existing description with
a new one.
:param description: new description string
"""
if not description.startswith(">"):
description = ">{}".format(description)
self._content = description.strip()
@staticmethod
def create(description, sequence):
"""Return a FastaRecord.
:param description: description string
:param sequence: full sequence string
:returns: :class:`tinyfasta.FastaRecord`
"""
fasta_record = FastaRecord(description)
fasta_record.add_sequence_line(sequence)
fasta_record.sequence.format_line_length()
return fasta_record
def __init__(self, description):
"""Initialise an instance of the :class:`tinyfasta.FastaRecord` class.
:param description: description string
"""
self.description = FastaRecord.Description(description)
self.sequence = Sequence()
def __str__(self):
"""String representation of the :class:`tinyfasta.FastaRecord` instance."""
lines = [str(self.description),]
lines.extend(self.sequence._sequences)
return '\n'.join(lines)
def __len__(self):
"""Return the length of the biological sequence."""
return len(self.sequence)
def add_sequence_line(self, sequence_line):
"""Add a sequence line to the :class:`tinyfasta.FastaRecord` instance.
This function can be called more than once. Each time the function is
called the :attr:`tinyfasta.sequence` is extended by the sequence line
provided.
:param sequence_line: string representing (part of) a sequence
"""
self.sequence.add_sequence_line(sequence_line)
class FastaParser(object):
"""Class for parsing FASTA files."""
def __init__(self, fpath):
"""Initialise an instance of the FastaParser.
:param fpath: path to the FASTA file to be parsed
"""
self.fpath = fpath
def __iter__(self):
"""Yield FastaRecord instances."""
fasta_record = None
with open(self.fpath, 'r') as fh:
for line in fh:
if line.startswith('>'):
if fasta_record:
yield fasta_record
fasta_record = FastaRecord(line)
else:
fasta_record.add_sequence_line(line)
            if fasta_record:
                yield fasta_record
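# A minimal usage sketch (file names are illustrative):
#
#     from tinyfasta import FastaParser, FastaRecord
#
#     for record in FastaParser("input.fasta"):
#         if record.description.contains("chromosome"):
#             print("{} ({} bases)".format(record.description, len(record)))
#
#     record = FastaRecord.create("seq_1 demo", "ATGC" * 30)
#     with open("output.fasta", "w") as fh:
#         fh.write(str(record) + "\n")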
|
Because it was a successful mining town long before it became a jet-set favorite, there are many old and classic buildings from its time as a mining capital.
I couldn’t decide if this beauty was old or a just well done new facsimile.
The Wheeler Opera House is old and authentic. Only wish it had been open to take a peek inside.
Just about any high end brand was represented here – Dior and Gucci live side by side inside the appropriately named Brand Building (1891). Yes, it’s an expensive town but it didn’t cost us a thing to just wander around.
There was a fair amount of art scattered about. There was a sizable memorial garden to John Denver as well.
Well, we almost got through town without spending any money. The gelato was excellent, by the way.
Lovely buildings…but I wonder how you decided on the gelato flavor when there were so many!
Yes, I’ve spent many hours in the John Denver Sanctuary with its beautiful granite boulders etched with lyrics from my youth. I recommend you bring along your iPod and listen to what he left behind as you enjoy this peaceful park.
|
from django.db import models
from django.contrib.auth.models import User
from microdata.models import Device, Appliance
from django.conf import settings
from recurrence.fields import RecurrenceField
from paintstore.fields import ColorPickerField
class EventNotification(models.Model):
"""
Notification sent to users via email whenever a notable event is detected.
This class is not currently in use since the system is not set up in such
a way as to detect any events. However, the notification framework is in
place such that when the functionality is added, this class should be called
in response to an event.
These notifications can be added/modified via the admin interface.
"""
description = models.CharField(
max_length=300,
help_text="Label to notification as shown to a user"
)
""" The description of the event notification as a user would see it when selecting/deselecting the notification in the settings interface"""
keyword = models.CharField(
max_length=300,
help_text="Keyword used to launch manage.py email_event",
)
""" Used to trigger the event notification in the django manager."""
watts_above_average = models.FloatField()
""" Proof of concept field to provide a threshold. If a group of appliances surpasses the threshold for a period of time, then send the email."""
period_of_time = models.FloatField(
help_text="Period of time to watch for irregularity"
)
""" Proof of concept field to provide a threshold. If a group of appliances surpasses the threshold for a period of time, then send the email."""
appliances_to_watch = models.ManyToManyField(Appliance)
""" Assemble a group of appliances to watch. Could be one or many."""
email_subject = models.CharField(max_length=300)
""" An email-friendly subject for the event notification."""
email_body = models.FileField(
help_text="Template file for email body. {{ x }} denotes x is a template variable",
upload_to="event"
)
""" A template used to generate the notification email body."""
def __unicode__(self):
return self.description
class IntervalNotification(models.Model):
"""
Notifications sent to users when a specified period has elapsed.
This class is also proof-of-concept, and it relies upon the `Amazon Simple Email Service <http://aws.amazon.com/ses/>`_.
An email will be sent to users who opt in to the notification summarizing their devices' energy usage over the specified
period.
"""
description = models.CharField(
max_length=300,
help_text="Label to notification as shown to a user",
)
""" The description of the event notification as a user would see it when selecting/deselecting the notification in the settings interface"""
recurrences = RecurrenceField(blank=True, null=True)
""" This field is treated much like a Google Calendars recurrence field. Provides an easy way for an admin to define new periods of time."""
email_subject = models.CharField(max_length=300)
""" An email-friendly subject for the event notification."""
email_body = models.FileField(
help_text="Template file for email body. {{ x }} denotes template variable",
upload_to="interval"
)
""" A template used to generate the notification email body."""
def __unicode__(self):
return self.description
class Notification(models.Model):
"""
DEPRECATED
"""
user = models.OneToOneField(User)
interval_notification = models.ManyToManyField(IntervalNotification)
def __unicode__(self):
return 'Notification '+str(self.pk)
class UserSettings(models.Model):
"""
An encapsulating module that links a user's settings together.
This model can be extended to include new settings that may come to be in the future.
"""
user = models.OneToOneField(User)
""" The related model for a settings model."""
interval_notification = models.ManyToManyField(IntervalNotification, blank=True)
""" A list of interval notifications that the user has opted in to. Default to none."""
event_notification = models.ManyToManyField(EventNotification, blank=True)
""" A list of event notifications that the user has opted in to. Default to none."""
class UtilityCompany(models.Model):
"""
A placeholder class to describe a Utility Company.
Since PG&E is the only company that was developed on during the proof-of-concept phase,
it is the company that was used to model the pricing structures. In the future, in order
to integrate new types of companies, a Utility Company model should reflect how the Utility
Company calculates cost.
"""
description = models.CharField(max_length=300)
""" A label that describes what company this is. Used for selection."""
class Meta:
verbose_name_plural = "Utility Companies"
def __unicode__(self):
return self.description
class RatePlan(models.Model):
"""
The base class that describes how a user is charged in the Utility Company.
This class is linked to :class:`webapp.models.UtilityCompany` via a ForeignKey.
In addition, the class contains a list of :class:`webapp.models.Tier` objects
that describe how the charges change based on usage.
"""
utility_company = models.ForeignKey(UtilityCompany)
""" Utility company relation. Describe who owns the :class:`webapp.models.RatePlan`"""
description = models.CharField(max_length=300)
""" A short description for the user when selecting their :class:`webapp.models.RatePlan`."""
data_source = models.URLField()
""" A simple URL field that links to the source of the data for this :class:`webapp.models.RatePlan`."""
min_charge_rate = models.FloatField(help_text="$ Per meter per day")
""" The minimum amount charged to a user's account. Not currently in use."""
california_climate_credit = models.FloatField(help_text="$ Per household, per semi-annual payment occurring in the April and October bill cycles")
""" A credit applied to a user's account twice yearly. Not currently in use."""
def __unicode__(self):
return self.utility_company.__unicode__() + ": " + self.description
class Tier(models.Model):
"""
A class that defines the cost and threshold of a :class:`webapp.models.RatePlan`.
A :class:`webapp.models.RatePlan` typically has 4-5 :class:`webapp.models.Tier` objects
as a relation. These objects keep track of the cost modifier as well as the KWh threshold
for a given device.
"""
rate_plan = models.ForeignKey(RatePlan)
""" This object is related to a :class:`webapp.models.RatePlan`."""
tier_level = models.IntegerField(blank=True, null=True)
""" An Integer, starting at 1, indicating the current level of the device."""
max_percentage_of_baseline = models.FloatField(help_text="blank for no maximum",blank=True, null=True)
""" This defines the threshold for a given :class:`webapp.models.Tier`. I.e. 100% - 130%"""
rate = models.FloatField(help_text="$",blank=True, null=True)
""" The actual cost of a KWh at this level."""
chart_color = ColorPickerField()
""" Color used by charts when graphing a :class:`webapp.models.Tier`."""
def __unicode__(self):
return 'Tier ' + str(self.tier_level)
class Territory(models.Model):
"""
A :class:`webapp.models.Territory` defines specifically key fields associated with a :class:`webapp.models.RatePlan`.
This class specifies the base rates of a given :class:`webapp.models.RatePlan` as well as defining
the winter and summer seasons for seasonal pricing.
"""
rate_plan = models.ForeignKey(RatePlan)
""" This object is related to a :class:`webapp.models.RatePlan`."""
description = models.CharField(max_length=300)
""" A short description for the user when selecting their :class:`webapp.models.RatePlan`."""
data_source = models.URLField()
""" A simple URL field that links to the source of the data for this :class:`webapp.models.RatePlan`."""
summer_start = models.IntegerField(blank=True,null=True,help_text="Specify Month of year")
""" A month of the year that specifies the start of summer. 1-12."""
winter_start = models.IntegerField(blank=True,null=True,help_text="Specify Month of year")
""" A month of the year that specifies the start of winter. 1-12."""
summer_rate = models.FloatField(help_text="Baseline quantity (kWh per day)")
""" The base rate for the summer season."""
winter_rate = models.FloatField(help_text="Baseline quantity (kWh per day)")
""" The base rate for the winter season."""
class Meta:
verbose_name_plural = "Territories"
def __unicode__(self):
return self.description
class DeviceWebSettings(models.Model):
"""
An encapsulating module that links a device's settings together.
This model can be extended to include new settings that may come to be in the future.
"""
device = models.OneToOneField(Device)
utility_companies = models.ManyToManyField(UtilityCompany, default=[1])
rate_plans = models.ManyToManyField(RatePlan, default=[1])
territories = models.ManyToManyField(Territory, default=[1])
current_tier = models.ForeignKey(Tier, default=1)
class DashboardSettings(models.Model):
user = models.OneToOneField(User)
stack = models.BooleanField(
default=True,
help_text="Specifies the default behavior for a graph: stacked or unstacked line chart"
)
|
The steeper, the better. The Deacon 76 Pro redefines the way you experience the slopes.
The new Deacon 76 Pro is one of 4 new frontside models intended to make firm and even icy conditions more fun. Völkl engineers have developed a 3D.Glass sandwich construction with two layers of fiberglass – one that wraps around the base and extends over the sidewalls, and one that sits just below the metal layer on top. This setup provides more power while allowing a smooth, progressive flex down the entire length of the ski. Combined with a full wood core, vertical sidewall construction and two sheets of titanal, the new Deacon models breathe new life into the frontside ski category. The Pro version includes our World Cup Piston Plate for added stability, leverage, and grip.
The titanal setup makes the ski agile and dynamic. Skiers who favor a hard, fast and aggressive riding style benefit especially from the quick response of this construction.
For better grip in fast and tight turns, our World Cup skis come with a special race-finish edge tune. While the SL models feature a 0.7° base and an 87.6° side edge, the GS models come with a 0.8° base and an 87.6° side edge.
|
import binascii
import sys
from cert_core import Chain, UnknownChainError
unhexlify = binascii.unhexlify
hexlify = binascii.hexlify
if sys.version_info >= (3,):
    unhexlify = lambda h: binascii.unhexlify(h.encode('utf8'))
    hexlify = lambda b: binascii.hexlify(b).decode('utf8')
def obfuscate_email_display(email):
"""Partially hides email before displaying"""
hidden_email_parts = email.split("@")
hidden_email = hidden_email_parts[0][:2] + ("*" * (len(hidden_email_parts[0]) - 2)) + "@" + hidden_email_parts[1]
return hidden_email
def get_tx_lookup_chain(chain, txid):
if chain == Chain.bitcoin_testnet:
return 'https://live.blockcypher.com/btc-testnet/tx/' + txid
elif chain == Chain.bitcoin_mainnet:
return 'https://blockchain.info/tx/' + txid
elif chain == Chain.bitcoin_regtest or chain == Chain.mockchain:
return 'This has not been issued on a blockchain and is for testing only'
elif chain == Chain.ethereum_mainnet:
        return 'https://etherscan.io/tx/' + txid
elif chain == Chain.ethereum_ropsten:
return 'https://ropsten.etherscan.io/tx/' + txid
else:
raise UnknownChainError(
'unsupported chain (%s) requested with blockcypher collector. Currently only testnet and mainnet are supported' % chain)
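# A short usage sketch; the address and transaction id below are
# illustrative placeholders, not real data.
if __name__ == '__main__':
    print(obfuscate_email_display('alice@example.org'))
    # -> al***@example.org
    print(get_tx_lookup_chain(Chain.bitcoin_mainnet, 'abc123'))
    # -> https://blockchain.info/tx/abc123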
|
Looking for Millionaires? Try These U.S. Cities.
Moneyed Americans are multiplying around Houston, Dallas and San Jose, Calif.
Fueled by the vibrant energy and technology sectors, those three metropolitan areas saw some of the fastest growth over the past five years in the number of residents with at least $1 million in investible assets (not counting a primary residence), according to a new report on U.S. wealth by RBC Wealth Management, a unit of Royal Bank of Canada, and Capgemini, a Paris-based consulting firm.
The report points to the diminished stature of Wall Street and the finance industry among the rich. Though New York City remains home to the largest number of high-net-worth individuals by far, with 894,000 people with at least $1 million to invest as of last year, growth of the wealthy population in Gotham hasn’t kept pace with other markets.
Houston’s high-net-worth population of 131,000 in 2013 was up 92% from 2008 and up 18% from 2012, the report says. Dallas’s rich population of 113,000 was up 79% and 20%, respectively. San Jose, near Silicon Valley, counted 122,000 millionaires in 2013, up 77% and 14%, respectively.
|
import warnings
import pkg_resources
# __version__ = pkg_resources.get_distribution("bika.lims").version
# import this to create messages in the bika domain.
from dependencies.dependency import MessageFactory
bikaMessageFactory = MessageFactory('bika')
from dependencies.dependency import PloneMessageFactory as PMF
# import this to log messages
import logging
logger = logging.getLogger('Bika')
from lims.validators import *
from lims.config import *
from lims.permissions import *
# from dependencies.dependency import ModuleSecurityInfo, allow_module
from dependencies.dependency import process_types, listTypes
from dependencies.dependency import registerDirectory
from dependencies.dependency import ContentInit, ToolInit, getToolByName
from dependencies.dependency import PloneMessageFactory
from dependencies.dependency import IPloneSiteRoot
from dependencies.dependency import EXTENSION
# from dependencies.dependency import _profile_registry as profile_registry
# allow_module('AccessControl')
# allow_module('bika.lims')
# allow_module('bika.lims.config')
# allow_module('bika.lims.permissions')
# allow_module('bika.lims.utils')
# allow_module('json')
# allow_module('pdb')
# allow_module('zope.i18n.locales')
# allow_module('zope.component')
# allow_module('plone.registry.interfaces')
def initialize(context):
from content.analysis import Analysis
from content.analysiscategory import AnalysisCategory
from content.analysisrequest import AnalysisRequest
from content.analysisrequestsfolder import AnalysisRequestsFolder
from content.analysisservice import AnalysisService
from content.analysisspec import AnalysisSpec
from content.arimport import ARImport
from content.arimportitem import ARImportItem
from content.arpriority import ARPriority
from content.analysisprofile import AnalysisProfile
from content.arreport import ARReport
from content.artemplate import ARTemplate
from content.attachment import Attachment
from content.attachmenttype import AttachmentType
from content.batch import Batch
from content.batchfolder import BatchFolder
from content.batchlabel import BatchLabel
from content.bikaschema import BikaSchema
from content.bikasetup import BikaSetup
from content.calculation import Calculation
from content.client import Client
from content.clientfolder import ClientFolder
from content.contact import Contact
from content.container import Container
from content.containertype import ContainerType
from content.department import Department
from content.duplicateanalysis import DuplicateAnalysis
from content.instrument import Instrument
from content.instrumentcalibration import InstrumentCalibration
from content.instrumentcertification import InstrumentCertification
from content.instrumentmaintenancetask import InstrumentMaintenanceTask
from content.instrumentscheduledtask import InstrumentScheduledTask
from content.instrumentvalidation import InstrumentValidation
from content.instrumenttype import InstrumentType
from content.invoice import Invoice
from content.invoicebatch import InvoiceBatch
from content.invoicefolder import InvoiceFolder
from content.labcontact import LabContact
from content.laboratory import Laboratory
from content.labproduct import LabProduct
from content.manufacturer import Manufacturer
from content.method import Method
from content.methods import Methods
from content.multifile import Multifile
from content.organisation import Organisation
from content.person import Person
from content.preservation import Preservation
from content.pricelist import Pricelist
from content.pricelistfolder import PricelistFolder
from content.queryfolder import QueryFolder
from content.query import Query
from content.referenceanalysis import ReferenceAnalysis
from content.referencedefinition import ReferenceDefinition
from content.referencesample import ReferenceSample
from content.referencesamplesfolder import ReferenceSamplesFolder
from content.rejectanalysis import RejectAnalysis
from content.report import Report
from content.reportfolder import ReportFolder
from content.sample import Sample
from content.samplecondition import SampleCondition
from content.samplematrix import SampleMatrix
from content.samplepartition import SamplePartition
from content.samplepoint import SamplePoint
from content.storagelocation import StorageLocation
from content.samplesfolder import SamplesFolder
from content.sampletype import SampleType
from content.samplingdeviation import SamplingDeviation
from content.srtemplate import SRTemplate
from content.subgroup import SubGroup
from content.supplier import Supplier
from content.suppliercontact import SupplierContact
from content.supplyorderfolder import SupplyOrderFolder
from content.supplyorder import SupplyOrder
from content.worksheet import Worksheet
from content.worksheetfolder import WorksheetFolder
from content.worksheettemplate import WorksheetTemplate
from controlpanel.bika_analysiscategories import AnalysisCategories
from controlpanel.bika_analysisservices import AnalysisServices
from controlpanel.bika_analysisspecs import AnalysisSpecs
from controlpanel.bika_analysisprofiles import AnalysisProfiles
from controlpanel.bika_artemplates import ARTemplates
from controlpanel.bika_arpriorities import ARPriorities
from controlpanel.bika_attachmenttypes import AttachmentTypes
from controlpanel.bika_batchlabels import BatchLabels
from controlpanel.bika_calculations import Calculations
from controlpanel.bika_containers import Containers
from controlpanel.bika_containertypes import ContainerTypes
from controlpanel.bika_departments import Departments
from controlpanel.bika_instruments import Instruments
from controlpanel.bika_instrumenttypes import InstrumentTypes
from controlpanel.bika_labcontacts import LabContacts
from controlpanel.bika_labproducts import LabProducts
from controlpanel.bika_manufacturers import Manufacturers
from controlpanel.bika_preservations import Preservations
from controlpanel.bika_referencedefinitions import ReferenceDefinitions
from controlpanel.bika_sampleconditions import SampleConditions
from controlpanel.bika_samplematrices import SampleMatrices
from controlpanel.bika_samplepoints import SamplePoints
from controlpanel.bika_storagelocations import StorageLocations
from controlpanel.bika_sampletypes import SampleTypes
from controlpanel.bika_samplingdeviations import SamplingDeviations
from controlpanel.bika_srtemplates import SRTemplates
from controlpanel.bika_subgroups import SubGroups
from controlpanel.bika_suppliers import Suppliers
from controlpanel.bika_worksheettemplates import WorksheetTemplates
content_types, constructors, ftis = process_types(
listTypes(PROJECTNAME),
PROJECTNAME)
    # Register each type with its own Add permission
    # use ADD_CONTENT_PERMISSION as default
    allTypes = zip(content_types, constructors)
    for atype, constructor in allTypes:
        kind = "%s: Add %s" % (PROJECTNAME, atype.portal_type)
perm = ADD_CONTENT_PERMISSIONS.get(atype.portal_type,
ADD_CONTENT_PERMISSION)
ContentInit(kind,
content_types = (atype,),
permission = perm,
extra_constructors = (constructor,),
fti = ftis,
).initialize(context)
def deprecated(comment=None, replacement=None):
""" A decorator which can be used to mark functions as deprecated.
Emits a DeprecationWarning showing the module and method being flagged
as deprecated. If replacement is set, the warn will also show which is
the function or class to be used instead.
"""
def old(oldcall):
def new(*args, **kwargs):
message = "Deprecated: '%s.%s'" % \
(oldcall.__module__,
oldcall.__name__)
if replacement is not None:
message += ". Use '%s.%s' instead" % \
(replacement.__module__,
replacement.__name__)
if comment is not None:
message += ". %s" % comment
warnings.warn(message, category=DeprecationWarning, stacklevel=2)
return oldcall(*args, **kwargs)
return new
return old
class _DeprecatedClassDecorator(object):
""" A decorator which can be used to mark symbols as deprecated.
Emits a DeprecationWarning showing the symbol being flagged as
    deprecated. To add a comment, use deprecated() instead.
"""
def __call__(self, symbol):
message = "Deprecated: '%s.%s'" % \
(symbol.__module__,
symbol.__name__)
warnings.warn(message, category=DeprecationWarning, stacklevel=2)
return symbol
deprecatedsymbol = _DeprecatedClassDecorator()
del _DeprecatedClassDecorator
def enum(**enums):
return type('Enum', (), enums)
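# A brief usage sketch of the two helpers above; PortalType and
# old_get_client are illustrative names, not part of bika.lims.
PortalType = enum(CLIENT='Client', BATCH='Batch')

@deprecated(comment="Scheduled for removal")
def old_get_client():
    return PortalType.CLIENT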
|
Follow the instructions below to delete an existing Group from CDP.
All users that belong to the deleted group will lose any permissions set at the group-level.
1. Click on "Groups" in the Main Menu to access the "Groups" screen.
2. In the "Groups" list, find the appropriate Group and click on the "Delete" icon in the "Actions" column.
3. Confirm your request to delete the Group by clicking on "Delete" in the displayed dialog.
4. The Group will disappear from the "Groups" list.
|
from __future__ import print_function
import sys
import numpy
from matplotlib.pylab import *
def plt_ref_orbit(s, lng):
subplot(1, 2, 1)
plot(s, lng[:, 0], '-b')
xlabel('s [m]'); ylabel('phase [rad]')
subplot(1, 2, 2)
plot(s, lng[:, 1], '-b')
xlabel('s [m]'); ylabel('E_k [MeV]')
def plt_moment0(s, moment0):
for i, L in zip(range(6),
('x [mm]', 'p_x [mrad]', 'y [mm]', 'p_y [mrad]',
'z [rad]', 'p_z')):
subplot(3, 2, i+1)
for j in range(5):
plot(s, moment0[:, j, i], '-b')
plot(s, moment0[:, 5, i], '-r')
xlabel('s [m]'); ylabel(L)
def plt_moment1(s, moment1):
for i, L in zip(range(6),
('s_x [mm]', 's_p_x [mrad]', 's_y [mm]', 's_p_y [mrad]',
's_z [rad]', 's_p_z')):
subplot(3, 2, i+1)
for j in range(5):
plot(s, moment1[:, j, i], '-b')
plot(s, moment1[:, 5, i], '-r')
xlabel('s [m]'); ylabel(L)
def plt_moment0_diff(s, moment0):
for i, L in zip(range(6),
('x [mm]', 'p_x [mrad]', 'y [mm]', 'p_y [mrad]',
'z [rad]', 'p_z')):
subplot(3, 2, i+1)
for j in range(5):
plot(s, moment0[:, j, i], '-b')
plot(s, moment0[:, 5, i], '-r')
xlabel('s [m]'); ylabel(L)
def plt_moment1_diff(s, moment1):
for i, L in zip(range(6),
('s_x [mm]', 's_p_x [mrad]', 's_y [mm]', 's_p_y [mrad]',
's_z [rad]', 's_p_z')):
subplot(3, 2, i+1)
for j in range(5):
plot(s, moment1[:, j, i], '-b')
plot(s, moment1[:, 5, i], '-r')
xlabel('s [m]'); ylabel(L)
def rd_long(file_name):
file = open(file_name, 'r')
s = []; lng = []
first = True
for line in file:
fields = line.strip().split()
s = numpy.append(s, float(fields[0]))
term = numpy.array([[fields[1], fields[2]]], dtype=float)
if first:
lng = term
first = False
else:
lng = numpy.append(lng, term, 0)
return [s, lng]
def rd_long_TLM(file_name):
file = open(file_name, 'r')
s = []; lng = []
first = True
for line in file:
fields = line.strip().split()
s = numpy.append(s, float(fields[0]))
term = numpy.array([[fields[2], fields[1]]], dtype=float)
if first:
lng = term
first = False
else:
lng = numpy.append(lng, term, 0)
return [s, lng]
def rd_data(file_name):
file = open(file_name, 'r')
s = []; moment0 = []; padding = numpy.array([NaN, NaN, NaN, NaN, NaN, NaN])
first = True
for line in file:
fields = line.strip().split()
s = numpy.append(s, float(fields[0]))
if len(fields) == 19:
term = numpy.array([[fields[1:7], fields[7:13],
padding, padding, padding,
fields[13:19]]], dtype=float)
else:
term = numpy.array([[fields[1:7], fields[7:13], fields[13:19],
fields[19:25], fields[25:31],
fields[31:37]]], dtype=float)
if first:
moment0 = term
first = False
else:
moment0 = numpy.append(moment0, term, 0)
return [s, moment0]
def plt_long(fig_no, title, s, lng):
fig = figure(fig_no)
subplots_adjust(hspace=0.6) # default is 0.2.
subplots_adjust(wspace=0.4) # default is 0.2.
suptitle(title)
plt_ref_orbit(s, lng)
return fig
def plt0(fig_no, title, s, moment0):
fig = figure(fig_no)
subplots_adjust(hspace=0.6) # default is 0.2.
subplots_adjust(wspace=0.4) # default is 0.2.
suptitle(title)
plt_moment0(s, moment0)
return fig
def plt1(fig_no, title, s, moment1):
fig = figure(fig_no)
subplots_adjust(hspace=0.6) # default is 0.2.
subplots_adjust(wspace=0.4) # default is 0.2.
suptitle(title)
    plt_moment1(s, moment1)
return fig
#[file_name1, file_name2] = [sys.argv[1], sys.argv[2]]
file_name1 = '/home/johan/git_repos/flame/build/src/ref_orbit.txt'
file_name2 = '/home/johan/tlm_workspace/TLM_JB/tab_jb.txt'
file_name3 = '/home/johan/git_repos/flame/build/src/moment0.txt'
file_name4 = '/home/johan/git_repos/flame/build/src/moment1.txt'
file_name5 = '/home/johan/tlm_workspace/TLM_JB/moment0_TLM.txt'
file_name6 = '/home/johan/tlm_workspace/TLM_JB/moment1_TLM.txt'
[s, lng] = rd_long(file_name1)
lng[:, 1] /= 1e6
[s, lng_TLM] = rd_long_TLM(file_name2)
[s, moment0] = rd_data(file_name3)
[s, moment1] = rd_data(file_name4)
[s_TLM, moment0_TLM] = rd_data(file_name5)
[s_TLM, moment1_TLM] = rd_data(file_name6)
lng_diff = lng - lng_TLM
moment0_diff = moment0 - moment0_TLM
moment1_diff = moment1 - moment1_TLM
fig1 = plt_long(1, 'Ref Orbit for Corrected TLM', s, lng_TLM)
fig2 = plt_long(2, 'Ref Orbit for FLAME', s, lng)
fig3 = plt_long(3, 'Ref Orbit Difference Between FLAME and Corrected TLM',
s, lng_diff)
fig4 = plt0(4, 'Orbit for Corrected TLM', s, moment0_TLM)
fig5 = plt0(5, 'Orbit for FLAME', s, moment0)
fig6 = plt0(6, 'Orbit Difference Between FLAME and Corrected TLM',
s, moment0_diff)
fig7 = plt1(7, 'RMS Beam Size for Corrected TLM', s, moment1_TLM)
fig8 = plt1(8, 'RMS Beam Size for FLAME', s, moment1)
fig9 = plt1(9, 'RMS Beam Size Difference Between FLAME and Corrected TLM',
s, moment1_diff)
rcParams['savefig.dpi'] = 600 # For png.
fig1.savefig('fig1_LS1-Target.eps', orientation='landscape')
fig2.savefig('fig2_LS1-Target.eps', orientation='landscape')
fig3.savefig('fig3_LS1-Target.eps', orientation='landscape')
fig4.savefig('fig4_LS1-Target.eps', orientation='landscape')
fig5.savefig('fig5_LS1-Target.eps', orientation='landscape')
fig6.savefig('fig6_LS1-Target.eps', orientation='landscape')
fig7.savefig('fig7_LS1-Target.eps', orientation='landscape')
fig8.savefig('fig8_LS1-Target.eps', orientation='landscape')
fig9.savefig('fig9_LS1-Target.eps', orientation='landscape')
ion(); show(); ioff()
raw_input('<ret> to continue>')
|
Winter has officially arrived in Texas, in case you were wondering. I've been freezing my tail off for days, and the coats have officially come out of the closet. Now that I've been walking around in my old coat that I keep meaning to get hemmed and mended, I am noticing lots of gorgeous options on the internets. Plus, I wanted to share some of my top picks for other cold weather accessories that won't make you look like a kid from A Christmas Story.
Let's start with the most important splurge - a nice coat that fits well. I'm dying for a new trench in a bright color like emerald green or bright blue. I'm also drawn to fun details like an unexpected ruffle or gold zipper.
Warm and cozy accessories are also a must, and I'm loving options in colors besides basic black. I'm also insistent that hats do not ruin my hair for the rest of the day, which is a challenge.
And finally, I think rubber boots are a must when it's wet and possibly snowy. Everyone and her cousin loves Hunter wellies, but there are tons of other cute options out there!
Now I wish we had more than 2.5 months of cold weather! What's your most indispensable winter accessory?
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from oslo_log import log
from oslo_log import versionutils
from oslo_utils import importutils
import stevedore
LOG = log.getLogger(__name__)
def response_truncated(f):
"""Truncate the list returned by the wrapped function.
This is designed to wrap Manager list_{entity} methods to ensure that
any list limits that are defined are passed to the driver layer. If a
hints list is provided, the wrapper will insert the relevant limit into
the hints so that the underlying driver call can try and honor it. If the
driver does truncate the response, it will update the 'truncated' attribute
in the 'limit' entry in the hints list, which enables the caller of this
function to know if truncation has taken place. If, however, the driver
layer is unable to perform truncation, the 'limit' entry is simply left in
the hints list for the caller to handle.
A _get_list_limit() method is required to be present in the object class
hierarchy, which returns the limit for this backend to which we will
truncate.
If a hints list is not provided in the arguments of the wrapped call then
any limits set in the config file are ignored. This allows internal use
of such wrapped methods where the entire data set is needed as input for
the calculations of some other API (e.g. get role assignments for a given
project).
"""
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
if kwargs.get('hints') is None:
return f(self, *args, **kwargs)
list_limit = self.driver._get_list_limit()
if list_limit:
kwargs['hints'].set_limit(list_limit)
return f(self, *args, **kwargs)
return wrapper
def load_driver(namespace, driver_name, *args):
try:
driver_manager = stevedore.DriverManager(namespace,
driver_name,
invoke_on_load=True,
invoke_args=args)
return driver_manager.driver
except RuntimeError as e:
LOG.debug('Failed to load %r using stevedore: %s', driver_name, e)
# Ignore failure and continue on.
@versionutils.deprecated(as_of=versionutils.deprecated.LIBERTY,
in_favor_of='entrypoints',
what='direct import of driver')
def _load_using_import(driver_name, *args):
return importutils.import_object(driver_name, *args)
# For backwards-compatibility, an unregistered class reference can
# still be used.
return _load_using_import(driver_name, *args)
class Manager(object):
"""Base class for intermediary request layer.
The Manager layer exists to support additional logic that applies to all
or some of the methods exposed by a service that are not specific to the
HTTP interface.
It also provides a stable entry point to dynamic backends.
An example of a probable use case is logging all the calls.
"""
driver_namespace = None
def __init__(self, driver_name):
self.driver = load_driver(self.driver_namespace, driver_name)
def __getattr__(self, name):
"""Forward calls to the underlying driver."""
f = getattr(self.driver, name)
setattr(self, name, f)
return f
def create_legacy_driver(driver_class):
"""Helper function to deprecate the original driver classes.
The keystone.{subsystem}.Driver classes are deprecated in favor of the
new versioned classes. This function creates a new class based on a
versioned class and adds a deprecation message when it is used.
This will allow existing custom drivers to work when the Driver class is
renamed to include a version.
Example usage:
Driver = create_legacy_driver(CatalogDriverV8)
"""
module_name = driver_class.__module__
class_name = driver_class.__name__
class Driver(driver_class):
@versionutils.deprecated(
as_of=versionutils.deprecated.LIBERTY,
what='%s.Driver' % module_name,
in_favor_of='%s.%s' % (module_name, class_name),
remove_in=+2)
def __init__(self, *args, **kwargs):
super(Driver, self).__init__(*args, **kwargs)
return Driver
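# A hypothetical subclass showing the intended use of Manager; the
# namespace and driver name below are illustrative, not real keystone
# entry points.
class ExampleManager(Manager):
    driver_namespace = 'keystone.example'

    def __init__(self):
        super(ExampleManager, self).__init__('sql')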
|
Over 6,000 Food Growers and Artisans.
Join us in Rebuilding our Local Food Systems.
"Hi I get raw milk 24/7 from Bakewell creamery on Wayby valley rd Wellsford. I understand that they deliver as far south as 1 hour from Wellsford so I would say you could get it in takapuna. The milk is fantastic for cheese and is great to use for…"
|
from pysb import Model, Monomer, Parameter
from pysb.core import SelfExporter
from bel import bel_api
from biopax import biopax_api
from trips import trips_api
SelfExporter.do_export = False
class BaseAgentSet(object):
"""A container for a set of BaseAgents. Wraps a dict of BaseAgent instances."""
def __init__(self):
self.agents = {}
def get_create_agent(self, name):
"""Return agent with given name, creating it if needed."""
try:
agent = self.agents[name]
except KeyError:
agent = BaseAgent(name)
self.agents[name] = agent
return agent
def iteritems(self):
return self.agents.iteritems()
def __getitem__(self, name):
return self.agents[name]
class BaseAgent(object):
def __init__(self, name):
self.name = name
self.sites = []
self.site_states = {}
# The list of site/state configurations that lead to this agent
# being active (where the agent is currently assumed to have only
# one type of activity)
self.activating_mods = []
def create_site(self, site, states=None):
"""Create a new site on an agent if it doesn't already exist"""
if site not in self.sites:
self.sites.append(site)
if states is not None:
self.site_states.setdefault(site, [])
try:
states = list(states)
except TypeError:
return
self.add_site_states(site, states)
def add_site_states(self, site, states):
"""Create new states on a agent site if the site doesn't exist"""
for state in states:
if state not in self.site_states[site]:
self.site_states[site].append(state)
def add_activating_modification(self, activity_pattern):
self.activating_mods.append(activity_pattern)
def add_default_initial_conditions(model):
# Iterate over all monomers
for m in model.monomers:
set_base_initial_condition(model, m, 100.0)
def set_base_initial_condition(model, monomer, value):
# Build up monomer pattern dict
sites_dict = {}
for site in monomer.sites:
if site in monomer.site_states:
sites_dict[site] = monomer.site_states[site][0]
else:
sites_dict[site] = None
mp = monomer(**sites_dict)
pname = monomer.name + '_0'
try:
p = model.parameters[pname]
p.value = value
except KeyError:
p = Parameter(pname, value)
model.add_component(p)
model.initial(mp, p)
class PysbAssembler(object):
def __init__(self):
self.statements = []
self.agent_set = None
def add_statements(self, stmts):
self.statements.extend(stmts)
def make_model(self, initial_conditions=True, policies=None):
model = Model()
# Keep track of which policies we're using
self.policies = policies
self.agent_set = BaseAgentSet()
# Collect information about the monomers/self.agent_set from the
# statements
for stmt in self.statements:
stmt.monomers(self.agent_set, policies=policies)
# Add the monomers to the model based on our BaseAgentSet
for agent_name, agent in self.agent_set.iteritems():
m = Monomer(agent_name, agent.sites, agent.site_states)
model.add_component(m)
# Iterate over the statements to generate rules
for stmt in self.statements:
stmt.assemble(model, self.agent_set, policies=policies)
# Add initial conditions
if initial_conditions:
add_default_initial_conditions(model)
return model
if __name__ == '__main__':
pa = PysbAssembler()
bp = bel_api.process_belrdf('data/RAS_neighborhood.rdf')
pa.add_statements(bp.statements)
# bp = bel_api.process_ndex_neighborhood("ARAF")
# pa.add_statements(bp.statements)
# tp = trips_api.process_text("BRAF phosphorylates MEK1 at Ser222")
# pa.add_statements(tp.statements)
model = pa.make_model()
|
LOW MILES - 29,259! 4x4, Aluminum Wheels, Satellite Radio, Onboard Communications System, CD Player, Chevrolet MyLink Audio System.
KEY FEATURES INCLUDE: 4x4, Satellite Radio, CD Player, Onboard Communications System, Aluminum Wheels, Privacy Glass, Keyless Entry, Steering Wheel Controls, Electronic Stability Control, Heated Mirrors.
OPTION PACKAGES: ENGINE, VORTEC 6.0L VARIABLE VALVE TIMING V8 SFI E85-COMPATIBLE, FLEXFUEL, capable of running on unleaded or up to 85% ethanol (360 hp [268.4 kW] @ 5400 rpm, 380 lb-ft of torque [515.0 N-m] @ 4200 rpm) with (E63) fleetside pickup box; (322 hp [240.1 kW] @ 4400 rpm, 380 lb-ft of torque [515.0 N-m] @ 4200 rpm) with (ZW9) pickup box delete (STD). TRANSMISSION, 6-SPEED AUTOMATIC, HEAVY-DUTY, ELECTRONICALLY CONTROLLED with overdrive and tow/haul mode; includes Cruise Grade Braking and Powertrain Grade Braking (STD). SEATS, FRONT 40/20/40 SPLIT-BENCH, 3-PASSENGER, available in cloth or leather; includes driver and front passenger recline with outboard head restraints and center fold-down armrest with storage, manually adjustable driver lumbar, lockable storage compartment in seat cushion, and storage pockets (STD). CHEVROLET MYLINK AUDIO SYSTEM, 4.2 DIAGONAL COLOR SCREEN WITH AM/FM STEREO WITH USB PORTS, auxiliary jack, SD card slot, Bluetooth streaming audio for music and most phones, hands-free smartphone integration, Pandora Internet radio and voice-activated technology for radio and phone (STD).
AutoCheck One Owner. All prices add an additional negotiable documentary fee of up to $150, plus tax, title, and license fees. Not responsible for typographical errors. Subject to prior sale. Some restrictions apply. Pricing analysis performed on 3/9/2018. Horsepower calculations based on trim engine configuration. Please confirm the accuracy of the included equipment by calling us prior to purchase.
|
# -*- coding: utf-8 -*-
from david.lib.mixins.props import PropsMixin, PropsItem
from .attachment import Attachment
from .form import AttachmentField, AttachmentFieldList  # AttachmentField is referenced below
def _get_ids(items):
return [(i.id if hasattr(i, 'id') else i) for i in items]
class AttachmentMixin(PropsMixin):
""" Mixin for a db.Model """
attachments = PropsItem('attachments', [])
def attachment_items(self, media_type=None):
media_filter = None
if media_type:
if isinstance(media_type, str):
media_type = (media_type,)
media_filter = lambda x: x and any([getattr(x, 'is_' + m) for m in media_type])
ret = filter(media_filter, Attachment.gets(self.attachments))
return ret
def attachment_pics(self):
return [x for x in self.attachment_items('image') if x.is_image]
def attachments_info(self, *args, **kwargs):
return [item.serialize() for item in self.attachment_items(*args, **kwargs)]
def add_attachments(self, items):
items = _get_ids(items)
self.attachments = list(set(self.attachments + items))
def remove_attachments(self, items):
items = _get_ids(items)
self.attachments = [i for i in self.attachments if i not in items]
    def _attachment_field(self, name):
        return AttachmentField(name)
def attachment_fields(self, label=None, name='attachments',
max_entries=None):
if label is None:
label = _('Attachments')
        attached = [x for x in self.attachment_items()]
return AttachmentFieldList(
self._attachment_field(name),
label=label,
min_entries=1,
max_entries=max_entries,
default=attached)
class PictureMixin(AttachmentMixin):
_DEFAULT_PIC = None
@property
def picture(self):
if hasattr(self, 'attachments'):
for key in self.attachments:
a = Attachment.get(key)
if a and a.is_image:
return a
if hasattr(self, 'picture_id'):
return Attachment.get(self.picture_id)
def picture_url(self, category='small', default=True):
pic = self.picture
if pic:
return pic.url(category)
if not default:
return None
dft = self._DEFAULT_PIC.replace('%25s', '%s', 1)
if '%s' in dft:
return dft % category
return self._DEFAULT_PIC
class MediaMixin(PictureMixin):
def attachment_medias(self):
audios, videos = [], []
items = self.attachment_items()
for x in items:
if x.is_audio: audios.append(x)
elif x.is_video: videos.append(x)
return audios, videos
|
2 Bedroom Semi Detached Bungalow For Sale in Preston for Asking Price £190,000.
A contemporary two bedroom fully renovated semi detached true bungalow offered with no chain delay, presented with a high specification and extended with a beautiful conservatory opening onto the wonderful south-facing garden. The light and airy home briefly comprises a welcoming entrance hallway, bright and spacious lounge with gas fire and large window to the front elevation, and a contemporary open plan kitchen complete with a breakfast bar and extending into the conservatory, perfect for bringing the outside in. There are two good sized double bedrooms, a luxurious bathroom suite and a loft room with Velux window. All this is complemented by double glazing, gas central heating and a complete rewire. Outside, the property benefits from off road parking leading to a detached single garage complete with a new roof. The beautiful south-facing garden is well-stocked with mature borders and secure fencing, and is mostly laid to lawn. View to appreciate.
Hardwood entrance door. Radiator, laminate flooring. Loft access with pull down loft ladder.
UPVC double glazed door, opening onto the garden. Double glazed uPVC window facing the rear. Radiator, laminate flooring, tiled splashbacks. A contemporary fitted kitchen with wood effect work surfaces and a range of fitted wall and base units, breakfast bar with fitted base units and complementary work surface, single stainless steel sink with drainer, integrated electric oven, gas hob, stainless steel extractor, space for a dishwasher, space for a washing machine, space for a fridge/freezer.
UPVC French double glazed door, opening onto the garden. Double glazed uPVC window facing the rear. Radiator, laminate flooring.
Double glazed uPVC window facing the side. Chrome heated towel rail, vinyl flooring, tiled walls. Built-in WC, panelled bath with shower over the bath, wash hand basin with vanity unit.
Insulated and fully boarded. Velux skylight window facing the front. Radiator, carpeted flooring, two under-eaves storage cupboards, TV aerial and power sockets.
|
# Copyright 2010-2012 Canonical Ltd. This software is licensed under
# the GNU Lesser General Public License version 3 (see the file LICENSE).
from django.core import serializers
from django.db import models
from django.db.models.fields.files import FileField
from django.utils import simplejson
class AuditLog(models.Model):
"""
Records of all changes made via Django admin interface.
"""
username = models.TextField()
user_id = models.IntegerField()
model = models.TextField()
change = models.CharField(max_length=100)
representation = models.TextField()
values = models.TextField()
created_at = models.DateTimeField(auto_now_add=True)
@classmethod
def create(cls, user, obj, change, new_object=None):
assert change in ['create', 'update', 'delete']
values = serializers.serialize("json", [obj])
        # [0] removes the outer list: this serialization is only used for
        # complete, separate objects, so the list wrapper is unnecessary
json = simplejson.loads(values)[0]
if new_object:
values_new = serializers.serialize("json", [new_object])
json_new = simplejson.loads(values_new)[0]
json = {'new': json_new, 'old': json}
if change == 'delete':
file_fields = [f for f in obj._meta.fields
if isinstance(f, FileField)]
if len(file_fields) > 0:
json['files'] = {}
for file_field in file_fields:
field_name = file_field.name
file = getattr(obj, field_name)
if file.name:
json['files'][file.name] = file.read().encode('base64')
values_pretty = simplejson.dumps(json, indent=2, sort_keys=True)
return cls.objects.create(
username=user.username,
user_id=user.id,
model=str(obj._meta),
values=values_pretty,
representation=unicode(obj),
change=change,
)
class AdminAuditMixin(object):
def _flatten(self, lst):
result = []
for item in lst:
if isinstance(item, list):
result.extend(self._flatten(item))
else:
result.append(item)
return result
def _collect_deleted_objects(self, obj):
result = []
try:
# This is for Django up to 1.2
from django.db.models.query_utils import CollectedObjects
seen_objs = CollectedObjects()
obj._collect_sub_objects(seen_objs)
for cls, subobjs in seen_objs.iteritems():
for subobj in subobjs.values():
result.append(subobj)
except ImportError:
# Django 1.3 solution, those imports needs to be here, because
# otherwise they will fail on Django < 1.3.
from django.contrib.admin.util import NestedObjects
from django.db import router
using = router.db_for_write(obj)
collector = NestedObjects(using=using)
collector.collect([obj])
result = self._flatten(collector.nested())
return result
def log_addition(self, request, obj, *args, **kwargs):
AuditLog.create(request.user, obj, 'create')
super(AdminAuditMixin, self).log_addition(request, obj, *args, **kwargs)
def log_deletion(self, request, obj, *args, **kwargs):
for subobj in self._collect_deleted_objects(obj):
AuditLog.create(request.user, subobj, 'delete')
super(AdminAuditMixin, self).log_deletion(request, obj, *args, **kwargs)
def save_model(self, request, new_obj, form, change):
if change:
# This is so that we'll get the values of the object before the
# change
old_obj = new_obj.__class__.objects.get(pk=new_obj.pk)
AuditLog.create(request.user, old_obj, 'update', new_object=new_obj)
super(AdminAuditMixin, self).save_model(
request, new_obj, form, change)
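# Usage sketch: the mixin must come before ModelAdmin in the bases so its
# log_addition/log_deletion/save_model overrides run first. Book is an
# illustrative model, not part of this module.
#
# from django.contrib import admin
#
# class BookAdmin(AdminAuditMixin, admin.ModelAdmin):
#     pass
#
# admin.site.register(Book, BookAdmin)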
|
Real estate prices are forever in flux. Normally, house values appreciate in the long term. But, of course, there is always a certain amount of risk in real estate.
When your property appreciates you have a bigger asset to borrow against, and you make a higher profit when you sell. But how will you be sure what you're buying this year will appreciate over time? Property values in Newburgh move up and down for various reasons. It's important that you choose a REALTOR® in Newburgh who is familiar with the factors that drive local prices.
The economy is believed to be the top factor impacting real estate appreciation. It goes without saying that there are several factors on a national level that change your property's value: unemployment, interest rates, consumer confidence, and more. However, your house's value and the elements that play the most significant role in its appreciation are particular to the local Newburgh economy and housing market.
Location in a community - Many home buyers want to live in districts with the traits that help households thrive, such as close proximity to schools and jobs. When it comes to retaining their value, these areas consistently appreciate much more reliably than areas lacking key features.
The latest home sales - You should receive statistics on the recent real estate sales in the areas that you're asking about from your agent. You'll want to know figures like how long a house stays on the market and listing price versus selling price.
History of appreciation - In the past 5-10 years, have property prices risen or declined? Does location or affordability affect how desirable the area is thought to be?
Local economy - Is there a nice combination of business in an area, or does it rely upon just one industry? Have companies moved into or away from an area? Are local businesses hiring? All these play a role.
Knowing the factors that affect your house's financial worth is important. Visit this link for an estimation of your property's value. And if you have any questions, call me at (845) 567-6789 or e-mail me.
|
# Fetches twits from Twitter using one of many possible download mechanisms and parameters given via command line arguments
# This particular implementation uses twitterscraper as an example implementation.
# Warning: scraping twitter may be not completely legal in your country.
# You could use tweepy for a legal option that uses Twitter API.
import argparse
import pymongo
from pymongo.errors import BulkWriteError
from twitterscraper import query_tweets
# parse command line arguments
parser = argparse.ArgumentParser("fetcher")
parser.add_argument("database", help="Database to save to", type=str)
parser.add_argument("collection", help="Collection to save to", type=str)
parser.add_argument("query", help="Query", type=str)
parser.add_argument("limit", help="Limit of tweets to download", type=int, default=None)
args = parser.parse_args()
# connect to database
client = pymongo.MongoClient()
db = client[args.database]
collection = db[args.collection]
# get tweets
# other download mechanisms could be used instead of query_tweets() here.
tweets = []
for tweet in query_tweets(args.query, args.limit):
tweets.append({
"_id" : tweet.id,
"timestamp" : tweet.timestamp,
"user" : tweet.user,
"fullname" : tweet.fullname,
"text" : tweet.text
})
# save tweets to mongodb
try:
collection.insert_many(tweets)
    print(args.collection + " done")
except BulkWriteError as bwe:
print(bwe.details)
#you can also take this component and do more analysis
#werrors = bwe.details['writeErrors']
raise
|
The Blot is so freaking pumped about the return of Better Call Saul tonight on AMC! The prequel/sequel of Breaking Bad really hit its stride last year, and the sky is truly the limit for Better Call Saul Season 3 with the addition of Giancarlo Esposito’s Gustavo Fring to the cast. Things are totally going to get crazy this season! I can’t wait to finally learn the true backstory of one of television’s greatest characters, Gus Fring.
Better Call Saul Season 3 premieres tonight, Monday, April 10th, at 10/9c on AMC. This is a new night for the show so make sure your DVRs are set. And what do you think of S3’s new one sheet television poster!?!
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from celery.utils.log import get_task_logger
from lib.fingerprinting import HttpFingerprinter, HttpsFingerprinter
from .....app import websight_app
from ....base import ServiceTask, NetworkServiceTask
logger = get_task_logger(__name__)
#USED
@websight_app.task(bind=True, base=NetworkServiceTask)
def check_service_for_http(
self,
org_uuid=None,
network_service_scan_uuid=None,
ip_address=None,
port=None,
network_service_uuid=None,
order_uuid=None,
):
"""
Check to see if the given remote service is running HTTP.
:param org_uuid: The UUID of the organization to check the service on behalf of.
:param network_service_scan_uuid: The UUID of the network service scan that this service fingerprinting is
associated with.
:param ip_address: The IP address where the service is running.
:param port: The port where the service is running.
:param network_service_uuid: The UUID of the network service to check for HTTP service.
:return: None
"""
logger.info(
"Now checking to see if remote TCP service at %s:%s is running HTTP. Organization is %s, scan is %s."
% (ip_address, port, org_uuid, network_service_scan_uuid)
)
fingerprinter = HttpFingerprinter(ip_address=ip_address, port=port)
fingerprinter.perform_fingerprinting()
logger.info(
"TCP service at %s:%s found %s running HTTP."
% (ip_address, port, "to be" if fingerprinter.fingerprint_found else "not to be")
)
result_record = fingerprinter.to_es_model(model_uuid=network_service_scan_uuid, db_session=self.db_session)
result_record.save(org_uuid)
logger.info(
"Elasticsearch updated with HTTP fingerprint result for TCP endpoint %s:%s. Organization was %s, scan was %s."
% (ip_address, port, org_uuid, network_service_scan_uuid)
)
#USED
@websight_app.task(bind=True, base=NetworkServiceTask)
def check_service_for_https(
self,
org_uuid=None,
network_service_scan_uuid=None,
ip_address=None,
port=None,
ssl_version=None,
network_service_uuid=None,
order_uuid=None,
):
"""
Check to see if the given remote service is running HTTPS.
:param org_uuid: The UUID of the organization to check the service on behalf of.
:param network_service_scan_uuid: The UUID of the network service scan that this service fingerprinting is
associated with.
:param ip_address: The IP address where the service is running.
:param port: The port where the service is running.
:param ssl_version: The version of SSL to use to connect to the remote service.
    :param network_service_uuid: The UUID of the network service to check for HTTPS service.
:return: None
"""
logger.info(
"Now checking to see if remote TCP service at %s:%s is running HTTPS with SSL version %s. "
"Organization is %s, scan is %s."
% (ip_address, port, ssl_version, org_uuid, network_service_scan_uuid)
)
fingerprinter = HttpsFingerprinter(ip_address=ip_address, port=port, ssl_version=ssl_version)
fingerprinter.perform_fingerprinting()
logger.info(
"TCP service at %s:%s found %s running HTTPS."
% (ip_address, port, "to be" if fingerprinter.fingerprint_found else "not to be")
)
result_record = fingerprinter.to_es_model(model_uuid=network_service_scan_uuid, db_session=self.db_session)
result_record.save(org_uuid)
logger.info(
"Elasticsearch updated with HTTPS fingerprint result for TCP endpoint %s:%s. Organization was %s, scan was %s."
% (ip_address, port, org_uuid, network_service_scan_uuid)
)
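# For context, a sketch of how one of these tasks might be queued from
# calling code; every argument value below is an illustrative placeholder.
#
# check_service_for_http.delay(
#     org_uuid='00000000-0000-0000-0000-000000000000',
#     network_service_scan_uuid='00000000-0000-0000-0000-000000000001',
#     ip_address='203.0.113.10',
#     port=8080,
#     network_service_uuid='00000000-0000-0000-0000-000000000002',
#     order_uuid='00000000-0000-0000-0000-000000000003',
# )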
|
WHAT – President Barack Obama will award Chaplain (Captain) Emil J. Kapaun, U.S. Army, the Medal of Honor posthumously for conspicuous gallantry.
WHEN – The event will be held Thursday, April 11. A second ceremony will occur the next day, April 12.
WHERE – The award ceremony will occur at the White House in Washington, D.C.; the second ceremony will take place at the Pentagon.
WHY – Fr. Kapaun died as a prisoner of war, and the Korean War veterans who served with him, who were saved by him, have lobbied the Army for more than 60 years to award Kapaun the Medal of Honor for his acts of bravery.
Roy Wenzl and Travis Heying, the authors of “The Miracle of Father Kapaun,” interviewed the dozens of men who survived the POW camp because of the courageous acts of this young priest. “They said he repeatedly ran through machine gun fire, dragging wounded soldiers to safety. In the prison camp, he shaped roofing tin into cooking pots so prisoners could boil water, which prevented dysentery. He picked lice off sick prisoners. He stole food from his captors and shared it with his starving comrades. Most of all, Kapaun rallied all of them, as they starved during subzero temperatures, to stay alive. When their future seemed hopeless, he persuaded them to hope. Hundreds died in the camps, but hundreds more survived,” recount Wenzl and Heying.
According to the press release distributed by the White House, Fr. Kapaun is being given this honor because he displayed extraordinary heroism while serving as a military chaplain during combat, and as a prisoner of war from November 2, 1950 – May 23, 1951.
HOW – Contact Lisa Wheeler (770-591-0045 or by email at [email protected]) or Kevin Wandra (404-788-1276 or by email [email protected]) of Carmel Communications to schedule interviews with Roy Wenzl and Travis Heying, co-authors of The Miracle of Fr. Kapaun: Priest, Soldier and Korean War Hero,” a new book published by Ignatius Press that chronicles the heroic life of Fr. Kapaun. Both Wenzl and Heying will be at the Medal of Honor ceremonies in Washington, D.C., next month.
MORE INFORMATION – For more information on the Medal of Honor ceremony, or to schedule an interview with Roy Wenzl and Travis Heying, or to request a galley copy of The Miracle of Fr. Kapaun: Priest, Soldier and Korean War Hero, please contact Lisa Wheeler (770-591-0045 or by email at [email protected]) or Kevin Wandra (404-788-1276 or by email [email protected]) of Carmel Communications.
Also, see The Wichita Eagle’s story on Fr. Kapaun and the Medal of Honor announcement for more information: http://www.kansas.com/2013/03/11/2711367/white-house-kapaun-to-get-medal.html.
|
import time
import uuid
import subprocess
import logging
log = logging.getLogger(__name__)
def check_call(*args, **kwargs):
start = time.time()
command_uuid = uuid.uuid4()
command_line = subprocess.list2cmdline(args[0])
log.debug("%s: Executing: %s" % (command_uuid, command_line))
data = None
if "data" in kwargs:
data = kwargs["data"]
del(kwargs["data"])
kwargs.setdefault("close_fds", True)
proc = subprocess.Popen(
*args, stdout=subprocess.PIPE, stdin=subprocess.PIPE,
stderr=subprocess.PIPE, **kwargs
)
(stdout_data, stderr_data) = proc.communicate(input=data)
if stdout_data.strip():
log.debug("OUT: %s" % stdout_data.strip().replace('\n', ' -- '))
if stderr_data.strip():
log.debug("ERR: %s" % stderr_data.strip().replace('\n', ' -- '))
log.debug("%s: %s: [code=%s, duration=%.1fs]" % (command_uuid, command_line, proc.returncode, time.time() - start))
if proc.returncode != 0:
return stderr_data.strip()
return stdout_data.strip()
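# Usage sketch (the command and input are illustrative). Note that, despite
# its name, this helper does not raise on a non-zero exit: it returns the
# command's stderr instead of its stdout.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    out = check_call(["grep", "line"], data="first line\nsecond\n")
    print(out)  # on Python 3, pass bytes for `data`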
|
When asked to describe MLB Fan Cave to people I tell them it’s kind of like Survivor meets The Real World in the fan cave in New York.
The cave dwellers blog and film videos about the experience, cover every MLB game, and get to interact with players and celebrities.
The current video up on the MLB FanCave website has the Pirates’ Andrew McCutchen doing Tom Cruise impressions.
For anyone that knows me, being a part of this wonderful combination of baseball and pop culture entertainment would be a dream come true.
Inspired, I created a Step Brothers parody video with my buddy from class who is a Giants fan. After scoping out some of the competition, I knew I had a solid video.
On Jan. 31, I was announced as a top-52 finalist!
I’ve been tweeting up a storm, posting relentlessly on Facebook, dusted off my Instagram, and attempting to dominate social media in an effort to make the top-30.
Those who move on to the next round will go to Spring Training in Arizona for four days to try and make the final cut to make it to New York.
I believe I’ve got what it takes to be one of those people.
The Campaign: I’ve mostly been tweeting multiple times a day with the link to the MLB FanCave site, where fans can vote.
I’ve been on the lookout for photos to share. One was a cool photo of Clayton Kershaw and A.J. Ellis looking like action stars strolling out from in front of a huge explosion.
I also challenged Matt Kemp to a game of H.O.R.S.E. if he visits the FanCave (via social media, of course).
I’ve picked up a ton of new followers along the way, including the official Dodgers twitter, which is actively promoting us, and Jeff G, the Sports Dude from Power 106.
I’ve hash-tagged Dodgers Nation in most of my posts, as well to generate some Dodgers buzz around my campaign. Above all else, it’s been really nice to see all the support I’ve gotten over social media and it’s nice to know I have some fans out there.
The Other Finalists: I’ve also gotten the chance to meet and interact with some of the other 51 finalists. This is a solid group of people who are passionate and enthusiastic about their teams, and all seem like real fun individuals.
I’m convinced that if I made it to New York with this group, this would definitely be the best FanCave ever.
The FanCave is such a unique experience because it’s all about having fun and being yourself, so to get the opportunity to do that with other great people who are striving to do the same, I’d relish that.
We’ve been engaging in tons of baseball and pop-culture related banter, such as which musical act we’d like to see perform at the Cave or which player we’d most like to play catch with.
There are also two other Dodger fans in the top-52, and the Dodgers have yet to be represented in the Cave.
Perhaps, there will be multiple Dodgers representatives this season.
Voting for the Top 30 ends on Feb. 13. Hopefully I will be among those selected to go to Spring Training and ultimately represent the Dodgers in New York at the MLB FanCave.
Thanks to everyone who has supported the campaign so far by voting and spreading the word on social media. You can vote as many times as you’d like, so take a moment out of your day to #PickFabrick and vote for all of your favorites for the 2013 MLB FanCave.
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2012 Nicolas Wack <[email protected]>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from guessit.patterns import subtitle_exts
from guessit.textutils import reorder_title, find_words
import logging
log = logging.getLogger(__name__)
def process(mtree):
# 1- try to promote language to subtitle language where it makes sense
for node in mtree.nodes():
if 'language' not in node.guess:
continue
def promote_subtitle():
if 'language' in node.guess:
node.guess.set('subtitleLanguage', node.guess['language'],
confidence=node.guess.confidence('language'))
del node.guess['language']
# - if we matched a language in a file with a sub extension and that
# the group is the last group of the filename, it is probably the
# language of the subtitle
# (eg: 'xxx.english.srt')
if (mtree.node_at((-1,)).value.lower() in subtitle_exts and
node == mtree.leaves()[-2]):
promote_subtitle()
# - if we find the word 'sub' before the language, and in the same explicit
# group, then upgrade the language
explicit_group = mtree.node_at(node.node_idx[:2])
group_str = explicit_group.value.lower()
if ('sub' in find_words(group_str) and
0 <= group_str.find('sub') < (node.span[0] - explicit_group.span[0])):
promote_subtitle()
# - if a language is in an explicit group just preceded by "st",
# it is a subtitle language (eg: '...st[fr-eng]...')
try:
idx = node.node_idx
previous = mtree.node_at((idx[0], idx[1] - 1)).leaves()[-1]
if previous.value.lower()[-2:] == 'st':
promote_subtitle()
except IndexError:
pass
# 2- ", the" at the end of a series title should be prepended to it
for node in mtree.nodes():
if 'series' not in node.guess:
continue
node.guess['series'] = reorder_title(node.guess['series'])
|
Jessica joined the Crest team in 2015, and following her time managing the paraplanning team, she is now an Adviser within the firm. Jessica has been able to explore the practical application of her studies in the various facets of her professional role. She enjoys the challenge, with no two days being the same.
Jessica graduated from the University of Newcastle in 2016 with a Bachelor of Commerce (Finance). She is planning on furthering her education by undertaking post-graduate studies.
In her spare time, Jessica can be found in the kitchen baking, planning her next overseas adventure or spending quality time with her family and friends.
|
#-*- coding:utf-8 -*-
"""
This file is part of OpenSesame.
OpenSesame is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenSesame is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OpenSesame. If not, see <http://www.gnu.org/licenses/>.
"""
from libopensesame.py3compat import *
import os
import sys
from libopensesame import misc, metadata
from libqtopensesame.extensions import base_extension
from libqtopensesame.misc.translate import translation_context
from libqtopensesame.misc import template_info
_ = translation_context(u'get_started', category=u'extension')
class get_started(base_extension):
"""
desc:
Shows the get-started tab and opens an experiment on startup, if one was
passed on the command line.
"""
def activate(self):
"""
desc:
Is called when the extension is activated through the menu/ toolbar
action.
"""
# Initialize templates
templates = []
for i, (path, desc) in enumerate(template_info.templates):
try:
path = self.experiment.resource(path)
except:
continue
if not i:
cls = u'important-button'
else:
cls = u'button'
path = os.path.abspath(path)
md = u'<a href="opensesame://%s" class="%s">%s</a><br />' \
% (path, cls, desc)
templates.append(md)
# Initialize recent experiments
if not self.main_window.recent_files:
recent = []
else:
recent = [_(u'Continue with a recent experiment:')+u'<br />']
for i, path in enumerate(self.main_window.recent_files):
cls = u'important-button' if not i else u'button'
md = u'<a href="opensesame://event.open_recent_%d" class="%s">%s</a><br />' % \
(i, cls, self._unambiguous_path(path))
recent.append(md)
# Create markdown
with safe_open(self.ext_resource(u'get_started.md')) as fd:
md = fd.read()
md = md % {
u'version' : metadata.__version__,
u'codename' : metadata.codename,
u'templates' : u' \n'.join(templates),
u'recent_experiments' : u' \n'.join(recent)
}
self.tabwidget.open_markdown(md, title=_(u'Get started!'),
icon=u'document-new')
def _unambiguous_path(self, path):
"""
desc:
If the path basename is unique among the resent experiments, this is
used. Otherwise, the full path is used.
arguments:
path: The path to shorten unambiguously.
returns:
The unambiguously shortened path.
"""
basename = os.path.basename(path)
basenames = \
[os.path.basename(_path) for _path in self.main_window.recent_files]
return path if basenames.count(basename) > 1 else basename
def event_open_recent_0(self):
self.main_window.open_file(path=self.main_window.recent_files[0])
def event_open_recent_1(self):
self.main_window.open_file(path=self.main_window.recent_files[1])
def event_open_recent_2(self):
self.main_window.open_file(path=self.main_window.recent_files[2])
def event_open_recent_3(self):
self.main_window.open_file(path=self.main_window.recent_files[3])
def event_open_recent_4(self):
self.main_window.open_file(path=self.main_window.recent_files[4])
def event_startup(self):
"""
desc:
Called on startup.
"""
# Open an experiment if it has been specified as a command line argument
# and suppress the new wizard in that case.
if len(sys.argv) >= 2 and os.path.isfile(sys.argv[1]):
path = safe_decode(sys.argv[1], enc=misc.filesystem_encoding(),
errors=u'ignore')
self.main_window.open_file(path=path)
return
self.activate()
|
We are looking for experienced line cooks who are eager to learn our recipes.
We are looking for a leader who is ready to take initiative, displays professional care, and is committed to long-term, full-time employment.
Knowledge of basic French cuisine is an asset but not mandatory. The desire to learn new techniques is most important.
If you don’t have the experience, we are ready to teach you all the basics of cooking and working in a kitchen, as long as you care and take pride in doing your job well.
No prior kitchen knowledge is necessary; we welcome amazing, hardworking, clever employees ready to learn.
Crepe Montagne is a small but very busy place.
If you are energetic, reliable, and detail oriented, this job is for you!
We are looking for full time servers who are organized, efficient and have excellent knowledge of table service.
A minimum of 3 years of table service experience is required, and you must speak English fluently.
Your duties will be to make and deliver drinks, run food to tables as needed and clear/reset tables in a very organized manner.
Crepe Montagne requires a multitasker who enjoys a fast paced, challenging work environment.
Server assistants have the opportunity to grow into serving positions. Your focus and attention to detail will allow you to move up in our establishment quickly.
Employee Satisfaction: We support a fun and friendly team environment and we expect staff to be committed to their responsibilities. We value employees who work hard and take their job seriously.
Crepe Montagne Mission: Every customer should leave happy because we pride ourselves on serving excellent food in a very friendly and professional manner. If you think you can contribute to our team, please forward your resume to [email protected] or drop by in person with your resume before 3pm.
|
import sys, os
sys.path = [os.path.join(os.getcwd(), "..") ] + sys.path
from tileConfig import TileConfig, TileDesc, MachineDesc, SaveConfig, LoadConfig, TileLocation, Rect, LocalWindow
def CreateLocalTestConfig():
c = TileConfig()
t0 = TileDesc( (400, 400), (0,0), ":0", localWindowId=0)
t1 = TileDesc( (400, 400), (400, 0), ":0", lrtbMullions=(0,0,0,0), location=TileLocation( (400,0), relative=t0.uid), localWindowId=0)
print "t1 relative:", t1.location.relative
t2 = TileDesc( (400, 400), (0,400), ":0", localWindowId=0, location=TileLocation( (0,400), relative=t0.uid))
t3 = TileDesc( (400, 400), (400, 400), ":0", lrtbMullions=(0,0,0,0), location=TileLocation( (400,0), relative=t2.uid), localWindowId=0)
localWindow = LocalWindow(Rect(0,0,800,800))
m3 = MachineDesc( "maze", tiles = [t0, t1, t2, t3], windows=[localWindow])
c.addMachine(m3)
return c
if __name__ == "__main__":
c = CreateLocalTestConfig()
SaveConfig(c, "/tmp/testconfig")
print c.asDict()
c2 = LoadConfig("/tmp/testconfig")
if c == c2:
print "PASS: Saved and reread config matched original."
else:
print "FAIL: Saved and reread config did not match original. Saving as testconfig2 for comparison"
SaveConfig(c2, "/tmp/testconfig2")
|
Shop the latest outdoor tiles online at au.dhgate.com, with free and fast delivery to Australia. DHgate offers a large selection of colours and styles with superior quality and exquisite craftsmanship.
ABC Bahrain Business Directory for Ceramics & Tiles ... The Gulf region now hosts some of the world’s largest producers of ceramic tiles and sanitary ware that boast designs and quality on par with international standards.
PORCELAIN OUTDOOR TILES (2CM) Viewgres 2cm top-quality fine porcelain is perfect for outdoor use and exterior wall cladding projects, and is available in a wide variety of ... The stability and innovation of Viewgres products broaden the range of tile applications.
Outdoor tiles - ArchiExpo - The online architecture and design exhibition: kitchen, bathroom, lighting, furniture, office, etc.
Find your outdoor tile easily amongst the 4,539 products from the leading brands (Porcelanosa, COTTO D'ESTE, CERAMICHE CAESAR, ...) on ArchiExpo, the architecture and design specialist for your professional purchases.
Hong Kong's longest-established specialist in premium imported Spanish and Italian tiles, offering cash refunds at the original price; honest and reliable. ... Tai Yick Building Materials Co., Ltd. 328, G/F Portland Street, Mongkok, Kowloon, Hong Kong. Tel: 2394 1993 Fax: 2789 8570
|
"""File system utilities
.. codeauthor:: Joe DeCapo <[email protected]>
"""
import errno
import os
import shutil
from pathlib import Path
import clowder.util.formatting as fmt
from clowder.util.error import ExistingFileError, MissingSourceError
from clowder.util.logging import LOG
def symlink_clowder_yaml(source: Path, target: Path) -> None:
"""Force symlink creation
:param Path source: File to create symlink pointing to
:param Path target: Symlink location
:raise ExistingFileError:
:raise MissingSourceError:
"""
if not target.is_symlink() and target.is_file():
raise ExistingFileError(f"Found non-symlink file {fmt.path(target)} at target path")
if not Path(target.parent / source).exists():
raise MissingSourceError(f"Symlink source {fmt.path(source)} appears to be missing")
if target.is_symlink():
remove_file(target)
try:
path = target.parent
fd = os.open(path, os.O_DIRECTORY)
os.symlink(source, target, dir_fd=fd)
os.close(fd)
except OSError:
LOG.error(f"Failed to symlink file {fmt.path(target)} -> {fmt.path(source)}")
raise
def remove_file(file: Path) -> None:
"""Remove file
:param Path file: File path to remove
"""
os.remove(str(file))
def create_backup_file(file: Path) -> None:
"""Copy file to {file}.backup
:param Path file: File path to copy
"""
shutil.copyfile(str(file), f"{str(file)}.backup")
def restore_from_backup_file(file: Path) -> None:
"""Copy {file}.backup to file
:param Path file: File path to copy
"""
shutil.copyfile(f"{file}.backup", file)
def make_dir(directory: Path, check: bool = True) -> None:
"""Make directory if it doesn't exist
    :param Path directory: Directory path to create
:param bool check: Whether to raise exceptions
"""
if directory.exists():
return
try:
os.makedirs(str(directory))
except OSError as err:
if err.errno == errno.EEXIST:
LOG.error(f"Directory already exists at {fmt.path(directory)}")
else:
LOG.error(f"Failed to create directory {fmt.path(directory)}")
if check:
raise
def remove_directory(dir_path: Path, check: bool = True) -> None:
"""Remove directory at path
    :param Path dir_path: Path to directory to remove
:param bool check: Whether to raise errors
"""
try:
shutil.rmtree(dir_path)
    except (shutil.Error, OSError):
LOG.error(f"Failed to remove directory {fmt.path(dir_path)}")
if check:
raise
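
if __name__ == '__main__':
    # Minimal demo (illustrative only; POSIX paths assumed): back up a file,
    # then force-create a symlink pointing at it.
    from tempfile import mkdtemp

    base = Path(mkdtemp())
    config = base / 'clowder.yml'
    config.write_text('sources: []\n')
    create_backup_file(config)  # copies to clowder.yml.backup
    link = base / 'clowder.link.yml'
    # The symlink source is resolved relative to the link's parent directory
    symlink_clowder_yaml(Path('clowder.yml'), link)
    print(link, '->', os.readlink(str(link)))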
|
Last lot available in the popular Brittany Hills subdivision. No tap fees with this lot. Great opportunity to build your dream home!! Seller is related to the listing Realtor.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import codecs
import cPickle as pickle
import numpy as np
import os
from load_tweets import load_tweets
from collections import Counter
NAME='ef_list_baseline'
prefix='list_baseline'
if __name__ == "__main__":
    # Command-line options
p = argparse.ArgumentParser(NAME)
p.add_argument("DIR",default=None,
action="store", help="Directory with corpus")
p.add_argument("LIST1",default=None,
action="store", help="File with list of words")
p.add_argument("LIST2",default=None,
action="store", help="File with list of words")
p.add_argument("-d", "--dir",
action="store", dest="dir",default="feats",
help="Default directory for features [feats]")
p.add_argument("-p", "--pref",
action="store", dest="pref",default=prefix,
help="Prefix to save the file of features %s"%prefix)
p.add_argument("--mix",
action="store_true", dest="mix",default=True,
help="Mix tweets into pefiles")
p.add_argument("--format",
action="store_true", dest="format",default="pan15",
help="Change to pan14 to use format from 2015 [feats]")
p.add_argument("-v", "--verbose",
action="store_true", dest="verbose",
help="Verbose mode [Off]")
p.add_argument("--stopwords", default=None,
action="store", dest="stopwords",
help="List of stop words [data/stopwords.txt]")
opts = p.parse_args()
if opts.verbose:
def verbose(*args):
print(*args)
else:
verbose = lambda *a: None
    # Collect the tweets and their identifiers (tweet id and user id)
tweets,ids=load_tweets(opts.DIR,opts.format,mix=opts.mix)
    # Print some information about the tweets
if opts.verbose:
for i,tweet in enumerate(tweets[:10]):
verbose('Tweet example',i+1,tweet[:100])
verbose("Total tweets : ",len(tweets))
    try:
        verbose("Total users : ", len(set([id for x, id in ids])))
    except ValueError:
        verbose("Total users : ", len(ids))
    # Compute the features
    # - Load the two word lists
list_of_words1 = [line.strip() for line in codecs.open(opts.LIST1,encoding='utf-8') if
len(line.strip())>0]
list_of_words2 = [line.strip() for line in codecs.open(opts.LIST2,encoding='utf-8') if
len(line.strip())>0]
counts = []
    for i, j in enumerate(tweets):
        c = Counter(j.split())  # count tokens, not individual characters
        count1 = sum(c[x] for x in list_of_words1)
        count2 = sum(c[x] for x in list_of_words2)
        counts.append((count1, count2))
    # - Build the feature matrix from the word counts
feats = np.asarray(counts)
    # Save the feature matrix
with open(os.path.join(opts.dir,opts.pref+'.dat'),'wb') as idxf:
pickle.dump(feats, idxf, pickle.HIGHEST_PROTOCOL)
    # Print information about the feature matrix
verbose("Total de features :",feats.shape[1])
verbose("Total de renglones:",feats.shape[0])
    # Save the row indices of the matrix (user or tweet, user)
with open(os.path.join(opts.dir,opts.pref+'.idx'),'wb') as idxf:
pickle.dump(ids, idxf, pickle.HIGHEST_PROTOCOL)
|
Since he was forcibly exiled on February 29, 2004, Washington and Haiti have denied Aristide's right to return, though that right is affirmed in Haiti's Constitution and international law.
Article 9: "No one shall be subjected to arbitrary arrest, detention or exile."
Article 13(2): "Everyone has the right to leave any country, including his own, and to return to his country."
Article 12(2)(4): "Everyone shall be free to leave any country, including his own....No one shall be arbitrarily deprived of the right to enter his own country."
Article 5(d)(ii): Civil rights for everyone include "(t)he right to leave any country, including one's own, and to return to one's country."
Article 5(2): They have "(t)he right to leave the country."
Article 10 of the Convention on the Rights of the Child affords the same right of return to children. So does Article 8 of the International Convention on the Protection of the Rights of All Migrant Workers and Members of Their Families (ICPMMW). Everyone has the right to go home.
International law provides clear affirmation, including freedom of movement as a fundamental human right. Hegemons like America, however, ignore it, forcing vassal states like Haiti to concur - at least up to now. At issue is whether anything has changed.
On February 9, the State Department said Aristide's return would be an "unfortunate distraction" from the scheduled March 20 electoral runoffs. Washington, in fact, manipulated the November and runoff rounds to exclude all candidates favoring democracy, including those from Aristide's Fanmi Lavalas party, by far the most popular.
VOA, however, said the "State Department is making clear publicly that it would consider (his return) in the midst of the campaign (a) bad idea."
"Do you know a better time?....We will not have to wait too long (for his return). The food is cooking," citing a Haitian Creole proverb.
"We now are looking to the government of South Africa that has been such a gracious host to former President Aristide these past seven years to work with the government of Haiti to ensure the President's smooth transition back to his country."
State Department spokesman PJ Crowley said he's unaware of his specific travel plans, but that Washington "would hate to see....divisiveness" introduced into the electoral process. "I think that we would be concerned that if former President Aristide returns to Haiti before the election, it would prove to be an unfortunate distraction. The people of Haiti should be evaluating the two (US approved) candidates that will participate in the (presidential) runoff, and that I think should be their focus."
Aristide was twice democratically elected, VOA saying he "was quickly ousted by the military" in 1991. "He was restored to power after US intervention in (October) 1994 (to February 1996, then overwhelmingly reelected in 2000 with 92% of the vote), but driven from office amid charges of corruption and autocratic rule."
Several previous articles debunked those charges, calling them spurious attempts to vilify a beloved leader, ousted by America because he was one. In Washington-speak, that made him "polarizing."
"....Mr. Aristide claim(s he's) interested in national reconciliation," but "critics" doubt it. "Experts inside and outside Haiti (representing imperial Washington) fear (his) presence....could further destabilize the country," when, in fact, it'll be powerfully inspiring and unifying.
According to Jocelyn McCalla, senior advisor to Haiti's special UN envoy: "Once Duvalier was back, there could be no rationale for keeping Aristide out," and without question, Washington and France colluded to bring Baby Doc back for whatever purpose they plan.
Practically acting as Washington's spokesman, Cave stressed that "members of the international community have expressed concern that Mr. Aristide - who 'was' beloved by the poor but criticized by many (read imperial supporters and media flacks) for demagoguery, corruption, and the suppression of political opponents - could create widespread instability at a precarious moment."
According to OAS Secretary General Jose Miguel Insulza, a reliable imperial tool, consideration for Aristide's return should be delayed until a new president takes office. He added, however, that both candidates oppose him, likely complicating matters further.
"(W)hat Haiti needs right now, coming out of a prolonged first round of (sham) election(s), is a period of calm, not divisive actions that can only distract from the vital task of forming a legitimate and credible government."
As several previous articles explained, the entire electoral process was rigged. Fifteen parties were excluded, including Fanmi Lavalas, by far the most popular. Massive fraud was also confirmed, exposing the entire process as fraudulent and illegitimate, but Washington's heavy-handedness endorsed it.
Former US ambassador during Aristide's tenure, Brian Dean Curran, didn't comment but called his return "a colossal mistake. It's particularly bad at this moment when the political situation is so fragile." He also doubted Aristide would restrict his activities to education, saying: "No one should believe that for an instant," though no explanation was given why not. Numerous times in exile, Aristide expressed no interest in returning to politics in any capacity. He wants only to serve his people as a private citizen. Believe it. It's true.
Haitians "would rise en masse to greet him and that the airport scene would be like nothing anyone has witnessed in recent times in Haiti." Perhaps never there or anywhere with millions across the country rallying in support for their first joyous moment since his ouster.
They now await his return. For them, millions of global supporters, and President Aristide and his wife Mildred, it can't come a moment too soon.
|
#!/usr/bin/env python
# -*- mode: python; coding: utf-8; -*-
# (c) Lankier mailto:[email protected]
import sys, os
import time
import shutil
import sqlite3
from cStringIO import StringIO
from lxml import etree
from copy import deepcopy
from optparse import OptionParser, make_option
import zipfile
import traceback
from utils import walk, read_file, prog_version, print_log, LogOptions, check_xml, count_files, print_exc
from parser import FB2Parser
# global vars
db_file = None
_connect = None
options = None
not_deleted_list = None
update_time = None
fsenc = sys.getfilesystemencoding()
namespaces = {'m': 'http://www.gribuser.ru/xml/fictionbook/2.0',
'xlink':'http://www.w3.org/1999/xlink',
'l':'http://www.w3.org/1999/xlink'}
# statistics
class stats:
total_files = 0
total = 0
passed = 0
fixed = 0
errors = 0
def insert_values(curs, tbl_name, s):
i = s.index('VALUES') + len('VALUES')
values = s[i:].strip()
if values.endswith(';'):
values = values[:-1]
values = values.split('),(')
for v in values:
if not v.startswith('('):
v = '(' + v
if not v.endswith(')'):
v = v + ')'
v = v.replace('\\\\', '\x00') # temporary replace backslashes
v = v.replace("\\'", "''") # replace escape \' -> ''
v = v.replace('\x00', '\\') # return backslashes
sql = 'insert into %s values %s' % (tbl_name, v)
try:
curs.execute(sql)
except:
print 'SQL:', repr(sql)
raise
def mksql(fn, tbl_name):
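    # Parse a MySQL dump file (optionally gzip-compressed), translate its
    # CREATE TABLE statement into SQLite-compatible DDL, replay the INSERT
    # statements, and return the dump timestamp.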
global _connect
curs = _connect.cursor()
curs.execute('DROP TABLE IF EXISTS `%s`' % tbl_name)
sql = []
start = False
data = open(fn).read(2)
if data == '\x1f\x8b':
import gzip
f = gzip.open(fn, 'rb')
data = f.read()
f.close()
fd = StringIO(data)
else:
fd = open(fn)
for s in fd:
if s.startswith(')'):
break
if s.startswith('CREATE TABLE'):
start = True
sql.append('CREATE TABLE `%s` (\n' % tbl_name)
elif start:
if s.strip().startswith('KEY'):
continue
elif s.strip().startswith('FULLTEXT KEY'):
continue
elif s.strip().startswith('UNIQUE KEY'):
continue
else:
#s = s.replace('auto_increment', 'AUTOINCREMENT')
s = s.replace('auto_increment', '')
s = s.replace('character set utf8', '')
s = s.replace('collate utf8_bin', '')
s = s.replace('collate utf8_unicode_ci', '')
s = s.replace('unsigned', '')
s = s.replace('COMMENT', ', --')
s = s.replace('USING BTREE', '')
#s = s.replace('UNIQUE KEY', 'UNIQUE')
sql.append(s)
sql = ''.join(sql).strip()
if sql.endswith(','):
sql = sql[:-1]
sql = sql+'\n)'
curs.execute(sql)
#
update_time = None
found = False
for s in fd:
if s.startswith('INSERT INTO'):
insert_values(curs, tbl_name, s)
found = True
elif s.startswith('-- Dump completed on'):
ut = s[len('-- Dump completed on'):].strip().replace(' ', ' ')
if update_time is None:
update_time = ut
else:
update_time = min(ut, update_time)
_connect.commit()
if not found:
raise ValueError('insert sql instruction not found')
return update_time
def update_db():
global _connect
sql_tables = (
#('lib.libactions.sql', 'libactions'),
#('lib.libavtoraliase.sql', 'libavtoraliase'),
('lib.libavtorname.sql', 'libavtorname'),
('lib.libavtor.sql', 'libavtor'),
#('lib.libblocked.sql', 'libblocked'),
('lib.libbook.old.sql', 'libbookold'),
('lib.libbook.sql', 'libbook'),
#('lib.libdonations.sql', 'libdonations'),
#('lib.libfilename.sql', 'libfilename'),
('lib.libgenrelist.sql', 'libgenrelist'),
('lib.libgenre.sql', 'libgenre'),
#('lib.libjoinedbooks.sql', 'libjoinedbooks'),
#('lib.libpolka.sql', 'libpolka'),
('lib.libseqname.sql', 'libseqname'),
('lib.libseq.sql', 'libseq'),
#('lib.libsrclang.sql', 'libsrclang'),
('lib.libtranslator.sql', 'libtranslator'),
)
update_time = None
for fn, tbl_name in sql_tables:
fn = os.path.join(options.sql_dir, fn)
if not os.path.exists(fn):
fn = fn + '.gz'
if not os.path.exists(fn):
print_log('ERROR: file not found:', fn, level=3)
return False
ut = mksql(fn, tbl_name)
if tbl_name != 'libbookold':
# skip libbookold
update_time = ut
curs = _connect.cursor()
curs.execute('DROP TABLE IF EXISTS librusec')
curs.execute('CREATE TABLE librusec ( update_time varchar(32) )')
curs.execute('INSERT INTO librusec VALUES (?)', (update_time,))
_connect.commit()
return True
# Modifiers:
# T - translit
# L - lowercase
# R - remove FAT invalid chars
# B - big file names (do not strip file names to 255 chars)
# _ - replace all spaces with underscores
# Fields:
# m - meta genre
# g - genre
# l - language
# L - first letter of the first author's last name
# f - authors' full names
# F - first author's full name
# a - authors' last names (or nicknames)
# A - first author's last name (or nickname)
# t - title
# s - (sequence #number)
# S - sequence number
# b - book id
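# Example (hypothetical values): with --filename-pattern "TL_:A - t s",
# a book by author "Ivan Petrov" titled "Test" in sequence ("Series", 1)
# yields "petrov_-_test_(series_#1)": last name (A), title (t), sequence (s),
# then transliterated (T), lowercased (L), and spaces turned to underscores (_).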
def get_filename(book_info):
format = options.fn_format
f = format.split(':')
mods = ''
if len(f) > 2:
return None
if len(f) == 2:
mods, format = f
if '_' in mods:
sep = '_'
else:
sep = ' '
fn_tbl = {
'm': 'metagen',
'g': 'genre',
'l': 'lang',
't': 'title',
'L': 'first_letter',
'a': 'name',
'A': 'first_name',
'f': 'full_name',
'F': 'first_full_name',
's': 'seq1',
'S': 'seq2',
'b': 'bookid',
}
#
book_info['bookid'] = str(book_info['bookid'])
# metagenre
book_info['metagen'] = list(book_info['metagen'])[0]
# genre
book_info['genre'] = book_info['genres'][0]
# authors
full_names = []
names = []
first_name = ''
first_full_name = ''
first_letter = ''
for a in book_info['authors']:
aut = []
name = a[2]
if a[2]: # last-name
aut.append(a[2])
aut.append(a[0])
aut.append(a[1])
elif a[3]: # nickname
aut.append(a[3])
name = a[3]
else:
aut.append(a[2])
aut.append(a[0])
aut.append(a[1])
aut = sep.join(aut).strip()
full_names.append(aut)
names.append(name)
if not first_name:
first_name = name
first_full_name = aut
first_letter = aut[0]
if len(names) > 3:
        # too many authors; shorten the list
names = [names[0], '...']
full_names = [full_names[0], '...']
if '_' in mods:
book_info['name'] = '_'.join(names)
book_info['full_name'] = '_'.join(full_names)
else:
book_info['name'] = ', '.join(names)
book_info['full_name'] = ', '.join(full_names)
book_info['first_name'] = first_name
book_info['first_full_name'] = first_full_name
book_info['first_letter'] = first_letter.upper()
# sequence
if book_info['sequences']:
seq = tuple(book_info['sequences'][0])
book_info['seq1'] = '(%s #%s)' % seq
book_info['seq2'] = '%s %s' % seq
else:
book_info['seq1'] = book_info['seq2'] = ''
# replace '/' and '\'
for n in ('name', 'full_name', 'first_name', 'first_full_name',
'title', 'seq1', 'seq2'):
book_info[n] = book_info[n].replace('/', '%').replace('\\', '%')
# generate filename
f = []
for c in list(format):
if c in fn_tbl:
k = book_info[fn_tbl[c]]
if k:
f.append(k)
elif c in 'sS':
if f and f[-1] == ' ':
f = f[:-1]
else:
f.append(c)
fn = ''.join(f)
#
fn = fn.strip()
if 'R' in mods:
for c in '|?*<>":+[]': # invalid chars in VFAT
fn = fn.replace(c, '')
if '_' in mods:
fn = fn.replace(' ', '_')
if 'L' in mods:
fn = fn.lower()
if 'T' in mods:
# translit
from unidecode import unidecode
fn = unidecode(fn)
elif not os.path.supports_unicode_filenames:
fn = fn.encode(fsenc, 'replace')
max_path_len = 247
if 'B' not in mods and len(fn) > max_path_len:
fn = fn[:max_path_len]
if fsenc.lower() == 'utf-8':
# utf-8 normalisation
fn = unicode(fn, 'utf-8', 'ignore').encode('utf-8')
fn = os.path.join(options.out_dir, fn)
return fn
def get_bookid(filename, fb2):
global _connect
# search bookid in fb2
if options.search_id and fb2 is not None:
find = xpath('/m:FictionBook/m:description/m:custom-info')
bookid = None
for e in find(fb2):
bid = e.get('librusec-book-id')
if bid is not None:
try:
bookid = int(bid)
except:
pass
else:
return bookid
# search bookid by filename
try:
bookid = int(filename)
except ValueError:
curs = _connect.cursor()
curs.execute("SELECT BookId FROM libbookold WHERE FileName = ?",
(filename,))
res = curs.fetchone()
if res is None:
print_log('ERROR: file not found in db:', filename, level=3)
return None
return res[0]
return bookid
def is_deleted(bookid):
global _connect
curs = _connect.cursor()
curs.execute("SELECT Deleted FROM libbook WHERE BookId = ?", (bookid,))
res = curs.fetchone()
if res is None:
print >> sys.stderr, 'updatedb.is_deleted: internal error'
return None
return bool(res[0])
def create_fb2(data):
if not check_xml(data):
return None
try:
fb2 = etree.XML(data)
except:
#print_exc()
if not options.nofix:
try:
data = str(FB2Parser(data, convertEntities='xml'))
options.file_fixed = True
fb2 = etree.XML(data)
except:
print_exc()
return None
else:
stats.fixed += 1
else:
return None
return fb2
_xpath_cash = {}
def xpath(path):
# optimisation
if path in _xpath_cash:
return _xpath_cash[path]
find = etree.XPath(path, namespaces=namespaces)
_xpath_cash[path] = find
return find
def update_fb2(fb2, bookid):
# initialisation
# 1. db
global _connect
curs = _connect.cursor()
# 2. xml
find = xpath('/m:FictionBook/m:description/m:title-info')
old_ti = find(fb2)[0] # old <title-info>
new_ti = etree.Element('title-info') # new <title-info>
# 3. routines
xp_prefix = '/m:FictionBook/m:description/m:title-info/m:'
def copy_elem(elem):
# just copy old elements
find = xpath(xp_prefix+elem)
for e in find(fb2):
new_ti.append(deepcopy(e))
def add_authors(table, column, elem_name, add_unknown=False):
authors = []
sql = '''SELECT
FirstName, MiddleName, LastName, NickName, Homepage, Email
FROM libavtorname JOIN %s ON libavtorname.AvtorId = %s.%s
WHERE BookId = ?''' % (table, table, column)
curs.execute(sql, (bookid,))
res = curs.fetchall()
if res:
for a in res:
author = etree.Element(elem_name)
aut = []
i = 0
for e in ('first-name', 'middle-name', 'last-name',
'nickname', 'home-page', 'email'):
if a[i]:
elem = etree.Element(e)
elem.text = a[i]
author.append(elem)
aut.append(a[i])
else:
aut.append('')
i += 1
new_ti.append(author)
authors.append(aut)
elif add_unknown:
author = etree.Element(elem_name)
elem = etree.Element('last-name')
            elem.text = u'Author unknown'
author.append(elem)
new_ti.append(author)
            authors.append(['', '', u'Author unknown', ''])
return authors
#
book_info = {'bookid': bookid}
# generation <title-info>
# 1. <genre>
curs.execute('SELECT GenreId FROM libgenre WHERE BookId = ?', (bookid,))
genres = []
metagen = set()
res = curs.fetchall()
if res:
for i in res:
curs.execute('''SELECT GenreCode, GenreMeta FROM libgenrelist
WHERE GenreId = ? LIMIT 1''', i)
res = curs.fetchone()
name = res[0]
genre = etree.Element('genre')
genre.text = name
new_ti.append(genre)
genres.append(name)
metagen.add(res[1])
else:
genres = ['other']
genre = etree.Element('genre')
genre.text = 'other'
new_ti.append(genre)
        metagen = [u'other']
book_info['genres'] = genres
book_info['metagen'] = metagen
# 2. <author>
authors = add_authors('libavtor', 'AvtorId', 'author', add_unknown=True)
book_info['authors'] = authors
# 3. <book-title>
curs.execute('''SELECT Title, Title1, Lang, Time FROM libbook
WHERE BookId = ? LIMIT 1''', (bookid,))
title_text, title1_text, lang_text, added_time = curs.fetchone()
lang_text = lang_text.lower()
title_text = title_text.strip()
title1_text = title1_text.strip()
title = etree.Element('book-title')
if title1_text:
title.text = '%s [%s]' % (title_text, title1_text)
else:
title.text = title_text
new_ti.append(title)
book_info['title'] = title_text
book_info['title1'] = title1_text
# 4. <annotation>
copy_elem('annotation')
# 5. <keywords>
copy_elem('keywords')
# 6. <date>
copy_elem('date')
# 7. <coverpage>
copy_elem('coverpage')
# 8. <lang>
lang = etree.Element('lang')
lang.text = lang_text
new_ti.append(lang)
book_info['lang'] = lang_text
# 9. <src-lang>
copy_elem('src-lang')
# 10. <translator>
add_authors('libtranslator', 'TranslatorId', 'translator')
# 11. <sequence>
sequences = []
if 1:
curs.execute("""SELECT SeqName, SeqNumb
FROM libseq JOIN libseqname USING (SeqId)
WHERE BookId = ? AND SeqName != '' """, (bookid,))
else:
curs.execute("""SELECT SeqName, SeqNumb
FROM libseq JOIN libseqname USING(SeqId)
WHERE BookId = ? ORDER BY level LIMIT 1""", (bookid,))
for seq in curs.fetchall():
sequence = etree.Element('sequence')
sequence.attrib['name'] = seq[0]
sequence.attrib['number'] = str(seq[1])
new_ti.append(sequence)
sequences.append([seq[0], str(seq[1])])
book_info['sequences'] = sequences
# finalisation
# 1. replace <title-info>
find = xpath('/m:FictionBook/m:description')
desc = find(fb2)[0]
desc.replace(old_ti, new_ti)
# 2. add/update <custom-info>
bookid_found = False
add_ti_found = False
added_time_found = False
update_time_found = False
updater_found = False
fixer_found = False
find = xpath('/m:FictionBook/m:description/m:custom-info')
for ci in find(fb2):
it = ci.get('info-type')
if not it:
if it is None:
print_log('WARNING: <custom-info> has no attribute "info-type"')
elif it == 'librusec-book-id':
bookid_found = True
elif it == 'librusec-add-title-info':
ci.text = title1_text
add_ti_found = True
elif it == 'librusec-added-at':
ci.text = added_time
added_time_found = True
elif it == 'librusec-updated-at':
ci.text = update_time
update_time_found = True
elif it == 'librusec-updater' and ci.text == 'fb2utils':
updater_found = True
elif it == 'fixed-by' and ci.text == 'fb2utils':
fixer_found = True
if not bookid_found:
ci = etree.Element('custom-info')
ci.attrib['info-type'] = 'librusec-book-id'
ci.text = str(bookid)
desc.append(ci)
if not add_ti_found and title1_text:
ci = etree.Element('custom-info')
ci.attrib['info-type'] = 'librusec-add-title-info'
ci.text = title1_text
desc.append(ci)
if not added_time_found:
ci = etree.Element('custom-info')
ci.attrib['info-type'] = 'librusec-added-at'
ci.text = added_time
desc.append(ci)
if not update_time_found:
ci = etree.Element('custom-info')
ci.attrib['info-type'] = 'librusec-updated-at'
ci.text = update_time
desc.append(ci)
if not updater_found:
ci = etree.Element('custom-info')
ci.attrib['info-type'] = 'librusec-updater'
ci.text = 'fb2utils'
desc.append(ci)
if options.file_fixed and not fixer_found:
ci = etree.Element('custom-info')
ci.attrib['info-type'] = 'fixed-by'
ci.text = 'fb2utils'
desc.append(ci)
# done
return etree.tostring(fb2, encoding=options.output_encoding,
xml_declaration=True), book_info
def copy_fb2(filename, data, to_dir=None, msg='save bad fb2 file:'):
if to_dir is None:
if not options.save_bad:
return
to_dir = options.save_bad
filename = str(filename)+'.fb2'
fn = os.path.join(to_dir, filename)
print_log(msg, fn)
if options.nozip:
        open(fn, 'w').write(data)
else:
save_zip(fn, filename, data)
def save_zip(out_file, out_fn, data):
out_file = out_file+'.zip'
zf = zipfile.ZipFile(out_file, 'w', zipfile.ZIP_DEFLATED)
zipinfo = zipfile.ZipInfo()
zipinfo.filename = out_fn
zipinfo.external_attr = 0644 << 16L # needed since Python 2.5
zipinfo.compress_type = zipfile.ZIP_DEFLATED
zf.writestr(zipinfo, data)
    zf.close()
def base_name(filename, ext='.fb2'):
if not filename.endswith(ext):
return None
return os.path.basename(filename)[:-len(ext)]
def process_file(fn, ftype, z_filename, data):
# 0. logging
LogOptions.filename = os.path.abspath(fn)
stats.total += 1
options.file_fixed = False
if options.log_file and (stats.total % 10) == 0:
# progress
tm = time.time() - stats.starttime
eta = stats.total_files * tm / stats.total - tm
h = int(eta / 3600)
m = (eta - h * 3600) / 60
s = eta % 60
sys.stdout.write('\r%d out of %d (ETA: %02dh %02dm %02ds)' %
(stats.total, stats.total_files, h, m, s))
sys.stdout.flush()
if ftype == 'error':
# unzip error
print_log('ERROR:', z_filename, level=3)
stats.errors += 1
return False
filename = fn
if z_filename:
LogOptions.z_filename = z_filename
filename = z_filename
# 1. search bookid
f = base_name(filename)
if f is None:
# filename does not ends with 'fb2'
stats.errors += 1
print_log('ERROR: bad filename:', z_filename, level=3)
copy_fb2('unknown-id-'+str(stats.errors), data)
return False
if options.search_id:
fb2 = create_fb2(data)
bookid = get_bookid(f, fb2)
else:
bookid = get_bookid(f, None)
if bookid is None:
stats.errors += 1
print_log('ERROR: unknown bookid', level=3)
copy_fb2('unknown-id-'+str(stats.errors), data)
return False
print_log('bookid =', str(bookid))
# 2. check is deleted
if not options.nodel and bookid not in not_deleted_list:
print_log('deleted, skip')
if options.save_deleted:
copy_fb2(bookid, data, options.save_deleted,
'save deleted file:')
return False
# 3. update not_deleted_list
if bookid in not_deleted_list:
not_deleted_list.remove(bookid)
else:
print 'INTERNAL ERROR:', bookid, 'not in not_deleted_list'
# 4. create fb2 (dom) if not
if not options.search_id:
fb2 = create_fb2(data)
if fb2 is None:
stats.errors += 1
copy_fb2(bookid, data)
return False
# 5. update
if not options.noup:
try:
d, book_info = update_fb2(fb2, bookid)
except:
print_exc()
stats.errors += 1
copy_fb2(bookid, data)
return False
data = d
# 6. save result
out_fn = str(bookid)+'.fb2'
if options.fn_format:
out_file = get_filename(book_info)
if not out_file:
out_file = os.path.join(options.out_dir, out_fn)
else:
out_file = out_file+'.fb2'
d = os.path.dirname(out_file)
if os.path.isdir(d):
pass
elif os.path.exists(d):
print_log('ERROR: file exists:', d, level=3)
return False
else:
os.makedirs(d)
else:
out_file = os.path.join(options.out_dir, out_fn)
if options.nozip:
open(out_file, 'w').write(data)
else:
try:
save_zip(out_file, out_fn, data)
except:
print
print '>>', len(out_file), out_file
raise
stats.passed += 1
return True
def process(arg):
global not_deleted_list, update_time
curs = _connect.cursor()
res = curs.execute("SELECT BookId FROM libbook WHERE NOT (Deleted&1) and FileType = 'fb2' ")
not_deleted_list = curs.fetchall()
not_deleted_list = set([i[0] for i in not_deleted_list])
curs.execute('SELECT * FROM librusec')
update_time = curs.fetchone()[0]
for fn in walk(arg):
for ftype, z_filename, data in read_file(fn, zip_charset='utf-8'):
process_file(fn, ftype, z_filename, data)
if options.search_deleted:
deleted = set()
for fn in walk(options.search_deleted):
bookid = base_name(fn, '.fb2.zip')
try:
bookid = int(bookid)
except ValueError:
continue
if bookid in not_deleted_list:
                deleted.add(fn)
for fn in deleted:
for ftype, z_filename, data in read_file(fn, zip_charset='utf-8'):
ret = process_file(fn, ftype, z_filename, data)
if ret:
                    print_log('restore deleted:', fn)
print
print 'processed:', stats.total
print 'passed:', stats.passed
print 'fixed:', stats.fixed
print 'errors:', stats.errors
if options.not_found:
fd = open(options.not_found, 'w')
for bookid in not_deleted_list:
print >> fd, bookid
def main():
# parsing command-line options
global options, db_file, _connect
sql_dir = os.path.join(os.path.dirname(sys.argv[0]), 'sql')
option_list = [
make_option("-o", "--out-dir", dest="out_dir",
metavar="DIR", help="save updated fb2 files to this dir"),
make_option("-g", "--generate-db", dest="update_db",
action="store_true", default=False,
help="generate db"),
make_option("-d", "--do-not-delete", dest="nodel",
action="store_true", default=False,
help="don't delete duplicate files"),
make_option("-f", "--do-not-fix", dest="nofix",
action="store_true", default=False,
help="don't fix an xml"),
make_option("-u", "--do-not-update", dest="noup",
action="store_true", default=False,
help="don't update fb2 meta info"),
make_option("-z", "--do-not-zip", dest="nozip",
action="store_true",
default=False, help="don't zip result files"),
make_option("-i", "--search-id", dest="search_id",
action="store_true",
default=False, help="search bookid in fb2"),
make_option("-a", "--save-deleted", dest="save_deleted",
metavar="DIR", help="save deleted fb2 files to this dir"),
make_option("-c", "--search-deleted", dest="search_deleted",
metavar="DIR", help="search deleted fb2 files in this dir"),
make_option("-b", "--save-bad-fb2", dest="save_bad",
metavar="DIR", help="save bad fb2 files to this dir"),
make_option("-s", "--sql-dir", dest="sql_dir",
default=sql_dir, metavar="DIR",
help="search sql files in this dir"),
make_option("-e", "--output-encoding", dest="output_encoding",
default = 'utf-8', metavar="ENC",
help="fb2 output encoding"),
make_option("-l", "--log-file", dest="log_file",
metavar="FILE",
help="output log to this file"),
make_option("-n", "--not-found-file", dest="not_found",
metavar="FILE",
help="save missing books to this file"),
make_option("-F", "--filename-pattern", dest="fn_format",
metavar="PATTERN",
help="output filenames pattern"),
]
parser = OptionParser(option_list=option_list,
usage=("usage: %prog [options] "
"input-files-or-dirs"),
version="%prog "+prog_version)
options, args = parser.parse_args()
LogOptions.level = 0
db_file = os.path.join(options.sql_dir, 'db.sqlite')
_connect = sqlite3.connect(db_file)
if options.update_db:
# update db
print_log('start update db')
ret = update_db()
if ret:
print_log('done')
else:
print_log('fail')
return
    if len(args) == 0:
        sys.exit('wrong num args')
    #
if not options.out_dir:
sys.exit('option --out-dir required')
for f in args:
if not os.path.exists(f):
sys.exit('file does not exists: '+f)
if not os.path.isdir(options.out_dir):
sys.exit('dir does not exists: '+options.out_dir)
if options.save_bad and not os.path.isdir(options.save_bad):
sys.exit('dir does not exists: '+options.save_bad)
if options.save_deleted and not os.path.isdir(options.save_deleted):
sys.exit('dir does not exists: '+options.save_deleted)
if not os.path.exists(db_file):
print_log('start update db')
ret = update_db()
if ret:
print_log('done')
else:
print_log('fail')
return
#
stats.total_files = count_files(args)
print 'total files:', stats.total_files
if options.log_file:
LogOptions.outfile = open(options.log_file, 'w')
stats.starttime = time.time()
process(args)
et = time.time() - stats.starttime
print 'elapsed time: %.2f secs' % et
if __name__ == '__main__':
    main()
|
Hello Derry friends of Vanguard Dental Group! When you think about getting into shape, your teeth are probably not the first body parts that come to mind. But your Derry dentist may be able to improve your smile with bonding and enamel shaping, and you won’t even have to join a gym!
We’re Drs. Rothenberg and Moskowitz, a team of experienced Derry dentists, and today on our dental blog we want to explain a little more about these relatively conservative cosmetic dentistry procedures that can improve your smile without major remodeling.
Bonding is a great way to improve the appearance of teeth that are cracked, stained, broken, chipped, or have large spaces between them. When teeth are bonded, your Manchester dentist applies tooth-colored materials, and “bonds” them to the tooth surface.
Usually used in conjunction with bonding, enamel shaping is the process of modifying teeth by contouring or removing enamel. Something our Londonderry, Windham, and Chester patients love about enamel shaping is that it’s quick, painless, and the often dramatic results are immediate.
If you are looking for quality, affordable Derry dental care, give us a call at 603-435-1482. Our high tech dental team would be happy to answer any questions you have about any of our cosmetic dentistry procedures such as veneers and tooth whitening. We would love to explain the latest advances in sleep apnea and snoring treatment, restorative dentistry, and porcelain veneers and let you see our gallery of before and after photos of smile makeovers.
If it has been a while since you have treated your teeth to a deep cleaning, call today to book an appointment with the most gentle yet thorough dental hygienists in Derry!
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import phonenumber_field.modelfields
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='LeadSource',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
('name', models.CharField(max_length=100, blank=True)),
('incoming_number', phonenumber_field.modelfields.PhoneNumberField(max_length=128, unique=True)),
('forwarding_number', phonenumber_field.modelfields.PhoneNumberField(max_length=128, blank=True)),
],
),
migrations.CreateModel(
name='Lead',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
('phone_number', phonenumber_field.modelfields.PhoneNumberField(max_length=128)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('city', models.CharField(max_length=100)),
('state', models.CharField(max_length=2)),
('source', models.ForeignKey(to='call_tracking.LeadSource')),
],
),
]
|
Christmas! Where does it come from and how is it celebrated?
On December 25, Christians all over the world celebrate Christmas Day as the anniversary of the birth of Jesus of Nazareth, a spiritual leader whose teachings form the basis of their religion. The tradition of celebrating this holiday involves gift giving, decorating Christmas trees, attending church, sharing meals with family and friends and, of course, waiting for Santa to arrive.
The story goes that Jesus Christ was born in Bethlehem in Judea, during the time of King Herod.
It is said that on the night of his birth the Star of Bethlehem (also called the Christmas Star) rose in the sky. When the Magi, known in the Bible as the three wise men from the East, saw it, they knew it was a sign of the birth of a special King in Israel, and they set out in its direction.
King Herod and all Jerusalem were very frightened when they heard of this. Herod gathered all the chief priests and teachers of the people and asked them where the Messiah was to be born.
They went on their way, and the star they had seen in the east was leading their way until it stopped over the place where the child was. When they reached the house they saw the child with his mother Mary. They bowed down, worshipped him and presented him with gifts of gold and of incense and of myrrh. And having been warned in a dream not to go back to Herod, they returned to their country by another route.
From then until now it has become a tradition for us to celebrate Christmas as an annual holiday commemorating the birth of Jesus Christ.
Christmas is a day that has an effect on the entire world. On this day, people from countries all around the world celebrate this public holiday. The interesting thing is that this includes not only countries with a Christian majority, but also ones whose populations are mostly non-Christian.
Japan is an example of that. Religion in Japan is dominated by Shinto and by Buddhist schools and organisations, and there is only a small number of Christians (2.3%), but despite that, many aspects of Christmas have been adopted, such as gift-giving, decorations and Christmas trees.
Countries in which Christmas is not a formal public holiday include Afghanistan, China (excepting Hong Kong and Macao), Iran, Israel, Japan, Kuwait, North Korea, Turkey and many others.
December 25 or how was the date chosen?
The date of December 25 comes from Rome. That was the day on which the festival of Dies Natalis Solis Invicti (meaning “the birthday of the Unconquered Sun”) was originally celebrated in Rome. The Catholic Church chose the same date for commemorating the birth of Jesus Christ because it wanted to Christianise this pagan festival. The church aimed to absorb the customs, traditions and general paganism of every tribe, culture and nation in its efforts to increase the number of people under its control.
Some churches (mainly Orthodox churches) use a different calendar for their religious celebrations. Orthodox Churches in Russia, Serbia, Jerusalem, Ukraine, Ethiopia and other countries use the old ‘Julian’ calendar, and people in those churches celebrate Christmas on January 7. According to the Greek Orthodox Church, most people celebrate Christmas on December 25, but some still use the Julian calendar and also celebrate Christmas on January 7! And according to the Apostolic Church in Armenia, people there celebrate Christmas on January 6.
Santa Claus is a universally recognised symbol of Christmas. He is a fictional figure of Western culture who is said to bring gifts to the homes of well-behaved (“good” or “nice”) children on Christmas Eve (December 24) and the early morning hours of Christmas Day (December 25).
You just can’t have the one without the other. The idea of Christmas all around the world is tightly connected to the image of a chubby old man with long white beard, dressed in red and wearing black boots and belt.
Santa Claus is called by many different names in different countries. He is most known as: Saint Nicholas, Saint Nick, Kris Kringle, Father Christmas, Santa Clous or simply Santa.
The origin of Santa Claus can be found in the Greek bishop St Nicholas of Myra (in present-day Turkey), a very generous man who used to help people in need.
In the USA, the Christmas and holiday season begins around the end of November. It starts with a major shopping kickoff on Black Friday, the day after the U.S. holiday of Thanksgiving, and is marked by Christmas decorations and music playing in stores. The interiors and exteriors of houses are decorated during the weeks leading up to Christmas Eve.
In the period between Christmas and New Year’s Day many schools and businesses are closed, because this time is supposed to be spent with family and friends.
So wherever you are and whatever you do don’t forget to spend some time with your family and friends on Christmas. Say “Hello” to the holidays with big smile on your face and have fun!
|
# Echo client program
import socket
import time
import os
os.system("protoc -I=../proto --python_out=. ../proto/navirice_image.proto")
import navirice_image_pb2
class Image:
def __init__(self, width, height, channels, type_, data, data_size):
self.width = width
self.height = height
self.channels = channels
self.type_ = type_
self.data = data
self.data_size = data_size
class KinectClient:
def __init__(self, host, port):
self.host = host
self.port = port
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.connect((host, port))
self.last_count = 0
def reconnect(self):
self.s.close()
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.connect((self.host, self.port))
def navirice_capture_settings(self, rgb, ir, depth):
print("---Requesting new settings...")
settings = navirice_image_pb2.ProtoCaptureSetting()
settings.IR = ir
settings.RGB = rgb
settings.Depth = depth
settings.count = 1
request_msg = navirice_image_pb2.ProtoRequest()
request_msg.state = navirice_image_pb2.ProtoRequest.CAPTURE_SETTING
request_msg.count = 1
request_msg.capture_setting_value.CopyFrom(settings)
bytes_sent = self.s.send(request_msg.SerializeToString())
count_msg = self.s.recv(1024)
def navirice_get_image(self):
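        # Protocol: send an IMAGE request; the server replies with an image
        # count. If the count is not newer than the last one seen, acknowledge
        # STOP; otherwise acknowledge CONTINUE and read the serialized
        # ProtoImageSet payload.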
print("---Requesting new image...")
request_msg = navirice_image_pb2.ProtoRequest()
request_msg.state = navirice_image_pb2.ProtoRequest.IMAGE
request_msg.count = 1
bytes_sent = self.s.send(request_msg.SerializeToString())
count_msg = self.s.recv(1024)
count_obj = navirice_image_pb2.ProtoImageCount()
count_obj.ParseFromString(count_msg)
count = count_obj.count
print("image count: ", count)
continue_msg = navirice_image_pb2.ProtoAcknowledge()
continue_msg.count = 1
if self.last_count >= count:
print("Requesting stop because image count not new")
continue_msg.state = navirice_image_pb2.ProtoAcknowledge.STOP
bytes_sent = self.s.send(continue_msg.SerializeToString())
return None, self.last_count
else:
print("Requesting --continue")
continue_msg.state = navirice_image_pb2.ProtoAcknowledge.CONTINUE
bytes_sent = self.s.send(continue_msg.SerializeToString())
data = "".encode()
b_size = count_obj.byte_count
print("going to receive ", b_size, " bytes")
t = self.s.recv(b_size, socket.MSG_WAITALL)
data += t
print("received total of ", len(data), " bytes")
img_set = navirice_image_pb2.ProtoImageSet()
img_set.ParseFromString(data)
self.last_count = count
return img_set, count
HOST = '127.0.0.1' # The remote host
PORT = 29000 # The same port as used by the server
kc = KinectClient(HOST, PORT)
while True:
kc.navirice_capture_settings(True, True, True)
img_set, last_count = kc.navirice_get_image()
if img_set is not None:
print("IMG#: ", img_set.count)
print("RGB width: ", img_set.RGB.width)
print("RGB height: ", img_set.RGB.height)
print("RGB channels: ", img_set.RGB.channels)
|
Councillors have voted to stop the sale of the former Magdalene Laundry site on Sean McDermott Street.
Plans were put forward last year to sell the site to a Japanese hotel chain.
However, a decision to stop the proposed sale was made at tonight's Dublin City Council meeting.
Social Democrats Councillor Gary Gannon is welcoming the development, saying there was a lot of support for his motion.
He said: "It means that we maintain ownership over that particular site, and we can build a memorial and commemoration centre of real substance to honour women who were incarcerated and abused at that location.
"We get to own the site and not outsource it to a private company - I think that's vital for our city."
More than 10,000 people had signed a petition calling for the sale to be halted.
A report on the proposed sale found that any development must include a permanent memorial at the site.
However, it also claimed the redevelopment was a "powerful opportunity and really the chance of a lifetime for the Sean McDermott area".
|
#
# @lc app=leetcode id=661 lang=python3
#
# [661] Image Smoother
#
# https://leetcode.com/problems/image-smoother/description/
#
# algorithms
# Easy (52.47%)
# Total Accepted: 55.7K
# Total Submissions: 106.1K
# Testcase Example: '[[1,1,1],[1,0,1],[1,1,1]]'
#
# An image smoother is a filter of the size 3 x 3 that can be applied to each
# cell of an image by rounding down the average of the cell and the eight
# surrounding cells (i.e., the average of the nine cells in the blue smoother).
# If one or more of the surrounding cells of a cell is not present, we do not
# consider it in the average (i.e., the average of the four cells in the red
# smoother).
#
# Given an m x n integer matrix img representing the grayscale of an image,
# return the image after applying the smoother on each cell of it.
#
#
# Example 1:
#
#
# Input: img = [[1,1,1],[1,0,1],[1,1,1]]
# Output: [[0,0,0],[0,0,0],[0,0,0]]
# Explanation:
# For the points (0,0), (0,2), (2,0), (2,2): floor(3/4) = floor(0.75) = 0
# For the points (0,1), (1,0), (1,2), (2,1): floor(5/6) = floor(0.83333333) = 0
# For the point (1,1): floor(8/9) = floor(0.88888889) = 0
#
#
# Example 2:
#
#
# Input: img = [[100,200,100],[200,50,200],[100,200,100]]
# Output: [[137,141,137],[141,138,141],[137,141,137]]
# Explanation:
# For the points (0,0), (0,2), (2,0), (2,2): floor((100+200+200+50)/4) =
# floor(137.5) = 137
# For the points (0,1), (1,0), (1,2), (2,1): floor((200+200+50+200+100+100)/6)
# = floor(141.666667) = 141
# For the point (1,1): floor((50+200+200+200+200+100+100+100+100)/9) =
# floor(138.888889) = 138
#
#
#
# Constraints:
#
#
# m == img.length
# n == img[i].length
# 1 <= m, n <= 200
# 0 <= img[i][j] <= 255
#
#
#
from typing import List
class Solution:
def imageSmoother(self, img: List[List[int]]) -> List[List[int]]:
output = []
for i in range(len(img)):
output.append([])
for j in range(len(img[0])):
                cell_sum = 0
                count = 0
                for k in range(-1, 2):
                    for m in range(-1, 2):
                        row = i + k
                        col = j + m
                        if 0 <= row < len(img) and 0 <= col < len(img[0]):
                            cell_sum += img[row][col]
                            count += 1
                output[i].append(cell_sum // count)
return output
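
# Quick sanity check against the two examples above (illustrative only,
# not part of a LeetCode submission).
if __name__ == "__main__":
    s = Solution()
    assert s.imageSmoother([[1, 1, 1], [1, 0, 1], [1, 1, 1]]) == \
        [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
    assert s.imageSmoother([[100, 200, 100], [200, 50, 200], [100, 200, 100]]) == \
        [[137, 141, 137], [141, 138, 141], [137, 141, 137]]
    print("both examples pass")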
|
I provide a cozy, home-like environment which provides comfort for the child as well as the parents. Separate, quiet nursery for restful naps; privacy-fenced outdoor play yard for exploring, playing, and picnics. Large indoor playroom with lots of natural light that's stocked with toys to spark the imagination and encourage social interaction.
|
#!/usr/bin/env python
from setuptools import setup, find_packages
install_requires = [
'sockjs-tornado==1.0.0',
'tornado==3.2.2',
'argparse',
# simplejson is really required for py3 support to avoid encoding problems
'simplejson'
]
tests_require = [
'mock==2.0.0'
]
setup(
name='thunderpush',
version='1.0.1',
author='Krzysztof Jagiello',
author_email='[email protected]',
description='Tornado and SockJS based, complete Web push solution.',
packages=find_packages(),
zip_safe=False,
install_requires=install_requires,
tests_require=tests_require,
license='BSD',
include_package_data=True,
url='https://github.com/thunderpush/thunderpush',
test_suite='thunderpush.tests.suite',
entry_points={
'console_scripts': [
'thunderpush = thunderpush.runner:main',
],
},
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Topic :: Internet',
],
)
|
In this article, I will teach you how to easily activate Glo Yakata plan on your mobile phone for free browsing and downloads for all Glo subscribers.
I will also state & explain the numerous benefits you get to enjoy when you subscribe to Glo Yakata plan.
If you are a Glo subscriber, you should know that you can get 6GB monthly for 6 months whenever you recharge your mobile phone with a recharge code. All you need to do is make a minimum recharge of N100 and, BOOM, you get a total of 2200% value on recharge in voice calls, data and SMS.
A recharge of N100 and above on Glo Yakata gives customers N350 and above in their bonus accounts for voice calls and SMS. Free data worth 50MB will also be added on their first recharge of the month, as well as on subsequent recharges in the month.
A N5,000 recharge will have your bonus account credited with N17,500, plus 6.25GB of free data for the first recharge of the month and 2.5GB for subsequent recharges in the month.
There is a 7-day validity on all bonuses received on the Glo Yakata plan. Bonuses can be accumulated within the validity period, but validity days cannot be accumulated. Recharges made will increase the total airtime balance in the Yakata bonus account.
How To Migrate to Glo Yakata Plan.
To get started and migrate to Glo Yakata plan, simply dial *220# on your Glo SIM. If you want to confirm you are on Yakata plan, dial #100#.
How to Check Glo Yakata Bonus?
Note that Glo Yakata bonuses cannot be used to purchase data plans or any value-added service subscriptions; these are charged only from the main account.
Also note that the bonuses on Glo Yakata are not shareable or transferable to another Glo line.
For voice calls, both the main account and the bonus account are charged at 55k/s (N33/min) for Glo-to-Glo calls, while calls from a Glo line to other networks are charged at 70k/s (N42/min).
SMS is charged at N4 per message from the main account and N14 per message from the bonus account.
|
#!/usr/bin/env python
# pylint: disable=W0212
from agate.rows import Row
from agate import utils
@utils.allow_tableset_proxy
def join(self, right_table, left_key, right_key=None, inner=False, require_match=False, columns=None):
"""
    Create a new table by joining two tables on common values.
This method performs the equivalent of SQL's "left outer join", combining
columns from this table and from :code:`right_table` anywhere that the
:code:`left_key` and :code:`right_key` are equivalent.
Where there is no match for :code:`left_key` the left columns will
be included with the right columns set to :code:`None` unless
the :code:`inner` argument is specified.
If :code:`left_key` and :code:`right_key` are column names, only
the left columns will be included in the output table.
Column names from the right table which also exist in this table will
be suffixed "2" in the new table.
:param right_table:
The "right" table to join to.
:param left_key:
Either the name of a column from the this table to join on, a
sequence of such column names, or a :class:`function` that takes a
row and returns a value to join on.
:param right_key:
        Either the name of a column from :code:`right_table` to join on, a
sequence of such column names, or a :class:`function` that takes a
row and returns a value to join on. If :code:`None` then
:code:`left_key` will be used for both.
:param inner:
Perform a SQL-style "inner join" instead of a left outer join. Rows
which have no match for :code:`left_key` will not be included in
the output table.
:param require_match:
If true, an exception will be raised if there is a left_key with no
matching right_key.
:param columns:
A sequence of column names from :code:`right_table` to include in
the final output table. Defaults to all columns not in
:code:`right_key`.
:returns:
A new :class:`.Table`.
"""
if right_key is None:
right_key = left_key
# Get join columns
right_key_indices = []
left_key_is_func = hasattr(left_key, '__call__')
left_key_is_sequence = utils.issequence(left_key)
# Left key is a function
if left_key_is_func:
left_data = [left_key(row) for row in self._rows]
# Left key is a sequence
elif left_key_is_sequence:
left_columns = [self._columns[key] for key in left_key]
left_data = zip(*[column.values() for column in left_columns])
# Left key is a column name/index
else:
left_data = self._columns[left_key].values()
right_key_is_func = hasattr(right_key, '__call__')
right_key_is_sequence = utils.issequence(right_key)
# Right key is a function
if right_key_is_func:
right_data = [right_key(row) for row in right_table._rows]
# Right key is a sequence
elif right_key_is_sequence:
right_columns = [right_table._columns[key] for key in right_key]
right_data = zip(*[column.values() for column in right_columns])
right_key_indices = [right_table._columns._keys.index(key) for key in right_key]
# Right key is a column name/index
else:
right_column = right_table._columns[right_key]
right_data = right_column.values()
right_key_indices = [right_table._columns._keys.index(right_key)]
# Build names and type lists
column_names = list(self._column_names)
column_types = list(self._column_types)
for i, column in enumerate(right_table._columns):
name = column.name
if columns is None and i in right_key_indices:
continue
if columns is not None and name not in columns:
continue
if name in self.column_names:
column_names.append('%s2' % name)
else:
column_names.append(name)
column_types.append(column.data_type)
if columns is not None:
right_table = right_table.select([n for n in right_table._column_names if n in columns])
right_hash = {}
for i, value in enumerate(right_data):
if value not in right_hash:
right_hash[value] = []
right_hash[value].append(right_table._rows[i])
# Collect new rows
rows = []
if self._row_names is not None:
row_names = []
else:
row_names = None
# Iterate over left column
for left_index, left_value in enumerate(left_data):
matching_rows = right_hash.get(left_value, None)
if require_match and matching_rows is None:
raise ValueError('Left key "%s" does not have a matching right key.' % left_value)
# Rows with matches
if matching_rows:
for right_row in matching_rows:
new_row = list(self._rows[left_index])
for k, v in enumerate(right_row):
if columns is None and k in right_key_indices:
continue
new_row.append(v)
rows.append(Row(new_row, column_names))
if self._row_names is not None:
row_names.append(self._row_names[left_index])
# Rows without matches
elif not inner:
new_row = list(self._rows[left_index])
for k, v in enumerate(right_table._column_names):
if columns is None and k in right_key_indices:
continue
new_row.append(None)
rows.append(Row(new_row, column_names))
if self._row_names is not None:
row_names.append(self._row_names[left_index])
return self._fork(rows, column_names, column_types, row_names=row_names)
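
# Usage sketch (illustrative only; assumes the public agate Table API, where
# this function is exposed as Table.join):
if __name__ == '__main__':
    import agate

    left = agate.Table([(1, 'a'), (2, 'b')], ['id', 'letter'])
    right = agate.Table([(1, 'x')], ['id', 'value'])
    joined = left.join(right, 'id')  # left outer join on "id"
    print(joined.column_names)       # ('id', 'letter', 'value')
    print(tuple(joined.rows[1]))     # (2, 'b', None) -- unmatched left row gets None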
|
The 2018 Black Alumni Reunion will #RaiseTheBAR. This reunion will feature more alumni, more free time to catch up with friends, and fun, exciting events. The reunion will be held Sept. 21-23 at University Park. Proceeds from reunion ticket sales will benefit the Penn State African American Alumni Organization Emergency Fund in Educational Equity.
Campus bus tours hosted by S.M.A.R.T.
|