# Copyright The IETF Trust 2007, All Rights Reserved
# Portion Copyright (C) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
# All rights reserved. Contact: Pasi Eronen <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the Nokia Corporation and/or its
# subsidiary(-ies) nor the names of its contributors may be used
# to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from django.conf.urls import patterns
from django.views.generic import RedirectView
urlpatterns = patterns('',
(r'^telechat/.*$', RedirectView.as_view(url='https://www.ietf.org/iesg/minutes.html')),
(r'^ann/(?:ind|new|prev)/$', RedirectView.as_view(url="/iesg/decisions/", permanent=True )),
(r'^telechatdates/$', RedirectView.as_view(url='/admin/iesg/telechatdate/')),
(r'^decisions/(?:(?P<year>[0-9]{4})/)?$', "ietf.iesg.views.review_decisions"),
(r'^agenda/(?:(?P<date>\d{4}-\d{2}-\d{2})/)?$', "ietf.iesg.views.agenda"),
(r'^agenda/(?:(?P<date>\d{4}-\d{2}-\d{2})/)?agenda.txt$', "ietf.iesg.views.agenda_txt"),
(r'^agenda/(?:(?P<date>\d{4}-\d{2}-\d{2})/)?agenda.json$', "ietf.iesg.views.agenda_json"),
(r'^agenda/(?:(?P<date>\d{4}-\d{2}-\d{2})/)?scribe_template.html$', "ietf.iesg.views.agenda_scribe_template"),
(r'^agenda/(?:(?P<date>\d{4}-\d{2}-\d{2})/)?moderator_package.html$', "ietf.iesg.views.agenda_moderator_package"),
(r'^agenda/(?:(?P<date>\d{4}-\d{2}-\d{2})/)?agenda_package.txt$', "ietf.iesg.views.agenda_package"),
(r'^agenda/documents.txt$', "ietf.iesg.views.agenda_documents_txt"),
(r'^agenda/documents/$', "ietf.iesg.views.agenda_documents"),
(r'^agenda/telechat-(?:(?P<date>\d{4}-\d{2}-\d{2})-)?docs.tgz', "ietf.iesg.views.telechat_docs_tarfile"),
(r'^discusses/$', "ietf.iesg.views.discusses"),
(r'^milestones/$', "ietf.iesg.views.milestones_needing_review"),
)
|
The 2nd Annual Historic Goshen Ornament is available now! The First Presbyterian Church is beautifully depicted. A portion of each sale is donated to the Church.
|
#!/usr/bin/python
#
# vim: tabstop=4 expandtab shiftwidth=4 noautoindent
#
# nymserv.py - A Basic Nymserver for delivering messages to a shared mailbox
# such as alt.anonymous.messages.
#
# Copyright (C) 2012 Steve Crook <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
# setuptools is needed for install_requires to take effect; fall back to
# plain distutils if it is unavailable.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup
setup(
name='nymserv',
author='Steve Crook',
author_email='[email protected]',
version='0.4',
packages=['nymserv', ],
scripts=['nymserv/nymserv', ],
license='GPLv3',
url='https://github.com/crooks/nymserv',
long_description=open('README').read(),
install_requires=['pyaxo>=0.4.1', ],
#data_files=[('man/man1', ['man/nymserv.1'])],
)
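# Hypothetical usage from the project root (note that plain distutils
# ignores the install_requires option; setuptools honours it):
#   python setup.py sdist      # build a source distribution
#   python setup.py install    # install the package and its script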
|
Colleagues from the O2OA project attended the recent Jisc Open Access Good Practice Workshop (17th June) and Programme meeting (25th June). Lorna Everall (CU project lead) and Alan Cope (DMU) presented on the project plan (slides available below) and enjoyed speed dating with other project partners! Takeaways from the day include clear crossovers between approaches and the chance to share learning and experiences throughout the project lifespan. We will be contacting partners asap, whilst our own needs assessment is running, to establish relationships and strengthen our collective approach.
|
# Copyright (C) 2010-2012 Red Hat, Inc.
# This work is licensed under the GNU GPLv2 or later.
import libvirt
from libvirt import libvirtError
from libvirttestapi.repos.domain import domain_common
from libvirttestapi.utils import utils
required_params = ('transport',
'target_machine',
'username',
'password',
'guestname',
'poststate')
optional_params = {}
def get_state(state):
dom_state = ''
if state == libvirt.VIR_DOMAIN_NOSTATE:
dom_state = 'nostate'
elif state == libvirt.VIR_DOMAIN_RUNNING:
dom_state = 'running'
elif state == libvirt.VIR_DOMAIN_BLOCKED:
dom_state = 'blocked'
elif state == libvirt.VIR_DOMAIN_PAUSED:
dom_state = 'paused'
elif state == libvirt.VIR_DOMAIN_SHUTDOWN:
dom_state = 'shutdown'
elif state == libvirt.VIR_DOMAIN_SHUTOFF:
dom_state = 'shutoff'
elif state == libvirt.VIR_DOMAIN_CRASHED:
dom_state = 'crashed'
else:
        dom_state = 'unknown'
return dom_state
def clean_guest(conn, guestname, logger):
running_guests = []
ids = conn.listDomainsID()
for id in ids:
obj = conn.lookupByID(id)
running_guests.append(obj.name())
if guestname in running_guests:
logger.info("Destroy guest: %s" % guestname)
domobj = conn.lookupByName(guestname)
domobj.destroy()
define_guests = conn.listDefinedDomains()
if guestname in define_guests:
logger.info("Undefine guest: %s" % guestname)
domobj = conn.lookupByName(guestname)
domobj.undefine()
def env_clean(srcconn, dstconn, guestname, logger):
logger.info("destroy and undefine %s on both side if it exsits", guestname)
clean_guest(srcconn, guestname, logger)
clean_guest(dstconn, guestname, logger)
def migrate_tls(params):
""" migrate a guest back and forth between two machines"""
logger = params['logger']
transport = params['transport']
target_machine = params['target_machine']
username = params['username']
password = params['password']
guestname = params['guestname']
poststate = params['poststate']
domain_common.config_ssh(target_machine, username, password, logger)
target_hostname = utils.get_target_hostname(target_machine, username, password, logger)
dsturi = "qemu+%s://%s/system" % (transport, target_hostname)
try:
# Connect to local hypervisor connection URI
srcconn = libvirt.open()
srcdom = srcconn.lookupByName(guestname)
dstconn = libvirt.open(dsturi)
logger.info("use migrate() to migrate")
srcdom.migrate(dstconn, libvirt.VIR_MIGRATE_TLS | libvirt.VIR_MIGRATE_UNSAFE, None, None, 0)
except libvirtError as err:
logger.error("API error message: %s, error code is %s"
% (err.get_error_message(), err.get_error_code()))
logger.error("Migration Failed")
env_clean(srcconn, dstconn, guestname, logger)
return 1
dstdom = dstconn.lookupByName(guestname)
dstdom_state = dstdom.info()[0]
if get_state(dstdom_state) != poststate:
logger.error("Dst VM wrong state %s, should be %s", get_state(dstdom_state), poststate)
env_clean(srcconn, dstconn, guestname, logger)
return 1
logger.info("Migration PASS")
env_clean(srcconn, dstconn, guestname, logger)
return 0
|
At Kitz UK in Dorset we can create a professional online store for your school, sports team or club. This personalised service takes away the administration and time of managing your team kit! All of your specific garments will be situated in one, easy to access place. To speak to a member of our helpful team, simply give us a call on 01684 892439 – we’ll be able to discuss how we can best suit your needs and design garments so they meet the requirements of your team’s kit colours and logo.
|
#!/usr/bin/python
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Add malware files and their hashes to a MySQL database, saving them as LONGBLOBs in the database structure
import sys
import os
import re
import hashlib
from optparse import OptionParser
try:
import MySQLdb
except ImportError:
print "Cannot import MySQLdb, fix it."
sys.exit()
#MySQL Connection Info
host = ''
username = ''
password = ''
database = ''
def initdb():
conn = MySQLdb.connect(host=host, user=username, passwd=password, db=database)
curs = conn.cursor()
curs.execute("""
CREATE TABLE files (
id INTEGER PRIMARY KEY AUTO_INCREMENT,
md5 TEXT,
sha1 TEXT,
sha256 TEXT,
malware LONGBLOB,
time DATETIME
) ENGINE=INNODB, ROW_FORMAT=DYNAMIC;
""")
curs.close()
conn.commit()
conn.close()
def savetodb(filename, force):
conn = MySQLdb.connect(host=host, user=username, passwd=password, db=database)
curs = conn.cursor()
    # Read the file once and reuse the bytes for all three digests.
    file = open(filename, 'rb').read()
    md5 = hashlib.md5(file).hexdigest()
    sha1 = hashlib.sha1(file).hexdigest()
    sha256 = hashlib.sha256(file).hexdigest()
curs.execute("SELECT id FROM files WHERE md5=%s", (md5,))
ids = curs.fetchall()
if len(ids):
if not force:
ids = ["%d" % id[0] for id in ids]
print "The sample exists in the database with ID %s" % (','.join(ids))
print "Use the -o or --overwrite option to force"
return
else:
curs.execute("DELETE FROM files WHERE md5=%s", (md5,))
curs.execute("INSERT INTO files VALUES (NULL,%s,%s,%s,%s,NOW())", (md5,sha1,sha256,file))
curs.close()
conn.commit()
conn.close()
def main():
parser = OptionParser()
parser.add_option("-i", "--init", action="store_true",
dest="init", default=False, help="initialize database")
parser.add_option("-o", "--overwrite", action="store_true",
dest="force", default=False,
help="overwrite existing DB entry")
parser.add_option("-f", "--file", action="store", dest="filename",
type="string", help="save FILENAME")
parser.add_option("-u", "--upload", action="store_true",
dest="savetodb", default=False,
help="Save file to database")
(opts, args) = parser.parse_args()
if opts.init:
initdb()
sys.exit()
    if opts.filename is None:
parser.print_help()
parser.error("You must supply a filename!")
if not os.path.isfile(opts.filename):
parser.error("%s does not exist" % opts.filename)
if opts.savetodb:
print "Saving " + opts.filename + " to the database"
savetodb(opts.filename, opts.force)
print "Done"
print
if __name__ == '__main__':
main()
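# Typical invocations (the script name is hypothetical):
#   python malware2db.py --init              # create the `files` table
#   python malware2db.py -u -f sample.exe    # hash a sample and store it
#   python malware2db.py -u -o -f sample.exe # overwrite an existing entry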
|
Dimensions: W 45cm x D 45cm x H 45cm.
Available in Natural Oak or Dove Grey Paint.
This Scandinavian-styled range comes complete with this stylish oak or painted-oak framed mirror, featuring a quality flat-edged mirror plate with mounting brackets fitted to the back for easy installation.
Made from solid oak and oak veneers to the highest of standards, and finished with a durable natural lacquer to give it that warm, natural look and Scandinavian style. The range features solid backs, solid drawer bottoms and dovetailed drawers, and each item comes with a choice of wooden or chrome handles. Also available in beautiful Dove Grey paint. A fantastic range for all styles and needs.
|
# Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import auth
class FakeCredentials(auth.Credentials):
def is_valid(self):
return True
class FakeKeystoneV2Credentials(auth.KeystoneV2Credentials):
def __init__(self):
creds = dict(
username='fake_username',
password='fake_password',
tenant_name='fake_tenant_name'
)
super(FakeKeystoneV2Credentials, self).__init__(**creds)
class FakeKeystoneV3Credentials(auth.KeystoneV3Credentials):
"""Fake credentials suitable for the Keystone Identity V3 API"""
def __init__(self):
creds = dict(
username='fake_username',
password='fake_password',
user_domain_name='fake_domain_name',
project_name='fake_tenant_name',
project_domain_name='fake_domain_name'
)
super(FakeKeystoneV3Credentials, self).__init__(**creds)
class FakeKeystoneV3DomainCredentials(auth.KeystoneV3Credentials):
"""Fake credentials for the Keystone Identity V3 API, with no scope"""
def __init__(self):
creds = dict(
username='fake_username',
password='fake_password',
user_domain_name='fake_domain_name'
)
super(FakeKeystoneV3DomainCredentials, self).__init__(**creds)
class FakeKeystoneV3AllCredentials(auth.KeystoneV3Credentials):
"""Fake credentials for the Keystone Identity V3 API, with no scope"""
def __init__(self):
creds = dict(
username='fake_username',
password='fake_password',
user_domain_name='fake_domain_name',
project_name='fake_tenant_name',
project_domain_name='fake_domain_name',
domain_name='fake_domain_name'
)
super(FakeKeystoneV3AllCredentials, self).__init__(**creds)
|
Build your business by giving your babies the best! Shop our extensive selection of adorable dance costumes for your tiniest dancers. Quality kids dance costumes for ballet, lyrical, tap, jazz, hip hop and contemporary.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import six
from django import forms
from django.contrib import admin
from django.contrib.admin import SimpleListFilter
from django.forms.utils import flatatt
from django.utils.encoding import force_text
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from suit.admin import SortableStackedInline
from dynamic_forms.formfields import formfield_registry
from dynamic_forms.models import FormFieldModel, FormModel, FormModelData
from dynamic_forms.utils import export_as_csv_action
class ReadOnlyWidget(forms.Widget):
def __init__(self, attrs=None, **kwargs):
self.show_text = kwargs.pop('show_text', None)
super(ReadOnlyWidget, self).__init__(attrs)
def render(self, name, value, attrs=None):
content = ''
if value is not None:
content = value
if self.show_text is not None:
content = self.show_text
final_attrs = self.build_attrs(attrs)
return format_html('<span{0}>{1}</span>',
flatatt(final_attrs),
force_text(content),
)
class OptionsWidget(forms.MultiWidget):
def __init__(self, option_names, widgets, attrs=None):
self.option_names = option_names
super(OptionsWidget, self).__init__(widgets, attrs)
def decompress(self, value):
mapping = json.loads(value) if value else {}
return [mapping.get(key, None) for key in self.option_names]
def format_output(self, rendered_widgets, id_):
output = []
i = 0
for n, (r, w) in six.moves.zip(self.option_names, rendered_widgets):
output.append(
format_html(
'<label for="{0}_{1}">{2}:</label>{3}',
w.id_for_label(id_), i, n, r
)
)
i += 1
return mark_safe('<div style="display:inline-block;">' +
('<br />\n'.join(output)) + '</div>')
def render(self, name, value, attrs=None):
if self.is_localized:
for widget in self.widgets:
widget.is_localized = self.is_localized
# value is a list of values, each corresponding to a widget
# in self.widgets.
if not isinstance(value, list):
value = self.decompress(value)
output = []
final_attrs = self.build_attrs(attrs)
id_ = final_attrs.get('id', None)
for i, widget in enumerate(self.widgets):
try:
widget_value = value[i]
except IndexError:
widget_value = None
if id_:
final_attrs = dict(final_attrs, id='%s_%s' % (id_, i))
rendered = widget.render(name + '_%s' % i, widget_value,
final_attrs)
output.append((rendered, widget))
return mark_safe(self.format_output(output, id_))
class OptionsField(forms.MultiValueField):
def __init__(self, meta, *args, **kwargs):
self.option_names = []
self.option_fields = []
self.option_widgets = []
initial = {}
for name, option in sorted(meta.items()):
self.option_names.append(name)
initial[name] = option[1]
formfield = option[2]
if isinstance(formfield, forms.Field):
self.option_fields.append(formfield)
self.option_widgets.append(formfield.widget)
elif isinstance(formfield, (tuple, list)):
if isinstance(formfield[0], forms.Field):
self.option_fields.append(formfield[0])
else:
self.option_fields.append(formfield[0]())
if isinstance(formfield[1], forms.Widget):
self.option_widgets.append(formfield[1])
else:
self.option_widgets.append(formfield[1]())
elif isinstance(formfield, type):
self.option_fields.append(formfield())
self.option_widgets.append(formfield.widget)
kwargs['widget'] = OptionsWidget(self.option_names,
self.option_widgets)
if 'initial' in kwargs:
kwargs['initial'].update(initial)
else:
kwargs['initial'] = initial
super(OptionsField, self).__init__(self.option_fields, *args, **kwargs)
def compress(self, data_list):
data = {}
for name, value in six.moves.zip(self.option_names, data_list):
if value is not None:
data[name] = value
return json.dumps(data)
class AdminFormModelForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(AdminFormModelForm, self).__init__(*args, **kwargs)
choices = self.fields['actions'].choices
self.fields['actions'].choices = sorted(choices, key=lambda x: x[1])
class AdminFormFieldInlineForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
instance = kwargs.get('instance', None)
meta = None
if instance:
df = formfield_registry.get(instance.field_type)
if df:
meta = df._meta
super(AdminFormFieldInlineForm, self).__init__(*args, **kwargs)
choices = self.fields['field_type'].choices
self.fields['field_type'].choices = sorted(choices, key=lambda x: x[1])
if meta is not None:
self.fields['_options'] = OptionsField(meta, required=False,
label=_('Options'))
else:
self.fields['_options'].widget = ReadOnlyWidget(show_text=_(
'The options for this field will be available once it has '
'been stored the first time.'
))
class FormFieldModelInlineAdmin(SortableStackedInline):
extra = 0
form = AdminFormFieldInlineForm
list_display = ('field_type', 'name', 'label')
model = FormFieldModel
prepopulated_fields = {"name": ("label",)}
sortable = 'position'
class FormModelAdmin(admin.ModelAdmin):
form = AdminFormModelForm
inlines = (FormFieldModelInlineAdmin,)
list_display = ('name', 'allow_display')
list_filter = ('name',)
model = FormModel
actions = [export_as_csv_action("Export form submissions as CSV")]
admin.site.register(FormModel, FormModelAdmin)
class FormFilter(SimpleListFilter):
title = 'Selected Form'
parameter_name = 'form'
def lookups(self, request, model_admin):
        forms = set(FormModel.objects.all())
return [(f.id, f.name) for f in forms]
def queryset(self, request, queryset):
if self.value():
return FormModelData.objects.filter(form__id__exact=self.value())
else:
return queryset
class FormModelDataAdmin(admin.ModelAdmin):
fields = ('form', 'value', 'submitted', 'show_url_link')
model = FormModelData
readonly_fields = ('submitted', 'show_url_link',)
list_filter = (FormFilter,)
actions_on_top = False
actions_on_bottom = True
date_hierarchy = 'submitted'
def get_list_display(self, request):
if not request.GET.get('form', None):
return ('form', 'submitted')
else:
list_display_tuple = ['form', 'submitted']
form_obj = FormModel.objects.get(pk=int(request.GET.get('form')))
self.form_obj = form_obj
fields = form_obj.fields.all()
for field in fields:
if field.field_type in ('dynamic_forms.formfields.StartGroupField',
'dynamic_forms.formfields.EndGroupField'):
continue
field_slug = slugify(field.name).replace('-', '_')
list_display_tuple.append("get_form_data_value_for_%s" % field_slug)
self.add_form_value_display(field.label, field_slug)
return list_display_tuple
def add_form_value_display(self, label, name):
def inner_add_form_value_display(obj):
json_value = json.loads(obj.value)
return json_value[label]
inner_add_form_value_display.short_description = name
inner_add_form_value_display.name = name
setattr(self, "get_form_data_value_for_%s" % name, inner_add_form_value_display)
admin.site.register(FormModelData, FormModelDataAdmin)
|
Q: The truth is that even though there are many characters in SING, each and every one of them has a well fleshed-out storyline.
There are many storylines – with Buster in the middle – that you can follow and that all come together in SING. And each time you get back to one you think, “Oh, I’m glad I’m back here!” because you had missed that character. Not many films are capable of doing that.
Q: Did the director of movie SING Garth’s openness allow you to bring your own input to the role of Buster Moon?
Q: The film talks about overcoming our fears. Can you relate to that in any way?
Q: How did SING surprise you when you finally saw it completed?
Q: Does music play an important role in your life?
|
import logging
import cPickle
import subprocess
from satnogsclient.upsat import packet_settings
from satnogsclient import settings as client_settings
from satnogsclient.observer.udpsocket import Udpsocket
from satnogsclient.upsat import packet
logger = logging.getLogger('satnogsclient')
backend_listener_sock = Udpsocket(('0.0.0.0', client_settings.BACKEND_LISTENER_PORT)) # Port in which client listens for frames from gnuradio
ui_listener_sock = Udpsocket(('127.0.0.1', client_settings.BACKEND_FEEDER_PORT))
ecss_feeder_sock = Udpsocket([]) # The socket with which we communicate with the ecss feeder thread
backend_feeder_sock = Udpsocket([])
ld_socket = Udpsocket([])
ld_uplink_socket = Udpsocket([])
ld_downlink_socket = Udpsocket([])
def write_to_gnuradio(buf):
backend_feeder_sock.sendto(buf, (client_settings.GNURADIO_IP, client_settings.GNURADIO_UDP_PORT))
def read_from_gnuradio():
logger.info('Started gnuradio listener process')
while True:
conn = backend_listener_sock.recv()
buf_in = bytearray(conn[0])
ecss_dict = {}
ret = packet.deconstruct_packet(buf_in, ecss_dict, "gnuradio")
ecss_dict = ret[0]
pickled = cPickle.dumps(ecss_dict)
if len(ecss_dict) == 0:
            logger.error('Ecss Dictionary not properly constructed. Error occurred')
continue
try:
if ecss_dict['ser_type'] == packet_settings.TC_LARGE_DATA_SERVICE:
                if ecss_dict['ser_subtype'] <= 8:  # 8 is the maximum service subtype corresponding to Large Data downlink
ld_downlink_socket.sendto(pickled, ('127.0.0.1', client_settings.LD_DOWNLINK_LISTEN_PORT))
else:
ld_uplink_socket.sendto(pickled, ('127.0.0.1', client_settings.LD_UPLINK_LISTEN_PORT))
else:
ecss_feeder_sock.sendto(pickled, ('127.0.0.1', client_settings.ECSS_FEEDER_UDP_PORT))
except KeyError:
            logger.error('Ecss Dictionary not properly constructed. Error occurred. Key \'ser_type\' not in dictionary')
def exec_gnuradio(observation_file, waterfall_file, freq):
arguments = {'filename': observation_file,
'waterfall': waterfall_file,
'rx_device': client_settings.SATNOGS_RX_DEVICE,
'center_freq': str(freq)}
arg_string = ' '
arg_string += '--rx-sdr-device=' + arguments['rx_device'] + ' '
arg_string += '--file-path=' + arguments['filename'] + ' '
arg_string += '--waterfall-file-path=' + arguments['waterfall'] + ' '
arg_string += '--rx-freq=' + arguments['center_freq'] + ' '
logger.info('Starting GNUradio python script')
proc = subprocess.Popen([client_settings.GNURADIO_SCRIPT_FILENAME + " " + arg_string], shell=True)
return proc
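# Hypothetical usage sketch: spawn the flowgraph for a 435.3 MHz observation
# (file paths here are illustrative only):
#   proc = exec_gnuradio('/tmp/observation.out', '/tmp/waterfall.dat', 435300000)
#   ...
#   proc.terminate()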
|
Primary call is taken by the first year residents. The second year resident will rotate every 5th night for six weeks on “buddy” call with the first year residents. Around August 15, first year residents start to take call on their own.
After the first 6 weeks of the year, a night float system begins.
Night float is one of the 5 rotations for first year residents.
The night float and adult daytime consult resident (also a first year rotation) alternate night float and daytime consults responsibilities every 2 weeks.
Night float covers Sunday night through Thursday night.
Friday night (4:30 p.m. - 7:00 a.m.), Saturday (7:00 a.m. - 7:00 a.m.), and Sunday daytime (7:00 a.m. - 7:00 p.m.) are covered on a rotating call schedule by the first year residents not on night float.
Night float shifts begin at 4:30 p.m. and end at 7:00 a.m. The night float resident is expected to attend morning lecture from 7:00 to 8:00 a.m. and sign out to the daytime consult resident.
Call is rotated among the second and third year residents on a weekly basis.
|
#!/usr/bin/python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Install Fusion and Earth Server, build tutorial databases, and test."""
import os
import subprocess
BASE_DIR = os.getcwd()
LOG_SHELL_CMDS = True
GEE_TESTS_LOG = "%s/gee_tests.log" % BASE_DIR
BYTES_PER_MEGABYTE = 1024.0 * 1024.0
class OsCommandError(Exception):
"""Thrown if os command fails."""
pass
def BaseDir():
"""Returns the directory that contains the application that is running."""
return BASE_DIR
def ClearLog():
"""Clear content of log file."""
fp = open(GEE_TESTS_LOG, "w")
fp.close()
def Log(message):
"""If logging is on, log the message."""
if LOG_SHELL_CMDS:
fp = open(GEE_TESTS_LOG, "a")
fp.write(message + "\n")
fp.close()
def ExecuteCmd(os_cmd, do_log=False, err2out=False):
"""Execute and log os command.
If the shell command fails, an exception is thrown.
Args:
os_cmd: (string) linux shell command to run.
do_log: whether to do logging.
err2out: whether to send stderr to the same file handle as for stdout.
Returns:
results of the linux shell command.
Raises:
OsCommandError: if error from shell command is not None.
"""
print "Executing: %s" % os_cmd
if do_log:
Log(os_cmd)
try:
p = subprocess.Popen(
os_cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT if err2out else subprocess.PIPE)
result, error = p.communicate()
if (not err2out) and error:
print "ERROR: %s" % error
return "Unable to execute %s" % os_cmd
return result
except Exception, e:
print "FAILED: %s" % e.__str__()
raise OsCommandError()
def DiskSpace(path):
"""Returns remaining disk space in Megabytes."""
mount_info = os.statvfs(path)
return mount_info.f_bsize * mount_info.f_bavail / BYTES_PER_MEGABYTE
def ChDir(path):
"""Changes directory so that it is logged."""
Log("cd %s" % path)
os.chdir(path)
def GetFileWithReplace(path, replace):
"""Return content of file after replacing any keys in replace with values."""
fp = open(path)
content = fp.read()
fp.close()
for key in replace.iterkeys():
content = content.replace(key, replace[key])
return content
def main():
# Quick test of module.
ClearLog()
print ExecuteCmd("pwd")
ChDir("/tmp")
print ExecuteCmd("pwd")
ChDir(BASE_DIR)
print ExecuteCmd("pwd")
if __name__ == "__main__":
main()
|
As I sit and write this article, life is anything but simple. You see, we’re packing up our home and our lives for a six month adventure in beautiful (sunny) South Africa.
There are a lot of logistics to consider – insurances, travel plans, tenants and rental agreements, packing up so much stuff. It’s easy to get overwhelmed and my desire for simplicity grows by the day.
It’s also really forced me to consider what is most essential when it comes to my PCOS. You see, it is so easy to get caught up in all of the things that we “need” to take, eat or do for our PCOS. It can leave us feeling overwhelmed.
So, let’s take a step back, especially as we’re entering the festive season, and have a look at the most essential elements of our PCOS management.
Now, we’ve said time and time again that the way that we eat is the most effective way of managing our PCOS and our symptoms. In fact, researchers have found that eating well for our PCOS is more effective than Clomid and Metformin in stimulating ovulation and generally managing our symptoms.
Now, this entire site is dedicated to managing our PCOS using food (mostly) and you will find articles on the main principles of eating well for your PCOS – dairy free, gluten free and low GL.
Also, if you want some more ideas of recipes that meet the PCOS Diet guidelines, why not check out PCOS Foodies? It's an entire meal planning app specifically designed for women with PCOS.
So, how can we go about simplifying our food, following the basic guidelines?
Remember, we’re looking for dairy free, gluten free and low glycemic load foods. The first thing that springs to mind is whole unprocessed foods. You can eat these kinds of foods anytime, anywhere.
Going to a restaurant? A steak and some vegetables is a great option.
Picnic? Deli meats, salad, fruit, olives – you’re sorted.
Dinner at home? Chicken breasts and vegetables.
It is AMAZING how much food you can happily enjoy that follows these guidelines.
Now, I have a confession to make. My children (5 and 2) crave sugar. I have not been all that successful in staying away from processed foods with them. So, I’m seeing our trip to South Africa as a wonderful opportunity to reset their diets and way of eating.
I’m seriously considering a new rule – if it has more than 3 ingredients or comes in some kind of commercial packaging – it’s out.
I know that sounds extreme but if you had this wonderful opportunity to re-establish traditions and habits for your family, what changes would you make?
We have spoken a lot about the best supplements for women with PCOS. And I often get asked which supplements are the most helpful for women with PCOS. You’ll find that there are so many options out there for women with PCOS.
But, I don’t want to be taking a million different tablets and supplements a day and I want to know that what I am taking will be effective for me. I also just don’t want to travel half way across the world with a bag full of supplements.
So, I have 3 essential PCOS supplements that I just won’t go without.
Vitamin D – 85% of women with PCOS are deficient in Vitamin D. You can read more about it here.
Omega 3 – It helps to lower testosterone levels in women with PCOS.
Ovasitol – My number 1 PCOS supplement. If you can’t get Ovasitol, Inositol is your next-best bet.
Yes, there are other things that I could add to my regime but this combination has been amazingly effective for me and so many others.
Now, this is also something that we tend to over think. We want to know what exercise we should be doing, how much and how often. And these are all legitimate questions with some very good answers that you can find here and here and here.
But remember, at the moment we’re talking about simplifying with PCOS.
So, what exercise should we be doing? Anything. Just make sure that you are doing something.
I find that sometimes the day runs away with me and I’ve hardly moved. That is why I love my Fitbit so much. It helps to remind me and motivate me to get moving and make sure that I get as close to my 10000 steps a day as possible.
Yes, I could be doing HIIT workouts and that would be even better. But as long as I’m moving and being active everyday, I’m happy with that.
So, my Fitbit will definitely be making it half way across the world!
At the moment, I’m very mindful of the number of distractions, adverts and pressures, to spend more – more time, more money, more stuff. With Black Friday and Cyber Monday just around the corner and Christmas not too far off, I just want to encourage you to be mindful.
Mindful of the things that you dedicate your time, money and energy to. For those of you who celebrate Thanksgiving tomorrow, may you have a wonderful day spent with family and friends, focusing on the things in your life that are most important for you.
And in closing, I’d love to hear from you. If you had an opportunity to “reset” and start fresh, what changes would you make – for you and for your family?
Awesome read! Thanks you Tarryn!
One question I have for you is: do you have an article or information about how to explain PCOS to family members? I am educating my husband, other family members and close friends about the fact that I have PCOS and need to eat gluten and dairy free, but there’s so much more to it that I don’t think they totally understand, especially since I am learning it all and it’s new to me too. 🙂 Any help or advice is greatly appreciated!
Thank you again for everything you do, especially given how busy you are as a mom and wife. Keep on sharing your stories and guiding us with such powerful knowledge for those of us who have PCOS.
I’m a teacher in Texas and we just had our Thanksgiving break. I’m not proud of this, but I did not eat mindfully for the whole 5 days I was away from my students. I ate horribly. Whatever I wanted, whenever I wanted. Let’s just say my pants were a little snug this morning when I got dressed for work. That being said, I already knew that starting today I would be resetting. I have been doing a good job staying active in gym, but as we all know, diet is key. I’m excited to get back on track. It is much needed!
Great post! It is so nice to know what works. I have been considering trying Ovasitol for a couple of weeks now. Has removing parabens and other chemicals made any sort of a difference for you? I have been avoiding them for a while, but I can’t tell if it really helped or not.
|
"""
STATEMENT
Given a binary tree, return the bottom-up level order traversal of its nodes' values.
(ie, from left to right, level by level from leaf to root).
CLARIFICATIONS
- Do we include 'None' values that a level may have? No, only actual node values are returned.
EXAMPLES
[3,9,20,null,null,15,7] -> [[15,7],[9,20],[3]]
COMMENTS
- The usual tree level traversal would work
(https://github.com/RatulSaha/leetcode/blob/master/101-150/102-binary-tree-level-order-reversal.py).
- We can use deque from collections module to do appendleft instead of append.
"""
def levelOrderBottom(root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
to_return = []
if not root:
return to_return
level = [root]
while level:
current_level_val = [node.val for node in level]
to_return = [current_level_val] + to_return
next_level = []
for node in level:
if node.left is not None:
next_level.append(node.left)
if node.right is not None:
next_level.append(node.right)
level = next_level
return to_return
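# As noted in COMMENTS above, collections.deque supports appendleft, which
# avoids rebuilding the result list on every level. A minimal sketch of that
# variant (same assumed TreeNode interface; the function name is ours):
from collections import deque

def levelOrderBottomDeque(root):
    to_return = deque()
    level = [root] if root else []
    while level:
        to_return.appendleft([node.val for node in level])
        level = [child for node in level
                 for child in (node.left, node.right) if child is not None]
    return list(to_return)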
|
The Sun, known for their cracking investigative journalism, reignited the halal meat debate recently with the big scoop that Pizza Express serve halal chicken. The revelation is listed on the Pizza Express website, next to a clarification as to how their Worcester Sauce is uniquely vegetarian friendly.
There are two dimensions to this storm. The first dimension is about animal rights, and the second dimension is about a media that is intent on maligning and stirring hate against a minority faith group. It’s disappointing to see or hear people I respect for their commitment to animal welfare getting muddled up in what is clearly a media-frenzy.
For those vegans, veggies and animal-lovers who are concerned about halal, and what it might mean, well I’d admit to you, there is a problem in the halal industry, in fact I believe there is a systematic problem with the meat industry in general. Driven as it is by profit, some abattoirs have failed to meet the most basic tenets of animal welfare, let alone issues such as battery farming. It is the presence of these problems in the halal industry that means there are countless Muslim organisations that try to regulate the halal industry, and often do so on a voluntary rather than commercial basis. Why? Well, because humane slaughter is central to the concept of halal.
‘Allah Who is Blessed and Exalted, has prescribed benevolence toward everything; so, when you must kill a living being, do it in the best manner, thus when you slaughter an animal, you should sharpen your knife so as to cause the animal as little pain as possible.’ (Riyadus Salihin. Hadith No. 643).
The hadith clearly indicates animals do feel pain (something which is, in some quarters of the modern world, still up for debate) and that slaughter should be as painless as possible. Other teachings related to halal instruct the animal should be given a meal beforehand (so it isn’t agitated), that the animal shouldn’t even see the knife before slaughter, that the act should take place away from where other animals might see it and be distressed.
There is then ample common ground between animal rights campaigners and Muslims concerned with halal meat. The problem within the halal industry is due to its profit-driven mechanistic approach. Rather than trying to ban halal and kosher, like Denmark already has, I would hope the two groups, with their shared concern for humane slaughter, would begin a conversation, away from the media frenzy.
But that leads onto the second dimension of the ‘halal hysteria’. There has been a repeating pattern in print and online media recently of stories that purport to be about a moral issue but are in fact simply reinforcing a narrative of hate, difference, and other-ing against British Muslims. The debate about segregation on campus, supposedly about women’s rights, is a good example. It dominated headlines and columns for months. Compare it with how much attention was received by the absolutely jaw-dropping statistic that nearly 1/3 of students in Cambridge University have been sexually assaulted. The many voices that commented on segregation as the issue of sexism on campus are deeply uninterested in sexual abuse at Cambridge.
Laurie Penny argued in her column that feminists should not allow Islamophobes to appropriate the language of feminism for their own purposes. I would call on animal rights campaigners, who might be tempted to jump into the halal debate, to ensure they don’t allow Islamophobes to appropriate the language of animal welfare either.
On Religion is a quarterly print magazine that provides in-depth coverage on religion, faith and current affairs. Subscribe for a one-off annual payment of £19.
|
import sys
sys.path = ["./py"] + sys.path
import cbox
global Document
Document = cbox.Document
scene = Document.get_scene()
scene.clear()
instrument = scene.add_new_instrument_layer("test_sampler", "sampler").get_instrument()
npfs = instrument.engine.load_patch_from_string(0, '.', '', 'new_patch')
instrument.engine.set_patch(1, 0)
mgrp = npfs.get_global().get_children()[0]
g1 = mgrp.new_child()
g1.set_param("cutoff", "100")
g1.set_param("resonance", "6")
g1.set_param("fil_type", "lpf_4p")
g1.set_param("fileg_start", "50")
g1.set_param("fileg_attack", "0.01")
g1.set_param("fileg_decay", "0.2")
g1.set_param("fileg_sustain", "20")
g1.set_param("fileg_depth", "5400")
g1.set_param("fileg_release", "10")
g1.set_param("ampeg_release", "0.1")
g1.set_param("amp_veltrack", "0")
g1.set_param("volume", "-12")
g1.set_param("fileg_depthcc14", "-5400")
#g1.set_param("cutoff", "1000")
#g1.set_param("fillfo_freq", "4")
#g1.set_param("fillfo_depth", "2400")
#g1.set_param("fillfo_wave", "12")
#g1.set_param("fillfo_freqcc2", "4")
r1 = g1.new_child()
r1.set_param("sample", "*saw")
r1.set_param("transpose", "0")
r1.set_param("tune", "5")
r1.set_param("gain_cc17", "12")
r2 = g1.new_child()
r2.set_param("sample", "*sqr")
r2.set_param("transpose", "12")
r2.set_param("gain_cc17", "-12")
print(instrument.engine.status())
print("Ready!")
while True:
cbox.call_on_idle()
|
Greetings! Happy middle of the season of the Sun. Keep cool and enjoy the power and the love, as Venus is transiting Leo, hand-in-hand with the Sun, ’til August 21. The Fool Moon is in Aquarius on Saturday, August 13th, at three ’til 2 a.m.
LEO (7.23-8.22) Ahhhh … Time for you to look gooood! Here comes Venus, ’til the 21st. Pleasure is your treasure, whether it’s having a party for your friends or dealing love that never ends! You know how the MOJO knows… Plan something unique, unexpected and surprising for August 13th, the weekend after next. It’ll work for you, ’cuz you’re looking soooo good! Hey! You forget something?
VIRGO (8.23-9.22) Oh, oh! Oh my God, it’s ALL coming out! Every little detail, every little, private, personal “sin.” They know ’cuz you(’ll) know! Jesus! They(’ve) got you! They’re going to know everything! And it’s a Leo that’s going to rat you out! Vacation? Hide? How can you hide? Everybody knows!
LIBRA (9.23-10.22) Love’s at a party for this coming month. Does anybody still have parties? Then why wasn’t I invited? We’re in the season of the Sun (Leo). Next comes the realm of Mother Nature (Virgo), leading to the harmony that is you (Libra). You deserve it. You’re halfway through. Do yoga for balance to/and drink from the chalice.
SCORPIO (10.23-11.22) How come you feel sooo good? Ahhh… You are well known. Popular people seem to get along a little easier, due to little assistances here and there, ’cuz (somewhat unexpectedly) other people care. You are happy to be alive! Please, allow me to share some jive? Luck’s not/will not be with you this year — but the next. Hard work transcends this hex.
SAGITTARIUS (11.23-12.21) Fire it up at noon on Sunday, the 7th, and keep it lit ’til 3:33 on Tuesday afternoon. Too soon? The honeymoon is over, Grover. Your money dream is due at midnight. Is it money or honey (love)? Something’s “runny”… I’m just being funny. Now it’s your turn to burn. Flame on!
CAPRICORN (12.22-1.20) Dream next Tuesday at midnight about who you want to be. Is it love or is it money? We’ll see, won’t we?
PISCES (2.20-3.20) Oh my God, are you screwed. Mercury is retrograde in opposition to your esoteric ruler, Neptune, which is also retrograde early in the esoterically-ruled sign of Pisces. Unexplainable confusion opposite the mystical, subconscious planet of illusion, confusion and delusion. Now you know how I feel all the time.
ARIES (3.21-4.20) Okay. You got it now, and you gotta git it home then… Well then, when? You feel inordinate pride in your home setting. Mars is in the domestic sign of Cancer. Cleaning, landscaping, gardening…making it look better. Well then, at least take out the trash?
TAURUS (4.21-5.20) Time to let it rip! Venus and Jupiter are kickin’ it! Love is shinin’ like the Sun, with enough to shine on everyone. Traveling, school, unraveling the Golden Rule? “The one with the gold rules,” right? Make a move toward where you want to be in December.
GEMINI (5.21-6.21) Trouble at home. Miss Understanding runs rampant. Dammit! When you can’t understand the tune, you have to vamp it, and you’re on the wrong side of the Moon. Dammit! Jam it or cram it! You’re out of tune/town for three weeks, I thinks… Nature speaks, but who can listen?
CANCER (6.22-7.22) Gear up for the Fool Moon next Saturday, August 13th, at 1:57 in the afternoon, Omax time. It’s in Aquarius, so you know the fools will be out! Money through art (Venus in Leo). Expansion (Jupiter) through parties (11th House). Hey! Don’t forget to invite me this time! See ya then/there.
|
import logging
from hazelcast.future import make_blocking
from hazelcast.partition import string_partition_strategy
from hazelcast.util import enum, thread_id
def default_response_handler(future, codec, to_object):
response = future.result()
if response:
try:
codec.decode_response
except AttributeError:
return
decoded_response = codec.decode_response(response, to_object)
try:
return decoded_response['response']
        except KeyError:  # decoded responses are dicts; guard the missing key
pass
class Proxy(object):
def __init__(self, client, service_name, name):
self.service_name = service_name
self.name = name
self.partition_key = string_partition_strategy(self.name)
self._client = client
self.logger = logging.getLogger("%s(%s)" % (type(self).__name__, name))
self._to_object = client.serializer.to_object
self._to_data = client.serializer.to_data
self._start_listening = client.listener.start_listening
self._stop_listening = client.listener.stop_listening
def destroy(self):
self._on_destroy()
return self._client.proxy.destroy_proxy(self.service_name, self.name)
def _on_destroy(self):
pass
def __repr__(self):
return '%s(name="%s")' % (type(self).__name__, self.name)
def _encode_invoke(self, codec, response_handler=default_response_handler, **kwargs):
request = codec.encode_request(name=self.name, **kwargs)
return self._client.invoker.invoke_on_random_target(request).continue_with(response_handler, codec,
self._to_object)
def _encode_invoke_on_target(self, codec, _address, response_handler=default_response_handler, **kwargs):
request = codec.encode_request(name=self.name, **kwargs)
return self._client.invoker.invoke_on_target(request, _address).continue_with(response_handler, codec,
self._to_object)
def _encode_invoke_on_key(self, codec, key_data, **kwargs):
partition_id = self._client.partition_service.get_partition_id(key_data)
return self._encode_invoke_on_partition(codec, partition_id, **kwargs)
def _encode_invoke_on_partition(self, codec, _partition_id, response_handler=default_response_handler, **kwargs):
request = codec.encode_request(name=self.name, **kwargs)
return self._client.invoker.invoke_on_partition(request, _partition_id).continue_with(response_handler,
codec, self._to_object)
def blocking(self):
"""
:return: Return a version of this proxy with only blocking method calls
"""
return make_blocking(self)
class PartitionSpecificProxy(Proxy):
def __init__(self, client, service_name, name):
super(PartitionSpecificProxy, self).__init__(client, service_name, name)
self._partition_id = self._client.partition_service.get_partition_id(self.partition_key)
def _encode_invoke(self, codec, response_handler=default_response_handler, **kwargs):
return super(PartitionSpecificProxy, self)._encode_invoke_on_partition(codec, self._partition_id,
response_handler=response_handler,
**kwargs)
class TransactionalProxy(object):
def __init__(self, name, transaction):
self.name = name
self.transaction = transaction
self._to_object = transaction.client.serializer.to_object
self._to_data = transaction.client.serializer.to_data
def _encode_invoke(self, codec, response_handler=default_response_handler, **kwargs):
request = codec.encode_request(name=self.name, txn_id=self.transaction.id, thread_id=thread_id(), **kwargs)
return self.transaction.client.invoker.invoke_on_connection(request, self.transaction.connection).continue_with(
response_handler, codec, self._to_object)
def __repr__(self):
return '%s(name="%s")' % (type(self).__name__, self.name)
ItemEventType = enum(added=1, removed=2)
EntryEventType = enum(added=1,
removed=1 << 1,
updated=1 << 2,
evicted=1 << 3,
evict_all=1 << 4,
clear_all=1 << 5,
merged=1 << 6,
expired=1 << 7)
class ItemEvent(object):
def __init__(self, name, item_data, event_type, member, to_object):
self.name = name
self._item_data = item_data
self.event_type = event_type
self.member = member
self._to_object = to_object
@property
def item(self):
return self._to_object(self._item_data)
class EntryEvent(object):
def __init__(self, to_object, key, old_value, value, merging_value, event_type, uuid,
number_of_affected_entries):
self._key_data = key
self._value_data = value
self._old_value_data = old_value
self._merging_value_data = merging_value
self.event_type = event_type
self.uuid = uuid
self.number_of_affected_entries = number_of_affected_entries
self._to_object = to_object
@property
def key(self):
return self._to_object(self._key_data)
@property
def old_value(self):
return self._to_object(self._old_value_data)
@property
def value(self):
return self._to_object(self._value_data)
@property
def merging_value(self):
return self._to_object(self._merging_value_data)
def __repr__(self):
return "EntryEvent(key=%s, old_value=%s, value=%s, merging_value=%s, event_type=%s, uuid=%s, " \
"number_of_affected_entries=%s)" % (
self.key, self.old_value, self.value, self.merging_value, self.event_type, self.uuid,
self.number_of_affected_entries)
class TopicMessage(object):
def __init__(self, name, message_data, publish_time, member, to_object):
self.name = name
self._message_data = message_data
self.publish_time = publish_time
self.member = member
self._to_object = to_object
@property
def message(self):
return self._to_object(self._message_data)
def get_entry_listener_flags(**kwargs):
flags = 0
for (key, value) in kwargs.iteritems():
if value:
flags |= getattr(EntryEventType, key)
return flags
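if __name__ == "__main__":
    # Hypothetical demo of the flag helper above: 'added' (1) combined with
    # 'updated' (1 << 2) yields a bitmask of 5; False-valued flags add nothing.
    demo_flags = get_entry_listener_flags(added=True, updated=True, removed=False)
    assert demo_flags == EntryEventType.added | EntryEventType.updated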
|
1 Fixed rate reverts to our Basic Investment Residential Loan rate after fixed term period.
2 The comparison rates are calculated on a loan amount of $150,000 over a term of 25 years. WARNING: This comparison rate is true only for the example given and may not include all fees and charges. Different terms, fees or other loan amounts might result in a different comparison rate. All loans are secured. Fixed interest rate investment loans are fixed for the term specified in the Offer and Loan Contract after which the interest rate will revert to the Basic Investment variable interest rate applicable at the time. The standard variable rate is published at each Credit Union branch. Terms and Conditions, fees and charges apply and are available on request.
1 Fixed rate reverts to our Basic Investment Residential Loan variable rate after the fixed term period.
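For illustration only, and not necessarily the method used here: a comparison rate is conventionally the single rate $r$ that discounts the entire repayment stream, fees included, back to the net amount advanced. A simplified sketch, assuming a loan amount $L$, upfront fees $F$ and $n$ equal monthly repayments $P$: the comparison rate solves $L - F = \sum_{k=1}^{n} P\,(1 + r/12)^{-k}$, which is why the quoted figure holds only for the stated $150,000, 25-year example.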
|
#!/usr/bin/env python
# William Lam
# www.virtuallyghetto.com
"""
vSphere Python SDK program for listing Datastores in Datastore Cluster
"""
import argparse
import atexit
from pyVmomi import vim
from pyVmomi import vmodl
from pyVim import connect
def get_args():
"""
Supports the command-line arguments listed below.
"""
parser = argparse.ArgumentParser(
description='Process args for retrieving all the Virtual Machines')
parser.add_argument('-s', '--host',
required=True, action='store',
help='Remote host to connect to')
parser.add_argument('-o', '--port',
type=int, default=443,
action='store', help='Port to connect on')
parser.add_argument('-u', '--user', required=True,
action='store',
help='User name to use when connecting to host')
parser.add_argument('-p', '--password',
required=True, action='store',
help='Password to use when connecting to host')
parser.add_argument('-d', '--dscluster', required=True, action='store',
help='Name of vSphere Datastore Cluster')
args = parser.parse_args()
return args
def main():
"""
Simple command-line program for listing Datastores in Datastore Cluster
"""
args = get_args()
try:
service_instance = connect.SmartConnect(host=args.host,
user=args.user,
pwd=args.password,
port=int(args.port))
if not service_instance:
print("Could not connect to the specified host using "
"specified username and password")
return -1
atexit.register(connect.Disconnect, service_instance)
content = service_instance.RetrieveContent()
# Search for all Datastore Clusters aka StoragePod
obj_view = content.viewManager.CreateContainerView(content.rootFolder,
[vim.StoragePod],
True)
ds_cluster_list = obj_view.view
obj_view.Destroy()
        for ds_cluster in ds_cluster_list:
            if ds_cluster.name == args.dscluster:
                datastores = ds_cluster.childEntity
                print("Datastores: ")
                for datastore in datastores:
                    print(datastore.name)
    except vmodl.MethodFault as error:
        print("Caught vmodl fault : " + error.msg)
return -1
return 0
# Start program
if __name__ == "__main__":
main()
|
As one of Europe’s leading research universities, Utrecht is renowned for its innovative interdisciplinary research and emphasis on high-quality education. These two factors make the university a particularly attractive place for (guest) researchers, (prospective) PhD-candidates and other visitors throughout the world.
Utrecht University’s international working community consists of different groups: staff, PhD-candidates and guests. Rules and regulations may vary: see what applies to your situation. Most of the information provided applies to all internationals. We are happy to inform you about living in the Netherlands and help you to prepare for your stay.
Come to Utrecht and visit Utrecht University locations.
What do you need to arrange before coming to Utrecht?
Before you start your preparations, it's important to contact the Faculty you are going to visit and follow its registration procedure. As a (guest) researcher or guest, your first and main point of contact will be the Faculty. Each Faculty has its own procedures concerning the registration of (guest) researchers and guests.
What information applies to you depends on your situation. When applicable, distinctions will be marked in the text.
You are considered staff when you have a paid position with Utrecht University. More information for staff is available at working at Utrecht University or - for current staff - on the intranet.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import argparse
import os
from chgk_parser import gui_parse
from chgk_composer import gui_compose
try:
    from Tkinter import *
    import tkFileDialog
    import tkFont
except ImportError:
    from tkinter import *
    import tkinter.filedialog as tkFileDialog
    import tkinter.font as tkFont
debug = False
def gui_choose_action():
def parsereturn():
root.ret = 'parse'
root.quit()
root.destroy()
def composereturn():
root.ret = 'compose'
root.quit()
root.destroy()
root = Tk()
root.ret = 'None'
frame = Frame(root)
frame.pack()
bottomframe = Frame(root)
bottomframe.pack(side = 'bottom')
Button(frame, command=
parsereturn, text = 'Parse').pack(side = 'left',
padx = 20, pady = 20,
ipadx = 20, ipady = 20,)
Button(frame, command=
composereturn, text = 'Compose').pack(side = 'left',
padx = 20, pady = 20,
ipadx = 20, ipady = 20,)
root.mainloop()
return root.ret
def main():
action = gui_choose_action()
if action == 'parse':
gui_parse()
if action == 'compose':
gui_compose()
if __name__ == "__main__":
main()
|
This is me, beginning to write! I had to laugh, thanks.
Don’t worry there is padding at the bottom of the slide. =P You’re doing great! Keep it up!
|
# -*- coding: utf-8 -*-
import numpy as np
import scipy.stats as ss
import itertools as it
from statsmodels.sandbox.stats.multicomp import multipletests
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from statsmodels.stats.libqsturng import psturng
from pandas import DataFrame
def __convert_to_df(a, val_col: str = 'vals', group_col: str = 'groups',
val_id: int = None, group_id: int = None) -> DataFrame:
'''Hidden helper method to create a DataFrame with input data for further
processing.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing array interface or a pandas DataFrame.
Array must be two-dimensional. Second dimension may vary, i.e. groups
may have different lengths.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values
(test or response variable). Values should have a non-nominal scale.
Must be specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
val_id : int, optional
Index of a column that contains dependent variable values (test or
response variable). Should be specified if a NumPy ndarray is used as an
input. It will be inferred from data, if not specified.
group_id : int, optional
Index of a column that contains independent variable values (grouping or
predictor variable). Should be specified if a NumPy ndarray is used as
an input. It will be inferred from data, if not specified.
Returns
-------
x : pandas DataFrame
DataFrame with input data, `val_col` column contains numerical values and
`group_col` column contains categorical values.
val_col : str
Name of a DataFrame column that contains dependent variable values (test
or response variable).
group_col : str
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable).
Notes
-----
    The inference algorithm for determining the `val_id` and `group_id` args is
    rather simple, so it is better to specify them explicitly to prevent errors.
'''
if not group_col:
group_col = 'groups'
if not val_col:
val_col = 'vals'
if isinstance(a, DataFrame):
x = a.copy()
if not {group_col, val_col}.issubset(a.columns):
raise ValueError('Specify correct column names using `group_col` and `val_col` args')
return x, val_col, group_col
elif isinstance(a, list) or (isinstance(a, np.ndarray) and not a.shape.count(2)):
grps_len = map(len, a)
grps = list(it.chain(*[[i+1] * l for i, l in enumerate(grps_len)]))
vals = list(it.chain(*a))
return DataFrame({val_col: vals, group_col: grps}), val_col, group_col
elif isinstance(a, np.ndarray):
# cols ids not defined
# trying to infer
if not(all([val_id, group_id])):
if np.argmax(a.shape):
a = a.T
ax = [np.unique(a[:, 0]).size, np.unique(a[:, 1]).size]
if np.diff(ax).item():
__val_col = np.argmax(ax)
__group_col = np.argmin(ax)
else:
raise ValueError('Cannot infer input format.\nPlease specify `val_id` and `group_id` args')
cols = {__val_col: val_col,
__group_col: group_col}
else:
cols = {val_id: val_col,
group_id: group_col}
cols_vals = dict(sorted(cols.items())).values()
return DataFrame(a, columns=cols_vals), val_col, group_col
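# Illustrative note (not part of the module): a quick sketch of what
# __convert_to_df returns for ragged list input, assuming an interactive
# session where this module has been imported.
#
#   >>> df, v, g = __convert_to_df([[1, 2, 3], [4, 5]])
#   >>> df[g].tolist()          # groups are numbered 1..k in input order
#   [1, 1, 1, 2, 2]
#   >>> df[v].tolist()
#   [1, 2, 3, 4, 5]
#
# Passing a DataFrame instead requires `val_col` and `group_col` to name
# existing columns, as the docstring above describes.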
def __convert_to_block_df(a, y_col=None, group_col=None, block_col=None, melted=False):
# TODO: refactor conversion of block data to DataFrame
if melted and not all([i is not None for i in [block_col, group_col, y_col]]):
raise ValueError('`block_col`, `group_col`, `y_col` should be explicitly specified if using melted data')
if isinstance(a, DataFrame) and not melted:
x = a.copy(deep=True)
group_col = 'groups'
block_col = 'blocks'
y_col = 'y'
x.columns.name = group_col
x.index.name = block_col
x = x.reset_index().melt(id_vars=block_col, var_name=group_col, value_name=y_col)
elif isinstance(a, DataFrame) and melted:
x = DataFrame.from_dict({'groups': a[group_col],
'blocks': a[block_col],
'y': a[y_col]})
elif not isinstance(a, DataFrame):
x = np.array(a)
x = DataFrame(x, index=np.arange(x.shape[0]), columns=np.arange(x.shape[1]))
if not melted:
group_col = 'groups'
block_col = 'blocks'
y_col = 'y'
x.columns.name = group_col
x.index.name = block_col
x = x.reset_index().melt(id_vars=block_col, var_name=group_col, value_name=y_col)
else:
x.rename(columns={group_col: 'groups', block_col: 'blocks', y_col: 'y'}, inplace=True)
group_col = 'groups'
block_col = 'blocks'
y_col = 'y'
return x, y_col, group_col, block_col
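# Illustrative note (not part of the module): __convert_to_block_df melts a
# wide block-design matrix (rows are blocks, columns are groups) into long
# format. A minimal sketch, assuming an interactive session:
#
#   >>> m = np.array([[31, 27, 24],
#   ...               [31, 28, 31]])
#   >>> x, y_col, group_col, block_col = __convert_to_block_df(m)
#   >>> sorted(x.columns.tolist())
#   ['blocks', 'groups', 'y']
#
# Each row of `x` is then a single (block, group, y) observation, which is
# the shape the Friedman-type post hoc tests below expect.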
def posthoc_conover(a, val_col=None, group_col=None, p_adjust=None, sort=True):
'''Post hoc pairwise test for multiple comparisons of mean rank sums
(Conover's test). May be used after Kruskal-Wallis one-way analysis of
variance by ranks to do pairwise comparisons [1]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas DataFrame.
Array must be two-dimensional. Second dimension may vary,
i.e. groups may have different lengths.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
p_adjust : str, optional
Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by `group_col` or not. Recommended
unless you sort your data manually.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
    A tie correction is employed according to Conover [1]_.
References
----------
.. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
Examples
--------
>>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
>>> sp.posthoc_conover(x, p_adjust = 'holm')
'''
def compare_conover(i, j):
diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
D = (n - 1. - h_cor) / (n - x_len)
t_value = diff / np.sqrt(S2 * B * D)
p_value = 2. * ss.t.sf(np.abs(t_value), df=n-x_len)
return p_value
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
x = x.sort_values(by=[_group_col, _val_col], ascending=True) if sort else x
n = len(x.index)
x_groups_unique = x[_group_col].unique()
x_len = x_groups_unique.size
x_lens = x.groupby(_group_col)[_val_col].count()
x['ranks'] = x[_val_col].rank()
x_ranks_avg = x.groupby(_group_col)['ranks'].mean()
x_ranks_sum = x.groupby(_group_col)['ranks'].sum()
# ties
vals = x.groupby('ranks').count()[_val_col].values
tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1])
tie_sum = 0 if not tie_sum else tie_sum
x_ties = np.min([1., 1. - tie_sum / (n ** 3. - n)])
h = (12. / (n * (n + 1.))) * np.sum(x_ranks_sum**2 / x_lens) - 3. * (n + 1.)
h_cor = h / x_ties
if x_ties == 1:
S2 = n * (n + 1.) / 12.
else:
S2 = (1. / (n - 1.)) * (np.sum(x['ranks'] ** 2.) - (n * (((n + 1.)**2.) / 4.)))
vs = np.zeros((x_len, x_len))
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:, :] = 0
combs = it.combinations(range(x_len), 2)
for i, j in combs:
vs[i, j] = compare_conover(x_groups_unique[i], x_groups_unique[j])
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
vs[tri_lower] = np.transpose(vs)[tri_lower]
np.fill_diagonal(vs, 1)
return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
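# Hedged usage sketch for posthoc_conover with the DataFrame interface; the
# column names 'score' and 'grp' below are invented for the example:
#
#   >>> df = DataFrame({'score': [1, 2, 3, 12, 31, 54, 10, 12, 6],
#   ...                 'grp': ['a'] * 3 + ['b'] * 3 + ['c'] * 3})
#   >>> posthoc_conover(df, val_col='score', group_col='grp', p_adjust='holm')
#
# The result is a symmetric k x k DataFrame of (adjusted) p values with 1.0
# on the diagonal.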
def posthoc_dunn(a, val_col=None, group_col=None, p_adjust=None, sort=True):
'''Post hoc pairwise test for multiple comparisons of mean rank sums
(Dunn's test). May be used after Kruskal-Wallis one-way analysis of
variance by ranks to do pairwise comparisons [1]_, [2]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas DataFrame.
Array must be two-dimensional. Second dimension may vary,
i.e. groups may have different lengths.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
p_adjust : str, optional
Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by group_col or not. Recommended
unless you sort your data manually.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
A tie correction will be employed according to Glantz (2012).
References
----------
.. [1] O.J. Dunn (1964). Multiple comparisons using rank sums.
Technometrics, 6, 241-252.
.. [2] S.A. Glantz (2012), Primer of Biostatistics. New York: McGraw Hill.
Examples
--------
>>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
>>> sp.posthoc_dunn(x, p_adjust = 'holm')
'''
def compare_dunn(i, j):
diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
A = n * (n + 1.) / 12.
B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
z_value = diff / np.sqrt((A - x_ties) * B)
p_value = 2. * ss.norm.sf(np.abs(z_value))
return p_value
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
x = x.sort_values(by=[_group_col, _val_col], ascending=True) if sort else x
n = len(x.index)
x_groups_unique = x[_group_col].unique()
x_len = x_groups_unique.size
x_lens = x.groupby(_group_col)[_val_col].count()
x['ranks'] = x[_val_col].rank()
x_ranks_avg = x.groupby(_group_col)['ranks'].mean()
# ties
vals = x.groupby('ranks').count()[_val_col].values
tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1])
tie_sum = 0 if not tie_sum else tie_sum
x_ties = tie_sum / (12. * (n - 1))
vs = np.zeros((x_len, x_len))
combs = it.combinations(range(x_len), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:, :] = 0
for i, j in combs:
vs[i, j] = compare_dunn(x_groups_unique[i], x_groups_unique[j])
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
vs[tri_lower] = np.transpose(vs)[tri_lower]
np.fill_diagonal(vs, 1)
return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
def posthoc_nemenyi(a, val_col=None, group_col=None, dist='chi', sort=True):
'''Post hoc pairwise test for multiple comparisons of mean rank sums
(Nemenyi's test). May be used after Kruskal-Wallis one-way analysis of
variance by ranks to do pairwise comparisons [1]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame. Array must be two-dimensional. Second dimension may vary,
i.e. groups may have different lengths.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
    dist : str, optional
        Method for determining the p value. Either "chi" (chi-squared, the
        default) or "tukey" (studentized range).
sort : bool, optional
Specifies whether to sort DataFrame by group_col or not. Recommended
unless you sort your data manually.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
A tie correction will be employed according to Glantz (2012).
References
----------
.. [1] Lothar Sachs (1997), Angewandte Statistik. Berlin: Springer.
Pages: 395-397, 662-664.
Examples
--------
>>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
>>> sp.posthoc_nemenyi(x)
'''
def compare_stats_chi(i, j):
diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
A = n * (n + 1.) / 12.
B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
chi = diff ** 2. / (A * B)
return chi
def compare_stats_tukey(i, j):
diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
q = diff / np.sqrt((n * (n + 1.) / 12.) * B)
return q
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
x = x.sort_values(by=[_group_col, _val_col], ascending=True) if sort else x
n = len(x.index)
x_groups_unique = x[_group_col].unique()
x_len = x_groups_unique.size
x_lens = x.groupby(_group_col)[_val_col].count()
x['ranks'] = x[_val_col].rank()
x_ranks_avg = x.groupby(_group_col)['ranks'].mean()
# ties
vals = x.groupby('ranks').count()[_val_col].values
tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1])
tie_sum = 0 if not tie_sum else tie_sum
x_ties = np.min([1., 1. - tie_sum / (n ** 3. - n)])
vs = np.zeros((x_len, x_len))
combs = it.combinations(range(x_len), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:, :] = 0
if dist == 'chi':
for i, j in combs:
vs[i, j] = compare_stats_chi(x_groups_unique[i], x_groups_unique[j]) / x_ties
vs[tri_upper] = ss.chi2.sf(vs[tri_upper], x_len - 1)
elif dist == 'tukey':
for i, j in combs:
vs[i, j] = compare_stats_tukey(x_groups_unique[i], x_groups_unique[j]) * np.sqrt(2.)
vs[tri_upper] = psturng(vs[tri_upper], x_len, np.inf)
vs[tri_lower] = np.transpose(vs)[tri_lower]
np.fill_diagonal(vs, 1)
return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
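# Hedged sketch of the `dist` option for posthoc_nemenyi: 'chi' (the default)
# uses the tie-corrected chi-squared approximation, 'tukey' the studentized
# range. Toy data only:
#
#   >>> x = [[1, 2, 3, 5, 1], [12, 31, 54], [10, 12, 6, 74, 11]]
#   >>> posthoc_nemenyi(x, dist='tukey')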
def posthoc_nemenyi_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False):
'''Calculate pairwise comparisons using Nemenyi post hoc test for
unreplicated blocked data. This test is usually conducted post hoc if
significant results of the Friedman's test are obtained. The statistics
refer to upper quantiles of the studentized range distribution (Tukey) [1]_,
[2]_, [3]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
If `melted` is set to False (default), `a` is a typical matrix of
block design, i.e. rows are blocks, and columns are groups. In this
case you do not need to specify col arguments.
If `a` is an array and `melted` is set to True,
y_col, block_col and group_col must specify the indices of columns
        containing elements of the corresponding type.
If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify column names (strings).
y_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains y data.
block_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains blocking factor values.
group_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains treatment (group) factor values.
melted : bool, optional
Specifies if data are given as melted columns "y", "blocks", and
"groups".
sort : bool, optional
If True, sort data by block and group columns.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
    A one-way ANOVA with repeated measures, also referred to as ANOVA with
    unreplicated block design, can be conducted via Friedman's test. The
    consequent post hoc pairwise multiple comparison test according to
    Nemenyi is conducted with this function.
This function does not test for ties.
References
----------
.. [1] J. Demsar (2006), Statistical comparisons of classifiers over
multiple data sets, Journal of Machine Learning Research, 7, 1-30.
.. [2] P. Nemenyi (1963) Distribution-free Multiple Comparisons. Ph.D.
thesis, Princeton University.
.. [3] L. Sachs (1997), Angewandte Statistik. Berlin: Springer.
Pages: 668-675.
Examples
--------
>>> # Non-melted case, x is a block design matrix, i.e. rows are blocks
>>> # and columns are groups.
>>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
>>> sp.posthoc_nemenyi_friedman(x)
'''
def compare_stats(i, j):
dif = np.abs(R[groups[i]] - R[groups[j]])
qval = dif / np.sqrt(k * (k + 1.) / (6. * n))
return qval
x, _y_col, _group_col, _block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
x = x.sort_values(by=[_group_col, _block_col], ascending=True) if sort else x
x.dropna(inplace=True)
groups = x[_group_col].unique()
k = groups.size
n = x[_block_col].unique().size
x['mat'] = x.groupby(_block_col)[_y_col].rank()
R = x.groupby(_group_col)['mat'].mean()
vs = np.zeros((k, k))
combs = it.combinations(range(k), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:, :] = 0
for i, j in combs:
vs[i, j] = compare_stats(i, j)
vs *= np.sqrt(2.)
vs[tri_upper] = psturng(vs[tri_upper], k, np.inf)
vs[tri_lower] = np.transpose(vs)[tri_lower]
np.fill_diagonal(vs, 1)
return DataFrame(vs, index=groups, columns=groups)
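# Hedged sketch of the melted interface for posthoc_nemenyi_friedman; the
# long-format column names below are chosen only for the example:
#
#   >>> long = DataFrame({'y': [31, 27, 24, 31, 28, 31],
#   ...                   'blocks': [1, 1, 1, 2, 2, 2],
#   ...                   'groups': ['a', 'b', 'c', 'a', 'b', 'c']})
#   >>> posthoc_nemenyi_friedman(long, y_col='y', block_col='blocks',
#   ...                          group_col='groups', melted=True)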
def posthoc_conover_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
'''Calculate pairwise comparisons using Conover post hoc test for unreplicated
blocked data. This test is usually conducted post hoc after
significant results of the Friedman test. The statistics refer to
the Student t distribution [1]_, [2]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
If `melted` is set to False (default), `a` is a typical matrix of
block design, i.e. rows are blocks, and columns are groups. In this
case you do not need to specify col arguments.
If `a` is an array and `melted` is set to True,
y_col, block_col and group_col must specify the indices of columns
        containing elements of the corresponding type.
If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify column names (strings).
y_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains y data.
block_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains blocking factor values.
group_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains treatment (group) factor values.
melted : bool, optional
Specifies if data are given as melted columns "y", "blocks", and
"groups".
sort : bool, optional
If True, sort data by block and group columns.
p_adjust : str, optional
Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
'single-step' : uses Tukey distribution for multiple comparisons
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
    A one-way ANOVA with repeated measures, also referred to as ANOVA with
    unreplicated block design, can be conducted via the Friedman test. The
    consequent post hoc pairwise multiple comparison test according to
    Conover is conducted with this function.
    If y is a matrix, then the columns refer to the treatments and the rows
    indicate the blocks.
References
----------
.. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
.. [2] W. J. Conover (1999), Practical nonparametric Statistics, 3rd. Edition,
Wiley.
Examples
--------
>>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
>>> sp.posthoc_conover_friedman(x)
'''
def compare_stats(i, j):
dif = np.abs(R.loc[groups[i]] - R.loc[groups[j]])
tval = dif / np.sqrt(A) / np.sqrt(B)
pval = 2. * ss.t.sf(np.abs(tval), df=(m*n*k - k - n + 1))
return pval
def compare_tukey(i, j):
dif = np.abs(R.loc[groups[i]] - R.loc[groups[j]])
qval = np.sqrt(2.) * dif / (np.sqrt(A) * np.sqrt(B))
pval = psturng(qval, k, np.inf)
return pval
x, _y_col, _group_col, _block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
x = x.sort_values(by=[_group_col, _block_col], ascending=True) if sort else x
x.dropna(inplace=True)
groups = x[_group_col].unique()
k = groups.size
n = x[_block_col].unique().size
x['mat'] = x.groupby(_block_col)[_y_col].rank()
R = x.groupby(_group_col)['mat'].sum()
A1 = (x['mat'] ** 2).sum()
m = 1
S2 = m/(m*k - 1.) * (A1 - m*k*n*(m*k + 1.)**2./4.)
    T2 = 1. / S2 * np.sum((R - n * m * (m * k + 1.) / 2.) ** 2.)
A = S2 * (2. * n * (m * k - 1.)) / (m * n * k - k - n + 1.)
B = 1. - T2 / (n * (m * k - 1.))
vs = np.zeros((k, k))
combs = it.combinations(range(k), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:, :] = 0
if p_adjust == 'single-step':
for i, j in combs:
vs[i, j] = compare_tukey(i, j)
else:
for i, j in combs:
vs[i, j] = compare_stats(i, j)
if p_adjust is not None:
vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
vs[tri_lower] = np.transpose(vs)[tri_lower]
np.fill_diagonal(vs, 1)
return DataFrame(vs, index=groups, columns=groups)
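# Note (illustrative): p_adjust='single-step' switches posthoc_conover_friedman
# to the studentized-range (Tukey) comparison defined above instead of the
# t statistics with a step-wise p adjustment. A sketch with the docstring data:
#
#   >>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
#   >>> posthoc_conover_friedman(x, p_adjust='single-step')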
def posthoc_npm_test(a, val_col=None, group_col=None, sort=False, p_adjust=None):
'''Calculate pairwise comparisons using Nashimoto and Wright's all-pairs
comparison procedure (NPM test) for simply ordered mean ranksums.
NPM test is basically an extension of Nemenyi's procedure for testing
increasingly ordered alternatives [1]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
sort : bool, optional
If True, sort data by block and group columns.
p_adjust : str, optional
Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
    The p values are estimated from the studentized range distribution. If
    the medians are already increasingly ordered, then the NPM test
    simplifies to the ordinary Nemenyi test.
References
----------
.. [1] Nashimoto, K., Wright, F.T., (2005), Multiple comparison procedures for
detecting differences in simply ordered means. Comput. Statist. Data
Anal. 48, 291--306.
Examples
--------
>>> x = np.array([[102,109,114,120,124],
[110,112,123,130,145],
[132,141,156,160,172]])
>>> sp.posthoc_npm_test(x)
'''
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
x = x.sort_values(by=[_group_col], ascending=True) if sort else x
groups = x[_group_col].unique()
x['ranks'] = x[_val_col].rank()
ri = x.groupby(_group_col)['ranks'].mean()
ni = x.groupby(_group_col)[_val_col].count()
k = groups.size
n = x.shape[0]
sigma = np.sqrt(n * (n + 1) / 12.)
df = np.inf
    def compare(m, u):
        # Standardized differences between group u and every group in m
        a = [(ri.loc[groups[u]] - ri.loc[groups[_mi]]) /
             (sigma / np.sqrt(2) *
              np.sqrt(1. / ni.loc[groups[_mi]] + 1. / ni.loc[groups[u]]))
             for _mi in m]
        return np.array(a)
stat = np.zeros((k, k))
for i in range(k-1):
for j in range(i+1, k):
u = j
m = np.arange(i, u)
tmp = compare(m, u)
stat[j, i] = np.max(tmp)
stat[stat < 0] = 0
p_values = psturng(stat, k, df)
tri_lower = np.tril_indices(p_values.shape[0], -1)
p_values[tri_lower] = p_values.T[tri_lower]
np.fill_diagonal(p_values, 1)
return DataFrame(p_values, index=groups, columns=groups)
def posthoc_siegel_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
'''Siegel and Castellan's All-Pairs Comparisons Test for Unreplicated Blocked
Data. See authors' paper for additional information [1]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
If `melted` is set to False (default), `a` is a typical matrix of
block design, i.e. rows are blocks, and columns are groups. In this
case you do not need to specify col arguments.
If `a` is an array and `melted` is set to True,
y_col, block_col and group_col must specify the indices of columns
        containing elements of the corresponding type.
If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify column names (strings).
y_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains y data.
block_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains blocking factor values.
group_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains treatment (group) factor values.
melted : bool, optional
Specifies if data are given as melted columns "y", "blocks", and
"groups".
sort : bool, optional
If True, sort data by block and group columns.
p_adjust : str, optional
Method for adjusting p values. See statsmodels.sandbox.stats.multicomp for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
For all-pairs comparisons in a two factorial unreplicated complete block design
with non-normally distributed residuals, Siegel and Castellan's test can be
performed on Friedman-type ranked data.
References
----------
.. [1] S. Siegel, N. J. Castellan Jr. (1988), Nonparametric Statistics for the
Behavioral Sciences. 2nd ed. New York: McGraw-Hill.
Examples
--------
>>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
>>> sp.posthoc_siegel_friedman(x)
'''
def compare_stats(i, j):
dif = np.abs(R[groups[i]] - R[groups[j]])
zval = dif / np.sqrt(k * (k + 1.) / (6. * n))
return zval
x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
x = x.sort_values(by=[group_col, block_col], ascending=True) if sort else x
x.dropna(inplace=True)
groups = x[group_col].unique()
k = groups.size
n = x[block_col].unique().size
x['mat'] = x.groupby(block_col)[y_col].rank()
R = x.groupby(group_col)['mat'].mean()
vs = np.zeros((k, k), dtype=float)
combs = it.combinations(range(k), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:, :] = 0
for i, j in combs:
vs[i, j] = compare_stats(i, j)
vs = 2. * ss.norm.sf(np.abs(vs))
vs[vs > 1] = 1.
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
vs[tri_lower] = np.transpose(vs)[tri_lower]
np.fill_diagonal(vs, 1)
return DataFrame(vs, index=groups, columns=groups)
def posthoc_miller_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False):
'''Miller's All-Pairs Comparisons Test for Unreplicated Blocked Data.
The p-values are computed from the chi-square distribution [1]_, [2]_,
[3]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
If `melted` is set to False (default), `a` is a typical matrix of
block design, i.e. rows are blocks, and columns are groups. In this
case you do not need to specify col arguments.
If `a` is an array and `melted` is set to True,
y_col, block_col and group_col must specify the indices of columns
        containing elements of the corresponding type.
If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify column names (strings).
y_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains y data.
block_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains blocking factor values.
group_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains treatment (group) factor values.
melted : bool, optional
Specifies if data are given as melted columns "y", "blocks", and
"groups".
sort : bool, optional
If True, sort data by block and group columns.
Returns
-------
Pandas DataFrame containing p values.
Notes
-----
For all-pairs comparisons in a two factorial unreplicated complete block
design with non-normally distributed residuals, Miller's test can be
performed on Friedman-type ranked data.
References
----------
    .. [1] J. Bortz, G. A. Lienert, K. Boehnke (1990), Verteilungsfreie
        Methoden in der Biostatistik. Berlin: Springer.
.. [2] R. G. Miller Jr. (1996), Simultaneous statistical inference. New
York: McGraw-Hill.
.. [3] E. L. Wike (2006), Data Analysis. A Statistical Primer for Psychology
Students. New Brunswick: Aldine Transaction.
Examples
--------
>>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
>>> sp.posthoc_miller_friedman(x)
'''
def compare_stats(i, j):
dif = np.abs(R[groups[i]] - R[groups[j]])
qval = dif / np.sqrt(k * (k + 1.) / (6. * n))
return qval
x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
x = x.sort_values(by=[group_col, block_col], ascending=True) if sort else x
x.dropna(inplace=True)
groups = x[group_col].unique()
k = groups.size
n = x[block_col].unique().size
x['mat'] = x.groupby(block_col)[y_col].rank()
R = x.groupby(group_col)['mat'].mean()
vs = np.zeros((k, k), dtype=float)
combs = it.combinations(range(k), 2)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:, :] = 0
for i, j in combs:
vs[i, j] = compare_stats(i, j)
vs = vs ** 2
vs = ss.chi2.sf(vs, k - 1)
vs[tri_lower] = np.transpose(vs)[tri_lower]
np.fill_diagonal(vs, 1)
return DataFrame(vs, index=groups, columns=groups)
def posthoc_durbin(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
'''Pairwise post hoc test for multiple comparisons of rank sums according to
Durbin and Conover for a two-way balanced incomplete block design (BIBD). See
references for additional information [1]_, [2]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
If `melted` is set to False (default), `a` is a typical matrix of block design,
i.e. rows are blocks, and columns are groups. In this case you do
not need to specify col arguments.
If `a` is an array and `melted` is set to True,
y_col, block_col and group_col must specify the indices of columns
        containing elements of the corresponding type.
If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify column names (strings).
y_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains y data.
block_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains blocking factor values.
group_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains treatment (group) factor values.
melted : bool, optional
Specifies if data are given as melted columns "y", "blocks", and
"groups".
sort : bool, optional
If True, sort data by block and group columns.
p_adjust : str, optional
Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
Returns
-------
Pandas DataFrame containing p values.
References
----------
.. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
.. [2] W. J. Conover (1999), Practical nonparametric Statistics,
3rd. edition, Wiley.
Examples
--------
>>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
>>> sp.posthoc_durbin(x)
'''
x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
def compare_stats(i, j):
dif = np.abs(rj[groups[i]] - rj[groups[j]])
tval = dif / denom
pval = 2. * ss.t.sf(np.abs(tval), df=df)
return pval
x = x.sort_values(by=[block_col, group_col], ascending=True) if sort else x
x.dropna(inplace=True)
groups = x[group_col].unique()
t = len(groups)
b = x[block_col].unique().size
r = b
k = t
x['y_ranked'] = x.groupby(block_col)[y_col].rank()
rj = x.groupby(group_col)['y_ranked'].sum()
A = (x['y_ranked'] ** 2).sum()
C = (b * k * (k + 1) ** 2) / 4.
D = (rj ** 2).sum() - r * C
T1 = (t - 1) / (A - C) * D
denom = np.sqrt(((A - C) * 2 * r) / (b * k - b - t + 1) * (1 - T1 / (b * (k - 1))))
df = b * k - b - t + 1
vs = np.zeros((t, t), dtype=float)
combs = it.combinations(range(t), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:, :] = 0
for i, j in combs:
vs[i, j] = compare_stats(i, j)
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
vs[tri_lower] = np.transpose(vs)[tri_lower]
np.fill_diagonal(vs, 1)
return DataFrame(vs, index=groups, columns=groups)
def posthoc_anderson(a, val_col=None, group_col=None, midrank=True, sort=False, p_adjust=None):
'''Anderson-Darling Pairwise Test for k-samples. Tests the null hypothesis
that k-samples are drawn from the same population without having to specify
the distribution function of that population [1]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
midrank : bool, optional
Type of Anderson-Darling test which is computed. If set to True (default), the
midrank test applicable to continuous and discrete populations is performed. If
False, the right side empirical distribution is used.
sort : bool, optional
If True, sort data by block and group columns.
p_adjust : str, optional
Method for adjusting p values. See statsmodels.sandbox.stats.multicomp for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
Returns
-------
result : pandas DataFrame
P values.
References
----------
.. [1] F.W. Scholz, M.A. Stephens (1987), K-Sample Anderson-Darling Tests,
Journal of the American Statistical Association, Vol. 82, pp. 918-924.
Examples
--------
>>> x = np.array([[2.9, 3.0, 2.5, 2.6, 3.2], [3.8, 2.7, 4.0, 2.4], [2.8, 3.4, 3.7, 2.2, 2.0]])
>>> sp.posthoc_anderson(x)
'''
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
x = x.sort_values(by=[_group_col], ascending=True) if sort else x
groups = x[_group_col].unique()
k = groups.size
vs = np.zeros((k, k), dtype=float)
combs = it.combinations(range(k), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:, :] = 0
for i, j in combs:
vs[i, j] = ss.anderson_ksamp([x.loc[x[_group_col] == groups[i], _val_col], x.loc[x[_group_col] == groups[j], _val_col]], midrank=midrank)[2]
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
vs[tri_lower] = np.transpose(vs)[tri_lower]
np.fill_diagonal(vs, 1)
return DataFrame(vs, index=groups, columns=groups)
def posthoc_quade(a, y_col=None, block_col=None, group_col=None, dist='t', melted=False, sort=False, p_adjust=None):
'''Calculate pairwise comparisons using Quade's post hoc test for
unreplicated blocked data. This test is usually conducted if significant
results were obtained by the omnibus test [1]_, [2]_, [3]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
If `melted` is set to False (default), `a` is a typical matrix of
block design, i.e. rows are blocks, and columns are groups. In this
case you do not need to specify col arguments.
If `a` is an array and `melted` is set to True,
y_col, block_col and group_col must specify the indices of columns
        containing elements of the corresponding type.
If `a` is a Pandas DataFrame and `melted` is set to True,
        y_col, block_col and group_col must specify column names (strings).
y_col : str or int, optional
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains y data.
block_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains blocking factor values.
group_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains treatment (group) factor values.
    dist : str, optional
        Method for determining p values. Either "t" (Student's t
        distribution, the default) or "normal".
melted : bool, optional
Specifies if data are given as melted columns "y", "blocks", and
"groups".
sort : bool, optional
If True, sort data by block and group columns.
p_adjust : str, optional
Method for adjusting p values.
See statsmodels.sandbox.stats.multicomp for details.
Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
Returns
-------
Pandas DataFrame containing p values.
References
----------
.. [1] W. J. Conover (1999), Practical nonparametric Statistics, 3rd. Edition,
Wiley.
.. [2] N. A. Heckert and J. J. Filliben (2003). NIST Handbook 148: Dataplot
Reference Manual, Volume 2: Let Subcommands and Library Functions.
National Institute of Standards and Technology Handbook Series, June 2003.
.. [3] D. Quade (1979), Using weighted rankings in the analysis of complete
blocks with additive block effects. Journal of the American Statistical
Association, 74, 680-683.
Examples
--------
>>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
>>> sp.posthoc_quade(x)
'''
def compare_stats_t(i, j):
dif = np.abs(S[groups[i]] - S[groups[j]])
tval = dif / denom
pval = 2. * ss.t.sf(np.abs(tval), df=(b - 1) * (k - 1))
return pval
def compare_stats_norm(i, j):
dif = np.abs(W[groups[i]] * ff - W[groups[j]] * ff)
zval = dif / denom
pval = 2. * ss.norm.sf(np.abs(zval))
return pval
x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
x = x.sort_values(by=[block_col, group_col], ascending=True) if sort else x
x.dropna(inplace=True)
groups = x[group_col].unique()
k = len(groups)
b = x[block_col].unique().size
x['r'] = x.groupby(block_col)[y_col].rank()
q = (x.groupby(block_col)[y_col].max() - x.groupby(block_col)[y_col].min()).rank()
x['rr'] = x['r'] - (k + 1)/2
x['s'] = x.apply(lambda row: row['rr'] * q[row[block_col]], axis=1)
x['w'] = x.apply(lambda row: row['r'] * q[row[block_col]], axis=1)
A = (x['s'] ** 2).sum()
S = x.groupby(group_col)['s'].sum()
B = np.sum(S ** 2) / b
W = x.groupby(group_col)['w'].sum()
vs = np.zeros((k, k), dtype=float)
combs = it.combinations(range(k), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:, :] = 0
if dist == 't':
denom = np.sqrt((2 * b * (A - B)) / ((b - 1) * (k - 1)))
for i, j in combs:
vs[i, j] = compare_stats_t(i, j)
else:
n = b * k
denom = np.sqrt((k * (k + 1.) * (2. * n + 1.) * (k-1.)) / (18. * n * (n + 1.)))
ff = 1. / (b * (b + 1.)/2.)
for i, j in combs:
vs[i, j] = compare_stats_norm(i, j)
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
vs[tri_lower] = np.transpose(vs)[tri_lower]
np.fill_diagonal(vs, 1)
return DataFrame(vs, index=groups, columns=groups)
def posthoc_vanwaerden(a, val_col=None, group_col=None, sort=False, p_adjust=None):
'''Van der Waerden's test for pairwise multiple comparisons between group
levels. See references for additional information [1]_, [2]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
sort : bool, optional
If True, sort data by block and group columns.
p_adjust : str, optional
Method for adjusting p values.
See statsmodels.sandbox.stats.multicomp for details.
Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
For one-factorial designs with samples that do not meet the assumptions for
    one-way ANOVA and subsequent post hoc tests, the van der Waerden test using
normal scores can be employed. Provided that significant differences were
detected by this global test, one may be interested in applying post hoc
tests according to van der Waerden for pairwise multiple comparisons of the
group levels.
There is no tie correction applied in this function.
References
----------
.. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
.. [2] B. L. van der Waerden (1952) Order tests for the two-sample problem and
their power, Indagationes Mathematicae, 14, 453-458.
Examples
--------
>>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
>>> sp.posthoc_vanwaerden(x, val_col = 0, group_col = 1)
'''
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
x = x.sort_values(by=[_group_col], ascending=True) if sort else x
groups = x[_group_col].unique()
n = x[_val_col].size
k = groups.size
r = ss.rankdata(x[_val_col])
x['z_scores'] = ss.norm.ppf(r / (n + 1))
aj = x.groupby(_group_col)['z_scores'].sum()
nj = x.groupby(_group_col)['z_scores'].count()
s2 = (1. / (n - 1.)) * (x['z_scores'] ** 2.).sum()
sts = (1. / s2) * np.sum(aj ** 2. / nj)
A = aj / nj
vs = np.zeros((k, k), dtype=float)
combs = it.combinations(range(k), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:, :] = 0
def compare_stats(i, j):
dif = np.abs(A[groups[i]] - A[groups[j]])
B = 1. / nj[groups[i]] + 1. / nj[groups[j]]
tval = dif / np.sqrt(s2 * (n - 1. - sts)/(n - k) * B)
pval = 2. * ss.t.sf(np.abs(tval), df=n - k)
return pval
for i, j in combs:
vs[i, j] = compare_stats(i, j)
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
vs[tri_lower] = np.transpose(vs)[tri_lower]
np.fill_diagonal(vs, 1)
return DataFrame(vs, index=groups, columns=groups)
def posthoc_ttest(a, val_col=None, group_col=None, pool_sd=False, equal_var=True, p_adjust=None, sort=False):
'''Pairwise T test for multiple comparisons of independent groups. May be
used after a parametric ANOVA to do pairwise comparisons.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame. Array must be two-dimensional.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
equal_var : bool, optional
        If True (default), perform a standard independent two-sample t-test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
pool_sd : bool, optional
Calculate a common SD for all groups and use that for all
comparisons (this can be useful if some groups are small).
        This method does not actually call scipy's ttest_ind() function,
so extra arguments are ignored. Default is False.
p_adjust : str, optional
Method for adjusting p values.
See statsmodels.sandbox.stats.multicomp for details.
Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by group_col or not. Recommended
unless you sort your data manually.
Returns
-------
    result : pandas DataFrame
        P values.
References
----------
.. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test
Examples
--------
>>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
>>> sp.posthoc_ttest(x, p_adjust = 'holm')
'''
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
x = x.sort_values(by=[_group_col], ascending=True) if sort else x
groups = x[_group_col].unique()
k = groups.size
xg = x.groupby(by=_group_col)[_val_col]
vs = np.zeros((k, k), dtype=float)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:, :] = 0
combs = it.combinations(range(k), 2)
if pool_sd:
ni = xg.count()
m = xg.mean()
sd = xg.std(ddof=1)
deg_f = ni - 1.
total_deg_f = np.sum(deg_f)
pooled_sd = np.sqrt(np.sum(sd ** 2. * deg_f) / total_deg_f)
def compare_pooled(i, j):
diff = m.iloc[i] - m.iloc[j]
se_diff = pooled_sd * np.sqrt(1. / ni.iloc[i] + 1. / ni.iloc[j])
t_value = diff / se_diff
return 2. * ss.t.cdf(-np.abs(t_value), total_deg_f)
for i, j in combs:
vs[i, j] = compare_pooled(i, j)
else:
for i, j in combs:
vs[i, j] = ss.ttest_ind(xg.get_group(groups[i]), xg.get_group(groups[j]), equal_var=equal_var)[1]
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
vs[tri_lower] = np.transpose(vs)[tri_lower]
np.fill_diagonal(vs, 1)
return DataFrame(vs, index=groups, columns=groups)
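# Hedged sketch of pool_sd for posthoc_ttest: a single pooled SD is computed
# across all groups and reused for every pairwise comparison, which can help
# when some groups are small. Toy data only:
#
#   >>> x = [[1, 2, 3, 5, 1], [12, 31, 54], [10, 12, 6, 74, 11]]
#   >>> posthoc_ttest(x, pool_sd=True, p_adjust='holm')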
def posthoc_tukey_hsd(x, g, alpha=0.05):
'''Pairwise comparisons with TukeyHSD confidence intervals. This is a
convenience function to make statsmodels `pairwise_tukeyhsd` method more
applicable for further use.
Parameters
----------
x : array_like or pandas Series object, 1d
An array, any object exposing the array interface, containing dependent
variable values (test or response variable). Values should have a
non-nominal scale. NaN values will cause an error (please handle
manually).
g : array_like or pandas Series object, 1d
An array, any object exposing the array interface, containing
independent variable values (grouping or predictor variable). Values
should have a nominal scale (categorical).
alpha : float, optional
Significance level for the test. Default is 0.05.
Returns
-------
result : pandas DataFrame
DataFrame with 0, 1, and -1 values, where 0 is False (not significant),
1 is True (significant), and -1 is for diagonal elements.
Examples
--------
>>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
>>> g = [['a'] * 5, ['b'] * 5, ['c'] * 5]
>>> sp.posthoc_tukey_hsd(np.concatenate(x), np.concatenate(g))
'''
    result = pairwise_tukeyhsd(x, g, alpha=alpha)
groups = np.array(result.groupsunique, dtype=str)
groups_len = len(groups)
vs = np.zeros((groups_len, groups_len), dtype=int)
for a in result.summary()[1:]:
a0 = str(a[0])
a1 = str(a[1])
a0i = np.where(groups == a0)[0][0]
a1i = np.where(groups == a1)[0][0]
vs[a0i, a1i] = 1 if str(a[-1]) == 'True' else 0
vsu = np.triu(vs)
    np.fill_diagonal(vsu, -1)
tri_lower = np.tril_indices(vsu.shape[0], -1)
vsu[tri_lower] = np.transpose(vsu)[tri_lower]
return DataFrame(vsu, index=groups, columns=groups)
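# Interpretation sketch (illustrative) for posthoc_tukey_hsd: off-diagonal
# cells are 1 where Tukey HSD flags the pair as significant at `alpha`,
# 0 otherwise, and the diagonal is -1:
#
#   >>> x = [[1, 2, 3, 4, 5], [35, 31, 75, 40, 21], [10, 6, 9, 6, 1]]
#   >>> g = [['a'] * 5, ['b'] * 5, ['c'] * 5]
#   >>> posthoc_tukey_hsd(np.concatenate(x), np.concatenate(g))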
def posthoc_mannwhitney(a, val_col=None, group_col=None, use_continuity=True, alternative='two-sided', p_adjust=None, sort=True):
'''Pairwise comparisons with Mann-Whitney rank test.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame. Array must be two-dimensional.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
use_continuity : bool, optional
Whether a continuity correction (1/2.) should be taken into account.
Default is True.
alternative : ['two-sided', 'less', or 'greater'], optional
Whether to get the p-value for the one-sided hypothesis
('less' or 'greater') or for the two-sided hypothesis ('two-sided').
Defaults to 'two-sided'.
p_adjust : str, optional
Method for adjusting p values.
See statsmodels.sandbox.stats.multicomp for details.
Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by group_col or not. Recommended
unless you sort your data manually.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
Refer to `scipy.stats.mannwhitneyu` reference page for further details.
Examples
--------
>>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
>>> sp.posthoc_mannwhitney(x, p_adjust = 'holm')
'''
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
x = x.sort_values(by=[_group_col, _val_col], ascending=True) if sort else x
groups = x[_group_col].unique()
x_len = groups.size
vs = np.zeros((x_len, x_len))
xg = x.groupby(_group_col)[_val_col]
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:, :] = 0
combs = it.combinations(range(x_len), 2)
for i, j in combs:
vs[i, j] = ss.mannwhitneyu(
xg.get_group(groups[i]),
xg.get_group(groups[j]),
use_continuity=use_continuity,
alternative=alternative)[1]
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
vs[tri_lower] = np.transpose(vs)[tri_lower]
np.fill_diagonal(vs, 1)
return DataFrame(vs, index=groups, columns=groups)
def posthoc_wilcoxon(a, val_col=None, group_col=None, zero_method='wilcox', correction=False, p_adjust=None, sort=False):
'''Pairwise comparisons with Wilcoxon signed-rank test. It is a non-parametric
version of the paired T-test for use with non-parametric ANOVA.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame. Array must be two-dimensional.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
"pratt": Pratt treatment, includes zero-differences in the ranking
process (more conservative)
"wilcox": Wilcox treatment, discards all zero-differences
"zsplit": Zero rank split, just like Pratt, but spliting the zero rank
between positive and negative ones
correction : bool, optional
If True, apply continuity correction by adjusting the Wilcoxon rank
statistic by 0.5 towards the mean value when computing the z-statistic.
Default is False.
p_adjust : str, optional
Method for adjusting p values.
See statsmodels.sandbox.stats.multicomp for details.
Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by group_col and val_col or not.
Default is False.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
Refer to `scipy.stats.wilcoxon` reference page for further details.
Examples
--------
>>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
>>> sp.posthoc_wilcoxon(x)
'''
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
x = x.sort_values(by=[_group_col, _val_col], ascending=True) if sort else x
groups = x[_group_col].unique()
x_len = groups.size
vs = np.zeros((x_len, x_len))
xg = x.groupby(_group_col)[_val_col]
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:, :] = 0
combs = it.combinations(range(x_len), 2)
for i, j in combs:
vs[i, j] = ss.wilcoxon(
xg.get_group(groups[i]),
xg.get_group(groups[j]),
zero_method=zero_method,
correction=correction)[1]
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
vs[tri_lower] = np.transpose(vs)[tri_lower]
np.fill_diagonal(vs, 1)
return DataFrame(vs, index=groups, columns=groups)
def posthoc_scheffe(a, val_col=None, group_col=None, sort=False, p_adjust=None):
'''Scheffe's all-pairs comparisons test for normally distributed data with equal
    group variances. For all-pairs comparisons in a one-factorial layout with
    normally distributed residuals and equal variances, Scheffe's test can be
    performed after a parametric ANOVA [1]_, [2]_, [3]_.
A total of m = k(k-1)/2 hypotheses can be tested.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
sort : bool, optional
        If True, sort data by the group column.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
The p values are computed from the F-distribution.
References
----------
.. [1] J. Bortz (1993) Statistik für Sozialwissenschaftler. 4. Aufl., Berlin:
Springer.
.. [2] L. Sachs (1997) Angewandte Statistik, New York: Springer.
.. [3] H. Scheffe (1953) A Method for Judging all Contrasts in the Analysis
of Variance. Biometrika 40, 87-110.
Examples
--------
>>> import scikit_posthocs as sp
>>> import pandas as pd
>>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
>>> x = x.melt(var_name='groups', value_name='values')
>>> sp.posthoc_scheffe(x, val_col='values', group_col='groups')
'''
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
x = x.sort_values(by=[_group_col], ascending=True) if sort else x
groups = x[_group_col].unique()
x_grouped = x.groupby(_group_col)[_val_col]
ni = x_grouped.count()
xi = x_grouped.mean()
si = x_grouped.var()
n = ni.sum()
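    # pooled within-group (error) variance estimate across all k groups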
sin = 1. / (n - groups.size) * np.sum(si * (ni - 1.))
def compare(i, j):
dif = xi.loc[i] - xi.loc[j]
A = sin * (1. / ni.loc[i] + 1. / ni.loc[j]) * (groups.size - 1.)
f_val = dif ** 2. / A
return f_val
vs = np.zeros((groups.size, groups.size), dtype=float)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:, :] = 0
combs = it.combinations(range(groups.size), 2)
for i, j in combs:
vs[i, j] = compare(groups[i], groups[j])
vs[tri_lower] = np.transpose(vs)[tri_lower]
p_values = ss.f.sf(vs, groups.size - 1., n - groups.size)
np.fill_diagonal(p_values, 1)
return DataFrame(p_values, index=groups, columns=groups)
def posthoc_tamhane(a, val_col=None, group_col=None, welch=True, sort=False):
'''Tamhane's T2 all-pairs comparison test for normally distributed data with
unequal variances. Tamhane's T2 test can be performed for all-pairs
    comparisons in a one-factorial layout with normally distributed residuals
    but unequal group variances. A total of m = k(k-1)/2 hypotheses can be
tested. The null hypothesis is tested in the two-tailed test against the
alternative hypothesis [1]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
welch : bool, optional
        If True, use Welch's approximate solution for calculating the degrees
        of freedom. If False, the usual T2 approximation df = N - 2 is used.
sort : bool, optional
        If True, sort data by the group column.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
The p values are computed from the t-distribution and adjusted according to
Dunn-Sidak.
References
----------
.. [1] A.C. Tamhane (1979), A Comparison of Procedures for Multiple Comparisons of
Means with Unequal Variances. Journal of the American Statistical Association,
74, 471-480.
Examples
--------
>>> import scikit_posthocs as sp
>>> import pandas as pd
>>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
>>> x = x.melt(var_name='groups', value_name='values')
>>> sp.posthoc_tamhane(x, val_col='values', group_col='groups')
'''
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
x = x.sort_values(by=[_group_col], ascending=True) if sort else x
groups = x[_group_col].unique()
x_grouped = x.groupby(_group_col)[_val_col]
ni = x_grouped.count()
xi = x_grouped.mean()
si = x_grouped.var()
def compare(i, j):
dif = xi[i] - xi[j]
A = si[i] / ni[i] + si[j] / ni[j]
t_val = dif / np.sqrt(A)
if welch:
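            # Welch-Satterthwaite approximation of the degrees of freedom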
df = A ** 2. / (si[i] ** 2. / (ni[i] ** 2. * (ni[i] - 1.)) + si[j] ** 2. / (ni[j] ** 2. * (ni[j] - 1.)))
else:
# checks according to Tamhane (1979, p. 474)
ok1 = (9./10. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 10./9.)
ok2 = (9./10. <= (si[i] / ni[i]) / (si[j] / ni[j])) and\
((si[i] / ni[i]) / (si[j] / ni[j]) <= 10./9.)
ok3 = (4./5. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 5./4.) and\
(1./2. <= (si[i] / ni[i]) / (si[j] / ni[j])) and\
((si[i] / ni[i]) / (si[j] / ni[j]) <= 2.)
ok4 = (2./3. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 3./2.) and\
(3./4. <= (si[i] / ni[i]) / (si[j] / ni[j]))\
and ((si[i] / ni[i]) / (si[j] / ni[j]) <= 4./3.)
OK = any([ok1, ok2, ok3, ok4])
if not OK:
print("Sample sizes or standard errors are not balanced. T2 test is recommended.")
df = ni[i] + ni[j] - 2.
p_val = 2. * ss.t.sf(np.abs(t_val), df=df)
return p_val
vs = np.zeros((groups.size, groups.size), dtype=float)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:, :] = 0
combs = it.combinations(range(groups.size), 2)
for i, j in combs:
vs[i, j] = compare(groups[i], groups[j])
vs[tri_upper] = 1. - (1. - vs[tri_upper]) ** groups.size
vs[tri_lower] = np.transpose(vs)[tri_lower]
vs[vs > 1] = 1
np.fill_diagonal(vs, 1)
return DataFrame(vs, index=groups, columns=groups)
def posthoc_tukey(a, val_col: str = None,
group_col: str = None, sort: bool = False) -> DataFrame:
'''Performs Tukey's all-pairs comparisons test for normally distributed data
    with equal group variances. For all-pairs comparisons in a
    one-factorial layout with normally distributed residuals and equal variances,
Tukey's test can be performed. A total of m = k(k-1)/2 hypotheses can be
tested. The null hypothesis is tested in the two-tailed test against
the alternative hypothesis [1]_, [2]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
sort : bool, optional
        If True, sort data by the group column.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
The p values are computed from the Tukey-distribution.
References
----------
.. [1] L. Sachs (1997) Angewandte Statistik, New York: Springer.
.. [2] J. Tukey (1949) Comparing Individual Means in the Analysis of Variance,
Biometrics 5, 99-114.
Examples
--------
>>> import scikit_posthocs as sp
>>> import pandas as pd
>>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
>>> x = x.melt(var_name='groups', value_name='values')
>>> sp.posthoc_tukey(x, val_col='values', group_col='groups')
'''
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
x = x.sort_values(by=[_group_col], ascending=True) if sort else x
groups = x[_group_col].unique()
x_grouped = x.groupby(_group_col)[_val_col]
ni = x_grouped.count()
n = ni.sum()
xi = x_grouped.mean()
si = x_grouped.var()
sin = 1. / (n - groups.size) * np.sum(si * (ni - 1))
def compare(i, j):
dif = xi[i] - xi[j]
A = sin * 0.5 * (1. / ni.loc[i] + 1. / ni.loc[j])
q_val = dif / np.sqrt(A)
return q_val
vs = np.zeros((groups.size, groups.size), dtype=float)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:, :] = 0
combs = it.combinations(range(groups.size), 2)
for i, j in combs:
vs[i, j] = compare(groups[i], groups[j])
vs[tri_upper] = psturng(np.abs(vs[tri_upper]), groups.size, n - groups.size)
vs[tri_lower] = np.transpose(vs)[tri_lower]
np.fill_diagonal(vs, 1)
return DataFrame(vs, index=groups, columns=groups)
def posthoc_dscf(a, val_col=None, group_col=None, sort=False):
'''Dwass, Steel, Critchlow and Fligner all-pairs comparison test for a
one-factorial layout with non-normally distributed residuals. As opposed to
the all-pairs comparison procedures that depend on Kruskal ranks, the DSCF
test is basically an extension of the U-test as re-ranking is conducted for
each pairwise test [1]_, [2]_, [3]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
sort : bool, optional
        If True, sort data by the group column.
Returns
-------
    result : pandas DataFrame
        P values.
Notes
-----
The p values are computed from the Tukey-distribution.
References
----------
.. [1] Douglas, C. E., Fligner, A. M. (1991) On distribution-free multiple
comparisons in the one-way analysis of variance, Communications in
Statistics - Theory and Methods, 20, 127-139.
.. [2] Dwass, M. (1960) Some k-sample rank-order tests. In Contributions to
Probability and Statistics, Edited by: I. Olkin, Stanford: Stanford
University Press.
.. [3] Steel, R. G. D. (1960) A rank sum test for comparing all pairs of
treatments, Technometrics, 2, 197-207.
Examples
--------
>>> import scikit_posthocs as sp
>>> import pandas as pd
>>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
>>> x = x.melt(var_name='groups', value_name='values')
>>> sp.posthoc_dscf(x, val_col='values', group_col='groups')
'''
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
x = x.sort_values(by=[_group_col], ascending=True) if sort else x
groups = x[_group_col].unique()
x_grouped = x.groupby(_group_col)[_val_col]
n = x_grouped.count()
k = groups.size
def get_ties(x):
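        # tie-correction term: sum of (t^3 - t)/12 over groups of tied ranks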
t = x.value_counts().values
c = np.sum((t ** 3 - t) / 12.)
return c
def compare(i, j):
ni = n.loc[i]
nj = n.loc[j]
x_raw = x.loc[(x[_group_col] == i) | (x[_group_col] == j)].copy()
x_raw['ranks'] = x_raw.loc[:, _val_col].rank()
r = x_raw.groupby(_group_col)['ranks'].sum().loc[[j, i]]
u = np.array([nj * ni + (nj * (nj + 1) / 2),
nj * ni + (ni * (ni + 1) / 2)]) - r
u_min = np.min(u)
s = ni + nj
var = (nj*ni/(s*(s - 1.))) * ((s**3 - s)/12. - get_ties(x_raw['ranks']))
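        # standardized Mann-Whitney statistic, scaled by sqrt(2) so it can be
        # referred to the studentized range distribution below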
p = np.sqrt(2.) * (u_min - nj * ni / 2.) / np.sqrt(var)
return p
vs = np.zeros((k, k))
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
    vs[:, :] = 0
combs = it.combinations(range(k), 2)
for i, j in combs:
vs[i, j] = compare(groups[i], groups[j])
vs[tri_upper] = psturng(np.abs(vs[tri_upper]), k, np.inf)
vs[tri_lower] = np.transpose(vs)[tri_lower]
np.fill_diagonal(vs, 1)
return DataFrame(vs, index=groups, columns=groups)
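# Illustrative usage sketch (not part of the library; assumes the package is
# importable as scikit_posthocs). All the all-pairs tests above share the same
# calling convention on long-format data:
#
#   import pandas as pd
#   import scikit_posthocs as sp
#   df = pd.DataFrame({"values": [4, 5, 6, 10, 12, 11, 1, 2, 3],
#                      "groups": ["a"] * 3 + ["b"] * 3 + ["c"] * 3})
#   sp.posthoc_tukey(df, val_col="values", group_col="groups")
#
# Each call returns a symmetric DataFrame of pairwise p values with 1.0 on the
# diagonal, as built by the tri_upper/tri_lower logic above.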
|
"We say that revelation is not sealed but we often act as if our purses were."
Unitarians should tithe. Yes I said it. Let me say it again: Unitarians should give away 10% of their income.
Sound radical? Crazy? Controversial? If so maybe we should ask ourselves why.
Unitarianism is a radical liberal way of being religious. We say what matters is not what you believe but how you live your life. We've signed up to the Charter of Compassion which speaks about restoring compassion to the centre of religion and morality. Well, what do we think that means? What concretely does it mean to have a faith based on love and compassion?
What it means, amongst other things, is giving. Let me be clear that I'm not saying that people should give 10% of their income to their congregation. Between 1% and 5% I would think is OK. And the rest should be given to other charities and groups.
Unitarianism is not an easy religion. It should not be. It's a religion that asks you to transform your life to become rooted in deeper spirituality, love and justice (as any other religion worth its salt also does). It requires us to reject the values of materialism, violence and anger. One way we begin to do that is to give generously. Generosity is an essential spiritual practice.
|
#v1.0.1
import win32gui, win32con, sys, os
import dataIO
sp_json = "setpoints.json"
sens = 0
def set_spi(spi, value):
value = int(value)
try:
        cset = getattr(win32con, spi)
        win32gui.SystemParametersInfo(cset, Param=value, WinIni=1)
        return True
except WindowsError:
return False
def get_spi(spi):
try:
        cget = getattr(win32con, spi)
        value = win32gui.SystemParametersInfo(cget)
return value
except WindowsError:
return None
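# Illustrative usage (assumes a Windows host): get_spi('SPI_GETMOUSESPEED')
# returns the pointer speed as an int in 1..20, and set_spi('SPI_SETMOUSESPEED', 10)
# would set it back to the usual Windows default of 10.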
def checkValue(sens):
if sens.isdigit():
if int(sens) in range(1,21):
return True
else:
print ('setpoint out of range, 1...20')
return False
else:
print('Numbers only, 1...20')
return False
def writeSense(sens):
    if set_spi('SPI_SETMOUSESPEED', sens):  # mouse sensitivity ranges 1..20
        print('\nAdjusting sensitivity to {} was successful.'.format(sens))
else:
print ('Something went wrong while writing.')
if __name__ == '__main__':
    #Appdata/Roaming
    setpoints = {}
    APP_DIRNAME = 'SwitchMouseSensitivity'
    if not os.path.exists(os.path.join(os.environ['APPDATA'], APP_DIRNAME)):
        try:
            os.mkdir(os.path.join(os.environ['APPDATA'], APP_DIRNAME))
        except OSError:
            print('Error creating settings folder')
    ROAMING = os.environ['APPDATA'] + '\\' + APP_DIRNAME + '\\'
dataIO.loadAndCheckSettings(ROAMING, True)
setpoints = dataIO.fileIO(ROAMING+sp_json, "load")
#Get current sensitivity and save to json
setpoints["curReg"] = setpoints["curReg"] = get_spi('SPI_GETMOUSESPEED')
dataIO.fileIO(ROAMING+sp_json, "save", setpoints)
print ('Current mouse sensitivity : '+str(setpoints["curReg"]))
#Check for run parameter
    if len(sys.argv) > 1:  # a run parameter was given
        sens = sys.argv[1]
        if checkValue(sens):
            print('Changing the sensitivity by run parameter to: ' + sens)
            writeSense(sens)
            os.system('pause')
            sys.exit(0)
        os.system('pause')
        sys.exit(1)
#Read previous setpoint
if setpoints["lastSp"] == "sp2":
sens = setpoints["sp1"]
elif setpoints["lastSp"] == "sp1":
sens = setpoints["sp2"]
#Confirm switch setpoint
    if sens != 0:
print ('The setpoint before the previous one was {}, apply this value? y/n: '.format(sens), end='')
confirmInput = input()
else:
confirmInput = 'n'
if confirmInput == 'y' or confirmInput == '':
if setpoints["lastSp"] == "sp2":
sens = setpoints["sp1"]
setpoints["lastSp"] = setpoints["lastSp"] ='sp1'
dataIO.fileIO(ROAMING+sp_json, "save", setpoints)
elif setpoints["lastSp"] == "sp1":
sens = setpoints["sp2"]
setpoints["lastSp"] = setpoints["lastSp"] ='sp2'
dataIO.fileIO(ROAMING+sp_json, "save", setpoints)
elif confirmInput == 'n':
sens = input("Enter the preferred sensitivity : ")
        if checkValue(sens):
            if setpoints["lastSp"] == "sp2" and setpoints["sp2"] != '0':
                setpoints["sp1"] = sens
                setpoints["lastSp"] = 'sp1'
                dataIO.fileIO(ROAMING+sp_json, "save", setpoints)
                print('Setpoint {} is now registered'.format(sens))
            elif setpoints["lastSp"] == "sp1" and setpoints["sp1"] != '0':
                setpoints["sp2"] = sens
                setpoints["lastSp"] = 'sp2'
                dataIO.fileIO(ROAMING+sp_json, "save", setpoints)
                print('Setpoint {} is now registered'.format(sens))
else:#should be displayed only the first two sp changes
print('Setpoint {} is now registered'.format(sens))
writeSense(sens)
dataIO.fileIO(ROAMING+sp_json, "save", setpoints)
os.system('pause')
|
An evocative picture book perfect for introducing the classic tale of ‘Heidi’ to younger children.
Orphaned Heidi is sent to live with her grumpy grandfather in the Alps, but soon grows to love the mountains. When she is taken to Frankfurt, her homesickness and a new friendship with a disabled girl called Clara lead to an extraordinary turn of events.
With simple and engaging text accompanied by beautiful illustrations, this is sure to become a firm favourite.
|
import json, os, sys, shutil
toVerify = ["config.json", "src/", "src/imgs/", "src/section.html", "src/style.css", "src/template.html"]
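# Expected shape of config.json, inferred from the generation code below:
#   {"title": ..., "name": ...,
#    "projects": {<name>: {"color": ..., "about": [...],
#                          "links": {<text>: <url>}, "filename": ...}},
#    "about": {"title": ..., "text": [...],
#              "contact": {<service>: {"link": ..., "account": ...}}}}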
print("Verifying everything is good...\n")
# Making sure that every file needed is here
for v in toVerify:
if os.path.exists(v):
print("File " + v + " found!")
else:
print("File " + v + " is missing!")
print("Please refer to https://github.com/thejumono/ResumeGen for more information.")
sys.exit(1)
# Removing an old build directory if there's one.
if os.path.exists("build/"):
print("\nRemoving the last build.")
shutil.rmtree("build/")
print("Creating temporary folder...")
try:
    os.mkdir("build")
    print("Created successfully!")
except OSError:
    print("Couldn't create temporary folder!")
    print("Check the folder permission...")
    sys.exit(1)
print("\nGenerating your resume page...\n")
json_file = open("config.json", "r")
parsed_json = json.loads(json_file.read())
json_file.close()
template_html = open("src/template.html", "r").read()
# Basically generate the whole HTML file.
try:
print("\tRenaming some lines...")
template_html = template_html.replace("{title}", parsed_json["title"])
template_html = template_html.replace("{name}", parsed_json["name"])
section_container = "";
sections = 0;
print("\tCreating the sections:\n")
for p in parsed_json["projects"]:
print("\t\tGenerating the " + p + " section!")
section_file = open("src/section.html", "r")
section_template = section_file.read()
section_file.close()
print("\t\tSetting the section name.")
section_template = section_template.replace("{section-name}", p)
print("\t\tAdding some color to it.")
project_data = parsed_json["projects"][p]
section_template = section_template.replace("{section-background-color}", project_data["color"])
if sections % 2 == 1:
section_template = section_template.replace("{class}", 'class="reversed"')
else:
section_template = section_template.replace("{class}", '')
print("\t\tGenerating some text and links.")
section_about = "";
for a in project_data["about"]:
section_about += "<p>" + a + "</p>\n\t\t\t\t\t"
for l in project_data["links"]:
section_about = section_about.replace("<" + l + ">", '<a href="' + project_data["links"][l] + '">' + l + "</a>")
print("\t\tAdding the text and links.")
section_template = section_template.replace("{section-text}", section_about)
print("\t\tSetting the picture...")
if os.path.exists("src/imgs/" + project_data["filename"]):
print("\t\tThe " + project_data["filename"] + "file was found, setting it.")
else:
print("\t\tThe file " + project_data["filename"] + " wasn't found!")
print("\t\tThis won't cause any errors, but you should fix it manually.")
section_template = section_template.replace("{section-image-name}", project_data["filename"])
print("\t\tThe section " + p + " was generated succesfully!\n")
section_container += section_template + "\n"
sections += 1
print("\tAdding the sections to the template!")
template_html = template_html.replace("{section-container}", section_container)
print("\tEditing the \"about me\" section.\n")
about_info = parsed_json["about"]
print("\t\tAdding a title.")
template_html = template_html.replace("{about-me-title}", about_info["title"])
print("\t\tAdding some text.")
about_text = ""
for t in about_info["text"]:
about_text += "<p>" + t + "</p>\n\t\t\t\t\t"
print("\t\tAdding the contact links.")
contact_links = ""
for c in about_info["contact"]:
contact_links += "<p><a href=\"" + about_info["contact"][c]["link"] + "\">" + about_info["contact"][c]["account"] + "</a> (" + c + ")</p>\n\t\t\t\t\t"
print("\t\tAdding the text and links to the template.\n")
template_html = template_html.replace("{about-me-text}", about_text)
template_html = template_html.replace("{contact-links}", contact_links)
except Exception:
print("Something went horribly wrong!")
print("Make sure that your config.json is correct.")
sys.exit(1)
print("\tHTML Generation is done!\n")
print("Exporting everything to the build folder.")
try:
    index_html = open("build/index.html", "w")
    index_html.write(template_html)
    index_html.close()
except OSError:
    print("An error happened when writing the index.html file!")
    print("Check if you have the correct permissions.")
    sys.exit(1)
print("index.html generated...")
print("copying style.css")
try:
shutil.copy2("src/style.css", "build/")
except OSError:
print("An error happened when copying the style.css file!")
print("Check if you have the correct permissions.")
sys.exit(1)
print("style.css copied!")
print("Copying images...")
try:
os.mkdir("build/imgs")
for img in os.listdir("src/imgs/"):
shutil.copy2("src/imgs/" + img, "build/imgs/")
except OSError:
print("An error happened when copying images!")
print("Check if you have the correct permissions.")
sys.exit(1)
print("Images copied!\n")
print("Your new resume has been exported at " + os.getcwd() + "/build/\n")
print("Simply copy its content to your web server and it should work without any problem.")
print("Thank you for using ResumeGen!")
print("By @BestGirlZoey")
|
Risotto is one of the staples of northern Italian cooking, nowhere more so than in Lombardy. Risotto alla milanese is the dish that perhaps best typifies the cooking of Milan, the capital of Lombardy region and the economic and financial capital of Italy.
This risotto follows the classic method for making risotto, which we have gone over before, but it has two defining ingredients that give it its special flavor and character. As many of you will already know, it is flavored with saffron—which gives it a beautiful gold color—but perhaps fewer people know that in a true risotto alla milanese, the soffritto must include beef marrow. The marrow lends a beefy background flavor to the dish, as well as a subtle richness and unctuousness. The ingredient is so characteristic of the dish that I have seen this risotto listed on menus in Milan as risotto al midollo, the word ‘midollo‘ being Italian for bone marrow.
1 liter (1 qt.) broth, or q.b.
Make a soffritto by sautéing the onion and marrow in the butter. When the marrow has melted and the onion is well wilted, add the rice and let it ‘toast’ in the soffritto without browning. Add a splash of white wine and let it evaporate.
Proceed with the risotto in the usual way—with one little catch: While the rice is simmering, take your saffron and simmer it very gently with a ladleful of broth so it releases its flavor and color into the broth. Then, either about halfway through the cooking process, or if you prefer a more assertive flavor, about 5 minutes before the rice is done, add the saffron and its broth into the risotto. Continue to cook as usual.
Risotto alla milanese is usually served all’onda, which is to say, rather more loose than firm, so begin the final enrichment, called the mantecatura in Italian, while the rice is still fairly brothy. Add the cheese and, if you like, a dab of butter for extra richness (never cream!) to the rice, then stir to toss the rice around vigorously for about two minutes. (Skilled risotto chefs are a marvel to watch as they work the rice; they toss the rice into the air while they stir—not something I’ve tried at home, though.) If you like your risotto a bit firmer, let it sit, covered, for a minute or two before serving.
To prepare bone marrow for use in risotto, I like to simmer the marrow bones for just a minute or two to loosen the marrow a bit. Then, using a small spoon or knife, scrape out the marrow from the bone. Depending on how long you’ve simmered the bone, the marrow may also just slip out on its own.
Now for those of you who may be a bit squeamish about bone marrow, it can be left out of the dish, adding perhaps some additional butter during the mantecatura to make up for it. What you will have made is more properly called risotto allo zafferano rather than a true risotto alla milanese—but it will still be delicious!
Saffron, as we all know, is very expensive, but a little goes a long way. It is sold in threads (the stigma of the crocus flower) and also ground into powder. Avoid the powder if you can. It melts more easily but it tends to have a rather faded flavor. A powder also allows for extraneous ‘filler’ or substitute ingredients. With the threads, you can be sure you are getting the real thing.
As for the rice, we’ve covered the three main varieties before: arborio, the most common and usually least expensive of the three, will work fine, but for this dish I would prefer Carnaroli, although many recipes call for vialone nano, which is an excellent risotto rice from the Veneto. For details, check out my post on the Italian Pantry.
There are some subtle variations to this dish you can try as you like: The broth is usually a rich beef broth, but you can equally try a mixed meat broth, a chicken broth or a vegetable broth. Some cooks use shallots rather than onion, a variation that I find particularly nice. And some avoid making a soffritto, putting the shallot or onion into the simmering rice and removing it before serving, which they claim makes the risotto lighter. The famed Milanese chef Gualtiero Marchesi calls for making the onion soffritto separately, adding white wine to it, and allowing the onion to simmer until creamy. The onion cream is added at the very end of cooking to add ‘acidity and aroma’. An intriguing idea, but one that I have not yet tried.
Risotto alla milanese is perhaps most commonly served together with ossobuco, as a piatto unico—one of the very few examples of rice being a ‘side dish’ in traditional Italian cookery. But it is equally good as a primo, followed, say, by a cotoletta alla milanese or perhaps a brasato.
Thanks, again, folks! I hadn't even noticed the Daily Special until you pointed it out.
I mean, congrats on the Daily Special feature!
Beautiful, all around. The picture, the recipe, the story. And congrats on the Top 9!
Thanks, guys! As always, your readership and feedback is much appreciated.
@Simona: What a lovely blog you have! I've added it to my blogroll.
@bella: Thanks for stopping by. First time around these parts?
Great instructions and lovely picture.
Thank you for sharing this delicious dish! I would have never known that you can use marrow in risotto. Great recipe!
I imagine the marrow makes all the difference. What a gorgeous plate of risotto!
I belong to the squeamish party. But I agree that a well-prepared risotto alla milanese is a thing of beauty, of satisfying flavor and of pleasant texture.
Aha! Bone marrow, eh? Interesting read! Thanks for sharing.
what a gorgeous risotto! Thanks for sharing the recipe.
Ah-hah! Brilliant – looks so perfect. Gordon Ramsey would be drooling.
Bone marrow is not new to me but using it in risotto definitely is. I know I would love this. Beautiful color from the saffron, too.
very nice instructions and with the marrow, this must be the best risotto ever….
Risotto equals comfort food to me. I've been craving it for weeks now and to find the risotto from my favorite part of Italy in front of me; well, now I have to make it.
Once again, as always. many thanks for sharing this amazing recipe.
The saffron makes the Risotto alla Milanese such a beautiful color! I didn't know that risotto in Milan usually involves bone marrow. I imagine it adds a wonderful flavor and richness!
Thanks for sharing useful info about risotto. I love rice and would love this. Would want to try cooking it one of these days.
What is perfect – is when I decide I want to make a risotto next weekend – here you are. In fact – a risotto alla milanese is what I planned. And after being away from the blogging world for three days, I turn on my computer and serendipity? Fate? I've never made with the marrow. I shall. Now I feel like it's destiny.
|
"""
Various useful stuff.
For now there are some wrappers to pickle objects.
"""
import sys
import os
import glob
import gzip
import cPickle as pickle
import numpy as np
import math, datetime
import pycs.gen.lc
tracei = 1 # global variable, filename to write trace pkl.
def writepickle(obj, filepath, verbose=True, protocol = -1):
"""
I write your python object obj into a pickle file at filepath.
If filepath ends with .gz, I'll use gzip to compress the pickle.
Leave protocol = -1 : I'll use the latest binary protocol of pickle.
"""
if os.path.splitext(filepath)[1] == ".gz":
pkl_file = gzip.open(filepath, 'wb')
else:
pkl_file = open(filepath, 'wb')
pickle.dump(obj, pkl_file, protocol)
pkl_file.close()
if verbose: print "Wrote %s" % filepath
def readpickle(filepath, verbose=True):
"""
I read a pickle file and return whatever object it contains.
If the filepath ends with .gz, I'll unzip the pickle file.
"""
if os.path.splitext(filepath)[1] == ".gz":
pkl_file = gzip.open(filepath,'rb')
else:
pkl_file = open(filepath, 'rb')
obj = pickle.load(pkl_file)
pkl_file.close()
if verbose: print "Read %s" % filepath
return obj
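# Illustrative round trip (not from the original module); the ".gz" extension
# alone decides whether gzip compression is used:
#   writepickle({"a": 1}, "obj.pkl.gz")
#   obj = readpickle("obj.pkl.gz")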
def oldwritepickle(obj, filepath):
"""
DO NOT USE ME ANYMORE
    Simplistic wrapper around pickle, to write an object into a file, using cPickle.
@type obj: object
@param obj: e.g. a dict of lightcurves
@type filepath: string
@param filepath: filename or path to write
"""
output = open(filepath, 'wb')
pickle.dump(obj, output)
output.close()
print "Wrote %s" % filepath
def oldreadpickle(filepath):
"""
DO NOT USE ME ANYMORE
Reads a pickle and returns it.
@type filepath: string
@param filepath: filename or path to read
@rtype: object
@return: whatever was in that pickle
"""
pkl_file = open(filepath, 'rb')
obj = pickle.load(pkl_file)
pkl_file.close()
print "Read %s" % filepath
return obj
def readidlist(filepath, verbose=True):
"""
    Reads a textfile with "one point per line", probably a skiplist.
Accepts blank lines, and lines starting with # will not be read.
Format of the lines is : id [comment]
If this is a skiplist, id is a MJD.
"""
if not os.path.exists(filepath):
raise RuntimeError("File does not exist : %s" % (filepath))
myfile = open(filepath, "r")
lines = myfile.readlines()
    myfile.close()
    table = []
for line in lines:
if line[0] == '#' or len(line) < 4:
continue
if len(line.split()) > 1:
id = line.split()[0]
comment = line.split(None, 1)[1:][0].rstrip('\n')
else:
id = line.split()[0]
comment = ""
table.append([id, comment])
if verbose:
print "I've read %i lines from %s" % (len(table), os.path.basename(filepath))
return table
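# Illustrative skiplist content (assumed format, one point per line):
#   # comments and blank lines are ignored
#   55260.3764  bad seeing
# For this file, readidlist() returns [["55260.3764", "bad seeing"]].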
def trace(lclist=[], splist=[], tracedir = "trace", level="Full"):
"""
Function to save a "trace" of processes modifying lightcurves and splines, like optimizers do.
Just call this from inside your loop etc, I will save your current lightcurves and splines into a pickle inside the tracedir.
I increment the filenames.
The argument "level" is about what should be saved.
level = "Full" : Everything you give me is saved in the pickle. Now this is large ...
level = "Light" : I try to reduce filesize of the pickle, by removing the splines datapoints etc. You can still plot these objects.
"""
if not os.path.exists(tracedir):
os.mkdir(tracedir)
global tracei
filepath = os.path.join(tracedir, "%06i.pkl" % (tracei))
if os.path.exists(filepath):
raise RuntimeError("Sorry, I don't want to overwrite the existing trace inside '%s'." % (tracedir))
now = datetime.datetime.now()
writepickle({"lclist":lclist, "splist":splist, "datetime":now}, filepath, verbose=True, protocol = -1)
tracei += 1
def plottrace(tracedir = "trace", reset=False, showspl=True, **kwargs):
"""
Turns a trace into plots ...
reset = True : I will remove all shifts/ML etc, just show the real "observations".
kwargs are passed to the display function.
"""
tracepkls = glob.glob(os.path.join(tracedir, "??????.pkl"))
def plot(tracepkl):
pkl = readpickle(tracepkl, verbose=True)
if reset:
for l in pkl["lclist"]:
l.timeshift = 0.0
l.magshift = 0.0
l.fluxshift = 0.0
l.ml = None
if not showspl:
pkl["splist"] = []
#pycs.gen.lc.display(pkl["lclist"], pkl["splist"], title = pkl["datetime"], filename=tracepkl+".png", **kwargs)
# hmm, ugly datetime ...
shiftstxt = "(%s)" % "/".join(["%+.1f" % (getattr(l, "truetimeshift", 0.0)) for l in pkl["lclist"]])
titletxt = "%s %s %s" % (tracedir, "", shiftstxt)
pycs.gen.lc.display(pkl["lclist"], pkl["splist"], title = titletxt, filename=tracepkl+".png", **kwargs)
map(plot, tracepkls)
"""
if ncpu == 1:
map(plot, tracepkls)
else:
if ncpu == None:
ncpu = multiprocessing.cpu_count()
print "I will use %i CPUs." % (ncpu)
pool = multiprocessing.Pool(processes=ncpu)
answers = pool.map(plot, tracepkls)
"""
def multilcsexport(lclist, filepath, separator="\t", rdbunderline=True, verbose=True, properties=None):
"""
    Writes the lightcurves as flat ASCII files into one single file.
Normally you should prefer writing each lightcurve into a single file, using
:py:meth:`pycs.gen.lc.lightcurve.rdbexport`.
Note that only lightcurves of same length and sampling can be written with this function !
:param lclist: A list of lightcurve objects to write
:type lclist: list
:param filepath: where to write
:type filepath: string
    :param separator: how to separate the columns
:type separator: string
:param rdbunderline: do you want the "=====" underlining ?
:type rdbunderline: boolean
:param properties: properties of the lightcurves to include in the file.
:type properties: list of strings
.. todo:: We might need here an extra argument that specifies how to format the properties.
"""
import csv
# We start with a few tests to see if it is possible to write these lcs into a single file ...
commonjds = lclist[0].getjds()
lenlc = len(lclist[0])
for thislc in lclist:
thislc.validate() # Good idea to keep this here, as the code below is so ugly ...
if len(thislc) != lenlc:
print "First lightcurve has %i points" % len(commonjds)
raise RuntimeError, "Lightcurve %s has not the same length !" % str(thislc)
if not np.allclose(thislc.getjds(), commonjds, rtol=0.0, atol=1e-5):
raise RuntimeError, "Lightcurve %s has different epochs !" % str(thislc)
# Now we check the properties. At least a minimal check : they should be available for all the
# lightcurves.
    if properties is None:
properties = []
for property in properties:
for l in lclist:
if not property in l.commonproperties():
raise RuntimeError, "Lightcurve %s has no property %s" % (l, property)
# We also have to check that all those properties are identical for all lcs !
firstprops = [p[property] for p in lclist[0].properties]
for l in lclist:
if not firstprops == [p[property] for p in l.properties]:
raise RuntimeError("Properties not identical !")
# Ok, now we prepare the data to write into that file.
colnames = []
data = []
colnames.append("mhjd")
data.append(["%.5f" % commonjd for commonjd in commonjds])
for thislc in lclist:
        if verbose: print str(thislc)
colnames.append("mag_" + thislc.object)
#data.append(["%09.5f" % mag for mag in thislc.getmags()])
data.append(["%.5f" % mag for mag in thislc.getmags()])
colnames.append("magerr_" + thislc.object)
data.append(["%.5f" % magerr for magerr in thislc.magerrs])
# And now the properties
for property in properties:
values = [p[property] for p in lclist[0].properties]
colnames.append(property)
data.append(values)
# We put all this together :
datatransposed = zip(*data) # Yep !
rdbunderlines = ["="*len(colname) for colname in colnames]
if rdbunderline:
biglist = [colnames, rdbunderlines]
else:
biglist = [colnames]
biglist.extend(datatransposed)
# biglist now contains the file items line by line.
# we write the file
csvwriter = csv.writer(open(filepath, 'w'), delimiter=separator, quotechar='"', quoting=csv.QUOTE_MINIMAL)
csvwriter.writerows(biglist)
if verbose:
print "Wrote the lightcurves into %s" % filepath
def datetimefromjd(JD):
"""
    Copy and paste from cosmouline.
Can be of use here to plot lightcurves with nice dates.
Returns the Gregorian calendar (i.e. our "normal" calendar)
Based on wikipedia:de and the interweb :-)
:type JD: float
:param JD: julian date
:rtype: datetime object
:returns: corresponding datetime
"""
if JD < 0:
raise ValueError, 'Julian Day must be positive'
dayofwk = int(math.fmod(int(JD + 1.5),7))
(F, Z) = math.modf(JD + 0.5)
Z = int(Z)
if JD < 2299160.5:
A = Z
else:
alpha = int((Z - 1867216.25)/36524.25)
A = Z + 1 + alpha - int(alpha/4)
B = A + 1524
C = int((B - 122.1)/365.25)
D = int(365.25 * C)
E = int((B - D)/30.6001)
day = B - D - int(30.6001 * E) + F
nday = B-D-123
if nday <= 305:
dayofyr = nday+60
else:
dayofyr = nday-305
if E < 14:
month = E - 1
else:
month = E - 13
if month > 2:
year = C - 4716
else:
year = C - 4715
# a leap year?
leap = 0
if year % 4 == 0:
leap = 1
    if year % 100 == 0 and year % 400 != 0:
        leap = 0
if leap and month > 2:
dayofyr = dayofyr + leap
# Convert fractions of a day to time
(dfrac, days) = math.modf(day/1.0)
(hfrac, hours) = math.modf(dfrac * 24.0)
(mfrac, minutes) = math.modf(hfrac * 60.0)
seconds = round(mfrac * 60.0) # seconds are rounded
if seconds > 59:
seconds = 0
minutes = minutes + 1
if minutes > 59:
minutes = 0
hours = hours + 1
if hours > 23:
hours = 0
days = days + 1
return datetime.datetime(year,month,int(days),int(hours),int(minutes),int(seconds))
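# Example: datetimefromjd(2451545.0) returns datetime.datetime(2000, 1, 1, 12, 0)
# (the J2000.0 epoch).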
def flatten(x):
"""
Source : http://kogs-www.informatik.uni-hamburg.de/~meine/python_tricks
flatten(sequence) -> list
Returns a single, flat list which contains all elements retrieved
from the sequence and all recursively contained sub-sequences
(iterables).
Examples:
::
    >>> flatten([1, 2, [3,4], (5,6)])
    [1, 2, 3, 4, 5, 6]
>>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, MyVector(8,9,10)])
[1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]
"""
result = []
for el in x:
#if isinstance(el, (list, tuple)):
if hasattr(el, "__iter__") and not isinstance(el, basestring):
result.extend(flatten(el))
else:
result.append(el)
return result
def strtd(td):
"""
To print out time differences ...
Could be improved a bit :-)
"""
strdiff = str(td) # looks like 0:02:04.43353
return strdiff.split(".")[0]
def zipdirs(pattern = "rrs_*"):
"""
I will tgz all directories matching the pattern, except if the tgz already exists.
(handy to transfert multirun output to another computer ...)
"""
matches = sorted(glob.glob(pattern))
for match in matches:
if not os.path.isdir(match):
continue
if os.path.islink(match):
continue
zipdirpath = match + ".tgz"
if os.path.exists(zipdirpath):
continue
nfiles = len(glob.glob(os.path.join(match, "*")))
print "%s (%i) -> %s" % (match, nfiles, zipdirpath)
cmd = "tar zcvf %s %s" % (zipdirpath, match)
os.system(cmd)
|
Comforting in the difficulties of everyday life like an oasis in the middle of a desert, these lenses are truly the most progressive and nimble lenses currently manufactured by Johnson & Johnson. They are made of such smooth and thin material that they can not only be worn daily for 14 days, but also continuously for up to a week! These lenses are made of Senofilcon A – the most advanced silicone hydrogel on the market – and they contain HYDRACLEAR™ PLUS, the next generation of technology that creates a more wettable, smoother contact lens.
Taking the lens out can be a real problem every day for some people or in some situations, but the Acuvue Oasys lens enables continuous wear for up to 6 nights, which means that you can leave for a week's vacation without having to think about them!
|
from .. import Availability, Class, Constant, Define, Method, Parameter, Type
gx_class = Class('NGRD',
doc="Neargrid")
gx_defines = [
]
gx_methods = {
'': [
Method('_Clear_NGRD', module='geocslib', version='6.0.0',
availability=Availability.EXTENSION,
doc="Clears all the parameters in a NGRD object",
return_type=Type.VOID,
return_doc="Nothing",
parameters = [
Parameter('param0', type="NGRD",
doc="Handle to NGRD object (stores control parameters)")
]),
Method('Create_NGRD', module='geocslib', version='6.0.0',
availability=Availability.EXTENSION,
doc="Create a NGRD.",
return_type="NGRD",
return_doc="NGRD if OK (NULL if error)"),
Method('Destroy_NGRD', module='geocslib', version='6.0.0',
availability=Availability.EXTENSION,
doc="Destroy a NGRD.",
return_type=Type.VOID,
return_doc="Nothing",
parameters = [
Parameter('param0', type="NGRD",
doc="NGRD to destroy.")
]),
Method('iLoadParms_NGRD', module='geocslib', version='6.3.0',
availability=Availability.EXTENSION,
notes="""
If the control file name passed into this function is a file
which does not exist, then the defaults for a Neargrid control
file will be generated and put into the NGRD object.
Otherwise, the control file's settings are retrieved from
the file and loaded into the NGRD object.
""",
doc="Retrieves a Neargrid object's control parameters from a file.",
return_type=Type.INT32_T,
return_doc="0 OK, 1 Error.",
parameters = [
Parameter('param0', type="NGRD",
doc="NGRD to load parameter settings into"),
Parameter('param1', type=Type.STRING,
doc="Name of file to get the parameter settings from")
]),
Method('iRun_NGRD', module='geocslib', version='6.3.0',
availability=Availability.EXTENSION,
doc="""
Executes the Neargrid program, using the input channel and
output file parameters.
""",
return_type=Type.INT32_T,
return_doc="0 OK, 1 Error.",
parameters = [
Parameter('param0', type="NGRD",
doc="Handle to NGRD object (stores control parameters)"),
Parameter('param1', type=Type.STRING,
doc="Name of Z Channel to perfrom gridding on"),
Parameter('param2', type="DAT",
doc="Handle to source DAT object (from database)"),
Parameter('param3', type="DAT",
doc="Handle to output grid file DAT")
]),
Method('iSaveParms_NGRD', module='geocslib', version='6.3.0',
availability=Availability.EXTENSION,
notes="""
If the control file did not previously exist, it will be
created. Otherwise, the old file will be overwritten.
""",
doc="""
Puts the Neargrid object's control parameters back into
its control file.
""",
return_type=Type.INT32_T,
return_doc="0 OK, 1 Error.",
parameters = [
Parameter('param0', type="NGRD",
doc="NGRD object to get parameters from and put into the control file"),
Parameter('param1', type=Type.STRING,
doc="Name of file to put the parameter settings into")
])
]
}
|
Are you also interested in Hajipur speed dating but you cannot find a place in Hajipur to do this? If this is the case then you can easily visit our matchmaking website in Hajipur online and become a part of Hajipur speed dating in order to meet single guys as well as girls in Hajipur who are interested in relationships, dating and friendship. Even if you are not so much into speed dating in Hajipur, you can still visit the profiles on our website and use our chat service, which we provide for the purpose of friendship in Hajipur. The various difficulties of speed dating in Hajipur for singles have been reduced with our website in Hajipur city.
|
from sympy.core import (S, pi, oo, symbols, Function, Rational, Integer,
Tuple, Symbol)
from sympy.core import EulerGamma, GoldenRatio, Catalan, Lambda, Mul, Pow
from sympy.functions import (Piecewise, sqrt, ceiling, exp, sin, cos, LambertW,
sinc, Max, Min, arg, im, re)
from sympy.utilities.pytest import raises
from sympy.utilities.lambdify import implemented_function
from sympy.matrices import (eye, Matrix, MatrixSymbol, Identity,
HadamardProduct, SparseMatrix)
from sympy.functions.special.bessel import (jn, yn, besselj, bessely, besseli,
besselk, hankel1, hankel2, airyai,
airybi, airyaiprime, airybiprime)
from sympy.functions.special.gamma_functions import (lowergamma, uppergamma)
from sympy.utilities.pytest import XFAIL
from sympy.core.compatibility import range
from sympy import octave_code
from sympy import octave_code as mcode
x, y, z = symbols('x,y,z')
def test_Integer():
assert mcode(Integer(67)) == "67"
assert mcode(Integer(-1)) == "-1"
def test_Rational():
assert mcode(Rational(3, 7)) == "3/7"
assert mcode(Rational(18, 9)) == "2"
assert mcode(Rational(3, -7)) == "-3/7"
assert mcode(Rational(-3, -7)) == "3/7"
assert mcode(x + Rational(3, 7)) == "x + 3/7"
assert mcode(Rational(3, 7)*x) == "3*x/7"
def test_Function():
assert mcode(sin(x) ** cos(x)) == "sin(x).^cos(x)"
assert mcode(abs(x)) == "abs(x)"
assert mcode(ceiling(x)) == "ceil(x)"
assert mcode(arg(x)) == "angle(x)"
assert mcode(im(x)) == "imag(x)"
assert mcode(re(x)) == "real(x)"
assert mcode(Max(x, y) + Min(x, y)) == "max(x, y) + min(x, y)"
assert mcode(Max(x, y, z)) == "max(x, max(y, z))"
assert mcode(Min(x, y, z)) == "min(x, min(y, z))"
def test_Pow():
assert mcode(x**3) == "x.^3"
assert mcode(x**(y**3)) == "x.^(y.^3)"
assert mcode(x**Rational(2, 3)) == 'x.^(2/3)'
g = implemented_function('g', Lambda(x, 2*x))
assert mcode(1/(g(x)*3.5)**(x - y**x)/(x**2 + y)) == \
"(3.5*2*x).^(-x + y.^x)./(x.^2 + y)"
# For issue 14160
assert mcode(Mul(-2, x, Pow(Mul(y,y,evaluate=False), -1, evaluate=False),
evaluate=False)) == '-2*x./(y.*y)'
def test_basic_ops():
assert mcode(x*y) == "x.*y"
assert mcode(x + y) == "x + y"
assert mcode(x - y) == "x - y"
assert mcode(-x) == "-x"
def test_1_over_x_and_sqrt():
# 1.0 and 0.5 would do something different in regular StrPrinter,
# but these are exact in IEEE floating point so no different here.
assert mcode(1/x) == '1./x'
assert mcode(x**-1) == mcode(x**-1.0) == '1./x'
assert mcode(1/sqrt(x)) == '1./sqrt(x)'
assert mcode(x**-S.Half) == mcode(x**-0.5) == '1./sqrt(x)'
assert mcode(sqrt(x)) == 'sqrt(x)'
assert mcode(x**S.Half) == mcode(x**0.5) == 'sqrt(x)'
assert mcode(1/pi) == '1/pi'
assert mcode(pi**-1) == mcode(pi**-1.0) == '1/pi'
assert mcode(pi**-0.5) == '1/sqrt(pi)'
def test_mix_number_mult_symbols():
assert mcode(3*x) == "3*x"
assert mcode(pi*x) == "pi*x"
assert mcode(3/x) == "3./x"
assert mcode(pi/x) == "pi./x"
assert mcode(x/3) == "x/3"
assert mcode(x/pi) == "x/pi"
assert mcode(x*y) == "x.*y"
assert mcode(3*x*y) == "3*x.*y"
assert mcode(3*pi*x*y) == "3*pi*x.*y"
assert mcode(x/y) == "x./y"
assert mcode(3*x/y) == "3*x./y"
assert mcode(x*y/z) == "x.*y./z"
assert mcode(x/y*z) == "x.*z./y"
assert mcode(1/x/y) == "1./(x.*y)"
assert mcode(2*pi*x/y/z) == "2*pi*x./(y.*z)"
assert mcode(3*pi/x) == "3*pi./x"
assert mcode(S(3)/5) == "3/5"
assert mcode(S(3)/5*x) == "3*x/5"
assert mcode(x/y/z) == "x./(y.*z)"
assert mcode((x+y)/z) == "(x + y)./z"
assert mcode((x+y)/(z+x)) == "(x + y)./(x + z)"
assert mcode((x+y)/EulerGamma) == "(x + y)/%s" % EulerGamma.evalf(17)
assert mcode(x/3/pi) == "x/(3*pi)"
assert mcode(S(3)/5*x*y/pi) == "3*x.*y/(5*pi)"
def test_mix_number_pow_symbols():
assert mcode(pi**3) == 'pi^3'
assert mcode(x**2) == 'x.^2'
assert mcode(x**(pi**3)) == 'x.^(pi^3)'
assert mcode(x**y) == 'x.^y'
assert mcode(x**(y**z)) == 'x.^(y.^z)'
assert mcode((x**y)**z) == '(x.^y).^z'
def test_imag():
I = S('I')
assert mcode(I) == "1i"
assert mcode(5*I) == "5i"
assert mcode((S(3)/2)*I) == "3*1i/2"
assert mcode(3+4*I) == "3 + 4i"
assert mcode(sqrt(3)*I) == "sqrt(3)*1i"
def test_constants():
assert mcode(pi) == "pi"
assert mcode(oo) == "inf"
assert mcode(-oo) == "-inf"
assert mcode(S.NegativeInfinity) == "-inf"
assert mcode(S.NaN) == "NaN"
assert mcode(S.Exp1) == "exp(1)"
assert mcode(exp(1)) == "exp(1)"
def test_constants_other():
assert mcode(2*GoldenRatio) == "2*(1+sqrt(5))/2"
assert mcode(2*Catalan) == "2*%s" % Catalan.evalf(17)
assert mcode(2*EulerGamma) == "2*%s" % EulerGamma.evalf(17)
def test_boolean():
assert mcode(x & y) == "x & y"
assert mcode(x | y) == "x | y"
assert mcode(~x) == "~x"
assert mcode(x & y & z) == "x & y & z"
assert mcode(x | y | z) == "x | y | z"
assert mcode((x & y) | z) == "z | x & y"
assert mcode((x | y) & z) == "z & (x | y)"
def test_Matrices():
assert mcode(Matrix(1, 1, [10])) == "10"
A = Matrix([[1, sin(x/2), abs(x)],
[0, 1, pi],
                [0, exp(1), ceiling(x)]])
expected = "[1 sin(x/2) abs(x); 0 1 pi; 0 exp(1) ceil(x)]"
assert mcode(A) == expected
# row and columns
assert mcode(A[:,0]) == "[1; 0; 0]"
assert mcode(A[0,:]) == "[1 sin(x/2) abs(x)]"
# empty matrices
assert mcode(Matrix(0, 0, [])) == '[]'
assert mcode(Matrix(0, 3, [])) == 'zeros(0, 3)'
# annoying to read but correct
assert mcode(Matrix([[x, x - y, -y]])) == "[x x - y -y]"
def test_vector_entries_hadamard():
    # For a row or column, the user might want to use the other dimension
A = Matrix([[1, sin(2/x), 3*pi/x/5]])
assert mcode(A) == "[1 sin(2./x) 3*pi./(5*x)]"
assert mcode(A.T) == "[1; sin(2./x); 3*pi./(5*x)]"
@XFAIL
def test_Matrices_entries_not_hadamard():
# For Matrix with col >= 2, row >= 2, they need to be scalars
    # FIXME: is it worth worrying about this? It's not wrong, just
    # leave it as the user's responsibility to put scalar data for x.
A = Matrix([[1, sin(2/x), 3*pi/x/5], [1, 2, x*y]])
expected = ("[1 sin(2/x) 3*pi/(5*x);\n"
"1 2 x*y]") # <- we give x.*y
assert mcode(A) == expected
def test_MatrixSymbol():
n = Symbol('n', integer=True)
A = MatrixSymbol('A', n, n)
B = MatrixSymbol('B', n, n)
assert mcode(A*B) == "A*B"
assert mcode(B*A) == "B*A"
assert mcode(2*A*B) == "2*A*B"
assert mcode(B*2*A) == "2*B*A"
assert mcode(A*(B + 3*Identity(n))) == "A*(3*eye(n) + B)"
assert mcode(A**(x**2)) == "A^(x.^2)"
assert mcode(A**3) == "A^3"
assert mcode(A**(S.Half)) == "A^(1/2)"
def test_special_matrices():
assert mcode(6*Identity(3)) == "6*eye(3)"
def test_containers():
assert mcode([1, 2, 3, [4, 5, [6, 7]], 8, [9, 10], 11]) == \
"{1, 2, 3, {4, 5, {6, 7}}, 8, {9, 10}, 11}"
assert mcode((1, 2, (3, 4))) == "{1, 2, {3, 4}}"
assert mcode([1]) == "{1}"
assert mcode((1,)) == "{1}"
assert mcode(Tuple(*[1, 2, 3])) == "{1, 2, 3}"
assert mcode((1, x*y, (3, x**2))) == "{1, x.*y, {3, x.^2}}"
# scalar, matrix, empty matrix and empty list
assert mcode((1, eye(3), Matrix(0, 0, []), [])) == "{1, [1 0 0; 0 1 0; 0 0 1], [], {}}"
def test_octave_noninline():
source = mcode((x+y)/Catalan, assign_to='me', inline=False)
expected = (
"Catalan = %s;\n"
"me = (x + y)/Catalan;"
) % Catalan.evalf(17)
assert source == expected
def test_octave_piecewise():
expr = Piecewise((x, x < 1), (x**2, True))
assert mcode(expr) == "((x < 1).*(x) + (~(x < 1)).*(x.^2))"
assert mcode(expr, assign_to="r") == (
"r = ((x < 1).*(x) + (~(x < 1)).*(x.^2));")
assert mcode(expr, assign_to="r", inline=False) == (
"if (x < 1)\n"
" r = x;\n"
"else\n"
" r = x.^2;\n"
"end")
expr = Piecewise((x**2, x < 1), (x**3, x < 2), (x**4, x < 3), (x**5, True))
expected = ("((x < 1).*(x.^2) + (~(x < 1)).*( ...\n"
"(x < 2).*(x.^3) + (~(x < 2)).*( ...\n"
"(x < 3).*(x.^4) + (~(x < 3)).*(x.^5))))")
assert mcode(expr) == expected
assert mcode(expr, assign_to="r") == "r = " + expected + ";"
assert mcode(expr, assign_to="r", inline=False) == (
"if (x < 1)\n"
" r = x.^2;\n"
"elseif (x < 2)\n"
" r = x.^3;\n"
"elseif (x < 3)\n"
" r = x.^4;\n"
"else\n"
" r = x.^5;\n"
"end")
    # Check that a Piecewise without a True (default) condition raises an error
expr = Piecewise((x, x < 1), (x**2, x > 1), (sin(x), x > 0))
raises(ValueError, lambda: mcode(expr))
def test_octave_piecewise_times_const():
pw = Piecewise((x, x < 1), (x**2, True))
assert mcode(2*pw) == "2*((x < 1).*(x) + (~(x < 1)).*(x.^2))"
assert mcode(pw/x) == "((x < 1).*(x) + (~(x < 1)).*(x.^2))./x"
assert mcode(pw/(x*y)) == "((x < 1).*(x) + (~(x < 1)).*(x.^2))./(x.*y)"
assert mcode(pw/3) == "((x < 1).*(x) + (~(x < 1)).*(x.^2))/3"
def test_octave_matrix_assign_to():
A = Matrix([[1, 2, 3]])
assert mcode(A, assign_to='a') == "a = [1 2 3];"
A = Matrix([[1, 2], [3, 4]])
assert mcode(A, assign_to='A') == "A = [1 2; 3 4];"
def test_octave_matrix_assign_to_more():
# assigning to Symbol or MatrixSymbol requires lhs/rhs match
A = Matrix([[1, 2, 3]])
B = MatrixSymbol('B', 1, 3)
C = MatrixSymbol('C', 2, 3)
assert mcode(A, assign_to=B) == "B = [1 2 3];"
raises(ValueError, lambda: mcode(A, assign_to=x))
raises(ValueError, lambda: mcode(A, assign_to=C))
def test_octave_matrix_1x1():
A = Matrix([[3]])
B = MatrixSymbol('B', 1, 1)
C = MatrixSymbol('C', 1, 2)
assert mcode(A, assign_to=B) == "B = 3;"
# FIXME?
#assert mcode(A, assign_to=x) == "x = 3;"
raises(ValueError, lambda: mcode(A, assign_to=C))
def test_octave_matrix_elements():
A = Matrix([[x, 2, x*y]])
assert mcode(A[0, 0]**2 + A[0, 1] + A[0, 2]) == "x.^2 + x.*y + 2"
A = MatrixSymbol('AA', 1, 3)
assert mcode(A) == "AA"
assert mcode(A[0, 0]**2 + sin(A[0,1]) + A[0,2]) == \
"sin(AA(1, 2)) + AA(1, 1).^2 + AA(1, 3)"
assert mcode(sum(A)) == "AA(1, 1) + AA(1, 2) + AA(1, 3)"
def test_octave_boolean():
assert mcode(True) == "true"
assert mcode(S.true) == "true"
assert mcode(False) == "false"
assert mcode(S.false) == "false"
def test_octave_not_supported():
assert mcode(S.ComplexInfinity) == (
"% Not supported in Octave:\n"
"% ComplexInfinity\n"
"zoo"
)
f = Function('f')
assert mcode(f(x).diff(x)) == (
"% Not supported in Octave:\n"
"% Derivative\n"
"Derivative(f(x), x)"
)
def test_trick_indent_with_end_else_words():
# words starting with "end" or "else" do not confuse the indenter
    t1 = S('endless')
    t2 = S('elsewhere')
pw = Piecewise((t1, x < 0), (t2, x <= 1), (1, True))
assert mcode(pw, inline=False) == (
"if (x < 0)\n"
" endless\n"
"elseif (x <= 1)\n"
" elsewhere\n"
"else\n"
" 1\n"
"end")
def test_hadamard():
A = MatrixSymbol('A', 3, 3)
B = MatrixSymbol('B', 3, 3)
v = MatrixSymbol('v', 3, 1)
h = MatrixSymbol('h', 1, 3)
C = HadamardProduct(A, B)
assert mcode(C) == "A.*B"
assert mcode(C*v) == "(A.*B)*v"
assert mcode(h*C*v) == "h*(A.*B)*v"
assert mcode(C*A) == "(A.*B)*A"
    # mixing Hadamard and scalars is strange b/c we vectorize scalars
assert mcode(C*x*y) == "(x.*y)*(A.*B)"
def test_sparse():
M = SparseMatrix(5, 6, {})
    M[2, 2] = 10
    M[1, 2] = 20
    M[1, 3] = 22
    M[0, 3] = 30
    M[3, 0] = x*y
assert mcode(M) == (
"sparse([4 2 3 1 2], [1 3 3 4 4], [x.*y 20 10 30 22], 5, 6)"
)
def test_sinc():
assert mcode(sinc(x)) == 'sinc(x/pi)'
assert mcode(sinc((x + 3))) == 'sinc((x + 3)/pi)'
assert mcode(sinc(pi*(x + 3))) == 'sinc(x + 3)'
def test_specfun():
n = Symbol('n')
for f in [besselj, bessely, besseli, besselk]:
assert octave_code(f(n, x)) == f.__name__ + '(n, x)'
assert octave_code(hankel1(n, x)) == 'besselh(n, 1, x)'
assert octave_code(hankel2(n, x)) == 'besselh(n, 2, x)'
assert octave_code(airyai(x)) == 'airy(0, x)'
assert octave_code(airyaiprime(x)) == 'airy(1, x)'
assert octave_code(airybi(x)) == 'airy(2, x)'
assert octave_code(airybiprime(x)) == 'airy(3, x)'
assert octave_code(uppergamma(n, x)) == 'gammainc(x, n, \'upper\')'
assert octave_code(lowergamma(n, x)) == 'gammainc(x, n, \'lower\')'
assert octave_code(jn(n, x)) == 'sqrt(2)*sqrt(pi)*sqrt(1./x).*besselj(n + 1/2, x)/2'
assert octave_code(yn(n, x)) == 'sqrt(2)*sqrt(pi)*sqrt(1./x).*bessely(n + 1/2, x)/2'
assert octave_code(LambertW(x)) == 'lambertw(x)'
assert octave_code(LambertW(x, n)) == 'lambertw(n, x)'
def test_MatrixElement_printing():
# test cases for issue #11821
A = MatrixSymbol("A", 1, 3)
B = MatrixSymbol("B", 1, 3)
C = MatrixSymbol("C", 1, 3)
assert mcode(A[0, 0]) == "A(1, 1)"
assert mcode(3 * A[0, 0]) == "3*A(1, 1)"
F = C[0, 0].subs(C, A - B)
assert mcode(F) == "(-B + A)(1, 1)"
|
Welcome to the ExpertRating online Macromedia Director MX 2004 Test. ExpertRating is an ISO 9001:2015 company that offers hundreds of popular certifications suitable for students, professionals, job seekers and companies. See where ExpertRating Certified Professionals are working.
Details of the Macromedia Director MX 2004 Test are displayed below. To take this test, scroll down to the bottom of the page and click on the appropriate link.
The Macromedia Director MX 2004 Test has been specifically designed to assess an individual's job prospects by evaluating working skills and job readiness. For this reason, emphasis is laid upon evaluating the knowledge of applied skills gained through real work experience, rather than theoretical knowledge.
If you are unable to pass the Macromedia Director MX 2004 Test or you wish to improve your score, you may re-appear by paying $5.00 only.
Detailed instructions relating to the testing procedure for the Macromedia Director MX 2004 Test are provided inside the login account before the start of the test.
|
import logging
import unittest
from selenium.webdriver.common.by import By
import alvi.tests.pages as pages
from alvi.tests.test_client.base import TestContainer
logger = logging.getLogger(__name__)
class TestGraph(TestContainer):
def test_create_node(self):
graph_page = pages.Graph(self._browser.driver, "GraphCreateNode")
graph_page.run(options=dict(n=4))
self.assertEqual(4, len(graph_page.svg.nodes), "create_node does not work properly")
self.assertEqual(4, len(graph_page.svg.edges), "create_edge does not work properly")
node_values = [int(element.find_element(By.CSS_SELECTOR, "text").text) for element in graph_page.svg.nodes]
node_values.sort()
created = node_values[:3]
self.assertEqual([0, 1, 2], created, "create_node does not work properly")
@unittest.skip("graph container does not support updating nodes at the moment")
def test_update_node(self):
graph_page = pages.Graph(self._browser.driver, "GraphUpdateNode")
graph_page.run()
updated = list(graph_page.svg.node_values)[3]
self.assertEqual(10, updated, "update_node does not work properly")
@unittest.skip("graph container does not support removing nodes at the moment")
def test_remove_node(self):
graph_page = pages.Graph(self._browser.driver, "GraphRemoveNode")
graph_page.run()
self.assertEqual(3, len(graph_page.svg.nodes), "remove_node does not work properly")
node_values = list(graph_page.svg.node_values)
node_values.sort()
self.assertEqual([0, 1, 2], node_values, "remove_node does not work properly")
def test_multi_marker(self):
graph_page = pages.Graph(self._browser.driver, "GraphAddMultiMarker")
graph_page.run(options=dict(n=4))
marker = [e for e in graph_page.svg.nodes if e.find_element(By.CSS_SELECTOR, "text").text == "multi marker"]
        self.assertEqual(1, len(marker), "multi_marker was not created successfully")
        # marked nodes have a different color
marker = marker[0]
color = marker.value_of_css_property("stroke")
colors = map(lambda e: e.value_of_css_property("stroke"), graph_page.svg.nodes)
marked = [c for c in colors if c == color]
        # expect 2 marked nodes + 1 node of multi_marker itself
        self.assertEqual(3, len(marked), "nodes were not successfully added to multi_marker")
def test_marker(self):
graph_page = pages.Graph(self._browser.driver, "GraphMarker")
graph_page.run(options=dict(n=4))
marker0 = [e for e in graph_page.svg.nodes if e.find_element(By.CSS_SELECTOR, "text").text == "marker 0"]
marker1 = [e for e in graph_page.svg.nodes if e.find_element(By.CSS_SELECTOR, "text").text == "marker 1"]
        self.assertEqual(1, len(marker0), "marker 0 was not created successfully")
        self.assertEqual(1, len(marker1), "marker 1 was not created successfully")
        # marked nodes have a different color
marker = marker0[0]
color = marker.value_of_css_property("stroke")
colors = map(lambda e: e.value_of_css_property("stroke"), graph_page.svg.nodes)
marked = [c for c in colors if c == color]
        # expect 1 marked node + 1 node of the marker itself
        self.assertEqual(2, len(marked), "node was not successfully marked")
|
Failing to deal with water damage in a timely manner can lead to further and more serious problems. This could make your water damage restoration in Columbus, OH, and reconstruction even more costly. Remember, the first 48 hours are crucial. The sooner a water damage restoration company begins the drying process, the better.
Putting off the process can increase the risk of long-lasting water damage to your property and the risk of mold growth. If the trouble comes from a leaking sink, a broken toilet, or any inside source, look for the shut-off valve and shut it down immediately. Once that is done, call us at 800-569-5380 so we can provide you with our prompt and quality water damage restoration services in Columbus, OH.
Both major and minor water damage must be inspected in order to avoid further property loss, expensive repair services, and headaches. This is why you should not attempt to clean up or do things which should be done by a professional. Hiring our water damage restoration services in Columbus, OH means your home or business will be dried out, cleaned and restored efficiently and thoroughly so you can return to your normal life as soon as possible.
When it comes to water damage restoration services, it is important to choose only certified professionals. Although water damage restoration in Columbus, OH is not a regulated industry, hiring technicians with certification would give you the assurance that you’re working with people who strive to acquire the best training possible and that they take their work seriously. Our technicians have undergone the most advanced training available and are knowledgeable in the newest techniques and technologies in this field.
|
#=================================
# xmlTagger.py
version = '1.0'
#=================================
# last modified : january 17 2006
# written by Benjamin Tardif
# [email protected]
#=================================
header = '\n#==============\n# xmlTagger.py\n# version %s\n#==============' %version
#====================================================================================================
#IMPORTS
import os
import sys
#====================================================================================================
#====================================================================================================
#METHODS
def detectfile(filename,path): # type(filename) = type(path) = string
# method detectfile returns True if the specified file is found in the specified path
return filename in os.listdir(path)
def clean(lines): # type(lines) = list of strings
    # method clean removes character strings '\n' and '\r' and empty lines from a string list
    # (the string list is usually obtained with the ".readlines()" method)
    L = len(lines)
    for i in range(L):
        lines[L-1-i] = lines[L-1-i].replace('\n','')
        lines[L-1-i] = lines[L-1-i].replace('\r','')
        if lines[L-1-i].split() == []:
            lines.pop(L-1-i)
#====================================================================================================
#----------------------------------------------------------------------------------------------------
#MAIN
print header
#====================================================================================================
#COMMAND LINE
#get xmlfilename
if len(sys.argv) > 2:
# user entered too many arguments in the command line
print '\n- ERROR -\ntoo many arguments in the command line'
sys.exit()
elif len(sys.argv) == 2:
# user entered the xmlfilename in the command line
xmlfilename = sys.argv[1]
else:
# user entered no xmlfilename in the command line
xmlfilename = raw_input('\nEnter the name of the xml file to tag :\n')
#abort if file not found
if not detectfile(xmlfilename,'.'):
print '\n- ERROR -\nfile not found\n'
sys.exit()
#abort if the file is not a xml file
if xmlfilename[-4:] != '.xml':
print '\n- ERROR -\nyou must enter a xml file (*.xml)\n'
sys.exit()
#abort if the file is already a tagged xml file
if xmlfilename[-8:] == '_tag.xml':
print '\n- ERROR -\nthis file is already tagged\n'
sys.exit()
#====================================================================================================
#====================================================================================================
#READ AND TREAT THE FILE
#read the file
reader = open(xmlfilename,'r')
filedata = reader.readlines()
reader.close()
clean(filedata)
#for each line, remove all characters before '<' and after '>'
for i in range(len(filedata)):
while filedata[i][0] != '<':
filedata[i] = filedata[i][1:]
while filedata[i][-1] != '>':
filedata[i] = filedata[i][:-1]
#compute len_max (number of digits of the number of the last line of the xml file)
len_max = len(str(len(filedata)))
#compute tagxmlfilename (name of the tagged xml file)
tagxmlfilename = xmlfilename[:-4]+'_tag.xml'
#====================================================================================================
#====================================================================================================
#WRITE THE TAGGED XML FILE
writer = open(tagxmlfilename,'w')
tag=0
for line in filedata:
    if line.split()[0][1] == '/':
        # </Element> (closing tags are not numbered; pad to line up with tagged lines)
        writer.write((len_max+7)*' '+'%s\n' %line)
elif line.split()[-1][-2] == '/':
# <Element/>
tag+=1
len_tag = len(str(tag))
writer.write((len_max-len_tag)*' '+'<!--%i-->'%tag+line[:-2]+" tag='%i'/>\n"%tag)
else:
# <Element>
tag+=1
len_tag = len(str(tag))
writer.write((len_max-len_tag)*' '+'<!--%i-->'%tag+line[:-1]+" tag='%i'>\n"%tag)
writer.close()
print '\n"%s" file created successfully\n' %tagxmlfilename
#====================================================================================================
#----------------------------------------------------------------------------------------------------
|
letter, among other things, I said the following.
auspicious beginning of your second term as President of the USA.
heights with the passing of time and the gaining of new experience.
second term expires if not two years earlier.
Americans to Paul Tsongas' call for renewal.
our peaceful competition in a unified future world.
and ask for your undivided attention.
be as great as the success of the second term of President J.F.
gift from a Democrat fellow American.
worthy of his title, should serve with devoted dedication.
greater threats of China and Islam.
world, which is economically unified and politically democratic.
probability, assume truly global dimensions.
brings me to the third and greatest threat to American dream.
soon will dawn upon the world.
force, and shake up the Western world fundamentally.
against America. Turkey is moving, under the Islamic guidance of Mr.
and nominally secular, it openly preaches the gospel of Pan-Islamism.
realistic thinking and bold political action is needed now.
American diplomats and honest American citizens are the following.
the virtues of the second need elaboration.
to become a member of the EU and NATO.
had of his eternal glory.
acted accordingly with justice for all.
like Thomas Jefferson and J.F. Kennedy, led the Democratic Party.
|
from __future__ import absolute_import
import copy, threading
from ..prelude import *
from .interfaces import Cursor, Journal, Memory, Change, CannotCommit
from .log import log, weaklog
__all__ = (
'memory', 'journal',
'readable_state', 'original_state', 'writable_state',
'change_state', 'copy_state', 'commit_transaction',
'change', 'Deleted', 'Inserted',
'good', 'verify_read', 'verify_write', 'unverified_write'
)
### Generic Operations
def readable_state(journal, cursor, *default):
return good(journal.readable_state, cursor, *default)
def original_state(journal, cursor, *default):
return good(journal.original_state, cursor, *default)
def writable_state(journal, cursor):
return good(journal.writable_state, cursor)
def change_state(method, what, *args, **kwargs):
if isinstance(what, Cursor):
method(what, *args, **kwargs)
else:
## cache cursors in a list so the log can be modified.
for cursor in list(what):
method(cursor, *args, **kwargs)
def commit_transaction(source, nested):
if source is nested:
raise RuntimeError("A journal can't be committed to itself.")
source.commit_transaction(nested)
### Journals
class change(namedtuple('changes', 'cursor orig state'), Change):
pass
class journal(Journal):
LogType = log
name = None
source = None
def __init__(self, name, source):
self.name = name
self.source = source
self.read_log = self.LogType()
self.write_log = self.LogType()
def __repr__(self):
return '<%s %s>' % (type(self).__name__, str(self))
def __str__(self):
return self.name
def make_journal(self, name):
return type(self)(name, self)
def allocate(self, cursor, state):
self.write_log.allocate(cursor, state)
return cursor
def readable_state(self, cursor):
try:
return self.write_log[cursor]
except KeyError:
return self.original_state(cursor)
def original_state(self, cursor):
try:
return self.read_log[cursor]
except KeyError:
state = good(self.source.readable_state, cursor, Inserted)
self.read_log[cursor] = state
return state
def writable_state(self, cursor):
try:
return self.write_log[cursor]
except KeyError:
state = copy_state(self.original_state(cursor))
self.write_log[cursor] = state
return state
def delete_state(self, cursor):
self.write_log[cursor] = Deleted
def rollback_state(self, cursor):
self.write_log.pop(cursor, None)
def commit_transaction(self, trans):
## A journal is single-threaded; state can be blindly copied
## in.
for (cursor, orig, state) in trans.changed():
            self.write_log[cursor] = state
def original(self):
return iter(self.read_log)
def changed(self):
return (
change(k, get_state(self.read_log, k), v)
for (k, v) in self.write_log
)
class memory(Memory):
JournalType = journal
LogType = weaklog
name = None
def __init__(self, name='*memory*', check_read=True, check_write=True):
self.name = name
self.write_lock = threading.RLock()
self.check_read = check_read
self.check_write = check_write
self.mem = self.LogType()
def __repr__(self):
return '<%s %s>' % (type(self).__name__, str(self))
def __str__(self):
return self.name
def make_journal(self, name):
return self.JournalType(name, self)
def allocate(self, cursor, state):
self.mem.allocate(cursor, state)
return cursor
def readable_state(self, cursor):
return self.mem[cursor]
def commit_transaction(self, trans):
with self.write_lock:
self._read(trans.original())
self._commit(self._write(trans.changed()))
def _read(self, read):
if self.check_read:
verify_read(self.mem, read)
def _write(self, changed):
if self.check_write:
return verify_write(self.mem, changed)
else:
return unverified_write(changed)
def _commit(self, changed):
for (cursor, state) in changed:
if state is Deleted:
self.mem.pop(cursor, None)
else:
self.mem[cursor] = state
### State
copy_state = copy.deepcopy
Inserted = sentinal('<inserted>')
Deleted = sentinal('<deleted>')
def good(method, cursor, *default):
try:
value = method(cursor)
if not isinstance(value, Sentinal):
return value
except KeyError:
value = Undefined
if default:
return default[0]
raise ValueError(
'%s object %s %r.' % (type(cursor).__name__, id(cursor), value)
)
### Operations on Logs
def get_state(log, cursor):
return log.get(cursor, Inserted)
def verify_read(log, read):
conflicts = [(c, s) for (c, s) in read if log.get(c) != s]
if conflicts:
raise CannotCommit(conflicts)
def verify_write(log, changed):
changed, conflicts = partition_conflicts(log, changed)
if conflicts:
raise CannotCommit(conflicts)
return changed
def unverified_write(changed):
return list((c, s) for (c, o, s) in changed)
def partition_conflicts(log, changed):
good = []; bad = []
for (cursor, orig, state) in changed:
current = get_state(log, cursor)
(good if current is orig else bad).append((cursor, state))
return good, bad
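### Usage Sketch (illustrative; cursor objects and state values come from callers)
## mem = memory('main')
## mem.allocate(cursor, initial_state)
## txn = mem.make_journal('txn-1')
## state = writable_state(txn, cursor)  ## copy-on-write view of the state
## ...mutate state in place...
## commit_transaction(mem, txn)  ## verified against mem; raises CannotCommit on conflict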
|
Buy large lockable wooden outdoor storage shed tts. Lifetime side entry 15 ft w x 8 ft d plastic storage. Simple storage shed designs for your backyard shed. Suffolk timberframe construction garden storage, sheds.
Duramax 30214 vinyl woodside 105x8 shed on sale with free shipping. Shipping containers as storage sheds containerauctioncom. Shipping container roof cover shelter kit suits 2 x 20ft. Arrow 10 ft x 12 ft vinyl coated steel storage shed lowe. Suncast bms3200 horizontal storage shed buy online in. 8 things you need to know about a pop up storage shed. Building a 6x8 lean to shed diy plans pinterest. Storage sheds lowes creativity pixelmaricom. Outdoor patio with storage shed. Storage sheds 5 x 10 rubbermaid vertical storage shed. Better built barns better built barns, garden sheds. 5 x 7 lawnmower storage shed asgard.
Sheds garden sheds sears. 8 x 12 storage shed plan 6 x 12 shed plans free how to. Craftsman 8' x 4' resin storage shed slickdealsnet. 8x12 craftsman garden shed westcoast outbuildings. Metal garden shed 8x4 garden ftempo. Sheds outdoor storage: buy sheds outdoor storage in. Outhouse dimensions beautiful craftsman cbms8401 8 x 4. Storage building wood plastic composite shed steel. Sheds garden sheds sears. 10% off sears coupons: 2018 promo codes. Sheds garden sheds sears. Sheds garden sheds sears. Deals savings on outdoor storage sheds. Sears craftsman shed zhg01info. Sheds garden sheds sears.
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2017 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
from __future__ import absolute_import, print_function
import pytest
from sqlalchemy import inspect
from invenio_db.utils import drop_alembic_version_table
def test_alembic_revision_a26f133d42a9(app, db):
ext = app.extensions['invenio-db']
    if db.engine.name == 'sqlite':
        pytest.skip('Upgrades are not supported on SQLite.')
db.drop_all()
drop_alembic_version_table()
with app.app_context():
inspector = inspect(db.engine)
assert 'workflows_workflow' not in inspector.get_table_names()
assert 'workflows_object' not in inspector.get_table_names()
ext.alembic.upgrade(target='a26f133d42a9')
with app.app_context():
inspector = inspect(db.engine)
assert 'workflows_workflow' in inspector.get_table_names()
assert 'workflows_object' in inspector.get_table_names()
ext.alembic.downgrade(target='720ddf51e24b')
with app.app_context():
inspector = inspect(db.engine)
assert 'workflows_workflow' not in inspector.get_table_names()
assert 'workflows_object' not in inspector.get_table_names()
drop_alembic_version_table()
|
Gateway has brought back the Chief and has removed about .25″ from the rim depth and overall height. It's now a low-profile Wizard with some nose slope and a bead. Great for those players who don't feel comfortable with deeper-rimmed putters.
|
# -*- coding: utf-8 -*-
# Copyright 2015 Michał Nieznański
#
# This file is part of Tetroll.
#
# Tetroll is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Tetroll is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Tetroll. If not, see <http://www.gnu.org/licenses/>.
class block:
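    """A falling piece. `sqares` holds one list of square-coordinate offsets
    per rotation state; `cur_sqares` caches the offsets for the current
    rotation, and `identifier` maps the shape back through `from_int`."""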
def __init__(self):
self.sqares = []
self.cur_sqares = [[0,0],[0,0],[0,0],[0,0]]
self.rotation = 0
self.colour = 1
self.identifier = 0
def I(self):
self.sqares = [
[[0, 0], [0, 1], [0, 2], [0, 3]],
[[0,1], [-1, 1], [1, 1], [2, 1]]
]
self.init_cur_sqares()
self.colour = 1
self.identifier = 0
def T(self):
self.sqares = [
[[-1, 0], [0, 0], [1, 0], [0, 1]],
[[0, -1], [0, 0], [1, 0], [0, 1]],
[[-1, 0], [0, 0], [1, 0], [0, -1]],
[[-1, 0], [0, 0], [0, -1], [0, 1]]
]
self.init_cur_sqares()
self.colour = 1
self.identifier = 1
def O(self):
self.sqares = [
[[0,0], [1, 0], [0, 1], [1 , 1]]
]
self.init_cur_sqares()
self.colour = 1
self.identifier = 2
def S(self):
self.sqares = [
[[-1, 0], [0, 0], [0, 1], [1, 1]],
[[-1, 1], [0, 1], [-1, 2], [0, 0]]
]
self.init_cur_sqares()
self.colour = 1
self.identifier = 3
def Z(self):
self.sqares = [
[[0, 0], [1, 0], [-1, 1], [0, 1]],
[[0, 0], [1, 1], [1, 2], [0, 1]]
]
self.init_cur_sqares()
self.colour = 1
self.identifier = 4
def L(self):
self.sqares = [
[[0, 0], [1, 0], [0, 1], [0, 2]],
[[0, 0], [0, 1], [1, 1], [2, 1]],
[[0, 2], [1, 0], [1, 1], [1, 2]],
[[0, 0], [1, 0], [1, 1], [-1,0]]
]
self.init_cur_sqares()
self.colour = 1
self.identifier = 5
def J(self):
self.sqares = [
[[0, 0], [1, 0], [1, 1], [1, 2]],
[[0, 0], [0, 1], [2, 0], [1, 0]],
[[0, 0], [0, 1], [0, 2], [1, 2]],
[[0, 1], [1, 1], [2, 1], [2, 0]],
]
self.init_cur_sqares()
self.colour = 1
self.identifier = 6
def X(self):
self.sqares = [
[[-1, 0], [1, 0], [0, 1], [-1, 2], [1,2]]
]
self.init_cur_sqares()
self.colour = 2
self.identifier = 7
def H(self):
self.sqares = [
[[-1, 0], [1, 0], [0, 1], [-1, 2], [1,2], [-1,1], [1,1]]
]
self.init_cur_sqares()
self.colour = 2
self.identifier = 8
def U(self):
self.sqares = [
[[0, 0], [-1, 2], [1,2], [-1,1], [1,1]]
]
self.init_cur_sqares()
self.colour = 2
self.identifier = 9
def Tbig(self):
self.sqares = [
[[0, 0], [-1, 2], [1,2], [0,1], [0,2]]
]
self.init_cur_sqares()
self.colour = 2
self.identifier = 10
def t(self):
self.sqares = [
[[0, 0], [-1, 1], [1,1], [0,1], [0,2]]
]
self.init_cur_sqares()
self.colour = 2
self.identifier = 11
def dot(self):
self.sqares = [
[[0, 0]]
]
self.init_cur_sqares()
self.colour = 1
self.identifier = 16
def from_int(self, x):
if x == 0:
self.I();
elif x == 1:
self.T()
elif x == 2:
self.O()
elif x == 3:
self.S()
elif x == 4:
self.Z()
elif x == 5:
self.L()
elif x == 6:
self.J()
elif x == 7:
self.X()
elif x == 8:
self.H()
elif x == 9:
self.U()
elif x == 10:
self.Tbig()
elif x == 11:
self.t()
elif x == 16:
self.dot()
def rotate(self, r_amount):
self.rotation = (self.rotation + r_amount) % len(self.sqares)
self.cur_sqares = self.sqares[self.rotation]
def current_sqares(self):
return self.sqares[self.rotation]
def init_cur_sqares(self):
self.cur_sqares = []
for i in self.sqares[0]:
self.cur_sqares.append(i)
self.rotation = 0
|
The headlines at yesterday's Apple event were largely dominated by the new gorgeous 5K Retina Display iMac as well as new Apple iPad tablets.
However, you'll have to refer to the company's press release and website to discover that it also released a new version of OS X server (version 4) and quietly retired the Apple Mac mini with OS X server. Google still shows the search result for "apple mac mini server" but the page has disappeared from Apple's website.
The new OS X Server 4.0 requires Yosemite - which means that it is likely to be a "superset" of features rather than an OS on its own - and is available from Mac App Store for £13.99 ($19.99, AU$21.99).
Apple has also rolled out an up-to-date program for OS X server which allows qualifying Mac mini purchasers to get the server OS for free. Will Apple eventually "kill" OS X Server by offering it as a free add-on in the near future? As the French say - c'est possible.
As for the Mac mini, its future might follow two distinct paths; either Apple kills it like it did for the Xserve several years ago or it will be the next one to be switched from Intel to ARM (like the Apple TV).
As it stands right now, the fact that it has cut the price of the Mac mini and is positioning it as a standalone desktop (rather than a server) leads us to believe that an ARM transition is imminent.
The new model supports up to 1TB of storage rather than 2TB, and it's also worth noting that while Apple has unveiled a 5K Retina Display iMac (essentially an all-in-one PC), it didn't announce a standalone monitor with similar capabilities.
|
# -*- coding: utf-8 -*-
import sys
import codecs
from win32com.client import Dispatch
from collections import defaultdict
from mapping import fields, tables
from utils import dump_to_json_file, data_dir, log_dir
sub = '\n ' #'|'
sep = '\t|'
sep_short = '|'
tag_start = '\n'
tag_end = '' #'%'
record_start = '='*100 + '\n'
record_end = '\x1d\n\n'
MAX_RECS = 500000
BATCH_SIZE = 1024
table = "T.CATADO"
def get_field(node):
if not node:
return ""
if node.NodesCount == 0:
# If a field has one occurrence...
return node.Value
# If field has two or more occurrences
# Loop through each occurrence
s = ""
node_type = node.Nodes(1)[0].NodeType if isinstance(node.Nodes(1), tuple) else node.Nodes(1).NodeType
if node_type == 3:
# First Child node is Occurrence, so display occurrences
for i in range(1, node.NodesCount+1):
n = node.Nodes(i)
if isinstance(n, tuple):
n = n[0]
if n != None and n.NodesCount == 0 and n.Value != None:
s += sub + (n.Label if n.Label else ' ') + sep_short + n.Value.strip()
else:
for j in range(1, n.NodesCount+1):
nj = n.Nodes(j)
if isinstance(nj, tuple):
nj = nj[0]
if nj != None and nj.NodesCount == 0 and nj.Value != None:
s += sub + nj.Label + sep_short + nj.Value.strip()
else:
# Iterate through Subfields
for i in range(1, node.NodesCount+1):
n = node.Nodes(i)
if isinstance(n, tuple):
n = n[0]
if n != None and n.Value != None:
s += sub + n.Label + sep_short + n.Value.strip()
return s
def update_progress(progress):
sys.stdout.write('\r[{0}{1}] {2}% '.format('#'*(int(progress)),
' '*(100-int(progress)),
round(progress, 2)))
sys.stdout.flush()
def dump_to_text_file(d, filename):
print(">>> Writing to text file...")
total = float(len(d.items()))
f = open(data_dir + filename, 'w')
out = ''
counter = 0
for r in sorted(d.items()):
out = record_start + 'RECNO' + sep + str(r[0]) + tag_end
for tag in sorted(r[1].items()):
if tag[1] != '' and tag[0] != 'RECNO':
out += tag_start + tag[0] + sep + tag[1] + tag_end
out += record_end
f.write(out.encode('utf-8'))
counter = counter + 1
update_progress(counter*100/total)
f.close()
def format_record(d):
    out = record_start + 'RECNO' + sep + (str(d['RECNO']) if 'RECNO' in d else '') + tag_end
if d.items():
# out += record_start
for tag in sorted(d.items()):
if tag[0] != 'RECNO':
out += tag_start + tag[0] + sep + tag[1] + tag_end
out += record_end
return out
def main():
item_type = sys.argv[1]
MAX_RECS = 2500000 #int(sys.argv[2])
tables = {
'vendors': ['T.LIBVENDORS'],
'orders': ['T.ACQPOS', 'T.ACQITEMS'], #'T.ACQITEMS2'], #['T.ACQFUNDS', 'T.ACQINVOICE', 'T.ACQISELRCI', 'T.ACQISELRCV', 'T.ORDERSTAT', 'T.ORDERTYPES'],
'patrons': ['T.APPUSERS'], # 'T.IAEAUSERS'],
'serials': ['T.SERHOLD', 'T.SERIALS', ], # 'T.SERROUTE', 'T.SFREQS', ], # ['T.SERSELEOYR', 'T.SERSELHOLD', 'T.SERSELVUP'],
'bibliographic': ['T.CATADO'], # Items
'checkouts': ['T.CATCIRC'],
'holds': ['T.RESPICK'],
}
with codecs.open(log_dir + 'log_export.txt', 'w', encoding='utf8') as logfile:
logfile.close()
tables_to_download = []
if item_type in tables:
tables_to_download = tables[item_type]
else:
tables_to_download = [item_type]
for table in tables_to_download:
print(">>> Extracting from %s..." % table)
try:
i = 1
batch = 10000
last_record = 110000
last_fetched = 0
recs = {}
errors = []
get_me_out_of_here = False
offset = 0
while last_fetched < last_record:
print("\n>>> Connecting (%d)..." % i)
conn = Dispatch("STARADO.Connection")
conn.Login("libstar", "11002", "jaime", "dedalus")
res = conn.OpenRecordset(table, "R>%d AND R<=%d" % (last_fetched, min(last_fetched+batch, last_record)), " ".join(fields[table]))
rs = res[0]
if rs:
rec_no = rs.RecordCount
print(">>> Downloading %s records. [ %d to %d ]" % (min(rec_no, MAX_RECS), last_fetched+1, min(last_fetched+batch, last_record)))
total = float(min(rec_no, MAX_RECS))
if rs.RecordCount > 0:
f = open(data_dir+table+'_'+str(i)+'.txt', 'w')
rs.BatchSize = BATCH_SIZE # Set number of records to be retrieved in cached batches.
rs.MoveFirst() # Move focus to first record of retrieved set.
field_list = rs.FieldList.split(' ')
# Loop through each record until end of set is reached.
counter = 1
while not rs.EOF:
d = defaultdict(lambda : None)
for field in field_list:
value = None
try:
value = get_field(rs.Record(field))
if value != None and value != '':
d[field] = value
except:
with codecs.open(log_dir + 'log_export.txt', 'a', encoding='utf8') as logfile:
logfile.write(("==================\n%d\n%s:\t%s\n\n" % (counter+offset, field, value if value else 'No value set yet')).replace('\n', '\r\n'))
logfile.close()
# try:
f.write(format_record(d).encode('utf-8'))
recs[counter+offset] = d
# print counter*i
rs.MoveNext() # Go to the next record in the set
update_progress(counter*100/total)
counter = counter + 1
# if counter >= MAX_RECS:
# break
offset = offset + rs.RecordCount
rs.MoveLast()
f.close()
else:
get_me_out_of_here = True
last_fetched = last_fetched + rs.RecordCount #batch #int(get_field(rs.Record("RECNO")))
i = i + 1
# else:
print("\n>>> Closing connection. This may take a while...")
rs.CloseRecordset()
conn.Logout()
conn.CloseConnection()
rs = None
if get_me_out_of_here:
break
else:
conn.Logout()
conn.CloseConnection()
break
# print("\n>>> Saving json...")
# dump_to_json_file(recs, table+'.json')
# dump_to_text_file(recs, table+'.txt')
except Exception as e:
print e
print("\n>>> There was an ERROR: saving everything...")
# rs.Cancel()
f.close()
# rs.CloseRecordset()
conn.Logout()
conn.CloseConnection()
rs = None
if len(recs):
dump_to_json_file(recs, table+'.json')
# dump_to_json_file(errors, table+'-errors.json')
# print("\n>>> There were %d errors") % len(errors)
print(">>> %d records fetched") % len(recs)
# sys.exit(0)
if __name__ == '__main__':
main()
#
# ['T.ACQFUNDS', 'T.ACQFUNDSED', 'T.ACQFUNDSEO', 'T.ACQFUNDSOL', 'T.ACQINVOICE',
# 'T.ACQISELRCI', 'T.ACQISELRCV', 'T.ACQITEMS', 'T.ACQITEMSO1', 'T.ACQITEMSO2',
# 'T.ACQITEMSW', 'T.ACQPOS', 'T.ACQPOS1', 'T.ACQPOSBAK', 'T.ACQPOSED',
# 'T.ACQTEST', 'T.APPUPDATE', 'T.APPUSERS', 'T.APPUSERSN', 'T.APPUSERSSN',
# 'T.APPUSERSSV', 'T.AV', 'T.AVUSE', 'T.BOILERPLAT', 'T.CAT', 'T.CATADDITEM',
# 'T.CATBAK', 'T.CATCIRC', 'T.CATIN', 'T.CATINS', 'T.CATLINK', 'T.CATMASTER',
# 'T.CATOUT', 'T.CATOUTS', 'T.CATOVERDUE', 'T.CATPARTS', 'T.CATPRO',
# 'T.CATPROADIT', 'T.CATPROANAL', 'T.CATPROBKS', 'T.CATPROSER', 'T.CATPROSP',
# 'T.CATPROTOC', 'T.CATRENEW', 'T.CATRENEWS', 'T.CATRES', 'T.CATSELREQ',
# 'T.CATSELREQA', 'T.CATSELREQC', 'T.CATSELREQW', 'T.CATSP', 'T.CATTITLES',
# 'T.CATTOC', 'T.CATWEB', 'T.CATWEB4', 'T.CATWEBDES', 'T.CATWEBSEL',
# 'T.CATWEBSRV', 'T.CLAIMINT', 'T.CLSAMPTHES', 'T.COLLECTION', 'T.COUNTERS',
# 'T.CURRENCY', 'T.DIVISION', 'T.EMAIL', 'T.EMAILDB', 'T.GLOBAL', 'T.GLOBAL1',
# 'T.GLOBAL2', 'T.GLOBAL3', 'T.GLOBAL4', 'T.HNPU', 'T.IAEACSBC', 'T.IAEAUSERS',
# 'T.INISCAT', 'T.INISCATV17', 'T.INISCATWEB', 'T.INISTHES', 'T.INVENTORY',
# 'T.ISSCODE', 'T.ISSDATES', 'T.ISSDATESEL', 'T.ISSN', 'T.LDOCKWS', 'T.LDOCKWSN',
# 'T.LIBSCFORMS', 'T.LIBSGLOBAL', 'T.LIBSSERVER', 'T.LIBTRACK', 'T.LIBTYPES',
# 'T.LIBVENDORS', 'T.LNUMTYPES', 'T.LREFREQS', 'T.LREFREQSC', 'T.LSELRESCAN',
# 'T.LSERVICES', 'T.LSERVICESP', 'T.LSERVICESR', 'T.LSERVICESW', 'T.LSERVNUMS',
# 'T.MARCTEMP', 'T.ORDERSTAT', 'T.ORDERTYPES', 'T.PICKCTRY', 'T.PICKCTRYED',
# 'T.PICKLANG', 'T.PICKLANGED', 'T.PICKSTEP', 'T.PROJECTS', 'T.PWSELECTS',
# 'T.RAINBOW', 'T.REGLIBUSE', 'T.REQTYPE', 'T.RESERVE', 'T.RESERVEC', 'T.RESPICK',
# 'T.SERHOLD', 'T.SERHOLD2', 'T.SERIALS', 'T.SERIALS2', 'T.SERIALSBK',
# 'T.SERIALSC1', 'T.SERIALSC2', 'T.SERIALSC3', 'T.SERIALSDM', 'T.SERIALSID',
# 'T.SERIALSID2', 'T.SERIALSIN', 'T.SERIALSNE', 'T.SERIALSNE2', 'T.SERIALSNT',
# 'T.SERIALSNT2', 'T.SERIALSUNX', 'T.SERROUTE', 'T.SERROUTE2', 'T.SERSELEOYR',
# 'T.SERSELHOLD', 'T.SERSELVUP', 'T.SFREQS', 'T.SP', 'T.STAFPUB', 'T.STATISTIC',
# 'T.STATS', 'T.SUBINFO', 'T.SUBSNO', 'T.TOPIC', 'T.TUNCAT', 'T.TUNCATTITL',
# 'T.TUNCATWEB4', 'T.TUNTITLES', 'T.VALDB', 'T.VALIDATION', 'T.WAITCODES',
# 'T.WEBDES', 'T.WEBPROFILE', 'T.WEBSAMPTHS', 'T.WEBSERVER', 'T.WEBTYPES',
# 'T.WHENX', 'T.WHENXSUPER',]
|
A new marathon of super fun and interesting 2018 games is being prepared for you right now on our website friv-games.com, dear talented friends, and we are all very excited to tell you about today's games, because we think they are super cool online games and you will definitely like them once we tell you exactly what your mission is going to be. The next game, the first fun game of today, is called Shirley Wedding Dress Up, and it comes from the Girls Games category, because at the center of attention we have as the main character a very beautiful bride, and we are sure that this is good news for you. We are sure that you cannot wait to help this pretty bride dress up for her special day and pick the right clothes, accessories and, of course, hairstyle. Good luck.
Shirley Wedding Dress Up is a game that was added on 10.07.2018, and we invite you to play it right now, because it has already been played 211 times. Shirley Wedding Dress Up was added in Girl Games and has a 100% rating from the members who have played this game.
|
#! /usr/bin/env python
#
# Copyright (C) 2015-2016 Rich Lewis <[email protected]>
# License: 3-clause BSD
""" # skchem.pandas.structure_methods
Tools for adding a default attribute to pandas objects."""
from sklearn.manifold import TSNE, MDS
from sklearn.decomposition import PCA
import pandas as pd
from pandas.core.base import NoNewAttributesMixin, AccessorProperty
from pandas.core.series import Series
from pandas.core.index import Index
from .. import core
from .. import features
DIM_RED = {
'tsne': TSNE,
'pca': PCA,
'mds': MDS
}
class StructureMethods(NoNewAttributesMixin):
""" Accessor for calling chemical methods on series of molecules. """
def __init__(self, data):
self._data = data
def add_hs(self, **kwargs):
return self._data.apply(lambda m: m.add_hs(**kwargs))
def remove_hs(self, **kwargs):
return self._data.apply(lambda m: m.remove_hs(**kwargs))
def visualize(self, fper='morgan', dim_red='tsne', dim_red_kw=None,
**kwargs):
if dim_red_kw is None:
dim_red_kw = {}
if isinstance(dim_red, str):
dim_red = DIM_RED.get(dim_red.lower())(**dim_red_kw)
fper = features.get(fper)
fper.verbose = False
feats = fper.transform(self._data)
feats = feats.fillna(feats.mean())
twod = pd.DataFrame(dim_red.fit_transform(feats))
ax = twod.plot.scatter(x=0, y=1, **kwargs)
ax.set_xticklabels([])
ax.set_xlabel('')
ax.set_yticklabels([])
ax.set_ylabel('')
@property
def atoms(self):
return self._data.apply(lambda m: m.atoms)
def only_contains_mols(ser):
return ser.apply(lambda s: isinstance(s, core.Mol)).all()
class StructureAccessorMixin(object):
""" Mixin to bind chemical methods to objects. """
def _make_structure_accessor(self):
if isinstance(self, Index):
            raise AttributeError('Can only use .mol accessor with molecules, '
                                 'which use np.object_ in scikit-chem.')
if not only_contains_mols(self):
raise AttributeError('Can only use .mol accessor with '
'Series that only contain mols.')
return StructureMethods(self)
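    # Expose the accessor as `.mol`, mirroring pandas' built-in `.str`/`.dt` pattern.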
mol = AccessorProperty(StructureMethods, _make_structure_accessor)
Series.__bases__ += StructureAccessorMixin,
|
An American made, authentic backlit poster case with cool, contemporary design. An outer 3 ½” wide round frame matches or contrasts the inner lockable door frame that provides easy access for poster changes, separated by a 1 ½” matte that serves to accent the poster art. This poster case adds a modern and elegant feel to any home theater. Available with or without a Now Showing or Coming Soon dater area.
Custom sizes available upon request.
Available in satin silver, satin gold, polished silver, polished gold, or gloss black or combinations of the same with any color matting.
|
import os
import sys
import traceback
from qt import QtWidgets, QtGui, QtCore
from zoo.libs.pyqt.syntaxhighlighter import highlighter
from zoo.libs.pyqt.widgets import layouts
class NumberBar(QtWidgets.QWidget):
def __init__(self, edit):
super(NumberBar, self).__init__(edit)
self.edit = edit
self.adjustWidth(1)
def paintEvent(self, event):
self.edit.numberbarPaint(self, event)
super(NumberBar, self).paintEvent(event)
def adjustWidth(self, count):
width = self.fontMetrics().width(unicode(count))
if self.width() != width:
self.setFixedWidth(width)
def updateContents(self, rect, scroll):
if scroll:
self.scroll(0, scroll)
else:
self.update()
class TextEditor(QtWidgets.QPlainTextEdit):
def __init__(self, parent=None):
super(TextEditor, self).__init__(parent=parent)
self.setTextInteractionFlags(QtCore.Qt.TextEditorInteraction)
self.setWordWrapMode(QtGui.QTextOption.NoWrap)
self.setFrameStyle(QtWidgets.QFrame.NoFrame)
self.centerOnScroll()
self.highlight()
self.cursorPositionChanged.connect(self.highlight)
metrics = QtGui.QFontMetrics(self.document().defaultFont())
self.setTabStopWidth(4 * metrics.width(' '))
font = QtGui.QFont("Courier")
font.setStyleHint(QtGui.QFont.Monospace)
font.setFixedPitch(True)
self.setFont(font)
def highlight(self):
hi_selection = QtWidgets.QTextEdit.ExtraSelection()
# hi_selection.format.setBackground(self.palette().dark()) # temp
hi_selection.format.setProperty(QtGui.QTextFormat.FullWidthSelection, True)
hi_selection.cursor = self.textCursor()
hi_selection.cursor.clearSelection()
self.setExtraSelections([hi_selection])
def numberbarPaint(self, number_bar, event):
font_metrics = self.fontMetrics()
current_line = self.document().findBlock(self.textCursor().position()).blockNumber() + 1
block = self.firstVisibleBlock()
line_count = block.blockNumber()
painter = QtGui.QPainter(number_bar)
painter.fillRect(event.rect(), self.palette().base())
# Iterate over all visible text blocks in the document.
while block.isValid():
line_count += 1
block_top = self.blockBoundingGeometry(block).translated(self.contentOffset()).top()
# Check if the position of the block is out side of the visible
# area.
if not block.isVisible() or block_top >= event.rect().bottom():
break
# We want the line number for the selected line to be bold.
if line_count == current_line:
font = painter.font()
font.setBold(True)
painter.setFont(font)
else:
font = painter.font()
font.setBold(False)
painter.setFont(font)
# Draw the line number right justified at the position of the line.
paint_rect = QtCore.QRect(0, block_top, number_bar.width(), font_metrics.height())
painter.drawText(paint_rect, QtCore.Qt.AlignRight, unicode(line_count))
block = block.next()
painter.end()
def wheelEvent(self, event):
"""
Handles zoom in/out of the text.
"""
if event.modifiers() & QtCore.Qt.ControlModifier:
delta = event.delta()
if delta < 0:
self.zoom(-1)
elif delta > 0:
self.zoom(1)
return True
return super(TextEditor, self).wheelEvent(event)
def zoom(self, direction):
"""
Zoom in on the text.
"""
font = self.font()
size = font.pointSize()
if size == -1:
size = font.pixelSize()
size += direction
if size < 7:
size = 7
if size > 50:
return
style = """
QWidget {
font-size: %spt;
}
""" % (size,)
self.setStyleSheet(style)
def keyPressEvent(self, event):
if (event.modifiers() & QtCore.Qt.ShiftModifier and
event.key() in [QtCore.Qt.Key_Enter, QtCore.Qt.Key_Return]):
self.insertPlainText("\n")
event.accept()
elif event.key() == QtCore.Qt.Key_Tab:
# intercept the tab key and insert 4 spaces
self.insertPlainText(" ")
event.accept()
else:
super(TextEditor, self).keyPressEvent(event)
if event.key() in (QtCore.Qt.Key_Return, QtCore.Qt.Key_Enter) and event.modifiers() == QtCore.Qt.ControlModifier:
self.parent().execute()
class Editor(QtWidgets.QFrame):
outputText = QtCore.Signal(str)
def __init__(self, parent=None):
super(Editor, self).__init__(parent=parent)
self.setFrameStyle(QtWidgets.QFrame.StyledPanel | QtWidgets.QFrame.Sunken)
self._locals = {}
self.textEdit = TextEditor(parent=self)
self.numberBar = NumberBar(self.textEdit)
hbox = layouts.hBoxLayout(parent=self)
hbox.addWidget(self.numberBar)
hbox.addWidget(self.textEdit)
self.textEdit.blockCountChanged.connect(self.numberBar.adjustWidth)
self.textEdit.updateRequest.connect(self.numberBar.updateContents)
self.pythonHighlighter = highlighter.highlighterFromJson(os.path.join(os.path.dirname(highlighter.__file__),
"highlightdata.json"),
self.textEdit.document())
def text(self):
return self.textEdit.toPlainText()
def setText(self, text):
self.textEdit.setPlainText(text)
    def isModified(self):
        return self.textEdit.document().isModified()
    def setModified(self, modified):
        self.textEdit.document().setModified(modified)
    def setLineWrapMode(self, mode):
        self.textEdit.setLineWrapMode(mode)
def execute(self):
original_stdout = sys.stdout
class stdoutProxy():
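            """Proxy that forwards writes to a callback; every second write
            (print's separate trailing-newline write) is skipped, so each
            line is emitted once without doubled newlines."""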
def __init__(self, write_func):
self.write_func = write_func
self.skip = False
def write(self, text):
if not self.skip:
stripped_text = text.rstrip('\n')
self.write_func(stripped_text)
self.skip = not self.skip
def flush(self):
pass
sys.stdout = stdoutProxy(self.outputText.emit)
cursor = self.textEdit.textCursor()
script = cursor.selectedText()
script = script.replace(u"\u2029", "\n")
if not script:
            script = str(self.textEdit.toPlainText().strip())
if not script:
return
self.outputText.emit(script)
evalCode = True
try:
try:
outputCode = compile(script, "<string>", "eval")
except SyntaxError:
evalCode = False
outputCode = compile(script, "<string>", "exec")
except Exception:
trace = traceback.format_exc()
self.outputText.emit(trace)
return
# ok we've compiled the code now exec
if evalCode:
try:
results = eval(outputCode, globals(), self._locals)
self.outputText.emit(str(results))
except Exception:
trace = traceback.format_exc()
self.outputText.emit(trace)
else:
try:
exec (outputCode, globals(), self._locals)
except Exception:
trace = traceback.format_exc()
self.outputText.emit(trace)
finally:
sys.stdout = original_stdout
class TabbedEditor(QtWidgets.QTabWidget):
outputText = QtCore.Signal(str)
def __init__(self, parent):
super(TabbedEditor, self).__init__(parent=parent)
self.setTabsClosable(True)
self.setMovable(True)
self.newTabBtn = QtWidgets.QPushButton("+", parent=self)
self.newTabBtn.setMaximumWidth(40)
self.newTabBtn.setToolTip("Add New Tab")
self.setCornerWidget(self.newTabBtn, QtCore.Qt.TopLeftCorner)
self.newTabBtn.clicked.connect(self.addNewEditor)
self.tabCloseRequested.connect(self.closeCurrentTab)
def addNewEditor(self, name=None):
name = name or "New tab"
edit = Editor(parent=self)
self.addTab(edit, name)
edit.outputText.connect(self.outputText.emit)
edit.textEdit.moveCursor(QtGui.QTextCursor.Start)
self.setCurrentIndex(self.count() - 1)
def closeCurrentTab(self, index):
self.removeTab(index)
|
We are ready to make your gaming experience even greater. Voetsak is an Endlessly Fun and addictive iPhone game. Put your sense of rhythm and timing to the test. Voetsak pushes the boundaries of the arcade genre and introduces a collection of unique features with fast-paced stage progression for hours of addictive, intuitive gameplay. It doesn't matter whether you are a seasoned resource-management veteran or a casual player who just wants to enjoy the ride. Simply tap and watch him pound through monsters and vexing obstacles a-plenty while climbing up the screen on an 8-bit soundtrack, with gameplay harking back to NES/arcade brilliance. Tap and avoid obstacles to earn tokens and unlock new features. Unleash your Thumb.
|
import time
import messengerClient
import sendEmailToLibrarian
'''
A class that deals with the messages we receive from users
'''
class ConversationHandler():
'''
create a new conversation handler with a given database client
'''
def __init__(self, database_client):
self.database_client = database_client
self.checkout_words = ['check', 'checking', 'checked', 'check out', 'checkout', 'checking out', 'take', 'took', 'taking', 'grabbing', 'grab', 'grabbed', 'checked out', 'borrow', 'borrowed', 'want']
self.return_words = ['return', 'returned','returning','brought', 'bring', 'bringing', 'dropping', 'dropped', 'took back', 'left', 'done', 'done with', 'finished']
self.closing_words = ['thanks', 'thank', 'ok', 'bye', 'goodbye', 'good-bye', 'okay', 'cancel', 'stop', 'fuck', 'yay']
self.available_words = ['available', 'there']
self.help_words = ['how do i', 'help', 'manual', 'documentation', 'how to', 'trouble', 'confused', 'what do i do with', 'what should i do', "i don't know"]
self.NO_CONTACT = 0
self.SENT_GREETING = 1
self.WANT_CHECKOUT = 2
self.CONFIRM_TOOL = 4
self.HOW_LONG = 5
self.CLOSING = 6
self.WANT_RETURN = 7
self.CONFIRM_TOOL_RETURN = 8
self.AVAILABILITY_QUESTION = 9
self.SEND_LIST = 10
'''
searches through a message looking for names of tools from the tools database
returns a list of tool names found, empty if none found
'''
def find_tools_in_message(self, message):
found_tools = []
tools_list = self.database_client.get_all_tools()
#loop through list looking for tool names in message
for tool in tools_list:
if tool['name'] in message:
found_tools.append(tool)
else:
for alt_name in tool['alternate_names']:
if alt_name in message:
found_tools.append(tool)
return found_tools
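    # e.g. find_tools_in_message("can i borrow the drill") might return the
    # drill's tool document, assuming a tool named "drill" exists in the database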
'''
creates a string of all tools a user is attempting to check out
'''
def make_tool_string(self, tool_list):
tool_string = ''
print('temp_tools', tool_list)
for tool in tool_list:
tool_string = tool_string + tool['name'] + " and " # allow for a list of tools
# remove final and from string
tool_string = tool_string[:-5]
print('tool string:', tool_string)
return tool_string
'''
Parses the loan time quick reply message to store a due_date
for the tool/s the user wants to check out. uses import time
TODO: handle the case when we somehow get a different message
than the quick reply options were expecting in a way other than
making the due date "0"
'''
def parse_due_date(self, message):
due_date = 0
SECONDS_IN_DAY = 3600*24
# they want a 24 hour loan
if message == 'yes':
due_date = int(time.time()) + 120 # !!!!!! CHANGE THIS BACK TO SECONDS_IN_DAY!!!!!!
# they want a 12 hour loan
elif message == '12 hours instead':
due_date = int(time.time()) + (SECONDS_IN_DAY/2)
#they want a 3 day loan
elif message == '3 days instead':
due_date = int(time.time()) + (SECONDS_IN_DAY*3)
return due_date
'''
Uses the user's stage to parse the message and determine how to reply
takes the message text string and a user (in dictionary format)
returns a tuple:
updated_user, response_text, quickreply
updated_user is the user dictionary, possibly changed or updated_user
response_text is the bot's response message
quickreply is a field indicating whether this should be a quickreply response
it either has the None value (not a quickreply message)
or a list of quickreply options
'''
def determine_response_for_user(self, message, user):
print('determine_response_for_user')
if any(word in message for word in self.closing_words):
response = "Glad to help!"
user['stage'] = self.NO_CONTACT
print(user['stage'])
return user, response, None
if any(word in message for word in self.help_words):
response = ''
tool_help_wanted = self.find_tools_in_message(message)
if len(tool_help_wanted) >0:
resource_links = ''
for tool in tool_help_wanted:
resource_links += ' ' + tool['resource_link']
response ="The Library gave me some resources that might be helpful, see if this is useful:" + resource_links
else:
response ="😵 I have no clue how to help you with this one! I've passed your question along to the librarians. Hopefully they know what to do and will contact you soon. 😅"
#TODO: send email to librarian here
return user, response, None
# this needs to be located above the NO_CONTACT check
# because if they said anything that's NOT "view more", then
# it needs to be treated like a NO_CONTACT message context
if user['stage'] == self.SEND_LIST:
user['stage'] = self.NO_CONTACT
print(user['stage'])
if message == 'view more':
response = "Check The Library's online database for the full tool list: https://olin.tind.io/"
return user, response, None
#if the user is initiating contact
if user['stage'] == self.NO_CONTACT:
# trying to return
if any(word in message for word in self.return_words):
user['stage'] = self.WANT_RETURN
print(user['stage'])
# checking availability status
elif any(word in message for word in self.available_words):
tools_wanted = self.find_tools_in_message(message)
response_string = ''
quickreply = None
if len(tools_wanted) >0:
unavailable_tools = []
for tool in tools_wanted:
available_modifier = ''
if tool['current_user'] != None:
available_modifier = 'not '
unavailable_tools.append(tool)
response_string += 'the {} is {}available and '.format(tool['name'], available_modifier)
response_string = response_string[:-5]
if len(unavailable_tools) > 0:
question = 'Would you like me to ask the tool borrowers to return them?'
response_string = response_string + '. ' + question
user['temp_tools'] = unavailable_tools
user['stage'] = self.AVAILABILITY_QUESTION
print(user['stage'])
quickreply = ['yes', 'no']
else:
response_string = "SEND_LIST"
user['stage'] = self.SEND_LIST
print(user['stage'])
return user, response_string, quickreply
# checking out
elif any(word in message for word in self.checkout_words):
user['stage'] = self.WANT_CHECKOUT
print(user['stage'])
else:
# send greeting and ask what tool
response = "😄 Hi there! I'm Loan Wrangler, what can I help you with?"
# user['stage'] = self.SENT_GREETING
return user, response, None
# if the user has asked about availability and we're finding out if we should
# send a reminder to the borrowers or not
if user['stage'] == self.AVAILABILITY_QUESTION:
if message == 'yes':
for tool in user['temp_tools']:
borrower_id = tool['current_user']
borrower_sender_id = self.database_client.find_user('_id', borrower_id)['sender_id']
# this is not the best code structure
# because we have this weird situation where the user we want to send a message to
# is not the user who sent us a message
messenger_client = messengerClient.MessengerClient()
reminder = "Hey, someone's interested in borrowing the {} that you have checked out. If you're done with it, could you bring it back?".format(tool['name'])
messenger_client.send_message(borrower_sender_id, reminder, None)
user['stage'] = self.NO_CONTACT
print(user['stage'])
user['temp_tools'] = []
return user, "Alright, I let them know someone's looking for it! 🔎", None
else:
user['stage'] = self.NO_CONTACT
print(user['stage'])
user['temp_tools'] = []
return user, "☺️ Alrighty. Is there something else I can help with?", None
#if the user wants to check out something
if user['stage'] == self.WANT_CHECKOUT or user['stage'] == self.SENT_GREETING:
tools_wanted = self.find_tools_in_message(message)
user['temp_tools'] = tools_wanted
#if we found a tool name/s in the message
if len(tools_wanted) > 0:
tool_string = self.make_tool_string(user['temp_tools'])
print('tool string in line:', tool_string)
response = "Sounds like you want to check out a {}, is that correct?".format(tool_string)
user['stage'] = self.CONFIRM_TOOL
print(user['stage'])
return user, response, ['yes','no']
#if we could not identify a tool name/s in the message
else:
user['stage'] = self.NO_CONTACT
print(user['stage'])
return user, "What can I do for ya?", None
#we check that we parsed the correct tool/s...
if user['stage'] == self.CONFIRM_TOOL:
#...if so, we find out how long the loan will be
if message == 'yes':
available = True
tools_out = []
# check if those tools are in right now
for tool in user['temp_tools']:
if tool['current_user'] != None:
available = False
tools_out.append(tool)
if available:
response = "Great! Is a loan time of 1 day okay?"
user['stage'] = self.HOW_LONG
print(user['stage'])
return user, response, ['yes', '12 hours instead', '3 days instead']
else:
response = "😓 Sorry, the following tools are not available right now: {}".format(self.make_tool_string(tools_out))
user['stage'] = self.NO_CONTACT
print(user['stage'])
return user, response, None
#...if not, we try again
else:
user['temp_tools'] = []
user['stage'] = self.NO_CONTACT
print(user['stage'])
return user, "😵 Sorry I misunderstood. What do you want to do?", None
#update user and tool db based on the loan time
if user['stage'] == self.HOW_LONG:
tool_string = self.make_tool_string(user['temp_tools'])
for tool in user['temp_tools']:
tool['current_user'] = user['_id']
tool['current_due_date'] = self.parse_due_date(message)
self.database_client.update_tool(tool)
user['tools'].append(tool['_id'])
# TODO: how to handle loan time if they are checking out more than one tool
#finish the interaction and reset the conversation stage
response = "😎 You're all set! I'll remind you to return the {} before it's due.".format(tool_string)
user['temp_tools'] = []
user['stage'] = self.NO_CONTACT
print(user['stage'])
return user, response, None
if user['stage'] == self.CONFIRM_TOOL_RETURN:
#...if so, we find out how long the loan will be
if message == 'yes':
tool_string = self.make_tool_string(user['temp_tools'])
# TODO: tell them if they're trying to return something they don't have
#update tool
for tool in user['temp_tools']:
if tool['current_user'] == user['_id']:
tool['current_user'] = None
tool['current_due_date'] = None
self.database_client.update_tool(tool)
# update user tool list
for checked_out_tool_id in user['tools']:
if checked_out_tool_id == tool['_id']:
user['tools'].remove(checked_out_tool_id)
user['temp_tools'] = []
user['stage'] = self.NO_CONTACT
print(user['stage'])
return user, "✨🏆✨ Thanks!!!! I'll let The Library know the {} has returned.".format(tool_string), None
#...if not, we try again
else:
user['temp_tools'] = []
user['stage'] = self.WANT_RETURN
print(user['stage'])
return user, "😓 Sorry I misunderstood. What tool do you want to return?", None
if user['stage'] == self.WANT_RETURN:
tools_returning = self.find_tools_in_message(message)
user['temp_tools'] = tools_returning
#if we found a tool name/s in the message
if len(tools_returning) > 0:
tool_string = self.make_tool_string(user['temp_tools'])
print('tool string in line:', tool_string)
response = "You're returning a {}, is that right?".format(tool_string)
user['stage'] = self.CONFIRM_TOOL_RETURN
print(user['stage'])
return user, response, ['yes','no']
#if we could not identify a tool name/s in the message
else:
user['stage'] = self.WANT_RETURN
print(user['stage'])
return user, "Which tool did you want to return?", None
        # Fallback: preserve the (user, response, quickreply) return contract
        print('I GOT TO THE END, OH NO')
        return user, "Sorry, I lost track of our conversation. What would you like to do?", None
## TODO: check for cancelling
|
A handsome hat by Bullhide!
Treat yourself to a premium hat from Bullhide by Montecarlo Hat Company! The allure of this hat goes beyond its good looks - Bullhide cowboy hats feature superior finishing, innovative style, and exclusive trimmings. The Boot Hill hat features a fur blend felt construction. Hat brim measures 4 1/4". Cattleman crease crown measures 4 3/4". Imported.
|
"""
Global variables are loaded or set here:
DEBUG
PORT
API_NAME
DB_URL
APIDOC_OBJ
HYDRUS_SERVER_URL
FOUND_DOC
"""
import os
import json
import yaml
import logging
from os.path import abspath, dirname
from pathlib import Path
from importlib.machinery import SourceFileLoader
from hydra_openapi_parser.openapi_parser import parse
logger = logging.getLogger(__file__)
try:
DEBUG = bool(os.environ["DEBUG"])
except KeyError:
DEBUG = False
# load form environment (as many globals as possible shall be in
# environment configuration)
PORT = int(os.environ.get("PORT", 8080))
API_NAME = os.environ.get("API_NAME", "api")
DB_URL = os.environ.get("DB_URL", "sqlite:///database.db")
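# Example (illustrative): override the defaults via the environment before
# starting the server, e.g. PORT=8000 API_NAME=serverapi DB_URL=sqlite:///my.db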
def get_apidoc_path():
"""
Get the path of the apidoc.
:return - Tuple (path, boolean). path denotes path of the apidoc.
If apidoc is not present at specified path then it falls back at sample apidoc.
boolean is true if the apidoc is present at the specified path.
boolean is false if sample apidoc is being used.
"""
cwd_path = Path(dirname(dirname(abspath(__file__))))
try:
apidoc_env = os.environ["APIDOC_REL_PATH"]
apidoc_path = cwd_path / Path(apidoc_env)
found_doc = True
except KeyError:
found_doc = False
apidoc_path = cwd_path / "hydrus" / "samples" / "hydra_doc_sample.py"
return (apidoc_path, found_doc)
def load_apidoc(path):
"""
Parses docs of .jsonld, .py, .yaml format and loads apidoc from the given path.
:param path - Path for the apidoc to be loaded
:return - apidoc
:Raises:
FileNotFoundError: If the wrong path of hydradoc is specified.
BaseException: If hydradoc is specified in wrong format.
"""
path = str(path)
try:
apidoc_format = path.split(".")[-1]
if apidoc_format == "jsonld":
with open(path, "r") as f:
api_doc = json.load(f)
elif apidoc_format == "py":
api_doc = SourceFileLoader("doc", path).load_module().doc
elif apidoc_format == "yaml":
with open(path, "r") as stream:
                api_doc = parse(yaml.safe_load(stream))
else:
            raise ValueError(
                "Error - hydradoc format not supported. "
                "The supported formats are .py, .jsonld and .yaml"
            )
logger.info(f"APIDOC path loaded from: {path}")
return api_doc
except FileNotFoundError:
logger.critical(
f"No Hydra ApiDoc file to load has been found"
f" at {path}. Cannot set APIDOC_OBJ"
)
raise
except BaseException:
logger.critical("Problem parsing specified hydradoc file")
raise
def get_host_domain():
"""
Returns host domain.
"""
HOST_DOMAIN = f"http://localhost:{PORT}"
return HOST_DOMAIN
(path, FOUND_DOC) = get_apidoc_path()
APIDOC_OBJ = load_apidoc(path)
HYDRUS_SERVER_URL = f"http://localhost:{PORT}/"
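For reference, a minimal sketch of configuring this module through the environment (the module path and apidoc path below are assumptions, not guaranteed by this file):

import os

# Must be set before this module is imported, since the globals above
# are resolved at import time.
os.environ["PORT"] = "8000"
os.environ["APIDOC_REL_PATH"] = "my_api/doc.jsonld"  # hypothetical path

from hydrus import conf  # assumed module name for this file

print(conf.HYDRUS_SERVER_URL)  # -> http://localhost:8000/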
|
Since Curatr’s conception in 2010, Social Learning has been at the forefront of the Curatr story, with HT2 Labs striving to create exceptional learning experiences for learners around the globe.
In December 2018, HT2 Labs were awarded Gold for Best Advance in Social Learning for Curatr LXP, the Social Learning Platform, at the Brandon Hall Excellence Awards.
Brandon Hall are world-class leaders in research and data analysis across Learning & Development, Talent Management, Leadership Development, Talent Acquisition and Workforce Management.
Following this recognition from Brandon Hall, we wanted to highlight a few reasons why a Social Learning Platform can be helpful.
Social Learning is a learning concept which recognizes that undertaking training/educational courses in a collaborative forum, in which ideas can be discussed and concepts freely explored, contributes to much higher engagement and retention than more traditional teacher/learner environments.
|
# Copyright (c) 2016 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from UM.Application import Application
from UM.Settings.ContainerRegistry import ContainerRegistry
from cura.QualityManager import QualityManager
from cura.Settings.ProfilesModel import ProfilesModel
from cura.Settings.ExtruderManager import ExtruderManager
## QML Model for listing the current list of valid quality and quality changes profiles.
#
class QualityAndUserProfilesModel(ProfilesModel):
def __init__(self, parent = None):
super().__init__(parent)
self._empty_quality = ContainerRegistry.getInstance().findInstanceContainers(id = "empty_quality")[0]
## Fetch the list of containers to display.
#
# See UM.Settings.Models.InstanceContainersModel._fetchInstanceContainers().
def _fetchInstanceContainers(self):
global_container_stack = Application.getInstance().getGlobalContainerStack()
if not global_container_stack:
return {}, {}
# Fetch the list of quality changes.
quality_manager = QualityManager.getInstance()
machine_definition = quality_manager.getParentMachineDefinition(global_container_stack.definition)
quality_changes_list = quality_manager.findAllQualityChangesForMachine(machine_definition)
extruder_manager = ExtruderManager.getInstance()
active_extruder = extruder_manager.getActiveExtruderStack()
extruder_stacks = self._getOrderedExtruderStacksList()
# Fetch the list of usable qualities across all extruders.
    # The actual list of quality profiles comes from the first extruder in the extruder list.
quality_list = quality_manager.findAllUsableQualitiesForMachineAndExtruders(global_container_stack, extruder_stacks)
# Filter the quality_change by the list of available quality_types
    quality_type_set = {x.getMetaDataEntry("quality_type") for x in quality_list}
# Also show custom profiles based on "Not Supported" quality profile
quality_type_set.add(self._empty_quality.getMetaDataEntry("quality_type"))
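    # Keep only quality changes whose quality type is usable on this machine
    # and, when the profile is extruder-specific, whose extruder matches the
    # active extruder (by quality_definition or id).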
filtered_quality_changes = {qc.getId(): qc for qc in quality_changes_list if
qc.getMetaDataEntry("quality_type") in quality_type_set and
((qc.getMetaDataEntry("extruder") == active_extruder.definition.getMetaDataEntry("quality_definition") or
qc.getMetaDataEntry("extruder") == active_extruder.definition.getId()) if qc.getMetaDataEntry("extruder") is not None else True)}
result = filtered_quality_changes
for q in quality_list:
if q.getId() != "empty_quality":
result[q.getId()] = q
return result, {} #Only return true profiles for now, no metadata. The quality manager is not able to get only metadata yet.
|
When you reside in Japan with a status of residence for a mid-to-long term (over three months), a Residence Card will be issued by the Minister of Justice in Japan.
This card shows basic personal information with the person’s photograph, such as name, date of birth, nationality and home address. For this reason, this Card can be used as an ID card inside Japan and you do not always need to carry your passport everywhere.
Where can you get the Residence Card?
If you land in Japan through the following airports at your first entry, the Card will be issued there at the time of the immigration examination.
For other airports, a designated stamp will be affixed to your passport and you need to present this passport when registering your address of the residence. The Residence Card will be delivered to the registered address by post at a later date.
How can you register your home address? After you decide on the residence in Japan, you must report the address to the municipal/ward office within 14 days. The address is added to the back side of the Residence Card.
Residence Cards are very useful for non-Japanese residents to prove that they are staying in Japan legally, and they are used in many situations, both business and personal.
|
import sqlite3 as sql
import os
def _url_for_id(id):
return 'http://www.j-archive.com/showplayer.php?player_id=' + str(id)
def the_gender_binary():
print 'Manually classifying genders...'
database_path = os.path.abspath('../archive.db')
con = sql.connect(database_path)
cur = con.cursor()
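    # 'andy' is the "androgynous" label emitted by common gender-guessing
    # libraries; anything not already 'male' or 'female' needs manual review.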
    cur.execute("SELECT * FROM Players WHERE Gender = 'andy' OR (Gender <> 'female' AND Gender <> 'male')")
unknown_genders = cur.fetchall()
for player in unknown_genders:
id, name, classification = player
check = classification.split('_')
if check[0] == 'mostly':
classification = check[1]
while ((classification != 'male') and
(classification != 'female') and
(classification != 'skip')):
        print name, 'was classified as', classification
        print _url_for_id(id)
        classification = raw_input('What gender? ')
if classification != 'skip':
cur.execute('UPDATE Players SET Gender=? WHERE Id=?',
(classification, id))
con.commit()
con.close()
print 'Genders classified.'
if __name__ == '__main__':
the_gender_binary()
|
…to successfully navigate through the entire work-life cycle. With the addition this year of GA and Vettery, our comprehensive HR solutions offering is broader than that of any competitor.
By rationalising our brand portfolio we have streamlined our operations and created greater clarity for the Adecco Group’s customers, enabling the Group to more effectively leverage relationships with its global lead brands.
The world’s leading workforce solutions company, offering temporary staffing, permanent placement and outsourcing across all sectors.
A digital staffing service, delivering jobs on demand in hospitality and catering, promotions and events as well as retail.
A global recruitment specialist, offering interim and permanent placements for senior management and executive roles.
The leading global source for training and up/reskilling in high-demand fields like data science, technology, design, and business.
The world’s leading talent development and transition company, helping individuals and organisations navigate workforce change.
The world’s leading provider of consulting, outsourcing, staffing and project services across IT, engineering and life sciences.
A leader in contingent and permanent workforce planning and talent advisory solutions across all industries.
A global recruitment leader, connecting management professionals with roles and opportunities across all industries.
A talent recruitment platform that uses machine learning and real-time data to match highly qualified job-seekers with top companies.
An online marketplace enabling flexible connections between freelancers and their clients.
Adecco Group businesses are increasingly working together to better serve clients and candidates.
Our complete brand ecosystem covers the entire work-life cycle. One Fortune 100 multinational tech company has harnessed the benefits of this unique model to manage its full HR needs through one trusted business partner. Through one central contact point, we took a holistic approach to assess its complex business and created a tailor-made solution to drive performance by tapping into market-leading services and expertise within our harmonised brand structure.
Through Modis, we provide IT managed services for the client’s ‘service desks’ in Bulgaria, Italy and Germany. With General Assembly, we reskill the client’s existing US-based talent. YOSS gives the client immediate access to in-demand, highly-skilled freelancers for ad hoc projects. Adecco caters for any general staffing needs across the globe, while Lee Hecht Harrison has established a career centre and active placement approach to manage talent development and workforce rebalancing in Europe and Asia.
This individualised 360° service offering provides the flexibility, simplicity and scope this client requires to attract talent and fuel growth in a changing world.
|
#!/usr/bin/env python
from __future__ import division
__author__ = 'Horea Christian'
from os import listdir, path
from lefunctions import get_dataframes_for_dp
from scipy.stats import ttest_ind, norm, sem, f_oneway
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import axis
from matplotlib.font_manager import FontProperties
from pylab import figure, show, errorbar, setp, legend
#from statsmodels.stats.anova import anova_lm
globalpath = '~/Data/shared/2att/' #root of results
bh_results = 'bh/' # behavioural test results
cq_results = 'cq/' # questionnaire results
globalpath = path.expanduser(globalpath)
bhpath = globalpath + bh_results
cqpath = globalpath + cq_results
files = [lefile for lefile in listdir(bhpath) if lefile.endswith('.csv')]
ids = [t.split('_',2)[0]+'_'+t.split('_',2)[1] for t in files]
ids = np.unique(ids)
spec = ['6245247_f']
h_dist = ['1236345_f','6779353_f','7310001_f','7714775_m','7816097_m','7865828_m','7922847_m']
l_dist = ['1975801_m','4724273_f','6268973_m','8963557_f','8286497_m','8963557_m','9651558_m','8240877_m','6887665_m','5559429_f','8582941_f','8582941_m','9302438_f','4276763_f','3878418_m','3537898_f','1247497_f','8717741_m','4744495_f','7117377_m']
test = ['chr1_f','chr2_f']
id_list = l_dist
isspec=False
t_cr_au,t_fa_au,t_ht_au,t_ms_au,t_cr_aa,t_fa_aa,t_ht_aa,t_ms_aa,t_cr_uu,t_fa_uu,t_ht_uu,t_ms_uu,all_au_dp,all_aa_dp,all_uu_dp = get_dataframes_for_dp(id_list, bhpath)
if isspec:
_,_,_,_,_,_,_,_,_,_,_,_,s_dp_au,s_dp_aa,s_dp_uu = get_dataframes_for_dp(spec, bhpath)
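# Signal-detection sensitivity: d' = z(hit rate) - z(false-alarm rate),
# where z is the inverse of the standard normal CDF (scipy's norm.ppf).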
t_hr_au = t_ht_au / (t_ht_au+t_ms_au)
t_far_au = t_fa_au / (t_cr_au+t_fa_au)
t_zhr_au = norm.ppf(t_hr_au)
t_zfar_au = norm.ppf(t_far_au)
t_dp_au = t_zhr_au-t_zfar_au
t_hr_aa = t_ht_aa / (t_ht_aa+t_ms_aa)
t_far_aa = t_fa_aa / (t_cr_aa+t_fa_aa)
t_zhr_aa = norm.ppf(t_hr_aa)
t_zfar_aa = norm.ppf(t_far_aa)
t_dp_aa = t_zhr_aa-t_zfar_aa
t_hr_uu = t_ht_uu / (t_ht_uu+t_ms_uu)
t_far_uu = t_fa_uu / (t_cr_uu+t_fa_uu)
t_zhr_uu = norm.ppf(t_hr_uu)
t_zfar_uu = norm.ppf(t_far_uu)
t_dp_uu = t_zhr_uu-t_zfar_uu
ids = sorted(id_list)
pos_ids = np.arange(len(ids))
fig = figure(figsize=(pos_ids.max(), 5), dpi=80,facecolor='#eeeeee',tight_layout=True)
ax=fig.add_subplot(1,1,1)
width = 0.7
ax.yaxis.grid(True, linestyle='-', which='major', color='#dddddd',alpha=0.5, zorder = 1)
au_bars = plt.bar(pos_ids, all_au_dp, width/3 ,color='m', alpha=0.4, zorder = 1)
aa_bars = plt.bar(pos_ids+width/3, all_aa_dp, width/3 ,color='#488C0F', alpha=0.4, zorder = 1)
uu_bars = plt.bar(pos_ids+width*2/3, all_uu_dp, width/3 ,color='#0F8C2F', alpha=0.4, zorder = 1)
au_t_bar = plt.bar(pos_ids[-1]+1, np.mean(all_au_dp), width/3 ,color='m', alpha=0.8, zorder = 1)
au_t_err = errorbar(pos_ids[-1]+1+(width/6), np.mean(all_au_dp), yerr=sem(all_au_dp), ecolor='0.1', elinewidth=3, capsize=0, linestyle='None', zorder = 2)
aa_t_bar = plt.bar(pos_ids[-1]+1+width/3, np.mean(all_aa_dp), width/3 ,color='#488C0F', alpha=0.8, zorder = 1)
aa_t_err = errorbar(pos_ids[-1]+1+(width*3/6), np.mean(all_aa_dp), yerr=sem(all_aa_dp), ecolor='0.1', elinewidth=3, capsize=0, linestyle='None', zorder = 2)
uu_t_bar = plt.bar(pos_ids[-1]+1+width*2/3, np.mean(all_uu_dp), width/3,color='#0F8C2F', alpha=0.8, zorder = 1)
uu_t_err = errorbar(pos_ids[-1]+1+(width*5/6), np.mean(all_uu_dp), yerr=sem(all_uu_dp), ecolor='0.1', elinewidth=3, capsize=0, linestyle='None', zorder = 2)
if isspec:
s_au_bars = plt.bar(pos_ids[-1]+2, s_dp_au, width/3 ,color='m', alpha=0.4, zorder = 1)
s_aa_bars = plt.bar(pos_ids[-1]+2+width/3, s_dp_aa, width/3 ,color='#488C0F', alpha=0.4, zorder = 1)
s_uu_bars = plt.bar(pos_ids[-1]+2+width*2/3, s_dp_uu, width/3 ,color='#0F8C2F', alpha=0.4, zorder = 1)
print(f_oneway(all_au_dp,all_aa_dp,all_uu_dp))
if isspec:
    ids = ids + ['total'] + spec
else:
    ids = ids + ['total']
pos_ids = np.arange(len(ids))
ax.set_xlim(0, pos_ids.max()+0.7)
ax.set_ylim(0,9)
ax.set_ylabel('Sensitivity Index (d\')')
ax.set_xlabel('Participant ID')
ax.set_xticks(pos_ids + width/2)
ax.set_xticklabels(ids,fontsize=9,rotation=30)
#setp(ax.set_xticklabels, 'rotation', 'vertical')
for tick in ax.axes.get_xticklines():
tick.set_visible(False)
axis.Axis.zoom(ax.xaxis, -0.3)
legend((au_t_bar,aa_t_bar,uu_t_bar),('Mixed faces','Attractive faces only','Unattractive faces only'), loc='upper right', shadow=False, frameon=False, prop= FontProperties(size='11'))
show()
|
We can supply Altera part# DK-DEV-10M50-A. Use the request quote form to request DK-DEV-10M50-A price and lead time. RANTLE EAST ELECTRONIC - ICRFQ.com is an independent stocking distributor of electronic components, with over 5 million line items of available electronic components that can ship with short lead times, including over 300 thousand part numbers in stock for immediate delivery, which may include part number DK-DEV-10M50-A. The price and lead time for DK-DEV-10M50-A depend on the quantity required, availability and warehouse location. Contact us today and our sales representative will provide you with price and delivery for Part# DK-DEV-10M50-A. We look forward to doing business with you.
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
_\
\
O O-O
O O
O
Raspberry Potter
Version 0.1.5
Use your own wand or your interactive Harry Potter wands to control the IoT.
Updated for OpenCV 3.2
If you have an older version of OpenCV installed, please uninstall fully (check your cv2 version in python) and then install OpenCV following the guide here (but using version 3.2):
https://imaginghub.com/projects/144-installing-opencv-3-on-raspberry-pi-3/documentation
Copyright (c) 2015-2017 Sean O'Brien. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import io
import sys
sys.path.insert(1, '/usr/lib/python2.7/dist-packages/picamera')
import picamera
import numpy as np
import cv2
import threading
import math
import time
import pigpio
GPIOS = 32
MODES = ["INPUT", "OUTPUT", "ALT5", "ALT4", "ALT0", "ALT1", "ALT2", "ALT3"]
pi = pigpio.pi()
#pin for Powerswitch (Lumos,Nox)
switch_pin = 16
pi.set_mode(switch_pin,pigpio.OUTPUT)
#pin for Trinket (Colovario)
trinket_pin = 12
pi.set_mode(trinket_pin,pigpio.OUTPUT)
# Parameters for image processing
lk_params = dict( winSize = (15,15),
maxLevel = 2,
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
dilation_params = (5, 5)
movement_threshold = 80
# Scan starts camera input and runs FindNewPoints; it is invoked at the
# bottom of the file, after all functions have been defined.
def Scan():
    global cam, stream
    cv2.namedWindow("Raspberry Potter")
    stream = io.BytesIO()
    cam = picamera.PiCamera()
    cam.resolution = (640, 480)
    cam.framerate = 24
    try:
        while True:
            FindNewPoints()
    except KeyboardInterrupt:
        End()
        sys.exit()
#FindWand is called to find all potential wands in a scene. These are then tracked as points for movement. The scene is reset every 3 seconds.
def FindNewPoints():
    global cam, stream, old_frame, old_gray, p0, mask, color, ig, img, frame
try:
try:
old_frame = cam.capture(stream, format='jpeg')
except:
print("resetting points")
data = np.fromstring(stream.getvalue(), dtype=np.uint8)
old_frame = cv2.imdecode(data, 1)
cv2.flip(old_frame,1,old_frame)
old_gray = cv2.cvtColor(old_frame,cv2.COLOR_BGR2GRAY)
#cv2.equalizeHist(old_gray,old_gray)
#old_gray = cv2.GaussianBlur(old_gray,(9,9),1.5)
#dilate_kernel = np.ones(dilation_params, np.uint8)
#old_gray = cv2.dilate(old_gray, dilate_kernel, iterations=1)
#TODO: trained image recognition
p0 = cv2.HoughCircles(old_gray,cv2.HOUGH_GRADIENT,3,100,param1=100,param2=30,minRadius=4,maxRadius=15)
p0.shape = (p0.shape[1], 1, p0.shape[2])
p0 = p0[:,:,0:2]
mask = np.zeros_like(old_frame)
ig = [[0] for x in range(20)]
print("finding...")
TrackWand()
#This resets the scene every three seconds
threading.Timer(3, FindNewPoints).start()
except:
e = sys.exc_info()[1]
print("FindWand Error: %s" % e )
End()
        sys.exit()
def TrackWand():
    global cam, stream, old_frame, old_gray, p0, mask, color, ig, img, frame
color = (0,0,255)
try:
old_frame = cam.capture(stream, format='jpeg')
except:
print("resetting points")
data = np.fromstring(stream.getvalue(), dtype=np.uint8)
old_frame = cv2.imdecode(data, 1)
cv2.flip(old_frame,1,old_frame)
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
#cv2.equalizeHist(old_gray,old_gray)
#old_gray = cv2.GaussianBlur(old_gray,(9,9),1.5)
#dilate_kernel = np.ones(dilation_params, np.uint8)
#old_gray = cv2.dilate(old_gray, dilate_kernel, iterations=1)
# Take first frame and find circles in it
p0 = cv2.HoughCircles(old_gray,cv2.HOUGH_GRADIENT,3,100,param1=100,param2=30,minRadius=4,maxRadius=15)
try:
p0.shape = (p0.shape[1], 1, p0.shape[2])
p0 = p0[:,:,0:2]
except:
print("No points found")
# Create a mask image for drawing purposes
mask = np.zeros_like(old_frame)
while True:
frame = cam.capture(stream, format='jpeg')
data2 = np.fromstring(stream.getvalue(), dtype=np.uint8)
frame = cv2.imdecode(data2, 1)
cv2.flip(frame,1,frame)
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#equalizeHist(frame_gray,frame_gray)
#frame_gray = GaussianBlur(frame_gray,(9,9),1.5)
#dilate_kernel = np.ones(dilation_params, np.uint8)
#frame_gray = cv2.dilate(frame_gray, dilate_kernel, iterations=1)
try:
# calculate optical flow
p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
# Select good points
good_new = p1[st==1]
good_old = p0[st==1]
# draw the tracks
for i,(new,old) in enumerate(zip(good_new,good_old)):
a,b = new.ravel()
c,d = old.ravel()
# only try to detect gesture on highly-rated points (below 15)
if (i<15):
IsGesture(a,b,c,d,i)
dist = math.hypot(a - c, b - d)
                    if (dist<movement_threshold):
cv2.line(mask, (a,b),(c,d),(0,255,0), 2)
cv2.circle(frame,(a,b),5,color,-1)
cv2.putText(frame, str(i), (a,b), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0,0,255))
except IndexError:
print("Index error")
End()
break
except:
e = sys.exc_info()[0]
print("TrackWand Error: %s" % e )
End()
break
img = cv2.add(frame,mask)
cv2.putText(img, "Press ESC to close.", (5, 25),
cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255,255,255))
cv2.imshow("Raspberry Potter", frame)
# get next frame
frame = cam.capture(stream, format='jpeg')
data3 = np.fromstring(stream.getvalue(), dtype=np.uint8)
frame = cv2.imdecode(data3, 1)
# Now update the previous frame and previous points
old_gray = frame_gray.copy()
p0 = good_new.reshape(-1,1,2)
#Spell is called to translate a named spell into GPIO or other actions
def Spell(spell):
    global ig
    #clear all checks (20 slots, matching the initialization in FindNewPoints)
    ig = [[0] for x in range(20)]
#Invoke IoT (or any other) actions here
cv2.putText(mask, spell, (5, 25),cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255,0,0))
if (spell=="Colovaria"):
print("GPIO trinket")
pi.write(trinket_pin,0)
time.sleep(1)
pi.write(trinket_pin,1)
elif (spell=="Lumos"):
print("GPIO ON")
pi.write(switch_pin,1)
elif (spell=="Nox"):
print("GPIO OFF")
pi.write(switch_pin,0)
print("CAST: %s" %spell)
#IsGesture is called to determine whether a gesture is found within tracked points
def IsGesture(a,b,c,d,i):
print("point: %s" % i)
#record basic movements - TODO: trained gestures
if ((a<(c-5))&(abs(b-d)<1)):
ig[i].append("left")
elif ((c<(a-5))&(abs(b-d)<1)):
ig[i].append("right")
elif ((b<(d-5))&(abs(a-c)<5)):
ig[i].append("up")
elif ((d<(b-5))&(abs(a-c)<5)):
ig[i].append("down")
#check for gesture patterns in array
astr = ''.join(map(str, ig[i]))
if "rightup" in astr:
Spell("Lumos")
elif "rightdown" in astr:
Spell("Nox")
elif "leftdown" in astr:
Spell("Colovaria")
print(astr)
def End():
    cam.close()
    cv2.destroyAllWindows()

if __name__ == '__main__':
    Scan()
|
Years of research clearly show that exercise is one of the best prescriptions for protecting and healing the body from the ailments and physical problems that emerge with age. However, it is among the least-practiced activities in the United States.
Three-fourths of the nation’s population does not engage in the recommended 30-minutes per day of exercise, and two-thirds of Americans are either overweight or obese. Such sedentary lifestyles, and a lack of good nutrition, can lead to life-threatening diseases, particularly among today’s youth.
Overall, exercise (aerobic activities and/or strength training) has been shown to help slow the body's natural aging process by: improving blood flow to the brain and heart, decreasing muscle decay, producing stronger bones and joints, improving brain activity, and increasing immune system response. Exercise can also delay the onset of, and/or reduce one's risk of developing, diabetes and some cancers; exercise affects the body's insulin sensitivity and glucose transport by triggering glucose uptake by the muscles, which helps regulate insulin production.
Though more research is needed on the effects of exercise on aging and on disease development, it has become clear that the body’s best bet for remaining or becoming healthy is exercise.
|
"""Provides TopLevel views; i.e. Windows"""
from julesTk.view import tk, BaseView
from julesTk.view.viewset import BaseViewSet
__author__ = "Joeri Jongbloets <joeri@jongbloets>"
class Window(tk.Toplevel, BaseView):
def __init__(self, parent, controller):
tk.Toplevel.__init__(self, parent)
BaseView.__init__(self, parent, controller)
self.protocol("WM_DELETE_WINDOW", self.exit)
@property
def root(self):
"""Return the root view
:rtype: Tkinter.Tk or tkinter.Tk
"""
result = self.parent
if self.controller is not None:
result = self.controller.root
elif isinstance(result, BaseView):
result = self.parent.root
return result
@property
def application(self):
result = self.parent
if self.controller is not None:
result = self.controller.application
elif isinstance(result, BaseView):
result = self.parent.application
return result
def _prepare(self):
raise NotImplementedError
def _show(self):
self.deiconify()
def _hide(self):
self.withdraw()
return True
def _close(self):
if self.controller is not None and not self.controller.is_stopped():
self.controller.stop()
self.destroy()
return True
def exit(self):
self.close()
class WindowViewSet(Window, BaseViewSet):
"""A window that can contain multiple views"""
def _prepare(self):
raise NotImplementedError
def _close(self):
BaseViewSet.close_views(self)
return super(WindowViewSet, self)._close()
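As an illustration, a concrete window only needs to fill in _prepare(); a minimal sketch (the HelloWindow class and its label are assumptions, not part of julesTk):

class HelloWindow(Window):
    def _prepare(self):
        # Build the widget tree; called before the window is first shown.
        tk.Label(self, text="Hello, world").pack(padx=20, pady=20)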
|
Riddle Of Petra is a TV show on Australian national television from SBS ONE with an average rating of 4.0 stars by TVCatchUpAustralia.com's visitors. We have 1 episode of Riddle Of Petra in our archive. The first episode of Riddle Of Petra was broadcast in March, 2019. Did you miss an episode of Riddle Of Petra and don't want that to happen again? Set an alarm and add Riddle Of Petra to your favourites, so we can remind you by email when there's a new episode available to watch. Completely free and handy!
|
#
# Seagull photo gallery app
# Copyright (C) 2016 Hajime Yamasaki Vukelic
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
import os
import locale
import hashlib
import logging
import functools
import urllib.request
class Entry:
"""
This class encapsulates a single gallery entry. It contains information
about the file path, extension, modification timestamp, and file size.
Entries are created from ``os.Direntry`` objects returned by calls such as
``os.scandir()``.
    Instantiating this class doubles as path validation. Passing objects that
    have any of the following characteristics results in a ``ValueError``:
    - path is a directory
    - path has no extension
    - path has an extension that is not supported
    - path starts with an underscore
    - file at the path has 0 size
"""
#: Supported extensions
EXTENSIONS = ('.jpg', '.png', '.gif')
def __init__(self, dentry):
self.validate(dentry)
self.path = dentry.path
self.name = dentry.name
self.ext = os.path.splitext(self.name)[1]
self.size = dentry.stat().st_size
self.mtime = dentry.stat().st_mtime
self._hash = None
@property
def hash(self):
"""
MD5 hash of the path
"""
if not self._hash:
md5 = hashlib.md5()
md5.update(self.path.encode('utf8'))
self._hash = md5.hexdigest()
return self._hash
    @classmethod
    def from_path(cls, path):
        """
        Instantiate an entry from a path (string)
        """
        # ``Entry`` expects an ``os.DirEntry``-like object, which plain
        # pathlib paths do not provide, so look the path up via scandir.
        directory, name = os.path.split(path)
        for dentry in os.scandir(directory or '.'):
            if dentry.name == name:
                return cls(dentry)
        raise ValueError('{} does not exist'.format(path))
def validate(self, dentry):
"""
Validate the ``os.DirEntry`` object for use with ``Entry`` class
"""
path = dentry.path
# Is a directory
if dentry.is_dir():
raise ValueError('{} is a directory'.format(path))
if dentry.name.startswith('_'):
            raise ValueError('{} starts with an underscore'.format(path))
if dentry.stat().st_size <= 0:
raise ValueError('{} is an empty file'.format(path))
if '.' not in dentry.name:
raise ValueError('{} has no extension'.format(path))
if os.path.splitext(dentry.name)[1].lower() not in self.EXTENSIONS:
raise ValueError('{} has unsupported extension'.format(path))
@staticmethod
def cmp(entry):
"""
Comparison function to be used in ``key`` arguments when sorting
"""
collated_cmp = functools.cmp_to_key(locale.strcoll)
return collated_cmp(entry.path)
def __hash__(self):
return int(self.hash, 16)
def __str__(self):
return self.path
class Index:
"""
This class encapsulates the gallery index information (file list) and the
related methods. This object should be instantiated once and then used as
the authoritative source on the state of the gallery folder.
The Index also behaves as a container for the ``Entry`` objects and can be
iterated over, tested for inclusion, reversed, etc.
The entries in the gallery index are sorted alphabetically with full
support for Unicode collation according to currently active system locale.
"""
def __init__(self, path):
if not os.path.isdir(path):
raise ValueError('{} is missing or not a directory'.format(path))
self.path = path
self.entries = []
self.last_update = None
logging.debug('Setting up index for %s', self.path)
def check_last_update(self, entry):
"""
Update ``last_update`` property if ``entry`` is newer.
"""
if not self.last_update:
self.last_update = entry.mtime
if entry.mtime > self.last_update:
self.last_update = entry.mtime
def rescan(self):
"""
Perform full rescan of the gallery directory.
"""
entries = []
for dentry in os.scandir(self.path):
try:
entry = Entry(dentry)
except ValueError:
logging.debug('Omitted %s from gallery', dentry.path)
continue
self.check_last_update(entry)
entries.append(entry)
self.entries = self.sort(entries)
logging.debug('Added %s items to the index', len(self.entries))
def sort(self, entries=None):
"""
Sort the entries alphabetically
"""
entries = entries or self.entries
logging.debug('Sorting items')
entries.sort(key=Entry.cmp)
return entries
def get_relpath(self, entry):
"""
Return path of an entry relative to the gallery base path.
"""
# FIXME: This needs to guard against directory traversal
return os.path.relpath(entry.path, self.path)
def get_urlpath(self, entry):
"""
Return path of an entry relative to the gallery base path as posix url
"""
rpath = self.get_relpath(entry)
return urllib.request.pathname2url(rpath)
def __len__(self):
return len(self.entries)
def __getitem__(self, key):
return self.entries[key]
def __reversed__(self):
return reversed(self.entries)
def __contains__(self, item):
return item in self.entries
def __iter__(self):
return iter(self.entries)
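A minimal usage sketch of the index (the gallery directory shown is an assumption):

if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    index = Index('/srv/gallery')  # hypothetical gallery directory
    index.rescan()
    for entry in index:
        print(entry.hash, index.get_urlpath(entry))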
|
An Australian reader of the first Volleyball Coaching Wizards book sent us an email to share his thoughts.
I’ve read both of Jack Schwager’s books, and so immediately related to the concept.
I’m only two chapters into it, but I absolutely love it so far, particularly the chapter with Giovanni Guidetti. I especially like the section about Jamie Morrison (former assistant coach to Karch Kiraly), where Giovanni deliberately runs a drill he knows he will disagree with to start a healthy debate. I work in the completely opposite environment in my day job; I would love to have a boss like that.
As a young graduate engineer, one of my first managers told me that I was very “black and white” and that the world is in fact many shades of grey. The older I’ve gotten, the more I relate to this statement, and that’s why Giovanni’s acceptance of this concept resonated with me so much.
I also personally appreciated the point you made about using punishments in training, and how it stifles creativity and focuses the player only on avoiding errors. I was torn over the concept of punishments at the start of last season, however my wife, who is a neuropsychologist, was dead against them, with the psychological research heavily supporting reward rather than punishment. I adopted a philosophy of patience and rewarding positive behaviours and thoroughly enjoyed the performance and culture that arose from it.
I look forward to the insights that I will find in the remaining chapters. I commend you for getting this book out there. I’ve written a short kindle book, and I appreciate that it’s a passion more so than a means to make a living.
The Jack Schwager books he mentioned are Market Wizards and The New Market Wizards. They were a big part of the inspiration for the Volleyball Coaching Wizards project. Glad to hear the volleyball version does indeed follow along with the Schwager version’s concept.
There are others we’ve interviewed who have done some beach coaching. Australian Craig Marshall, though, is the first of our interviewees who does it full-time. Beach volleyball coaching has taken him around the world and to multiple Olympic Games.
Get access to Craig’s interview now for just a $14 contribution to the Volleyball Coaching Wizards project.
Note: PayPal is used to process the payment, but a PayPal account is not required.
Australian Mark Lebedew currently coaches in Poland for Jastrzębski Węgiel after a successful five years in Germany leading the Berlin Recycling Volleys club to new heights. He is also the author of the At Home on the Court blog.
Get access to Mark’s interview now for just a $14 contribution to the Volleyball Coaching Wizards project.
Similar to Vital Heynen, Canadian Stelio DeRocco became a coach following a career playing volleyball professionally in Italy and for Canada. He has gone on to have a career at both the club and international coaching levels and has also served as a coach mentor.
He coached the Australian National Team during the 2000 Olympic cycle.
Get access to Stelio’s interview now for just a $14 contribution to the Volleyball Coaching Wizards project.
Insights from the professional volleyball coaching trenches!
At the international level Mark has coached in the Olympics, the World League, and World Championship qualifications. Professionally he’s coached teams in Italy, Poland, Germany, and Belgium.
Complete the form to learn how you can watch, listen to, and/or read Mark’s interview.
We are starting to lock in our Volleyball Coaching Wizards nominees for interviews. Requests have begun to go out, with positive responses already coming back from some of those we’ve contacted so far. Here’s the beginnings of the list.
A member of the original AVCA Hall of Fame induction class in 2003, Iradge coached for 19 years at the University of Northern Iowa. During that time he compiled a record of 503-142 (.780) and recorded at least 20 victories in 17 of 19 years. After guiding the Panthers to a 31-1 record in 1999, and eventually reaching the NCAA Division I Tournament Sweet 16, he was named the AVCA National Coach of the Year and received the inaugural AVCA Excellence in Education Award. In 1997, he took a leave of absence to serve as a consultant for USA Volleyball and the development of its teams.
Yes, he’s one of the principals on the project. He has also, however, been nominated for inclusion by multiple coaches outside the project. One notably said, “…there are 3 coaches I’ve never met who I feel I’ve learned more from than any coach I HAVE met. Mark is one of them.” Mark’s coaching bio is listed here.
As head coach at Emory University (NCAA Division III) since 1996, Jenny has amassed over 600 wins and her players have earned over 40 All-American selections, including two National Player of the Year awards. Her teams have reached the NCAA tournament 18 years in a row, making the round of 16 on 15 occasions, with four trips to the Final Four resulting in a national championship and a runner-up.
As head coach of Barnstable High School for 27 seasons, Tom has won 16 Massachusetts Division 1 state titles. From 2003 to 2007 his teams won a record 110 consecutive matches. Since 1995, Barnstable has had 10 undefeated seasons and amassed a 455-18 overall record. Tom’s players have included 4 Prepvolleyball.com All Americans, 1 AVCA All American, and 7 Massachusetts Gatorade Players of the Year. He was selected as 2008 AVCA National Coach of the Year and the 2012 NHSCA National Volleyball Coach of the Year.
As the head coach and director of volleyball at Leeds Met University, Simon was at the core of the dominant UK university program. From 2007 to 2012 his men’s and women’s teams combined for six BUCS (the UK equivalent to the NCAA) national championships and five Volleyball England Student Cup titles. His women’s team had a three year undefeated run during that span. Overlapping with his time at Leeds Met, Simon was also the head coach of the Scottish Men’s National Team, leading them to the 2012 Novotel Cup which was the nation’s first-ever international championship.
|
#!/usr/bin/env python
"""Additional URL/View routing for URL patterns and views
Django matches URL patterns based solely on pattern (not on pattern and HTTP
Verb). Other frameworks match on both.
In order to follow a more traditional convention as seen in other frameworks,
these methods will additionally route via the HTTP Verb.
For example, the index() and the create() 'actions' that are convention in
other frameworks have the same Django URL pattern. However, the GET requests
are routed to index() and POST requests are routed to create().
"""
from django import http
from blog import views
def index_create(request):
"""This meta-view handles index/create second level routing
GET /blog -> blog.views.index
POST /blog -> blog.views.create
See also the blog.routes module documentation (above)
"""
if request.method == "GET":
return views.index(request)
if request.method == "POST":
return views.create(request)
return http.HttpResponseNotFound(
'<h1>This is not a route for HTTP verb {0}</h1>'.format(
request.method))
def show_update_destroy(request, slug):
"""This meta-view handles show/update/destroy second level routing
GET /blog/<slug> -> blog.views.show
PUT /blog/<slug> -> blog.views.update
DELETE /blog/<slug> -> blog.views.destroy
See also the blog.routes module documentation (above)
"""
# Look for overloaded POSTS first
if request.method == "POST":
_method = request.POST.get("_method", None)
if _method is not None:
setattr(request, _method, request.POST)
request.method = _method
if request.method == "GET":
return views.show(request, slug)
if request.method == "PUT":
return views.update(request, slug)
if request.method == "DELETE":
return views.destroy(request, slug)
return http.HttpResponseNotFound(
'<h1>This is not a route for HTTP verb {0}</h1>'.format(
request.method))
def new(request):
"""This meta-view handles new second level routing
GET /blog/new -> blog.views.new
See the blog.routes module documentation (above)
"""
if request.method == "GET":
return views.new(request)
return http.HttpResponseNotFound(
'<h1>This is not a route for HTTP verb {0}</h1>'.format(
request.method))
def edit(request, slug):
"""This meta-view handles new second level routing
GET /blog/<slug>/edit -> blog.views.edit
See the blog.routes module documentation (above)
"""
if request.method == "GET":
return views.edit(request, slug)
return http.HttpResponseNotFound(
'<h1>This is not a route for HTTP verb {0}</h1>'.format(
request.method))
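For context, a sketch of how these meta-views might be wired into a URLconf (this urls.py is assumed, not part of the module above):

# blog/urls.py (hypothetical wiring for the routes above)
from django.conf.urls import url

from blog import routes

urlpatterns = [
    url(r'^blog/$', routes.index_create),
    url(r'^blog/new/$', routes.new),
    url(r'^blog/(?P<slug>[-\w]+)/$', routes.show_update_destroy),
    url(r'^blog/(?P<slug>[-\w]+)/edit/$', routes.edit),
]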
|
Lovely apartments, great value and spotlessly clean. Location also great, just off the main street with all the shops and bars and 5-10 min walk to the beach. Only downside is the bathroom is a little on the small side but is comparable to other local accommodation. Friendly and helpful staff make this a great place to stay, will definitely do so again.
The number of repeat guests at these apartments says it all! Staff, food, pool, rooms all excellent. Could be slightly noisy for people who like to sit and read on the balcony at night, but not noisy enough if the book is good enough.
Zante is fantastic and all worth seeing.
The Lazaros Apartments are an ideal base for exploring the island. They are located in Tsilivi, a lively little town full of restaurants and shops, just a few steps from the long beach.
The apartments are comfortable, with air conditioning and a small balcony, and equipped with a kitchenette and refrigerator. The hotel overall is well kept and clean, with a pool and a pool bar where you can stop for a drink or a bite to eat. There is satellite TV, a pool table and darts.
It's an excellent hotel for groups of friends but also for couples and families.
It's advisable to rent a scooter or quad so you don't miss the rest of the island!
Stayed here for a week. Very clean accommodation, guys that run the place are very friendly and more than happy to help out with any issues. Pool area is very clean and the bar food is also cheap and of good value. Would recommend this for travellers on a budget looking for good value self catering.
|
# Copyright 2011 Tsutomu Uchino
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import traceback
import uno
from mytools_Mri import engine, node, values
from mytools_Mri.type import ExtType2, ExtAnyType2
from mytools_Mri.unovalues import MethodConcept, PropertyConcept, \
PropertyAttribute, ParamMode, TypeClass, TypeClassGroups
from mytools_Mri.config import Config
from com.sun.star.beans import UnknownPropertyException, PropertyVetoException
from com.sun.star.lang import WrappedTargetException, IllegalArgumentException
from com.sun.star.reflection import InvocationTargetException
class CancelException(Exception):
pass
Entry = engine.Entry
class RootEntry(node.Root):
pass
import mytools_Mri.web
import mytools_Mri.macros
from mytools_Mri.cg import CGMode, CGType, CodeEntry, CodeGenerator
class MRI(object):
def __init__(self, ctx, ui_class):
self.ctx = ctx
if values.MRI_DIR is None:
values.set_mri_dir(ctx)
self.config = Config(ctx)
self.config.property_only = False
self.web = mytools_Mri.web.create_IDL_opener(self, self.config, self.config.ref_by_doxygen)
self.engine = engine.MRIEngine(ctx)
self.history = RootEntry()
self.current = self.history
self.cg = CodeGenerator(self.config.code_type, False, True)
self.mode = True
self.open_new = False
self.macros = mytools_Mri.macros.Macros(self)
self.ui = ui_class(ctx, self)
def inspect(self, name, target):
try:
self.history.code_entry = None
entry = self.engine.create(self, name, target)
entry.code_entry = self.code(
type=CGType.NONE, key=name,
value_type=entry.type, args="", parent="ROOT", idl=None)
self.action_by_type(entry)
except Exception as e:
print(e)
traceback.print_exc()
def code(self, *args, **kwds):
try:
if not "parent" in kwds:
kwds["parent"] = self.current.code_entry
code_entry = self.cg.add(**kwds)
if self.mode:
self.ui.code_updated()
return code_entry
except Exception as e:
print(e)
traceback.print_exc()
return None
def set_mode(self, state):
""" Set mode which broadcast to ui or not. """
self.mode = not not state
def message(self, message, title=''):
"""shows message."""
if self.mode:
self.ui.message(message, title)
def error(self, message, title='Error'):
"""shows error."""
if self.mode:
self.ui.error(message, title)
def status(self, message):
"""status message."""
if self.mode:
self.ui.status(message)
def update_config(self, store=False):
"""change config."""
config = self.config
self.macros.set_user_dir(config.macros)
self.web.set_browser(config.browser)
self.web.set_sdk_path(config.sdk_path)
if store:
self.config.write()
def change_entry(self, entry):
if self.open_new:
self.open_new = False
self.create_service(
'mytools.Mri', nocode=True).inspect(entry.target)
else:
self.current.append_child(entry)
self.current = entry
self.ui.entry_changed(history=True, update=self.mode)
return entry
def set_current(self, entry):
self.current = entry
def change_history(self, index=None, entry=None):
if entry is None:
entry = self.history.get_history_entry(index)
#self.set_current(entry)
if entry != self.history:
self.current = entry
self.ui.entry_changed(history=False)
return True
def get_property_value(self, name):
entry = self.current
target = entry.target
inspected = entry.inspected
# normal property
if entry.has_interface("com.sun.star.beans.XPropertySet"):
psinfo = target.getPropertySetInfo()
if psinfo and psinfo.hasPropertyByName(name):
try:
value = target.getPropertyValue(name)
temp_type = psinfo.getPropertyByName(name).Type
if temp_type is None:
temp_type = uno.Type("any", TypeClass.ANY)
entry = self.engine.create(self, name, value)
idl = entry.type
ext_type = ExtType2(entry, self.engine,
temp_type.typeName, temp_type.typeClass)
entry.type = ext_type
entry.code_entry = self.code(
type=CGType.PROP ,mode=CGMode.GET, key=name, value_type=entry.type, idl=idl)
return self.action_by_type(entry)
except Exception as e:
self.error("Exception, to get property: %s, %s" % (name, str(e)))
traceback.print_exc()
if self.mode:
return
else:
raise
# pseud property
if inspected.hasMethod("get%s" % name, MethodConcept.ALL):
return self.call_method("get%s" % name, pseud=True)
elif inspected.hasMethod("is%s" % name, MethodConcept.ALL):
return self.call_method("is%s" % name, pseud=True)
elif inspected.hasMethod("set%s" % name, MethodConcept.ALL):
return self.status("Write only pseud property: %s" % name)
# interface attributes
if inspected.hasProperty(name, PropertyConcept.ATTRIBUTES):
psinfo = inspected.getProperty(name, PropertyConcept.ATTRIBUTES)
try:
value = getattr(target, name)
entry = self.engine.create(self, name, value)
#temp_type = entry.type
#if temp_type.getTypeClass() == TypeClass.SEQUENCE:
ext_type = ExtType2(entry, self.engine,
psinfo.Type.typeName, psinfo.Type.typeClass)
entry.type = ext_type
attr_def = self.engine.find_attribute_interface(
self.current, name)
if attr_def is False: attr_def = ""
entry.code_entry = self.code(
type=CGType.ATTR, mode=CGMode.GET, key=name, value_type=entry.type, idl=attr_def)
return self.action_by_type(entry)
except Exception as e:
self.error("Exception, to get attribute: %s, %s" % (name, str(e)))
traceback.print_exc()
if self.mode:
return
else:
raise
# XVclWindowPeer
if entry.has_interface("com.sun.star.awt.XVclWindowPeer"):
try:
value = target.getProperty(name)
temp_type = inspected.getProperty(name, PropertyConcept.ALL).Type
if temp_type is None:
temp_type = uno.Type("any", TypeClass.ANY)
entry = self.engine.create(self, name, value)
# ToDo code
return self.action_by_type(entry)
except Exception as e:
self.error("Exception, to get %s, %s" % (name, str(e)))
traceback.print_exc()
if self.mode:
return
else:
raise
def set_property_value(self, name, get_value=None, arg=None, get_args=None):
entry = self.current
target = entry.target
# normal property
if entry.has_interface("com.sun.star.beans.XPropertySet"):
psinfo = target.getPropertySetInfo()
if psinfo.hasPropertyByName(name):
pinfo = psinfo.getPropertyByName(name)
if pinfo.Attributes & PropertyAttribute.READONLY:
raise Exception("%s read-only property." % name)
if self.mode:
try:
old_value = target.getPropertyValue(name)
arg = get_value(name, pinfo.Type.typeName, pinfo.Type.typeClass,
("", ""), "current: " + str(old_value))
except CancelException:
return
except Exception as e:
self.status(str(e))
return
try:
if self.mode:
_arg, _any = self.extract_args(arg)
target.setPropertyValue(name, _arg)
entry = self.engine.create(self, name, _arg)
else:
# ToDo any
_arg, _any = self.extract_args(arg)
target.setPropertyValue(name, _arg)
entry = self.engine.create(self, name, _arg)
p_type = pinfo.Type
ext_type = ExtType2(entry, self.engine, p_type.typeName, p_type.typeClass)
entry.type = ext_type
entry.code_entry = self.code(
type=CGType.PROP, mode=CGMode.SET, key=name, value_type=entry.type, args=arg, idl=entry.type)
except WrappedTargetException as e:
te = e.TargetException
self.error("Exception: %s" % te.Message)
except IllegalArgumentException as e:
self.error("Illegal value for %s property." % prop_name)
except PropertyVetoException as e:
self.error("Veto to set the %s property value." % prop_name)
except UnknownPropertyException as e:
self.error("Unknown property! %s" % e)
except Exception as e:
self.error("Exception, to set %s property, %s" % (name, str(e)))
traceback.print_exc()
if self.mode:
return True
else:
return None
elif entry.inspected.hasProperty(name, PropertyConcept.ATTRIBUTES):
pinfo = entry.inspected.getProperty(name, PropertyConcept.ATTRIBUTES)
if pinfo.Attributes & PropertyAttribute.READONLY:
self.status("Attribute %s is readonly." % name)
raise Exception("%s read-only property." % name)
if self.mode:
try:
old_value = getattr(target, name)
arg = get_value(name, pinfo.Type.typeName, pinfo.Type.typeClass,
("", ""), "current: " + str(old_value))
except Exception as e:
return
try:
if self.mode:
setattr(target, name, arg)
entry = self.engine.create(self, name, arg)
else:
_arg, _any = self.extract_args(arg)
setattr(target, name, _arg)
entry = self.engine.create(self, name, _arg)
p_type = pinfo.Type
ext_type = ExtType2(entry, self.engine,
p_type.typeName, p_type.typeClass)
entry.type = ext_type
attr_def = self.engine.find_attribute_interface(
self.current, name)
if attr_def is False: attr_def = ""
entry.code_entry = self.code(
type=CGType.ATTR, mode=CGMode.SET, key=name, value_type=entry.type, args=arg, idl=attr_def)
except Exception as e:
print(("Error to set attribute: " + str(e)))
traceback.print_exc()
return None
method_name = "set%s" % name
if not entry.inspected.hasMethod(method_name, MethodConcept.ALL):
self.status("Property %s is readonly." % name)
if self.mode:
return
else:
raise AttributeError("Unknown method %s" % name)
return self.call_method(method_name, get_args=get_args, args=(arg,), pseud=True)
def call_method(self, name, get_args=None, args=None, pseud=False):
""" Frontend to invoke method. """
method = self.engine.get_method_info(self.current, name, raw=True)
if method is None: return
param_infos = method.getParameterInfos()
if self.mode:
if 0 < len(param_infos):
try:
if get_args:
args = tuple(get_args(method))
except CancelException:
return
except:
traceback.print_exc()
return
else:
args = ()
try:
return self.invoke_method(method, args, pseud=pseud)
except Exception as e:
self.status(str(e))
traceback.print_exc()
if self.mode:
return
else:
raise
def extract_args(self, args):
""" Extract value from Entry instance. """
_any = False
if isinstance(args, tuple) or isinstance(args, list):
a = []
for arg in args:
v, __any = self.extract_args(arg)
a.append(v)
if __any:
_any = True
return tuple(a), _any
else:
if isinstance(args, Entry):
target = args.get_target()
extracted, __any = self.extract_args(target)
if isinstance(target, uno.Any) or __any:
_any = True
return extracted, _any
else:
return args, _any
def get_out_param_index(self, idl):
""" Returns list of out/inout param indexes. """
params = idl.getParameterInfos()
if params:
return [i for i, info in enumerate(params)
if info.aMode == ParamMode.OUT or info.aMode == ParamMode.INOUT]
else:
return None
def invoke_method(self, method, args, name=None, pseud=False):
try:
if not name:
if args:
name = "%s(%s)" % (method.getName(),
", ".join([str(a) for a in args]))
else:
name = "%s()" % method.getName()
out_params = self.get_out_param_index(method)
if self.mode:
_args, _any = self.extract_args(args)
value, d = method.invoke(self.current.target, _args)
else:
_args, _any = self.extract_args(args)
if _any:
value, d = uno.invoke(method, "invoke", (self.current.target, _args))
else:
value, d = method.invoke(self.current.target, _args)
ret_type = method.getReturnType()
entry = self.engine.create(self, name, value)
if ret_type.getTypeClass() == TypeClass.ANY:
# check the method from container
if self.engine.check_method_from_container(method):
_type = self.current.target.getElementType()
ret_type = self.engine.for_name(_type.typeName)
# added to solve problem on new configuration
if ret_type.getTypeClass() == TypeClass.VOID:
ret_type = self.engine.get_type(entry)
entry.type = ret_type
value_type = ExtAnyType2(entry, self.engine, ret_type.getName(), ret_type.getTypeClass())
entry.type = value_type
if pseud:
code_type = CGType.PSEUD_PROP
else:
code_type = CGType.METHOD
entry.code_entry = self.code(
type=code_type, key=method.getName(),
value_type=value_type, args=args, idl=method)
if out_params:
param_infos = method.getParameterInfos()
_d = []
for i, v in zip(out_params, d):
_key = "%s_%s" % (name, i)
_entry = self.engine.create(self, _key, v)
_entry.type = param_infos[i]
type = _entry.type.aType
_entry.type = ExtType2(_entry, self.engine, type.getName(), type.getTypeClass())
_d.append(_entry)
_entry.code_entry = args[i].code_entry
ret = self.action_by_type(entry)
return (ret,) + tuple(_d)
else:
return self.action_by_type(entry)
except InvocationTargetException as e:
te = e.TargetException
self.error("Method: %s invocation exception.\nError Message: \n%s" % (
method.getName(), te.Message))
traceback.print_exc()
except Exception as e:
self.error("Method: %s unknown exception.\nError Message: \n%s" % (
name, str(e)))
traceback.print_exc()
def get_struct_element(self, name):
""" Get field value from current struct. """
entry = self.current
target = entry.target
try:
found = self.engine.find_field(name, self.engine.get_type(entry))
except:
return
try:
value = getattr(target, name)
field_type = found.getType()
            if field_type is None:
field_type = self.engine.reflection.getType(value)
entry = self.engine.create(self, name, value)
# ToDo
ext_type = ExtAnyType2(entry, self.engine,
field_type.getName(), field_type.getTypeClass())
entry.type = ext_type
entry.code_entry = self.code(
type=CGType.FIELD, mode=CGMode.GET, key=name, value_type=entry.type, idl=self.engine.get_type(self.current))
return self.action_by_type(entry)
except Exception as e:
print(("Error: get_struct_element, " + str(e)))
traceback.print_exc()
def set_struct_element(self, name, value=None, get_value=None):
entry = self.current
target = entry.target
try:
found = self.engine.find_field(name, self.engine.get_type(entry))
except:
return
if self.mode:
try:
if get_value:
old_value = getattr(target, name)
value = get_value(name, found.getType().getName(), found.getType().getTypeClass(),
("", ""), "current: " + str(old_value))
except Exception as e:
print(e)
return
try:
if self.mode:
_arg, _any = self.extract_args(value)
setattr(target, name, _arg)
entry = self.engine.create(self, name, _arg)
else:
_arg, _any = self.extract_args(value)
setattr(target, name, _arg)
entry = self.engine.create(self, name, _arg)
field_type = found.getType()
            if field_type is None:
field_type = self.engine.reflection.getType(value)
ext_type = ExtType2(entry, self.engine,
field_type.getName(), field_type.getTypeClass())
entry.type = ext_type
entry.code_entry = self.code(
type=CGType.FIELD, mode=CGMode.SET, key=name, value_type=entry.type, args=value, idl=self.engine.get_type(self.current))
except Exception as e:
print(("Error: get_struct_element, " + str(e)))
traceback.print_exc()
def action_by_type(self, entry):
if entry.target is None:
if self.mode:
return self.message("void")
else:
return None
type_name = entry.type.getName()
type_class = entry.type.getTypeClass()
if not self.mode and type_name in values.IGNORED_INTERFACES:
return self.error(
"You can not inspect \n%s \ntype value, sorry." % type_name,
"Listed in the IGNORED_INTERFACES list.")
try:
if type_class == TypeClass.ANY:
value_type = ExtAnyType2(entry, self.engine)
type_name = value_type.getName()
type_class = value_type.getTypeClass()
if type_class in TypeClassGroups.NUMERIC:
self.message(str(entry.target), type_name)
elif type_class == TypeClass.STRING:
if entry.target:
value = entry.target
else:
value = ""
self.message(value, type_name)
elif type_class == TypeClass.BOOLEAN:
self.message(str(entry.target), type_name)
elif type_class == TypeClass.INTERFACE:
self.change_entry(entry)
elif type_class == TypeClass.STRUCT:
self.change_entry(entry)
elif type_class == TypeClass.SEQUENCE:
self.change_entry(entry) # ToDo
elif type_class == TypeClass.ENUM:
self.message("%s.%s" % (type_name, entry.target.value), type_name)
elif type_class == TypeClass.BYTE:
self.message("%s" % entry.target, type_name)
elif type_class == TypeClass.TYPE:
self.message(entry.target.typeName, type_name)
elif type_class == TypeClass.VOID:
self.message("void", type_name)
elif type_class == TypeClass.CHAR:
self.message(entry.target.value, type_name)
else:
try:
self.message(str(entry.target), "unknown type")
except:
self.error("Error: value to string conversion.")
except Exception as e:
print(e)
print(("%s, %s" % (type_name, type_class)))
traceback.print_exc()
return entry
def manage_sequence(self, entry, k=None):
if len(entry.target) == 0:
self.message("empty sequence")
return None
value_type = entry.type
try:
c_type = value_type.getComponentType()
except:
value_type = self.engine.get_type(entry)
c_type = value_type.getComponentType()
comp_type = None
if c_type.getTypeClass() == TypeClass.SEQUENCE:
comp_type = self.engine.get_component_base_type(value_type)
type_class = c_type.getTypeClass()
#if not self.mode:
value = entry.target[k]
new_entry = self.engine.create(self, "[%s]" % k, value)
new_entry.type = c_type
new_entry.code_entry = self.code(
type=CGType.ELEMENT, mode=CGMode.GET, key=k, value_type=new_entry.type, idl=new_entry.type)
if type_class == TypeClass.INTERFACE or type_class == TypeClass.ANY:
self.change_entry(new_entry)
elif type_class == TypeClass.STRUCT:
self.change_entry(new_entry)
elif type_class == TypeClass.SEQUENCE:
self.change_entry(new_entry)
else:
self.action_by_type(new_entry)
return new_entry
def _get_value(self, args):
if isinstance(args, tuple):
return tuple([self._get_value(arg) for arg in args])
else:
return args.target if isinstance(args, Entry) else args
# for macros
def get_component_context(self):
entry = self.engine.create(self, "XComponentContext", self.ctx)
entry.code_entry = self.code(
type=CGType.CONTEXT, key="XComponentContext", value_type=entry.type, idl=entry.type)
return entry
def assign_element(self, k, value, append=False):
entry = self.current
self.code(
type=CGType.ELEMENT, mode=CGMode.SET, key=k, value_type=entry.type.getComponentType(), idl=entry.type.getComponentType(), args=value, misc=append)
def create_service(self, name, *args, **kwds):
"""
if args:
_args, _any = self.extract_args(args)
if _any:
obj, d = uno.invoke(self.ctx.getServiceManager(), "createInstanceWithArgumentsAndContext", (name, _args, self.ctx))
else:
obj = self.ctx.getServiceManager().\
createInstanceWithArgumentsAndContext(name, _args, self.ctx)
else:
"""
obj = self.ctx.getServiceManager().\
createInstanceWithContext(name, self.ctx)
if "nocode" in kwds: return obj
entry = self.engine.create(self, name, obj)
entry.code_entry = self.code(
type=CGType.SERVICE, key=name, value_type=entry.type,
args=args, idl=entry.type)
return entry
# ToDo initial arguments
def create_struct(self, type_name, *args, **kwds):
_args, _any = self.extract_args(args)
struct = uno.createUnoStruct(type_name, *_args)
if "nocode" in kwds: return struct
entry = self.engine.create(self, type_name, struct)
entry.code_entry = self.code(
type=CGType.STRUCT, key=type_name, value_type=entry.type, idl=entry.type, args=args)
return entry
# ToDo: allow passing initial values?
def create_sequence(self, type_name, length, var=None):
entry = self.engine.create(self, type_name, ())
entry.target = []
entry.type = self.engine.for_name(type_name)
entry.code_entry = self.code(
type=CGType.SEQ, key=type_name, value_type=entry.type, idl=entry.type, args=length, misc=var)
return entry
def declare_variable(self, type_name, value):
entry = self.engine.create(self, type_name, value)
entry.type = self.engine.for_name(type_name)
entry.code_entry = self.code(
type=CGType.VARIABLE, args=value, key=type_name, value_type=entry.type, idl=entry.type)
return entry
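# Illustrative (hypothetical console input) use of the factory helpers above;
# the UNO type names are standard, the variable names are placeholders:
#   resolver = self.create_service("com.sun.star.bridge.UnoUrlResolver")
#   prop = self.create_struct("com.sun.star.beans.PropertyValue")
#   seq = self.create_sequence("[]com.sun.star.beans.PropertyValue", 2)
# Each helper returns an Entry whose .target holds the created object and
# whose .code_entry records the call for later code generation.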
|
My daughter, who will be 9 months in 2.5 weeks, has begun to hate being put in her crib. She has always been on a schedule with consistent nap and bed times and been put down drowsy; at around 5 months I let her self-soothe, and she has been sleeping through the night since then. However, getting her to fall asleep has become quite difficult. Sometimes she will fall asleep on the breast, and other times she will fall asleep drowsy with minimal interaction, but more often than not she refuses to sleep. She cries and stands in her crib and pleads to be picked up. I've tried not going in for days, and I've tried going in and putting her back down or telling her it's time to sleep, etc. Eventually she falls asleep, but only after an enormous amount of crying. This has been going on for a while, and I hate to see her cry unnecessarily. I'm stumped: once I realized that CIO wasn't working, I tried holding her (she perks up and wants to play) and rocking her (she stands up and cries), and I'm lost as to what to do.
I agree this is an unusual situation. In my experience, at her age most sleep issues happen because children have not learned to fall asleep on their own. This causes problems at bedtime and then throughout the night, as these children cannot settle themselves back down after normal night wakings. Is she waking throughout the night? If she is able to put herself back to sleep in the middle of the night, then I agree that she does know how to put herself to sleep.
It is common for children who have recently learned to pull up to pull up and cry at bedtime. Often they do so because (early on) they are not able to let go and lie down. During this stage, we go through a couple-week period where we have to go in, lay them back down, and give them a paci.
While I'm happy to hear that she is on a schedule - maybe the schedule is part of the problem. Is she really tired when you are putting her down for bed? It sounds like she is not ready to go to sleep at the current bedtime. How many naps is she getting a day? When is her last nap of the day? Most 9-month-olds take 2 naps totaling about 3 hours.
4.) Getting outside between her last nap and bedtime.
Once you do begin the bedtime process you must stick with the routine. If you consistently give in to her crying, you will create a habitual crier.
|
"""
filename: controllers.py
description: Controllers for email invitations.
created by: Omar De La Hoz ([email protected])
created on: 10/12/17
"""
from flask_socketio import emit
from app.decorators import ensure_dict, get_user
from app import app, db, socketio
from app.users.models import Users
from app.invitations.models import Invitations
from app.invitations.invitations_response import Response
from flask import render_template
from sqlalchemy import and_
from app.email.models import huey
from app.email.controllers import send_email
import time
##
## @brief Sends an invitation to join a committee
## when user doesn't exist in ChargeTracker.
##
## @param new_user  The user to be invited.
## @param committee The committee to join.
##
## @return Response.InviteSent, Response.InviteExists or Response.InviteError.
##
def send_invite(new_user, committee):
invite = and_(
Invitations.user_name == new_user,
Invitations.committee_id == committee.id,
Invitations.isInvite == True
)
if Invitations.query.filter(invite).first() is not None:
return Response.InviteExists
invitation = Invitations(
user_name= new_user,
committee= committee,
committee_id = committee.id,
charge_id = None,
isInvite= True
)
try:
db.session.add(invitation)
db.session.commit()
email = {}
email["title"] = "You're Invited"
email["sender"]=("SG TigerTracker", "[email protected]")
email["recipients"] = [new_user + "@rit.edu"]
email["subtype"] = "related"
email["html"] = render_template(
'committee_invitation.html',
user_name= new_user,
committee_name= committee.title,
committee_head= committee.head,
time_stamp= time.time(),
app_url= app.config['CLIENT_URL'] + str(invitation.id)
)
if not app.config['TESTING']:
send_email(email)
return Response.InviteSent
except Exception as e:
db.session.rollback()
return Response.InviteError
##
## @brief Sends a request email to join a committee
## to the committee head.
##
## @param new_user The user to be added.
## @param committee The committee to join.
##
## @return Response.RequestSent, Response.RequestExists or Response.RequestError.
##
def send_request(new_user, committee):
invite = and_(
Invitations.user_name == new_user.id,
Invitations.committee_id == committee.id,
Invitations.isInvite == False
)
if Invitations.query.filter(invite).first() is not None:
return Response.RequestExists
invitation = Invitations(
user_name= new_user.id,
committee= committee,
committee_id = committee.id,
charge_id = None,
isInvite= False
)
try:
db.session.add(invitation)
db.session.commit()
email = {}
email["title"] = "Great news, " + new_user.id + " wants to join!"
email["sender"] = ("SG TigerTracker", "[email protected]")
email["recipients"] = [committee.head + "@rit.edu"]
email["subtype"] = "related"
email["html"] = render_template(
'committee_request.html',
user_name= new_user.id,
committee_head= committee.head,
committee_name= committee.title,
time_stamp= time.time(),
request_url= app.config['CLIENT_URL'] + str(invitation.id)
)
if not app.config['TESTING']:
send_email(email)
return Response.RequestSent
except Exception as e:
db.session.rollback()
return Response.RequestError
##
## @brief Sends a close-charge request for a charge
##        to all administrators.
##
## @param user      The user requesting the closure.
## @param committee The committee the charge belongs to.
## @param chargeID  The id of the charge to be closed.
##
## @return Response.RequestSent or Response.RequestError.
##
def send_close_request(user, committee, chargeID):
admins = db.session.query(Users).filter(Users.is_admin == True).all()
admin_emails = []
for admin in admins:
    admin_emails.append(admin.id + "@rit.edu")
invite = and_(
Invitations.user_name == committee.head,
Invitations.committee_id == committee.id,
Invitations.isInvite == False
)
invitation = Invitations (
user_name= user.id,
committee= committee,
committee_id = committee.id,
charge_id = chargeID,
isInvite=False
)
try:
db.session.add(invitation)
db.session.commit()
email = {}
email["title"] = "Close Charge Request"
email["sender"]=("SG TigerTracker", "[email protected]")
email["recipients"] = admin_emails
email["subtype"] = "related"
email["html"] = render_template(
'close_charge_request.html',
user_name= committee.head,
charge_name= chargeID,
time_stamp= time.time(),
request_url= app.config['CLIENT_URL'] + str(invitation.id)
)
if not app.config['TESTING']:
send_email(email)
return Response.RequestSent
except Exception as e:
db.session.rollback()
return Response.RequestError
##
## @brief Gets the data for a specific invitation/request.
##
## @param user_data The data to display a specific invitation,
## contains the keys (all required):
## - token: The token of the authenticated user
## - invitation_id: Id of invitation/request.
##
## @emit Data of a specific invitation or errors.
##
@socketio.on('get_invitation')
@ensure_dict
@get_user
def get_invitation(user, user_data):
invitation = Invitations.query.filter_by(id = user_data.get("invitation_id","")).first()
if invitation is None:
emit("get_invitation", Response.InviteDoesntExist)
return
if user is None:
emit("get_invitation", Response.NotAuthenticated)
return
committee = invitation.committee
# Check if user should be able to view
# invitation.
if (committee.head == user.id or
user.is_admin or
user.id == invitation.user_name):
invitation_data = {
"committee_id": committee.id,
"committee_head": committee.head,
"committee_title": committee.title,
"current_user": user.id,
"invite_user": invitation.user_name,
"is_invite": invitation.isInvite
}
emit("get_invitation", invitation_data)
else:
emit("get_invitation", Response.IncorrectPerms)
##
## @brief Changes the status of an invitation/request.
##
## @param user_data The data to modify a specific invitation,
## contains the keys (all required):
## - token: The token of the authenticated user
## - invitation_id: Id of invitation/request.
## - status: True to accept, false otherwise.
##
## @emit UserAdded, InviteDeleted or errors.
##
@socketio.on('set_invitation')
@ensure_dict
@get_user
def set_invitation(user, user_data):
invitation = Invitations.query.filter_by(id = user_data.get("invitation_id","")).first()
if invitation is None:
emit("set_invitation", Response.InviteDoesntExist)
return
if user is None:
emit("set_invitation", Response.NotAuthenticated)
return
if "status" not in user_data:
emit("set_invitation", Response.InvalidStatus)
return
if type(user_data.get("status","")) != type(True):
emit("set_invitation", Response.InvalidStatus)
return
com_head = Users.query.filter_by(id= invitation.committee.head).first()
com_id = invitation.committee.id
token = user.generate_auth()
# If invitation, use the committee head's token.
if invitation.isInvite:
token = com_head.generate_auth()
else:
if com_head != user and not user.is_admin:
emit("set_invitation", Response.IncorrectPerms)
return
if user_data["status"] == True:
add_data = {
"token": token,
"committee_id": com_id,
"user_id": invitation.user_name
}
from app.members.controllers import add_to_committee
returnValue = add_to_committee(add_data)
emit("set_invitation", Response.InviteAccept)
else:
emit("set_invitation", Response.InviteDeny)
# Remove the invitation and persist the deletion.
db.session.delete(invitation)
db.session.commit()
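# Illustrative only: a client-side acceptance mirrors the sketch after
# get_invitation above, e.g. (hypothetical values)
#   sio.emit("set_invitation", {"token": token,
#                               "invitation_id": invitation_id,
#                               "status": True})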
|
Maybe you’re not going to be a superstar athlete. But you can still set a big fitness goal for yourself, even if you’ve never tried a sport before. Examples of fitness goals could be a century ride (a 160-km bike ride in less than a day). Or you could train for a triathlon (a series of three endurance events, often swimming, cycling, and running), or join a sports league.
First, consider the possibilities. There are lots of activities you could try, and you might discover you like something you never thought you’d do. Want to train for something really tough and out of your comfort zone? Check out different race and other adventurous events in your area.
You might have a big goal you want to reach one day, like a marathon. The best way to get there is to set a series of smaller goals that lead to your big goal. For example, before you sign up for a marathon, set goals to do a few 5K races first. And before that, work up to running 2 km. Fitness apps can help you keep track of each great thing you do on your way to your big goal.
If you’re not active now, talk to your doctor before you start exercising if you’re over 45 (men) or 55 (women). It’s also a good idea to get a doctor’s OK if you have a health problem or take regular medication. To avoid injuries and burnout, start working out slowly: 3 days a week for 10-15 minutes. Then gradually add time and intensity.
Exercise burns extra calories and raises your metabolism. So eat every couple of hours – three meals plus healthy snacks. Before a workout, snack on carbs (juice, fruit, or yogurt) for fast energy. After a long, tough workout, replenish with a carb/protein mix, like a peanut butter sandwich or a smoothie. Otherwise, keep your meals and snacks light: Try an apple and peanut butter, yogurt and nuts, or an egg on whole wheat toast.
Unless your workout is really long or tough, you don’t need a special sports drink with electrolytes. Water works just fine. Drink plenty: If you’re dehydrated, your muscles may cramp, and you raise your risk of heat exhaustion and heatstroke. Two hours before you exercise, drink about 2 to 3 cups of water. During your routine, drink about 1 cup every 10-20 minutes. Keep drinking after you’re done exercising, too.
Even if your goal – a marathon, for example – might centre on cardio, you should practice strength or resistance training, too. Strong muscles burn more calories, help prevent injuries, and build stronger bones. Work muscles on weight machines, with hand-held equipment like free weights, kettlebells, or resistance bands, or by doing exercises like push-ups. Rest each muscle group, such as biceps and triceps, at least 2 days between strength workouts.
You need the right clothes and shoes when you work out. It’s not about looking good (although that can’t hurt) – it’s about feeling comfortable. It’s no fun to walk, run, or bike if you have flapping sleeves or flimsy shoes. Ask the experts at a sporting goods store for help. Look for fabrics that draw moisture away from your body – not sweat-absorbing cotton. In cool temperatures, wear layers that you can peel off as you warm up.
Whether you’re running or weightlifting, it’s easy to get hurt if your form or technique is wrong. Don’t assume you’re exercising the right way. If your gym has trainers or fitness staff, they may be able to watch you exercise and give you advice on improving your technique. Or you can read fitness magazines or find online videos that show correct techniques.
|
# disable missing docstring
# pylint: disable=C0111
import json
from lettuce import world, step
from nose.tools import assert_equal, assert_true # pylint: disable=E0611
from common import type_in_codemirror, open_new_course
from advanced_settings import change_value
from course_import import import_file, go_to_import
DISPLAY_NAME = "Display Name"
MAXIMUM_ATTEMPTS = "Maximum Attempts"
PROBLEM_WEIGHT = "Problem Weight"
RANDOMIZATION = 'Randomization'
SHOW_ANSWER = "Show Answer"
TIMER_BETWEEN_ATTEMPTS = "Timer Between Attempts"
@step('I have created a Blank Common Problem$')
def i_created_blank_common_problem(step):
world.create_course_with_unit()
step.given("I have created another Blank Common Problem")
@step('I have created another Blank Common Problem$')
def i_create_new_common_problem(step):
world.create_component_instance(
step=step,
category='problem',
component_type='Blank Common Problem'
)
@step('I edit and select Settings$')
def i_edit_and_select_settings(_step):
world.edit_component_and_select_settings()
@step('I see the advanced settings and their expected values$')
def i_see_advanced_settings_with_values(step):
world.verify_all_setting_entries(
[
[DISPLAY_NAME, "Blank Common Problem", True],
[MAXIMUM_ATTEMPTS, "", False],
[PROBLEM_WEIGHT, "", False],
[RANDOMIZATION, "Never", False],
[SHOW_ANSWER, "Finished", False],
[TIMER_BETWEEN_ATTEMPTS, "0", False]
])
@step('I can modify the display name')
def i_can_modify_the_display_name(_step):
# Verifying that the display name can be a string containing a floating point value
# (to confirm that we don't throw an error because it is of the wrong type).
index = world.get_setting_entry_index(DISPLAY_NAME)
world.set_field_value(index, '3.4')
verify_modified_display_name()
@step('my display name change is persisted on save')
def my_display_name_change_is_persisted_on_save(step):
world.save_component_and_reopen(step)
verify_modified_display_name()
@step('the problem display name is "(.*)"$')
def verify_problem_display_name(step, name):
assert_equal(name.upper(), world.browser.find_by_css('.problem-header').text)
@step('I can specify special characters in the display name')
def i_can_modify_the_display_name_with_special_chars(_step):
index = world.get_setting_entry_index(DISPLAY_NAME)
world.set_field_value(index, "updated ' \" &")
verify_modified_display_name_with_special_chars()
@step('my special characters and persisted on save')
def special_chars_persisted_on_save(step):
world.save_component_and_reopen(step)
verify_modified_display_name_with_special_chars()
@step('I can revert the display name to unset')
def can_revert_display_name_to_unset(_step):
world.revert_setting_entry(DISPLAY_NAME)
verify_unset_display_name()
@step('my display name is unset on save')
def my_display_name_is_persisted_on_save(step):
world.save_component_and_reopen(step)
verify_unset_display_name()
@step('I can select Per Student for Randomization')
def i_can_select_per_student_for_randomization(_step):
world.browser.select(RANDOMIZATION, "Per Student")
verify_modified_randomization()
@step('my change to randomization is persisted')
def my_change_to_randomization_is_persisted(step):
world.save_component_and_reopen(step)
verify_modified_randomization()
@step('I can revert to the default value for randomization')
def i_can_revert_to_default_for_randomization(step):
world.revert_setting_entry(RANDOMIZATION)
world.save_component_and_reopen(step)
world.verify_setting_entry(world.get_setting_entry(RANDOMIZATION), RANDOMIZATION, "Never", False)
@step('I can set the weight to "(.*)"?')
def i_can_set_weight(_step, weight):
set_weight(weight)
verify_modified_weight()
@step('my change to weight is persisted')
def my_change_to_weight_is_persisted(step):
world.save_component_and_reopen(step)
verify_modified_weight()
@step('I can revert to the default value of unset for weight')
def i_can_revert_to_default_for_unset_weight(step):
world.revert_setting_entry(PROBLEM_WEIGHT)
world.save_component_and_reopen(step)
world.verify_setting_entry(world.get_setting_entry(PROBLEM_WEIGHT), PROBLEM_WEIGHT, "", False)
@step('if I set the weight to "(.*)", it remains unset')
def set_the_weight_to_abc(step, bad_weight):
set_weight(bad_weight)
# We show the clear button immediately on type, hence the "True" here.
world.verify_setting_entry(world.get_setting_entry(PROBLEM_WEIGHT), PROBLEM_WEIGHT, "", True)
world.save_component_and_reopen(step)
# But no change was actually ever sent to the model, so on reopen, explicitly_set is False
world.verify_setting_entry(world.get_setting_entry(PROBLEM_WEIGHT), PROBLEM_WEIGHT, "", False)
@step('if I set the max attempts to "(.*)", it will persist as a valid integer$')
def set_the_max_attempts(step, max_attempts_set):
# on firefox with selenium, the behavior is different.
# eg 2.34 displays as 2.34 and is persisted as 2
index = world.get_setting_entry_index(MAXIMUM_ATTEMPTS)
world.set_field_value(index, max_attempts_set)
world.save_component_and_reopen(step)
value = world.css_value('input.setting-input', index=index)
assert value != "", "max attempts is blank"
assert int(value) >= 0
@step('Edit High Level Source is not visible')
def edit_high_level_source_not_visible(step):
verify_high_level_source_links(step, False)
@step('Edit High Level Source is visible')
def edit_high_level_source_links_visible(step):
verify_high_level_source_links(step, True)
@step('If I press Cancel my changes are not persisted')
def cancel_does_not_save_changes(step):
world.cancel_component(step)
step.given("I edit and select Settings")
step.given("I see the advanced settings and their expected values")
@step('I have enabled latex compiler')
def enable_latex_compiler(step):
url = world.browser.url
step.given("I select the Advanced Settings")
change_value(step, 'use_latex_compiler', 'true')
world.visit(url)
world.wait_for_xmodule()
@step('I have created a LaTeX Problem')
def create_latex_problem(step):
world.create_course_with_unit()
step.given('I have enabled latex compiler')
world.create_component_instance(
step=step,
category='problem',
component_type='Problem Written in LaTeX',
is_advanced=True
)
@step('I edit and compile the High Level Source')
def edit_latex_source(_step):
open_high_level_source()
type_in_codemirror(1, "hi")
world.css_click('.hls-compile')
@step('my change to the High Level Source is persisted')
def high_level_source_persisted(_step):
def verify_text(driver):
css_sel = '.problem div>span'
return world.css_text(css_sel) == 'hi'
world.wait_for(verify_text, timeout=10)
@step('I view the High Level Source I see my changes')
def high_level_source_in_editor(_step):
open_high_level_source()
assert_equal('hi', world.css_value('.source-edit-box'))
@step(u'I have an empty course')
def i_have_empty_course(step):
open_new_course()
@step(u'I go to the import page')
def i_go_to_import(_step):
go_to_import()
@step(u'I import the file "([^"]*)"$')
def i_import_the_file(_step, filename):
import_file(filename)
@step(u'I go to the vertical "([^"]*)"$')
def i_go_to_vertical(_step, vertical):
world.css_click("span:contains('{0}')".format(vertical))
@step(u'I go to the unit "([^"]*)"$')
def i_go_to_unit(_step, unit):
loc = "window.location = $(\"span:contains('{0}')\").closest('a').attr('href')".format(unit)
world.browser.execute_script(loc)
@step(u'I see a message that says "([^"]*)"$')
def i_can_see_message(_step, msg):
msg = json.dumps(msg) # escape quotes
world.css_has_text("h2.title", msg)
@step(u'I can edit the problem$')
def i_can_edit_problem(_step):
world.edit_component()
def verify_high_level_source_links(step, visible):
if visible:
assert_true(world.is_css_present('.launch-latex-compiler'),
msg="Expected to find the latex button but it is not present.")
else:
assert_true(world.is_css_not_present('.launch-latex-compiler'),
msg="Expected not to find the latex button but it is present.")
world.cancel_component(step)
def verify_modified_weight():
world.verify_setting_entry(world.get_setting_entry(PROBLEM_WEIGHT), PROBLEM_WEIGHT, "3.5", True)
def verify_modified_randomization():
world.verify_setting_entry(world.get_setting_entry(RANDOMIZATION), RANDOMIZATION, "Per Student", True)
def verify_modified_display_name():
world.verify_setting_entry(world.get_setting_entry(DISPLAY_NAME), DISPLAY_NAME, '3.4', True)
def verify_modified_display_name_with_special_chars():
world.verify_setting_entry(world.get_setting_entry(DISPLAY_NAME), DISPLAY_NAME, "updated ' \" &", True)
def verify_unset_display_name():
world.verify_setting_entry(world.get_setting_entry(DISPLAY_NAME), DISPLAY_NAME, 'Blank Advanced Problem', False)
def set_weight(weight):
index = world.get_setting_entry_index(PROBLEM_WEIGHT)
world.set_field_value(index, weight)
def open_high_level_source():
world.edit_component()
world.css_click('.launch-latex-compiler > a')
|
Arctic Monkeys - Tranquility Base Hotel & Casino. Vinyl LP. Bleep.
Hot on the heels of the four-piece’s UK tour in support of their game-changing 2018 LP Tranquility Base Hotel & Casino, Arctic Monkeys release the LP’s title track on 7” alongside a brand new track, ‘Anyways’. A band above and beyond the heights of their powers, Arctic Monkeys continue to flirt with the charts, all while inviting awe from critics and fans alike, even when they refuse to box themselves into a single style.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-05-13 21:34
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import gifspool.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, default='', max_length=30)),
('num_gifs', models.IntegerField(default=0)),
('num_likes', models.IntegerField(default=0)),
('post_to', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='Gif',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('tags', models.CharField(blank=True, max_length=300)),
('upload_date', models.DateTimeField(auto_now_add=True)),
('likes_by', models.TextField(blank=True, default='')),
('likes', models.IntegerField(default=0)),
('shocked', models.IntegerField(default=0)),
('loved', models.IntegerField(default=0)),
('laugh', models.IntegerField(default=0)),
('post_to', models.BooleanField(default=None)),
('gif_file', models.FileField(upload_to=gifspool.models.user_directory_path)),
('jpg_path', models.CharField(blank=True, default='', max_length=60, null=True)),
('jpg_url', models.CharField(blank=True, default='', max_length=60, null=True)),
('views', models.IntegerField(blank=True, default=0, null=True)),
('prev_gif', models.IntegerField(blank=True, default=None, null=True)),
('next_gif', models.IntegerField(blank=True, default=None, null=True)),
('creator', models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='GifHashtagLinker',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('gif', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='gifspool.Gif')),
],
),
migrations.CreateModel(
name='GifView',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ip_address', models.CharField(max_length=30)),
('view_date', models.DateTimeField(auto_now_add=True, null=True)),
('gif', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='gifspool.Gif')),
('user', models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Hashtag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('hashtag', models.CharField(max_length=60, unique=True)),
('count', models.IntegerField(default=1)),
],
),
migrations.CreateModel(
name='Like',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('shocked', models.BooleanField(default=False)),
('loved', models.BooleanField(default=False)),
('laugh', models.BooleanField(default=False)),
('like_date', models.DateTimeField(auto_now_add=True, null=True)),
('gif_id', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='gifspool.Gif')),
('user_id', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='gifhashtaglinker',
name='hashtag',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='gifspool.Hashtag'),
),
migrations.AddField(
model_name='gif',
name='hashtags',
field=models.ManyToManyField(blank=True, related_name='gifs_hashtag', through='gifspool.GifHashtagLinker', to='gifspool.Hashtag'),
),
migrations.AddField(
model_name='gif',
name='liked_by',
field=models.ManyToManyField(blank=True, related_name='liked_by_user', through='gifspool.Like', to=settings.AUTH_USER_MODEL),
),
]
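# To apply this migration (standard Django workflow, shown for orientation):
#   python manage.py migrate gifspool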
|
Clin Exp Allergy. 2005 Feb;35(2):234-43.
Molecular cloning of a class IV chitinase allergen from Japanese cedar (Cryptomeria japonica) pollen and competitive inhibition of its immunoglobulin E-binding capacity by latex C-serum.
Fujimura T, Shigeta S, Suwa T, Kawamoto S, Aki T, Masubuchi M, Hayashi T, Hide M, Ono K.
Department of Molecular Biotechnology, Graduate School of Advanced Sciences of Matter, Hiroshima University, Kagamiyama, Higashi-Hiroshima, Japan.
Japanese cedar (Cryptomeria japonica) pollinosis is one of the most prevalent allergic diseases in Japan. Only three C. japonica allergens, Cry j 1, Cry j 2, and CJP-6, have been characterized. The full IgE-binding spectrum of C. japonica pollen allergens demonstrates that many allergens remain to be identified.
The aim of this study was to characterize a novel allergen with a high frequency of IgE binding.
The cDNA coding for a high-frequency IgE-binding protein, designated CJP-4, was cloned from the total mRNA of C. japonica pollen. The corresponding native allergen was purified by affinity precipitation with colloidal chitin and gel chromatography. The IgE-binding ability of purified native CJP-4 was characterized by ELISA and ELISA inhibition.
The CJP-4 cDNA encoded 281 amino acids with significant sequence homology to class IV chitinases. Purified native CJP-4, which migrated as a homogeneous 34-kDa protein on SDS-PAGE, showed endochitinase activity on native PAGE. The purified protein displayed the ability to bind IgE from all patients tested (31/31) in ELISA, whereas Cry j 1 bound to IgE at a 71% frequency (22/31). Pre-incubation with latex C-serum completely inhibited the reaction of pooled serum IgE from patients with C. japonica pollinosis and/or latex allergy to purified CJP-4.
We identified CJP-4 as a novel, fourth C. japonica chitinase allergen with a high IgE-binding frequency. The competitive IgE-binding profile between C. japonica chitinase and latex C-serum indicates that C. japonica chitinase is an important pan-allergen in C. japonica pollen.
|
# -*- coding: utf-8 -*-
from collections import OrderedDict
from gluon import current, URL
from gluon.storage import Storage
def config(settings):
"""
Template settings for SaFiRe: Sahana First Response
http://eden.sahanafoundation.org/wiki/BluePrint/SAFIRE
"""
T = current.T
settings.base.system_name = T("Sahana First Response")
settings.base.system_name_short = T("SAFIRE")
# PrePopulate data
settings.base.prepopulate.append("SAFIRE")
settings.base.prepopulate_demo.append("SAFIRE/Demo")
# Theme (folder to use for views/layout.html)
#settings.base.theme = "SAFIRE"
# Authentication settings
# Should users be allowed to register themselves?
#settings.security.self_registration = False
# Do new users need to verify their email address?
#settings.auth.registration_requires_verification = True
# Do new users need to be approved by an administrator prior to being able to login?
#settings.auth.registration_requires_approval = True
settings.auth.registration_requests_organisation = True
# Approval emails get sent to all admins
settings.mail.approver = "ADMIN"
settings.auth.registration_link_user_to = {"staff": T("Staff"),
}
settings.auth.registration_link_user_to_default = ["staff"]
# Uncomment to display the Map Legend as a floating DIV
settings.gis.legend = "float"
# Uncomment to Disable the Postcode selector in the LocationSelector
#settings.gis.postcode_selector = False # @ToDo: Vary by country (include in the gis_config!)
# Uncomment to show the Print control:
# http://eden.sahanafoundation.org/wiki/UserGuidelines/Admin/MapPrinting
#settings.gis.print_button = True
# L10n settings
# Number formats (defaults to ISO 31-0)
# Decimal separator for numbers (defaults to ,)
settings.L10n.decimal_separator = "."
# Thousands separator for numbers (defaults to space)
settings.L10n.thousands_separator = ","
# Security Policy
# http://eden.sahanafoundation.org/wiki/S3AAA#System-widePolicy
# 1: Simple (default): Global as Reader, Authenticated as Editor
# 2: Editor role required for Update/Delete, unless record owned by session
# 3: Apply Controller ACLs
# 4: Apply both Controller & Function ACLs
# 5: Apply Controller, Function & Table ACLs
# 6: Apply Controller, Function, Table ACLs and Entity Realm
# 7: Apply Controller, Function, Table ACLs and Entity Realm + Hierarchy
# 8: Apply Controller, Function, Table ACLs, Entity Realm + Hierarchy and Delegations
settings.security.policy = 5 # Controller, Function & Table ACLs
# -------------------------------------------------------------------------
# Comment/uncomment modules here to disable/enable them
# Modules menu is defined in modules/eden/menu.py
settings.modules = OrderedDict([
# Core modules which shouldn't be disabled
("default", Storage(
name_nice = "Home",
restricted = False, # Use ACLs to control access to this module
#access = None, # All Users (inc Anonymous) can see this module in the default menu & access the controller
module_type = None # This item is not shown in the menu
)),
("admin", Storage(
name_nice = "Administration",
#description = "Site Administration",
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
("appadmin", Storage(
name_nice = "Administration",
#description = "Site Administration",
module_type = None # No Menu
)),
("errors", Storage(
name_nice = "Ticket Viewer",
#description = "Needed for Breadcrumbs",
restricted = False,
module_type = None # No Menu
)),
("sync", Storage(
name_nice = "Synchronization",
#description = "Synchronization",
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
#("tour", Storage(
# name_nice = T("Guided Tour Functionality"),
# module_type = None,
#)),
#("translate", Storage(
# name_nice = T("Translation Functionality"),
# #description = "Selective translation of strings based on module.",
# module_type = None,
#)),
("gis", Storage(
name_nice = "Map",
#description = "Situation Awareness & Geospatial Analysis",
module_type = 6, # 6th item in the menu
)),
("pr", Storage(
name_nice = "Person Registry",
#description = "Central point to record details on People",
access = "|1|", # Only Administrators can see this module in the default menu (access to controller is possible to all still)
module_type = 10
)),
("org", Storage(
name_nice = "Organizations",
#description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
module_type = 1
)),
("hrm", Storage(
name_nice = "Staff",
#description = "Human Resources Management",
module_type = 2,
)),
("vol", Storage(
name_nice = T("Volunteers"),
#description = "Human Resources Management",
module_type = 2,
)),
("cms", Storage(
name_nice = "Content Management",
#description = "Content Management System",
module_type = 10,
)),
("doc", Storage(
name_nice = "Documents",
#description = "A library of digital resources, such as photos, documents and reports",
module_type = 10,
)),
("msg", Storage(
name_nice = "Messaging",
#description = "Sends & Receives Alerts via Email & SMS",
# The user-visible functionality of this module isn't normally required. Rather it's main purpose is to be accessed from other modules.
module_type = None,
)),
("supply", Storage(
name_nice = "Supply Chain Management",
#description = "Used within Inventory Management, Request Management and Asset Management",
module_type = None, # Not displayed
)),
("inv", Storage(
name_nice = T("Warehouses"),
#description = "Receiving and Sending Items",
module_type = 4
)),
("asset", Storage(
name_nice = "Assets",
#description = "Recording and Assigning Assets",
module_type = 5,
)),
# Vehicle depends on Assets
("vehicle", Storage(
name_nice = "Vehicles",
#description = "Manage Vehicles",
module_type = 10,
)),
#("budget", Storage(
# name_nice = T("Budgets"),
# #description = "Tracks the location, capacity and breakdown of victims in Shelters",
# module_type = 10
#)),
("fin", Storage(
name_nice = T("Finance"),
module_type = 10
)),
("cr", Storage(
name_nice = T("Shelters"),
#description = "Tracks the location, capacity and breakdown of victims in Shelters",
module_type = 10
)),
("project", Storage(
name_nice = "Tasks",
#description = "Tracking of Projects, Activities and Tasks",
module_type = 2
)),
("req", Storage(
name_nice = "Requests",
#description = "Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.",
module_type = 10,
)),
("hms", Storage(
name_nice = T("Hospitals"),
#description = "Helps to monitor status of hospitals",
module_type = 10
)),
#("dvr", Storage(
# name_nice = T("Disaster Victim Registry"),
# #description = "Allow affected individuals & households to register to receive compensation and distributions",
# module_type = 10,
#)),
("event", Storage(
name_nice = "Events",
#description = "Activate Events (e.g. from Scenario templates) for allocation of appropriate Resources (Human, Assets & Facilities).",
module_type = 10,
)),
#("transport", Storage(
# name_nice = T("Transport"),
# module_type = 10,
#)),
#("stats", Storage(
# name_nice = T("Statistics"),
# #description = "Manages statistics",
# module_type = None,
#)),
])
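# Illustrative only: to enable an optional module, uncomment its entry in the
# OrderedDict above, e.g.
#   ("transport", Storage(
#       name_nice = T("Transport"),
#       module_type = 10,
#   )),
# Module entries use the same Storage keys (name_nice, module_type, access)
# documented by the comments on the core modules above.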
# -------------------------------------------------------------------------
# CMS
# -------------------------------------------------------------------------
settings.cms.richtext = True
# -------------------------------------------------------------------------
# Organisations
# -------------------------------------------------------------------------
settings.org.documents_tab = True
settings.org.projects_tab = False
# -------------------------------------------------------------------------
# Shelters
# -------------------------------------------------------------------------
settings.cr.people_registration = False
# -------------------------------------------------------------------------
def customise_cr_shelter_resource(r, tablename):
#table = current.s3db.cr_shelter
f = current.s3db.cr_shelter.shelter_service_id
f.readable = f.writable = False
settings.customise_cr_shelter_resource = customise_cr_shelter_resource
# -------------------------------------------------------------------------
# Events
# -------------------------------------------------------------------------
def event_rheader(r):
rheader = None
record = r.record
if record and r.representation == "html":
from gluon import A, DIV, TABLE, TR, TH
from s3 import s3_rheader_tabs
name = r.name
if name == "incident":
if settings.get_incident_label(): # == "Ticket"
label = T("Ticket Details")
else:
label = T("Incident Details")
tabs = [(label, None),
#(T("Tasks"), "task"),
#(T("Human Resources"), "human_resource"),
#(T("Equipment"), "asset"),
(T("Action Plan"), "plan"),
(T("Incident Reports"), "incident_report"),
(T("Logs"), "log"),
(T("Expenses"), "expense"),
(T("Situation Reports"), "sitrep"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
record_id = r.id
incident_type_id = record.incident_type_id
editable = current.auth.s3_has_permission("UPDATE", "event_incident", record_id)
if editable:
# Dropdown of Scenarios to select
stable = current.s3db.event_scenario
query = (stable.incident_type_id == incident_type_id) & \
(stable.deleted == False)
scenarios = current.db(query).select(stable.id,
stable.name,
)
if len(scenarios) and r.method != "event":
from gluon import SELECT, OPTION
dropdown = SELECT(_id="scenarios")
dropdown["_data-incident_id"] = record_id
dappend = dropdown.append
dappend(OPTION(T("Select Scenario")))
for s in scenarios:
dappend(OPTION(s.name, _value=s.id))
scenarios = TR(TH("%s: " % T("Scenario")),
dropdown,
)
s3 = current.response.s3
script = "/%s/static/themes/SAFIRE/js/incident_profile.js" % r.application
if script not in s3.scripts:
s3.scripts.append(script)
s3.js_global.append('''i18n.scenarioConfirm="%s"''' % T("Populate Incident with Tasks, Organizations, Positions and Equipment from the Scenario?"))
else:
scenarios = ""
else:
scenarios = ""
if record.exercise:
exercise = TH(T("EXERCISE"))
else:
exercise = TH()
if record.closed:
closed = TH(T("CLOSED"))
else:
closed = TH()
if record.event_id or r.method == "event" or not editable:
event = ""
else:
if settings.get_event_label(): # == "Disaster"
label = T("Assign to Disaster")
else:
label = T("Assign to Event")
event = A(label,
_href = URL(c = "event",
f = "incident",
args = [record_id, "event"],
),
_class = "action-btn"
)
table = r.table
rheader = DIV(TABLE(TR(exercise),
TR(TH("%s: " % table.name.label),
record.name,
),
TR(TH("%s: " % table.incident_type_id.label),
table.incident_type_id.represent(incident_type_id),
),
TR(TH("%s: " % table.location_id.label),
table.location_id.represent(record.location_id),
),
# @ToDo: Add Zone
TR(TH("%s: " % table.severity.label),
table.severity.represent(record.severity),
),
TR(TH("%s: " % table.level.label),
table.level.represent(record.level),
),
TR(TH("%s: " % table.organisation_id.label),
table.organisation_id.represent(record.organisation_id),
),
TR(TH("%s: " % table.person_id.label),
table.person_id.represent(record.person_id),
),
scenarios,
TR(TH("%s: " % table.comments.label),
record.comments,
),
TR(TH("%s: " % table.date.label),
table.date.represent(record.date),
),
TR(closed),
event,
), rheader_tabs)
elif name == "incident_report":
record_id = r.id
ltable = current.s3db.event_incident_report_incident
query = (ltable.incident_report_id == record_id)
link = current.db(query).select(ltable.incident_id,
limitby = (0, 1)
).first()
if link:
from s3 import S3Represent
represent = S3Represent(lookup="event_incident", show_link=True)
rheader = DIV(TABLE(TR(TH("%s: " % ltable.incident_id.label),
represent(link.incident_id),
),
))
else:
if settings.get_incident_label(): # == "Ticket"
label = T("Assign to Ticket")
else:
label = T("Assign to Incident")
rheader = DIV(A(label,
_href = URL(c = "event",
f = "incident_report",
args = [record_id, "assign"],
),
_class = "action-btn"
))
elif name == "event":
if settings.get_event_label(): # == "Disaster"
label = T("Disaster Details")
else:
label = T("Event Details")
if settings.get_incident_label(): # == "Ticket"
INCIDENTS = T("Tickets")
else:
INCIDENTS = T("Incidents")
tabs = [(label, None),
(INCIDENTS, "incident"),
(T("Documents"), "document"),
(T("Photos"), "image"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
table = r.table
rheader = DIV(TABLE(TR(TH("%s: " % table.event_type_id.label),
table.event_type_id.represent(record.event_type_id),
),
TR(TH("%s: " % table.name.label),
record.name,
),
TR(TH("%s: " % table.start_date.label),
table.start_date.represent(record.start_date),
),
TR(TH("%s: " % table.comments.label),
record.comments,
),
), rheader_tabs)
elif name == "scenario":
tabs = [(T("Scenario Details"), None),
#(T("Tasks"), "task"),
#(T("Human Resources"), "human_resource"),
#(T("Equipment"), "asset"),
(T("Action Plan"), "plan"),
(T("Incident Reports"), "incident_report"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
table = r.table
rheader = DIV(TABLE(TR(TH("%s: " % table.incident_type_id.label),
table.incident_type_id.represent(record.incident_type_id),
),
TR(TH("%s: " % table.organisation_id.label),
table.organisation_id.represent(record.organisation_id),
),
TR(TH("%s: " % table.location_id.label),
table.location_id.represent(record.location_id),
),
TR(TH("%s: " % table.name.label),
record.name,
),
TR(TH("%s: " % table.comments.label),
record.comments,
),
), rheader_tabs)
return rheader
# -------------------------------------------------------------------------
def customise_event_event_controller(**attr):
#s3 = current.response.s3
# No sidebar menu
#current.menu.options = None
attr["rheader"] = event_rheader
return attr
settings.customise_event_event_controller = customise_event_event_controller
# -------------------------------------------------------------------------
def customise_event_incident_report_resource(r, tablename):
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Log Call"),
title_display = T("Call Log Details"),
title_list = T("Call Logs"),
title_update = T("Edit Call Log"),
label_list_button = T("List Call Logs"),
label_delete_button = T("Delete Call Log"),
msg_record_created = T("Call Log added"),
msg_record_modified = T("Call Log updated"),
msg_record_deleted = T("Call Log removed"),
msg_list_empty = T("No Calls currently logged"),
)
settings.customise_event_incident_report_resource = customise_event_incident_report_resource
# -------------------------------------------------------------------------
def customise_event_incident_report_controller(**attr):
from gluon import A
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
method = r.method
if method in (None, "create"):
current.s3db.gis_location.addr_street.label = T("Street Address or Location Details")
from s3 import S3SQLCustomForm
crud_form = S3SQLCustomForm((T("What is it?"), "name"),
"incident_type_id",
(T("Who am I speaking with?"), "reported_by"),
(T("How can we contact you?"), "contact"),
(T("Where did this Incident take place?"), "location_id"),
(T("Explain the Situation?"), "description"),
(T("What are your immediate needs?"), "needs"),
)
r.resource.configure(create_next = URL(args=["[id]", "assign"]),
crud_form = crud_form,
)
return True
s3.prep = custom_prep
# No sidebar menu
current.menu.options = None
req_args = current.request.args
if len(req_args) > 1 and req_args[1] == "assign":
if settings.get_incident_label(): # == "Ticket"
label = T("New Ticket")
else:
label = T("New Incident")
attr["rheader"] = A(label,
_class = "action-btn",
_href = URL(c="event", f="incident",
args = ["create"],
vars = {"incident_report_id": req_args[0]},
),
)
else:
attr["rheader"] = event_rheader
return attr
settings.customise_event_incident_report_controller = customise_event_incident_report_controller
# -------------------------------------------------------------------------
def event_incident_create_onaccept(form):
"""
Automate Level based on Type, Zone (intersect from Location) & Severity
@ToDo: Move this to SAFIRE/SC
"""
db = current.db
s3db = current.s3db
form_vars_get = form.vars.get
incident_id = form_vars_get("id")
# If Incident Type is Chemical then level must be > 2
level = form_vars_get("level")
if level and int(level) < 3:
incident_type_id = form_vars_get("incident_type_id")
ittable = s3db.event_incident_type
incident_type = db(ittable.id == incident_type_id).select(ittable.name,
limitby = (0,1)
).first().name
if incident_type == "Chemical Hazard":
itable = s3db.event_incident
db(itable.id == incident_id).update(level = 3)
current.response.warning = T("Chemical Hazard Incident so Level raised to 3")
# Alert Lead Agency
organisation_id = form_vars_get("organisation_id")
if organisation_id:
otable = s3db.org_organisation_tag
query = (otable.organisation_id == organisation_id) & \
(otable.tag == "duty")
duty = db(query).select(otable.value,
limitby = (0, 1)
).first()
if duty:
current.msg.send_sms_via_api(duty.value,
"You have been assigned an Incident: %s%s" % (settings.get_base_public_url(),
URL(c="event", f= "incident",
args = incident_id),
))
# -------------------------------------------------------------------------
def customise_event_incident_resource(r, tablename):
from s3 import S3LocationSelector
s3db = current.s3db
table = s3db.event_incident
f = table.severity
f.readable = f.writable = True
f = table.level
f.readable = f.writable = True
table.location_id.widget = S3LocationSelector(polygons = True,
show_address = True,
)
f = table.organisation_id
f.readable = f.writable = True
f.label = T("Lead Response Organization")
if r.method == "plan":
table.action_plan.label = T("Event Action Plan")
else:
f = table.action_plan
f.readable = f.writable = False
if r.interactive:
s3db.add_custom_callback(tablename,
"create_onaccept",
event_incident_create_onaccept,
)
settings.customise_event_incident_resource = customise_event_incident_resource
# -------------------------------------------------------------------------
def customise_event_incident_controller(**attr):
s3db = current.s3db
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
resource = r.resource
# Redirect to action plan after create
resource.configure(create_next = URL(c="event", f="incident",
args = ["[id]", "plan"]),
)
method = r.method
if method == "create":
incident_report_id = r.get_vars.get("incident_report_id")
if incident_report_id:
# Got here from incident report assign => "New Incident"
# - prepopulate incident name from report title
# - copy incident type and location from report
# - onaccept: link the incident report to the incident
if r.http == "GET":
from s3 import s3_truncate
rtable = s3db.event_incident_report
incident_report = current.db(rtable.id == incident_report_id).select(rtable.name,
rtable.incident_type_id,
rtable.location_id,
limitby = (0, 1),
).first()
table = r.table
table.name.default = s3_truncate(incident_report.name, 64)
table.incident_type_id.default = incident_report.incident_type_id
table.location_id.default = incident_report.location_id
elif r.http == "POST":
def create_onaccept(form):
s3db.event_incident_report_incident.insert(incident_id = form.vars.id,
incident_report_id = incident_report_id,
)
s3db.add_custom_callback("event_incident",
"create_onaccept",
create_onaccept,
)
elif method == "plan" and settings.get_incident_label(): # == "Ticket"
s3db.event_task
s3db.event_organisation
crud_strings = s3.crud_strings
crud_strings.event_task.msg_list_empty = T("No Tasks currently registered for this ticket")
crud_strings.event_organisation.msg_list_empty = T("No Organizations currently registered in this ticket")
return True
s3.prep = custom_prep
# No sidebar menu
current.menu.options = None
attr["rheader"] = event_rheader
return attr
settings.customise_event_incident_controller = customise_event_incident_controller
# -------------------------------------------------------------------------
def customise_event_asset_resource(r, tablename):
table = current.s3db.event_asset
table.item_id.label = T("Item Type")
table.asset_id.label = T("Specific Item")
# DateTime
from gluon import IS_EMPTY_OR
from s3 import IS_UTC_DATETIME, S3CalendarWidget, S3DateTime
for f in (table.start_date, table.end_date):
f.requires = IS_EMPTY_OR(IS_UTC_DATETIME())
f.represent = lambda dt: S3DateTime.datetime_represent(dt, utc=True)
f.widget = S3CalendarWidget(timepicker = True)
if settings.get_incident_label(): # == "Ticket"
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Equipment"),
title_display = T("Equipment Details"),
title_list = T("Equipment"),
title_update = T("Edit Equipment"),
label_list_button = T("List Equipment"),
label_delete_button = T("Remove Equipment from this ticket"),
msg_record_created = T("Equipment added"),
msg_record_modified = T("Equipment updated"),
msg_record_deleted = T("Equipment removed"),
msg_list_empty = T("No Equipment currently registered for this ticket"))
else:
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Equipment"),
title_display = T("Equipment Details"),
title_list = T("Equipment"),
title_update = T("Edit Equipment"),
label_list_button = T("List Equipment"),
label_delete_button = T("Remove Equipment from this incident"),
msg_record_created = T("Equipment added"),
msg_record_modified = T("Equipment updated"),
msg_record_deleted = T("Equipment removed"),
msg_list_empty = T("No Equipment currently registered for this incident"))
settings.customise_event_asset_resource = customise_event_asset_resource
# -------------------------------------------------------------------------
def event_human_resource_onaccept(form, create=True):
"""
When a Position is assigned to an Incident:
- set_event_from_incident
- add Log Entry
- send Notification
"""
db = current.db
s3db = current.s3db
s3db.event_set_event_from_incident(form, "event_human_resource")
table = s3db.event_human_resource
form_vars = form.vars
form_vars_get = form_vars.get
link_id = form_vars_get("id")
incident_id = form_vars_get("incident_id")
if not incident_id:
link = db(table.id == link_id).select(table.incident_id,
limitby = (0, 1)
).first()
incident_id = link.incident_id
pe_id = None
if create:
person_id = form_vars_get("person_id")
if person_id:
ptable = s3db.pr_person
person = db(ptable.id == person_id).select(ptable.pe_id,
limitby = (0, 1)
).first()
pe_id = person.pe_id
job_title_id = form_vars_get("job_title_id")
if job_title_id:
s3db.event_incident_log.insert(incident_id = incident_id,
name = "Person Requested",
comments = s3db.event_human_resource.job_title_id.represent(job_title_id),
)
else:
# Update
record = form.record
if record: # Not True for a record merger
from s3dal import Field
changed = {}
for var in form_vars:
vvar = form_vars[var]
if isinstance(vvar, Field):
# modified_by/modified_on
continue
rvar = record.get(var, "NOT_PRESENT")
if rvar != "NOT_PRESENT" and vvar != rvar:
f = table[var]
if var == "pe_id":
pe_id = vvar
type_ = f.type
if type_ == "integer" or \
type_.startswith("reference"):
if vvar:
vvar = int(vvar)
if vvar == rvar:
continue
represent = table[var].represent
if represent:
if hasattr(represent, "show_link"):
represent.show_link = False
else:
represent = lambda o: o
if rvar:
changed[var] = "%s changed from %s to %s" % \
(f.label, represent(rvar), represent(vvar))
else:
changed[var] = "%s changed to %s" % \
(f.label, represent(vvar))
if changed:
table = s3db.event_incident_log
text = []
for var in changed:
text.append(changed[var])
text = "\n".join(text)
table.insert(incident_id = incident_id,
#name = "Person Assigned",
name = "Person Request Updated",
comments = text,
)
if pe_id:
# Notify Assignee
if settings.get_incident_label(): # == "Ticket"
label = T("Ticket")
else:
label = T("Incident")
current.msg.send_by_pe_id(pe_id,
subject = "",
message = "You have been assigned to an %s: %s%s" % \
(label,
settings.get_base_public_url(),
URL(c="event", f= "incident",
args = [incident_id, "human_resource", link_id]),
),
contact_method = "SMS")
# -------------------------------------------------------------------------
def customise_event_human_resource_resource(r, tablename):
s3db = current.s3db
table = s3db.event_human_resource
# DateTime
from gluon import IS_EMPTY_OR
from s3 import IS_UTC_DATETIME, S3CalendarWidget, S3DateTime
for f in (table.start_date, table.end_date):
f.requires = IS_EMPTY_OR(IS_UTC_DATETIME())
f.represent = lambda dt: S3DateTime.datetime_represent(dt, utc=True)
f.widget = S3CalendarWidget(timepicker = True)
if settings.get_incident_label(): # == "Ticket"
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Person"),
title_display = T("Person Details"),
title_list = T("Personnel"),
title_update = T("Edit Person"),
label_list_button = T("List Personnel"),
label_delete_button = T("Remove Person from this ticket"),
msg_record_created = T("Person added"),
msg_record_modified = T("Person updated"),
msg_record_deleted = T("Person removed"),
msg_list_empty = T("No Persons currently registered for this ticket"))
else:
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Person"),
title_display = T("Person Details"),
title_list = T("Personnel"),
title_update = T("Edit Person"),
label_list_button = T("List Personnel"),
label_delete_button = T("Remove Person from this incident"),
msg_record_created = T("Person added"),
msg_record_modified = T("Person updated"),
msg_record_deleted = T("Person removed"),
msg_list_empty = T("No Persons currently registered for this incident"))
s3db.configure(tablename,
# Deliberately over-rides
create_onaccept = event_human_resource_onaccept,
update_onaccept = lambda form:
event_human_resource_onaccept(form, create=False),
)
settings.customise_event_human_resource_resource = customise_event_human_resource_resource
# -------------------------------------------------------------------------
def customise_event_scenario_controller(**attr):
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
if r.method != "plan":
f = r.table.action_plan
f.readable = f.writable = False
if r.method == "create"and r.http == "POST":
r.resource.configure(create_next = URL(c="event", f="scenario",
args = ["[id]", "plan"]),
)
return True
s3.prep = custom_prep
# No sidebar menu
current.menu.options = None
attr["rheader"] = event_rheader
return attr
settings.customise_event_scenario_controller = customise_event_scenario_controller
# -------------------------------------------------------------------------
def customise_event_scenario_asset_resource(r, tablename):
table = current.s3db.event_scenario_asset
table.item_id.label = T("Item Type")
table.asset_id.label = T("Specific Item")
if settings.get_incident_label(): # == "Ticket"
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Equipment"),
title_display = T("Equipment Details"),
title_list = T("Equipment"),
title_update = T("Edit Equipment"),
label_list_button = T("List Equipment"),
label_delete_button = T("Remove Equipment from this ticket"),
msg_record_created = T("Equipment added"),
msg_record_modified = T("Equipment updated"),
msg_record_deleted = T("Equipment removed"),
msg_list_empty = T("No Equipment currently registered for this ticket"))
else:
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Equipment"),
title_display = T("Equipment Details"),
title_list = T("Equipment"),
title_update = T("Edit Equipment"),
label_list_button = T("List Equipment"),
label_delete_button = T("Remove Equipment from this incident"),
msg_record_created = T("Equipment added"),
msg_record_modified = T("Equipment updated"),
msg_record_deleted = T("Equipment removed"),
msg_list_empty = T("No Equipment currently registered for this incident"))
settings.customise_event_scenario_asset_resource = customise_event_scenario_asset_resource
# -------------------------------------------------------------------------
def customise_event_scenario_human_resource_resource(r, tablename):
if settings.get_incident_label(): # == "Ticket"
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Person"),
title_display = T("Person Details"),
title_list = T("Personnel"),
title_update = T("Edit Person"),
label_list_button = T("List Personnel"),
label_delete_button = T("Remove Person from this ticket"),
msg_record_created = T("Person added"),
msg_record_modified = T("Person updated"),
msg_record_deleted = T("Person removed"),
msg_list_empty = T("No Persons currently registered for this ticket"))
else:
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Person"),
title_display = T("Person Details"),
title_list = T("Personnel"),
title_update = T("Edit Person"),
label_list_button = T("List Personnel"),
label_delete_button = T("Remove Person from this incident"),
msg_record_created = T("Person added"),
msg_record_modified = T("Person updated"),
msg_record_deleted = T("Person removed"),
msg_list_empty = T("No Persons currently registered for this incident"))
settings.customise_event_scenario_human_resource_resource = customise_event_scenario_human_resource_resource
# -------------------------------------------------------------------------
# HRM
# -------------------------------------------------------------------------
settings.hrm.job_title_deploy = True
settings.hrm.org_dependent_job_titles = True
# -------------------------------------------------------------------------
# Organisations
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
def customise_org_organisation_resource(r, tablename):
s3db = current.s3db
# Custom Components
s3db.add_components(tablename,
org_organisation_tag = (# On-call Duty Number
{"name": "duty",
"joinby": "organisation_id",
"filterby": {"tag": "duty",
},
"multiple": False,
},
),
)
from s3 import S3SQLCustomForm, S3SQLInlineComponent, S3SQLInlineLink, \
IS_EMPTY_OR, IS_PHONE_NUMBER_MULTI, S3PhoneWidget, s3_phone_represent
# Individual settings for specific tag components
components_get = s3db.resource(tablename).components.get
duty = components_get("duty")
f = duty.table.value
    f.represent = s3_phone_represent
f.requires = IS_EMPTY_OR(IS_PHONE_NUMBER_MULTI())
f.widget = S3PhoneWidget()
crud_form = S3SQLCustomForm("name",
"acronym",
S3SQLInlineLink("organisation_type",
field = "organisation_type_id",
                                                # Defaulting to 10 options just adds unnecessary complexity to a commonly-used form, & commonly an early one (create Org when registering)
search = False,
label = T("Type"),
multiple = False,
widget = "multiselect",
),
"country",
(T("Reception Phone #"), "phone"),
S3SQLInlineComponent("duty",
label = T("On-call Duty Number"),
fields = [("", "value")],
multiple = False,
),
"website",
"logo",
"comments",
)
s3db.configure(tablename,
crud_form = crud_form,
)
settings.customise_org_organisation_resource = customise_org_organisation_resource
# -------------------------------------------------------------------------
# Projects
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
def project_task_onaccept(form, create=True):
"""
Send Person a Notification when they are assigned to a Task
Log changes in Incident Log
"""
if current.request.function == "scenario":
# Must be a Scenario
# - don't Log
# - don't send Notification
return
db = current.db
s3db = current.s3db
ltable = s3db.event_task
form_vars = form.vars
form_vars_get = form_vars.get
task_id = form_vars_get("id")
link = db(ltable.task_id == task_id).select(ltable.incident_id,
limitby = (0, 1)
).first()
if not link:
# Not attached to an Incident
# - don't Log
# - don't send Notification
return
incident_id = link.incident_id
if create:
pe_id = form_vars_get("pe_id")
# Log
name = form_vars_get("name")
if name:
s3db.event_incident_log.insert(incident_id = incident_id,
name = "Task Created",
comments = name,
)
else:
# Update
pe_id = None
record = form.record
if record: # Not True for a record merger
from s3dal import Field
table = s3db.project_task
changed = {}
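                # Diff the submitted form values against the stored record to build a human-readable change log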
for var in form_vars:
vvar = form_vars[var]
if isinstance(vvar, Field):
# modified_by/modified_on
continue
if var == "pe_id":
pe_id = vvar
rvar = record.get(var, "NOT_PRESENT")
if rvar != "NOT_PRESENT" and vvar != rvar:
f = table[var]
type_ = f.type
if type_ == "integer" or \
type_.startswith("reference"):
if vvar:
vvar = int(vvar)
if vvar == rvar:
continue
represent = table[var].represent
if represent:
if hasattr(represent, "show_link"):
represent.show_link = False
else:
represent = lambda o: o
if rvar:
changed[var] = "%s changed from %s to %s" % \
(f.label, represent(rvar), represent(vvar))
else:
changed[var] = "%s changed to %s" % \
(f.label, represent(vvar))
if changed:
table = s3db.event_incident_log
text = []
for var in changed:
text.append(changed[var])
text = "\n".join(text)
table.insert(incident_id = incident_id,
name = "Task Updated",
comments = text,
)
if pe_id:
# Notify Assignee
message = "You have been assigned a Task: %s%s" % \
(settings.get_base_public_url(),
URL(c="event", f= "incident",
args = [incident_id, "task", task_id]),
)
instance_type = s3db.pr_instance_type(pe_id)
if instance_type == "org_organisation":
# Notify the Duty Number for the Organisation, not everyone in the Organisation!
otable = s3db.org_organisation
ottable = s3db.org_organisation_tag
query = (otable.pe_id == pe_id) & \
(ottable.organisation_id == otable.id) & \
(ottable.tag == "duty")
duty = db(query).select(ottable.value,
limitby = (0, 1)
).first()
if duty:
current.msg.send_sms_via_api(duty.value,
message)
else:
task_notification = settings.get_event_task_notification()
if task_notification:
current.msg.send_by_pe_id(pe_id,
subject = "%s: Task assigned to you" % settings.get_system_name_short(),
message = message,
contact_method = task_notification)
# -------------------------------------------------------------------------
def customise_project_task_resource(r, tablename):
s3db = current.s3db
f = s3db.project_task.source
f.readable = f.writable = False
s3db.configure(tablename,
# No need to see time log: KISS
crud_form = None,
                   # NB We deliberately over-ride the default one
create_onaccept = project_task_onaccept,
# In event_ActionPlan()
#list_fields = ["priority",
# "name",
# "pe_id",
# "status_id",
# "date_due",
# ],
update_onaccept = lambda form:
project_task_onaccept(form, create=False),
)
settings.customise_project_task_resource = customise_project_task_resource
# END =========================================================================
|
I know this is one that many of you have been dreading. I’ve deliberately put it on the weekend so you’ve hopefully got a bit more time to work through it. If you don’t have kids or grandkids then you get the day to catch up!
Today’s task is to get rid of all the excess toys that stockpile in your home. Start by grabbing three big boxes: one for charity/giving away, one for rubbish and one for selling. As you sort, ask these questions of each toy:
Does it promote the sort of play you want to encourage?
Is it easy to store and use?
Do you have multiples of this type of toy?
Check with the charity as to what items are suitable to donate.
Okay, I got stuck here but today is the day I prevail. Wish me luck and thanks for the great series!
|
"""
Executable code for the PCA user story.
Run disc() to explore randomly generated flat disc data. Run hypersphere() to explore a high-dimensional
ball of randomly generated data.
"""
import pca_disc
from pca_disc import *
from PyDSTool.Toolbox import synthetic_data as sd
import random
import numpy as np
import __future__
DOI = [(-10,10),(-10,10)]
def disc():
pts = sd.generate_ball(100, 2, 10)
pts = np.concatenate((pts, np.zeros((100, 1))), axis=1)
trans_am = 12
trans_ax = 1
X = [[],[],[]]
for i in range(3):
X[i] = rotate_z(rotate_y(rotate_x(translate(pts, trans_ax, trans_am),
random.uniform(0, 2*np.pi)),
random.uniform(0, 2*np.pi)),
random.uniform(0, 2*np.pi))
X[i] = noise(X[i], 2, 0.3, 0, 10)
rot_layers = ['rot1', 'rot2', 'rot3']
rot_styles = ['r', 'g', 'b']
fig, [before, after, variance] = pca_disc.setupDisplay(rot_layers, rot_styles, DOI)
layer_obj = before.get_layer('orig_data')
layer_obj.add_data(pts[:,0], pts[:,1], pts[:,2])
layer_obj.set_style('y.')
before.build_layers()
after.build_layers()
variance.build_layers()
return ControlSys(fig, X, rot_layers, rot_styles, 2, before, after, variance)
def hypersphere(dim):
pts = sd.generate_ball(100, dim, 10)
#Create and stretch different hypersphere "clusters":
X1 = translate(stretch(stretch(sd.generate_ball(133, dim, 10), 0, 1.4), 1, 1.4), 0, 25)
X2 = translate(sd.generate_ball(110, dim, 10), 1, 20)
X3 = translate(noise(sd.generate_ball(95, dim, 10), 2, 0.6, 0, 2), 2, 15)
X = [X1, X2, X3]
clus_layers = ['clus1', 'clus2', 'clus3']
clus_styles = ['r', 'g', 'b']
fig, [before, after, variance] = pca_disc.setupDisplay(clus_layers, clus_styles, DOI)
proj_vecsHI = pca_disc.ortho_proj_mat(len(X[0][0]), 3)
proj_vecsLO = pca_disc.ortho_proj_mat(len(X[0][0]), 2)
    #Plot the entire dataset in yellow.
X_all = np.concatenate((X1,X2,X3))
layer_obj = before.get_layer('orig_data')
layer_obj.add_data(np.dot(X_all, proj_vecsHI).transpose())
layer_obj.set_style('y.')
return ControlSys(gui.masterWin, X, clus_layers, clus_styles, 2, proj_vecsLO, proj_vecsHI)
ctrl_sys = disc()
#ctrl_sys = hypersphere(6)
halt = True
|
Maybe Smith was too wide open. He couldn’t haul in the pass. Dallas took three points.
California will leave you dreaming about when you can go back again. Rob Burnett muscles in on the surf beaches and beyond in sunny southern California before heading to Hollywood for some A-list glamour (24 Oct 2015; sun down: a surfer catches the evening’s waves at Huntington Beach).
It’s not quite 7am and I am halfway through my first ever yoga session, being pushed by an impossibly enthusiastic teacher named Jordan. I’m stretching my body into poses with impressive-sounding names like ‘warrior’ and ‘cobra’. “You guys are doing so great! Remember, it’s about the journey, not the destination. And baaaaack into downward facing dog. Perfect!” Jordan encourages her reluctant students. “Oh my, you guys are the best class I’ve had all month!” Jordan grins.
Entering the house, now a museum, you see a taxidermied polar bear. Then, passing through narrow, twisting, turning hallways that mimic roads leading to the house, you emerge into sun-drenched, whitewashed spaces full of surprises: dead-end hallways and a pink-curtained room that echoes only if you stand in the right spot. Thick sisal carpets, bundles of dried flowers, more stuffed animals, seashells and starfish, antique furniture and photos of Dali in his heyday fill the interior.
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Test helpers."""
import os
from copy import deepcopy
import pytest
from flask import Flask
from flask_oauthlib.client import OAuth as FlaskOAuth
from flask_oauthlib.client import OAuthRemoteApp
from invenio_db import InvenioDB, db
from sqlalchemy_utils.functions import create_database, database_exists
from invenio_oauthclient import InvenioOAuthClient
from invenio_oauthclient.contrib.orcid import REMOTE_APP
def test_version():
"""Test version import."""
from invenio_oauthclient import __version__
assert __version__
def test_init():
"""Test extension initialization."""
app = Flask('testapp')
FlaskOAuth(app)
ext = InvenioOAuthClient(app)
assert 'invenio-oauthclient' in app.extensions
app = Flask('testapp')
ext = InvenioOAuthClient(app)
assert 'invenio-oauthclient' in app.extensions
app = Flask('testapp')
FlaskOAuth(app)
ext = InvenioOAuthClient()
assert 'invenio-oauthclient' not in app.extensions
ext.init_app(app)
assert 'invenio-oauthclient' in app.extensions
class _CustomOAuthRemoteApp(OAuthRemoteApp):
"""Custom OAuthRemoteApp used for testing."""
def test_standard_remote_app_factory(base_app):
"""Test standard remote_app class."""
base_app.config.update(
OAUTHCLIENT_REMOTE_APPS=dict(
custom_app=REMOTE_APP
)
)
FlaskOAuth(base_app)
InvenioOAuthClient(base_app)
assert isinstance(
base_app.extensions['oauthlib.client'].remote_apps['custom_app'],
OAuthRemoteApp)
assert not isinstance(
base_app.extensions['oauthlib.client'].remote_apps['custom_app'],
_CustomOAuthRemoteApp)
def test_remote_app_factory_global_customization(base_app):
"""Test remote_app override with global variable."""
base_app.config.update(
OAUTHCLIENT_REMOTE_APP=_CustomOAuthRemoteApp,
OAUTHCLIENT_REMOTE_APPS=dict(
custom_app=REMOTE_APP
)
)
FlaskOAuth(base_app)
InvenioOAuthClient(base_app)
assert isinstance(
base_app.extensions['oauthlib.client'].remote_apps['custom_app'],
_CustomOAuthRemoteApp)
def test_remote_app_factory_local_customization(base_app):
"""Test custom remote_app for one app only."""
config_for_one_app = deepcopy(REMOTE_APP)
config_for_one_app['remote_app'] = _CustomOAuthRemoteApp
base_app.config.update(
OAUTHCLIENT_REMOTE_APPS=dict(
custom_app=config_for_one_app
)
)
FlaskOAuth(base_app)
InvenioOAuthClient(base_app)
assert isinstance(
base_app.extensions['oauthlib.client'].remote_apps['custom_app'],
_CustomOAuthRemoteApp)
def test_db(request):
"""Test database backend."""
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get(
'SQLALCHEMY_DATABASE_URI', 'sqlite://'
)
InvenioDB(app)
FlaskOAuth(app)
InvenioOAuthClient(app)
def teardown():
with app.app_context():
db.drop_all()
request.addfinalizer(teardown)
with app.app_context():
        is_sqlite = str(db.engine.url) == 'sqlite://'
        db_exists = database_exists(str(db.engine.url))
        if not is_sqlite and not db_exists:
create_database(str(db.engine.url))
db.create_all()
tables = list(filter(lambda table: table.startswith('oauthclient'),
db.metadata.tables.keys()))
assert len(tables) == 3
def test_alembic(app):
"""Test alembic recipes."""
ext = app.extensions['invenio-db']
with app.app_context():
if db.engine.name == 'sqlite':
raise pytest.skip('Upgrades are not supported on SQLite.')
assert not ext.alembic.compare_metadata()
db.drop_all()
ext.alembic.upgrade()
assert not ext.alembic.compare_metadata()
ext.alembic.downgrade(target='96e796392533')
ext.alembic.upgrade()
assert not ext.alembic.compare_metadata()
ext.alembic.downgrade(target='96e796392533')
|
# iNeuron
#
#
# Michele Giugliano, 18-19/10/2014, Antwerpen
# http://www.ua.ac.be/michele.giugliano
#
# pythonista://MG/iNeuron?action=run
#
import string
import sound
from scene import *
from random import *
from time import localtime
from itertools import chain
from math import sin, exp, fmod, sqrt
ipad = False #will be set in the setup method
# Our class inherits from Scene, so that its draw method
# is automatically called 60 times per second when we run it.
# This implies approximately once every 16.6666... ms
class iNeuron (Scene):
def setup(self):
global ipad, yy, tt
ipad = self.size.w > 700
#Render all the digits as individual images:
self.numbers = {}
self.numbers_small = {}
font_size = 150 if self.size.w > 700 else 60
for s in chain(string.digits, [':', '.']):
#render_text returns a tuple of
#an image name and its size.
self.numbers[s] = render_text(s, 'Helvetica-Light', font_size)
self.numbers_small[s] = render_text(s, 'Courier', 20)
#--------------------------------------------------------------------------
# Simulation definition and control, general parameters
self.umin = -100.; # Minimal voltage to be displayed
self.umax = 100.; # Maximal voltage to be displayed
self.t = 0; # Current sim. time [ms]
self.mdt = .1; # Integration time step [ms]
self.u = -70.6; # Membrane potential state variable
self.w = 0.; # Adaptation state variable
self.t0 =-9999.; # Last firing time [ms], for refractoryness
self.In = 0.; # Synaptic fluctuating background current
self.Iext = 0.; # External current, when the touch screen is touched
#--------------------------------------------------------------------------
# (1) Model neuron parameters (i.e. exponential I&F)
self.th = 20; #[mV] - peak value for a spike
self.C = 281; #[pF] - membrane capacitance
self.g_L = 30; #[nS] - leak conductance
self.E_L = -70.6; #[mV] - leak reversal potential (or resting potential)
self.V_T = -50.4; #[mV] - excitability threshold
self.Delta_T = 2; #[mV] - excitability slope
self.Tarp = 2.; #[ms] - absolute refractory period
#--------------------------------------------------------------------------
self.tau_w = 144; #[ms] - decay time constant for adaptation variable
self.a = 4; #[nS] - voltage-dependence of adaptation variable
self.b = 0.0805; #[nA] - spike-dependence of adaptation variable
#--------------------------------------------------------------------------
self.mu = 200.; # Mean of the synaptic background current
self.sigma = 400.; # Stddev of the syn. backgr. current (e.g., 2 * mu)
self.taux = 5.; # Autocorrelation time length [ms]
self.t1 = self.mdt / self.taux; # Temp. var.for convenience - refer to eqs.
self.t2 = sqrt(2.*self.t1); # Temp. var.for convenience - refer to eqs.
#--------------------------------------------------------------------------
def should_rotate(self, orientation):
return True
def draw(self):
global yy, tt
background(0., 0., 0.)
fill(0.6,0,0)
stroke(0.6,0,0)
stroke_weight(3)
#---------------------------------------------------------------------------------------
# Main simulation cycle, repeated as many are the horizontal points on scren
for kk in range(int(self.size.w)):
            # Iteratively update the equation for the noisy external current
self.In += (self.mu - self.In) * self.t1 + self.sigma * self.t2 * gauss(0,1);
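            # (an Ornstein-Uhlenbeck process: exponentially correlated Gaussian noise with mean mu, stddev sigma)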
if self.u==self.th: # threshold
self.u = self.E_L;
self.w += self.b;
self.t0 = self.t;
line(kk,0.1 * self.size.h,kk,self.size.h)
tmp = sound.play_effect('Drums_02', 100, 20)
#sound.stop_effect(tmp)
else:
if (abs(self.t-self.t0) >= self.Tarp):
udot = self.mdt/self.C*(-self.g_L*(self.u-self.E_L) + self.g_L*self.Delta_T*exp((self.u-self.V_T)/self.Delta_T) - self.w + self.In + self.Iext);
if ((self.u + udot) > self.th):
self.u = self.th
else:
self.u += udot
else:
self.u = self.E_L;
wdot = self.mdt/self.tau_w*(self.a*(self.u-self.E_L) - self.w);
self.w += wdot;
self.t += self.mdt;
ud = (self.u - self.umin)/(self.umax - self.umin) * self.size.h * 0.9 + 0.1 * self.size.h
if (fmod(kk,2)==0):
ellipse(kk, ud, 2, 2)
#------------------------------------------------------------------------------------------
t = localtime() # current time probed, in the structure "t"
minute = t.tm_min # minutes
second = t.tm_sec # seconds
hour = t.tm_hour # hours
#Format the elapsed time (dt):
s = '%02d:%02d.%02d' % (hour, minute, second)
#Determine overall size for centering:
w, h = 0.0, self.numbers['0'][1].h
for c in s:
size = self.numbers[c][1]
w += size.w
#Draw the digits:
x = int(self.size.w * 0.5 - w * 0.5)
y = int(self.size.h * 0.5 - h * 0.5)
for c in s:
img, size = self.numbers[c]
image(img, x, y, size.w, size.h)
x += size.w
#Format the real-time index:
# self.dt : time in seconds elapsed since the last "draw" operation
tmp1 = (0.001 * self.mdt * self.size.w) # simulated seconds per frame
tmp2 = tmp1 / self.dt
s = '%02f' % tmp2
#Determine overall size for centering:
w, h = 0.0, self.numbers_small['0'][1].h
for c in s:
size = self.numbers_small[c][1]
w += size.w
#Draw the digits:
x = int(self.size.w * 0.5 - w * 0.5)
y = int(self.size.h * 0.75 - h * 0.5)
for c in s:
img, size = self.numbers_small[c]
image(img, x, y, size.w, size.h)
x += size.w
def touch_began(self, touch):
self.Iext = 200.;
def touch_ended(self, touch):
self.Iext = 0.;
#Run the scene that we just defined (10 frames/sec --> "6")
run(iNeuron(),orientation=DEFAULT_ORIENTATION, frame_interval=6, anti_alias=False)
# 1: 60
# 2: 30
# 3: 20
# 4: 15
# 5: 12
# 6: 10
# 7: 60/7
# 8: 60/8
|
One of the more ambitious and interesting hardware projects on a crowdfunding platform recently has to be the Quasar IV, a smartphone designed entirely around security, encryption and identity protection. QSAlpha’s Quasar IV uses authentication tech called Quatrix, and a hybrid Android/Linux and Quatrix mobile OS called QuaOS to make sure communications in and out of the phone are protected.
The Quasar IV is going to be made despite its struggling Indiegogo campaign: with only nine days remaining, just $48,796 of the $3.2 million fixed funding goal has been raised (meaning the team gets nothing unless the whole amount is committed). Although the campaign won’t close that gap in nine days, the phone will apparently be built regardless, thanks to help from outside investors.
|
from django.db.utils import OperationalError
from django.contrib.contenttypes.models import ContentType
PLUGIN_NAME = 'Featured Articles'
DESCRIPTION = 'This is a homepage element that renders featured articles.'
AUTHOR = 'Martin Paul Eve'
def install():
import core.models as core_models
import journal.models as journal_models
import press.models as press_models
# check whether this homepage element has already been installed for all journals
journals = journal_models.Journal.objects.all()
for journal in journals:
content_type = ContentType.objects.get_for_model(journal)
element, created = core_models.HomepageElement.objects.get_or_create(
name=PLUGIN_NAME,
configure_url='featured_articles_setup',
template_path='journal/homepage_elements/featured.html',
content_type=content_type,
object_id=journal.pk,
has_config=True)
element.save()
presses = press_models.Press.objects.all()
for press in presses:
content_type = ContentType.objects.get_for_model(press)
element, created = core_models.HomepageElement.objects.get_or_create(
name=PLUGIN_NAME,
configure_url='featured_articles_setup',
template_path='journal/homepage_elements/featured.html',
content_type=content_type,
object_id=press.pk,
has_config=True)
element.save()
def hook_registry():
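    """Install the homepage elements (get_or_create makes this idempotent) and register this plugin's hook."""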
try:
install()
return {
'yield_homepage_element_context': {
'module': 'core.homepage_elements.featured.hooks',
'function': 'yield_homepage_element_context',
'name': PLUGIN_NAME,
}
}
except OperationalError:
# if we get here the database hasn't yet been created
return {}
except BaseException:
return {}
|
California’s Twenty-ninth Assembly District has named Bette Worthen of the Visalia Fourth Ward, Visalia California Stake, its 1994 Woman of the Year. Sister Worthen, who received the honor during a special ceremony on the assembly floor, has worked in the office of assemblyman Bill Jones for twelve years.
Utah Governor Michael Leavitt has appointed the state’s first director for Polynesian affairs. Fineasi M. Nau of the Bonneville Fourth Ward, Provo Utah Bonneville Stake, will work closely with the governor’s Polynesian Advisory Council to build unity and solve problems within the Polynesian community, as well as increase awareness of one of the state’s fastest growing ethnic populations.
The Penn State Nittany Lions men’s volleyball team, coached by Tom Peterson, recently won the NCAA Men’s Volleyball Championship. Brother Peterson, a member of the State College Ward, Altoona Pennsylvania Stake, has coached at Penn State for six years. He was also named Coach of the Year by the American Volleyball Coaches Association.
Susan Tjarks of the Mitchell Ward, Sioux Falls South Dakota Stake, was recently selected as one of the one hundred most influential women in South Dakota. Sister Tjarks, who has lupus, serves on the Mitchell School Board and has spent hundreds of hours in community service.
Members of Parliament from both parties recently honored sixteen-year-old Mark Grant as the Most Outstanding Prime Minister in the national Motorola Youth Parliament Competition. Mark, a member of the Nuneaton Ward, Coventry England Stake, attended a special reception at the House of Commons, Westminster, London, to receive a prize.
|
"""
Django settings for woodypage project.
To setup the settings json file:
1. rename settings_template.json to settings.json
2. write all the necessary data in it
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import json
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
try:
with open(os.path.join(BASE_DIR, 'settings', 'settings.json')) as data_file:
data = json.load(data_file)
except IOError:
    print("You need to setup the settings data file (see instructions in base.py file.)")
    raise
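# An illustrative settings.json layout (field names inferred from the reads
# below; adjust values for your environment):
# {
#     "secret_key": "change-me",
#     "databases": [
#         {"name": "default", "engine": "django.db.backends.sqlite3",
#          "schema": "db.sqlite3", "user": "", "password": "",
#          "host": "", "port": "", "options": {}}
#     ]
# }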
SECRET_KEY = data["secret_key"]
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
DEPLOYED = False
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_unused_media',
'adminsortable2',
'imagekit',
'paintings',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'woodypage.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'woodypage.wsgi.application'
# Database
DATABASES = {}
for database in data['databases']:
DATABASES[database['name']] = {
'ENGINE': database['engine'],
'NAME': database['schema'],
'USER': database['user'],
'PASSWORD': database['password'],
'HOST': database['host'],
'PORT': database['port'],
'OPTIONS': database['options']
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
MEDIA_ROOT = ''
MEDIA_URL = ''
|
There are many instances where this statement is not accurate; however, its corollary, “If you can’t measure it, you can’t improve it,” is closer to the mark. Lord Kelvin, Peter Drucker, and W. Edwards Deming have all had these quotes attributed to them, but like many great ideas, there are many authors.
Measurement drives improvement only when certain conditions hold:
The correct metrics are developed. Collecting the wrong data will not provide improvement.
The data is important to the user. If the information, although correct, is not actionable, progress cannot be made.
The collection of data is not intrusive. In many cases, testing skews not only the results, but the methods it was designed to measure.
Addressing constituents’ needs is complex, yet achievable.
The process of educating a child involves more than just the teacher and the student. There are other constituencies involved, including the family, school administrators, the school board, and the community and the public at large. When we talk about measuring family engagement and the parent teacher partnership, we need to address each of these constituents’ different needs.
First we need to know what each group wants to measure.
They want to evaluate what’s important to them. They need to receive information on which they can take action; it needs to be easily understood and delivered in a timely manner. In this way, light adjustments can be made without it being obtrusive. Let’s take a closer look at the following constituents.
Studies have shown that most parents feel that they are responsible for their child’s progress, regardless of the success or lack thereof. They view the teacher and the school as providing the service of educating their child, but the parent still feels responsible. In many cases, parents feel disenfranchised, and this is perceived as disengaged. Studies have also shown that parents are not that interested in the education system as a whole; they are more focused on their child’s education.
So what do they want to see?
Parents don’t only want to know how their child is doing; they want to see how their child is doing relative to his or her ability. Who knows this best? The parent. They want to see how their child fits into the rest of the class, and how the class is doing. They want to see how their expectations are met in relation to those of the rest of the parents in that class. If the other parents are happy but they aren’t, decisions can be made. In all this, FERPA issues must be recognized and addressed. Meeting a parent’s need for information is paramount to a successful Parent Teacher Partnership.
Teachers need to easily receive insights from home. By knowing parents’ expectations early, they can address any disconnect between a parent’s vision and educational reality. Similarly, they can address cases where parent expectations are too low; studies consider this the most important influence on positive family engagement. Understanding objectives becomes the basis for parent teacher partnerships. Ongoing, civil dialogue strengthens this relationship.
Teachers also need to gauge how they’re doing with parents. They need to see when parents go on ‘auto-pilot’, no longer engaged in the partnership. They want to see the performance of their students not only as it correlates to family involvement, but also based on other factors. This “whole picture” helps them develop better methods for teaching that may not always be restricted to the classroom.
Knowing that a teacher’s time is limited, whatever system is used must become a natural part of the fabric of their day. In this way, managing many parent teacher partnerships influences the performance of the whole class, in line with the teacher’s objectives.
Administrators need to see that teachers are actively engaged with parents. Targets can be set for the level of parent teacher partnerships, and these can be correlated to student and teacher performance. They can measure the timeliness of information provided to the parents and work with teachers when this information is lacking. Through this system, administrators can develop an atmosphere of respect for the parent contribution.
How do the School Board and the public enter into this conversation about metrics? What context do they need? Remember the phrase, “You can’t manage what you can’t measure”? Although many, many studies prove that family engagement is important to a child’s success, none have developed the key metric: family engagement will give you “x” improvement in student performance, which can be correlated to “x” improvement in teacher effectiveness, and to an “x” return on investment in their community.
Using measurement, we can gain momentum.
If it’s not quantifiable, it’s not fundable. With all the pressures of conflicting concerns, family engagement funding has lagged far behind its importance. Of the roughly $12,400 spent per year per K-12 student in the US, less than $10 is earmarked for parent involvement! It is this metric that One Green Apple seeks to change, by being cost effective, but more importantly, by creating an effective system for involvement.
Metrics help make this a reality.
|
# -*- coding: utf-8 -*-
# Copyright (C) 1998-2018 by the Free Software Foundation, Inc.
#
# This file is part of Postorius.
#
# Postorius is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# Postorius is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# Postorius. If not, see <http://www.gnu.org/licenses/>.
from django import template
register = template.Library()
@register.inclusion_tag('postorius/menu/list_nav.html', takes_context=True)
def list_nav(context, current, title='', subtitle=''):
return dict(list=context['list'],
current=current,
user=context['request'].user,
title=title, subtitle=subtitle)
@register.inclusion_tag('postorius/menu/user_nav.html', takes_context=True)
def user_nav(context, current, title='', subtitle=''):
return dict(current=current,
user=context['request'].user,
title=title, subtitle=subtitle)
@register.simple_tag(takes_context=True)
def nav_active_class(context, current, view_name):
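    """Return 'active' when the current view matches view_name, for nav menu CSS."""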
if current == view_name:
return 'active'
return ''
|
Late last week, a conversation between high-ranking Turkish officials was leaked online, purporting to expose a plan to use a staged attack on a Turkish target in Aleppo as a pretext to start a war with Syria. Many questions remain about the recording: where it came from, why it was released now, and what it means for the future of the NATO agenda in Syria and for the Turkish government. Join us this week on The Eyeopener as we peel back the layers of the onion to find out what’s really behind the false flag leak.
|
"""
======================
Histogram Equalization
======================
This examples enhances an image with low contrast, using a method called
*histogram equalization*, which "spreads out the most frequent intensity
values" in an image [1]_. The equalized image has a roughly linear cumulative
distribution function.
While histogram equalization has the advantage that it requires no parameters,
it sometimes yields unnatural looking images. An alternative method is
*contrast stretching*, where the image is rescaled to include all intensities
that fall within the 2nd and 98th percentiles [2]_.
.. [1] http://en.wikipedia.org/wiki/Histogram_equalization
.. [2] http://homepages.inf.ed.ac.uk/rbf/HIPR2/stretch.htm
"""
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from skimage import data, img_as_float
from skimage import exposure
matplotlib.rcParams['font.size'] = 8
def plot_img_and_hist(img, axes, bins=256):
"""Plot an image along with its histogram and cumulative histogram.
"""
img = img_as_float(img)
ax_img, ax_hist = axes
ax_cdf = ax_hist.twinx()
# Display image
ax_img.imshow(img, cmap=plt.cm.gray)
ax_img.set_axis_off()
# Display histogram
ax_hist.hist(img.ravel(), bins=bins, histtype='step', color='black')
ax_hist.ticklabel_format(axis='y', style='scientific', scilimits=(0, 0))
ax_hist.set_xlabel('Pixel intensity')
ax_hist.set_xlim(0, 1)
ax_hist.set_yticks([])
# Display cumulative distribution
img_cdf, bins = exposure.cumulative_distribution(img, bins)
ax_cdf.plot(bins, img_cdf, 'r')
ax_cdf.set_yticks([])
return ax_img, ax_hist, ax_cdf
# Load an example image
img = data.moon()
# Contrast stretching
p2, p98 = np.percentile(img, (2, 98))
img_rescale = exposure.rescale_intensity(img, in_range=(p2, p98))
# Equalization
img_eq = exposure.equalize_hist(img)
# Adaptive Equalization
img_adapteq = exposure.equalize_adapthist(img, clip_limit=0.03)
# Display results
fig, axes = plt.subplots(nrows=2, ncols=4, figsize=(8, 5))
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img, axes[:, 0])
ax_img.set_title('Low contrast image')
y_min, y_max = ax_hist.get_ylim()
ax_hist.set_ylabel('Number of pixels')
ax_hist.set_yticks(np.linspace(0, y_max, 5))
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_rescale, axes[:, 1])
ax_img.set_title('Contrast stretching')
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_eq, axes[:, 2])
ax_img.set_title('Histogram equalization')
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_adapteq, axes[:, 3])
ax_img.set_title('Adaptive equalization')
ax_cdf.set_ylabel('Fraction of total intensity')
ax_cdf.set_yticks(np.linspace(0, 1, 5))
# prevent overlap of y-axis labels
fig.subplots_adjust(wspace=0.4)
plt.show()
|
Julia McIvor has modelled for me since the age of seven.
From the beginning of our work together, her presence in front of the camera has spoken for itself. Working with the same model over a period of time has allowed me to build a rare and very special collaboration. This collaboration lends substance to the images I create, because it is built upon trust, respect and a deeper knowledge of the person through whom I express myself.
Julia contributes directly to the creative processes of my art with suggestions and ideas. She has become my alter ego, the person through whom my anima is filtered.
On many occasions both Julia and her mother, Sarah Hall, have helped with equipment, transport and supplies. My endless thanks go to Sarah, her family and especially Julia.
|
#!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import gzip
import cPickle as marshal
import optparse
import os
import sys
import xml.sax
def parse_type(type_string):
'''Get a tuple of the type components for a SWIG-formatted type.
For example, given the type "p.f(p.struct _XExtData).int",
return ('int', ('f', ('struct _XExtData', 'p'),), 'p')
Qualifiers are ignored (removed).
'''
# Scan the type string left-to-right
buf = ''
stack = [()]
def flush(): # buf = flush()
if buf:
stack[-1] = stack[-1] + (buf,)
return ''
def push():
stack.append(())
def pop():
item = finalize(stack.pop())
if item is not None:
stack[-1] = stack[-1] + (item,)
def finalize(item):
assert type(item) is tuple
if not item:
# Empty tuple is dropped (empty param list)
return
elif item[0] == 'q':
# Discard qualifiers
return
# Reverse (puts pointers at end)
item = item[::-1]
# Reverse arguments of function
if item[-1] == 'f':
item = item[::-1]
# Empty out (void) param list
if item == ('f', ('void',)):
item = ('f',)
# Varargs encoding
elif item[-1] == 'v':
item = '...'
# Array encoding
elif item[-1] == 'a':
try:
item = ('a',) + tuple(int(j[0]) for j in item[-2::-1])
except (TypeError, ValueError):
# TODO arrays of dimension given by sizeof expression
item = ('a', 0)
# Remove one level of indirection for function types (CFUNCTYPE is
# already a pointer)
off = 0
for i, j in enumerate(item):
if type(j) is tuple and j and j[0] == 'f':
item = item[:i+1+off] + item[i+2+off:]
off -= 1
return item
for c in type_string:
if c == '.':
buf = flush()
elif c == '(':
push() # Push param list
buf = flush()
push() # Push item
elif c == ',':
buf = flush()
pop() # Pop item
push() # Push item
elif c == ')':
buf = flush()
pop() # Pop item
pop() # Pop param list
else:
buf += c
flush()
type_tuple = finalize(stack[0])
return type_tuple
class SwigInterfaceHandler(object):
def __init__(self):
self.name = None
self.cdecls = []
self.constants = []
def attribute(self, attrs):
if attrs['name'] == 'name':
self.name = str(attrs['value'])
def typemap(self, attrs):
return IgnoreElementHandler()
def cdecl(self, attrs):
handler = CDeclHandler(attrs)
self.cdecls.append(handler)
return handler
def constant(self, attrs):
handler = ConstantHandler(attrs)
self.constants.append(handler)
return handler
def class_(self, attrs):
handler = ClassHandler(attrs)
self.cdecls.append(handler)
return handler
def classforward(self, attrs):
handler = ClassForwardHandler(attrs)
self.cdecls.append(handler)
return handler
def enum(self, attrs):
handler = EnumHandler(attrs)
self.cdecls.append(handler)
return handler
def get_map(self):
map = {}
for cdecl in self.cdecls:
# ('typedef', type)
if cdecl.kind == 'typedef':
map[cdecl.name] = (cdecl.kind, cdecl.get_type(with_decl=True))
# ('enum', items)
elif cdecl.kind == 'enum':
enum = (cdecl.kind, cdecl.get_items())
map[cdecl.kind + ' ' + cdecl.name] = enum
map[cdecl.get_tdname()] = enum
# ('struct', variables)
# ('union', variables)
elif cdecl.kind in ('struct', 'union'):
class_ = (cdecl.kind, cdecl.get_variables())
map[cdecl.kind + ' ' + cdecl.name] = class_
map[cdecl.get_tdname()] = class_
# ('function', type)
elif cdecl.kind == 'function':
map[cdecl.name] = (cdecl.kind, cdecl.get_type(with_decl=True))
# ('variable', type)
elif cdecl.kind == 'variable':
map[cdecl.name] = (cdecl.kind, cdecl.get_type())
else:
assert False, (cdecl.kind, cdecl.type, cdecl.name)
# Constants: ('constant', value)
for constant in self.constants:
map[constant.name] = ('constant', constant.get_value())
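        # Debug aid: dump the generated type map for inspection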
import pprint
pprint.pprint(map)
return map
class IgnoreElementHandler(object):
pass
class ConstantHandler(object):
name = None
value = None
type = None
def __init__(self, attrs):
pass
def attribute(self, attrs):
name = attrs['name']
if name == 'name':
self.name = str(attrs['value'])
elif name == 'value':
self.value = str(attrs['value'])
elif name == 'type':
self.type = str(attrs['value'])
def get_value(self):
if self.type in ('int', 'long'):
# Yes, ugly and bad -- most C int constants can also be
# parsed as Python expressions; e.g. "1L << 8".
return int(eval(self.value))
return self.value
class EnumHandler(object):
name = None
tdname = None
kind = 'enum'
unnamed = False
def __init__(self, attrs):
self.items = []
def attribute(self, attrs):
name = attrs['name']
if name == 'name' and not self.unnamed:
self.name = str(attrs['value'])
elif name == 'unnamed':
self.name = str(attrs['value'])
self.unnamed = True
elif name == 'tdname':
self.tdname = str(attrs['value'])
def enumitem(self, attrs):
handler = EnumItemHandler(attrs)
self.items.append(handler)
return handler
def get_items(self):
items = []
index = 0
for item in self.items:
try:
# TODO parse enumvalueex properly
index = int(item.value)
except ValueError:
index += 1
items.append((item.name, index))
return tuple(items)
def get_tdname(self):
if self.tdname:
return self.tdname
else:
return self.name
class EnumItemHandler(object):
name = None
value = None
type = None
def __init__(self, attrs):
pass
def attribute(self, attrs):
name = attrs['name']
if name == 'name':
self.name = str(attrs['value'])
elif name == 'unnamed':
self.name = str(attrs['value'])
elif name == 'enumvalueex':
self.value = str(attrs['value'])
elif name == 'type':
self.type = str(attrs['value'])
def get_value(self):
if self.type in ('int', 'long'):
# Yes, ugly and bad -- most C int constants can also be
# parsed as Python expressions; e.g. "1L << 8".
return int(eval(self.value))
return self.value
class CDeclHandler(object):
name = None
kind = None
type = None
decl = ''
params = None
def __init__(self, attrs):
pass
def attribute(self, attrs):
name = attrs['name']
if name == 'name':
self.name = str(attrs['value'])
elif name == 'kind':
self.kind = str(attrs['value'])
elif name == 'type':
self.type = str(attrs['value'])
elif name == 'decl':
self.decl = str(attrs['value'])
def parmlist(self, attrs):
self.params = []
handler = ParmListHandler(attrs, self.params)
return handler
def get_params(self):
# (type, ...)
if self.params is None:
return None
return tuple(p.get_type() for p in self.params)
def get_type(self, with_decl=False):
if with_decl:
return parse_type(self.decl + self.type)
else:
return parse_type(self.type)
def __str__(self):
if self.params:
return self.name + \
'(' + ', '.join(map(str, self.params)) + ') : ' + self.type
else:
return self.name + ' : ' + self.type
class ParmListHandler(object):
def __init__(self, attrs, params):
self.params = params
def parm(self, attrs):
param = ParmHandler(attrs)
self.params.append(param)
return param
class ParmHandler(object):
name = ''
type = None
def __init__(self, attrs):
pass
def attribute(self, attrs):
name = attrs['name']
if name == 'name':
self.name = str(attrs['value'])
elif name == 'type':
self.type = str(attrs['value'])
def get_type(self):
return parse_type(self.type)
def __str__(self):
return self.name + ' : ' + self.type
class ClassHandler(object):
name = ''
kind = None
tdname = None
unnamed = False
def __init__(self, attrs):
self.cdecls = []
def attribute(self, attrs):
name = attrs['name']
if name == 'name' and not self.unnamed:
self.name = str(attrs['value'])
elif name == 'unnamed':
self.name = str(attrs['value'])
self.unnamed = True
elif name == 'kind':
self.kind = str(attrs['value'])
assert self.kind in ('struct', 'union'), self.kind
elif name == 'tdname':
self.tdname = str(attrs['value'])
def cdecl(self, attrs):
handler = CDeclHandler(attrs)
self.cdecls.append(handler)
return handler
def get_variables(self):
# ((name, type), ...)
return tuple((cdecl.name, cdecl.get_type(with_decl=True))
for cdecl in self.cdecls if cdecl.kind == 'variable')
def get_tdname(self):
if self.tdname:
return self.tdname
else:
return self.name
class ClassForwardHandler(object):
name = ''
kind = None
tdname = None
def __init__(self, attrs):
pass
def attribute(self, attrs):
name = attrs['name']
if name == 'name':
self.name = str(attrs['value'])
elif name == 'kind':
self.kind = str(attrs['value'])
assert self.kind in ('struct', 'union'), self.kind
elif name == 'tdname':
self.tdname = str(attrs['value'])
def get_variables(self):
return ()
def get_tdname(self):
if self.tdname:
return self.tdname
else:
return self.name
class FFIContentHandler(xml.sax.handler.ContentHandler):
def __init__(self):
self.swig_interface_handler = SwigInterfaceHandler()
self.stack = [self.swig_interface_handler]
def startElement(self, name, attrs):
if name == 'class':
name = 'class_'
top = self.stack[-1]
func = getattr(top, name, None)
if func:
self.stack.append(func(attrs))
else:
self.stack.append(top)
def endElement(self, name):
del self.stack[-1]
class KeepGoingErrorHandler(xml.sax.handler.ErrorHandler):
def error(self, exception):
print exception
def fatalError(self, exception):
print exception
def parse(xml_filename, output_filename):
handler = FFIContentHandler()
error_handler = KeepGoingErrorHandler()
xml.sax.parse(xml_filename, handler, error_handler)
map = handler.swig_interface_handler.get_map()
data = marshal.dumps(map)
output_file = gzip.open(output_filename, 'w')
output_file.write(data)
output_file.close()
if __name__ == '__main__':
usage = 'usage: %prog [options] <module.xml>'
op = optparse.OptionParser(usage=usage)
op.add_option('-o', '--output')
(options, args) = op.parse_args(sys.argv[1:])
if len(args) < 1:
print >> sys.stderr, 'No input file given'
sys.exit(1)
xml_filename = args[0]
module_name, _ = os.path.splitext(os.path.basename(xml_filename))
ffi_filename = module_name + '.ffi'
parse(xml_filename, ffi_filename)
|
There’s just something about barstool chairs that feel effortlessly cool. It’s as if they were designed specifically to influence the users to take a load off, slump casually and enjoy a beverage or two. Whether you’re lucky enough to have an actual bar down in the man den or a kitchen island that functions as the neighborhoods most popular happy hour spot, cool barstools are a must have to pull it all together.
A type of extra tall chair, bar stools were specifically designed to be used at bars and tall tables or islands. They are generally backless, armless and have support for the feet somewhere at the bottom. The average height for a barstool is 30” and 26” for those used at kitchen counters. They are available in a variety of different materials to match your décor. The most traditional adjustable barstool design is made of chrome or steel and can accommodate guests of a variety of heights. Wooden barstools don’t have the same flexibility but have a charming rustic feel that helps them to work in many different kitchen designs.
What Kind of Barstool Will Work for You?
If you’re in the market for cheap barstools, you need to take stock of where they’ll be used and by whom. If you are decorating an outdoor pool space, you’ll need a design constructed out of a material hardy enough to withstand the elements. A wooden barstool might look perfectly elegant, but unless it’s thoroughly sealed it’s better off inside next to a pool table rather than out by your actual pool. A folding barstool made of outdoor-appropriate materials that can be neatly stacked away at the end of summer is a more prudent purchase.
If you love to entertain, you may need to take your guests into account. Not all of your guests will be comfortable perched on a tall, backless stool! A barstool with back support is easier for younger or older folks to manage for the course of a meal. Those extra-tall barstools you’ve been eyeing? Imagine your grandmother trying to hop up on one. You’re better off with adjustable, supportive designs that can accommodate a wider variety of body shapes and sizes.
Belly Up to the Bar Folks!
1StopBedrooms has sourced the best in barstools from around the world. We’ve worked hard to bring you the best possible prices, negotiating for steep manufacturer discounts and ensuring that our site is crackling with monthly sales and coupons. On top of no sales tax, free shipping and guaranteed delivery, we’ll have them lined up at your front door for the best cocktail hour in town.
|
#
# Copyright 2010-2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
'''
Provides source and sink blocks to interface with the UHD library.
Used to send and receive data between the Ettus Research, LLC product
line.
'''
########################################################################
# Prepare uhd swig module to make it more pythonic
########################################################################
def _prepare_uhd_python():
try:
from . import uhd_python
except ImportError:
import os
dirname, filename = os.path.split(os.path.abspath(__file__))
__path__.append(os.path.join(dirname, "bindings"))
from . import uhd_python
#some useful typedefs for the user
setattr(uhd_python, 'freq_range_t', uhd_python.meta_range_t)
setattr(uhd_python, 'gain_range_t', uhd_python.meta_range_t)
#Make the python tune request object inherit from float
#so that it can be passed in GRC as a frequency parameter.
#The type checking in GRC will accept the tune request.
#Also use kwargs to construct individual struct elements.
class tune_request_t(uhd_python.tune_request_t):
# def __new__(self, *args, **kwargs): return float.__new__(self)
def __float__(self): return self.target_freq
def __init__(self, *args, **kwargs):
super().__init__(*args)
for key, val in list(kwargs.items()): setattr(self, key, val)
setattr(uhd_python, 'tune_request_t', tune_request_t)
#handle general things on all uhd_python attributes
#Install the __str__ and __repr__ handlers if applicable
#Create aliases for uhd swig attributes to avoid the "_t"
for attr in dir(uhd_python):
myobj = getattr(uhd_python, attr)
if hasattr(myobj, 'to_string'): myobj.__repr__ = lambda o: o.to_string().strip()
if hasattr(myobj, 'to_pp_string'): myobj.__str__ = lambda o: o.to_pp_string().strip()
if hasattr(myobj, 'to_bool'): myobj.__nonzero__ = lambda o: o.to_bool()
if hasattr(myobj, 'to_int'): myobj.__int__ = lambda o: o.to_int()
if hasattr(myobj, 'to_real'): myobj.__float__ = lambda o: o.to_real()
if attr.endswith('_t'): setattr(uhd_python, attr[:-2], myobj)
#make a new find devices that casts everything with the pythonized device_addr_t which has __str__
def find_devices(*args, **kwargs):
def to_pythonized_dev_addr(dev_addr):
new_dev_addr = uhd_python.device_addr_t()
for key in list(dev_addr.keys()): new_dev_addr[key] = dev_addr.get(key)
return new_dev_addr
return __builtins__['map'](to_pythonized_dev_addr, uhd_python.find_devices_raw(*args, **kwargs))
setattr(uhd_python, 'find_devices', find_devices)
########################################################################
# Initialize this module with the contents of uhd pybind
########################################################################
_prepare_uhd_python()
from .uhd_python import *
|
With costs down, government subsidies up and an expanded solar buy-back program recently approved by the Georgia Public Service Commission, solar is becoming a more attractive alternative here, presenters at the Southern Solar Summit said Wednesday.
Sponsored by the nonprofit Georgia Solar Energy Association, the first-ever summit attracted more than 100 solar installers, government agency officials, real estate professionals and others to two panel discussions. The summit preceded the ongoing Georgia Environmental Conference at the Hyatt Regency Savannah.
Tom Lane, founder of the Gainesville, Fla.-based ECS Solar Energy Systems, promotes solar hot water systems, which his company installs. They're relatively compact on the roof, covering less than 100 square feet, and they quickly recoup their installation costs in energy savings. His rule of thumb is to look at your lowest electric bills of the year, typically in the spring and fall when heating and air conditioning is least used. Half of that bill is likely the monthly cost of water heating, he said.
That cost savings is typically greater than the monthly payment on a loan for a solar water heater. Plus, the installation increases home equity.
"It's costing you money not to do it," Lane said.
Other places where the sun shines less intensely and less frequently than in Georgia, such as New Jersey and Germany, have already tapped into the sun much more.
In large part, that's because of government mandates. New Jersey, for example, requires utilities to produce 22.5 percent of their electricity from renewable sources, at least 2 percent of which must come from solar sources. Georgia and most of the South have no such requirements.
Such government policies are what's required to make solar competitive with coal and nuclear-generated electricity, said Ervan Hancock, manager of renewable generation at Southern Company, the parent of Georgia Power.
Under direction from the Public Service Commission, Georgia Power will soon triple the capacity of a program that buys back solar power from producers.
The program is required to be revenue neutral, meaning the cost of the solar Georgia Power buys is offset by customers who pay a premium for green power. Currently, only about 4,400 customers buy green.
"We're reassessing the marketing campaign to get those numbers up," he said.
In the Southeast, Hancock said, it costs six times more to produce electricity with solar than with existing coal and nuclear.
That won't always be the case, argued Ross Harding, managing partner of Energy Launch Partners LLC. Installing solar panels, either on a residential or larger scale, is a 20-30 year investment, he said. Over the next decade, traditional power sources are poised to see costs double while the sun will keep shining for free.
"Ten years from now, if you don't do (solar), you're going to be really disadvantaged," he said.
|