from __future__ import with_statement

from numpy import array
from pyglet.window import key, Window

try:
    from kiva.gl import GraphicsContext
except ImportError as e:
    raise Exception(e)
from kiva.constants import FILL, STROKE, FILL_STROKE


class TestWindow(Window):
    """ Press Q or Escape to exit """

    def __init__(self, *args, **kw):
        Window.__init__(self, *args, **kw)
        self.init_window()

    def init_window(self):
        self.gc = GraphicsContext(size=(self.width, self.height))
        self.gc.gl_init()

    def on_key_press(self, symbol, modifiers):
        if symbol in (key.ESCAPE, key.Q):
            self.has_exit = True

    def draw(self):
        gc = self.gc
        with gc:
            gc.clear((0, 1, 0, 1))
            gc.set_stroke_color((1, 1, 1, 1))
            gc.set_line_width(2)
            pts = array([[50, 50], [50, 100], [100, 100], [100, 50]])
            gc.begin_path()
            gc.lines(pts)
            gc.close_path()
            gc.draw_path(STROKE)
        gc.flush()


def main():
    win = TestWindow(width=640, height=480)
    exit = False
    while not exit:
        win.switch_to()
        win.dispatch_events()
        win.clear()
        win.draw()
        win.flip()
        exit = win.has_exit


if __name__ == "__main__":
    main()
The DRV632 is a 2-VRMS pop-free stereo line driver designed to allow the removal of the output dc-blocking capacitors for reduced component count and cost. The device is ideal for single-supply electronics where size and cost are critical design parameters. Designed using TI’s patented DirectPath™ technology, the DRV632 is capable of driving 2 VRMS into a 10-kΩ load with a 3.3-V supply voltage. The device has differential inputs and uses external gain-setting resistors to support a gain range of ±1 V/V to ±10 V/V, and gain can be configured individually for each channel. Line outputs have ±8-kV IEC ESD protection, requiring just a simple resistor-capacitor ESD protection circuit. The DRV632 has built-in active-mute control for pop-free audio on/off control. The DRV632 has an external undervoltage detector that mutes the output when the power supply is removed, ensuring a pop-free shutdown. Using the DRV632 in audio products can reduce component count considerably compared to traditional methods of generating a 2-VRMS output. The DRV632 does not require a power supply greater than 3.3 V to generate its 5.6-Vpp output, nor does it require a split-rail power supply. The DRV632 integrates its own charge pump to generate a negative supply rail that provides a clean, pop-free ground-biased 2-VRMS output. The DRV632 is available in a 14-pin TSSOP.
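As a quick sanity check on the figures above, the peak-to-peak swing of a 2-VRMS sine wave and the gain set by a pair of external resistors can be worked out in a few lines. The sketch below assumes the familiar gain = Rf / Rin relationship for the external gain-setting resistors; that topology and the specific resistor values are illustrative assumptions, not taken from the description above.

import math

# A sine wave swings 2 * sqrt(2) * Vrms peak-to-peak.
v_rms = 2.0
v_pp = 2 * math.sqrt(2) * v_rms
print("%.2f Vpp" % v_pp)   # ~5.66 Vpp, consistent with the ~5.6-Vpp output quoted above

# Gain from external resistors (assumed gain = Rf / Rin; values are hypothetical).
r_in = 10e3    # ohms
r_f = 20e3     # ohms
print("gain = %.1f V/V" % (r_f / r_in))   # 2.0 V/V, inside the ±1 to ±10 V/V range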
# -*- coding: utf-8 -*- # # pdf - compute and plot pair distribution function # # Copyright © 2008-2012 Felix Höfling and Peter Colberg # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. # from __future__ import print_function """ Compute and plot pair distribution function g(r) """ def plot(args): import os, os.path import h5py from matplotlib import pyplot as plt import h5mdtools._plot.label from numpy import linspace ax = plt.axes() label = None ax.axhline(y=1, color='black', lw=0.5) ax.set_color_cycle(args.colors) for (i, fn) in enumerate(args.input): try: f = h5py.File(fn, 'r') except IOError: raise SystemExit('failed to open HDF5 file: %s' % fn) try: # determine file type, prefer precomputed static structure factor data if 'structure' in f.keys() and 'ssf' in f['structure'].keys(): import filon import h5mdtools._plot.ssf as ssf from scipy.constants import pi param = f['parameters'] # load static structure factor from file H5 = f['structure/ssf/' + '/'.join(args.flavour)] q = f['structure/ssf/wavenumber'].__array__() # convert to NumPy array S_q, S_q_err = ssf.load_ssf(H5, args) # read some parameters dim = param['box'].attrs['dimension'] density = param['box'].attrs['density'] length = param['box'].attrs['length'] # compute pair distribution function xlim = args.xlim or (0, min(length) / 2) r = linspace(xlim[0], xlim[1], num=args.bins) if r[0] == 0: r = r[1:] if dim == 3: # convert 3-dim Fourier transform F[S_q - 1] / (2π)³ to 1-dim Fourier integral pdf = filon.filon(q * (S_q - 1), q, r).imag / (2 * pi * pi * r) pdf_err = filon.filon(q * S_q_err, q, r).imag / (2 * pi * pi * r) pdf = 1 + pdf / density # add δ-contribution pdf_err = pdf_err / density elif 'particles' in f.keys(): # compute SSF from trajectory data H5 = f['particles/' + args.flavour[0]] r, pdf, pdf_err = pdf_from_trajectory(H5, args) else: raise SystemExit('Input file provides neither data for the static structure factor nor a trajectory') # before closing the file, store attributes for later use if 'param' in locals(): attrs = h5mdtools._plot.label.attributes(param) except IndexError: raise SystemExit('invalid phase space sample offset') except KeyError as what: raise SystemExit(str(what) + '\nmissing simulation data in file: %s' % fn) finally: f.close() if args.label: label = args.label[i % len(args.label)] % attrs elif args.legend or not args.small: basename = os.path.splitext(os.path.basename(fn))[0] label = r'%s' % basename.replace('_', r'\_') if args.title: title = args.title % attrs c = args.colors[i % len(args.colors)] ax.plot(r, pdf, '-', color=c, label=label) if 'pdf_err' in locals(): ax.errorbar(r, pdf, pdf_err, fmt='o', color=c, markerfacecolor=c, markeredgecolor=c, markersize=2, linewidth=.5) else: ax.plot(r, pdf, 'o', markerfacecolor=c, markeredgecolor=c, markersize=2) # write plot data to file if args.dump: f = open(args.dump, 'a') print('# %s, sample %s' % 
(label.replace(r'\_', '_'), args.sample), file=f) if 'pdf_err' in locals(): print('# r g(r) g_err(r)', file=f) savetxt(f, array((r, pdf, pdf_err)).T) else: print('# r g(r)', file=f) savetxt(f, array((r, pdf)).T) print('\n', file=f) f.close() # adjust axis ranges ax.axis('tight') if args.xlim: plt.setp(ax, xlim=args.xlim) if args.ylim: plt.setp(ax, ylim=args.ylim) else: plt.setp(ax, ylim=(0, plt.ylim()[1])) # optionally plot with logarithmic scale(s) if args.axes == 'xlog': ax.set_xscale('log') if args.axes == 'ylog': ax.set_yscale('log') if args.axes == 'loglog': ax.set_xscale('log') ax.set_yscale('log') if args.legend or not args.small: l = ax.legend(loc=args.legend) l.legendPatch.set_alpha(0.7) plt.xlabel(args.xlabel or r'distance $r / \sigma$') plt.ylabel(args.ylabel or r'pair distribution function $g(r)$') if args.output is None: plt.show() else: plt.savefig(args.output, dpi=args.dpi) """ Compute pair distribution function from trajectory data """ def pdf_from_trajectory(H5group, args): from scipy.constants import pi from scipy.special import gamma from numpy import array, diagonal, float32, histogram, power, prod, round_, sqrt, sum, zeros import re # read periodically extended particle positions, # read one or several samples, convert to single precision idx = [int(x) for x in re.split(':', args.sample)] data = H5group['position/value'] if len(idx) == 1: samples = array([data[idx[0]],], dtype=float32) elif len(idx) == 2: samples = array(data[idx[0]:idx[1]], dtype=float32) elif len(idx) == 3: samples = array(data[idx[0]:idx[1]:idx[2]], dtype=float32) # positional coordinates dimension dim = H5group['box'].attrs['dimension'] # periodic simulation box length length = diagonal(H5group['box/edges']) # number of particles N = data.shape[1] density = N / prod(length) r_max = args.xlim or (0, min(length) / 2) H = zeros(args.bins) for r in samples: for (i, j) in enumerate(range(r.shape[0] - 1, 0, -1)): # particle distance vectors dr = r[:j] - r[i + 1:] # minimum image distances dr = dr - round_(dr / length) * length # magnitude of distance vectors r_norm = sqrt(sum(dr * dr, axis=1)) # accumulate histogram of minimum image distances h, bins = histogram(r_norm, bins=args.bins, range=r_max) H += 2 * h # volume of n-dimensional unit sphere Vn = power(pi, dim / 2.) / gamma(dim / 2. + 1.) # average number of atoms in ideal gas per interval n = Vn * density * (power(bins[1:], dim) - power(bins[:-1], dim)) # compute pair distribution function g(r) pdf = H / samples.shape[0] / n / N pdf_err = sqrt(H) / samples.shape[0] / n / N return .5 * (bins[1:] + bins[:-1]), pdf, pdf_err def add_parser(subparsers): parser = subparsers.add_parser('pdf', help='pair distribution function') parser.add_argument('input', nargs='+', metavar='INPUT', help='HDF5 file with trajectory or ssf data') parser.add_argument('--flavour', nargs=2, help='particle flavours') parser.add_argument('--sample', help='index of phase space sample(s)') parser.add_argument('--bins', type=int, help='number of histogram bins') parser.add_argument('--xlim', metavar='VALUE', type=float, nargs=2, help='limit x-axis to given range') parser.add_argument('--ylim', metavar='VALUE', type=float, nargs=2, help='limit y-axis to given range') parser.add_argument('--axes', choices=['xlog', 'ylog', 'loglog'], help='logarithmic scaling') parser.add_argument('--verbose', action='store_true') parser.set_defaults(flavour=('A', 'A'), sample='0', bins=50,)
A preliminary design for a multi-role police vessel for the River Thames. The purpose of this design dissertation is to produce a preliminary design for a new multi-role police launch for the Metropolitan Police Marine Policing Unit (MPU), which can be used during the London 2012 Olympics and afterwards by other police forces that police tidal rivers or estuaries similar to the Thames. With the 2012 Olympics in London fast approaching, likely to be the largest and most significant sporting event the United Kingdom has ever seen, there will be a significant increase in the level of security and river traffic in the months leading up to the Games, as well as the constant threat of terrorist activity. At present the Metropolitan Police use modified Targa 31s as their main policing vessels, which are a common sight patrolling the river. These vessels are subject to prolonged arduous use and therefore have a limited operating life of between four and five years. The modified Targa 31 makes up only four of the Marine Policing Unit’s twenty-two vessels, the rest comprising a Targa 37 logistics support vessel, three semi-displacement vessels used for deploying divers, and a number of Rigid Inflatable Boats (RIBs) and dinghies, none of which have been specifically designed for policing. The new design will therefore try to incorporate a number of different vessel roles into one purpose-designed police launch. The new design could subsequently reduce the fleet from twenty-two vessels, cutting operating costs, which is particularly appropriate in the current economic climate.
from django import template from django.template.defaultfilters import stringfilter from django.utils.safestring import mark_safe import bleach import markdown2 register = template.Library() # Not really safe, but Django needs to think it is. @register.filter(is_safe=True) @stringfilter def unsafe_markdown(value): return mark_safe(markdown2.markdown( text=value, extras=[ "fenced-code-blocks", "code-friendly", "tables", "highlightjs-lang", ], )) @register.filter(is_safe=True) @stringfilter def markdown(value): html = unsafe_markdown(value) return bleach.clean( html, tags=[ 'a', 'abbr', 'acronym', 'b', 'blockquote', 'code', 'em', 'i', 'li', 'ol', 'p', 'pre', 'strong', 'table', 'tbody', 'td', 'tfoot', 'th', 'thead', 'tr', 'ul', ], attributes={ 'a': ['href', 'title'], 'abbr': ['title'], 'acroynym': ['title'], 'code': ['class'], }, protocols=[ 'http', 'https', 'mailto', ], )
In consultation with schools and other key stakeholders, the Health and Wellbeing Service has created a brand new and improved separate model policy for Drug Education & Incidents which will be shared at this training event. This course gives schools the opportunity to review and update their Drug Education and Incidents Policy. It will update schools on the latest developments within PSHE, Drug Education and Incidents and allow schools to update their policy to reflect this. The policies are in line with all the most relevant law and guidance, both local and national, so schools can be sure they have included everything that is required. Delegates will be given the model policy and have the opportunity to start working on it at the event. Update schools on the latest developments within PSHE, Drug Education and Incidents.
import os import logging import itertools import json import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import pylab from datetime import datetime from utils import gen_next_day, setup_logging sns.set() plt.switch_backend('Qt4Agg') ROOT = os.getcwd() CONFIG_FILE = os.path.join(ROOT, 'config.ini') DATA_DIR = os.path.join(ROOT, 'data', 'sleep') DATA_DB_FILE = os.path.join(DATA_DIR, '.db') FILENAME_TEMPLATE = 'sleep-{date}.json' DATE_FORMAT = '%Y-%m-%d' ASLEEP, AWAKE, REALLY_AWAKE, DEFAULT = 1, 2, 3, -1 logger = logging.getLogger(__name__) def format_date(date): return datetime.strftime(date, DATE_FORMAT) def convert_ts_to_minute(ts): hour, minute, seconds = ts.split(':', 2) return int(hour) * 60 + int(minute) def get_minute_data(day): filename = FILENAME_TEMPLATE.format(date=format_date(day)) filepath = os.path.join(DATA_DIR, filename) with open(filepath, 'r') as f: data = json.loads(f.read()) minute_data = [] for record in data['sleep']: minute_data += record['minuteData'] return minute_data def write_to_dataframe(dataframe, day, minute_data): for record in minute_data: minute = convert_ts_to_minute(record['dateTime']) dataframe[minute][format_date(day)] = int(record['value']) def main(): start_date = datetime.strptime('2015-02-06', DATE_FORMAT) end_date = datetime.strptime('2015-03-21', DATE_FORMAT) days = itertools.takewhile(lambda x: x <= end_date, gen_next_day(start_date)) date_index = pd.date_range(start_date, end_date) df = pd.DataFrame(index=date_index, columns=range(24 * 60), dtype='uint8') print df.dtypes for day in days: logger.info('Processing day {}'.format(format_date(day))) minute_data = get_minute_data(day) write_to_dataframe(df, day, minute_data) df = df.fillna(0) sns.heatmap(df, xticklabels=False, yticklabels=False, linewidths=0) # df.plot() pylab.show() if __name__ == '__main__': setup_logging(logger, logging.INFO) main()
And above all, be entertaining and personable. Believing that loyal followers are key to any business’s success, Sound Investment maintains a customer e-mail base and e-mails upcoming appearances to all of its loyal followers. As an added service, a song history card is maintained for all of the karaoke enthusiasts, keeping their favorite songs and song titles on file for quick reference and enabling them to bypass the “song slip” process.
# ----------------------------------------------------------------------------- # Karajlug.org # Copyright (C) 2010 Karajlug community # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # ----------------------------------------------------------------------------- from django.shortcuts import render_to_response as rr from django.http import Http404 from django.template import RequestContext from django.core.paginator import Paginator, InvalidPage, EmptyPage from django.conf import settings from .models import Book def books_index(request): """ Main index of registered books. """ books = Book.objects.all().order_by("weight") book_per_page = 4 try: book_per_page = settings.BOOK_IN_PAGE except AttributeError: pass paginator = Paginator(books, book_per_page) try: page = int(request.GET.get('page', '1')) except ValueError: page = 1 try: books_list = paginator.page(page) except (EmptyPage, InvalidPage): # if provided page value in GET was out of range books_list = paginator.page(paginator.num_pages) return rr("books.html", {"books": books_list}, context_instance=RequestContext(request)) def book_view(request, slug): """ View of each Book """ try: book = Book.objects.get(slug=slug) except Book.DoesNotExist: raise Http404() return rr("book_view.html", {"book": book}, context_instance=RequestContext(request))
Due to low success rates for hernia mesh devices, many device companies have issued recalls following injuries and device lawsuits. Several serious health problems are associated with defective hernia mesh devices, and device companies including Bard, Covidien, Atrium, Gore, Aspide and Ethicon may be held responsible when injuries occur. Hernia repairs are quite common, and doctors often use surgical mesh to add support to weakened or damaged tissue. Surgical mesh devices available on the market are generally constructed from absorbable synthetic materials, non-absorbable materials, or a combination of the two. Patients should contact their surgeons about health issues and contact an experienced Ohio hernia mesh lawyer for legal counsel. Joe Lyon is a highly-rated Cincinnati medical device attorney and Ohio hernia mesh lawyer representing plaintiffs nationwide in a wide variety of defective medical device and product liability claims. Hernias have a high recurrence rate, and surgeons will try to use surgical mesh to strengthen the hernia site. But some hernia mesh products may carry higher risks and have been found to be ineffective or to cause further injury. A U.S. Food and Drug Administration (FDA) analysis of mesh adverse event reports notes chronic pain, infection, hernia recurrence, adhesion, abdominal obstruction, bleeding, fistula, granuloma, seroma, and tissue perforation as severe risks linked to hernia mesh devices. Mesh migration may also occur as a complication and lead to more invasive issues. A more serious complication is the development of bowel perforation or fistula, usually due to chronic erosion of the bowel. When health problems arise, mesh removal itself can lead to infection and permanent pelvic pain. Some estimates say about 30 percent of patients with mesh implants removed will suffer from chronic pain. Product liability attorneys claim thousands of defective hernia mesh devices have not been properly tested before they were marketed and distributed to the American public. Medical device companies like Aspide, Atrium, Gore, Covidien, Ethicon and Bard have paid millions of dollars to victims to settle device injury lawsuits. If you have experienced hernia mesh complications, consult a medical professional and an Ohio hernia mesh lawyer. The Lyon Firm has national experience litigating product liability cases against medical device manufacturers. If you or a loved one suffered an injury due to a defective hernia mesh device, and have questions about the legal remedies available to improve quality of life and medical care, contact The Lyon Firm at (800) 513-2403. You will speak directly with Mr. Lyon, an Ohio hernia mesh lawyer, and he will help you answer these critical questions.
# coding=utf-8 """ Serializers common to all assessment types. """ from copy import deepcopy import logging from django.core.cache import cache from rest_framework import serializers from openassessment.assessment.models import ( Assessment, AssessmentPart, Criterion, CriterionOption, Rubric, ) logger = logging.getLogger(__name__) class InvalidRubric(Exception): """This can be raised during the deserialization process.""" def __init__(self, errors): Exception.__init__(self, repr(errors)) self.errors = deepcopy(errors) class NestedModelSerializer(serializers.ModelSerializer): """Model Serializer that supports deserialization with arbitrary nesting. The Django REST Framework does not currently support deserialization more than one level deep (so a parent and children). We want to be able to create a :class:`Rubric` → :class:`Criterion` → :class:`CriterionOption` hierarchy. Much of the base logic already "just works" and serialization of arbritrary depth is supported. So we just override the save_object method to recursively link foreign key relations instead of doing it one level deep. We don't touch many-to-many relationships because we don't need to for our purposes, so those still only work one level deep. """ def recursively_link_related(self, obj, **kwargs): if getattr(obj, '_related_data', None): for accessor_name, related in obj._related_data.items(): setattr(obj, accessor_name, related) for related_obj in related: self.recursively_link_related(related_obj, **kwargs) del(obj._related_data) def save_object(self, obj, **kwargs): obj.save(**kwargs) # The code for many-to-many relationships is just copy-pasted from the # Django REST Framework ModelSerializer if getattr(obj, '_m2m_data', None): for accessor_name, object_list in obj._m2m_data.items(): setattr(obj, accessor_name, object_list) del(obj._m2m_data) # This is our only real change from ModelSerializer self.recursively_link_related(obj, **kwargs) class CriterionOptionSerializer(NestedModelSerializer): """Serializer for :class:`CriterionOption`""" class Meta: model = CriterionOption fields = ('order_num', 'points', 'name', 'label', 'explanation') class CriterionSerializer(NestedModelSerializer): """Serializer for :class:`Criterion`""" options = CriterionOptionSerializer(required=True, many=True) points_possible = serializers.Field(source='points_possible') class Meta: model = Criterion fields = ('order_num', 'name', 'label', 'prompt', 'options', 'points_possible') class RubricSerializer(NestedModelSerializer): """Serializer for :class:`Rubric`.""" criteria = CriterionSerializer(required=True, many=True) points_possible = serializers.Field(source='points_possible') class Meta: model = Rubric fields = ('id', 'content_hash', 'structure_hash', 'criteria', 'points_possible') def validate_criteria(self, attrs, source): """Make sure we have at least one Criterion in the Rubric.""" criteria = attrs[source] if not criteria: raise serializers.ValidationError("Must have at least one criterion") return attrs @classmethod def serialized_from_cache(cls, rubric, local_cache=None): """For a given `Rubric` model object, return a serialized version. This method will attempt to use the cache if possible, first looking at the `local_cache` dict you can pass in, and then looking at whatever Django cache is configured. Args: rubric (Rubric): The Rubric model to get the serialized form of. local_cach (dict): Mapping of `rubric.content_hash` to serialized rubric dictionary. We include this so that we can call this method in a loop. 
Returns: dict: `Rubric` fields as a dictionary, with `criteria` and `options` relations followed. """ # Optional local cache you can send in (for when you're calling this # in a loop). local_cache = local_cache or {} # Check our in-memory cache... if rubric.content_hash in local_cache: return local_cache[rubric.content_hash] # Check the external cache (e.g. memcached) rubric_dict_cache_key = ( "RubricSerializer.serialized_from_cache.{}" .format(rubric.content_hash) ) rubric_dict = cache.get(rubric_dict_cache_key) if rubric_dict: local_cache[rubric.content_hash] = rubric_dict return rubric_dict # Grab it from the database rubric_dict = RubricSerializer(rubric).data cache.set(rubric_dict_cache_key, rubric_dict) local_cache[rubric.content_hash] = rubric_dict return rubric_dict class AssessmentPartSerializer(serializers.ModelSerializer): """Serializer for :class:`AssessmentPart`.""" class Meta: model = AssessmentPart fields = ('option', 'criterion', 'feedback') class AssessmentSerializer(serializers.ModelSerializer): """Simplified serializer for :class:`Assessment` that's lighter on the DB.""" class Meta: model = Assessment fields = ( 'submission_uuid', 'rubric', 'scored_at', 'scorer_id', 'score_type', 'feedback', ) def serialize_assessments(assessments_qset): assessments = list(assessments_qset.select_related("rubric")) rubric_cache = {} return [ full_assessment_dict( assessment, RubricSerializer.serialized_from_cache( assessment.rubric, rubric_cache ) ) for assessment in assessments ] def full_assessment_dict(assessment, rubric_dict=None): """ Return a dict representation of the Assessment model, including nested assessment parts. We do some of the serialization ourselves here instead of relying on the Django REST Framework serializers. This is for performance reasons -- we have a cached rubric easily available, and we don't want to follow all the DB relations from assessment -> assessment part -> option -> criterion. Args: assessment (Assessment): The Assessment model to serialize Returns: dict with keys 'rubric' (serialized Rubric model) and 'parts' (serialized assessment parts) """ assessment_cache_key = "assessment.full_assessment_dict.{}.{}.{}".format( assessment.id, assessment.submission_uuid, assessment.scored_at.isoformat() ) assessment_dict = cache.get(assessment_cache_key) if assessment_dict: return assessment_dict assessment_dict = AssessmentSerializer(assessment).data if not rubric_dict: rubric_dict = RubricSerializer.serialized_from_cache(assessment.rubric) assessment_dict["rubric"] = rubric_dict # This part looks a little goofy, but it's in the name of saving dozens of # SQL lookups. The rubric_dict has the entire serialized output of the # `Rubric`, its child `Criterion` and grandchild `CriterionOption`. This # includes calculated things like `points_possible` which aren't actually in # the DB model. Instead of invoking the serializers for `Criterion` and # `CriterionOption` again, we simply index into the places we expect them to # be from the big, saved `Rubric` serialization. 
parts = [] for part in assessment.parts.all().select_related("criterion", "option"): criterion_dict = rubric_dict["criteria"][part.criterion.order_num] options_dict = None if part.option is not None: options_dict = criterion_dict["options"][part.option.order_num] options_dict["criterion"] = criterion_dict parts.append({ "option": options_dict, "criterion": criterion_dict, "feedback": part.feedback }) # Now manually built up the dynamically calculated values on the # `Assessment` so we can again avoid DB calls. assessment_dict["parts"] = parts assessment_dict["points_earned"] = sum( part_dict["option"]["points"] if part_dict["option"] is not None else 0 for part_dict in parts ) assessment_dict["points_possible"] = rubric_dict["points_possible"] cache.set(assessment_cache_key, assessment_dict) return assessment_dict def rubric_from_dict(rubric_dict): """Given a dict of rubric information, return the corresponding Rubric This will create the Rubric and its children if it does not exist already. Sample data (one criterion, two options):: { "prompt": "Create a plan to deliver ora2!", "criteria": [ { "order_num": 0, "name": "realistic", "prompt": "Is the deadline realistic?", "options": [ { "order_num": 0, "points": 0, "name": "No", "explanation": "We need more time!" }, { "order_num": 1, "points": 2, "name": "Yes", "explanation": "We got this." }, ] } ] } """ rubric_dict = deepcopy(rubric_dict) # Calculate the hash based on the rubric content... content_hash = Rubric.content_hash_from_dict(rubric_dict) try: rubric = Rubric.objects.get(content_hash=content_hash) except Rubric.DoesNotExist: rubric_dict["content_hash"] = content_hash rubric_dict["structure_hash"] = Rubric.structure_hash_from_dict(rubric_dict) for crit_idx, criterion in enumerate(rubric_dict.get("criteria", {})): if "order_num" not in criterion: criterion["order_num"] = crit_idx for opt_idx, option in enumerate(criterion.get("options", {})): if "order_num" not in option: option["order_num"] = opt_idx rubric_serializer = RubricSerializer(data=rubric_dict) if not rubric_serializer.is_valid(): raise InvalidRubric(rubric_serializer.errors) rubric = rubric_serializer.save() return rubric
There's a part of me that wants to have a face-off with the various covers of Mary E. Pearson's The Adoration of Jenna Fox series (I mean, I own and love two of the different versions, after all), but for now I'm just going to let Jenna face off against the puzzling face on the cover of MetaGame. (Puzzling, haha. Get it?) [ba dum bum]. Anywhoodle, both are pretty neat-o, but which one catches your eye? Knowing nothing about either book, which one would you pick up? Last Week on FFO: The US and UK versions of Amy Kathleen Ryan's Glow faced off in one of the closest matches we've had in recent weeks, with the UK cover just barely sparkling its way to a win. I like the cover for Jenna Fox much better actually. I like the profile shot better and the puzzle pieces seem more like a part of the image than the very definite PUZZLE! feel (IMO) of Mind Games. I like Jenna Fox better. I'm in agreement. Jenna Fox for me. Jenna Fox for me as well. I like the cover of Jenna Fox more. The profile shot looks better, and I like that it's more puzzley. The 1st one. The 2nd feels so odd. To be honest, I really dislike both of these. But I guess the 1st one is slightly better. Jenna Fox did it way better! Yeah I'm kinda hating both of these, as well, but the Jenna Fox one is the better of the two. The Metagame cover looks like a PC program manual, I don't like it at all. Wow, I'm the only one that likes MetaGame. I'm not a fan of the "puzzle" design for either cover. Puzzle pieces are meant to be 3D but covers that use them normally lack any depth in the image. Metagame pulls it off a bit better but meh. Even my cover whorish ways do not approve.
from bitmovin.errors import MissingArgumentError, FunctionalityNotAvailableError from bitmovin.services.manifests.media_custom_tag_service import MediaCustomTag from bitmovin.services.rest_service import RestService class GenericMediaService(RestService): BASE_ENDPOINT_URL = 'encoding/manifests/hls/{manifest_id}/media/{media_type}' def __init__(self, http_client, media_type_url, resource_class): if not media_type_url: raise MissingArgumentError('media_type_url must be given') if not resource_class: raise MissingArgumentError('resource_class must be given') self.media_type_url = media_type_url self.resource_class = resource_class super().__init__(http_client=http_client, relative_url=self.BASE_ENDPOINT_URL, class_=self.resource_class) self.CustomTag = MediaCustomTag(http_client=http_client) def _get_endpoint_url(self, manifest_id): if not manifest_id: raise MissingArgumentError('manifest_id must be given') endpoint_url = self.BASE_ENDPOINT_URL\ .replace('{manifest_id}', manifest_id)\ .replace('{media_type}', self.media_type_url) return endpoint_url def create(self, object_, manifest_id): self.relative_url = self._get_endpoint_url(manifest_id=manifest_id) return super().create(object_) def delete(self, manifest_id, media_id): self.relative_url = self._get_endpoint_url(manifest_id=manifest_id) return super().delete(id_=media_id) def retrieve(self, manifest_id, media_id): self.relative_url = self._get_endpoint_url(manifest_id=manifest_id) return super().retrieve(id_=media_id) def list(self, manifest_id, offset=None, limit=None): self.relative_url = self._get_endpoint_url(manifest_id=manifest_id) return super().list(offset, limit) def retrieve_custom_data(self, manifest_id, media_id): raise FunctionalityNotAvailableError('Retrieve Custom Data is not available for HLS Manifest Medias')
This policy sets the standards for the doctoral final oral examination and the submission of the final copy of the doctoral dissertation for the completion of the doctoral degree. Collegiate deans or their designated representatives at the collegiate level must verify eligibility and approve the members of the final oral examination committee. A minimum of 2 major field reviewers and 1 minor/outside reviewer are required. In the case of multiple minors, there must be a reviewer for each minor. Advisor(s) and co-advisor(s) must serve as reviewers. Students must provide reviewers with a copy of the dissertation at least 14 days before the scheduled date of the doctoral final oral examination. A public presentation of the candidate’s dissertation to the doctoral final oral examination committee and the invited scholarly community. A closed session (open only to the doctoral final oral examination committee and the candidate) immediately following the public presentation. To be recommended for the award of the doctoral degree, all committee members, or all committee members save one, must vote that the student has passed the doctoral final oral examination. Students are not allowed to retake the final oral examination. Committee members must notify the candidate in writing of all required revisions to the doctoral dissertation as well as specifying a time limit for the submission of the revised doctoral dissertation within seven (7) days of the final oral examination. All students who complete a doctoral dissertation must file a digital copy of the dissertation with the University in accordance with University standards. Students may choose whether or not to request an embargo of the publication of the dissertation for a limited period of time. Doctoral programs with approved completion requirements that do not include a final oral examination are exempt from I. Doctoral programs with approved completion requirements that do not include a doctoral dissertation are exempt from II. This policy applies to all students admitted after January 1, 2013. Students who matriculated before January 1, 2013 may choose to continue under the policies in effect when they initially matriculated in their graduate program. This policy does not apply to the J.D., M.D., Pharm.D., D.V.M., D.D.S, L.L.M degrees. This policy establishes uniform standards for the doctoral final oral examination; defines timely submission of copies of the dissertation for University archives, and supports Board of Regents Policy: Openness in Research which covers public dissemination of University-sponsored research. Provide guidelines for formatting and submitting the dissertation, to include not only current instructions for electronic formatting and filing but also guidelines governing the use of already published material in the dissertation. Guidelines should take account of possible copyright issues. Approve and archive in the system of record committee membership (including any subsequent changes to an approved committee). Approve and record the specific procedures used by programs for administering and grading the doctoral preliminary and final examinations. Maintain and publish any additional collegiate-level publishing standards or guidelines (e.g., stylistic conventions based on discipline, language of the thesis). Maintain and publish any additional program-level publishing standards or guidelines (e.g., stylistic conventions based on discipline, language of the thesis). 
Review and approve committee membership (including any subsequent changes to an approved committee); route program-approved requests to the collegiate unit for approval. Must meet the formatting requirements for the submission of the final doctoral dissertation. Must meet all requirements for completing the doctoral degree. June 2017 – Comprehensive Review, Minor Revision. 1) Rewrote the introduction to clarify the goal of the policy, 2) moved language regarding composition of the final exam committee from the Appointments to Graduate Examination Committees policy to this policy for consistency, 3) added language to clarify that the co-advisors must be thesis reviewers, 4) added language requiring the committee to notify the student in writing of any required revisions to the dissertation and a deadline for the completion of such revisions, 5) clarified language to indicate that students have a choice whether or not to impose an embargo on publication of the thesis for a limited time period, 6) removed language regarding reactivation in order to graduate. Changes in policy respond to the need for greater clarity on policy intent on the part of affected constituencies and enhanced consistency in policy implementation across programs. July 2012 - New Policy, Comprehensive Review. 1. Establishes guidelines for remote participation in graduate milestone examinations. 2. Specifies the University as the digital archive of record for deposit of dissertations. 3. Facilitates reactivation of students who have completed all other degree requirements so that their degree may be conferred. 4. Extends applicability of policy requirements to programs not formerly under the aegis of the Graduate School.
"""This module provides utility classes and functions to load spike sorting data sets.""" # ----------------------------------------------------------------------------- # Imports # ----------------------------------------------------------------------------- import os.path import re import numpy as np from iotools import (find_filename, find_index, load_text, load_xml, normalize, load_binary) from selection import select from logger import debug, info, warn # ----------------------------------------------------------------------------- # File loading functions # ----------------------------------------------------------------------------- def read_xml(filename_xml, fileindex): """Read the XML file associated to the current dataset, and return a metadata dictionary.""" params = load_xml(filename_xml, fileindex=fileindex) # klusters tests metadata = dict( nchannels=params['nchannels'], nsamples=params['nsamples'], fetdim=params['fetdim'], freq=params['rate']) return metadata def read_features(filename_fet, nchannels, fetdim, freq): """Read a .fet file and return the normalize features array, as well as the spiketimes.""" features = load_text(filename_fet, np.int32, skiprows=1) features = np.array(features, dtype=np.float32) # HACK: There are either 1 or 5 dimensions more than fetdim*nchannels # we can't be sure so we first try 1, if it does not work we try 5. for nextradim in [1, 5]: try: features = features.reshape((-1, fetdim * nchannels + nextradim)) # if the features array could be reshape, directly break the loop break except ValueError: features = None if features is None: raise ValueError("""The number of columns in the feature matrix is not fetdim (%d) x nchannels (%d) + 1 or 5.""" % (fetdim, nchannels)) # get the spiketimes spiketimes = features[:,-1].copy() spiketimes *= (1. / freq) # count the number of extra features nextrafet = features.shape[1] - nchannels * fetdim # normalize normal features while keeping symmetry features[:,:-nextrafet] = normalize(features[:,:-nextrafet], symmetric=True) # normalize extra features without keeping symmetry features[:,-nextrafet:] = normalize(features[:,-nextrafet:], symmetric=False) return features, spiketimes def read_clusters(filename_clu): clusters = load_text(filename_clu, np.int32) clusters = clusters[1:] return clusters def read_masks(filename_mask, fetdim): full_masks = load_text(filename_mask, np.float32, skiprows=1) masks = full_masks[:,:-1:fetdim] return masks, full_masks def read_waveforms(filename_spk, nsamples, nchannels): waveforms = load_binary(filename_spk) waveforms = waveforms.reshape((-1, nsamples, nchannels)) return waveforms # ----------------------------------------------------------------------------- # KlustersLoader class # ----------------------------------------------------------------------------- class KlustersLoader(object): # Initialization methods # ---------------------- def __init__(self, filename=None): """Initialize a Loader object for loading Klusters-formatted files. Arguments: * filename: the full path of any file belonging to the same dataset. """ if filename: self.open(filename) def open(self, filename): """Open a file.""" self.filename = filename # Find the file index associated to the filename, or 1 by default. 
self.fileindex = find_index(filename) or 1 self.find_filenames() self.read() def find_filenames(self): """Find the filenames of the different files for the current dataset.""" self.filename_xml = find_filename(self.filename, 'xml') self.filename_fet = find_filename(self.filename, 'fet') self.filename_clu = find_filename(self.filename, 'clu') # fmask or mask file self.filename_mask = find_filename(self.filename, 'fmask') if not self.filename_mask: self.filename_mask = find_filename(self.filename, 'mask') self.filename_spk = find_filename(self.filename, 'spk') # Input-Output methods # -------------------- def read(self): # Read metadata. try: self.metadata = read_xml(self.filename_xml, self.fileindex) except IOError: # Die if no XML file is available for this dataset, as it contains # critical metadata. raise IOError("The XML file is missing.") nsamples = self.metadata.get('nsamples') nchannels = self.metadata.get('nchannels') fetdim = self.metadata.get('fetdim') freq = self.metadata.get('freq') # Read features. try: self.features, self.spiketimes = read_features(self.filename_fet, nchannels, fetdim, freq) except IOError: raise IOError("The FET file is missing.") # Count the number of spikes and save it in the metadata. nspikes = self.features.shape[0] self.metadata['nspikes'] = nspikes # Read clusters. try: self.clusters = read_clusters(self.filename_clu) except IOError: warn("The CLU file is missing.") # Default clusters if the CLU file is not available. self.clusters = np.zeros(nspikes + 1, dtype=np.int32) self.clusters[0] = 1 # Read masks. try: self.masks, self.masks_full = read_masks(self.filename_mask, fetdim) except IOError: warn("The MASKS/FMASKS file is missing.") # Default masks if the MASK/FMASK file is not available. self.masks = np.ones((nspikes, nchannels)) self.masks_full = np.ones(features.shape) # Read waveforms. try: self.waveforms = read_waveforms(self.filename_spk, nsamples, nchannels) except IOError: warn("The SPK file is missing.") self.waveforms = np.zeros((nspikes, nsamples, nchannels)) def close(self): self.filename = None self.fileindex = None self.filename_xml = None self.filename_fet = None self.filename_clu = None self.filename_mask = None self.filename_spk = None self.features = None self.spiketimes = None self.clusters = None self.masks = None self.masks_full = None self.waveforms = None self.metadata = {} # Access to the data # ------------------ def get_features(self, spikes=None): return select(self.features, spikes) def get_spiketimes(self, spikes=None): return select(self.spiketimes, spikes) def get_clusters(self, spikes=None): return select(self.clusters, spikes) def get_masks(self, spikes=None, full=None): if not full: masks = self.masks else: masks = self.masks_full return select(masks, spikes) def get_waveforms(self, spikes=None): return select(self.waveforms, spikes) if __name__ == '__main__': filename = "D:\Git\spiky\_test\data\subset41test.clu.1" l = KlustersLoader(filename) print l.metadata
Florida Mobile Signs, LLC: Your Full-Service Sign Company! Neon Sign Repair and Conversion. You've already made a significant investment in your sign to draw attention and foot traffic to your organization or business. If the health of your sign has faded with time, let Florida Mobile Signs, LLC repair and restore the "face" of your business. From vinyl replacement, new paint, LED retrofits, new sign faces, or ballast and bulb replacement, let our professional sign technicians get your sign back in working order. Florida Mobile Signs, LLC has a 42 ft. bucket truck and can service and repair all your sign and lighting needs. Whether you have a damaged, worn-out or non-working sign, parking lot or wall-pack lights that are not working, or you're looking to retrofit your signs or interior and exterior light fixtures with energy-saving LEDs, we can service all your commercial sign and lighting needs.
""" Multi-dimensional Scaling (MDS) """ # author: Nelle Varoquaux <[email protected]> # Licence: BSD import numpy as np import warnings from ..base import BaseEstimator from ..metrics import euclidean_distances from ..utils import check_random_state, check_array, check_symmetric from ..externals.joblib import Parallel from ..externals.joblib import delayed from ..isotonic import IsotonicRegression def _smacof_single(similarities, metric=True, n_components=2, init=None, max_iter=300, verbose=0, eps=1e-3, random_state=None): """ Computes multidimensional scaling using SMACOF algorithm Parameters ---------- similarities: symmetric ndarray, shape [n * n] similarities between the points metric: boolean, optional, default: True compute metric or nonmetric SMACOF algorithm n_components: int, optional, default: 2 number of dimension in which to immerse the similarities overwritten if initial array is provided. init: {None or ndarray}, optional if None, randomly chooses the initial configuration if ndarray, initialize the SMACOF algorithm with this array max_iter: int, optional, default: 300 Maximum number of iterations of the SMACOF algorithm for a single run verbose: int, optional, default: 0 level of verbosity eps: float, optional, default: 1e-6 relative tolerance w.r.t stress to declare converge random_state: integer or numpy.RandomState, optional The generator used to initialize the centers. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. Returns ------- X: ndarray (n_samples, n_components), float coordinates of the n_samples points in a n_components-space stress_: float The final value of the stress (sum of squared distance of the disparities and the distances for all constrained points) n_iter : int Number of iterations run. """ similarities = check_symmetric(similarities, raise_exception=True) n_samples = similarities.shape[0] random_state = check_random_state(random_state) sim_flat = ((1 - np.tri(n_samples)) * similarities).ravel() sim_flat_w = sim_flat[sim_flat != 0] if init is None: # Randomly choose initial configuration X = random_state.rand(n_samples * n_components) X = X.reshape((n_samples, n_components)) else: # overrides the parameter p n_components = init.shape[1] if n_samples != init.shape[0]: raise ValueError("init matrix should be of shape (%d, %d)" % (n_samples, n_components)) X = init old_stress = None ir = IsotonicRegression() for it in range(max_iter): # Compute distance and monotonic regression dis = euclidean_distances(X) if metric: disparities = similarities else: dis_flat = dis.ravel() # similarities with 0 are considered as missing values dis_flat_w = dis_flat[sim_flat != 0] # Compute the disparities using a monotonic regression disparities_flat = ir.fit_transform(sim_flat_w, dis_flat_w) disparities = dis_flat.copy() disparities[sim_flat != 0] = disparities_flat disparities = disparities.reshape((n_samples, n_samples)) disparities *= np.sqrt((n_samples * (n_samples - 1) / 2) / (disparities ** 2).sum()) # Compute stress stress = ((dis.ravel() - disparities.ravel()) ** 2).sum() / 2 # Update X using the Guttman transform dis[dis == 0] = 1e-5 ratio = disparities / dis B = - ratio B[np.arange(len(B)), np.arange(len(B))] += ratio.sum(axis=1) X = 1. 
/ n_samples * np.dot(B, X) dis = np.sqrt((X ** 2).sum(axis=1)).sum() if verbose >= 2: print('it: %d, stress %s' % (it, stress)) if old_stress is not None: if(old_stress - stress / dis) < eps: if verbose: print('breaking at iteration %d with stress %s' % (it, stress)) break old_stress = stress / dis return X, stress, it + 1 def smacof(similarities, metric=True, n_components=2, init=None, n_init=8, n_jobs=1, max_iter=300, verbose=0, eps=1e-3, random_state=None, return_n_iter=False): """ Computes multidimensional scaling using SMACOF (Scaling by Majorizing a Complicated Function) algorithm The SMACOF algorithm is a multidimensional scaling algorithm: it minimizes a objective function, the *stress*, using a majorization technique. The Stress Majorization, also known as the Guttman Transform, guarantees a monotone convergence of Stress, and is more powerful than traditional techniques such as gradient descent. The SMACOF algorithm for metric MDS can summarized by the following steps: 1. Set an initial start configuration, randomly or not. 2. Compute the stress 3. Compute the Guttman Transform 4. Iterate 2 and 3 until convergence. The nonmetric algorithm adds a monotonic regression steps before computing the stress. Parameters ---------- similarities : symmetric ndarray, shape (n_samples, n_samples) similarities between the points metric : boolean, optional, default: True compute metric or nonmetric SMACOF algorithm n_components : int, optional, default: 2 number of dimension in which to immerse the similarities overridden if initial array is provided. init : {None or ndarray of shape (n_samples, n_components)}, optional if None, randomly chooses the initial configuration if ndarray, initialize the SMACOF algorithm with this array n_init : int, optional, default: 8 Number of time the smacof algorithm will be run with different initialisation. The final results will be the best output of the n_init consecutive runs in terms of stress. n_jobs : int, optional, default: 1 The number of jobs to use for the computation. This works by breaking down the pairwise matrix into n_jobs even slices and computing them in parallel. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used. max_iter : int, optional, default: 300 Maximum number of iterations of the SMACOF algorithm for a single run verbose : int, optional, default: 0 level of verbosity eps : float, optional, default: 1e-6 relative tolerance w.r.t stress to declare converge random_state : integer or numpy.RandomState, optional The generator used to initialize the centers. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. return_n_iter : bool Whether or not to return the number of iterations. Returns ------- X : ndarray (n_samples,n_components) Coordinates of the n_samples points in a n_components-space stress : float The final value of the stress (sum of squared distance of the disparities and the distances for all constrained points) n_iter : int The number of iterations corresponding to the best stress. Returned only if `return_n_iter` is set to True. Notes ----- "Modern Multidimensional Scaling - Theory and Applications" Borg, I.; Groenen P. Springer Series in Statistics (1997) "Nonmetric multidimensional scaling: a numerical method" Kruskal, J. 
Psychometrika, 29 (1964) "Multidimensional scaling by optimizing goodness of fit to a nonmetric hypothesis" Kruskal, J. Psychometrika, 29, (1964) """ similarities = check_array(similarities) random_state = check_random_state(random_state) if hasattr(init, '__array__'): init = np.asarray(init).copy() if not n_init == 1: warnings.warn( 'Explicit initial positions passed: ' 'performing only one init of the MDS instead of %d' % n_init) n_init = 1 best_pos, best_stress = None, None if n_jobs == 1: for it in range(n_init): pos, stress, n_iter_ = _smacof_single( similarities, metric=metric, n_components=n_components, init=init, max_iter=max_iter, verbose=verbose, eps=eps, random_state=random_state) if best_stress is None or stress < best_stress: best_stress = stress best_pos = pos.copy() best_iter = n_iter_ else: seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init) results = Parallel(n_jobs=n_jobs, verbose=max(verbose - 1, 0))( delayed(_smacof_single)( similarities, metric=metric, n_components=n_components, init=init, max_iter=max_iter, verbose=verbose, eps=eps, random_state=seed) for seed in seeds) positions, stress, n_iters = zip(*results) best = np.argmin(stress) best_stress = stress[best] best_pos = positions[best] best_iter = n_iters[best] if return_n_iter: return best_pos, best_stress, best_iter else: return best_pos, best_stress class MDS(BaseEstimator): """Multidimensional scaling Read more in the :ref:`User Guide <multidimensional_scaling>`. Parameters ---------- metric : boolean, optional, default: True compute metric or nonmetric SMACOF (Scaling by Majorizing a Complicated Function) algorithm n_components : int, optional, default: 2 number of dimension in which to immerse the similarities overridden if initial array is provided. n_init : int, optional, default: 4 Number of time the smacof algorithm will be run with different initialisation. The final results will be the best output of the n_init consecutive runs in terms of stress. max_iter : int, optional, default: 300 Maximum number of iterations of the SMACOF algorithm for a single run verbose : int, optional, default: 0 level of verbosity eps : float, optional, default: 1e-6 relative tolerance w.r.t stress to declare converge n_jobs : int, optional, default: 1 The number of jobs to use for the computation. This works by breaking down the pairwise matrix into n_jobs even slices and computing them in parallel. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used. random_state : integer or numpy.RandomState, optional The generator used to initialize the centers. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. dissimilarity : string Which dissimilarity measure to use. Supported are 'euclidean' and 'precomputed'. Attributes ---------- embedding_ : array-like, shape [n_components, n_samples] Stores the position of the dataset in the embedding space stress_ : float The final value of the stress (sum of squared distance of the disparities and the distances for all constrained points) References ---------- "Modern Multidimensional Scaling - Theory and Applications" Borg, I.; Groenen P. Springer Series in Statistics (1997) "Nonmetric multidimensional scaling: a numerical method" Kruskal, J. Psychometrika, 29 (1964) "Multidimensional scaling by optimizing goodness of fit to a nonmetric hypothesis" Kruskal, J. 
Psychometrika, 29, (1964) """ def __init__(self, n_components=2, metric=True, n_init=4, max_iter=300, verbose=0, eps=1e-3, n_jobs=1, random_state=None, dissimilarity="euclidean"): self.n_components = n_components self.dissimilarity = dissimilarity self.metric = metric self.n_init = n_init self.max_iter = max_iter self.eps = eps self.verbose = verbose self.n_jobs = n_jobs self.random_state = random_state @property def _pairwise(self): return self.kernel == "precomputed" def fit(self, X, y=None, init=None): """ Computes the position of the points in the embedding space Parameters ---------- X : array, shape=[n_samples, n_features], or [n_samples, n_samples] \ if dissimilarity='precomputed' Input data. init : {None or ndarray, shape (n_samples,)}, optional If None, randomly chooses the initial configuration if ndarray, initialize the SMACOF algorithm with this array. """ self.fit_transform(X, init=init) return self def fit_transform(self, X, y=None, init=None): """ Fit the data from X, and returns the embedded coordinates Parameters ---------- X : array, shape=[n_samples, n_features], or [n_samples, n_samples] \ if dissimilarity='precomputed' Input data. init : {None or ndarray, shape (n_samples,)}, optional If None, randomly chooses the initial configuration if ndarray, initialize the SMACOF algorithm with this array. """ X = check_array(X) if X.shape[0] == X.shape[1] and self.dissimilarity != "precomputed": warnings.warn("The MDS API has changed. ``fit`` now constructs an" " dissimilarity matrix from data. To use a custom " "dissimilarity matrix, set " "``dissimilarity='precomputed'``.") if self.dissimilarity == "precomputed": self.dissimilarity_matrix_ = X elif self.dissimilarity == "euclidean": self.dissimilarity_matrix_ = euclidean_distances(X) else: raise ValueError("Proximity must be 'precomputed' or 'euclidean'." " Got %s instead" % str(self.dissimilarity)) self.embedding_, self.stress_, self.n_iter_ = smacof( self.dissimilarity_matrix_, metric=self.metric, n_components=self.n_components, init=init, n_init=self.n_init, n_jobs=self.n_jobs, max_iter=self.max_iter, verbose=self.verbose, eps=self.eps, random_state=self.random_state, return_n_iter=True) return self.embedding_
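A minimal usage sketch of the estimator defined above, following the standard scikit-learn fit/transform workflow; the toy data, parameter values, and the precomputed-dissimilarity variant shown here are illustrative, not taken from the module itself.

import numpy as np
from sklearn.manifold import MDS
from sklearn.metrics.pairwise import euclidean_distances

# Embed four 5-dimensional points into 2 dimensions with metric SMACOF.
rng = np.random.RandomState(0)
X = rng.rand(4, 5)

mds = MDS(n_components=2, n_init=4, max_iter=300, random_state=0)
X_2d = mds.fit_transform(X)          # shape (4, 2)
print(mds.stress_)                   # final stress of the best of the n_init runs

# Equivalently, pass a precomputed dissimilarity matrix.
D = euclidean_distances(X)
X_2d_pre = MDS(n_components=2, dissimilarity="precomputed",
               random_state=0).fit_transform(D)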
Color: Blue bulbs / green wire. Number of bulbs on string: 50. Bulb size: Mini. Spacing between each bulb: 4". Lighted length: 16.8'. Total length: 17.5'. 4.5" Lead cord. 4" Tail cord. Super bright bulbs. UL listed for indoor or outdoor use. If one bulb burns out, the rest will stay lit. Lights are equipped with a lamp-lock feature, which makes them replaceable and interchangeable and keeps them from falling out. Contains end-to-end connectors, which allow you to connect multiple sets together (not to exceed 210 watts). Comes with replacement bulbs, spare fuses and blinker bulbs. Wire gauge: 22. 120 Volts, 60 hertz, 0.17 amps, 20.4 watts.
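The electrical and length figures in this listing can be cross-checked with a couple of lines of arithmetic; the sketch below only verifies numbers already stated above and introduces no new data.

# Power: listed volts x amps should equal the listed wattage.
volts, amps = 120, 0.17
print(volts * amps)                 # 20.4 W, matching the stated 20.4 watts

# Length: 50 bulbs at 4" spacing spans roughly the stated 16.8' lighted length.
bulbs, spacing_in = 50, 4
print(bulbs * spacing_in / 12.0)    # ~16.7 ft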
import numpy as np import matplotlib.pyplot as plt import sys #========================================================================== # read Sharp & Huebner data #========================================================================== SHnam = np.loadtxt('SharpHuebner.dat',skiprows=0,usecols=[0],dtype='str') SHcoeff = np.loadtxt('SharpHuebner.dat',skiprows=0,usecols=[1,2,3,4,5]) #========================================================================== # read GGchem data #========================================================================== f = open('../../data/DustChem.dat','r') lines = f.readlines()[:] f.close Nline = len(lines) Ncond = len(SHnam) Nfound = 0 f = open('DustChem_SH90.dat','w') f.write('dust species\n') f.write('============\n') f.write('%d\n' %(Ncond)) for i in range(0,Ncond): cond = SHnam[i]+'[s]' lenc = len(cond) found = 0 for j in range(0,Nline): line = lines[j] if (line[0:lenc]==cond): found = 1 Nfound = Nfound+1 print cond,j f.write('\n') correct = 0 while True: line = lines[j] if (line=='\n'): break if (line[0]=='#'): correct=1 if (correct==1): line = '#'+line[1:] if (correct==0): f.write(line) j = j+1 c = SHcoeff[i,0:5] f.write('# Sharp & Huebner (1990):\n') f.write(' 1 %12.5e %12.5e %12.5e %12.5e %12.5e\n' %(c[0],c[1],c[2],c[3],c[4])) if (found==0): print cond," not found." f.write('\n') f.write(cond+'\n') f.write('# Sharp & Huebner (1990):\n') f.write(' 1 %12.5e %12.5e %12.5e %12.5e %12.5e\n' %(c[0],c[1],c[2],c[3],c[4])) f.close print Nfound," condensates found." print Ncond-Nfound," condensates not found."
Homeschooling your kids is a unique opportunity to truly shape the minds of tomorrow, but even the most enthusiastic student can get bored at times. The hours spent reading lines of text, trying to solve math problems, and struggling to remember facts can really start to get your son or daughter down — unless you step in and make homeschooling fun and exciting. In this article, we'll share three homeschooling tips with you that will transform the way you teach your kids and give them new enthusiasm for learning. There are times when your kids will need to knuckle down and cram - but that shouldn't be their daily experience of homeschooling. Take the time to plan interesting outings with your kids that give them an opportunity to apply what they've learned - or learn by doing. Studying biology is way more fun at the zoo, while history comes alive at museums and monuments. As long as the outing is related to the work they are studying, they'll remember it more easily and have fun learning. The cold months are especially hard for homeschooled kids, because nobody feels like leaving home in winter. To break the monotony of this season, get your kids some clothes especially for school time and head out for some educational adventures. Moncler for kids has a wide selection of outerwear that is both stylish and functional. Snowy climates have their own unique physical features that your kids can study as part of their geography lessons, while the holiday season is full of cultural activities that make studying history, English and social studies more engaging. Head to a football game and get a physical education lesson! Go, Chiefs! A teacher who is able to make their subject seem fascinating, relevant — or at least useful - by telling interesting stories and sharing life experiences is more likely to succeed than a boring bookworm. Whatever you teach your kids from a book, tell or show them how it makes a difference in the real world. If you're not sure yourself, take some time to research it online and think of ways to explain the topic to your kids in practical terms — you may learn something new yourself. Daddy and Callie playing Ticket to Ride on a Sunday ... geography, math and more! Math, science, and history are three subjects that kids usually describe as difficult or boring — but they are essential to understanding the world and shaping your child's outlook later in life. By showing your kids how these subjects (and the people who study them) make a difference in our world, you'll inspire them to do their best. A visit to your local science or history museum is a great chance for your kids to interact with museum staff and volunteers who can explain difficult concepts in language that's easy to understand - and if you have questions of your own, they'll probably be happy to answer them. Touch pool at Shedd Aquarium in Chicago, Illinois. A great place to learn about science! By putting in a little extra effort, you can transform your child's experience of school and open their eyes to the fascinating world out there and the many ways there are to study and understand it. You'll have a unique bonding opportunity that most parents never have with their kids as you teach, learn and explore the world of knowledge together.
"""This filter enables one to select a portion of an input dataset using a plane and clip it so only one side remains. Many thanks to Prabhu for ScalarCutPlane and TransformData. Wouldn't have been able to code this otherwise. """ # Author: Samir Talwar <[email protected]> # License: BSD Style. # Enthought library imports. from enthought.traits.api import Instance, Int, Trait, TraitMap, Button from enthought.traits.ui.api import View, Group, Item from enthought.tvtk.api import tvtk # Local imports from enthought.mayavi.core.common import error from enthought.mayavi.filters.filter_base import FilterBase from enthought.mayavi.core.pipeline_info import PipelineInfo from enthought.mayavi.components.implicit_plane import ImplicitPlane ###################################################################### # `DataSetClipper` class. ###################################################################### class DataSetClipper(FilterBase): # The version of this class. Used for persistence. __version__ = 0 # The implicit plane widget used to easily place the implicit function. implicit_plane = Instance(ImplicitPlane, allow_none=False) # The actual filter. filter = Instance(tvtk.ClipDataSet, allow_none=False) # I'm not sure what this'll work with. vtkUnstructuredGrid is confirmed. # Everything else is somewhat possible. input_info = PipelineInfo(datasets=['any'], attribute_types=['any'], attributes=['any']) output_info = PipelineInfo(datasets=['any'], attribute_types=['any'], attributes=['any']) ######################################## # View related traits. # Button to reset the boundaries of the plane. # This should really be done automatically. reset_button = Button('Reset Boundaries') # The View for this object. view = View(Group(Item(name='reset_button'), Item(name='implicit_plane', style='custom'), show_labels=False, ), ) ###################################################################### # `Filter` interface ###################################################################### def setup_pipeline(self): self.implicit_plane = ImplicitPlane() self.filter = tvtk.ClipDataSet() def update_pipeline(self): inputs = self.inputs if len(inputs) == 0: return implicit_plane = self.implicit_plane implicit_plane.inputs = inputs implicit_plane.update_pipeline() widget = self.implicit_plane.widget widget.outline_translation = 0 self.widgets = [widget] filter = self.filter filter.input = inputs[0].outputs[0] filter.clip_function = implicit_plane.plane filter.update() self._set_outputs([filter.output]) self.pipeline_changed = True def update_data(self): # Do nothing if there is no input. if len(self.inputs) == 0: return # Propagate the data_changed event. self.data_changed = True ###################################################################### # Non-public methods. ###################################################################### def _on_implicit_plane_changed(self): self.filter.clip_function = self.implicit_plane.plane self.filter.update() self.render() def _reset_button_fired(self): if len(self.widgets) == 0: return self.widgets[0].place_widget() self.render()
Wood burning fireplaces are the traditional way of having fireplaces in your home or outdoor patio. For centuries, wood burning fireplaces have been used to warm us and cook our food. While Fireplaces and Wood Stoves NOW isn’t going to say that a wood burning fireplace is better than the gas fireplaces, there are some definite advantages to wood burning fireplaces. There are many different types of wood burning fireplace models which we explain about on Fireplaces and Wood Stoves NOW. Among these are high efficiency tax credit wood fireplaces, best selling wood fireplaces and radiant wood burning fireplaces. There are also indoor and outdoor wood burning fireplaces and wood burning fireplace inserts. Each of these models has different benefits and price ranges. With all of the best selling wood fireplaces on the market, sometimes it can be hard to decide on a certain wood stove or even a wood fireplaces manufacturer! To narrow down your decision a bit, Fireplaces and Wood Stoves NOW has compiled a list of the best selling wood fireplaces. This best selling wood fireplace makes this list for a number of reasons. First of all, it’s from Vermont Casting Fireplaces, one of the best manufacturers in the industry which is enough information to why it is one of the best selling wood fireplaces. The Sequoia model is also a high efficiency tax credit wood fireplace by becoming certified regarding energy efficiency standards – which means that consumers can get a great best selling wood fireplace and a tax credit to help pay for their wood burning fireplace on top to make it even better! Majestic fireplaces Windsor series is another best selling wood fireplace because of the enormous value you get with these wood stove inserts. These best selling wood fireplaces are a good combination of the traditional look with modern wood stove insert technology, and will make any room or home more cozy. They are a great price (normally around $650), and for that price you are going to have a hard time finding better quality and better looking best selling wood fireplaces. For some people, outdoor wood burning fireplaces are the only way to go when it comes to different outdoor fireplaces models. You can’t really completely match the combination of the outdoors and wood burning fireplaces which will make you feel like you really are in the great outdoors, even though you could be right outside your house! Every other type of fireplaces really try to match the look and feel of these outdoor wood burning fireplaces, and they get close but can’t get completely there. Outdoor wood burning fireplaces have arguably the best style, look, feel and smell of any type of outdoor fireplaces. There are also so many style choices when it comes to these so homeowners have a easy time matching their backyard home decor with these outdoor wood burning fireplace units. Outdoor wood burning masonry fireplaces top our list of these outdoor wood burning fireplaces because they are simply the most elegant of all styles. Masonry fireplaces can use the material of your preference, such as brick, stone, or rock depending on what kind of look you want. These outdoor wood burning fireplaces are great at generating heat as well. Cast iron outdoor wood burning fireplaces are next on this list because they are the best at generating heat, and unsurpassed by any other model of fireplaces. Put one of these in your backyard, and you will never be cold again thanks to these outdoor wood burning fireplaces. Tradition is hard to break. 
It’s hard to beat the beauty the flames create from one of these, the sound of the wood burning / cracking or the delightful smell of outdoor wood burning fireplaces. While there is more maintenance involved in these models than gas fireplaces, many people really like the whole process of making a fire and maintaining it by feeding it wood. Wood burning fireplaces are focal points during social gatherings and can create beautiful romantic settings. There is something soothing about wood burning fireplaces which cannot be replaced by any other model. Before purchasing one of these models you should think about certain things that you wouldn’t have to consider with a gas fireplace. When starting a wood burning fireplace, you should always be present to monitor the fire, especially if children are present. With gas fireplaces this isn’t such an issue, since they can be turned on and off very easily. You also have to consider the maintenance. Wood burning fireplaces require regular maintenance including the chimney for safety reasons, as well as the ash left behind from these models. While these models are much more maintenance, it’s hard to replace a wood burning fireplaces with any other model. Radiant wood burning fireplaces are space saving wood burning fireplaces which are very effective at heating your home and you! A radiant wood burning fireplace heats your home by way of infrared radiation, meaning the wood fire first heats your fireplace, then the fireplace gives heat back to you through infrared radiation as well. This heat generated from from these radiant wood burning fireplaces all comes from the walls of your fireplaces. Basically, all fireplaces and stoves produce radiant heat as well as circulating heat (the heat that comes out of forced air heaters). But some units, such as cast iron fireplaces were designed to be more radiant heating wood burning fireplaces. Radiant wood burning fireplaces produce some of the most efficient and comfortable heat of all home heating methods. Radiant heat is basically what you get from the sun, not what comes out of forced air heaters which aren’t as efficient or as healthy compared to radiant wood burning fireplaces. Radiant heat has no odor, noise and no drafts – just even distribution of heat. A radiant wood burning fireplace will make you more comfortable when you get out of bed. Not only will one of these fireplaces or wood burning stoves heat the air around you, they will also heat the ground you walk on. So next time you get out of bed and feel the nice warm ground under your feet, you can thank your radiant wood burning fireplaces. A radiant heater is more comfortable to be around for a couple reasons. Because the heat from a radiant wood burning fireplace is slow moving, you don’t feel drafts or lose heat quickly when a door gets opened. This kind of heat is better for your skin as well, and won’t dry skin out like a forced air heater does. If you have a family with little children who are bound to get curious and touch the fireplace, maybe one of these models aren’t for you. Radiant wood burning fireplaces walls get very hot, and should never be touched. Touching can result in severe burn or injury. If this might ever be a problem in your household, then maybe you shouldn’t invest in one of these radiant wood burning fireplaces. High efficiency tax credit wood fireplaces are wood burning fireplaces that have been EPA (Environmental Protection Agency) certified and rated with a energy efficiency rating of 72% or higher. 
This means that these high efficiency tax credit wood fireplace models will give you more heat for every piece of wood that goes into your wood burning fireplaces. Not only do these high efficiency tax credit wood fireplaces save you money on fire wood, the government is offering a 30% tax credit for sales of any of these models which qualify! With energy prices skyrocketing and the need for the world to start considering alternative fuels, President Barack Obama signed the American Recovery and Reinvestment Act which included owners of these high efficiency tax credit wood fireplaces to receive compensation products that adhere to certain energy efficiency regulations. How Much Money Can I Expect To See Back From Purchasing These High Efficiency Tax Credit Wood Fireplaces? If you purchase any high efficiency tax credit wood fireplace, stoves or inserts that qualify you can get back up to 30% reimbursement in the form of a tax credit up to $1,500 total during the 2009 and 2010 tax years. For more information regarding this, please see www.epa.gov. In this review, Fireplaces and Wood Stoves NOW is going to review the 1400P from Napoleon Fireplaces. This is a very energy efficient wood burning stove has a very large firebox chamber and is great at heating your home. Not only is this wood burning stove built for performance, it is also very appealing on your eyes with the pedestal base. This model is one of the easiest wood burning stoves to maintain, and is very easy to use. If you own one of these, the only heater you’ll need is this high efficiency tax credit wood fireplaces!
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations import json import email.header def _parse_header(header): r = "" for h, c in email.header.decode_header(header): if isinstance(h, bytes): h = h.decode(c or "utf-8", "replace") r += h return r def fix_utf8_recipients(apps, schema_editor): # We can't import the models directly as they may be a newer # version than this migration expects. We use the historical version. Message = apps.get_model("api", "Message") msgs = Message.objects.all() msgs = msgs.filter(recipients__contains="?") for m in msgs: recipients = json.loads(m.recipients) recipients = [[_parse_header(x[0]), x[1]] for x in recipients] m.recipients = json.dumps(recipients) m.save() class Migration(migrations.Migration): dependencies = [("api", "0020_auto_20180204_0647")] operations = [ migrations.RunPython( fix_utf8_recipients, reverse_code=migrations.RunPython.noop ) ]
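The migration's _parse_header helper decodes RFC 2047 encoded-words in stored recipient names. A standalone sketch of that behaviour, using only the standard library; the sample header value is made up.

import email.header

def _parse_header(header):
    # Same logic as in the migration: decode each chunk, falling back to UTF-8.
    r = ""
    for h, c in email.header.decode_header(header):
        if isinstance(h, bytes):
            h = h.decode(c or "utf-8", "replace")
        r += h
    return r

# An encoded-word recipient name as it might be stored in Message.recipients.
print(_parse_header("=?utf-8?q?J=C3=BCrgen_M=C3=BCller?="))  # Jürgen Müller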
We arrived there in the dark of night after all of the stalls had shut down except for two women selling garlands of white flowers. In our bare feet we padded up a long, worn, stone staircase to the summit– up past families noiselessly eating dinner around circular tables. Sleepy cats lazing on wooden window sills. A monk who sat in his simple abode and stared blankly at us with even keeled indifference. Dana Carruth, Love Peace, and 3 more people like this update. Mikey, this is beautiful. I can't wait to see the prints!
from common import * from re import compile, DOTALL, MULTILINE from urlgrab import Cache from urlparse import urljoin linkPattern = compile("<h3><a href=\"(/[^\"]+)\">(.+?)</a></h3>") earlierPattern = compile("<a href='([^\']+)'>.+?Earlier Stories.+?</a>", DOTALL | MULTILINE) titlePattern = compile("<h2>(.+?)</h2>") subtitlePattern = compile("<p class=\"standfirst\">(.+?)</p>") contentPattern = compile("<strong class=\"trailer\">.+?</p>(.+?)(?:(?:<p>(?:(?:<i>)|(?:<small>)|(?:<font size=\"-2\">)|(?:<br>\n))?BOFH .+? Simon Travaglia)|(?:<ul class=\"noindent\">)|(?:<ul>.+?<li><a href=\"http://www.theregister.co.uk/content/30/index.html\">BOFH: The whole shebang</a></li>)|(?:</form>))", DOTALL| MULTILINE) adPattern = compile("(<div id=ad-mu1-spot>.+?</div>)", MULTILINE | DOTALL) episodePattern = compile("<strong class=\"trailer\">Episode \d+") url = "http://www.theregister.co.uk/data_centre/bofh/" pages = [url] cache = Cache() while True: print url data = cache.get(url).read() links = linkPattern.findall(data) if links == []: break pages.insert(0, url) earlier = earlierPattern.findall(data) url = urljoin(url, earlier[0]) skipTitles = ["Salmon Days is Go!"] year = None newItems = False for mainPage in pages: data = cache.get(mainPage).read() links = linkPattern.findall(data) links.reverse() for l in links: url = urljoin(mainPage, l[0]) newyear = url.split("/")[3] if newyear != year: if year != None: if int(newyear) < int(year): raise Exception, (year, newyear) tocEnd(toc) makeMobi(folder, "Simon Travaglia", newitems = newItems) newItems = False folder = "BOFH-%s"%newyear toc = tocStart(folder) year = newyear data = cache.get(url, max_age = -1).read() episode = episodePattern.findall(data) if len(episode) == 0: print "Skipping", url continue print url title = titlePattern.findall(data)[0] print title if title in skipTitles: print "skipping", title continue subtitle = subtitlePattern.findall(data)[0] content = contentPattern.findall(data)[0] ad = adPattern.findall(data)[0] content = content.replace(ad, "") content = content.decode('utf-8') title = title.decode("utf-8") subtitle = subtitle.decode("utf-8") assert len(content)>0 if generatePage(url, title, subtitle + "<br />\n" + content, folder, toc): newItems = True #break print links tocEnd(toc) makeMobi(folder, "Simon Travaglia")
High quality wizard hats. Velvety feel with metallic designs. Assorted colors. 20 inches high. $2.45 each for 48 or more. $2.10 each for 96 or more.
#!/usr/bin/python # -*- coding: utf-8 -*- # # --- BEGIN_HEADER --- # # convolution - http://www.songho.ca/dsp/convolution/convolution.html # Copyright (C) 2011-2012 The CPHHPC Project lead by Brian Vinter # # This file is part of CPHHPC Toolbox. # # CPHHPC Toolbox is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # CPHHPC Toolbox is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, # USA. # # -- END_HEADER --- # """Convolution: http://www.songho.ca/dsp/convolution/convolution.html""" from numcil import zeros def convolve2d(input, window, out=None, data_type=None): """ Convolve two 2-dimensional arrays: http://www.songho.ca/dsp/convolution/convolution.html Parameters ---------- input : ndarray A 2-dimensional input array window: ndarray A 2-dimensional convolution window array (shape must be odd) out : ndarray, optional Output argument. This must have the exact kind that would be returned if it was not used. In particular, it must have the right shape and must be C-contiguous. This is a performance feature. Therefore, if these conditions are not met, an exception is raised, instead of attempting to be flexible. data_type : data-type, optional The precision of the created `out` ndarray if `out` is None Raises ------ ValueError If shape of `window` is even If shape of `out` doesn't match those of `input` """ if window.shape[0] % 2 == 0 or window.shape[1] % 2 == 0: msg = "window.shape: %s is _NOT_ odd" % (str(window.shape)) raise ValueError(msg) window_radius = (window.shape[0]/2, window.shape[1]/2) zero_pad_shape = (input.shape[0] + (window_radius[0]*2), input.shape[1] + (window_radius[1]*2)) zero_padded_input = zeros(zero_pad_shape, dtype=data_type) zero_padded_input[window_radius[0]:-window_radius[0], window_radius[1]:-window_radius[1]] = input if out != None: if out.shape != input.shape: msg = "input.shape: %s and out.shape: %s doesn't match" % (str(input.shape), str(out.shape)) raise ValueError(msg) else: if data_type == None: out = zeros(input.shape, dtype=input.dtype) else: out = zeros(input.shape, dtype=data_type) start_y = window_radius[0]*2 end_y = zero_pad_shape[0] for y in xrange(window.shape[0]): start_x = window_radius[1]*2 end_x = zero_pad_shape[1] for x in xrange(window.shape[1]): tmp = zero_padded_input * window[y][x] out += tmp[start_y:end_y, start_x:end_x] start_x -= 1 end_x -= 1 start_y -= 1 end_y -= 1 return out
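A quick, hedged usage sketch for convolve2d. The module name used in the import is an assumption, and so is feeding it plain NumPy arrays (the module itself pulls zeros from numcil, which must be available for the function to run); the body uses xrange, so this runs under Python 2.

import numpy
from convolution import convolve2d  # assumed module name for the file above

# 4x4 test image and a 3x3 box (averaging) window; window shape must be odd.
image = numpy.arange(16, dtype=numpy.float64).reshape(4, 4)
window = numpy.ones((3, 3), dtype=numpy.float64) / 9.0

result = convolve2d(image, window, data_type=numpy.float64)
print(result.shape)  # (4, 4): output has the same shape as the input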
©inTRAlinea & Alison Marsh (2006). Red Bee Media used to be part of BBC but now is a separate company. BBC is Red Bee Media’s biggest client and all the subtitling at BBC is made by Red Bee Media. First of all, let me explain how respeaking works at Red Bee Media. We use a voice recognition software which is called ViaVoice. Respeakers train the software to recognise their voices. They do a lot of training using ViaVoice so that they can improve their voice model to get it to on-air standards. They also have to use ViaVoice for a lot of preparation because ViaVoice has a very big dictionary, of about 75,000 words. Although it has this big dictionary, it is very English-focussed. But since Red Bee Media covers a lot of live sporting events, news broadcasts, because ViaVoice does not contain a lot of the vocabulary we need (e.g. footballers names or places in Iraq), respeakers have to do a lot of training into ViaVoice so that they can go on-air and subtitle these broadcasts. Once they go on air they use a piece of software which is called K-live, especially designed for BBC’s respeakers by the BBC’s Research and Development department (when respeakers were part of the BBC). It allows respeakers to do whatever they want to do in terms of transmitting subtitles: positioning them on the screen appropriately; broadcasting them in different colours to indicate different speakers; making subtitles more accurate thanks to an automatic correction function; and it also allows respeakers to connect to any of the channels to be subtitled on. Red Bee Media subtitles on the two analogue BBC channels (BBC One and BBC Two) and digital and satellite channels like BBC News 24, BBC Parliament and BBC Three. When a respeaker goes on-air s/he connects to K-live, put his/her headphones on and s/he respeaks what s/he hears into the microphone and s/he and then the voice recognition element of ViaVoice processes the vocal input from the microphone, turns it into text and K-live broadcasts it on the screen so that the words scroll out one by one. So, respeaking is essentially repeating what is heard, inserting the right punctuation, editing when necessary (condensing information to keep up with the speakers because sometimes they speak very fast especially on news broadcasts). In doing this, respeaking involves a complex mental process: hearing one thing, saying another thing, whilst keeping the main ideas and trying to keep a bit the flavour of the programme for the deaf viewers so that they can appreciate a little bit the way a speaker is speaking, the general register of the programme because a football commentator during a match speaks in a totally different way to a politician in the Houses of Parliament. As far as the background to respeaking, it came into being for different reasons. BBC has always been at the forefront in terms of subtitling. The first subtitling departments were set up in London and in Glasgow in the 1980s but initially they only did pre-recorded subtitling. There was no live subtitling. In 1990, the live subtitling unit was created, on a very modest scale. There was no respeaking since all the real-time subtitling was done by stenographers (former court reporters using phonetic keyboards in order to produce texts at a speed of about 250 wpm). Gradually, the live subtitling department began to expand and it started taking on more and more live output. 
In January 2001, development into respeaking began and this happened basically for three reasons: the first was that there was a growing demand for subtitles from the deaf community; the second was some legislation introduced by the Government called the Broadcasting Act, in 1990. This legislation stipulated that all of the major television companies had to increase the proportion of output that they subtitled (and this meant live subtitling as well as pre-recorded subtitling) up to 90% by 2010. Then the BBC set its own target, 100% on its analogue channels by 2008; thirdly, the BBC had to find an alternative way to cover all its live output because stenography is a very specialised skill, you have to train for at least five years to become a professional stenographer. As a result there were not many stenographers available, and the few could demand for very high salaries because it is a very specialised skill. Clearly, it was not possible for BBC to cover all its live output with stenographers because this was not financially viable. So, experiments began in the field of respeaking which was a more practical and cost effective way of subtitling large volumes of live output. The first live respeaking was in April 2001 when the World Snooker Championships were subtitled. Later that year BBC subtitled the coverage of tennis, Wimbledon, which is a very important event in the English sporting calendar. So, BBC began subtitling sports. But gradually respeakers started subtitling more and more output including coverage of BBC Parliament, regional news and later, national news on BBC News 24. As they started knowing more about respeaking, the quality of the subtitles produced by respeaking increased. A very important factor for that was the software they were using, ViaVoice, the quality of which was improved by the introduction of ViaVoice 10, a new version. Then, with the introduction of K-live respeakers began having even more accurate subtitles. As for research, we work in cooperation with the deaf community. We have done a lot of research in the readability of subtitles and we receive a lot of feedback by the deaf about the colours, the kind of subtitles they prefer (block or subtitles the scroll one word at a time). The result of this research has been the creation of house-styles. So we use punctuation in a given way, the number of words to be displayed on the screen to make sure that the deaf viewers can read subtitles easily. Another result we got from the deaf community at the beginning of our experimenting live subtitling was the option for immediacy over accuracy. Currently, Red Bee Media subtitles through respeaking approximately 650 hours per month of live programmes. This means that in summer, when there is a lot of sports, respeakers subtitle more than that but in wintertime they subtitle less because there is less live output to cover. Generally, 24 hours a day of lived subtitling are covered by respeaking on a variety of channels (BBC one, BBC two, the digital and satellite channels). Moreover, since every individual British region has its five individual news per day, even the majority of regional news are subtitled through respeaking. At some times of the day there are 17 different regional programmes going on air simultaneously that would be subtitled by respeakers. Since then the amount of coverage increased and the kind of programmes has become more varied. 
By the time, as respeakers become more experienced and the quality of subtitles they produce increases, respeakers have started covering output the was traditionally the domain of stenographers and programmes such as the national news, current affairs programmes like “News Night” and “Question Time”. Then the number of respeakers has increased quite dramatically over the past three or four years. Now at Red Bee Media there are 50 respeakers working all over the country. The two main offices are in London and in Glasgow, but there are also respeakers working in small offices all around the UK. The software they use, K-live allows people to work even from home so that a number of respeakers can go on air from their living room. Another important aspect of a respeaker’s job is something called scripting, contributing to the pre-recorded subtitling process. Voice recognition is used to produce the scripts of pre-recorded programmes. These scripts are then processed to create subtitle files which are then checked through by pre-recorded subtitlers. So respeakers are used in as many ways as possible. Leaders try to avoid respeakers going on air for too long during the course of the day because there is a lot of strain in the voice and every respeaker has to concentrate very hard to respeak live output. A respeaker is generally given half of live output and half of scripting every day. As far as recruitment is concerned, in the past, respeakers were recruited from the ranks of subtitlers. The recruitment procedure was non-existing. Any subtitler with an interest in respeaking could have a try. However, now, the recruitment procedure is very stringent and respeakers are recruited externally most of the time. Candidates have to pass a series of tests before they can become respeakers. The most important test is called speech test. They are given samples of live output and they have to try respeaking it to see how they get on. Similarly, in the past, the training was very ad hoc. There was not a structured training programme. It was not very clear how long it would take to each candidate to be able to go on air. Nowadays, the training programme is very organised and structured. Leaders know exactly what a respeaker is going to do during the course of their training which nowadays takes between two and three months. Their training is very closely monitored. All that has evolved as the department has expanded. There is also a reviewing system which is in place to ensure that respeakers are consistently producing high quality subtitles (the accuracy target is 97%, that is that over 100 words, three words can be incorrect). Another important aspect is the so called multi-skilling, that is, instead of having separate file and live teams, Red Bee Media tries to combine the two so that every subtitler has experience of respeaking and every respeaker can do pre-recorded subtitling as well. This is to make our workforce as flexible and productive as possible, thus allowing Red Bee Media to attend the BBC’s 100% target by 2008. In the future Red Bee Media live subtitling offices are continuing to expand, particularly those in Glasgow. They are taking more and more flexible people as possible and every person is trained on every aspect of the job. One of the advantages of having this flexible workforce is that respeakers have to work shift works because so as to cover nearly 24 hours on the BBC. This involves very early news broadcasts and very late sport broadcasts. 
So, since every respeaker has to work a variety of shifts, including weekends, the larger the workforce that can cover different shifts, the easier it will be for individuals to work more day shifts and not so many demanding shifts. Lastly, Red Bee Media is experimenting with respeaking in other languages. A lot of respeaking has been done in French, but they are looking into respeaking in Spanish and German. BBC has neither a written list of guidelines nor printed/on-line feedback. The only feedback available is composed of the e-mails they receive from the end users.
import struct import socket from collections import namedtuple IPv4Header = namedtuple('IPv4Header', [ 'version', 'ihl', 'tos', 'total_length', 'id', 'flags', 'fragment_offset', 'ttl', 'proto', 'checksum', 'src', 'dest', ]) class IpHeader(IPv4Header): _format = '!BBHHHBBH4s4s' def pack(self): to_pack = ( self[0] << 4 | self[1], self[2], self[3], self[4], self[5] << 13 | self[6], self[7], self[8], self[9], self[10], self[11], ) return struct.pack(self._format, *to_pack) @classmethod def unpack(cls, byte_obj): (ver_ihl, tos, tot_len, id, flags_offset, *others) = struct.unpack( cls._format, byte_obj) version = ver_ihl >> 4 ihl = ver_ihl & 0xf flags = flags_offset >> 13 fragment_offset = flags_offset & 0x1fff return IPv4Header( version, ihl, tos, tot_len, id, flags, fragment_offset, *others) def __len__(self): return struct.calcsize(self._format) def make_ip_packet(dest_addr, ip_proto, payload, source_addr, ttl=64): source_addr = socket.inet_aton(source_addr) dest_addr = socket.inet_aton(dest_addr) id = 13371 header = IpHeader( 4, 5, 0, 0, id, 2, 0, ttl, ip_proto, 0, source_addr, dest_addr) data = header.pack() + payload return data
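A quick round-trip sketch for the helpers above; the loopback addresses and UDP payload are arbitrary example values, and note that these helpers leave the checksum field at zero.

import socket

packet = make_ip_packet(dest_addr='127.0.0.1', ip_proto=socket.IPPROTO_UDP,
                        payload=b'hello', source_addr='127.0.0.1', ttl=64)

# The header is the first 20 bytes (IHL of 5 means five 32-bit words).
header = IpHeader.unpack(packet[:20])
print(header.version, header.ihl, header.ttl)       # 4 5 64
print(socket.inet_ntoa(header.src), header.proto)   # 127.0.0.1 17
print(packet[20:])                                  # b'hello'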
1. Do not use the garden hose. It’s a common misconception that the added water pressure a water hose provides will unclog drains and flush out pipes. Most of the time, all this is going to do is risk damaging your pipes and create a giant headache for you. The garden hose belongs in the garden. Leave it there. 2. Coat hangers do not work. This should go without saying, but do not, under any circumstances, unbend a coat hanger and slide it down your drain in an attempt to unclog it. This is both frustrating and extremely ineffective. If the coat hanger gets stuck in your pipes, you risk causing even more damage to them, and you’re going to have to call the plumber anyways. 3. Chemical drain cleaners don’t work either. In addition to the obvious safety hazards, most chemical drain cleaners that are on the market today simply do not have the power to effectively clear a clogged drain. They do not have the capability to dissolve solid obstructions. If you have a clogged drain in your home or pipes that need cleaning, do it right the first time by calling a professional plumber from Extreme Plumbing. Don’t risk damaging your pipes and creating an expensive mess. With more than 20 years of experience in the business, Extreme Plumbing offers high quality service 24 hours a day, 7 days a week. Contact us today!
import sh from multiprocessing import cpu_count from os.path import exists, join from pythonforandroid.archs import Arch from pythonforandroid.logger import shprint from pythonforandroid.recipe import Recipe from pythonforandroid.util import current_directory, ensure_dir class LibLzmaRecipe(Recipe): version = '5.2.4' url = 'https://tukaani.org/xz/xz-{version}.tar.gz' built_libraries = {'liblzma.so': 'install/lib'} def build_arch(self, arch: Arch) -> None: env = self.get_recipe_env(arch) install_dir = join(self.get_build_dir(arch.arch), 'install') with current_directory(self.get_build_dir(arch.arch)): if not exists('configure'): shprint(sh.Command('./autogen.sh'), _env=env) shprint(sh.Command('autoreconf'), '-vif', _env=env) shprint(sh.Command('./configure'), '--host=' + arch.command_prefix, '--prefix=' + install_dir, '--disable-builddir', '--disable-static', '--enable-shared', '--disable-xz', '--disable-xzdec', '--disable-lzmadec', '--disable-lzmainfo', '--disable-scripts', '--disable-doc', _env=env) shprint( sh.make, '-j', str(cpu_count()), _env=env ) ensure_dir('install') shprint(sh.make, 'install', _env=env) def get_library_includes(self, arch: Arch) -> str: """ Returns a string with the appropriate `-I<lib directory>` to link with the lzma lib. This string is usually added to the environment variable `CPPFLAGS`. """ return " -I" + join( self.get_build_dir(arch.arch), 'install', 'include', ) def get_library_ldflags(self, arch: Arch) -> str: """ Returns a string with the appropriate `-L<lib directory>` to link with the lzma lib. This string is usually added to the environment variable `LDFLAGS`. """ return " -L" + join( self.get_build_dir(arch.arch), self.built_libraries['liblzma.so'], ) @staticmethod def get_library_libs_flag() -> str: """ Returns a string with the appropriate `-l<lib>` flags to link with the lzma lib. This string is usually added to the environment variable `LIBS`. """ return " -llzma" recipe = LibLzmaRecipe()
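A hedged sketch of how another python-for-android recipe might consume the three flag helpers above. The MyLibRecipe name, URL and build details are hypothetical; Recipe.get_recipe() and get_recipe_env() are the usual hooks, but check them against the framework version you target.

from pythonforandroid.recipe import Recipe

class MyLibRecipe(Recipe):
    # Hypothetical recipe that links against liblzma.
    version = '1.0'
    url = 'https://example.org/mylib-{version}.tar.gz'
    depends = ['liblzma']

    def get_recipe_env(self, arch, **kwargs):
        env = super().get_recipe_env(arch, **kwargs)
        liblzma = Recipe.get_recipe('liblzma', self.ctx)
        env['CPPFLAGS'] = env.get('CPPFLAGS', '') + liblzma.get_library_includes(arch)
        env['LDFLAGS'] = env.get('LDFLAGS', '') + liblzma.get_library_ldflags(arch)
        env['LIBS'] = env.get('LIBS', '') + liblzma.get_library_libs_flag()
        return env

recipe = MyLibRecipe()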
1. Cook the orzo according to the package directions; drain well, running under cool water to cool it down, then dump into a bowl. 2. Thinly slice your spinach: stack a bunch of leaves, roll them up tightly and slice through the whole bunch – or just tear it in with your hands. The method you choose will likely depend on the salad's final destination. Add the onion, feta and lemon zest. 3. Squeeze the lemon juice over the salad, and drizzle with the rice vinegar, oil, salt and pepper. Toss to coat well. Taste it and adjust the seasonings if it needs it. Serve right away or refrigerate until you’re ready for it.
# Licensed to the StackStorm, Inc ('StackStorm') under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from st2tests.base import CleanDbTestCase from st2common.constants.keyvalue import FULL_SYSTEM_SCOPE, SYSTEM_SCOPE, DATASTORE_PARENT_SCOPE from st2common.models.db.keyvalue import KeyValuePairDB from st2common.persistence.keyvalue import KeyValuePair from st2common.services.keyvalues import KeyValueLookup from st2common.util import jinja as jinja_utils from st2common.util.crypto import read_crypto_key, symmetric_encrypt class JinjaUtilsDecryptTestCase(CleanDbTestCase): def test_filter_decrypt_kv(self): secret = 'Build a wall' crypto_key_path = cfg.CONF.keyvalue.encryption_key_path crypto_key = read_crypto_key(key_path=crypto_key_path) secret_value = symmetric_encrypt(encrypt_key=crypto_key, plaintext=secret) KeyValuePair.add_or_update(KeyValuePairDB(name='k8', value=secret_value, scope=FULL_SYSTEM_SCOPE, secret=True)) env = jinja_utils.get_jinja_environment() context = {} context.update({SYSTEM_SCOPE: KeyValueLookup(scope=SYSTEM_SCOPE)}) context.update({ DATASTORE_PARENT_SCOPE: { SYSTEM_SCOPE: KeyValueLookup(scope=FULL_SYSTEM_SCOPE) } }) template = '{{st2kv.system.k8 | decrypt_kv}}' actual = env.from_string(template).render(context) self.assertEqual(actual, secret)
If the valve position is not advancing according to the relationship above, an adjustment is required. Use “zero adjustment” to set the valve position to a specific input value. For example, with an input of 12 mA, the valve should be at 50%. Use the “span adjustment” to adjust the % change the valve moves relative to the input value. 6. Stroke the valve – Take the valve through a complete cycle of 4 – 20 mA inputs, verifying that the correct valve position exists for a given input value.
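The relationship described above is a linear map from the 4–20 mA signal to 0–100% travel (so 12 mA sits at 50%). A small sketch for computing the expected position at each check point while stroking the valve:

def expected_position(milliamps):
    """Expected valve position (%) for a 4-20 mA input signal."""
    return (milliamps - 4.0) / 16.0 * 100.0

# Check points for step 6: stroke the valve through a full cycle.
for signal in (4.0, 8.0, 12.0, 16.0, 20.0):
    print("%5.1f mA -> %5.1f %%" % (signal, expected_position(signal)))
# 4.0 mA -> 0.0 %, 12.0 mA -> 50.0 %, 20.0 mA -> 100.0 %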
import os class Core(object): """ The core of the Romaine, provides BDD test API. """ # All located features feature_file_paths = set() instance = None def __init__(self): """ Initialise Romaine core. """ self.steps = {} Core.instance = self def locate_features(self, path): """ Locate any features given a path. Keyword arguments: path -- The path to search for features, recursively. Returns: List of features located in the path given. """ walked_paths = os.walk(path) # Features in this path are stored in an intermediate list before # being added to the class variable so that we can return only the # ones we find on this invocation of locate_features feature_candidates = [] for walked_path in walked_paths: base_directory, sub_directories, feature_files = walked_path for feature_file in feature_files: feature_candidates.append( os.path.join( base_directory, feature_file ) ) self.feature_file_paths.update(feature_candidates) return feature_candidates
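A quick usage sketch for the Core API above; the './features' directory is an arbitrary example path.

core = Core()

# Recursively gather every file found under ./features on this call.
found = core.locate_features('./features')
print(len(found), 'feature files located on this call')

# Paths accumulate across calls on the shared class-level set.
print(sorted(Core.instance.feature_file_paths))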
Older people who practice tai chi appear to be less likely to fall, according to new research in the British Journal of Sports Medicine. Scientists at the Korea Institute of Oriental Medicine and the University of Exeter analysed data contained in 35 reviews, all of which had looked at the benefits of tai chi. They found relatively clear evidence that the Chinese martial art is effective at preventing falls and improving mental wellbeing among older people. However, evidence for many other health conditions was contradictory and the researchers found that tai chi was not beneficial for relieving symptoms of cancer or rheumatoid arthritis. 'Our overview showed that tai chi, which combines deep breathing and relaxation with slow and gentle movements, may exert exercise-based general benefits for fall prevention and improvement of balance in older people, as well as some meditative effects for improving psychological health,' the researchers concluded. Tai chi is said to have originated in the 16th century and has since developed into dozens of different styles.
from PyQt5.QtWidgets import QAbstractItemView, QFrame, QListWidget, QListWidgetItem from PyQt5.QtCore import Qt import os from mimetypes import MimeTypes from urllib import request from PyQt5 import QtGui, QtCore import savedData as sd from pytube import YouTube from pprint import pprint # Customized list widget item to hold more data than just the absolute path of the item # class MyListWidgetItem(QListWidgetItem): def __init__(self, path, dest, video_checked=False, video=None, audio_checked=False, audio=None): super().__init__() self.absFilePath = path # determine if new item is a url or file path - handle accordingly if os.path.exists(path): print("path exists:", path) # extract the path without the filename and render it back to a string self.path = '/'.join(path.split('/')[0:-1]) + '/' # print("directory path: " + self.path) # idk if this is useful anymore # extract the last part of the path to get the file name self.fName = path.split('/')[-1] # file name without the extension self.no_extension = self.fName.split('.')[0] # use MimeTypes to determine the file type self.fType = identifyItem(path) # set the save destination for when the conversion is done self.fDest = dest # the audio/video type to convert to if they have one - blank by default # TODO maybe make them the currently checked values? and/or reset checked values when adding new item? self.audio = audio if audio_checked else "" self.video = video if video_checked else "" else: print("Pathhh:", path) # TODO put something here? see how this corresponds to the above self.path self.path = path self.yt = YouTube(path) # use the youtube scraper to get the youtube video name self.no_extension = self.yt.filename self.fName = self.no_extension + "." + sd.initVidFmt self.fType = ('youtube/video', None) # save a custom mime-type TODO extract the mime type from the metadata self.fDest = dest self.audio = audio if audio_checked else "" self.video = video if video_checked else "" print("fType:", self.fType) def __repr__(self): try: return self.fName except Exception as err: print("I think fName is trying to be accessed when it hasn't been created:") pprint(err.args) def getAudio(self, audio=""): if audio != "": self.audio = audio return self.audio def getVideo(self, video=""): if video != "": self.video = video return self.video def getFileType(self): return self.fType # identify the type of item the user is adding to the queue # def identifyItem(path): """ :param path: the file path or url the user is providing :return: the type of file the user is providing """ mime = MimeTypes() url = request.pathname2url(path) mime_type = mime.guess_type(url) print("MimeType: " + str(mime_type)) return mime_type # Customized list widget to allow internal/external drag-and-drop actions # class MyListWidget(QListWidget): def __init__(self, parent): super(MyListWidget, self).__init__(parent) self.setAcceptDrops(True) self.setDragDropMode(QAbstractItemView.InternalMove) self.setFrameShadow(QFrame.Plain) self.setFrameShape(QFrame.Box) # do stuff if a dragged item enters the widget # def dragEnterEvent(self, event): if event.mimeData().hasUrls(): event.acceptProposedAction() else: super(MyListWidget, self).dragEnterEvent(event) # do stuff repeatedly if a dragged item is moving around in the widget # def dragMoveEvent(self, event): super(MyListWidget, self).dragMoveEvent(event) # handle internal and external drag-and-drop actions # def dropEvent(self, event): # capture the main windows audio/video configuration to be applied to the next added items 
video_checked = self.parent().parent().parent().ui.chk_video.isChecked() audio_checked = self.parent().parent().parent().ui.chk_audio.isChecked() video = self.parent().parent().parent().ui.combo_video.currentText() audio = self.parent().parent().parent().ui.combo_audio.currentText() # handle external drop if event.mimeData().hasUrls(): for url in event.mimeData().urls(): print("url: " + str(url)) print(url) path = url.toLocalFile() if os.path.isfile(path): item = MyListWidgetItem(path, sd.initSaveDir, video_checked, video, audio_checked, audio) print("local file:", item) self.addItem(item) else: item = MyListWidgetItem(url.toString(), sd.initSaveDir, video_checked, video, audio_checked, audio) print("Youtube Video:", item) self.addItem(item) # make the item display its name self.item(self.count() - 1).setText(item.no_extension) self.item(self.count() - 1).setSelected(True) else: # default internal drop super(MyListWidget, self).dropEvent(event) # noinspection PyArgumentList def keyPressEvent(self, event): """ Assign the following functions to keystrokes delete -> delete the highlighted items ctrl + a -> highlight all items in the queue :param event: signal event to determine if it's a keyboard event """ # TODO make arrow keys move selection to above/below item # TODO Ctrl + arrow keys to move the highlighted items priority modifiers = QtGui.QApplication.keyboardModifiers() if event.key() == Qt.Key_Delete: self._del_item() elif modifiers == QtCore.Qt.ControlModifier and event.key() == Qt.Key_A: self._highlight_all() # remove the selected item def _del_item(self): for item in self.selectedItems(): self.takeItem(self.row(item)) # highlight all items in the list def _highlight_all(self): self.selectAll()
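A standalone sketch of the mimetypes lookup that identifyItem() (defined alongside MyListWidgetItem above) relies on; the paths are arbitrary examples. Paths that do not exist locally never reach this lookup — the item code tags them with its own ('youtube/video', None) type instead.

from mimetypes import MimeTypes
from urllib import request

mime = MimeTypes()
for path in ('/home/user/video.mp4', '/home/user/song.mp3'):
    url = request.pathname2url(path)
    # Typically ('video/mp4', None) and ('audio/mpeg', None).
    print(path, '->', mime.guess_type(url))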
Pushing my grocery cart through the aisles at Food Lion I was all about trying to find the 9 or 10 things on my shopping list and out of the blue someone bumped into my cart and burst out laughing. My first reaction was to frown, well, scowl and snarl, actually. But then I saw that it was Sheila and I had to bust out a laugh, too. I cannot believe that Sheila shops at the same grocery store that I do. She is clear on the other side of the Lake and the Kroger is much closer to her. But she says that she likes the smaller store because she can get what she wants and check out ten times faster than going to the huge, crowded Kroger. So I have to agree, I avoid the Kroger for quick trips like today and only go there once every 4 to 6 weeks to stock up on meat and other things that Food Lion doesn’t carry. The new Superintendent of Schools has decided that the only way the kids here will learn enough to pass the minimum grade levels tests is to go to school for more days out the year. He wants to take away most of the summer vacation days and make them go to school earlier in the year. I already think it is ludicrous for the kids to start back the first week of August. That is awfully early to be going to school when most of the rest of the country does not start until after Labor Day. As far as I am concerned, they need to bring in extra teachers and extra tutors to help the kids who are struggling. The kids who are average and better are being punished for being in the same classes as the dumb ones.
"""Contains base class for completers. Attributes: log (logging.Logger): logger for this module """ import logging from ..tools import Tools log = logging.getLogger("ECC") class BaseCompleter: """A base class for clang based completions. Attributes: compiler_variant (CompilerVariant): compiler specific options valid (bool): is completer valid version_str (str): version string of format "3.4.0" error_vis (obj): an object of error visualizer """ name = "base" valid = False def __init__(self, settings, error_vis): """Initialize the BaseCompleter. Args: settings (SettingsStorage): an object that stores current settings error_vis (ErrorVis): an object of error visualizer Raises: RuntimeError: if clang not defined we throw an error """ # check if clang binary is defined if not settings.clang_binary: raise RuntimeError("clang binary not defined") self.compiler_variant = None self.version_str = settings.clang_version self.clang_binary = settings.clang_binary # initialize error visualization self.error_vis = error_vis def complete(self, completion_request): """Function to generate completions. See children for implementation. Args: completion_request (ActionRequest): request object Raises: NotImplementedError: Guarantees we do not call this abstract method """ raise NotImplementedError("calling abstract method") def info(self, tooltip_request): """Provide information about object in given location. Using the current translation unit it queries libclang for available information about cursor. Args: tooltip_request (tools.ActionRequest): A request for action from the plugin. Raises: NotImplementedError: Guarantees we do not call this abstract method """ raise NotImplementedError("calling abstract method") def update(self, view, settings): """Update the completer for this view. This can increase consequent completion speeds or is needed to just show errors. Args: view (sublime.View): this view settings: all plugin settings Raises: NotImplementedError: Guarantees we do not call this abstract method """ raise NotImplementedError("calling abstract method") def show_errors(self, view, output): """Show current complie errors. Args: view (sublime.View): Current view output (object): opaque output to be parsed by compiler variant """ errors = self.compiler_variant.errors_from_output(output) if not Tools.is_valid_view(view): log.error("cannot show errors. View became invalid!") return self.error_vis.generate(view, errors) self.error_vis.show_errors(view)
Jefferson County crews extinguished a wildfire Thursday that appeared to have been sparked by an unattended campfire on a beach near Coyle, Quilcene Fire Chief Larry Karp said. The fire burned about a tenth of an acre in a steep, forested area at the southern tip of Toandos Peninsula. “It traveled from a beach up a hillside through trees and brush,” Karp said in a telephone interview. The campers who were believed to have started the fire had left the area before crews arrived Thursday morning. “Nobody fessed up to it,” Karp said. Smoke from the fire was reported as being seen from Kitsap County at about 9 a.m. Thursday. Crews from the Quilcene Fire Department and other Jefferson County agencies extinguished the blaze at about noon. “All fire districts in Jefferson County were involved,” Karp said. A state Department of Natural Resources crew was called in for mop-up duty. The campfire appeared to have spread to two small boats on the beach, which generated enough heat to ignite the nearby forest, Karp said. Karp reminded the public to extinguish campfires by dousing them with water and stirring the ashes with a shovel. “Make sure that it’s cold to the touch,” Karp said. Quilcene Fire, or Jefferson County Fire District 2, responded with three tenders and three brush rigs. Meanwhile, Clallam County Fire District 2 firefighters knocked down a small brush fire that was believed to have been started by fireworks west of Port Angeles on Wednesday. Crews quickly extinguished the 15- by 30-foot blaze that was burning in grass and light brush at 400 Charles Road on the Lower Elwha Klallam tribal reservation, according to a news release. It was determined that the Charles Road fire had been started by fireworks, Clallam 2 Fire-Rescue officials said. The fire district responded with a standard engine, fast attack wildland brush engine, command vehicle and five firefighters. Other firefighters and two 3,000-gallon water tenders were on standby for the Fourth of July, officials said. Peninsula Communications received 61 reports of fireworks violations between 6 p.m. Wednesday and 2 a.m. Thursday, according to the dispatch center’s call for service log. No major fires were reported on the North Olympic Peninsula on the Fourth of July. “We had zero fireworks-related incidents,” said Ben Andrews, chief of Sequim-area Clallam County Fire District 3. East Jefferson Fire-Rescue spokesman Bill Beezley said there were no fireworks-related calls to his district Wednesday. “We had a really, really quiet evening,” Beezley said. Reporter Rob Ollikainen can be reached at 360-452-2345, ext. 56450, or at [email protected].
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import brbn import email.utils as _email import json as _json import logging as _logging import os as _os import quopri as _quopri import re as _re import sqlite3 as _sqlite import time as _time import textwrap as _textwrap from datetime import datetime as _datetime from pencil import * _log = _logging.getLogger("haystack") _strings = StringCatalog(__file__) _topics = _json.loads(_strings["topics"]) class Haystack(brbn.Application): def __init__(self, home_dir): super().__init__(home_dir) path = _os.path.join(self.home, "data", "data.sqlite") self.database = Database(path) self.root_resource = _IndexPage(self) self.search_page = _SearchPage(self) self.thread_page = _ThreadPage(self) self.message_page = _MessagePage(self) def receive_request(self, request): request.database_connection = self.database.connect() try: return super().receive_request(request) finally: request.database_connection.close() class _IndexPage(brbn.Page): def __init__(self, app): super().__init__(app, "/", _strings["index_page_body"]) def get_title(self, request): return "Haystack" @brbn.xml def render_topics(self, request): items = list() for topic in _topics: href = self.app.search_page.get_href(request, query=topic) text = xml_escape(topic) items.append(html_a(text, href)) return html_ul(items, class_="four-column") class _SearchPage(brbn.Page): def __init__(self, app): super().__init__(app, "/search", _strings["search_page_body"]) def get_title(self, request): query = request.get("query") return "Search '{}'".format(query) def render_query(self, request): return request.get("query") @brbn.xml def render_threads(self, request): query = request.get("query") sql = ("select * from messages where id in " "(select distinct thread_id from messages_fts " " where messages_fts match ? limit 1000) " "order by date desc") escaped_query = query.replace("\"", "\"\"") records = self.app.database.query(request, sql, escaped_query) thread = Thread() rows = list() for record in records: thread.load_from_record(record) thread_link = thread.get_link(request) row = [ thread_link, xml_escape(thread.from_address), thread.authored_words, xml_escape(str(_email.formatdate(thread.date)[:-6])), ] rows.append(row) return html_table(rows, False, class_="messages four") class _ThreadPage(brbn.Page): def __init__(self, app): super().__init__(app, "/thread", _strings["thread_page_body"]) def get_title(self, request): return "Thread '{}'".format(request.thread.subject) def process(self, request): id = request.get("id") request.thread = self.app.database.get(request, Message, id) sql = ("select * from messages " "where thread_id = ? 
" "order by thread_position, date asc " "limit 1000") records = self.app.database.query(request, sql, request.thread.id) request.messages = list() request.messages_by_id = dict() for record in records: message = Message() message.load_from_record(record) request.messages.append(message) request.messages_by_id[message.id] = message def render_title(self, request): return request.thread.subject @brbn.xml def render_index(self, request): rows = list() for i, message in enumerate(request.messages): date = _time.strftime("%d %b %Y", _time.gmtime(message.date)) number = i + 1 title = self.get_message_title(request, message, number) row = [ html_a(xml_escape(title), "#{}".format(number)), xml_escape(date), message.authored_words, ] rows.append(row) return html_table(rows, False, class_="messages") @brbn.xml def render_messages(self, request): out = list() for i, message in enumerate(request.messages): number = i + 1 title = self.get_message_title(request, message, number) out.append(html_elem("h2", title, id=str(number))) out.append(html_elem("pre", xml_escape(message.content))) return "\n".join(out) def get_message_title(self, request, message, number): title = "{}. {}".format(number, message.from_name) if message.in_reply_to_id is not None: rmessage = request.messages_by_id.get(message.in_reply_to_id) if rmessage is not None: rperson = rmessage.from_name title = "{} replying to {}".format(title, rperson) return title class _MessagePage(brbn.Page): def __init__(self, app): super().__init__(app, "/message", _strings["message_page_body"]) def get_title(self, request): return "Message '{}'".format(request.message.subject) def process(self, request): id = request.get("id") request.message = self.app.database.get(request, Message, id) def render_title(self, request): return request.message.subject @brbn.xml def render_thread_link(self, request): thread = None thread_id = request.message.thread_id thread_link = xml_escape(thread_id) if thread_id is not None: try: thread = self.app.database.get(request, Message, thread_id) except ObjectNotFound: pass if thread is not None: thread_link = thread.get_link(request) return thread_link @brbn.xml def render_in_reply_to_link(self, request): rmessage = None rmessage_id = request.message.in_reply_to_id rmessage_link = nvl(xml_escape(rmessage_id), "[None]") if rmessage_id is not None: try: rmessage = self.database.get(request, Message, rmessage_id) except ObjectNotFound: pass if rmessage is not None: rmessage_link = rmessage.get_link(request) return rmessage_link @brbn.xml def render_headers(self, request): message = request.message from_field = "{} <{}>".format(message.from_name, message.from_address) items = ( ("ID", xml_escape(message.id)), ("List", xml_escape(message.list_id)), ("From", xml_escape(from_field)), ("Date", xml_escape(_email.formatdate(message.date))), ("Subject", xml_escape(message.subject)), ) return html_table(items, False, True, class_="headers") @brbn.xml def render_content(self, request): message = request.message content = "" if message.content is not None: lines = list() for line in message.content.splitlines(): line = line.strip() if line.startswith(">"): m = _re.match("^[> ]+", line) prefix = "\n{}".format(m.group(0)) line = prefix.join(_textwrap.wrap(line, 80)) line = html_span(xml_escape(line), class_="quoted") else: line = "\n".join(_textwrap.wrap(line, 80)) line = xml_escape(line) lines.append(line) content = "\n".join(lines) return content class Database: def __init__(self, path): self.path = path _log.info("Using database at 
{}".format(self.path)) def connect(self): # XXX thread local connections return _sqlite.connect(self.path) def create_schema(self): columns = list() for name in Message.fields: field_type = Message.field_types.get(name, str) column_type = "text" if field_type == int: column_type = "integer" column = "{} {}".format(name, column_type) columns.append(column) statements = list() columns = ", ".join(columns) ddl = "create table messages ({});".format(columns) statements.append(ddl) ddl = "create index messages_id_idx on messages (id);" statements.append(ddl) columns = ", ".join(Message.fts_fields) ddl = ("create virtual table messages_fts using fts4 " "({}, notindexed=id, notindexed=thread_id, tokenize=porter)" "".format(columns)) statements.append(ddl) conn = self.connect() cursor = conn.cursor() try: for statement in statements: cursor.execute(statement) finally: conn.close() def optimize(self): conn = self.connect() cursor = conn.cursor() ddl = "insert into messages_fts (messages_fts) values ('optimize')" try: cursor.execute(ddl) finally: conn.close() def cursor(self, request): return request.database_connection.cursor() def query(self, request, sql, *args): cursor = self.cursor(request) try: cursor.execute(sql, args) return cursor.fetchall() finally: cursor.close() def get(self, request, cls, id): _log.debug("Getting {} with ID {}".format(cls.__name__, id)) assert issubclass(cls, _DatabaseObject), cls assert id is not None sql = "select * from {} where id = ?".format(cls.table) cursor = self.cursor(request) try: cursor.execute(sql, [id]) record = cursor.fetchone() finally: cursor.close() if record is None: raise ObjectNotFound() obj = cls() obj.load_from_record(record) return obj class ObjectNotFound(Exception): pass class _DatabaseObject: table = None def __init__(self, id, name, parent=None): self.id = id self._name = name self.parent = parent def __repr__(self): return format_repr(self, self.id) @property def name(self): return self._name def get_link_href(self, request): raise NotImplementedError() def get_link_text(self, request): return self.name def get_link(self, request, text=None): href = self.get_link_href(request) if text is None: text = self.get_link_text(request) return "<a href=\"{}\">{}</a>".format(href, xml_escape(text)) class Message(_DatabaseObject): table = "messages" fields = [ "id", "in_reply_to_id", "from_name", "from_address", "list_id", "date", "subject", "content_type", "content", "authored_content", "authored_words", "thread_id", "thread_position", ] field_types = { "date": int, "authored_words": int, "thread_position": int, } field_mbox_keys = { "id": "Message-ID", "in_reply_to_id": "In-Reply-To", "list_id": "List-Id", "subject": "Subject", "content_type": "Content-Type", } fts_fields = [ "id", "thread_id", "subject", "authored_content", ] def __init__(self): super().__init__(None, None) for name in self.fields: setattr(self, name, None) @property def name(self): return self.subject def load_from_mbox_message(self, mbox_message): for name in self.field_mbox_keys: mbox_key = self.field_mbox_keys[name] value = mbox_message.get(mbox_key) field_type = self.field_types.get(name, str) if value is not None: value = field_type(value) setattr(self, name, value) name, address = _email.parseaddr(mbox_message["From"]) self.from_name = name self.from_address = address tup = _email.parsedate(mbox_message["Date"]) self.date = _time.mktime(tup) content = _get_mbox_content(mbox_message) assert content is not None self.content = content self.authored_content = 
_get_authored_content(self.content) self.authored_words = len(self.authored_content.split()) def load_from_record(self, record): for i, name in enumerate(self.fields): value = record[i] field_type = self.field_types.get(name, str) if value is not None: value = field_type(value) setattr(self, name, value) def save(self, cursor): columns = ", ".join(self.fields) values = ", ".join("?" * len(self.fields)) args = [getattr(self, x) for x in self.fields] dml = "insert into messages ({}) values ({})".format(columns, values) cursor.execute(dml, args) columns = ", ".join(self.fts_fields) values = ", ".join("?" * len(self.fts_fields)) args = [getattr(self, x) for x in self.fts_fields] dml = "insert into messages_fts ({}) values ({})".format(columns, values) cursor.execute(dml, args) def get_link_href(self, request): return request.app.message_page.get_href(request, id=self.id) def get_link_title(self, request): return self.subject class Thread(Message): def get_link_href(self, request): return request.app.thread_page.get_href(request, id=self.id) def _get_mbox_content(mbox_message): content_type = None content_encoding = None content = None if mbox_message.is_multipart(): for part in mbox_message.walk(): if part.get_content_type() == "text/plain": content_type = "text/plain" content_encoding = part["Content-Transfer-Encoding"] content = part.get_payload() if content_type is None: content_type = mbox_message.get_content_type() content_encoding = mbox_message["Content-Transfer-Encoding"] content = mbox_message.get_payload() assert content_type is not None assert content is not None if content_encoding == "quoted-printable": content = _quopri.decodestring(content) content = content.decode("utf-8", errors="replace") if content_type == "text/html": content = strip_tags(content) return content def _get_authored_content(content): lines = list() for line in content.splitlines(): line = line.strip() if line.startswith(">"): continue lines.append(line) return "\n".join(lines)
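To make the quoting behavior concrete, here is a small, self-contained sketch (the sample reply text is invented for illustration) of what _get_authored_content returns and how that feeds Message.authored_words:

reply = (
    "> Did the build pass?\n"
    "Yes, it passed on the second try.\n"
    "> Great, thanks.\n"
    "I'll merge it tomorrow.\n"
)

authored = _get_authored_content(reply)
print(authored)
# Yes, it passed on the second try.
# I'll merge it tomorrow.
print(len(authored.split()))   # 11 -- this is what Message.authored_words stores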
I've already written about how Teen Author Week was great, but now I will give you evidence. I stood behind a bookcase at Books of Wonder to get this footage of Tiger Beat singing "Heroes" (forgive me if it's a little shaky!). Eat your heart out, David Bowie. That's Libba Bray singing, Daniel Ehrenhaft on lead guitar, Barnabas Miller on drums and Natalie Standiford on bass! I have two more songs on film, so I'll upload them all to YouTube soon; I just had to share a favorite here.
## # wrapping: A program making it easy to use hyperparameter # optimization software. # Copyright (C) 2013 Katharina Eggensperger and Matthias Feurer # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import os import unittest import sys import HPOlib.benchmark_util as benchmark_util class BenchmarkUtilTest(unittest.TestCase): def setUp(self): # Change into the parent of the test directory os.chdir(os.path.join("..", os.path.dirname(os.path.realpath(__file__)))) # Make sure there is no config file try: os.remove("./config.cfg") except: pass def test_read_parameters_from_command_line(self): # Legal call sys.argv = ["test.py", "--folds", "10", "--fold", "0", "--params", "-x", "3"] args, params = benchmark_util.parse_cli() self.assertEqual(params, {'x': '3'}) self.assertEqual(args, {'folds': '10', 'fold': '0'}) # illegal call, arguments with one minus before --params sys.argv = ["test.py", "-folds", "10", "--fold", "0", "--params", "-x", "3"] with self.assertRaises(ValueError) as cm1: benchmark_util.parse_cli() self.assertEqual(cm1.exception.message, "You either try to use arguments" " with only one leading minus or try to specify a " "hyperparameter before the --params argument. test.py" " -folds 10 --fold 0 --params -x 3") # illegal call, trying to specify an arguments after --params sys.argv = ["test.py", "--folds", "10", "--params", "-x", "'3'", "--fold", "0"] with self.assertRaises(ValueError) as cm5: benchmark_util.parse_cli() self.assertEqual(cm5.exception.message, "You are trying to specify an argument after the " "--params argument. Please change the order.") # illegal call, no - in front of parameter name sys.argv = ["test_cv.py", "--params", "x", "'5'"] with self.assertRaises(ValueError) as cm2: benchmark_util.parse_cli() self.assertEqual(cm2.exception.message, "Illegal command line string, expected a hyperpara" "meter starting with - but found x")
import asyncio import socket from cloudbot import hook socket.setdefaulttimeout(10) # Auto-join on Invite (Configurable, defaults to True) @asyncio.coroutine @hook.irc_raw('INVITE') def invite(irc_paramlist, conn, event): """ :type irc_paramlist: list[str] :type conn: cloudbot.client.Client """ invite_join = conn.config.get('invite_join', True) if invite_join: conn.join(irc_paramlist[-1]) invite = event.irc_raw.replace(":", "") head, sep, tail = invite.split()[ 0].partition('!') # message(invite.split()[0] + " invited me to " + invite.split()[-1], invite.split()[-1]) conn.message(irc_paramlist[-1].strip(":"), "Hello! I'm " + conn.config["nick"] + ". " + head + " invited me here! Check what I can do with " + conn.config[ "command_prefix"] + "help.") conn.message(irc_paramlist[-1].strip(":"), "You can check more info about me at github : https://github.com/paris-ci/CloudBot") # Identify to NickServ (or other service) @asyncio.coroutine @hook.irc_raw('004') def onjoin(conn, bot): """ :type conn: cloudbot.clients.clients.IrcClient :type bot: cloudbot.bot.CloudBot """ bot.logger.info("[{}|misc] Bot is sending join commands for network.".format(conn.name)) nickserv = conn.config.get('nickserv') if nickserv and nickserv.get("enabled", True): bot.logger.info("[{}|misc] Bot is authenticating with NickServ.".format(conn.name)) nickserv_password = nickserv.get('nickserv_password', '') nickserv_name = nickserv.get('nickserv_name', 'nickserv') nickserv_account_name = nickserv.get('nickserv_user', '') nickserv_command = nickserv.get('nickserv_command', 'IDENTIFY') if nickserv_password: if "censored_strings" in bot.config and nickserv_password in bot.config['censored_strings']: bot.config['censored_strings'].remove(nickserv_password) if nickserv_account_name: conn.message(nickserv_name, "{} {} {}".format(nickserv_command, nickserv_account_name, nickserv_password)) else: conn.message(nickserv_name, "{} {}".format(nickserv_command, nickserv_password)) if "censored_strings" in bot.config: bot.config['censored_strings'].append(nickserv_password) yield from asyncio.sleep(1) # Set bot modes mode = conn.config.get('mode') if mode: bot.logger.info("[{}|misc] Bot is setting mode on itself: {}".format(conn.name, mode)) conn.cmd('MODE', conn.nick, mode) # Join config-defined channels bot.logger.info("[{}|misc] Bot is joining channels for network.".format(conn.name)) for channel in conn.channels: conn.join(channel) yield from asyncio.sleep(0.4) conn.ready = True bot.logger.info("[{}|misc] Bot has finished sending join commands for network.".format(conn.name)) @asyncio.coroutine @hook.irc_raw('004') def keep_alive(conn): """ :type conn: cloudbot.clients.clients.IrcClient """ keepalive = conn.config.get('keep_alive', False) if keepalive: while True: conn.cmd('PING', conn.nick) yield from asyncio.sleep(60)
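For reference, a hedged sketch of the per-connection settings the hooks above read; the key names come straight from the conn.config lookups in this plugin, but the overall config file layout belongs to CloudBot and the values shown are placeholders.

# Illustrative only -- key names mirror the lookups above; values are placeholders.
example_connection_config = {
    "nick": "CloudBot",
    "command_prefix": ".",
    "invite_join": True,         # join a channel when invited (defaults to True)
    "mode": "+B",                # user mode applied after connect (hypothetical value)
    "keep_alive": False,         # when True, PING the server every 60 seconds
    "nickserv": {
        "enabled": True,
        "nickserv_name": "nickserv",
        "nickserv_command": "IDENTIFY",
        "nickserv_user": "",              # optional account name
        "nickserv_password": "changeme",  # placeholder
    },
}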
Alltech founder and president Pearse Lyons has built a global business based in Kentucky. Alltech, the Nicholasville-based feed supplement company, was suspended for a year from a major poultry industry trade show after company founder T. Pearse Lyons was accused of searching through a competitor's booth after hours. The competitor, Diamond V of Cedar Rapids, Iowa, filed a formal complaint with the Midwest Poultry Federation at its annual convention in Minneapolis in March 2009. "That incident did occur and we did consider it a serious incident," said Mike Mitchell, director of the company's North American Poultry division. In response to a Herald-Leader inquiry about the 2009 incident, Alltech said it was "a misunderstanding" that the company resolved by stepping aside from the trade show for a year. "We gave them (Alltech) an opportunity to respond. They declined to dispute that," he said. In a 2009 email to all exhibitors, the federation said: "One exhibitor was observed, shortly after show hours, photographing a competitor's booth and opening closed cabinets to view promotional materials." Alltech spokesman Billy Frey said the company's lawyers also reviewed the video and "saw no wrongdoing." He said Alltech's attorneys did not see Lyons clearly on tape. The lawyers "said there was no implicit evidence," said Frey, who has not seen the footage. Olson disputes that the security camera footage, viewed by convention officials, cleared Lyons. "It was part of what we based our decision on. That and a corroborating witness," he said. "The CCTV (closed-circuit television) didn't show everything, but it showed enough." The federation's board voted unanimously to ban Alltech from the trade show for a year, saying in an email to other exhibitors, "At the core of our relationship is our integrity and we do not take this lightly, nor for granted." Frey questioned why the 2009 incident is only now coming to light. He noted that Alltech recently accused Diamond V of false advertising and sent them a cease-and-desist letter. "I'm wondering if they're retaliating against us," Frey said. "This is a story that seems kind of out-of-the-blue, that a story like this would come up two years later, after it's been completely resolved and we've moved on and we've exhibited at the show, with no incidences there." Mitchell said he was unaware of Diamond V receiving any recent cease-and-desist letters from Alltech. The trade show incident surfaces publicly as the international nutritional supplements company faces a sexual harassment lawsuit filed last month by the company's former U.S. controller. Amanda Jo Wester alleged in her suit against Alltech that she suffered three years of salacious emails from her former boss, Eric Lanz, and that top officials ignored her complaints, then retaliated by removing job duties. Wester also cited behavior by Lyons, such as slapping her and another female employee on the bottom, that she said set a permissive tone at the company. Alltech responded to Wester's allegations with a statement that said the company's policies prohibit harassment and that it provides a "productive and comfortable work environment." Alltech in recent years has aggressively pursued sales growth around the world, with Lyons as the public face of the company. Most notably, the company spent more than $30 million to sponsor and support the 2010 Alltech FEI World Equestrian Games. 
In a written statement about the 2009 trade show incident, Alltech spokeswoman Susanna Elliott said complaints among competitors at trade shows are "very common." "The misunderstanding involved competitor complaints alleging that Alltech broke an exhibitor rule," Elliott said. "Alltech exhibits at hundreds of trade shows around the world each year, and complaints at trade shows are very common between competitors. We were invited by organizers to respond to the allegations and did so privately. We asked for a review including CCTV footage, which indicated no wrongdoing. We chose, however, not to prolong the issue." Olson said this week he can't speak for other shows Alltech attends but that in the 11 years he's been with the Midwest Poultry Federation this is the only time an incident like this has occurred. "At our show, we don't have those kinds of complaints between competitors," he said.
# # Copyright (c) 2009-2015 Tom Keffer <[email protected]> # # See the file LICENSE.txt for your full rights. # """Classees and functions for interfacing with an Oregon Scientific WMR100 station. The WMRS200 reportedly works with this driver (NOT the WMR200, which is a different beast). The wind sensor reports wind speed, wind direction, and wind gust. It does not report wind gust direction. WMR89: - data logger - up to 3 channels - protocol 3 sensors - THGN800, PRCR800, WTG800 WMR86: - no data logger - protocol 3 sensors - THGR800, WGR800, PCR800, UVN800 The following references were useful for figuring out the WMR protocol: From Per Ejeklint: https://github.com/ejeklint/WLoggerDaemon/blob/master/Station_protocol.md From Rainer Finkeldeh: http://www.bashewa.com/wmr200-protocol.php The WMR driver for the wfrog weather system: http://code.google.com/p/wfrog/source/browse/trunk/wfdriver/station/wmrs200.py Unfortunately, there is no documentation for PyUSB v0.4, so you have to back it out of the source code, available at: https://pyusb.svn.sourceforge.net/svnroot/pyusb/branches/0.4/pyusb.c """ from __future__ import absolute_import from __future__ import print_function import logging import time import operator from functools import reduce import usb import weewx.drivers import weewx.wxformulas import weeutil.weeutil log = logging.getLogger(__name__) DRIVER_NAME = 'WMR100' DRIVER_VERSION = "3.5.0" def loader(config_dict, engine): # @UnusedVariable return WMR100(**config_dict[DRIVER_NAME]) def confeditor_loader(): return WMR100ConfEditor() class WMR100(weewx.drivers.AbstractDevice): """Driver for the WMR100 station.""" DEFAULT_MAP = { 'pressure': 'pressure', 'windSpeed': 'wind_speed', 'windDir': 'wind_dir', 'windGust': 'wind_gust', 'windBatteryStatus': 'battery_status_wind', 'inTemp': 'temperature_0', 'outTemp': 'temperature_1', 'extraTemp1': 'temperature_2', 'extraTemp2': 'temperature_3', 'extraTemp3': 'temperature_4', 'extraTemp4': 'temperature_5', 'extraTemp5': 'temperature_6', 'extraTemp6': 'temperature_7', 'extraTemp7': 'temperature_8', 'inHumidity': 'humidity_0', 'outHumidity': 'humidity_1', 'extraHumid1': 'humidity_2', 'extraHumid2': 'humidity_3', 'extraHumid3': 'humidity_4', 'extraHumid4': 'humidity_5', 'extraHumid5': 'humidity_6', 'extraHumid6': 'humidity_7', 'extraHumid7': 'humidity_8', 'inTempBatteryStatus': 'battery_status_0', 'outTempBatteryStatus': 'battery_status_1', 'extraBatteryStatus1': 'battery_status_2', 'extraBatteryStatus2': 'battery_status_3', 'extraBatteryStatus3': 'battery_status_4', 'extraBatteryStatus4': 'battery_status_5', 'extraBatteryStatus5': 'battery_status_6', 'extraBatteryStatus6': 'battery_status_7', 'extraBatteryStatus7': 'battery_status_8', 'rain': 'rain', 'rainTotal': 'rain_total', 'rainRate': 'rain_rate', 'hourRain': 'rain_hour', 'rain24': 'rain_24', 'rainBatteryStatus': 'battery_status_rain', 'UV': 'uv', 'uvBatteryStatus': 'battery_status_uv'} def __init__(self, **stn_dict): """Initialize an object of type WMR100. NAMED ARGUMENTS: model: Which station model is this? [Optional. Default is 'WMR100'] timeout: How long to wait, in seconds, before giving up on a response from the USB port. [Optional. Default is 15 seconds] wait_before_retry: How long to wait before retrying. [Optional. Default is 5 seconds] max_tries: How many times to try before giving up. [Optional. Default is 3] vendor_id: The USB vendor ID for the WMR [Optional. Default is 0xfde] product_id: The USB product ID for the WM [Optional. 
Default is 0xca01] interface: The USB interface [Optional. Default is 0] IN_endpoint: The IN USB endpoint used by the WMR. [Optional. Default is usb.ENDPOINT_IN + 1] """ log.info('Driver version is %s' % DRIVER_VERSION) self.model = stn_dict.get('model', 'WMR100') # TODO: Consider putting these in the driver loader instead: self.record_generation = stn_dict.get('record_generation', 'software') self.timeout = float(stn_dict.get('timeout', 15.0)) self.wait_before_retry = float(stn_dict.get('wait_before_retry', 5.0)) self.max_tries = int(stn_dict.get('max_tries', 3)) self.vendor_id = int(stn_dict.get('vendor_id', '0x0fde'), 0) self.product_id = int(stn_dict.get('product_id', '0xca01'), 0) self.interface = int(stn_dict.get('interface', 0)) self.IN_endpoint = int(stn_dict.get('IN_endpoint', usb.ENDPOINT_IN + 1)) self.sensor_map = dict(self.DEFAULT_MAP) if 'sensor_map' in stn_dict: self.sensor_map.update(stn_dict['sensor_map']) log.info('Sensor map is %s' % self.sensor_map) self.last_rain_total = None self.devh = None self.openPort() def openPort(self): dev = self._findDevice() if not dev: log.error("Unable to find USB device (0x%04x, 0x%04x)" % (self.vendor_id, self.product_id)) raise weewx.WeeWxIOError("Unable to find USB device") self.devh = dev.open() # Detach any old claimed interfaces try: self.devh.detachKernelDriver(self.interface) except usb.USBError: pass try: self.devh.claimInterface(self.interface) except usb.USBError as e: self.closePort() log.error("Unable to claim USB interface: %s" % e) raise weewx.WeeWxIOError(e) def closePort(self): try: self.devh.releaseInterface() except usb.USBError: pass try: self.devh.detachKernelDriver(self.interface) except usb.USBError: pass def genLoopPackets(self): """Generator function that continuously returns loop packets""" # Get a stream of raw packets, then convert them, depending on the # observation type. for _packet in self.genPackets(): try: _packet_type = _packet[1] if _packet_type in WMR100._dispatch_dict: # get the observations from the packet _raw = WMR100._dispatch_dict[_packet_type](self, _packet) if _raw is not None: # map the packet labels to schema fields _record = dict() for k in self.sensor_map: if self.sensor_map[k] in _raw: _record[k] = _raw[self.sensor_map[k]] # if there are any observations, add time and units if _record: for k in ['dateTime', 'usUnits']: _record[k] = _raw[k] yield _record except IndexError: log.error("Malformed packet: %s" % _packet) def genPackets(self): """Generate measurement packets. These are 8 to 17 byte long packets containing the raw measurement data. For a pretty good summary of what's in these packets see https://github.com/ejeklint/WLoggerDaemon/blob/master/Station_protocol.md """ # Wrap the byte generator function in GenWithPeek so we # can peek at the next byte in the stream. The result, the variable # genBytes, will be a generator function. genBytes = weeutil.weeutil.GenWithPeek(self._genBytes_raw()) # Start by throwing away any partial packets: for ibyte in genBytes: if genBytes.peek() != 0xff: break buff = [] # March through the bytes generated by the generator function genBytes: for ibyte in genBytes: # If both this byte and the next one are 0xff, then we are at the end of a record if ibyte == 0xff and genBytes.peek() == 0xff: # We are at the end of a packet. # Compute its checksum. This can throw an exception if the packet is empty. 
try: computed_checksum = reduce(operator.iadd, buff[:-2]) except TypeError as e: log.debug("Exception while calculating checksum: %s" % e) else: actual_checksum = (buff[-1] << 8) + buff[-2] if computed_checksum == actual_checksum: # Looks good. Yield the packet yield buff else: log.debug("Bad checksum on buffer of length %d" % len(buff)) # Throw away the next character (which will be 0xff): next(genBytes) # Start with a fresh buffer buff = [] else: buff.append(ibyte) @property def hardware_name(self): return self.model #=============================================================================== # USB functions #=============================================================================== def _findDevice(self): """Find the given vendor and product IDs on the USB bus""" for bus in usb.busses(): for dev in bus.devices: if dev.idVendor == self.vendor_id and dev.idProduct == self.product_id: return dev def _genBytes_raw(self): """Generates a sequence of bytes from the WMR USB reports.""" try: # Only need to be sent after a reset or power failure of the station: self.devh.controlMsg(usb.TYPE_CLASS + usb.RECIP_INTERFACE, # requestType 0x0000009, # request [0x20,0x00,0x08,0x01,0x00,0x00,0x00,0x00], # buffer 0x0000200, # value 0x0000000, # index 1000) # timeout except usb.USBError as e: log.error("Unable to send USB control message: %s" % e) # Convert to a Weewx error: raise weewx.WakeupError(e) nerrors = 0 while True: try: # Continually loop, retrieving "USB reports". They are 8 bytes long each. report = self.devh.interruptRead(self.IN_endpoint, 8, # bytes to read int(self.timeout * 1000)) # While the report is 8 bytes long, only a smaller, variable portion of it # has measurement data. This amount is given by byte zero. Return each # byte, starting with byte one: for i in range(1, report[0] + 1): yield report[i] nerrors = 0 except (IndexError, usb.USBError) as e: log.debug("Bad USB report received: %s" % e) nerrors += 1 if nerrors > self.max_tries: log.error("Max retries exceeded while fetching USB reports") raise weewx.RetriesExceeded("Max retries exceeded while fetching USB reports") time.sleep(self.wait_before_retry) # ========================================================================= # LOOP packet decoding functions #========================================================================== def _rain_packet(self, packet): # NB: in my experiments with the WMR100, it registers in increments of # 0.04 inches. Per Ejeklint's notes have you divide the packet values # by 10, but this would result in an 0.4 inch bucket --- too big. So, # I'm dividing by 100. _record = { 'rain_rate' : ((packet[3] << 8) + packet[2]) / 100.0, 'rain_hour' : ((packet[5] << 8) + packet[4]) / 100.0, 'rain_24' : ((packet[7] << 8) + packet[6]) / 100.0, 'rain_total' : ((packet[9] << 8) + packet[8]) / 100.0, 'battery_status_rain': packet[0] >> 4, 'dateTime': int(time.time() + 0.5), 'usUnits': weewx.US} # Because the WMR does not offer anything like bucket tips, we must # calculate it by looking for the change in total rain. Of course, this # won't work for the very first rain packet. _record['rain'] = weewx.wxformulas.calculate_rain( _record['rain_total'], self.last_rain_total) self.last_rain_total = _record['rain_total'] return _record def _temperature_packet(self, packet): _record = {'dateTime': int(time.time() + 0.5), 'usUnits': weewx.METRIC} # Per Ejeklint's notes don't mention what to do if temperature is # negative. I think the following is correct. 
Also, from experience, we # know that the WMR has problems measuring dewpoint at temperatures # below about 20F. So ignore dewpoint and let weewx calculate it. T = (((packet[4] & 0x7f) << 8) + packet[3]) / 10.0 if packet[4] & 0x80: T = -T R = float(packet[5]) channel = packet[2] & 0x0f _record['temperature_%d' % channel] = T _record['humidity_%d' % channel] = R _record['battery_status_%d' % channel] = (packet[0] & 0x40) >> 6 return _record def _temperatureonly_packet(self, packet): # function added by fstuyk to manage temperature-only sensor THWR800 _record = {'dateTime': int(time.time() + 0.5), 'usUnits': weewx.METRIC} # Per Ejeklint's notes don't mention what to do if temperature is # negative. I think the following is correct. T = (((packet[4] & 0x7f) << 8) + packet[3])/10.0 if packet[4] & 0x80: T = -T channel = packet[2] & 0x0f _record['temperature_%d' % channel] = T _record['battery_status_%d' % channel] = (packet[0] & 0x40) >> 6 return _record def _pressure_packet(self, packet): # Although the WMR100 emits SLP, not all consoles in the series # (notably, the WMRS200) allow the user to set altitude. So we # record only the station pressure (raw gauge pressure). SP = float(((packet[3] & 0x0f) << 8) + packet[2]) _record = {'pressure': SP, 'dateTime': int(time.time() + 0.5), 'usUnits': weewx.METRIC} return _record def _uv_packet(self, packet): _record = {'uv': float(packet[3]), 'battery_status_uv': packet[0] >> 4, 'dateTime': int(time.time() + 0.5), 'usUnits': weewx.METRIC} return _record def _wind_packet(self, packet): """Decode a wind packet. Wind speed will be in kph""" _record = { 'wind_speed': ((packet[6] << 4) + ((packet[5]) >> 4)) / 10.0, 'wind_gust': (((packet[5] & 0x0f) << 8) + packet[4]) / 10.0, 'wind_dir': (packet[2] & 0x0f) * 360.0 / 16.0, 'battery_status_wind': (packet[0] >> 4), 'dateTime': int(time.time() + 0.5), 'usUnits': weewx.METRICWX} # Sometimes the station emits a wind gust that is less than the # average wind. If this happens, ignore it. if _record['wind_gust'] < _record['wind_speed']: _record['wind_gust'] = None return _record def _clock_packet(self, packet): """The clock packet is not used by weewx. However, the last time is saved in case getTime() is called.""" tt = (2000 + packet[8], packet[7], packet[6], packet[5], packet[4], 0, 0, 0, -1) self.last_time = time.mktime(tt) return None # Dictionary that maps a measurement code, to a function that can decode it _dispatch_dict = {0x41: _rain_packet, 0x42: _temperature_packet, 0x46: _pressure_packet, 0x47: _uv_packet, 0x48: _wind_packet, 0x60: _clock_packet, 0x44: _temperatureonly_packet} class WMR100ConfEditor(weewx.drivers.AbstractConfEditor): @property def default_stanza(self): return """ [WMR100] # This section is for the Oregon Scientific WMR100 # The driver to use driver = weewx.drivers.wmr100 # The station model, e.g., WMR100, WMR100N, WMRS200 model = WMR100 """ def modify_config(self, config_dict): print(""" Setting rainRate calculation to hardware.""") config_dict.setdefault('StdWXCalculate', {}) config_dict['StdWXCalculate'].setdefault('Calculations', {}) config_dict['StdWXCalculate']['Calculations']['rainRate'] = 'hardware'
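A worked, hypothetical example of the wind decoding in _wind_packet (the byte values are invented; only the indices that the method reads are shown):

packet = [0x00, 0x48, 0x0C, 0x00, 0x3A, 0x40, 0x03]   # 0x48 marks a wind packet

wind_dir   = (packet[2] & 0x0f) * 360.0 / 16.0               # 12 * 22.5 = 270.0 degrees
wind_gust  = (((packet[5] & 0x0f) << 8) + packet[4]) / 10.0  # (0 + 58) / 10 = 5.8 kph
wind_speed = ((packet[6] << 4) + (packet[5] >> 4)) / 10.0    # (48 + 4) / 10 = 5.2 kph
battery    = packet[0] >> 4                                  # 0 -> battery OK
# The gust (5.8 kph) is not below the average speed (5.2 kph), so it is kept;
# otherwise _wind_packet would replace it with None.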
Welcome to Shivam Tourist Guest House in Bundi - in the heart of Rajasthan. It is a small family-run guest house managed by the two brothers Montu and Tampi and their families. We have seven very nice rooms with shower and hot water. We serve delicious home-cooked food at our Rooftop Restaurant, offer free Wi-Fi, speak English and French, and can help you with ticket booking and more.
#!/usr/bin/python #------------------------------------------------------------------------------- #License GPL v3.0 #Author: Alexandre Manhaes Savio <[email protected]> #Grupo de Inteligencia Computational <www.ehu.es/ccwintco> #Universidad del Pais Vasco UPV/EHU #Use this at your own risk! #------------------------------------------------------------------------------- #DEPENDENCIES: #sudo apt-get install python-argparse python-numpy python-numpy-ext python-matplotlib python-scipy python-nibabel #For development: #sudo apt-get install ipython python-nifti python-nitime #from IPython.core.debugger import Tracer; debug_here = Tracer() import os import sys import argparse import logging import numpy as np import nibabel as nib import aizkolari_utils as au import aizkolari_preproc as pre import aizkolari_pearson as pear import aizkolari_bhattacharyya as bat import aizkolari_ttest as ttst import aizkolari_postproc as post #------------------------------------------------------------------------------- def set_parser(): parser = argparse.ArgumentParser(description='Slices and puts together a list of subjects to perform voxe-wise group calculations, e.g., Pearson correlations and bhattacharyya distance. \n The Pearson correlation is calculated between each voxel site for all subjects and the class label vector of the same subjects. \n Bhatthacharyya distance is calculated between each two groups using voxelwise Gaussian univariate distributions of each group. \n Student t-test is calculated as a Welch t-test where the two population variances are assumed to be different.') parser.add_argument('-c', '--classesf', dest='classes', required=True, help='class label file. one line per class: <class_label>,<class_name>.') parser.add_argument('-i', '--insubjsf', dest='subjs', required=True, help='file with a list of the volume files and its labels for the analysis. Each line: <class_label>,<subject_file>') parser.add_argument('-o', '--outdir', dest='outdir', required=True, help='name of the output directory where the results will be put.') parser.add_argument('-e', '--exclude', dest='exclude', default='', required=False, help='subject list mask, i.e., text file where each line has 0 or 1 indicating with 1 which subject should be excluded in the measure. To help calculating measures for cross-validation folds, for leave-one-out you can use the -l option.') parser.add_argument('-l', '--leave', dest='leave', default=-1, required=False, type=int, help='index from subject list (counting from 0) indicating one subject to be left out of the measure. For leave-one-out measures.') parser.add_argument('-d', '--datadir', dest='datadir', required=False, help='folder path where the subjects are, if the absolute path is not included in the subjects list file.', default='') parser.add_argument('-m', '--mask', dest='mask', required=False, help='Brain mask volume file for all subjects.') parser.add_argument('-n', '--measure', dest='measure', default='pearson', choices=['pearson','bhatta','bhattacharyya','ttest'], required=False, help='name of the distance/correlation method. Allowed: pearson (Pearson Correlation), bhatta (Bhattacharyya distance), ttest (Student`s t-test). 
(default: pearson)') parser.add_argument('-k', '--cleanup', dest='cleanup', action='store_true', help='if you want to clean up all the temp files after processing') parser.add_argument('-f', '--foldno', dest='foldno', required=False, type=int, default=-1, help='number to identify the fold for this run, in case you will run many different folds.') parser.add_argument('-x', '--expname', dest='expname', required=False, type=str, default='', help='name to identify this run, in case you will run many different experiments.') parser.add_argument('-a', '--absolute', dest='absolute', required=False, action='store_true', help='put this if you want absolute values of the measure.') parser.add_argument('-v', '--verbosity', dest='verbosity', required=False, type=int, default=2, help='Verbosity level: Integer where 0 for Errors, 1 for Progression reports, 2 for Debug reports') parser.add_argument('--checklist', dest='checklist', required=False, action='store_true', help='If set will use and update a checklist file, which will control the steps already done in case the process is interrupted.') return parser #------------------------------------------------------------------------------- def decide_whether_usemask (maskfname): usemask = False if maskfname: usemask = True if usemask: if not os.path.exists(maskfname): print ('Mask file ' + maskfname + ' not found!') usemask = False return usemask #------------------------------------------------------------------------------- def get_fold_numberstr (foldno): if foldno == -1: return '' else: return zeropad (foldno) #------------------------------------------------------------------------------- def get_measure_shortname (measure_name): if measure_name == 'bhattacharyya' or measure_name == 'bhatta': measure = 'bat' elif measure_name == 'pearson': measure = 'pea' elif measure_name == 'ttest': measure = 'ttest' return measure #------------------------------------------------------------------------------- def parse_labels_file (classf): labels = [] classnames = [] labfile = open(classf, 'r') for l in labfile: line = l.strip().split(',') labels .append (int(line[0])) classnames.append (line[1]) labfile.close() return [labels, classnames] #------------------------------------------------------------------------------- def parse_subjects_list (subjsfname, datadir=''): subjlabels = [] subjs = [] if datadir: datadir += os.path.sep try: subjfile = open(subjsfname, 'r') for s in subjfile: line = s.strip().split(',') subjlabels.append(int(line[0])) subjfname = line[1].strip() if not os.path.isabs(subjfname): subjs.append (datadir + subjfname) else: subjs.append (subjfname) subjfile.close() except: log.error( "Unexpected error: ", sys.exc_info()[0] ) sys.exit(-1) return [subjlabels, subjs] #------------------------------------------------------------------------------- def parse_exclude_list (excluf, leave=-1): excluded =[] if (excluf): try: excluded = np.loadtxt(excluf, dtype=int) #if leave already excluded, dont take it into account if leave > -1: if excluded[leave] == 1: au.log.warn ('Your left out subject (-l) is already being excluded in the exclusion file (-e).') leave = -1 except: au.log.error ('Error processing file ' + excluf) au.log.error ('Unexpected error: ' + str(sys.exc_info()[0])) sys.exit(-1) return excluded #------------------------------------------------------------------------------- def main(argv=None): parser = set_parser() try: args = parser.parse_args () except argparse.ArgumentError, exc: print (exc.message + '\n' + exc.argument) 
parser.error(str(msg)) return -1 datadir = args.datadir.strip() classf = args.classes.strip() subjsf = args.subjs.strip() maskf = args.mask.strip() outdir = args.outdir.strip() excluf = args.exclude.strip() measure = args.measure.strip() expname = args.expname.strip() foldno = args.foldno cleanup = args.cleanup leave = args.leave absval = args.absolute verbose = args.verbosity chklst = args.checklist au.setup_logger(verbose) usemask = decide_whether_usemask(maskf) foldno = get_fold_numberstr (foldno) measure = get_measure_shortname (measure) classnum = au.file_len(classf) subjsnum = au.file_len(subjsf) #reading label file [labels, classnames] = parse_labels_file (classf) #reading subjects list [subjlabels, subjs] = parse_subjects_list (subjsf, datadir) #if output dir does not exist, create if not(os.path.exists(outdir)): os.mkdir(outdir) #checklist_fname if chklst: chkf = outdir + os.path.sep + au.checklist_str() if not(os.path.exists(chkf)): au.touch(chkf) else: chkf = '' #saving data in files where further processes can find them outf_subjs = outdir + os.path.sep + au.subjects_str() outf_labels = outdir + os.path.sep + au.labels_str() np.savetxt(outf_subjs, subjs, fmt='%s') np.savetxt(outf_labels, subjlabels, fmt='%i') #creating folder for slices slidir = outdir + os.path.sep + au.slices_str() if not(os.path.exists(slidir)): os.mkdir(slidir) #slice the volumes #creating group and mask slices pre.slice_and_merge(outf_subjs, outf_labels, chkf, outdir, maskf) #creating measure output folder if measure == 'pea': measure_fname = au.pearson_str() elif measure == 'bat': measure_fname = au.bhattacharyya_str() elif measure == 'ttest': measure_fname = au.ttest_str() #checking the leave parameter if leave > (subjsnum - 1): au.log.warning('aizkolari_measure: the leave (-l) argument value is ' + str(leave) + ', bigger than the last index of subject: ' + str(subjsnum - 1) + '. 
Im setting it to -1.') leave = -1 #reading exclusion list excluded = parse_exclude_list (excluf, leave) #setting the output folder mdir extension mdir = outdir + os.path.sep + measure_fname if expname: mdir += '_' + expname if foldno: mdir += '_' + foldno #setting the stats folder statsdir = outdir + os.path.sep + au.stats_str() if expname: statsdir += '_' + expname if foldno: statsdir += '_' + foldno #setting a string with step parameters step_params = ' ' + measure_fname + ' ' + mdir absolute_str = '' if absval: absolute_str = ' ' + au.abs_str() step_params += absolute_str leave_str = '' if leave > -1: leave_str = ' excluding subject ' + str(leave) step_params += leave_str #checking if this measure has already been done endstep = au.measure_str() + step_params stepdone = au.is_done(chkf, endstep) #add pluses to output dir if it already exists if stepdone: while os.path.exists (mdir): mdir += '+' else: #work in the last folder used plus = False while os.path.exists (mdir): mdir += '+' plus = True if plus: mdir = mdir[0:-1] #setting statsdir pluses = mdir.count('+') for i in np.arange(pluses): statsdir += '+' #merging mask slices to mdir if not stepdone: #creating output folders if not os.path.exists (mdir): os.mkdir(mdir) #copying files to mdir au.copy(outf_subjs, mdir) au.copy(outf_labels, mdir) #saving exclude files in mdir outf_exclude = '' if (excluf): outf_exclude = au.exclude_str() if expname: outf_exclude += '_' + expname if foldnumber: outf_exclude += '_' + foldnumber np.savetxt(outdir + os.path.sep + outf_exclude , excluded, fmt='%i') np.savetxt(mdir + os.path.sep + au.exclude_str(), excluded, fmt='%i') excluf = mdir + os.path.sep + au.exclude_str() step = au.maskmerging_str() + ' ' + measure_fname + ' ' + mdir if usemask and not au.is_done(chkf, step): maskregex = au.mask_str() + '_' + au.slice_str() + '*' post.merge_slices (slidir, maskregex, au.mask_str(), mdir, False) au.checklist_add(chkf, step) #CORRELATION #read the measure argument and start processing if measure == 'pea': #measure pearson correlation for each population slice step = au.measureperslice_str() + step_params if not au.is_done(chkf, step): pear.pearson_correlation (outdir, mdir, usemask, excluf, leave) au.checklist_add(chkf, step) #merge all correlation slice measures step = au.postmerging_str() + step_params if not au.is_done(chkf, step): pearegex = au.pearson_str() + '_' + au.slice_str() + '*' peameasf = mdir + os.path.sep + au.pearson_str() if leave > -1: pearegex += '_' + au.excluded_str() + str(leave) + '*' peameasf += '_' + au.excluded_str() + str(leave) + '_' + au.pearson_str() post.merge_slices (mdir, pearegex, peameasf, mdir) if absval: post.change_to_absolute_values(peameasf) au.checklist_add(chkf, step) #BHATTACHARYYA AND T-TEST elif measure == 'bat' or measure == 'ttest': if not os.path.exists (statsdir): os.mkdir(statsdir) gsize = np.zeros([len(classnames),2], dtype=int) for c in range(len(classnames)): gname = classnames[c] glabel = labels [c] godir = mdir + os.path.sep + gname au.log.debug ('Processing group ' + gname) gselect = np.zeros(len(subjs)) gsubjs = list() glabels = list() for s in range(len(subjs)): slabel = subjlabels[s] if slabel == glabel: gsubjs .append (subjs[s]) glabels.append (slabel) gselect[s] = 1 if outf_exclude: if excluded[s]: gselect[s] = 0 gsize[c,0] = glabel gsize[c,1] = np.sum(gselect) outf_subjs = mdir + os.path.sep + gname + '_' + au.subjects_str() outf_labels = mdir + os.path.sep + gname + '_' + au.labels_str() outf_group = mdir + os.path.sep + gname + '_' + 
au.members_str() np.savetxt(outf_subjs , gsubjs, fmt='%s') np.savetxt(outf_labels, glabels, fmt='%i') np.savetxt(outf_group , gselect, fmt='%i') step = au.groupfilter_str() + ' ' + gname + ' ' + statsdir if not au.is_done(chkf, step): au.group_filter (outdir, statsdir, gname, outf_group, usemask) au.checklist_add(chkf, step) grp_step_params = ' ' + au.stats_str() + ' ' + gname + ' ' + statsdir step = au.measureperslice_str() + grp_step_params if not au.is_done(chkf, step): post.group_stats (statsdir, gname, gsize[c,1], statsdir) au.checklist_add(chkf, step) statfnames = {} step = au.postmerging_str() + grp_step_params if not au.is_done(chkf, step): statfnames[gname] = post.merge_stats_slices (statsdir, gname) au.checklist_add(chkf, step) sampsizef = mdir + os.path.sep + au.groupsizes_str() np.savetxt(sampsizef, gsize, fmt='%i,%i') #decide which group distance function to use if measure == 'bat': distance_func = bat.measure_bhattacharyya_distance elif measure == 'ttest': distance_func = ttst.measure_ttest #now we deal with the indexed excluded subject step = au.postmerging_str() + ' ' + str(classnames) + step_params exsubf = '' exclas = '' if leave > -1: exsubf = subjs[leave] exclas = classnames[subjlabels[leave]] if not au.is_done(chkf, step): #group distance called here, in charge of removing the 'leave' subject from stats as well measfname = post.group_distance (distance_func, statsdir, classnames, gsize, chkf, absval, mdir, foldno, expname, leave, exsubf, exclas) if usemask: au.apply_mask (measfname, mdir + os.path.sep + au.mask_str()) au.checklist_add(chkf, step) #adding step end indication au.checklist_add(chkf, endstep) #CLEAN SPACE SUGGESTION rmcomm = 'rm -rf ' + outdir + os.path.sep + au.slices_str() + ';' if cleanup: au.log.debug ('Cleaning folders:') au.log.info (rmcomm) os.system (rmcomm) else: au.log.info ('If you need disk space, remove the temporary folders executing:') au.log.info (rmcomm.replace(';','\n')) if leave > -1: au.log.info ('You should not remove these files if you are doing further leave-one-out measures.') #------------------------------------------------------------------------------- if __name__ == "__main__": sys.exit(main())
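A minimal, hypothetical example of the two input files this script expects, based on the --classesf and --insubjsf help strings above; the file names, labels, paths, and the script's own file name are invented for illustration.

# classes.txt -- one "<class_label>,<class_name>" per line:
#   0,controls
#   1,patients
#
# subjects.txt -- one "<class_label>,<subject_file>" per line (relative paths
# are resolved against --datadir):
#   0,controls/subj001_gm.nii.gz
#   1,patients/subj014_gm.nii.gz
#
# A corresponding (hypothetical) invocation computing Pearson correlations
# inside a brain mask:
#   python aizkolari_measure.py -c classes.txt -i subjects.txt -m mask.nii.gz \
#       -o results -n pearson -v 1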
A new survey conducted by Saigon's Department of Education and Training reveals unsettling realities regarding student well-being. The study covers 150 schools and institutions in Ho Chi Minh City, including 74 public high schools, 34 public secondary schools, eight primary schools and 34 other institutions ranging from kindergartens to private schools. According to the survey, 31% of the students experience stress and 53.8% show a lack of study motivation, Dan Tri reports. Of all the students in the study, 7.8% have dropped out of school and 21.1% are at risk of doing so. The report argues that the study environment, society and family, a lack of support services from schools, and anxiety are the reasons for such troubling numbers. The survey also shed light on a more worrying issue: 24.6% of students are bullied and 20.8% of them are victims of psychological abuse. Thirty percent of all respondents reported having been harassed online in forms such as photos posted without consent, insults, provocations and threats. More than 6% of respondents use drugs, 5.7% have violated the law, 2.8% have gone through an abortion, and 0.8% show self-destructive behaviors.
''' RecycleView Views ================= .. versionadded:: 1.10.0 The adapter part of the RecycleView which together with the layout is the view part of the model-view-controller pattern. The view module handles converting the data to a view using the adapter class which is then displayed by the layout. A view can be any Widget based class. However, inheriting from RecycleDataViewBehavior adds methods for converting the data to a view. TODO: * Make view caches specific to each view class type. ''' from kivy.properties import StringProperty, ObjectProperty from kivy.event import EventDispatcher from kivy.factory import Factory from collections import defaultdict _view_base_cache = {} '''Cache whose keys are classes and values is a boolean indicating whether the class inherits from :class:`RecycleDataViewBehavior`. ''' _cached_views = defaultdict(list) '''A size limited cache that contains old views (instances) that are not used. Each key is a class whose value is the list of the instances of that class. ''' # current number of unused classes in the class cache _cache_count = 0 # maximum number of items in the class cache _max_cache_size = 1000 def _clean_cache(): '''Trims _cached_views cache to half the size of `_max_cache_size`. ''' # all keys will be reduced to max_size. max_size = (_max_cache_size // 2) // len(_cached_views) global _cache_count for cls, instances in _cached_views.items(): _cache_count -= max(0, len(instances) - max_size) del instances[max_size:] class RecycleDataViewBehavior(object): '''A optional base class for data views (:attr:`RecycleView`.viewclass). If a view inherits from this class, the class's functions will be called when the view needs to be updated due to a data change or layout update. ''' def refresh_view_attrs(self, rv, index, data): '''Called by the :class:`RecycleAdapter` when the view is initially populated with the values from the `data` dictionary for this item. Any pos or size info should be removed because they are set subsequently with :attr:`refresh_view_layout`. :Parameters: `rv`: :class:`RecycleView` instance The :class:`RecycleView` that caused the update. `data`: dict The data dict used to populate this view. ''' sizing_attrs = RecycleDataAdapter._sizing_attrs for key, value in data.items(): if key not in sizing_attrs: setattr(self, key, value) def refresh_view_layout(self, rv, index, layout, viewport): '''Called when the view's size is updated by the layout manager, :class:`RecycleLayoutManagerBehavior`. :Parameters: `rv`: :class:`RecycleView` instance The :class:`RecycleView` that caused the update. `viewport`: 4-tuple The coordinates of the bottom left and width height in layout manager coordinates. This may be larger than this view item. :raises: `LayoutChangeException`: If the sizing or data changed during a call to this method, raising a `LayoutChangeException` exception will force a refresh. Useful when data changed and we don't want to layout further since it'll be overwritten again soon. ''' w, h = layout.pop('size') if w is None: if h is not None: self.height = h else: if h is None: self.width = w else: self.size = w, h for name, value in layout.items(): setattr(self, name, value) def apply_selection(self, rv, index, is_selected): pass class RecycleDataAdapter(EventDispatcher): '''The class that converts data to a view. --- Internal details --- A view can have 3 states. * It can be completely in sync with the data, which occurs when the view is displayed. These are stored in :attr:`views`. 
* It can be dirty, which occurs when the view is in sync with the data, except for the size/pos parameters which is controlled by the layout. This occurs when the view is not currently displayed but the data has not changed. These views are stored in :attr:`dirty_views`. * Finally the view can be dead which occurs when the data changes and the view was not updated or when a view is just created. Such views are typically added to the internal cache. Typically what happens is that the layout manager lays out the data and then asks for views, using :meth:`set_visible_views,` for some specific data items that it displays. These views are gotten from the current views, dirty or global cache. Then depending on the view state :meth:`refresh_view_attrs` is called to bring the view up to date with the data (except for sizing parameters). Finally, the layout manager gets these views, updates their size and displays them. ''' recycleview = ObjectProperty(None, allownone=True) '''The :class:`~kivy.uix.recycleview.RecycleViewBehavior` associated with this instance. ''' # internals views = {} # current displayed items # items whose attrs, except for pos/size is still accurate dirty_views = defaultdict(dict) _sizing_attrs = { 'size', 'width', 'height', 'size_hint', 'size_hint_x', 'size_hint_y', 'pos', 'x', 'y', 'center', 'center_x', 'center_y', 'pos_hint', 'size_hint_min', 'size_hint_min_x', 'size_hint_min_y', 'size_hint_max', 'size_hint_max_x', 'size_hint_max_y'} def attach_recycleview(self, rv): '''Associates a :class:`~kivy.uix.recycleview.RecycleViewBehavior` with this instance. It is stored in :attr:`recycleview`. ''' self.recycleview = rv def detach_recycleview(self): '''Removes the :class:`~kivy.uix.recycleview.RecycleViewBehavior` associated with this instance and clears :attr:`recycleview`. ''' self.recycleview = None def create_view(self, index, data_item, viewclass): '''(internal) Creates and initializes the view for the data at `index`. The returned view is synced with the data, except for the pos/size information. ''' if viewclass is None: return view = viewclass() self.refresh_view_attrs(index, data_item, view) return view def get_view(self, index, data_item, viewclass): '''(internal) Returns a view instance for the data at `index` It looks through the various caches and finally creates a view if it doesn't exist. The returned view is synced with the data, except for the pos/size information. If found in the cache it's removed from the source before returning. It doesn't check the current views. ''' # is it in the dirtied views? dirty_views = self.dirty_views if viewclass is None: return stale = False view = None if viewclass in dirty_views: # get it first from dirty list dirty_class = dirty_views[viewclass] if index in dirty_class: # we found ourself in the dirty list, no need to update data! view = dirty_class.pop(index) elif _cached_views[viewclass]: # global cache has this class, update data view, stale = _cached_views[viewclass].pop(), True elif dirty_class: # random any dirty view element - update data view, stale = dirty_class.popitem()[1], True elif _cached_views[viewclass]: # otherwise go directly to cache # global cache has this class, update data view, stale = _cached_views[viewclass].pop(), True if view is None: view = self.create_view(index, data_item, viewclass) if view is None: return if stale: self.refresh_view_attrs(index, data_item, view) return view def refresh_view_attrs(self, index, data_item, view): '''(internal) Syncs the view and brings it up to date with the data. 
This method calls :meth:`RecycleDataViewBehavior.refresh_view_attrs` if the view inherits from :class:`RecycleDataViewBehavior`. See that method for more details. .. note:: Any sizing and position info is skipped when syncing with the data. ''' viewclass = view.__class__ if viewclass not in _view_base_cache: _view_base_cache[viewclass] = isinstance(view, RecycleDataViewBehavior) if _view_base_cache[viewclass]: view.refresh_view_attrs(self.recycleview, index, data_item) else: sizing_attrs = RecycleDataAdapter._sizing_attrs for key, value in data_item.items(): if key not in sizing_attrs: setattr(view, key, value) def refresh_view_layout(self, index, layout, view, viewport): '''Updates the sizing information of the view. viewport is in coordinates of the layout manager. This method calls :meth:`RecycleDataViewBehavior.refresh_view_attrs` if the view inherits from :class:`RecycleDataViewBehavior`. See that method for more details. .. note:: Any sizing and position info is skipped when syncing with the data. ''' if view.__class__ not in _view_base_cache: _view_base_cache[view.__class__] = isinstance( view, RecycleDataViewBehavior) if _view_base_cache[view.__class__]: view.refresh_view_layout( self.recycleview, index, layout, viewport) else: w, h = layout.pop('size') if w is None: if h is not None: view.height = h else: if h is None: view.width = w else: view.size = w, h for name, value in layout.items(): setattr(view, name, value) def make_view_dirty(self, view, index): '''(internal) Used to flag this view as dirty, ready to be used for others. See :meth:`make_views_dirty`. ''' del self.views[index] self.dirty_views[view.__class__][index] = view def make_views_dirty(self): '''Makes all the current views dirty. Dirty views are still in sync with the corresponding data. However, the size information may go out of sync. Therefore a dirty view can be reused by the same index by just updating the sizing information. Once the underlying data of this index changes, the view should be removed from the dirty views and moved to the global cache with :meth:`invalidate`. This is typically called when the layout manager needs to re-layout all the data. ''' views = self.views if not views: return dirty_views = self.dirty_views for index, view in views.items(): dirty_views[view.__class__][index] = view self.views = {} def invalidate(self): '''Moves all the current views into the global cache. As opposed to making a view dirty where the view is in sync with the data except for sizing information, this will completely disconnect the view from the data, as it is assumed the data has gone out of sync with the view. This is typically called when the data changes. ''' global _cache_count for view in self.views.values(): _cached_views[view.__class__].append(view) _cache_count += 1 for cls, views in self.dirty_views.items(): _cached_views[cls].extend(views.values()) _cache_count += len(views) if _cache_count >= _max_cache_size: _clean_cache() self.views = {} self.dirty_views.clear() def set_visible_views(self, indices, data, viewclasses): '''Gets a 3-tuple of the new, remaining, and old views for the current viewport. The new views are synced to the data except for the size/pos properties. The old views need to be removed from the layout, and the new views added. The new views are not necessarily *new*, but are all the currently visible views. 
''' visible_views = {} previous_views = self.views ret_new = [] ret_remain = [] get_view = self.get_view # iterate though the visible view # add them into the container if not already done for index in indices: view = previous_views.pop(index, None) if view is not None: # was current view visible_views[index] = view ret_remain.append((index, view)) else: view = get_view(index, data[index], viewclasses[index]['viewclass']) if view is None: continue visible_views[index] = view ret_new.append((index, view)) old_views = previous_views.items() self.make_views_dirty() self.views = visible_views return ret_new, ret_remain, old_views def get_visible_view(self, index): '''Returns the currently visible view associated with ``index``. If no view is currently displayed for ``index`` it returns ``None``. ''' return self.views.get(index)
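The adapter above is normally driven by a layout manager rather than used directly. As a rough illustration of that flow, here is a minimal sketch, assuming Kivy is installed; the Label viewclass and the data items are made up for this example. It requests views for a visible range and then lays them out:

from kivy.uix.label import Label
from kivy.uix.recycleview.views import RecycleDataAdapter

adapter = RecycleDataAdapter()

# One dict of attributes per data index; sizing keys are skipped by
# refresh_view_attrs and applied later through refresh_view_layout.
data = [{'text': 'item %d' % i} for i in range(100)]
viewclasses = {i: {'viewclass': Label} for i in range(10)}

# Ask for the first ten indices. Returns (new, remaining, old): new views
# still need to be added to the layout, old ones removed from it.
new, remaining, old = adapter.set_visible_views(list(range(10)), data, viewclasses)

for index, view in new:
    # The layout manager decides sizes/positions; these values are arbitrary.
    adapter.refresh_view_layout(
        index, {'size': (100, 50), 'pos': (0, index * 50)}, view,
        (0, 0, 100, 500))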
April 13 is that special time of the year again when the Douglas County Community & Senior Center opens its doors to everyone for a good old-fashioned ice cream social. They will be serving ice cream, root beer floats, and banana splits, followed by bingo. The event is free to Young at Heart members and $5 to others. Join the YAH that day for $5 and enter free. Your membership pays for itself instantly and also entitles you to attend the Christmas luncheon at the center. Plus, members receive two free bingo cards that day. Additional bingo cards are just $1. The center is located at 1329 Waterloo Lane in Gardnerville. Whether you are already a lover of the dice game "bunco" or just want to learn, here's a date you shouldn't miss. On April 27, the Douglas County Democratic Women are inviting the public to join in this fast-moving, social dice game involving 100 percent luck and absolutely no skill! It's fun for everyone and beginners are welcome. Winners take home prizes, and proceeds from the event go toward the group's goal of making a difference in our community. Bring along your friends to share in this fun afternoon from 1-4 p.m. There will be snacks and adult beverages available. Registration is $25 a person. Make your check payable to DCDW and mail it to P.O. Box 939, Minden, NV 89423 before April 24. Questions? Call 602-549-5378. Want to find out what is so special about the Lions Club? On May 4 you are invited to the Lions Tri-Club Dinner at the Topaz Lodge. The theme is "Proud to be an American," with guest speakers Alex and Amber Carrillo of INTEGRAS, who will deliver an amazing program about our American flag. Tickets are $27.50 a person. Reservations are accepted through April 24, at club meetings or by mail. Cocktails are at 6:30, with a dinner of prime rib and chicken with all the fixings, including dessert, served at 7 p.m. There will be raffle and door prizes. Please make your dinner reservation check payable to the Carson Valley Lions Club and mail it to: Carson Valley Lions Club, Attn: Lion Elizabeth, PO Box 314, Minden, NV 89423-0314. Questions? Call 552-5580. The place to be on June 6 is Dangberg Home Ranch Historic Park at 6:30 p.m. to help kick off their Thursday evening concerts, beginning with country music legend Lacy J Dalton. Dalton is an American country singer and songwriter with a career spanning several decades. In March 2017 she was inducted into the "North American Country Music Association International Hall of Fame," and in 2018 she was nominated for a Lifetime Achievement Award. This year's Dangberg Summer Festival sponsors include Douglas County, Nevada, Horse Tales, Carson Valley Chamber of Commerce, Jacobs Family Berry Farm, Dr. James the Dentist and Associates, Carson Valley Accounting, Nevada State Bank, Holiday Inn Express Minden Nevada and the Frances C. and William P. Smallwood Foundation. Concert ticket prices are $15 general admission for adults and $10 for "Friends of Dangberg" members. Young people 16 years and under enter for free. Please bring your own seating; you are welcome to bring a picnic. No pets are allowed (except service animals). For the full Dangberg Summer Festival schedule, please visit the park's website at dangberghomeranch.org.
import os import sys import math import select import socket import getpass import logging import textwrap from addict import Dict from six import iteritems from six.moves import urllib from pymesos import Scheduler, MesosSchedulerDriver from tfmesos.utils import send, recv, setup_logger import uuid FOREVER = 0xFFFFFFFF logger = logging.getLogger(__name__) class Job(object): def __init__(self, name, num, cpus=1.0, mem=1024.0, gpus=0, cmd=None, start=0): self.name = name self.num = num self.cpus = cpus self.gpus = gpus self.mem = mem self.cmd = cmd self.start = start class Task(object): def __init__(self, mesos_task_id, job_name, task_index, cpus=1.0, mem=1024.0, gpus=0, cmd=None, volumes={}, env={}): self.mesos_task_id = mesos_task_id self.job_name = job_name self.task_index = task_index self.cpus = cpus self.gpus = gpus self.mem = mem self.cmd = cmd self.volumes = volumes self.env = env self.offered = False self.addr = None self.connection = None self.initalized = False def __str__(self): return textwrap.dedent(''' <Task mesos_task_id=%s addr=%s >''' % (self.mesos_task_id, self.addr)) def to_task_info(self, offer, master_addr, gpu_uuids=[], gpu_resource_type=None, containerizer_type='MESOS', force_pull_image=False): ti = Dict() ti.task_id.value = str(self.mesos_task_id) ti.agent_id.value = offer.agent_id.value ti.name = '/job:%s/task:%s' % (self.job_name, self.task_index) ti.resources = resources = [] cpus = Dict() resources.append(cpus) cpus.name = 'cpus' cpus.type = 'SCALAR' cpus.scalar.value = self.cpus mem = Dict() resources.append(mem) mem.name = 'mem' mem.type = 'SCALAR' mem.scalar.value = self.mem image = os.environ.get('DOCKER_IMAGE') if image is not None: if containerizer_type == 'DOCKER': ti.container.type = 'DOCKER' ti.container.docker.image = image ti.container.docker.force_pull_image = force_pull_image ti.container.docker.parameters = parameters = [] p = Dict() p.key = 'memory-swap' p.value = '-1' parameters.append(p) if self.gpus and gpu_uuids: hostname = offer.hostname url = 'http://%s:3476/docker/cli?dev=%s' % ( hostname, urllib.parse.quote( ' '.join(gpu_uuids) ) ) try: docker_args = urllib.request.urlopen(url).read() for arg in docker_args.split(): k, v = arg.split('=') assert k.startswith('--') k = k[2:] p = Dict() parameters.append(p) p.key = k p.value = v except Exception: logger.exception( 'fail to determine remote device parameter,' ' disable gpu resources' ) gpu_uuids = [] elif containerizer_type == 'MESOS': ti.container.type = 'MESOS' ti.container.mesos.image.type = 'DOCKER' ti.container.mesos.image.docker.name = image # "cached" means the opposite of "force_pull_image" ti.container.mesos.image.cached = not force_pull_image else: assert False, ( 'Unsupported containerizer: %s' % containerizer_type ) ti.container.volumes = volumes = [] for path in ['/etc/passwd', '/etc/group']: v = Dict() volumes.append(v) v.host_path = v.container_path = path v.mode = 'RO' for src, dst in iteritems(self.volumes): v = Dict() volumes.append(v) v.container_path = dst v.host_path = src v.mode = 'RW' if self.gpus and gpu_uuids and gpu_resource_type is not None: if gpu_resource_type == 'SET': gpus = Dict() resources.append(gpus) gpus.name = 'gpus' gpus.type = 'SET' gpus.set.item = gpu_uuids else: gpus = Dict() resources.append(gpus) gpus.name = 'gpus' gpus.type = 'SCALAR' gpus.scalar.value = len(gpu_uuids) ti.command.shell = True cmd = [ sys.executable, '-m', '%s.server' % __package__, str(self.mesos_task_id), master_addr ] ti.command.value = ' '.join(cmd) 
ti.command.environment.variables = variables = [ Dict(name=name, value=value) for name, value in self.env.items() if name != 'PYTHONPATH' ] env = Dict() variables.append(env) env.name = 'PYTHONPATH' env.value = ':'.join(sys.path) return ti class TFMesosScheduler(Scheduler): MAX_FAILURE_COUNT = 3 def __init__(self, task_spec, role=None, master=None, name=None, quiet=False, volumes={}, containerizer_type=None, force_pull_image=False, forward_addresses=None, protocol='grpc', env={}, extra_config={}): self.started = False self.master = master or os.environ['MESOS_MASTER'] self.name = name or '[tensorflow] %s %s' % ( os.path.abspath(sys.argv[0]), ' '.join(sys.argv[1:])) self.task_spec = task_spec self.containerizer_type = containerizer_type self.force_pull_image = force_pull_image self.protocol = protocol self.extra_config = extra_config self.forward_addresses = forward_addresses self.role = role or '*' self.tasks = {} self.task_failure_count = {} self.job_finished = {} for job in task_spec: self.job_finished[job.name] = 0 for task_index in range(job.start, job.num): mesos_task_id = str(uuid.uuid4()) task = Task( mesos_task_id, job.name, task_index, cpus=job.cpus, mem=job.mem, gpus=job.gpus, cmd=job.cmd, volumes=volumes, env=env ) self.tasks[mesos_task_id] = task self.task_failure_count[self.decorated_task_index(task)] = 0 if not quiet: global logger setup_logger(logger) def resourceOffers(self, driver, offers): ''' Offer resources and launch tasks ''' for offer in offers: if all(task.offered for id, task in iteritems(self.tasks)): self.driver.suppressOffers() driver.declineOffer(offer.id, Dict(refuse_seconds=FOREVER)) continue offered_cpus = offered_mem = 0.0 offered_gpus = [] offered_tasks = [] gpu_resource_type = None for resource in offer.resources: if resource.name == 'cpus': offered_cpus = resource.scalar.value elif resource.name == 'mem': offered_mem = resource.scalar.value elif resource.name == 'gpus': if resource.type == 'SET': offered_gpus = resource.set.item else: offered_gpus = list(range(int(resource.scalar.value))) gpu_resource_type = resource.type for id, task in iteritems(self.tasks): if task.offered: continue if not (task.cpus <= offered_cpus and task.mem <= offered_mem and task.gpus <= len(offered_gpus)): continue offered_cpus -= task.cpus offered_mem -= task.mem gpus = int(math.ceil(task.gpus)) gpu_uuids = offered_gpus[:gpus] offered_gpus = offered_gpus[gpus:] task.offered = True offered_tasks.append( task.to_task_info( offer, self.addr, gpu_uuids=gpu_uuids, gpu_resource_type=gpu_resource_type, containerizer_type=self.containerizer_type, force_pull_image=self.force_pull_image ) ) driver.launchTasks(offer.id, offered_tasks) @property def targets(self): targets = {} for id, task in iteritems(self.tasks): target_name = '/job:%s/task:%s' % (task.job_name, task.task_index) grpc_addr = 'grpc://%s' % task.addr targets[target_name] = grpc_addr return targets def _start_tf_cluster(self): cluster_def = {} tasks = sorted(self.tasks.values(), key=lambda task: task.task_index) for task in tasks: cluster_def.setdefault(task.job_name, []).append(task.addr) for id, task in iteritems(self.tasks): response = { 'job_name': task.job_name, 'task_index': task.task_index, 'cpus': task.cpus, 'mem': task.mem, 'gpus': task.gpus, 'cmd': task.cmd, 'cwd': os.getcwd(), 'cluster_def': cluster_def, 'forward_addresses': self.forward_addresses, 'extra_config': self.extra_config, 'protocol': self.protocol } send(task.connection, response) assert recv(task.connection) == 'ok' logger.info( 'Device 
/job:%s/task:%s activated @ grpc://%s ', task.job_name, task.task_index, task.addr ) task.connection.close() def start(self): def readable(fd): return bool(select.select([fd], [], [], 0.1)[0]) lfd = socket.socket() try: lfd.bind(('', 0)) self.addr = '%s:%s' % (socket.gethostname(), lfd.getsockname()[1]) lfd.listen(10) framework = Dict() framework.user = getpass.getuser() framework.name = self.name framework.hostname = socket.gethostname() framework.role = self.role self.driver = MesosSchedulerDriver( self, framework, self.master, use_addict=True ) self.driver.start() task_start_count = 0 while any((not task.initalized for id, task in iteritems(self.tasks))): if readable(lfd): c, _ = lfd.accept() if readable(c): mesos_task_id, addr = recv(c) task = self.tasks[mesos_task_id] task.addr = addr task.connection = c task.initalized = True task_start_count += 1 logger.info('Task %s with mesos_task_id %s has ' 'registered', '{}:{}'.format(task.job_name, task.task_index), mesos_task_id) logger.info('Out of %d tasks ' '%d tasks have been registered', len(self.tasks), task_start_count) else: c.close() self.started = True self._start_tf_cluster() except Exception: self.stop() raise finally: lfd.close() def registered(self, driver, framework_id, master_info): logger.info( 'Tensorflow cluster registered. ' '( http://%s:%s/#/frameworks/%s )', master_info.hostname, master_info.port, framework_id.value ) if self.containerizer_type is None: version = tuple(int(x) for x in driver.version.split(".")) self.containerizer_type = ( 'MESOS' if version >= (1, 0, 0) else 'DOCKER' ) def statusUpdate(self, driver, update): logger.debug('Received status update %s', str(update.state)) mesos_task_id = update.task_id.value if self._is_terminal_state(update.state): task = self.tasks.get(mesos_task_id) if task is None: # This should be very rare and hence making this info. logger.info("Task not found for mesos task id {}" .format(mesos_task_id)) return if self.started: if update.state != 'TASK_FINISHED': logger.error('Task failed: %s, %s with state %s', task, update.message, update.state) raise RuntimeError( 'Task %s failed! 
%s with state %s' % (task, update.message, update.state) ) else: self.job_finished[task.job_name] += 1 else: logger.warn('Task failed while launching the server: %s, ' '%s with state %s', task, update.message, update.state) if task.connection: task.connection.close() self.task_failure_count[self.decorated_task_index(task)] += 1 if self._can_revive_task(task): self.revive_task(driver, mesos_task_id, task) else: raise RuntimeError('Task %s failed %s with state %s and ' 'retries=%s' % (task, update.message, update.state, TFMesosScheduler.MAX_FAILURE_COUNT)) def revive_task(self, driver, mesos_task_id, task): logger.info('Going to revive task %s ', task.task_index) self.tasks.pop(mesos_task_id) task.offered = False task.addr = None task.connection = None new_task_id = task.mesos_task_id = str(uuid.uuid4()) self.tasks[new_task_id] = task driver.reviveOffers() def _can_revive_task(self, task): return self.task_failure_count[self.decorated_task_index(task)] < \ TFMesosScheduler.MAX_FAILURE_COUNT @staticmethod def decorated_task_index(task): return '{}.{}'.format(task.job_name, str(task.task_index)) @staticmethod def _is_terminal_state(task_state): return task_state in ["TASK_FINISHED", "TASK_FAILED", "TASK_KILLED", "TASK_ERROR"] def slaveLost(self, driver, agent_id): if self.started: logger.error('Slave %s lost:', agent_id.value) raise RuntimeError('Slave %s lost' % agent_id) def executorLost(self, driver, executor_id, agent_id, status): if self.started: logger.error('Executor %s lost:', executor_id.value) raise RuntimeError('Executor %s@%s lost' % (executor_id, agent_id)) def error(self, driver, message): logger.error('Mesos error: %s', message) raise RuntimeError('Error ' + message) def stop(self): logger.debug('exit') if hasattr(self, 'tasks'): for id, task in iteritems(self.tasks): if task.connection: task.connection.close() del self.tasks if hasattr(self, 'driver'): self.driver.stop() self.driver.join() del self.driver def finished(self): return any( self.job_finished[job.name] >= job.num for job in self.task_spec ) def processHeartBeat(self): # compatibility with pymesos pass
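To make the moving parts above concrete, here is a hedged sketch of wiring a job spec to the scheduler by hand. The master address and job sizes are placeholder values, and in practice this class is typically driven through tfmesos' higher-level helpers rather than instantiated directly.

# Placeholder job layout: one parameter server and two workers.
jobs = [
    Job(name='ps', num=1, cpus=1.0, mem=1024.0),
    Job(name='worker', num=2, cpus=2.0, mem=2048.0),
]

scheduler = TFMesosScheduler(
    jobs,
    master='zk://mesos-master:2181/mesos',  # assumed master address
    name='example-tf-cluster',
)

try:
    # Registers the framework, accepts offers, waits for every task to call
    # back, then pushes the cluster_def to each task.
    scheduler.start()
    print(scheduler.targets)  # {'/job:worker/task:0': 'grpc://host:port', ...}
finally:
    scheduler.stop()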
On December 1, 2013, version 6.5 of the Community Enterprise Operating System (CentOS) was made available to the general public. This iteration supports both i386 and x86_64 system architectures and also improves several existing functions. But for a business already running CentOS, is this release worth the upgrade? Here’s what you need to know about version 6.5. CentOS got its start in 2004 as an offshoot of the Red Hat Enterprise Linux (RHEL) distribution, or distro, according to the CentOS Blog. It began as an artifact of the CAOS Linux build but gained popularity for its ability to collaborate effectively with other RHEL clones, and in 2006 another distro, Tao Linux, chose to roll its functionality into CentOS. The operating system offers 100 percent binary compatibility with RHEL, meaning it’s almost identical — upgrades to RHEL are typically followed by similar CentOS upgrades within a few months. In the case of version 6.5, Red Hat released its version on November 21, 2013; CentOS followed suit 10 days later. Red Hat and CentOS have the same version numbers and virtually identical features, since CentOS is derived entirely from publicly available Red Hat source code. All branding is removed, however, and Red Hat provides no consumer support. CentOS has made a mark for itself in the business-technology community. By July 2010, the operating system had surpassed Debian as the most popular Linux distribution for web servers around the globe, owning nearly 30 percent of the market. In spite of relinquishing the top spot to Debian in 2012, CentOS remains the most common OS for web-host control panels. Although CentOS receives no support from Red Hat — financially or otherwise — the distribution is known for its superior customer support. In large measure, this stems from an active, interested community that provides feedback on every version. While the bulk of OS development is handled by a small team of experts, a host of system administrators, network administrators and enterprise users help improve each CentOS build. As a result, the distro has earned a reputation for being quickly built, rebuilt, tested and made available on countless mirror networks. As noted by Tecmint, upgrading to version 6.5 from any earlier 6.X version is simple: just run the “yum update” command from the command line. Depending on your network speed, you’ll have to wait 15 to 30 minutes for the process to finish. While no serious issues during upgrading have been reported, it’s always a good idea to back up any critical files or network configuration data. Once the upgrade is complete, run “# cat /etc/redhat-release” to make sure you have the right version. According to the official CentOS wiki, there are several notable improvements in version 6.5, starting with driver updates for Hyper-V and VMware virtualization tools. In addition, Evolution and LibreOffice get an update, while kernel-based virtual machines (KVMs) get improved read-only support for VMDK and VHDX files. Perhaps the most interesting upgrade, however, is for the Precision Time Protocol (PTP). PTP allows local area network (LAN) computers to be synchronized to within 100 nanoseconds under the IEEE-1588 standard. Although previewed in an earlier release, this feature is now fully supported. There are some issues with the release — for example, older AMD video cards are not compatible with the new X server version, and some users report that Ethernet interfaces do not start with the new NetworkManager tool. But overall, these bugs are minor.
Dedicated hosting customers who are running CentOS and who are not subscribed to one of the managed hosting services should check to see whether they are running the latest version of CentOS. Lunarpages customers on shared servers should already be operating on CentOS 6; if not, they will soon be migrated from CentOS 4 to the latest version of the OS. While this release offers better LAN time-stamping support and upgrades to other existing functions, companies won’t suffer by holding on to 6.4 or another 6.X version for a few more months. But based on the reliable build structure and superior technical support channels of CentOS, there’s no reason to wait. This enterprise RHEL clone is popular with good reason, and its newest upgrade is another solid build.
import collections import threading import errors import tokens def ensure_type(required_class, item, *rest): ''' Raises a WrongArgumentTypeError if all the items aren't instances of the required class/classes tuple. ''' if not isinstance(item, required_class): raise errors.WrongArgumentTypeError.build(item, required_class) for thing in rest: if not isinstance(thing, required_class): raise errors.WrongArgumentTypeError.build(thing, required_class) def ensure_args(supplied_args, num_required=0, num_optional=0, is_variadic=False): ''' Enforces the argument format specified by the keyword arguments. This format is: required arguments first, optional arguments next, and a single optional variadic arg last. num_required defaults to 0, num_optional defaults to 0, and is_variadic defaults to False. Raises an IncorrectArgumentCountError if the args don't match the spec. ''' # get the various counts we need to determine if the number of args is good min_args = num_required max_args = float('inf') if is_variadic else num_required + num_optional # determine whether the arg spec was met by the supplied arg list num_supplied = len(supplied_args) if num_supplied < min_args or num_supplied > max_args: raise errors.IncorrectArgumentCountError.build(min_args, max_args, num_supplied, is_variadic=is_variadic) def file_char_iter(f): '''Iterate over an open file one character at a time.''' for line in f: for c in line: yield c def to_string(x): '''Convert an atom to a string as it appears in our language.''' if isinstance(x, bool): return tokens.TRUE if x else tokens.FALSE elif isinstance(x, basestring): # TODO: escape properly return tokens.STRING + x + tokens.STRING return unicode(x) class ThreadSafeCounter: '''When called, returns increasing ints in order.''' def __init__(self, count=0): self.count = count self.lock = threading.Lock() def __call__(self): with self.lock: c = self.count self.count += 1 return c
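For reference, a few throwaway calls showing how these helpers behave; this sketch assumes it runs alongside the project's errors module imported above, and the sample values are made up.

counter = ThreadSafeCounter()
print(counter())  # 0
print(counter())  # 1

ensure_type(int, 1, 2, 3)                                   # silently passes
ensure_args([1, 2], num_required=1, num_optional=1)         # 1 required + 1 optional: ok
ensure_args([1, 2, 3], num_required=1, is_variadic=True)    # variadic tail: ok

try:
    ensure_args([1, 2, 3], num_required=1, num_optional=1)  # one argument too many
except errors.IncorrectArgumentCountError as e:
    print('rejected: %s' % e)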
I am a little late getting this published this week and I apologize! It has been a crazy hectic week for me with the added stress of homecoming week. Alas, here is our Parlipulls for the week. Thanks to Shawn, who is the only one who submitted one this week. This week I started the second arc of a series called “Rose”. I read the first arc last year and enjoyed it thoroughly, especially the great art and colors by Ig Guara. Issues 7 & 8 pick up right where we left off last year (and if you didn’t read it, no worries… each issue recaps the story for new readers). This would be a very quick read if I didn’t continually find myself getting distracted by the artwork. If you are a fan of Coda or The Inheritance Series, this one may be for you.
debug_options={} speed_options={} pythonic_options={} all_compile_options = dict( internal_ast = False, debug = False, print_statements=True, function_argument_checking=False, attribute_checking=False, getattr_support=True, bound_methods=True, descriptors=False, source_tracking=False, line_tracking=False, store_source=False, inline_code=False, operator_funcs=True, number_classes=False, create_locals=False, stupid_mode=False, translator='proto', ) def add_compile_options(parser): global debug_options, speed_options, pythonic_options parser.add_option("--internal-ast", dest="internal_ast", action="store_true", help="Use internal AST parser instead of standard python one" ) parser.add_option("--no-internal-ast", dest="internal_ast", action="store_false", help="Use internal AST parser instead of standard python one" ) parser.add_option("--debug-wrap", dest="debug", action="store_true", help="Wrap function calls with javascript debug code", ) parser.add_option("--no-debug-wrap", dest="debug", action="store_false", ) debug_options['debug'] = True speed_options['debug'] = False parser.add_option("--no-print-statements", dest="print_statements", action="store_false", help="Remove all print statements", ) parser.add_option("--print-statements", dest="print_statements", action="store_true", help="Generate code for print statements", ) speed_options['print_statements'] = False parser.add_option("--no-function-argument-checking", dest = "function_argument_checking", action="store_false", help = "Do not generate code for function argument checking", ) parser.add_option("--function-argument-checking", dest = "function_argument_checking", action="store_true", help = "Generate code for function argument checking", ) speed_options['function_argument_checking'] = False pythonic_options['function_argument_checking'] = True parser.add_option("--no-attribute-checking", dest = "attribute_checking", action="store_false", help = "Do not generate code for attribute checking", ) parser.add_option("--attribute-checking", dest = "attribute_checking", action="store_true", help = "Generate code for attribute checking", ) speed_options['attribute_checking'] = False pythonic_options['attribute_checking'] = True parser.add_option("--no-getattr-support", dest = "getattr_support", action="store_false", help = "Do not support __getattr__()", ) parser.add_option("--getattr-support", dest = "getattr_support", action="store_true", help = "Support __getattr__()", ) speed_options['getattr_support'] = False pythonic_options['getattr_support'] = True parser.add_option("--no-bound-methods", dest = "bound_methods", action="store_false", help = "Do not generate code for binding methods", ) parser.add_option("--bound-methods", dest = "bound_methods", action="store_true", help = "Generate code for binding methods", ) speed_options['bound_methods'] = False pythonic_options['bound_methods'] = True parser.add_option("--no-descriptors", dest = "descriptors", action="store_false", help = "Do not generate code for descriptor calling", ) parser.add_option("--descriptors", dest = "descriptors", action="store_true", help = "Generate code for descriptor calling", ) speed_options['descriptors'] = False pythonic_options['descriptors'] = True parser.add_option("--no-source-tracking", dest = "source_tracking", action="store_false", help = "Do not generate code for source tracking", ) parser.add_option("--source-tracking", dest = "source_tracking", action="store_true", help = "Generate code for source tracking", ) debug_options['source_tracking'] = 
True speed_options['source_tracking'] = False pythonic_options['source_tracking'] = True parser.add_option("--no-line-tracking", dest = "line_tracking", action="store_true", help = "Do not generate code for source tracking on every line", ) parser.add_option("--line-tracking", dest = "line_tracking", action="store_true", help = "Generate code for source tracking on every line", ) debug_options['line_tracking'] = True pythonic_options['line_tracking'] = True parser.add_option("--no-store-source", dest = "store_source", action="store_false", help = "Do not store python code in javascript", ) parser.add_option("--store-source", dest = "store_source", action="store_true", help = "Store python code in javascript", ) debug_options['store_source'] = True pythonic_options['store_source'] = True parser.add_option("--no-inline-code", dest = "inline_code", action="store_false", help = "Do not generate inline code for bool/eq/len", ) parser.add_option("--inline-code", dest = "inline_code", action="store_true", help = "Generate inline code for bool/eq/len", ) speed_options['inline_code'] = True parser.add_option("--no-operator-funcs", dest = "operator_funcs", action="store_false", help = "Do not generate function calls for operators", ) parser.add_option("--operator-funcs", dest = "operator_funcs", action="store_true", help = "Generate function calls for operators", ) speed_options['operator_funcs'] = False pythonic_options['operator_funcs'] = True parser.add_option("--no-number-classes", dest = "number_classes", action="store_false", help = "Do not use number classes", ) parser.add_option("--number-classes", dest = "number_classes", action="store_true", help = "Use classes for numbers (float, int, long)", ) speed_options['number_classes'] = False pythonic_options['number_classes'] = True parser.add_option("--create-locals", dest = "create_locals", action="store_true", help = "Create locals", ) parser.add_option("--no-stupid-mode", dest = "stupid_mode", action="store_false", help = "Doesn't rely on javascriptisms", ) parser.add_option("--stupid-mode", dest = "stupid_mode", action="store_true", help = "Creates minimalist code, relying on javascript", ) parser.add_option("--translator", dest = "translator", default="proto", help = "Specify the translator: proto|dict", ) def set_multiple(option, opt_str, value, parser, **kwargs): for k in kwargs.keys(): setattr(parser.values, k, kwargs[k]) parser.add_option("-d", "--debug", action="callback", callback = set_multiple, callback_kwargs = debug_options, help="Set all debugging options", ) parser.add_option("-O", action="callback", callback = set_multiple, callback_kwargs = speed_options, help="Set all options that maximize speed", ) parser.add_option("--strict", action="callback", callback = set_multiple, callback_kwargs = pythonic_options, help="Set all options that mimic standard python behavior", ) parser.set_defaults(**all_compile_options) def get_compile_options(opts): d = {} for opt in all_compile_options: d[opt] = getattr(opts, opt) return d
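As a brief illustration of how these helpers are consumed, here is a sketch using the standard-library optparse module; the simulated command line is made up.

from optparse import OptionParser

parser = OptionParser(usage='usage: %prog [options] file...')
add_compile_options(parser)

# Simulate "translator -O --print-statements input.py": -O applies the speed
# preset, then the explicit flag switches print statements back on.
opts, args = parser.parse_args(['-O', '--print-statements', 'input.py'])

compile_opts = get_compile_options(opts)
print(compile_opts['print_statements'])  # True  (explicit flag wins)
print(compile_opts['operator_funcs'])    # False (part of the -O speed preset)
print(args)                              # ['input.py']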
Rejuvenated Clottey Says Unification Deal With Cotto Close! Joshua Clottey is an ecstatic man these days. Since conquering Zab Judah last August for the vacant IBF Welterweight title, the powerful warrior from Ghana has had to wait and wait and wait – for the chance to defend his title. You see, fights kept falling through for one reason or another; dates with Paul Williams, Andre Berto and Antonio Margarito could not be made. But, suddenly, in the span of about 24 hours, everything has changed for Joshua Clottey. With their all-action styles, Cotto vs Clottey will undoubtedly be a fantastic IBF/WBO unification battle. Both scored important and impressive wins against Zab Judah and battled Antonio Margarito on just about even terms. So I asked Clottey why he believes he has the edge and will defeat Cotto, and how special and inspiring it would be to compete in a main-event unification match at the mecca of boxing, Madison Square Garden. Just a week ago, Clottey was so distraught over the politics of boxing and his team's inability to produce a world title defense for him that he was seriously considering retiring from boxing to take up his other passion – soccer. Clottey has devoted most of his life to boxing and is at the peak of his powers now. He vehemently desires to achieve much more success and wealth – so he can share it with his family and other dependents back home in Ghana.
#!/usr/bin/python3 import random from humans import Human from cities import City from clubs import Club class Match(): def __init__(self, teamHome, teamAway): self.teamHome = teamHome self.teamAway = teamAway self.homeScore = 0 self.awayScore = 0 self.result = 0 # 0 = unplayed def sim_match(self): homeTier = 0.0 awayTier = 0.0 # add player tiers for player in self.teamHome.players: homeTier += player.tier for player in self.teamAway.players: awayTier += player.tier homeTier = homeTier*random.uniform(0,2) awayTier = awayTier*random.uniform(0,2) # do some randoms.. more chances to score based on your tier # you must have a higher end result than the opponent for to score a goal for goalAtt in range(0, 5): home = random.randint(0, 10) + homeTier away = random.randint(0, 8) + awayTier if home > away: self.homeScore += 1 for goalAtt in range(0, 4): home = random.randint(0, 8) + homeTier away = random.randint(0, 10) + awayTier if away > home: self.awayScore += 1 # 1: home win -- 2: away win -- 3: draw if self.homeScore > self.awayScore: self.result = 1 elif self.awayScore > self.homeScore: self.result = 2 else: self.result = 3 def print_postmatch(self): print(" Home: {homeScore} :{teamHome}\n Away: {awayScore} :{teamAway}\n".format(homeScore=self.homeScore, teamHome=self.teamHome.name, awayScore=self.awayScore, teamAway=self.teamAway.name)) def print_prematch(self): print(" HOME: {0}\n AWAY: {1}\n".format(self.teamHome.name, self.teamAway.name))
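A quick way to exercise the class is with stand-in teams. The Club constructor is not shown here, so the sketch below fakes the only attributes Match actually reads: .name, and .players where each player exposes a numeric .tier.

class _FakePlayer:
    def __init__(self, tier):
        self.tier = tier

class _FakeTeam:
    def __init__(self, name, tiers):
        self.name = name
        self.players = [_FakePlayer(t) for t in tiers]

home = _FakeTeam("Red Star", [0.8, 0.7, 0.9])
away = _FakeTeam("Blue Moon", [0.6, 0.6, 0.7])

match = Match(home, away)
match.print_prematch()
match.sim_match()
match.print_postmatch()
print(match.result)  # 1 = home win, 2 = away win, 3 = draw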
Ranch Fiberglass offers many ways to customize your lid to fit your needs. Below are some of the great optional features that are available for our lids. Please check with your Ranch dealer as some options are not available on certain models.
""" DBus related functionality including the DBus interface and utility functions Copyright: John Stowers, 2006 License: GPLv2 """ import os.path import dbus import dbus.service import logging log = logging.getLogger("DBus") import conduit import conduit.utils as Utils import conduit.Conduit as Conduit import conduit.SyncSet as SyncSet ERROR = -1 SUCCESS = 0 DEBUG_ALL_CALLS = True APPLICATION_DBUS_IFACE="org.conduit.Application" SYNCSET_DBUS_IFACE="org.conduit.SyncSet" CONDUIT_DBUS_IFACE="org.conduit.Conduit" EXPORTER_DBUS_IFACE="org.conduit.Exporter" DATAPROVIDER_DBUS_IFACE="org.conduit.DataProvider" ################################################################################ # DBus API Docs ################################################################################ # # ==== Main Application ==== # Service org.conduit.Application # Interface org.conduit.Application # Object path / # # Methods: # BuildConduit(source, sink) # BuildExporter(self, sinkKey) # ListAllDataProviders # GetDataProvider # NewSyncSet # Quit # # Signals: # DataproviderAvailable(key) # DataproviderUnavailable(key) # # ==== SyncSet ==== # Service org.conduit.SyncSet # Interface org.conduit.SyncSet # Object path /syncset/{dbus, gui, UUID} # # Methods: # AddConduit # DeleteConduit # SaveToXml # RestoreFromXml # # Signals: # ConduitAdded(key) # ConduitRemoved(key) # # ==== Conduit ==== # Service org.conduit.Conduit # Interface org.conduit.Conduit # Object path /conduit/{some UUID} # # Methods: # EnableTwoWaySync # DisableTwoWaySync # IsTwoWay # AddDataprovider # DeleteDataprovider # Sync # Refresh # # Signals: # SyncStarted # SyncCompleted(aborted, error, conflict) # SyncConflict # SyncProgress(progress, completedUIDs) # DataproviderAdded # DataproviderRemoved # # ==== Exporter Conduit ==== # Service org.conduit.Conduit # Interface org.conduit.Exporter # Object path /conduit/{some UUID} # # Methods: # AddData # SinkConfigure # SinkGetInformation # SinkGetConfigurationXml # SinkSetConfigurationXml # # ==== DataProvider ==== # Service org.conduit.DataProvider # Interface org.conduit.DataProvider # Object path /dataprovider/{some UUID} # # Methods: # IsPending # IsConfigured # SetConfigurationXML # GetConfigurationXML # Configure # GetInformation # AddData # # Signals: #All objects currently exported over the bus EXPORTED_OBJECTS = {} class ConduitException(dbus.DBusException): _dbus_error_name = 'org.conduit.ConduitException' class DBusItem(dbus.service.Object): def __init__(self, iface, path): bus_name = dbus.service.BusName(iface, bus=dbus.SessionBus()) dbus.service.Object.__init__(self, bus_name, path) log.debug("DBus Exported: %s" % self.get_path()) def get_path(self): return self.__dbus_object_path__ def _print(self, message): if DEBUG_ALL_CALLS: log.debug("DBus Message from %s: %s" % (self.get_path(), message)) class ConduitDBusItem(DBusItem): def __init__(self, sync_manager, conduit, uuid): DBusItem.__init__(self, iface=CONDUIT_DBUS_IFACE, path="/conduit/%s" % uuid) self.sync_manager = sync_manager self.conduit = conduit self.conduit.connect("sync-started", self._on_sync_started) self.conduit.connect("sync-completed", self._on_sync_completed) self.conduit.connect("sync-conflict", self._on_sync_conflict) self.conduit.connect("sync-progress", self._on_sync_progress) def _on_sync_started(self, cond): if cond == self.conduit: self.SyncStarted() def _on_sync_completed(self, cond, aborted, error, conflict): if cond == self.conduit: self.SyncCompleted(bool(aborted), bool(error), bool(conflict)) def 
_on_sync_progress(self, cond, progress, UIDs): if cond == self.conduit: self.SyncProgress(float(progress), UIDs) def _on_sync_conflict(self, cond, conflict): if cond == self.conduit: self.SyncConflict() # # org.conduit.Conduit # @dbus.service.method(CONDUIT_DBUS_IFACE, in_signature='', out_signature='') def EnableTwoWaySync(self): self._print("EnableTwoWaySync") self.conduit.enable_two_way_sync() @dbus.service.method(CONDUIT_DBUS_IFACE, in_signature='', out_signature='') def DisableTwoWaySync(self): self._print("DisableTwoWaySync") self.conduit.disable_two_way_sync() @dbus.service.method(CONDUIT_DBUS_IFACE, in_signature='', out_signature='b') def IsTwoWay(self): self._print("IsTwoWay") return self.conduit.is_two_way() @dbus.service.method(CONDUIT_DBUS_IFACE, in_signature='ob', out_signature='') def AddDataprovider(self, dp, trySource): self._print("AddDataprovider: %s" % dp) #get the actual dps from their object paths try: dpw = EXPORTED_OBJECTS[str(dp)].dataprovider except KeyError, e: raise ConduitException("Could not locate dataprovider: %s" % e) if not self.conduit.add_dataprovider(dpw): raise ConduitException("Could not add dataprovider: %s" % e) @dbus.service.method(CONDUIT_DBUS_IFACE, in_signature='o', out_signature='') def DeleteDataprovider(self, dp): self._print("DeleteDataprovider: %s" % dp) #get the actual dps from their object paths try: dpw = EXPORTED_OBJECTS[str(dp)].dataprovider except KeyError, e: raise ConduitException("Could not locate dataprovider: %s" % e) if not self.conduit.delete_dataprovider(dpw): raise ConduitException("Could not delete dataprovider: %s" % e) @dbus.service.method(CONDUIT_DBUS_IFACE, in_signature='', out_signature='') def Sync(self): self._print("Sync") self.conduit.sync() @dbus.service.method(CONDUIT_DBUS_IFACE, in_signature='', out_signature='') def Refresh(self): self._print("Refresh") self.conduit.refresh() @dbus.service.signal(CONDUIT_DBUS_IFACE, signature='') def SyncStarted(self): self._print("SyncStarted") @dbus.service.signal(CONDUIT_DBUS_IFACE, signature='bbb') def SyncCompleted(self, aborted, error, conflict): self._print("SyncCompleted (abort:%s error:%s conflict:%s)" % (aborted,error,conflict)) @dbus.service.signal(CONDUIT_DBUS_IFACE, signature='') def SyncConflict(self): self._print("SyncConflict") @dbus.service.signal(CONDUIT_DBUS_IFACE, signature='das') def SyncProgress(self, progress, UIDs): self._print("SyncProgress %s%%\n\t%s" % ((progress*100.0), UIDs)) # # org.conduit.Exporter # @dbus.service.method(EXPORTER_DBUS_IFACE, in_signature='s', out_signature='') def SinkSetConfigurationXml(self, xml): self._print("SinkSetConfigurationXml: %s" % xml) if len(self.conduit.datasinks) != 1: raise ConduitException("Simple exporter must only have one sink") self.conduit.datasinks[0].set_configuration_xml(xml) @dbus.service.method(EXPORTER_DBUS_IFACE, in_signature='', out_signature='') def SinkConfigure(self): self._print("SinkConfigure") if len(self.conduit.datasinks) != 1: raise ConduitException("Simple exporter must only have one sink") dataprovider = self.conduit.datasinks[0] #FIXME Hard-coded GtkUI from conduit.gtkui.WindowConfigurator import WindowConfigurator from conduit.gtkui.ConfigContainer import ConfigContainer configurator = WindowConfigurator(None) container = dataprovider.module.get_config_container( configContainerKlass=ConfigContainer, name=dataprovider.get_name(), icon=dataprovider.get_icon(), configurator=configurator ) configurator.set_containers([container]) configurator.run(container) 
@dbus.service.method(EXPORTER_DBUS_IFACE, in_signature='s', out_signature='b') def AddData(self, uri): self._print("AddData: %s" % uri) if self.conduit.datasource == None: raise ConduitException("Simple exporter must have a source") return self.conduit.datasource.module.add(uri) @dbus.service.method(EXPORTER_DBUS_IFACE, in_signature='', out_signature='a{ss}') def SinkGetInformation(self): self._print("SinkGetInformation") if len(self.conduit.datasinks) != 1: raise ConduitException("Simple exporter must only have one sink") #Need to call get_icon so that the icon_name/path is loaded try: self.conduit.datasinks[0].get_icon() except: log.warn("DBus could not lookup dp icon") info = {} info["name"] = self.conduit.datasinks[0].name info["description"] = self.conduit.datasinks[0].description info["module_type"] = self.conduit.datasinks[0].module_type info["category"] = self.conduit.datasinks[0].category.name info["in_type"] = self.conduit.datasinks[0].get_input_type() info["out_type"] = self.conduit.datasinks[0].get_output_type() info["classname"] = self.conduit.datasinks[0].classname info["key"] = self.conduit.datasinks[0].get_key() info["enabled"] = str( self.conduit.datasinks[0].enabled) info["UID"] = self.conduit.datasinks[0].get_UID() info["icon_name"] = self.conduit.datasinks[0].icon_name info["icon_path"] = self.conduit.datasinks[0].icon_path return info @dbus.service.method(EXPORTER_DBUS_IFACE, in_signature='', out_signature='s') def SinkGetConfigurationXml(self): self._print("SinkGetConfigurationXml") if len(self.conduit.datasinks) != 1: raise ConduitException("Simple exporter must only have one sink") return self.conduit.datasinks[0].get_configuration_xml() class DataProviderDBusItem(DBusItem): def __init__(self, dataprovider, uuid): DBusItem.__init__(self, iface=DATAPROVIDER_DBUS_IFACE, path="/dataprovider/%s" % uuid) self.dataprovider = dataprovider @dbus.service.method(DATAPROVIDER_DBUS_IFACE, in_signature='', out_signature='b') def IsPending(self): self._print("IsPending") return self.dataprovider.module == None @dbus.service.method(DATAPROVIDER_DBUS_IFACE, in_signature='bb', out_signature='b') def IsConfigured(self, isSource, isTwoWay): self._print("IsConfigured") if self.dataprovider.module != None: return self.dataprovider.module.is_configured(isSource, isTwoWay) return False @dbus.service.method(DATAPROVIDER_DBUS_IFACE, in_signature='', out_signature='a{ss}') def GetInformation(self): self._print("GetInformation") #Need to call get_icon so that the icon_name/path is loaded try: self.dataprovider.get_icon() except: log.warn("DBus could not lookup dp icon") info = {} info["name"] = self.dataprovider.name info["description"] = self.dataprovider.description info["module_type"] = self.dataprovider.module_type info["category"] = self.dataprovider.category.name info["in_type"] = self.dataprovider.get_input_type() info["out_type"] = self.dataprovider.get_output_type() info["classname"] = self.dataprovider.classname info["key"] = self.dataprovider.get_key() info["enabled"] = str(self.dataprovider.enabled) info["UID"] = self.dataprovider.get_UID() info["icon_name"] = self.dataprovider.icon_name info["icon_path"] = self.dataprovider.icon_path return info @dbus.service.method(DATAPROVIDER_DBUS_IFACE, in_signature='', out_signature='s') def GetConfigurationXml(self): self._print("GetConfigurationXml") return self.dataprovider.get_configuration_xml() @dbus.service.method(DATAPROVIDER_DBUS_IFACE, in_signature='s', out_signature='') def SetConfigurationXml(self, xml): 
self._print("SetConfigurationXml: %s" % xml) self.dataprovider.set_configuration_xml(xml) @dbus.service.method(DATAPROVIDER_DBUS_IFACE, in_signature='', out_signature='') def Configure(self): self._print("Configure") #FIXME Hard-coded GtkUI from conduit.gtkui.WindowConfigurator import WindowConfigurator from conduit.gtkui.ConfigContainer import ConfigContainer configurator = WindowConfigurator(None) container = self.dataprovider.module.get_config_container( configContainerKlass=ConfigContainer, name=self.dataprovider.get_name(), icon=self.dataprovider.get_icon(), configurator=configurator ) configurator.set_containers([container]) configurator.run(container) @dbus.service.method(DATAPROVIDER_DBUS_IFACE, in_signature='s', out_signature='b') def AddData(self, uri): self._print("AddData: %s" % uri) return self.dataprovider.module.add(uri) class SyncSetDBusItem(DBusItem): def __init__(self, syncSet, name): DBusItem.__init__(self, iface=SYNCSET_DBUS_IFACE, path="/syncset/%s" % name) self.syncSet = syncSet self.syncSet.connect("conduit-added", self._on_conduit_added) self.syncSet.connect("conduit-removed", self._on_conduit_removed) def _on_conduit_added(self, syncset, cond): self.ConduitAdded() def _on_conduit_removed(self, syncset, cond): self.ConduitRemoved() @dbus.service.signal(SYNCSET_DBUS_IFACE, signature='') def ConduitAdded(self): self._print("Emmiting DBus signal ConduitAdded") @dbus.service.signal(SYNCSET_DBUS_IFACE, signature='') def ConduitRemoved(self): self._print("Emmiting DBus signal ConduitRemoved") @dbus.service.method(SYNCSET_DBUS_IFACE, in_signature='o', out_signature='') def AddConduit(self, cond): self._print("AddConduit: %s" % cond) try: c = EXPORTED_OBJECTS[str(cond)].conduit except KeyError, e: raise ConduitException("Could not locate Conduit: %s" % e) self.syncSet.add_conduit(c) @dbus.service.method(SYNCSET_DBUS_IFACE, in_signature='o', out_signature='') def DeleteConduit(self, cond): self._print("DeleteConduit: %s" % cond) try: c = EXPORTED_OBJECTS[str(cond)].conduit except KeyError, e: raise ConduitException("Could not locate Conduit: %s" % e) self.syncSet.remove_conduit(c) @dbus.service.method(SYNCSET_DBUS_IFACE, in_signature='s', out_signature='') def SaveToXml(self, path): self._print("SaveToXml: %s" % path) self.syncSet.save_to_xml(os.path.abspath(path)) @dbus.service.method(SYNCSET_DBUS_IFACE, in_signature='s', out_signature='') def RestoreFromXml(self, path): self._print("RestoreFromXml: %s" % path) self.syncSet.restore_from_xml(os.path.abspath(path)) class DBusInterface(DBusItem): def __init__(self, conduitApplication, moduleManager, typeConverter, syncManager, guiSyncSet): DBusItem.__init__(self, iface=APPLICATION_DBUS_IFACE, path="/") self.conduitApplication = conduitApplication #setup the module manager self.moduleManager = moduleManager self.moduleManager.connect("dataprovider-available", self._on_dataprovider_available) self.moduleManager.connect("dataprovider-unavailable", self._on_dataprovider_unavailable) #type converter and sync manager self.type_converter = typeConverter self.sync_manager = syncManager #export the syncsets new = SyncSetDBusItem(guiSyncSet, "gui") EXPORTED_OBJECTS[new.get_path()] = new self.sync_set = SyncSet.SyncSet(moduleManager,syncManager) new = SyncSetDBusItem(self.sync_set, "dbus") EXPORTED_OBJECTS[new.get_path()] = new #export myself EXPORTED_OBJECTS[self.get_path()] = self def _get_all_dps(self): datasources = self.moduleManager.get_modules_by_type("source") datasinks = self.moduleManager.get_modules_by_type("sink") twoways = 
self.moduleManager.get_modules_by_type("twoway") return datasources + datasinks + twoways def _new_syncset(self): ss = SyncSet.SyncSet( moduleManager=self.moduleManager, syncManager=self.sync_manager ) i = Utils.uuid_string() new = SyncSetDBusItem(ss, i) EXPORTED_OBJECTS[new.get_path()] = new return new def _get_dataprovider(self, key): """ Instantiates a new dataprovider (source or sink), storing it appropriately. @param key: Key of the DP to create @returns: The new DP """ dpw = self.moduleManager.get_module_wrapper_with_instance(key) if dpw == None: raise ConduitException("Could not find dataprovider with key: %s" % key) i = Utils.uuid_string() new = DataProviderDBusItem(dpw, i) EXPORTED_OBJECTS[new.get_path()] = new return new def _get_conduit(self, source=None, sink=None, sender=None): """ Instantiates a new dataprovider (source or sink), storing it appropriately. @param key: Key of the DP to create @returns: The new DP """ if sender == None: raise ConduitException("Invalid DBus Caller") cond = Conduit.Conduit(self.sync_manager) if source != None: if not cond.add_dataprovider(dataprovider_wrapper=source, trySourceFirst=True): raise ConduitException("Error adding source to conduit") if sink != None: if not cond.add_dataprovider(dataprovider_wrapper=sink, trySourceFirst=False): raise ConduitException("Error adding source to conduit") i = Utils.uuid_string() new = ConduitDBusItem(self.sync_manager, cond, i) EXPORTED_OBJECTS[new.get_path()] = new return new def _on_dataprovider_available(self, loader, dataprovider): self.DataproviderAvailable(dataprovider.get_key()) def _on_dataprovider_unavailable(self, loader, dataprovider): self.DataproviderUnavailable(dataprovider.get_key()) def quit(self): #need to call quit() on all sync sets or conduits as they may have been #created here... 
for path in EXPORTED_OBJECTS: if path.startswith("/syncset/"): EXPORTED_OBJECTS[path].syncSet.quit() elif path.startswith("/conduit/"): EXPORTED_OBJECTS[path].conduit.quit() def get_syncset(self): return self.sync_set def get_all_syncsets(self): return [EXPORTED_OBJECTS[path].syncSet for path in EXPORTED_OBJECTS if path.startswith("/syncset/") ] @dbus.service.signal(APPLICATION_DBUS_IFACE, signature='s') def DataproviderAvailable(self, key): self._print("Emmiting DBus signal DataproviderAvailable %s" % key) @dbus.service.signal(APPLICATION_DBUS_IFACE, signature='s') def DataproviderUnavailable(self, key): self._print("Emiting DBus signal DataproviderUnavailable %s" % key) @dbus.service.method(APPLICATION_DBUS_IFACE, in_signature='', out_signature='o') def NewSyncSet(self): self._print("NewSyncSet") return self._new_syncset() @dbus.service.method(APPLICATION_DBUS_IFACE, in_signature='', out_signature='as') def GetAllDataProviders(self): self._print("GetAllDataProviders") return [i.get_key() for i in self._get_all_dps()] @dbus.service.method(APPLICATION_DBUS_IFACE, in_signature='s', out_signature='o') def GetDataProvider(self, key): self._print("GetDataProvider: %s" % key) return self._get_dataprovider(key) @dbus.service.method(APPLICATION_DBUS_IFACE, in_signature='oo', out_signature='o', sender_keyword='sender') def BuildConduit(self, source, sink, sender=None): self._print("BuildConduit (sender: %s:) %s --> %s" % (sender, source, sink)) #get the actual dps from their object paths try: source = EXPORTED_OBJECTS[str(source)].dataprovider sink = EXPORTED_OBJECTS[str(sink)].dataprovider except KeyError, e: raise ConduitException("Could not find dataprovider with key: %s" % e) return self._get_conduit(source, sink, sender) @dbus.service.method(APPLICATION_DBUS_IFACE, in_signature='s', out_signature='o', sender_keyword='sender') def BuildExporter(self, key, sender=None): self._print("BuildExporter (sender: %s:) --> %s" % (sender,key)) source = self._get_dataprovider("FileSource") sink = self._get_dataprovider(key) return self._get_conduit(source.dataprovider, sink.dataprovider, sender) @dbus.service.method(APPLICATION_DBUS_IFACE, in_signature='', out_signature='') def Quit(self): if self.conduitApplication != None: self.conduitApplication.Quit()
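From the client side, the API documented in the comment block above can be exercised over the session bus roughly as follows. This is a hedged sketch: the "FolderTwoWay" dataprovider key and the file URI are placeholders, and real keys should come from GetAllDataProviders().

import dbus

bus = dbus.SessionBus()
app = bus.get_object("org.conduit.Application", "/")

# Keys of every dataprovider the running Conduit instance knows about.
keys = app.GetAllDataProviders(dbus_interface="org.conduit.Application")
print(list(keys))

# Build a simple exporter conduit around one sink and push a file into it.
cond_path = app.BuildExporter("FolderTwoWay",  # placeholder key
                              dbus_interface="org.conduit.Application")
cond = bus.get_object("org.conduit.Conduit", cond_path)
cond.AddData("file:///tmp/example.txt", dbus_interface="org.conduit.Exporter")
cond.Sync(dbus_interface="org.conduit.Conduit")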
Doesn't Everyone Have A Sidecar Motorcycle In Their Front Yard? Does Your Home Have Gargoyles? Where'd All The Beach Sand Go? Ojai. Where Cars Give Way to Trees! Going UP The Hill? Storm Brewing & Surf's Up! Read notes in prior image. Memories of home! Very well done! Wonderful pictures in this gallery Jim. Makes my pictures of our local events look very mundane. Wonderful gallery Jim. Makes me wish very much that I lived somewhere warm :) Voted. Great collection of photos, you have captured the charm & laid-back, easy feeling of Ventura! Very nice collection of Ventura! Moved from Ventura a year and a half ago. I miss it. Enough said. I was born in Ventura and it's so great to see all this stuff, especially Robert. I moved away a couple of years ago and am glad to see he's still around. Your work is amazing!! What a lovely example of local flavor! Many places I recognize but now know I have a few more to explore. Thank you for sharing! Excellent show of a county, full of its own mood.
# -*- coding: utf-8 -*- # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. try: import json import os import types from config import config_fields except ImportError, e: import sys print "ImportError", e sys.exit(1) def getvalue(dictionary, key): if key in dictionary: return dictionary[key] else: return None def splitcsvstring(string): if string is not None: return filter(lambda x: x.strip() != '', string.split(',')) else: return [] def splitverbsubject(string): idx = 0 for char in string: if char.islower(): idx += 1 else: break return string[:idx].lower(), string[idx:].lower() def savecache(apicache, json_file): """ Saves apicache dictionary as json_file, returns dictionary as indented str """ if apicache is None or apicache is {}: return "" apicachestr = json.dumps(apicache, indent=2) with open(json_file, 'w') as cache_file: cache_file.write(apicachestr) return apicachestr def loadcache(json_file): """ Loads json file as dictionary, feeds it to monkeycache and spits result """ f = open(json_file, 'r') data = f.read() f.close() try: apicache = json.loads(data) except ValueError, e: print "Error processing json:", json_file, e return {} return apicache def monkeycache(apis): """ Feed this a dictionary of api bananas, it spits out processed cache """ if isinstance(type(apis), types.NoneType) or apis is None: return {} responsekey = filter(lambda x: 'response' in x, apis.keys()) if len(responsekey) == 0: print "[monkeycache] Invalid dictionary, has no response" return None if len(responsekey) != 1: print "[monkeycache] Multiple responsekeys, chosing first one" responsekey = responsekey[0] verbs = set() cache = {} cache['count'] = getvalue(apis[responsekey], 'count') cache['asyncapis'] = [] apilist = getvalue(apis[responsekey], 'api') if apilist is None: print "[monkeycache] Server response issue, no apis found" for api in apilist: name = getvalue(api, 'name') verb, subject = splitverbsubject(name) apidict = {} apidict['name'] = name apidict['description'] = getvalue(api, 'description') apidict['isasync'] = getvalue(api, 'isasync') if apidict['isasync']: cache['asyncapis'].append(name) apidict['related'] = splitcsvstring(getvalue(api, 'related')) required = [] apiparams = [] for param in getvalue(api, 'params'): apiparam = {} apiparam['name'] = getvalue(param, 'name') apiparam['description'] = getvalue(param, 'description') apiparam['required'] = (getvalue(param, 'required') is True) apiparam['length'] = int(getvalue(param, 'length')) apiparam['type'] = getvalue(param, 'type') apiparam['related'] = splitcsvstring(getvalue(param, 'related')) if apiparam['required']: required.append(apiparam['name']) apiparams.append(apiparam) apidict['requiredparams'] = required apidict['params'] = apiparams if verb not in cache: cache[verb] = {} cache[verb][subject] = apidict verbs.add(verb) 
cache['verbs'] = list(verbs) return cache def main(json_file): """ cachemaker.py creates a precache datastore of all available apis of CloudStack and dumps the precache dictionary in an importable python module. This way we cheat on the runtime overhead of completing commands and help docs. This reduces the overall search and cache_miss (computation) complexity from O(n) to O(1) for any valid cmd. """ f = open("precache.py", "w") f.write("""# -*- coding: utf-8 -*- # Auto-generated code by cachemaker.py # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License.""") f.write("\napicache = %s" % loadcache(json_file)) f.close() if __name__ == "__main__": cache_file = config_fields['core']['cache_file'] print "[cachemaker] Pre-caching using user's cloudmonkey cache", cache_file if os.path.exists(cache_file): main(cache_file) else: print "[cachemaker] Unable to cache apis, file not found", cache_file print "[cachemaker] Run cloudmonkey sync to generate cache"
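Outside of the precache.py generation path, the same helpers can be used directly. A rough sketch follows; the JSON path is illustrative, and the listVirtualMachines lookup assumes a stock CloudStack API listing.

# Load a previously saved listApis response and turn it into the verb/subject
# cache structure used by cloudmonkey.
apis = loadcache(os.path.expanduser('~/.cloudmonkey/listapis.json'))
cache = monkeycache(apis)

savecache(cache, 'apicache.json')   # persist the processed cache as JSON
print(cache['count'])               # number of APIs the server reported
print(sorted(cache['verbs'])[:5])   # first few verbs

# Per-API details are filed under cache[verb][subject]:
vm_api = cache['list']['virtualmachines']   # assumes listVirtualMachines exists
print(vm_api['requiredparams'])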
Crypto EXPO Asia in Singapore. The Crypto EXPO Asia space will be filled with exhibitor booths and seminar halls where everyone will get an opportunity not only to build their network but also to explore the phenomenon of the crypto world itself and get more concrete, detailed information. The event will include not only a large exhibition, panels and a diversity of discussions, but also entertaining magic shows, lucky draws, fantastic prizes and live performances. The range of topics in the seminar halls is quite impressive and includes ICO and white paper projects, digital advertising, PR and marketing in blockchain, ICO due diligence, global capital markets, and the future of ICOs, blockchain and cryptocurrency.
from sys import stderr from dice.app import BasicApp from threading import _start_new_thread def debug(msg): stderr.write(msg+"\n") stderr.flush() class Scheduler: def __init__(self, project): self.__project = project self.__run_stack = [] self.__prepare_stack = [] def schedule_run(self, app): """ Tries to run an application. The algorithm is to get all input apps into the FINISHED state (by calling schedule_run for them if needed) and calling prepare() and run() for the actual app. :param app: :return: """ # debug("schedule run for "+str(app)) if app in self.__run_stack: # debug("stack contains "+str(app)) return self.__run_stack.append(app) # app.connect("statusChanged", self.__process_run_signals(app)) app.status_changed.connect(self.__process_run_signals(app)) _start_new_thread(self.__schedule_run, (app,)) def schedule_prepare(self, app): # TODO app.prepare() def __process_run_signals(self, app): """ Returns a function that handles status changes for the given scheduled app. :param app: :return: """ def status_change_handler(): # debug(str(app)+" changed status to "+app.get_status()) if app.status == BasicApp.FINISHED: try: self.__run_stack.remove(app) except: pass # app.disconnect("statusChanged", status_change_handler) app.status_changed.disconnect(status_change_handler) for output_app in app.output_apps: if output_app in self.__run_stack: self.__try_run(output_app) elif app.status == BasicApp.ERROR: try: self.__run_stack.remove(app) except: pass return status_change_handler def __schedule_run(self, app): """ Scheduling part of schedule_run, extracted to run in its own thread :param app: :return: """ to_schedule = self.__try_run(app) # add the input apps to the scheduler if they are not finished for input_app in to_schedule: self.schedule_run(input_app) def __try_run(self, app): """ Tries to run the given app if all inputs apps of the app are finished. Otherwise it returns a list of all unfinished input apps. :param app: :return: """ all_input_apps_are_finished = True to_schedule = [] for input_app in app.input_apps: if input_app.status != BasicApp.FINISHED: all_input_apps_are_finished = False to_schedule.append(input_app) if all_input_apps_are_finished: # This is the default run behavior: # prepare() if not already prepared and call run() if prepare() was successful prepared = app.status == BasicApp.PREPARED if not prepared: app.status = BasicApp.PREPARING try: if app.prepare(): app.status = BasicApp.PREPARED prepared = True except BaseException as e: app.status = BasicApp.ERROR self.__project.dice.process_exception(e) return [] # do not schedule any more apps if prepared: app.status = BasicApp.RUNNING try: if app.run(): app.status = BasicApp.FINISHED else: app.status = BasicApp.ERROR except BaseException as e: app.status = BasicApp.ERROR self.__project.dice.process_exception(e) return [] # do not schedule any more apps else: # Set on WAITING. If called by schedule_run, all apps in to_schedule will be scheduled as well. # This will cause __process_run_signals to call __try_run again as needed. app.status = BasicApp.WAITING return to_schedule
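A minimal synchronous sketch of the ordering rule that schedule_run and __try_run implement: an app only gets prepare()/run() once every one of its input apps is FINISHED. StubApp below is invented for illustration and mimics just the few BasicApp attributes used here; the real Scheduler does this asynchronously via threads and status_changed signals.

class StubApp(object):
    """Hypothetical stand-in for BasicApp, for illustration only."""
    FINISHED = "finished"

    def __init__(self, name, input_apps=()):
        self.name = name
        self.input_apps = list(input_apps)
        self.status = None

    def prepare(self):
        return True

    def run(self):
        self.status = StubApp.FINISHED
        return True

def run_when_inputs_finished(app, order):
    # Same dependency rule as Scheduler.__try_run, but synchronous and recursive:
    # finish every unfinished input app first, then prepare() and run() the app.
    for input_app in app.input_apps:
        if input_app.status != StubApp.FINISHED:
            run_when_inputs_finished(input_app, order)
    if app.prepare() and app.run():
        order.append(app.name)

reader = StubApp("reader")
solver = StubApp("solver", input_apps=[reader])
writer = StubApp("writer", input_apps=[solver])

order = []
run_when_inputs_finished(writer, order)
# order == ["reader", "solver", "writer"]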
The Dynamite Blues Band: 3rd place winner of the European Blues Challenge in the Azores, 2019. Sean Webster will be part of Johan Derksen's theatre tour, “The Sound of the Blues & Americana”. The tour starts in September 2019. Kevin Burt will be in Europe in October 2019. He will do some solo shows and will also be playing with the band Blind B’ & the Visionairs.
import os import subprocess import json import arrow from flask import ( Flask, render_template, abort, request, jsonify, g ) from flask.ext.assets import Environment # from werkzeug.debug import get_current_traceback from werkzeug.contrib.cache import SimpleCache cache = SimpleCache() from linkmanager import settings from linkmanager.db import DataBase app = Flask(__name__) assets = Environment(app) if settings.SERVER: var_path = '/var/cache/linkmanager' if not os.path.exists(var_path): os.makedirs(var_path, mode=0o755) static_path = os.path.join(var_path, 'static') if not os.path.exists(static_path): os.symlink(assets.directory, static_path) assets.directory = static_path assets.url = assets.url[1:] db = DataBase() db.editmode = settings.EDITMODE def read_only(func): """ Decorator : get an Unauthorize 403 when read only's settings is True. """ def wrapper(): if settings.READ_ONLY: return abort(403) return func() return wrapper def is_server(func): """ Decorator : get an Unauthorize 403 when server settings is True """ def wrapper(): if settings.SERVER: return abort(403) return func() return wrapper def launch_browser(BROWSER=False): subprocess.call( 'sleep 0.5;nohup %s http://127.0.0.1:%s/ &' % ( BROWSER, settings.HTTP_PORT ), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True ) @app.route("/") def index(): return render_template( 'index.html', DEBUG=settings.DEBUG, SERVER=settings.SERVER, READ_ONLY=settings.READ_ONLY, EDITMODE=settings.EDITMODE, DELETEDIALOG=settings.DELETEDIALOG, nb_links=len(db) ) # try: # error # except Exception: # track = get_current_traceback( # skip=1, show_hidden_frames=True, # ignore_system_exceptions=False # ) # track.log() # abort(500) @read_only @is_server @app.route("/editmode", methods=['GET', 'POST']) def editmode(): if request.method == 'GET': return jsonify({'editmode': db.editmode}) db.editmode = not db.editmode return jsonify({'editmode': db.editmode}) @read_only @app.route("/add", methods=['POST']) def add(): fixture = {} link = request.form['link'] fixture[link] = { "tags": request.form['tags'].split(), "priority": request.form['priority'], "description": request.form['description'], "title": request.form['title'], "init date": str(arrow.now()) } result = db.add_link(json.dumps(fixture)) return jsonify({'is_add': result}) @read_only @app.route("/update", methods=['POST']) def update(): fixture = {} link = request.form['link'] if request.form['link'] != request.form['newlink']: result = db.delete_link(request.form['link']) if not result: return jsonify({'is_update': False}) link = request.form['newlink'] old_link = db.get_link_properties(link) fixture[link] = { "tags": request.form['tags'].split(), "priority": request.form['priority'], "description": request.form['description'], "title": request.form['title'], "init date": old_link['init date'], "update date": str(arrow.now()) } if request.form['link'] != request.form['newlink']: fixture[link]["init date"] = str(arrow.now()) fixture[link]["update date"] = old_link['update date'] result = db.add_link(json.dumps(fixture)) return jsonify({'is_update': result}) @read_only @app.route("/delete", methods=['POST']) def delete(): result = db.delete_link(request.form['link']) return jsonify({'is_delete': result}) @app.route("/search") def search(): results = {} try: tags = next(request.args.items())[0].split() links = db.sorted_links(*tags) except: links = db.sorted_links() results = {} for l in links: properties = db.get_link_properties(l) results[l] = properties return jsonify(**results) 
@app.route("/suggest") def suggest(): tags = request.args.get('tags') if not tags: return jsonify({}) keywords = tags.split() last_keyword = keywords[len(keywords) - 1] str_suggestion = ' '.join(keywords[:-1]) suggestions = {} for s in db.complete_tags(last_keyword): if s not in keywords: suggestions[str_suggestion + ' ' + s] = 10 return jsonify(**suggestions) def run(browser=None): BROWSER = settings.BROWSER if browser: BROWSER = browser if os.environ.get('WERKZEUG_RUN_MAIN') != 'true': launch_browser(BROWSER) app.debug = settings.DEBUG app.run(host=settings.HTTP_HOST, port=settings.HTTP_PORT) settings.set_user_conf(WEBAPP=['EDITMODE', db.editmode]) if __name__ == '__main__': app.debug = settings.DEBUG app.run(host=settings.HTTP_HOST, port=settings.HTTP_PORT)
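A small hedged sketch of exercising the routes above with Flask's built-in test client; the link data is invented for illustration, and it assumes a writable database with READ_ONLY disabled in the settings.

# Hypothetical interaction with the routes above, using Flask's test client.
client = app.test_client()

resp = client.post("/add", data={
    "link": "http://example.org",
    "tags": "python flask",
    "priority": "5",
    "description": "an example link",
    "title": "Example",
})
# resp carries a JSON body like {"is_add": true}

resp = client.get("/search", query_string={"python": ""})
# JSON object keyed by link URL; each value holds the stored properties.

resp = client.get("/suggest", query_string={"tags": "py"})
# JSON object of tag completions built from db.complete_tags("py").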
For Plaintiff: Robert E. Flaherty, Esq.
For Defendant: Wyatt A. Brochu, Esq.

In this zoning appeal, Henry W. Archetto, Maria A. Archetto, Paul H. Archetto, Linda C. Archetto, and Maria A. Archetto-Hickman (collectively, Appellants) seek judicial review of a decision of the Zoning Board of Review of the Town of Jamestown (Board). The decision approved Defendant Christian R. Smith's (Applicant or Mr. Smith) Application for a dimensional variance. Jurisdiction is pursuant to G.L. 1956 § 45-24-69.

The plat plan covers an extensive area at the northern end of the island. At the time this plat plan was drawn, all of Conanicut Park was owned by the Conanicut Land Company. It is clear that the company's intention was to develop Conanicut Park into a residential neighborhood. The plat plan reveals a subdivision including more than 2,000 lots, many streets, parks, groves, ponds, and even an area labeled 'Steamboat Landing' . . . Most of Conanicut Park remains undeveloped today . . . [A] great many of the streets shown on the plat plan do not exist, [and] . . . Conanicut Park is actually a relatively heavily wooded area with considerable brush and briar . . . [D]evelopment of this plat according to the plat plan was halted in the late 1800s because of a contaminated well and an outbreak of typhoid fever among the residents of that time. Gammons v. Caswell, 447 A.2d 361, 362-63 (R.I. 1982).

The Applicant's Property is bordered by a paper street called Circuit Avenue. Circuit Avenue has access to East Shore Road via another paper street called Providence Avenue. (Ex. 8D.) Although Providence Avenue traverses East Shore Road, this Decision is concerned only with the westerly portion of that paper street. Said portion is forty feet wide and abuts several properties, one of which belongs to Appellants. The Appellants' property is described as Lot No. 184 on Tax Assessor's Plat No. 1 (the Archetto property). (Ex. 8C and Tr. at 13, July 28, 2015 (Tr. I).) Currently, in order to access the Archetto property, Appellants use a driveway that is located entirely within the area delineated as Providence Avenue.

The hearings were held on July 28, 2015 (Tr. I), September 22, 2015 (Tr. II), and November 17, 2015 (Tr. III). Mr. Smith testified on his own behalf (Tr. I at 12-25), Attorney Robert E. Flaherty spoke on behalf of Mr. Henry Archetto (Tr. III at 11-22), and abutter Paul Sullivan also testified (Tr. III at 23-24). At the conclusion of the hearing, the Board unanimously voted in favor of the Applicant and granted the dimensional variance. Additional facts will be supplied in the analysis portion of this Decision as needed.
# -*- coding: utf-8 -*- # unicode color codes OKGREEN = '\033[92m' NOTOKRED = '\033[91m' OKBLUE = '\033[94m' WARNING = '\033[93m' HEADER = '\033[95m' ENDC = '\033[0m' from lib.sforce.base import SforceBaseClient from suds import WebFault from lib.sforce.partner import SforcePartnerClient from lib.sforce.metadata import SforceMetadataClient from lib.sforce.apex import SforceApexClient from lib.sforce.tooling import SforceToolingClient from optparse import OptionParser import lib.mm_util as mm_util import time import os import sys # Adds an option to command line to clean up all transactions and mappings on start # for dev purposes only. parser = OptionParser() parser.add_option("-u", "--user", dest="user", type="string", help="Salesforce username") parser.add_option("-p", "--password", dest="password", type="string", help="Salesforce password") parser.add_option("-t", "--token", dest="token", type="string", help="Salesforce token") parser.add_option("-s", "--apex-script", dest="apexscriptfilename", type="string", help="Apex code to execute") (options, args) = parser.parse_args() missing_args = False error_log = '\n'+NOTOKRED+' ✗'+ENDC+' Errors found \n\n' if options.user == None: missing_args = True error_log += " ~ Salesforce username is required \n" if options.password == None: missing_args = True error_log += " ~ Salesforce password is required \n" if options.apexscriptfilename == None: missing_args = True error_log += " ~ Apex script filename is required \n" if missing_args: print error_log else: print ' \n🏁 Starting apex execution \n ' print '- Loading partner WSDL' try: wsdl_location = os.path.join(mm_util.WSDL_PATH, 'partner.xml') client = SforcePartnerClient( wsdl_location, apiVersion=None, environment='production', sid=None, metadata_server_url=None, server_url=None) print OKGREEN+'✓'+ENDC+' WSDL loaded \n ' except Exception, e: print '\n'+NOTOKRED+'✗'+ENDC+' Unable to load the WSDL ' print e.message sys.exit() try: # login using partner wsdl print '- Authenticating' # sometimes password and token are provided together. # token parameter is not required. token_safe = '' if options.token: token_safe = options.token client.login(options.user,options.password,token_safe) # use token with apex wsdl apex_wsdl_location = os.path.join(mm_util.WSDL_PATH, 'apex.xml') apex_client = SforceApexClient( apex_wsdl_location, apiVersion=mm_util.SFDC_API_VERSION, environment='production', sid=client.getSessionId(), metadata_server_url=client.getMetadaServerUrl(), server_url=mm_util.get_sfdc_endpoint_by_type('enterprise')) print OKGREEN+'✓'+ENDC+' Authentication succesful. \n ' except Exception, e: print '\n'+NOTOKRED+'✗'+ENDC+' Error during authentication ' print e.message sys.exit() try: print '- Opening the file' # open script file f = open(options.apexscriptfilename, "r") apex_code = f.read() print OKGREEN+'✓'+ENDC+' File loaded succesfully. \n ' except Exception, e: print '\n'+NOTOKRED+'✗'+ENDC+' Error found reading the file ' print e.message sys.exit() try: # Execute code print '- Executing the script' t0 = time.clock() apex_execution = apex_client.executeAnonymous({"body":apex_code}) if apex_execution.success: print OKGREEN+'✓'+ENDC+' Script executed succesfully 🍻 \n ' print 'Code executed in '+str(time.clock() - t0)+ ' seconds. 
\n' else: print NOTOKRED+'✗'+ENDC+' Errors found: ' if apex_execution.exceptionMessage: print apex_execution.exceptionMessage if apex_execution.compileProblem: print 'Compilation error: '+apex_execution.compileProblem print 'Line: '+str(apex_execution.line) except Exception, e: #logger.error(str(e.message)) print '\n'+NOTOKRED+'✗'+ENDC+' Errors found ' print e.message sys.exit()
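As a small, hedged refinement of the timing and error handling above, the same executeAnonymous call can be wrapped in a reusable helper; time.time() is used for wall-clock timing because time.clock() measures CPU time on some platforms and is deprecated in newer Python. apex_client is assumed to be the authenticated SforceApexClient constructed above, and the attribute names (success, compileProblem, exceptionMessage, line) are the ones already used in this script.

import time

def run_apex_file(apex_client, path):
    """Read an Apex script from disk and execute it anonymously.

    Returns a (success, elapsed_seconds) tuple. Assumes apex_client is an
    already-authenticated SforceApexClient, as built above.
    """
    with open(path, "r") as f:
        apex_code = f.read()
    started = time.time()
    execution = apex_client.executeAnonymous({"body": apex_code})
    elapsed = time.time() - started
    if not execution.success:
        if execution.compileProblem:
            print("Compilation error: %s (line %s)"
                  % (execution.compileProblem, execution.line))
        elif execution.exceptionMessage:
            print(execution.exceptionMessage)
    return execution.success, elapsed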
Priced below KBB Fair Purchase Price! 2016 Ford Mustang GT, 5.0L V8 Ti-VCT, Gray, Mustang GT premium package, 2D Coupe, ABS brakes, Compass, Electronic Stability Control, Emergency communication system, Illuminated entry, Low tire pressure warning, Remote keyless entry, Traction control. Never pay too much for a new car again! Awards: * 2016 KBB.com Brand Image Awards. Why pay thousands more at those big-box dealerships when you can get the car of your dreams right here at Auto City, your local hometown Hampton Roads dealership since 1989!
README = """\
<u><b>1) Bluetooth</b></u>

Tested successfully with Bluetooth software: WIDCOMM Bluetooth Software 5.1.0.1100.
5.1.0.1100 is not the latest version, but it is the most versatile one and, in a patched
version, works with most Bluetooth adapters. See
<a href="http://forum.gsmhosting.com/vbb/forumdisplay.php?f=237">this thread</a>
for help with the patched WIDCOMM Bluetooth Software 5.1.0.1100.
(Restarting the PC, or right-clicking the Bluetooth icon in the task bar and
stopping/starting the Bluetooth device, can help.)

On the remote, to activate discoverable mode, press "start+enter" simultaneously.
On the PC choose "Next (no code)".

Check in "Device Manager" / "Human Interface Devices" that the PS3 Remote appears as
"HID-compliant game controller". If instead it appears as "HID Keyboard Device" under
"Keyboards", delete it, then right-click the Bluetooth icon in the task bar and
stop/start the Bluetooth device to force a new device detection. This time it should
appear as "HID-compliant game controller".

<u><b>2) Plugin</b></u>

This plugin generates:
<ul>
<li>ENDURING events named like "HID.Eject"</li>
</ul>
and a lot of additional NORMAL events for:
<ul>
<li>short click on the remote, event names end with ".S", e.g. "HID.Eject.S"</li>
<li>long click on the remote, event names end with ".L"</li>
<li>double click on the remote, event names end with ".D"</li>
</ul>
and special, individually selectable events:
<ul>
<li>"Sleep" when the remote is not used</li>
<li>"Hibernate" when the remote is not used for a long time (also puts the remote into low-power mode if using the Widcomm Bluetooth stack)</li>
<li>"WakeUp" for the first event after "Sleep" or "Hibernate"</li>
<li>"Zone.X" where X refers to the key zone on the remote (see the remote's paper manual); the event is generated when a new key is pressed in another zone. Each remote key belongs to one zone, except the key with the strange symbol below the directional pad. This is by design.</li>
<li>"Release" can be generated for each release of each key.</li>
</ul>

Of course, none of these additional events are strictly needed; the same thing can be done
through EventGhost configuration, but it is far simpler to have these events ready to use
than to play with timers inside EventGhost.

This remote can generate events when 2 keys are pressed simultaneously. In this case the
generated event code is a hexadecimal value. Note: some key combinations generate the same
event. This is a limitation of the remote.

After the "Hibernate" period expires, the remote will be put into a low-power (SNIFF) mode.
It may take a few seconds for the first button press to be registered in this mode. The
plugin will also automatically re-detect the PS3 remote after being in standby mode.
""" eg.RegisterPlugin( name = "PlayStation 3 Bluetooth Remote", author = "Thierry Couquillou, Tim Delaney", version = "3.0.0", kind = "remote", url="http://www.eventghost.net/forum/viewtopic.php?t=640", description = "Hardware plugin for the PS3 Bluetooth Remote (based on the HID code of Bartman)", canMultiLoad = True, help = README, ) import itertools import time import binascii import ctypes import _winreg import sys import threading import win32con import win32event import win32file import wx import wx.lib.mixins.listctrl as listmix from ctypes import Structure, Union, c_byte, c_ubyte, c_char, c_int, c_long, c_ulong, c_ushort, c_wchar from ctypes import pointer, byref, sizeof, POINTER from ctypes.wintypes import ULONG, BOOLEAN, BOOL class Ps3Remote: button = {} button["000000FFFFFFFFFFFF00"]= "Release" button["00000016FFFFFFFFFF01"]= "Eject" button["00000064FFFFFFFFFF01"]= "Audio" button["00000065FFFFFFFFFF01"]= "Angle" button["00000063FFFFFFFFFF01"]= "Subtitle" button["00000000FFFFFFFFFF01"]= "Num1" button["00000001FFFFFFFFFF01"]= "Num2" button["00000002FFFFFFFFFF01"]= "Num3" button["00000003FFFFFFFFFF01"]= "Num4" button["00000004FFFFFFFFFF01"]= "Num5" button["00000005FFFFFFFFFF01"]= "Num6" button["00000006FFFFFFFFFF01"]= "Num7" button["00000007FFFFFFFFFF01"]= "Num8" button["00000008FFFFFFFFFF01"]= "Num9" button["0000000FFFFFFFFFFF01"]= "Clear" button["00000009FFFFFFFFFF01"]= "Num0" button["00000028FFFFFFFFFF01"]= "Time" button["00000081FFFFFFFFFF01"]= "Red" button["00000082FFFFFFFFFF01"]= "Green" button["00000083FFFFFFFFFF01"]= "Yellow" button["00000080FFFFFFFFFF01"]= "Blue" button["00000070FFFFFFFFFF01"]= "Display" button["0000001AFFFFFFFFFF01"]= "TopMenu" button["00000040FFFFFFFFFF01"]= "PopUpMenu" button["0000000EFFFFFFFFFF01"]= "Return" button["10000054FFFFFFFFFF01"]= "Up" button["300000FFFFFFFFFFFF01"]= "RightUp" button["20000055FFFFFFFFFF01"]= "Right" button["600000FFFFFFFFFFFF01"]= "RightDown" button["40000056FFFFFFFFFF01"]= "Down" button["C00000FFFFFFFFFFFF01"]= "LeftDown" button["80000057FFFFFFFFFF01"]= "Left" button["900000FFFFFFFFFFFF01"]= "LeftUp" button["0000080BFFFFFFFFFF01"]= "Enter" button["0010005CFFFFFFFFFF01"]= "Triangle" button["0020005DFFFFFFFFFF01"]= "Circle" button["0080005FFFFFFFFFFF01"]= "Square" button["0040005EFFFFFFFFFF01"]= "Cross" button["0004005AFFFFFFFFFF01"]= "L1" button["00010058FFFFFFFFFF01"]= "L2" button["02000051FFFFFFFFFF01"]= "L3" button["00000143FFFFFFFFFF01"]= "Zarbi" button["01000050FFFFFFFFFF01"]= "Select" button["08000053FFFFFFFFFF01"]= "Start" button["0008005BFFFFFFFFFF01"]= "R1" button["00020059FFFFFFFFFF01"]= "R2" button["04000052FFFFFFFFFF01"]= "R3" button["00000033FFFFFFFFFF01"]= "Scan-" button["00000032FFFFFFFFFF01"]= "Play" button["00000034FFFFFFFFFF01"]= "Scan+" button["00000030FFFFFFFFFF01"]= "Prev" button["00000038FFFFFFFFFF01"]= "Stop" button["00000031FFFFFFFFFF01"]= "Next" button["00000060FFFFFFFFFF01"]= "SlowStep-" button["00000039FFFFFFFFFF01"]= "Pause" button["00000061FFFFFFFFFF01"]= "SlowStep+" zone = {} zone["000000FFFFFFFFFFFF00"]= "none" zone["00000016FFFFFFFFFF01"]= "Zone.A1" zone["00000064FFFFFFFFFF01"]= "Zone.A1" zone["00000065FFFFFFFFFF01"]= "Zone.A1" zone["00000063FFFFFFFFFF01"]= "Zone.A1" zone["00000000FFFFFFFFFF01"]= "Zone.A2" zone["00000001FFFFFFFFFF01"]= "Zone.A2" zone["00000002FFFFFFFFFF01"]= "Zone.A2" zone["00000003FFFFFFFFFF01"]= "Zone.A2" zone["00000004FFFFFFFFFF01"]= "Zone.A2" zone["00000005FFFFFFFFFF01"]= "Zone.A2" zone["00000006FFFFFFFFFF01"]= "Zone.A2" zone["00000007FFFFFFFFFF01"]= "Zone.A2" 
zone["00000008FFFFFFFFFF01"]= "Zone.A2" zone["0000000FFFFFFFFFFF01"]= "Zone.A2" zone["00000009FFFFFFFFFF01"]= "Zone.A2" zone["00000028FFFFFFFFFF01"]= "Zone.A2" zone["00000081FFFFFFFFFF01"]= "Zone.A3" zone["00000082FFFFFFFFFF01"]= "Zone.A3" zone["00000083FFFFFFFFFF01"]= "Zone.A3" zone["00000080FFFFFFFFFF01"]= "Zone.A3" zone["00000070FFFFFFFFFF01"]= "Zone.A3" zone["0000001AFFFFFFFFFF01"]= "Zone.A3" zone["00000040FFFFFFFFFF01"]= "Zone.A3" zone["0000000EFFFFFFFFFF01"]= "Zone.A3" zone["10000054FFFFFFFFFF01"]= "Zone.Pad" zone["300000FFFFFFFFFFFF01"]= "Zone.Pad" zone["20000055FFFFFFFFFF01"]= "Zone.Pad" zone["600000FFFFFFFFFFFF01"]= "Zone.Pad" zone["40000056FFFFFFFFFF01"]= "Zone.Pad" zone["C00000FFFFFFFFFFFF01"]= "Zone.Pad" zone["80000057FFFFFFFFFF01"]= "Zone.Pad" zone["900000FFFFFFFFFFFF01"]= "Zone.Pad" zone["0000080BFFFFFFFFFF01"]= "Zone.Pad" zone["0010005CFFFFFFFFFF01"]= "Zone.B1" zone["0020005DFFFFFFFFFF01"]= "Zone.B1" zone["0080005FFFFFFFFFFF01"]= "Zone.B1" zone["0040005EFFFFFFFFFF01"]= "Zone.B1" zone["0004005AFFFFFFFFFF01"]= "Zone.B2" zone["00010058FFFFFFFFFF01"]= "Zone.B2" zone["02000051FFFFFFFFFF01"]= "Zone.B2" zone["00000143FFFFFFFFFF01"]= "none" zone["01000050FFFFFFFFFF01"]= "Zone.B2" zone["08000053FFFFFFFFFF01"]= "Zone.B2" zone["0008005BFFFFFFFFFF01"]= "Zone.B2" zone["00020059FFFFFFFFFF01"]= "Zone.B2" zone["04000052FFFFFFFFFF01"]= "Zone.B2" zone["00000033FFFFFFFFFF01"]= "Zone.C" zone["00000032FFFFFFFFFF01"]= "Zone.C" zone["00000034FFFFFFFFFF01"]= "Zone.C" zone["00000030FFFFFFFFFF01"]= "Zone.C" zone["00000038FFFFFFFFFF01"]= "Zone.C" zone["00000031FFFFFFFFFF01"]= "Zone.C" zone["00000060FFFFFFFFFF01"]= "Zone.C" zone["00000039FFFFFFFFFF01"]= "Zone.C" zone["00000061FFFFFFFFFF01"]= "Zone.C" class Text: manufacturer = "Manufacturer" deviceName = "Device Name" connected = "Connected" eventName = "Event prefix (optional):" yes = "Yes" no = "No" eventsSettings = "Remote Events Settings" enduringEvents = "Trigger enduring events for buttons" rawDataEvents = "Use raw Data as event name" ps3Settings = "PS3 Remote Events Settings" ps3DataEvents = "Use ps3 Remote Key as event name" ps3Release = "Generate ps3 Remote Release event" ps3Zone = "Generate ps3 Remote Zone event" shortKeyTime = "Short press if lower than" longKeyTime = "Long press if greater than" sleepTime = "Sleep event generated after" hibernateTime = "Hibernate event generated after" seconds = "seconds" noOtherPort = "Use selected device only if connected to current port" errorFind = "Error finding HID device: " errorOpen = "Error opening HID device: " errorRead = "Error reading HID device: " errorRetrieval = "Error getting HID device info." errorMultipleDevices = "Multiple devices found. Don't know which to use." errorInvalidDataIndex = "Found data index not defined as button or control value." 
vendorID = "Vendor ID " enteredLowPower = "%s entered low-power mode" exitedLowPower = "%s exited low-power mode" #structures for ctypes class GUID(Structure): _fields_ = [ ("Data1", c_ulong), ("Data2", c_ushort), ("Data3", c_ushort), ("Data4", c_byte * 8) ] class SP_DEVICE_INTERFACE_DATA(Structure): _fields_ = [("cbSize", c_ulong), ("InterfaceClassGuid", GUID), ("Flags", c_ulong), ("Reserved", POINTER(ULONG)) ] class SP_DEVICE_INTERFACE_DETAIL_DATA_A(Structure): _fields_ = [("cbSize", c_ulong), ("DevicePath", c_char * 255) ] class HIDD_ATTRIBUTES(Structure): _fields_ = [("cbSize", c_ulong), ("VendorID", c_ushort), ("ProductID", c_ushort), ("VersionNumber", c_ushort) ] class HIDP_CAPS(Structure): _fields_ = [ ("Usage", c_ushort), ("UsagePage", c_ushort), ("InputReportByteLength", c_ushort), ("OutputReportByteLength", c_ushort), ("FeatureReportByteLength", c_ushort), ("Reserved", c_ushort * 17), ("NumberLinkCollectionNodes", c_ushort), ("NumberInputButtonCaps", c_ushort), ("NumberInputValueCaps", c_ushort), ("NumberInputDataIndices", c_ushort), ("NumberOutputButtonCaps", c_ushort), ("NumberOutputValueCaps", c_ushort), ("NumberOutputDataIndices", c_ushort), ("NumberFeatureButtonCaps", c_ushort), ("NumberFeatureValueCaps", c_ushort), ("NumberFeatureDataIndices", c_ushort) ] class HIDP_CAPS_UNION(Union): class HIDP_BUTTON_CAPS_RANGE(Structure): _fields_ = [ ("UsageMin", c_ushort), ("UsageMax", c_ushort), ("StringMin", c_ushort), ("StringMax", c_ushort), ("DesignatorMin", c_ushort), ("DesignatorMax", c_ushort), ("DataIndexMin", c_ushort), ("DataIndexMax", c_ushort) ] class HIDP_BUTTON_CAPS_NOT_RANGE(Structure): _fields_ = [ ("Usage", c_ushort), ("Reserved1", c_ushort), ("StringIndex", c_ushort), ("Reserved2", c_ushort), ("DesignatorIndex", c_ushort), ("Reserved3", c_ushort), ("DataIndex", c_ushort), ("Reserved4", c_ushort) ] _fields_ = [ ("Range", HIDP_BUTTON_CAPS_RANGE), ("NotRange", HIDP_BUTTON_CAPS_NOT_RANGE) ] class HIDP_BUTTON_CAPS(Structure): _fields_ = [ ("UsagePage", c_ushort), ("ReportID", c_char), ("IsAlias", BOOLEAN), ("BitField", c_ushort), ("LinkCollection", c_ushort), ("LinkUsage", c_ushort), ("LinkUsagePage", c_ushort), ("IsRange", BOOLEAN), ("IsStringRange", BOOLEAN), ("IsDesignatorRange", BOOLEAN), ("IsAbsolute", BOOLEAN), ("Reserved", c_ulong * 10), ("Info", HIDP_CAPS_UNION) ] class HIDP_VALUE_CAPS(Structure): _fields_ = [ ("UsagePage", c_ushort), ("ReportID", c_char), ("IsAlias", BOOLEAN), ("BitField", c_ushort), ("LinkCollection", c_ushort), ("LinkUsage", c_ushort), ("LinkUsagePage", c_ushort), ("IsRange", BOOLEAN), ("IsStringRange", BOOLEAN), ("IsDesignatorRange", BOOLEAN), ("IsAbsolute", BOOLEAN), ("HasNull", BOOLEAN), ("Reserved", c_char), ("BitSize", c_ushort), ("ReportCount", c_ushort), ("Reserved2", c_ushort * 5), ("UnitsExp", c_ulong), ("Units", c_ulong), ("LogicalMin", c_long), ("LogicalMax", c_long), ("PhysicalMin", c_long), ("PhysicalMax", c_long), ("Info", HIDP_CAPS_UNION) ] class HIDP_DATA(Structure): class HIDP_DATA_VALUE(Union): _fields_ = [ ("RawValue", c_ulong), ("On", BOOLEAN), ] _fields_ = [ ("DataIndex", c_ushort), ("Reserved", c_ushort), ("Data", HIDP_DATA_VALUE) ] # Flags controlling what is included in the device information set built # by SetupDiGetClassDevs DIGCF_DEFAULT = 0x00000001 # only valid with DIGCF_DEVICEINTERFACE DIGCF_PRESENT = 0x00000002 DIGCF_ALLCLASSES = 0x00000004 DIGCF_PROFILE = 0x00000008 DIGCF_DEVICEINTERFACE = 0x00000010 #constants to identify the device info DEVICE_PATH = 0 VENDOR_ID = 1 VENDOR_STRING = 2 PRODUCT_ID = 3 
PRODUCT_STRING = 4 VERSION_NUMBER= 5 BLUETOOTH_ADDRESS = 6 BLUETOOTH_LINK_MODE = MAX_INDEX = 7 #link mode ( LINK_MODE_NORMAL, LINK_MODE_HOLD, LINK_MODE_SNIFF, LINK_MODE_PARK, ) = xrange(4) # See if we've got widcomm - if not, we won't be changing the link mode ALLOW_CANCEL_SNIFF = True try: widcommDLL = ctypes.cdll.widcommsdk except WindowsError: widcommDLL = None else: IsStackServerUp = getattr(widcommDLL, '?IsStackServerUp@CBtIf@@QAEHXZ') IsStackServerUp.restype = BOOL if not IsStackServerUp(): widcommDLL = None if widcommDLL is None: def set_sniff_mode(bd_addr): return False def cancel_sniff_mode(bd_addr): return False def read_link_mode(bd_addr): return None else: SetSniffMode = getattr(widcommDLL, '?SetSniffMode@CBtIf@@SAHQAE@Z') SetSniffMode.restype = BOOL CancelSniffMode = getattr(widcommDLL, '?CancelSniffMode@CBtIf@@SAHQAE@Z') CancelSniffMode.restype = BOOL ReadLinkMode = getattr(widcommDLL, '?ReadLinkMode@CBtIf@@SAHQAEPAE@Z') ReadLinkMode.restype = BOOLEAN def set_sniff_mode(bd_addr): result = SetSniffMode(bd_addr) return bool(result) def cancel_sniff_mode(bd_addr): if ALLOW_CANCEL_SNIFF: result = CancelSniffMode(bd_addr) return bool(result) return False def read_link_mode(bd_addr): mode = c_ubyte(0) result = ReadLinkMode(bd_addr, byref(mode)) if result: return mode.value return None def check_link_mode_sniff(device): if device is None: return mode = read_link_mode(device[BLUETOOTH_ADDRESS]) if mode == LINK_MODE_SNIFF and mode != device[BLUETOOTH_LINK_MODE]: device[BLUETOOTH_LINK_MODE] = mode print Text.enteredLowPower % (device_name(device),) def check_link_mode_no_sniff(device): if device is None: return mode = read_link_mode(device[BLUETOOTH_ADDRESS]) if mode == LINK_MODE_NORMAL and mode != device[BLUETOOTH_LINK_MODE]: device[BLUETOOTH_LINK_MODE] = mode print Text.exitedLowPower % (device_name(device),) #helper class to iterate, find and open hid devices class HIDHelper: text = Text deviceList = [] def __init__(self): self.UpdateDeviceList() def UpdateDeviceList(self): self.deviceList = [] #dll references setupapiDLL = ctypes.windll.setupapi hidDLL = ctypes.windll.hid #prepare Interfacedata interfaceInfo = SP_DEVICE_INTERFACE_DATA() interfaceInfo.cbSize = sizeof(interfaceInfo) #prepare InterfaceDetailData Structure interfaceDetailData = SP_DEVICE_INTERFACE_DETAIL_DATA_A() interfaceDetailData.cbSize = 5 #prepare HIDD_ATTRIBUTES hiddAttributes = HIDD_ATTRIBUTES() hiddAttributes.cbSize = sizeof(hiddAttributes) #get guid for HID device class g = GUID() hidDLL.HidD_GetHidGuid(byref(g)) #get handle to the device information set hinfo = setupapiDLL.SetupDiGetClassDevsA(byref(g), None, None, DIGCF_PRESENT + DIGCF_DEVICEINTERFACE) #enumerate devices i = 0 while setupapiDLL.SetupDiEnumDeviceInterfaces(hinfo, None, byref(g), i, byref(interfaceInfo)): device = {} i += 1 #get the required size requiredSize = c_ulong() setupapiDLL.SetupDiGetDeviceInterfaceDetailA(hinfo, byref(interfaceInfo), None, 0, byref(requiredSize), None) if requiredSize.value > 250: eg.PrintError(self.text.errorRetrieval) continue #prevent a buffer overflow #get the actual info setupapiDLL.SetupDiGetDeviceInterfaceDetailA( hinfo, byref(interfaceInfo), byref(interfaceDetailData), requiredSize, pointer(requiredSize), None ) device[DEVICE_PATH] = interfaceDetailData.DevicePath #get handle to HID device try: hidHandle = win32file.CreateFile( device[DEVICE_PATH], win32con.GENERIC_READ | win32con.GENERIC_WRITE, win32con.FILE_SHARE_READ | win32con.FILE_SHARE_WRITE, None, win32con.OPEN_EXISTING, 0, 0 ) #skipping devices which 
cannot be opened #(e.g. mice & keyboards, which are opened exclusivly by OS) if int(hidHandle) <= 0: continue except: continue #getting additional info hidDLL.HidD_GetAttributes(int(hidHandle), byref(hiddAttributes)) device[VENDOR_ID] = hiddAttributes.VendorID device[PRODUCT_ID] = hiddAttributes.ProductID device[VERSION_NUMBER] = hiddAttributes.VersionNumber #prepare string buffer for device info strings hidpStringType = c_wchar * 128 infoStr = hidpStringType() #getting manufacturer result = hidDLL.HidD_GetManufacturerString( int(hidHandle), byref(infoStr), ctypes.sizeof(infoStr)) if not result or len(infoStr.value) == 0: #build a generic ManufacturerString with the vendor ID device[VENDOR_STRING] = self.text.vendorID + str(hiddAttributes.VendorID) else: device[VENDOR_STRING] = infoStr.value #getting device name result = hidDLL.HidD_GetProductString( int(hidHandle), byref(infoStr), ctypes.sizeof(infoStr)) if not result or len(infoStr.value) == 0: #getting product name via registry devicePathSplit = device[DEVICE_PATH][4:].split("#") regkey = "SYSTEM\\CurrentControlSet\\Enum\\" + devicePathSplit[0] + \ "\\" + devicePathSplit[1] + "\\" + devicePathSplit[2] regHandle = _winreg.OpenKey( _winreg.HKEY_LOCAL_MACHINE, regkey) device[PRODUCT_STRING], regType = _winreg.QueryValueEx(regHandle, "DeviceDesc") _winreg.CloseKey(regHandle) else: device[PRODUCT_STRING] = infoStr.value #close handle win32file.CloseHandle(hidHandle) #add device to internal list self.deviceList.append(device) #end loop #destroy deviceinfolist setupapiDLL.SetupDiDestroyDeviceInfoList(hinfo) # try to find Bluetooth device IDs self.findBluetoothDeviceIds(self.deviceList) def findBluetoothDeviceIds(self, deviceList): # try to find Bluetooth device ID - we'll check the Widcomm section of the registry regkey = "SYSTEM\\CurrentControlSet\\Enum\\{95C7A0A0-3094-11D7-A202-00508B9D7D5A}" mapping = self.findBluetoothDeviceIdNameMapping(regkey) for d in deviceList: devicePathSplit = d[DEVICE_PATH][4:].split("#") parentId = devicePathSplit[2] for parentIdPrefix in mapping: if parentId.startswith(parentIdPrefix): d[BLUETOOTH_ADDRESS] = mapping[parentIdPrefix] d[BLUETOOTH_LINK_MODE] = read_link_mode(d[BLUETOOTH_ADDRESS]) break else: d[BLUETOOTH_ADDRESS] = None d[BLUETOOTH_LINK_MODE] = None def findBluetoothDeviceIdNameMapping(self, regkey, stack=None, mapping=None): # iterate through all the subkeys, looking for the 'ParentIdPrefix' and 'BdAddr' # values. 'LocationInformation' will match the PRODUCT_STRING above. 
if stack is None: stack = [] if mapping is None: mapping = {} appended_parent = False try: regHandle = _winreg.OpenKey( _winreg.HKEY_LOCAL_MACHINE, regkey) except WindowsError: return mapping try: parentIdPrefix, regType = _winreg.QueryValueEx(regHandle, "ParentIdPrefix") stack.append(parentIdPrefix) appended_parent = True except EnvironmentError: pass try: bdaddr, regType = _winreg.QueryValueEx(regHandle, "BdAddr") if stack: mapping[stack[-1]] = bdaddr except EnvironmentError: pass subkeys = [] try: for i in itertools.count(0): subkeys.append(_winreg.EnumKey(regHandle, i)) except EnvironmentError: pass _winreg.CloseKey(regHandle) for k in subkeys: subkey = regkey + '\\' + k self.findBluetoothDeviceIdNameMapping(subkey, stack, mapping) if appended_parent: stack.pop() return mapping def _get_device(self, noOtherPort, devicePath, vendorID, productID, versionNumber ): found = 0 path = "" for item in self.deviceList: if noOtherPort: #just search for devicepath if item[DEVICE_PATH] == devicePath: #found right device return item else: #find the right vendor and product ids if item[VENDOR_ID] == vendorID \ and item[PRODUCT_ID] == productID \ and item[VERSION_NUMBER] == versionNumber: found = found + 1 if item[DEVICE_PATH] == devicePath: #found right device return item if found == 1: return item #multiple devices found #don't know which to use if found > 1: eg.PrintError(self.text.errorMultipleDevices) return None #gets the devicePath #the devicePath parameter is only used with multiple same devices def GetDevicePath(self, noOtherPort, devicePath, vendorID, productID, versionNumber ): device = self._get_device(noOtherPort, devicePath, vendorID, productID, versionNumber) if device is None: return None return device[DEVICE_PATH] #gets the device bluetooth address #the devicePath parameter is only used with multiple same devices def GetDeviceBTAddress(self, noOtherPort, devicePath, vendorID, productID, versionNumber ): device = self._get_device(noOtherPort, devicePath, vendorID, productID, versionNumber) if device is None: return None return device[BLUETOOTH_ADDRESS] class TimerThread(threading.Thread): def __init__(self, plugin, name, interval, prefix, evtName, ): self.start_time = time.time() self.plugin = plugin self.name = name self.interval = interval self.prefix = prefix self.evtName = evtName threading.Thread.__init__(self, name = name) self.finished = threading.Event() self.abort = False def run(self): now = time.time() elapsed = now - self.start_time remaining = max(0, min(self.interval, self.interval - elapsed)) self.finished.wait(remaining) self.finished.clear() if not self.abort: eg.TriggerEvent(self.evtName, prefix = self.prefix) def stop(self): self.abort = True self.finished.set() DEVICE = None class HIDThread(threading.Thread): def __init__(self, plugin, helper, enduringEvents, rawDataEvents, ps3DataEvents, ps3Release, ps3Zone, shortKeyTime, longKeyTime, sleepTime, hibernateTime, noOtherPort, devicePath, vendorID, vendorString, productID, productString, versionNumber, ): self.ps3Remote = Ps3Remote self.text = Text self.deviceName = vendorString + " " + productString self.abort = False self._overlappedRead = win32file.OVERLAPPED() self._overlappedRead.hEvent = win32event.CreateEvent(None, 1, 0, None) self.evtName = "None" self.zoneName = "None" self.maskRegularEvent = False self.regularEvent = False self.Started = True self.timeStarted = time.time() #getting devicePath self.devicePath = helper.GetDevicePath( noOtherPort, devicePath, vendorID, productID, versionNumber ) if not 
self.devicePath: self.stop_enduring_event() eg.PrintError(self.text.errorFind + self.deviceName) return threading.Thread.__init__(self, name = self.devicePath) #setting members self.plugin = plugin self.helper = helper self.enduringEvents = enduringEvents self.rawDataEvents = rawDataEvents self.ps3DataEvents = ps3DataEvents self.ps3Release = ps3Release self.ps3Zone = ps3Zone self.shortKeyTime = shortKeyTime self.longKeyTime = longKeyTime self.sleepTime = sleepTime self.hibernateTime = hibernateTime global DEVICE DEVICE = helper._get_device( noOtherPort, devicePath, vendorID, productID, versionNumber ) self.bdAddr = DEVICE[BLUETOOTH_ADDRESS] self.start() def AbortThread(self): self.abort = True win32event.SetEvent(self._overlappedRead.hEvent) def run(self): #open file/devcice try: handle = win32file.CreateFile( self.devicePath, win32con.GENERIC_READ | win32con.GENERIC_WRITE, win32con.FILE_SHARE_READ | win32con.FILE_SHARE_WRITE, None, # no security win32con.OPEN_EXISTING, win32con.FILE_ATTRIBUTE_NORMAL | win32con.FILE_FLAG_OVERLAPPED, 0 ) except: self.stop_enduring_event() eg.PrintError(self.text.errorOpen + self.deviceName) return #getting data to get the right buffer size hidDLL = ctypes.windll.hid setupapiDLL = ctypes.windll.setupapi #get preparsed data preparsedData = c_ulong() result = hidDLL.HidD_GetPreparsedData( int(handle), ctypes.byref(preparsedData) ) #getCaps hidpCaps = HIDP_CAPS() result = hidDLL.HidP_GetCaps(preparsedData, ctypes.byref(hidpCaps)) n = hidpCaps.InputReportByteLength rt = c_int(0) #report type input rl = c_ulong(n) #report length maxDataL = hidDLL.HidP_MaxDataListLength(rt, preparsedData) #getting button caps bCapsArrL = c_ushort(hidpCaps.NumberInputButtonCaps) bCapsArrType = HIDP_BUTTON_CAPS * bCapsArrL.value bCapsArr = bCapsArrType() hidDLL.HidP_GetButtonCaps( rt, ctypes.byref(bCapsArr), ctypes.byref(bCapsArrL), preparsedData ) #getting value caps vCapsArrL = c_ushort(hidpCaps.NumberInputValueCaps) vCapsArrType = HIDP_VALUE_CAPS * vCapsArrL.value vCapsArr = vCapsArrType() hidDLL.HidP_GetValueCaps( rt, ctypes.byref(vCapsArr), ctypes.byref(vCapsArrL), preparsedData ) #parsing caps # prepare a list to find and store for each index # whether it is a button or value oldValues = {} dataIndexType = [0] * hidpCaps.NumberInputDataIndices #list entries depending on caps for i in range(bCapsArrL.value): if bCapsArr[i].IsRange: for ii in range( bCapsArr[i].Info.Range.DataIndexMin, bCapsArr[i].Info.Range.DataIndexMax + 1 ): dataIndexType[ii] = 1 else: ii = bCapsArr[i].Info.NotRange.DataIndex dataIndexType[ii] = 1 for i in range(vCapsArrL.value): if vCapsArr[i].IsRange: for ii in range( vCapsArr[i].Info.Range.DataIndexMin, vCapsArr[i].Info.Range.DataIndexMax + 1 ): dataIndexType[ii] = 2 oldValues[ii] = sys.maxint else: ii = vCapsArr[i].Info.NotRange.DataIndex dataIndexType[ii] = 2 oldValues[ii] = sys.maxint #prepare data array with maximum possible length DataArrayType = HIDP_DATA * maxDataL data = DataArrayType() while not self.abort: #try to read and wait for an event to happen try: win32event.ResetEvent(self._overlappedRead.hEvent) rc, buf = win32file.ReadFile(handle, n, self._overlappedRead) #waiting for an event win32event.WaitForSingleObject( self._overlappedRead.hEvent, win32event.INFINITE ) except: self.stop_enduring_event() eg.PrintError(self.text.errorRead + self.deviceName) self.abort = True #parse data if len(buf) == n and not self.abort: #raw data events if self.ps3DataEvents: read = str(buf) keycode = binascii.hexlify(read).upper()[2:22] try: evtName = 
self.ps3Remote.button[keycode] zoneName = self.ps3Remote.zone[keycode] regularEvent = True except KeyError: evtName = keycode zoneName = "Extended" regularEvent = False # Make sure any time we get a keypress, we come out of low-power mode cancel_sniff_mode(self.bdAddr) if result: eg.scheduler.AddTask(1.0, check_link_mode_no_sniff, DEVICE) if self.enduringEvents: self.stop_enduring_event() prefix = self.plugin.info.eventPrefix currentTime = time.time() elapsedTime = currentTime - self.timeStarted self.timeStarted = time.time() if self.Started: if not self.regularEvent or evtName == "Release": if elapsedTime < self.shortKeyTime: self.plugin.TriggerEvent(self.evtName + ".S") self.Started = False if evtName == "Release": if self.sleepTime > 0: self.Timer2 = TimerThread(self.plugin, "Timer2", self.sleepTime, prefix, "Sleep") self.Timer2.start() if self.hibernateTime > 0: self.Timer3 = TimerThread(self.plugin, "Timer3", self.hibernateTime, prefix, "Hibernate") self.Timer3.start() if self.ps3Release: self.plugin.TriggerEvent(evtName) self.maskRegularEvent = False else: if not self.maskRegularEvent or not regularEvent: if elapsedTime > self.sleepTime and self.sleepTime > 0: self.plugin.TriggerEvent("WakeUp") self.zoneName = "None" if self.ps3Zone and self.zoneName != zoneName and zoneName != "none": self.plugin.TriggerEvent(zoneName) self.plugin.TriggerEnduringEvent(evtName) if elapsedTime < self.shortKeyTime and evtName == self.evtName: self.Timer1 = TimerThread(self.plugin, "Timer1", self.longKeyTime, prefix, evtName + ".M") self.Timer1.start() eg.TriggerEvent(evtName + ".D", prefix = prefix) else: self.Timer1 = TimerThread(self.plugin, "Timer1", self.longKeyTime, prefix, evtName + ".L") self.Timer1.start() self.Started = True self.evtName = evtName self.zoneName = zoneName self.regularEvent = regularEvent if not regularEvent: self.maskRegularEvent = True else: self.plugin.TriggerEvent(evtName) elif maxDataL == 0 or self.rawDataEvents: read = str(buf) self.plugin.TriggerEvent( binascii.hexlify(read).upper() ) else: dataL = c_ulong(maxDataL) result = hidDLL.HidP_GetData( rt, ctypes.byref(data), ctypes.byref(dataL), preparsedData, ctypes.c_char_p(str(buf)), rl ) #parse data to trigger events btnPressed = [] for i in range(dataL.value): tmpIndex = data[i].DataIndex if dataIndexType[tmpIndex] == 1:#button #collect buttons pressed btnPressed.append(str(tmpIndex)) elif dataIndexType[tmpIndex] == 2:#control value newValue = int(data[i].Data.RawValue) if newValue == oldValues[tmpIndex]: continue oldValues[tmpIndex] = newValue self.plugin.TriggerEvent( "Value." + str(tmpIndex), payload = newValue ) else: eg.PrintError(self.text.errorInvalidDataIndex) if len(btnPressed): #one or more buttons pressed #btnPressed.sort() evtName = "Button." 
+ "+".join(btnPressed) if self.enduringEvents: self.plugin.TriggerEnduringEvent(evtName) else: self.plugin.TriggerEvent(evtName) elif self.enduringEvents: #no buttons pressed anymore self.plugin.EndLastEvent() else: #trigger event so that releasing all buttons #can get noticed even w/o enduring events self.plugin.TriggerEvent("Button.None") #loop aborted if self.enduringEvents: self.stop_enduring_event() win32file.CloseHandle(handle) #free references hidDLL.HidD_FreePreparsedData(ctypes.byref(preparsedData)) #HID thread finished def stop_enduring_event(self): try: enduringEvents = self.enduringEvents except AttributeError: enduringEvents = False if enduringEvents: try: if self.Timer1.isAlive(): self.Timer1.stop() except AttributeError: pass else: del self.Timer1 try: if self.Timer2.isAlive(): self.Timer2.stop() except AttributeError: pass else: del self.Timer2 try: if self.Timer3.isAlive(): self.Timer3.stop() except AttributeError: pass else: del self.Timer3 self.plugin.EndLastEvent() def device_name(device): return device[VENDOR_STRING] + " " + device[PRODUCT_STRING] def handle_wake_up(event): global ALLOW_CANCEL_SNIFF if event.string == 'System.Resume': ALLOW_CANCEL_SNIFF = True device = DEVICE if device is None: return bd_addr = device[BLUETOOTH_ADDRESS] result = cancel_sniff_mode(bd_addr) if result: eg.scheduler.AddTask(1.0, check_link_mode_no_sniff, DEVICE) def handle_sleep(event): device = DEVICE if device is None: return bd_addr = device[BLUETOOTH_ADDRESS] result = set_sniff_mode(bd_addr) if result: eg.scheduler.AddTask(1.0, check_link_mode_sniff, DEVICE) def handle_init(event): # Put the PS3 remote to sleep if it isn't already handle_sleep(event) def handle_machine_sleep(event): global ALLOW_CANCEL_SNIFF if event.string == 'System.Suspend': ALLOW_CANCEL_SNIFF = False return handle_sleep(event) INSTANCE = None def handle_device_attached(event): instance = INSTANCE if not isinstance(instance, PS3): return eg.actionThread.Call(instance.__stop__) eg.actionThread.Call(instance.__start__, *instance.args) class PS3(eg.PluginClass): helper = None text = Text thread = None def __start__(self, *args): global INSTANCE INSTANCE = self # We store the arguments away so that we can use them again later (i.e. when we resume # from standby and need to restart ourself). self.args = args self.__start(*args) def __start(self, eventName, enduringEvents, rawDataEvents, ps3DataEvents, ps3Release, ps3Zone, shortKeyTime, longKeyTime, sleepTime, hibernateTime, noOtherPort, devicePath, vendorID, vendorString, productID, productString, versionNumber, # For backwards-compatibility with 2.0.2 and 3.0.0 - if a new config option is added this can just be replaced dummy=None ): # Set up bindings to ensure that we handle power states, etc correctly. eg.Bind('Main.OnInit', handle_init) eg.Bind('HID.WakeUp', handle_wake_up) eg.Bind('System.Resume', handle_wake_up) eg.Bind('HID.Hibernate', handle_sleep) eg.Bind('System.QuerySuspend', handle_machine_sleep) eg.Bind('System.Suspend', handle_machine_sleep) # If we get one of these, we __stop__ and __start__ the plugin so that we # pick up the device (if necessary). 
eg.Bind('System.DeviceAttached', handle_device_attached) if eventName: self.info.eventPrefix = eventName else: self.info.eventPrefix = "HID" #ensure helper object is up to date if not self.helper: self.helper = HIDHelper() else: self.helper.UpdateDeviceList() #create thread self.thread = HIDThread(self, self.helper, enduringEvents, rawDataEvents, ps3DataEvents, ps3Release, ps3Zone, shortKeyTime, longKeyTime, sleepTime, hibernateTime, noOtherPort, devicePath, vendorID, vendorString, productID, productString, versionNumber ) def __stop__(self): global INSTANCE INSTANCE = None self.thread.AbortThread() eg.Unbind('Main.OnInit', handle_init) eg.Unbind('HID.Hibernate', handle_sleep) eg.Unbind('System.QuerySuspend', handle_machine_sleep) eg.Unbind('System.Suspend', handle_machine_sleep) eg.Unbind('HID.Wake', handle_wake_up) eg.Unbind('System.Resume', handle_wake_up) eg.Unbind('System.DeviceAttached', handle_device_attached) def Configure(self, eventName = "", enduringEvents = True, rawDataEvents = False, ps3DataEvents = False, ps3Release = False, ps3Zone = False, shortKeyTime = 0.3, longKeyTime = 0.5, sleepTime = 5.0, hibernateTime = 60.0, noOtherPort = False, devicePath = None, vendorID = None, vendorString = None, productID = None, productString = None, versionNumber = None, # For backwards-compatibility with 2.0.2 and 3.0.0 - if a new config option is added this can just be replaced dummy=None ): #ensure helper object is up to date if not self.helper: self.helper = HIDHelper() else: self.helper.UpdateDeviceList() panel = eg.ConfigPanel(self, resizable=True) #building dialog hidList = wx.ListCtrl(panel, -1, pos=wx.DefaultPosition, size=wx.DefaultSize, style=wx.LC_REPORT | wx.LC_SINGLE_SEL) #create GUI hidList.InsertColumn(0, self.text.deviceName) hidList.InsertColumn(1, self.text.manufacturer) hidList.InsertColumn(2, self.text.connected) path = self.helper.GetDevicePath(noOtherPort, devicePath, vendorID, productID, versionNumber) #fill list devices = {} idx = 0 for item in self.helper.deviceList: idx = hidList.InsertStringItem(sys.maxint, item[PRODUCT_STRING]) hidList.SetStringItem(idx, 1, item[VENDOR_STRING]) hidList.SetStringItem(idx, 2, self.text.yes) if item[DEVICE_PATH] == path: hidList.Select(idx) devices[idx] = item #add not connected device to bottom of list if not path: if not devicePath: #just select first entry on first start hidList.Select(0) else: item = { DEVICE_PATH: devicePath, VENDOR_ID: vendorID, VENDOR_STRING: vendorString, PRODUCT_ID: productID, PRODUCT_STRING: productString, VERSION_NUMBER: versionNumber, } idx = hidList.InsertStringItem(sys.maxint, item[PRODUCT_STRING]) hidList.SetStringItem(idx, 1, item[VENDOR_STRING]) hidList.SetStringItem(idx, 2, self.text.no) hidList.Select(idx) devices[idx] = item if hidList.GetFirstSelected() == -1: #no device selected, disable ok and apply button panel.dialog.buttonRow.okButton.Enable(False) panel.dialog.buttonRow.applyButton.Enable(False) #layout for i in range(hidList.GetColumnCount()): hidList.SetColumnWidth(i, wx.LIST_AUTOSIZE_USEHEADER) size = hidList.GetColumnWidth(i) hidList.SetColumnWidth(i, wx.LIST_AUTOSIZE) hidList.SetColumnWidth(i, max(size, hidList.GetColumnWidth(i) + 5)) panel.sizer.Add(hidList, 1, flag = wx.EXPAND) panel.sizer.Add((15,15)) #sizers eventsGroupSizer = wx.StaticBoxSizer( wx.StaticBox(panel, -1, self.text.eventsSettings), wx.VERTICAL ) eventsSizer = wx.GridBagSizer(0, 5) #eventname eventsSizer.Add( wx.StaticText(panel, -1, self.text.eventName), (0, 0), flag = wx.ALIGN_CENTER_VERTICAL) eventNameCtrl = 
wx.TextCtrl(panel, value = eventName) eventNameCtrl.SetMaxLength(32) eventsSizer.Add(eventNameCtrl, (0, 1), (1, 2), flag = wx.EXPAND) #checkbox for no other port option noOtherPortCtrl = wx.CheckBox(panel, -1, self.text.noOtherPort) noOtherPortCtrl.SetValue(noOtherPort) eventsSizer.Add(noOtherPortCtrl, (1, 0), (1, 3)) #checkbox for enduring event option enduringEventsCtrl = wx.CheckBox(panel, -1, self.text.enduringEvents) enduringEventsCtrl.SetValue(enduringEvents) eventsSizer.Add(enduringEventsCtrl, (2, 0), (1, 3)) #checkbox for raw data events rawDataEventsCtrl = wx.CheckBox(panel, -1, self.text.rawDataEvents) rawDataEventsCtrl.SetValue(rawDataEvents) eventsSizer.Add(rawDataEventsCtrl, (3, 0), (1, 3)) eventsGroupSizer.Add(eventsSizer, 0, wx.ALL, 10) panel.sizer.Add(eventsGroupSizer, 0, wx.EXPAND) panel.sizer.Add((15,15)) #sizers ps3GroupSizer = wx.StaticBoxSizer( wx.StaticBox(panel, -1, self.text.ps3Settings), wx.VERTICAL ) ps3Sizer = wx.GridBagSizer(0, 5) #checkbox for ps3 data events ps3DataEventsCtrl = wx.CheckBox(panel, -1, self.text.ps3DataEvents) ps3DataEventsCtrl.SetValue(ps3DataEvents) ps3Sizer.Add(ps3DataEventsCtrl, (0, 0), (1, 3)) #checkbox for ps3 release event ps3ReleaseCtrl = wx.CheckBox(panel, -1, self.text.ps3Release) ps3ReleaseCtrl.SetValue(ps3Release) ps3Sizer.Add(ps3ReleaseCtrl, (1, 0), (1, 3)) #checkbox for ps3 zone event ps3ZoneCtrl = wx.CheckBox(panel, -1, self.text.ps3Zone) ps3ZoneCtrl.SetValue(ps3Zone) ps3Sizer.Add(ps3ZoneCtrl, (2, 0), (1, 3)) #short key time ps3Sizer.Add( wx.StaticText(panel, -1, self.text.shortKeyTime), (3, 0), flag = wx.ALIGN_CENTER_VERTICAL) shortKeyTimeCtrl = eg.SpinNumCtrl( panel, -1, shortKeyTime, size=(200,-1), integerWidth=7, increment=0.05 ) ps3Sizer.Add(shortKeyTimeCtrl, (3, 1), flag = wx.EXPAND) ps3Sizer.Add( wx.StaticText(panel, -1, self.text.seconds), (3, 2), (1, 2), flag = wx.ALIGN_CENTER_VERTICAL) #long key time ps3Sizer.Add( wx.StaticText(panel, -1, self.text.longKeyTime), (4, 0), flag = wx.ALIGN_CENTER_VERTICAL) longKeyTimeCtrl = eg.SpinNumCtrl( panel, -1, longKeyTime, size=(200,-1), integerWidth=7, increment=0.05 ) ps3Sizer.Add(longKeyTimeCtrl, (4, 1), flag = wx.EXPAND) ps3Sizer.Add( wx.StaticText(panel, -1, self.text.seconds), (4, 2), (1, 2), flag = wx.ALIGN_CENTER_VERTICAL) #sleep time ps3Sizer.Add( wx.StaticText(panel, -1, self.text.sleepTime), (5, 0), flag = wx.ALIGN_CENTER_VERTICAL) sleepTimeCtrl = eg.SpinNumCtrl( panel, -1, sleepTime, size=(200,-1), integerWidth=7, increment=1.00 ) ps3Sizer.Add(sleepTimeCtrl, (5, 1), flag = wx.EXPAND) ps3Sizer.Add( wx.StaticText(panel, -1, self.text.seconds), (5, 2), (1, 2), flag = wx.ALIGN_CENTER_VERTICAL) #hibernate time ps3Sizer.Add( wx.StaticText(panel, -1, self.text.hibernateTime), (6, 0), flag = wx.ALIGN_CENTER_VERTICAL) hibernateTimeCtrl = eg.SpinNumCtrl( panel, -1, hibernateTime, size=(200,-1), integerWidth=7, increment=1.00 ) ps3Sizer.Add(hibernateTimeCtrl, (6, 1), flag = wx.EXPAND) ps3Sizer.Add( wx.StaticText(panel, -1, self.text.seconds), (6, 2), (1, 2), flag = wx.ALIGN_CENTER_VERTICAL) ps3GroupSizer.Add(ps3Sizer, 0, wx.ALL, 10) panel.sizer.Add(ps3GroupSizer, 0, wx.EXPAND) def OnHidListSelect(event): panel.dialog.buttonRow.okButton.Enable(True) panel.dialog.buttonRow.applyButton.Enable(True) event.Skip() def OnRawDataEventsChange(event): enduringEventsCtrl.Enable(not rawDataEventsCtrl.GetValue()) ps3DataEventsCtrl.Enable(not rawDataEventsCtrl.GetValue()) event.Skip() def OnEnduringEventsChange(event): rawDataEventsCtrl.Enable(not enduringEventsCtrl.GetValue()) 
ps3ReleaseCtrl.Enable(enduringEventsCtrl.GetValue() and ps3DataEventsCtrl.GetValue()) ps3ZoneCtrl.Enable(enduringEventsCtrl.GetValue() and ps3DataEventsCtrl.GetValue()) event.Skip() def OnPs3DataEventsChange(event): rawDataEventsCtrl.Enable(not ps3DataEventsCtrl.GetValue()) ps3ReleaseCtrl.Enable(enduringEventsCtrl.GetValue() and ps3DataEventsCtrl.GetValue()) ps3ZoneCtrl.Enable(enduringEventsCtrl.GetValue() and ps3DataEventsCtrl.GetValue()) event.Skip() def OnPs3ReleaseChange(event): rawDataEventsCtrl.Enable(not ps3DataEventsCtrl.GetValue()) event.Skip() def OnPs3ZoneChange(event): rawDataEventsCtrl.Enable(not ps3DataEventsCtrl.GetValue()) event.Skip() OnRawDataEventsChange(wx.CommandEvent()) OnPs3DataEventsChange(wx.CommandEvent()) OnPs3ReleaseChange(wx.CommandEvent()) OnPs3ZoneChange(wx.CommandEvent()) OnEnduringEventsChange(wx.CommandEvent()) rawDataEventsCtrl.Bind(wx.EVT_CHECKBOX, OnRawDataEventsChange) ps3DataEventsCtrl.Bind(wx.EVT_CHECKBOX, OnPs3DataEventsChange) ps3ReleaseCtrl.Bind(wx.EVT_CHECKBOX, OnPs3ReleaseChange) ps3ZoneCtrl.Bind(wx.EVT_CHECKBOX, OnPs3ZoneChange) enduringEventsCtrl.Bind(wx.EVT_CHECKBOX, OnEnduringEventsChange) hidList.Bind(wx.EVT_LIST_ITEM_SELECTED, OnHidListSelect) while panel.Affirmed(): device = devices[hidList.GetFirstSelected()] panel.SetResult( eventNameCtrl.GetValue(), enduringEventsCtrl.GetValue(), rawDataEventsCtrl.GetValue(), ps3DataEventsCtrl.GetValue(), ps3ReleaseCtrl.GetValue(), ps3ZoneCtrl.GetValue(), shortKeyTimeCtrl.GetValue(), longKeyTimeCtrl.GetValue(), sleepTimeCtrl.GetValue(), hibernateTimeCtrl.GetValue(), noOtherPortCtrl.GetValue(), device[DEVICE_PATH], device[VENDOR_ID], device[VENDOR_STRING], device[PRODUCT_ID], device[PRODUCT_STRING], device[VERSION_NUMBER] )
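A minimal sketch of the per-report lookup that HIDThread.run performs when ps3DataEvents is enabled: hex-encode the raw input report, slice out the 20-character keycode, and map it through Ps3Remote.button / Ps3Remote.zone, falling back to the raw code for unmapped key combinations. The raw_report bytes below are invented for illustration, and the function assumes the Ps3Remote table defined earlier in this module (Python 2, like the rest of the plugin).

import binascii

def decode_ps3_report(raw_report):
    """Map a raw HID input report to an (event_name, zone_name) pair.

    Mirrors the slicing used in HIDThread.run: skip the leading report ID
    byte and take the next ten bytes of key state. Unknown codes (e.g. two
    keys pressed at once) fall back to the hex string itself, as the plugin does.
    """
    keycode = binascii.hexlify(raw_report).upper()[2:22]
    try:
        return Ps3Remote.button[keycode], Ps3Remote.zone[keycode]
    except KeyError:
        return keycode, "Extended"

# Hypothetical report for the Enter button (keycode 0000080BFFFFFFFFFF01):
raw_report = binascii.unhexlify("01" + "0000080BFFFFFFFFFF01" + "00")
# decode_ps3_report(raw_report) -> ("Enter", "Zone.Pad")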
After an exciting Olympic Trials, 5 Questions with is back! Today we have the first of our interviews with a member of the 2012 London Olympic Team. She overcame sickness in Eugene and made her way from roads star to Olympian in just over a year. 1. Writing About Running: First off, congrats on becoming an Olympian. What has your last week been like as a member of the 2012 US Olympic team that will be traveling to London next month? Janet Cherobon-Bawcom: It's been super hectic. I am taking a full load of classes this summer, and after a couple of days of excitement in Oregon, it was back to reality - and to being way behind on my work. To make things worse, we got back to Georgia just in time for the heat wave to hit and decided to head back out to Flagstaff where there was less heat and less oxygen. I've been here since the 1st, and I'm finally getting caught up on sleep, homework, and getting back into some good training. 2. Writing About Running: With the nature of the Women's 10,000, you were one of three women (excluding Shalane Flanagan, who will run the Olympic Marathon) who had the Olympic A-Standard. What was your strategy with that in mind? Janet Cherobon-Bawcom: To be honest, I was so sick in Eugene that I can't even pretend that I had a strategy. I'd been fighting a respiratory illness ever since Bolder Boulder, and it took a real turn for the worse when I got out to Oregon. I wasn't going to publicize it before the race, but I knew that, if anyone made a successful run at the "A" standard there, I would be left off the team for sure. There was no way I was going to keep up with them - I could barely breath during the race. Of course, two miles in, I knew that the pace was too slow for anyone to hit the standard, and it was like a huge weight was off my shoulders. It's not how anyone would want to make the team, but with the way I was feeling that night, I had to say a big prayer of thanks that I even made it through the race. Of course, the really ironic part for me was that I went into the Payton Jordan meet just shooting for the trials "A" standard. I hadn't run on the track since 2006, my 10k PR on the road was 32:26, and I really didn't consider myself a contender for a spot on the team. The Friday before that meet, I did a workout in Flagstaff that made me think, "you know, I might be able to break 32:00 if I get lucky." Well, I got in the race, felt decent, and now here we are. 3. Writing About Running: You have had a whirlwind past few years with gaining your citizenship, winning the USA Running Circuit Championship and becoming an Olympian. Is this what you imagined when you heard the term "the American dream?" Janet Cherobon-Bawcom: Again, I can't pretend that I had any idea or dream about being where I am. I came to the US never having broken 20:00 for 5k. My dream at that time was just to run well enough to keep my scholarship so that I could pursue my "American Dream" of getting my nursing degree. I had zero aspirations of doing anything significant as an athlete. I loved watching the Olympics as a kid - my favorite athlete was Maria Mutola back then - but I wasn't involved in the sport at the time, so I certainly didn't dream that I'd be where I am. After college, I kept running just to give me something to do - even with 3 DII championships to my name, my PR's were not blazing fast (16:19 and 34:21), and I wasn't a US citizen - so I couldn't dream of running for the US. 
When I started the citizenship process, I would've been crazy to think that one day I'd be representing the US - at that time my marathon PR was just under 3 hours. I was hoping that I could become a citizen and one day work for the Center for Disease Control. Needless to say, I think God had other things in mind because there's no other explanation for the way things in my life have worked out. 4. Writing About Running: What's it like training with the legendary coach, Jack Daniels? Janet Cherobon-Bawcom: It's been great. Jack is the perfect fit for me. He's a true genius, but he is also someone who really respects the experience I have as an athlete and the knowledge I have about how my body responds to certain things. I do some pretty crazy things from time to time - racing too much, etc. - but instead of trying to talk me out of things, Jack works hard to make the best plan for me, even if it seems crazy. I really appreciate that he's committed to helping me reach my goals - not like some coaches who try so hard to force their ideas on the athletes. He understands that my running is just that, my running, and he does a brilliant job of helping me get the most out of it. 5. Writing About Running: Your twitter profile says "Started running so I wouldn't have to keep milking cows. Still running and still not milking cows!!!!" When exactly is the last time you milked a cow? and what are your plans after the Olympics? Janet Cherobon-Bawcom: Actually, I milked a cow in February when I was back in Kenya. The weather in Flagstaff was killing my training, and I wanted to be at altitude to get ready for my spring season, so I decided that it would be an ideal time to go visit my family. My little sisters started talking some trash about how I'd gone soft in America, and I had to defend myself, so I grabbed the old milking bucket and got to work. It's pretty much like riding a bicycle - once you learn how.... Of course, I'm not so great on the bicycle, so who knows. After the Olympics, I'm actually going for a long vacation with my husband's family, then back to training with my eye on doing a few USA Running Circuit events and a fall marathon. I've been running pretty much non-stop since last August, so after the marathon, I plan to take a long break from running - and then we'll see what happens! Great interview! So glad to read more about Janet! This was great! Fun to read more about Janet. I was there when Janet came to the U.S. (I was two years ahead of her at Harding on the CC and track teams), and it's amazing to see what she's done. I almost cried when she qualified for the Olympics - couldn't wipe the smile off of my face. Seeing Janet's steady rise in running success should remind all of us that dreams can come true, even in your 30s. Thanks for doing this interview, and thanks, Janet.
# -*- coding: utf-8 -*-
''' Common I/O routines'''
# Note: for errmsg, msg, and msg_nocr we don't want to simply make
# an assignment of method names like self.msg = self.debugger.intf.msg,
# because we want to allow the interface (intf) to change
# dynamically. That is, the value of self.debugger may change in the
# course of the program, and if we made such a method assignment we
# wouldn't pick up that change in our self.msg.


def errmsg(proc_obj, message, opts={}):
    response = proc_obj.response
    if 'set_name' in opts:
        response['name'] = 'error'
    return response['errs'].append(message)


def msg(proc_obj, message, opts={}):
    response = proc_obj.response
    return response['msg'].append(message)


# Demo it
if __name__ == '__main__':
    class Demo:
        def __init__(self):
            self.response = {'errs': [], 'msg': []}
            pass
        pass

    import pprint
    demo = Demo()
    msg(demo, 'hi')
    pp = pprint.PrettyPrinter()
    pp.pprint(demo.response)
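A quick companion sketch (not part of the original module) exercising the errmsg path as well, reusing the same kind of bare response-holder object as the self-test above; the 'file not found' text is purely illustrative:

class DemoProc:
    def __init__(self):
        self.response = {'errs': [], 'msg': []}

proc = DemoProc()
errmsg(proc, 'file not found', opts={'set_name': True})
# The error lands in 'errs' and, because of set_name, a 'name' key is added:
# {'errs': ['file not found'], 'msg': [], 'name': 'error'}
print(proc.response)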
Double-needle coverseamed neck with lay-flat collar. Great item and repair! Fast delivery and great printing. they were of high quality and did not appear cheap or flimsy at the least. speed from order taking to delivery, what we like most. this was a great product. We used it as give-a-ways for a summer wedding. The guests loved them and they were personalized with the bride & grooms names . Many comments on what a clever idea. I was happy to see candy being offered on you site. Turn around time was fast and customer support was great! Would love to see my logo on a sample. It is great. We like everything. We also like your artwork for our logo.
__author__ = 'Dmitriy Korsakov' __doc__ = 'Farm management' import json import copy from scalrctl import commands from scalrctl import click from scalrctl import request, settings class FarmTerminate(commands.SimplifiedAction): epilog = "Example: scalr-ctl farms terminate --farmId <ID> --force" post_template = { "terminateFarmRequest": {"force": True} } def get_options(self): hlp = "It is used to terminate the Server immediately ignoring scalr.system.server_terminate_timeout." force_terminate = click.Option(('--force', 'force'), is_flag=True, default=False, help=hlp) options = [force_terminate, ] options.extend(super(FarmTerminate, self).get_options()) return options def pre(self, *args, **kwargs): """ before request is made """ force = kwargs.pop("force", None) post_data = copy.deepcopy(self.post_template) post_data["terminateFarmRequest"]["force"] = force kv = {"import-data": post_data} kv.update(kwargs) arguments, kw = super(FarmTerminate, self).pre(*args, **kv) return arguments, kw class FarmLaunch(commands.SimplifiedAction): epilog = "Example: scalr-ctl farms launch --farmId <ID>" post_template = {} def pre(self, *args, **kwargs): """ before request is made """ kv = {"import-data": {}} kv.update(kwargs) arguments, kw = super(FarmLaunch, self).pre(*args, **kv) return arguments, kw class FarmClone(commands.SimplifiedAction): epilog = "Example: scalr-ctl farms clone --farmId <ID> --name MyNewFarm" post_template = { "cloneFarmRequest": {"name": ""} } def get_options(self): hlp = "The name of a new Farm." name = click.Option(('--name', 'name'), required=True, help=hlp) options = [name, ] options.extend(super(FarmClone, self).get_options()) return options def pre(self, *args, **kwargs): """ before request is made """ name = kwargs.pop("name", None) post_data = copy.deepcopy(self.post_template) post_data["cloneFarmRequest"]["name"] = name kv = {"import-data": post_data} kv.update(kwargs) arguments, kw = super(FarmClone, self).pre(*args, **kv) return arguments, kw class FarmSuspend(FarmLaunch): epilog = "Example: scalr-ctl farms suspend --farmId <ID>" post_template = {} class FarmResume(FarmLaunch): epilog = "Example: scalr-ctl farms resume --farmId <ID>" post_template = {} class FarmLock(commands.SimplifiedAction): epilog = "Example: scalr-ctl farm lock --farmId <ID> --comment <COMMENT> --unlock-permission <ANYONE|OWNER|TEAM>" post_template = { "lockFarmRequest": {"lockComment": "", "unlockPermission": "anyone"} } def get_options(self): comment = click.Option(('--lockComment', 'comment'), default="", help="Comment to lock a Farm.") hlp = "If you would like to prevent other users unlocking the Farm you should set 'owner' options.\ With 'team' options only members of the Farm's Teams can unlock this Farm.\ Default value 'anyone' means that anyone with access can unlock this Farm." 
unlock_permission = click.Option(( '--unlockPermission', 'unlock_permission'), default="anyone", show_default=True, help=hlp) options = [comment, unlock_permission] options.extend(super(FarmLock, self).get_options()) return options def pre(self, *args, **kwargs): """ before request is made """ comment = kwargs.pop("comment", None) unlock_permission = kwargs.pop("unlock_permission", "anyone") post_data = copy.deepcopy(self.post_template) post_data["lockFarmRequest"]["lockComment"] = comment post_data["lockFarmRequest"]["unlockPermission"] = unlock_permission kv = {"import-data": post_data} kv.update(kwargs) arguments, kw = super(FarmLock, self).pre(*args, **kv) return arguments, kw class FarmCreateFromTemplate(commands.Action): def pre(self, *args, **kwargs): """ before request is made """ kwargs = self._apply_arguments(**kwargs) stdin = kwargs.pop('stdin', None) kwargs["FarmTemplate"] = self._read_object() if stdin else self._edit_example() return args, kwargs def run(self, *args, **kwargs): """ Callback for click subcommand. """ hide_output = kwargs.pop('hide_output', False) # [ST-88] args, kwargs = self.pre(*args, **kwargs) uri = self._request_template payload = {} data = {} if '{envId}' in uri and not kwargs.get('envId') and settings.envId: kwargs['envId'] = settings.envId if kwargs: # filtering in-body and empty params uri = self._request_template.format(**kwargs) for key, value in kwargs.items(): param = '{{{}}}'.format(key) if value and (param not in self._request_template): data.update(value) if self.dry_run: click.echo('{} {} {} {}'.format(self.http_method, uri, payload, data)) # returns dummy response return json.dumps({'data': {}, 'meta': {}}) data = json.dumps(data) raw_response = request.request(self.http_method, self.api_level, uri, payload, data) response = self.post(raw_response) text = self._format_response(response, hidden=hide_output, **kwargs) if text is not None: click.echo(text) return response def _edit_example(self): commentary = \ '''# The body must be a valid FarmTemplate object. # # Type your FarmTemplate object below this line. The above text will not be sent to the API server.''' text = click.edit(commentary) if text: raw_object = "".join([line for line in text.splitlines() if not line.startswith("#")]).strip() else: raw_object = "" return json.loads(raw_object)
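All of the pre() hooks above follow the same pattern: deep-copy the class-level post_template, fill in the user-supplied option, and hand the result over as "import-data". A minimal standalone sketch of that pattern outside the click/command machinery (the function name and values are illustrative only):

import copy

LOCK_TEMPLATE = {"lockFarmRequest": {"lockComment": "", "unlockPermission": "anyone"}}

def build_lock_payload(comment, unlock_permission="anyone"):
    # Deep-copy so repeated calls never mutate the shared template dict.
    post_data = copy.deepcopy(LOCK_TEMPLATE)
    post_data["lockFarmRequest"]["lockComment"] = comment
    post_data["lockFarmRequest"]["unlockPermission"] = unlock_permission
    return {"import-data": post_data}

print(build_lock_payload("routine maintenance", "owner"))
# {'import-data': {'lockFarmRequest': {'lockComment': 'routine maintenance', 'unlockPermission': 'owner'}}}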
Freddy’s Fast Cash is here to help you with your short-term financial needs. We are locally owned, and live and work in the same community as our customers. We are flexible and willing to work with you to find the best financial solutions for your financial needs. It is extremely quick and easy.
#!/usr/bin/python3 #coding:utf-8 import urllib.request import urllib.parse import http.cookiejar import smtplib from email.mime.text import MIMEText from email.header import Header user = '[email protected]' passwd = '202,118,239,46' to = '[email protected]' def autologin(url, params, req_encoding, res_encoding): cookiejar = http.cookiejar.CookieJar() opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cookiejar)) urllib.request.install_opener(opener) params = urllib.parse.urlencode(params) params = params.encode(req_encoding) response = urllib.request.urlopen(url, params) text = response.read() text_decode = text.decode(req_encoding) text = text_decode.encode(res_encoding) return text def check(ip, passwd): params = {"fr":"00", "id_ip":ip, "pass":passwd, "set":"进入"} req_encoding = 'gb2312' res_encoding = 'utf-8' text = autologin('http://hitsun.hit.edu.cn/index1.php', params, req_encoding, res_encoding) text = str(text, 'utf-8') search_text = '所剩余额' for line in text.splitlines(): if line.find(search_text)!=-1: return(line.split(';')[2].split(' ')[0]) def genMail(iplist, user, to): context = '' for (ip,passwd) in iplist.items(): context += ip + ": " + check(ip, passwd) + '\n' context += '\n' msg = MIMEText(context.encode('utf-8'), 'plain', 'utf-8') sub = Header('当月服务器余额情况', 'utf-8') msg['Subject'] = sub msg['From'] = user msg['To'] = to return msg def sendMail(From, FromPass, To, mail): if not mail: return server = smtplib.SMTP("smtp.gmail.com", 587) server.ehlo() server.starttls() server.ehlo() server.login(From, FromPass) server.sendmail(From, To, mail) server.close() if __name__ == '__main__': iplist = { '202.118.239.46':'123456', '202.118.250.18':'123456', '202.118.250.19':'123456'} mail = genMail(iplist, user, to) sendMail(user, passwd, to, mail.as_string())
Prof. John Katsikadelis graduated from the School of Civil Engineering of the National Technical University of Athens, which he entered in 1957 ranking third among all the candidates for that year; in the same year he was also admitted to the School of Chemistry of the University of Athens, ranking first among all the candidates, and to the School of Mathematics of the University of Athens, ranking among the first of all the candidates. He attended the School of Civil Engineering (1957-1962). In 1970, after 8 years of intense professional activity as a licensed civil engineer, he joined the Chair of Structural Analysis at the School of Civil Engineering as a research and teaching assistant and completed his degree of Doctor Engineer in 1973. In 1974 he was awarded a scholarship by the Polytechnic University of New York, where he continued his graduate studies in the Department of Applied Mechanics of the School of Aerospace; these studies ended with an MSc and a second PhD in the field of Applied Mechanics under the supervision of Professor Anthony Armenakas. During 1972 and 1973 he attended courses of interest to him at the School of Mathematics of the University of Athens. He has also attended CISM courses on Finite Elements and Boundary Elements at Udine in 1983 and 1986. His publication record includes 20 books, 6 guest-edited journal special issues (3 of Engineering Analysis with Boundary Elements and 3 of Archive of Applied Mechanics), 8 invited chapters and original papers in books, 10 edited conference proceedings, 2 doctoral dissertations, and 266 original papers in the most reputed international journals and international conference proceedings. His textbook on the BEM (Elsevier, 2002) has been translated into Japanese (Asakura, Tokyo, 2004), Russian (Publishing House of Russian Civil Engineering Universities, Moscow, 2007), and Serbian (Gradjevinska Knjiga, Belgrade, 2010, to appear). His published work has received about 1500 citations, with an h-index of 20. About 200 of his 240 publications are devoted to the development and application of the BEM and, more generally, to integral equation methods as well as other mesh reduction methods.
# Copyright (c) 2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo.config import cfg from neutron.agent.common import config as agent_config from neutron.agent import l3_agent from neutron.agent.linux import interface from neutron.agent.linux import ip_lib from neutron.agent.linux import ovs_lib from neutron.common import config from neutron.openstack.common import log as logging LOG = logging.getLogger(__name__) def setup_conf(): """Setup the cfg for the clean up utility. Use separate setup_conf for the utility because there are many options from the main config that do not apply during clean-up. """ opts = [ cfg.BoolOpt('ovs_all_ports', default=False, help=_('True to delete all ports on all the OpenvSwitch ' 'bridges. False to delete ports created by ' 'Neutron on integration and external network ' 'bridges.')) ] conf = cfg.CONF conf.register_cli_opts(opts) conf.register_opts(l3_agent.L3NATAgent.OPTS) conf.register_opts(interface.OPTS) agent_config.register_interface_driver_opts_helper(conf) agent_config.register_use_namespaces_opts_helper(conf) agent_config.register_root_helper(conf) return conf def collect_neutron_ports(bridges, root_helper): """Collect ports created by Neutron from OVS.""" ports = [] for bridge in bridges: ovs = ovs_lib.OVSBridge(bridge, root_helper) ports += [port.port_name for port in ovs.get_vif_ports()] return ports def delete_neutron_ports(ports, root_helper): """Delete non-internal ports created by Neutron Non-internal OVS ports need to be removed manually. """ for port in ports: if ip_lib.device_exists(port): device = ip_lib.IPDevice(port, root_helper) device.link.delete() LOG.info(_("Delete %s"), port) def main(): """Main method for cleaning up OVS bridges. The utility cleans up the integration bridges used by Neutron. """ conf = setup_conf() conf() config.setup_logging() configuration_bridges = set([conf.ovs_integration_bridge, conf.external_network_bridge]) ovs_bridges = set(ovs_lib.get_bridges(conf.AGENT.root_helper)) available_configuration_bridges = configuration_bridges & ovs_bridges if conf.ovs_all_ports: bridges = ovs_bridges else: bridges = available_configuration_bridges # Collect existing ports created by Neutron on configuration bridges. # After deleting ports from OVS bridges, we cannot determine which # ports were created by Neutron, so port information is collected now. ports = collect_neutron_ports(available_configuration_bridges, conf.AGENT.root_helper) for bridge in bridges: LOG.info(_("Cleaning %s"), bridge) ovs = ovs_lib.OVSBridge(bridge, conf.AGENT.root_helper) ovs.delete_ports(all_ports=conf.ovs_all_ports) # Remove remaining ports created by Neutron (usually veth pair) delete_neutron_ports(ports, conf.AGENT.root_helper) LOG.info(_("OVS cleanup completed successfully"))
Fundsurfer operates globally with a core network of world-leading VC’s, Family Offices and Angel Consortiums. Our real estate team focuses on finding, vetting, securing and monitoring investment opportunities worldwide. Fundsurfer Real Estate UK and Fundsurfer USA work with investor mandates to source off-market real estate acquisition and development deals. We provide core services to a wide range of stakeholders involved in Affordable Housing including developers, registered providers, local authorities and investors. With a clear understanding of clients individual objectives, funding needs and market dynamics, our Build to Rent funding team provides access to the solutions required to achieve your development and funding ambitions. Our team have extensive transactional experience advising and supporting our clients in a number of sectors including commercial, retail, out of town, shopping centres, capital markets and corporate occupier services. Our real estate team are specialists in multi-property type transaction and consultancy advice. With experts specialising in mixed-use and portfolios, we can provide services across the property lifecycle, from development strategy to transactional and everything in between. Our funding partners have deployed over £18bn in capital in the past five years with Fundsurfer Real Estate helping to secure opportunities across different asset classes, industries and geographies. "We are delighted to have partnered with a number of family offices and real estate funds globally to provide them access to off-market trades and investment opportunities, our focus is on building communities, boosting job growth and creating opportunities for both client and investor" The investment landscape is changing and evolving quickly - working with our team will provide our investor network with a source of unique opportunities and a key partner for sellers of off-market assets. Identifying diverse opportunities requires a team that can operate quickly to minimise delays and close funding faster and with no fuss. Fundsurfer is focused on helping capital cities secure new and innovative sources of capital to allow infrastructure development to flourish. Our primary focus is to help supercharge job creation and growth, working with local authorities and partners to develop projects in a responsible way that meets the needs of the local community. The information and material presented on this website (the “website") are provided to you for informational purposes only and are not to be used or considered as an offer or a solicitation to sell or an offer or solicitation to buy or subscribe for securities or other financial instruments or any advice or recommendation with respect to such securities or other financial instruments. Neither Fundsurfer (UK) Limited (“Fundsurfer") nor any of its affiliates makes any representation or warranty or guarantee as to the completeness, accuracy, timeliness or suitability of any information contained within any part of the website nor that it is free from error. Fundsurfer does not accept any liability (whether in contract, tort or otherwise howsoever and whether or not they have been negligent) for any loss or damage (including, without limitation, loss of profit), which may arise directly or indirectly from use of or reliance on such information. Whilst the information provided has been obtained from sources believed to be reliable, neither Fundsurfer nor any of its affiliates attest to its accuracy or completeness. 
Fundsurfer reserves the right to change any source without restriction or notice. The values quoted for any particular investment are indicative only and are subject to change. Past performance should not be taken as an indication or guarantee of future performance and no representation or warranty, express or implied, is made regarding future performance. Opinions and/or estimates reflect a judgment at the original date of publication by us and are subject to change without notice. The price of, value of and income from any of the securities or financial instruments can fall as well as rise. Foreign currency-denominated securities and financial instruments are subject to fluctuations in exchange rates that may have a positive or adverse effect on the value, price or income of such securities or financial instruments. Investors in securities, the values of which are influenced by currency volatility, effectively assume this risk.
# Copyright 2012 Google Inc. All Rights Reserved. import httplib import pickle import unittest import mock from google.appengine.ext import ndb from google.appengine.api import app_identity from google.appengine.api import memcache from google.appengine.api import urlfetch from google.appengine.ext import testbed try: from cloudstorage import api_utils from cloudstorage import rest_api from cloudstorage import test_utils except ImportError: from google.appengine.ext.cloudstorage import api_utils from google.appengine.ext.cloudstorage import rest_api from google.appengine.ext.cloudstorage import test_utils class RestApiTest(unittest.TestCase): def setUp(self): super(RestApiTest, self).setUp() self.testbed = testbed.Testbed() self.testbed.activate() self.testbed.init_app_identity_stub() self.testbed.init_datastore_v3_stub() self.testbed.init_memcache_stub() self.testbed.init_urlfetch_stub() api_utils._thread_local_settings.retry_params = None def tearDown(self): self.testbed.deactivate() super(RestApiTest, self).tearDown() def testBasicCall(self): api = rest_api._RestApi('scope') self.assertEqual(api.scopes, ['scope']) fut_get_token = ndb.Future() fut_get_token.set_result('blah') api.get_token_async = mock.create_autospec(api.get_token_async, return_value=fut_get_token) fut_urlfetch = ndb.Future() fut_urlfetch.set_result( test_utils.MockUrlFetchResult(200, {'foo': 'bar'}, 'yoohoo')) ctx_urlfetch = mock.Mock(return_value=fut_urlfetch) ndb.get_context().urlfetch = ctx_urlfetch res = api.do_request('http://example.com') self.assertEqual(res, (200, {'foo': 'bar'}, 'yoohoo')) ctx_urlfetch.assert_called_once_with( 'http://example.com', headers={'authorization': 'OAuth blah', 'User-Agent': 'AppEngine-Python-GCS'}, follow_redirects=False, payload=None, method='GET', deadline=None, callback=None) def testBasicCallWithUserAgent(self): user_agent = 'Test User Agent String' retry_params = api_utils.RetryParams(_user_agent=user_agent) api = rest_api._RestApi('scope', retry_params=retry_params) self.assertEqual(api.scopes, ['scope']) fut_get_token = ndb.Future() fut_get_token.set_result('blah') api.get_token_async = mock.create_autospec(api.get_token_async, return_value=fut_get_token) fut_urlfetch = ndb.Future() fut_urlfetch.set_result( test_utils.MockUrlFetchResult(200, {'foo': 'bar'}, 'yoohoo')) ctx_urlfetch = mock.Mock(return_value=fut_urlfetch) ndb.get_context().urlfetch = ctx_urlfetch res = api.do_request('http://example.com') self.assertEqual(res, (200, {'foo': 'bar'}, 'yoohoo')) ctx_urlfetch.assert_called_once_with( 'http://example.com', headers={'authorization': 'OAuth blah', 'User-Agent': user_agent}, follow_redirects=False, payload=None, method='GET', deadline=None, callback=None) def testNoToken(self): api = rest_api._RestApi('scope') self.assertEqual(api.scopes, ['scope']) fut_get_token = ndb.Future() fut_get_token.set_result(None) api.get_token_async = mock.create_autospec(api.get_token_async, return_value=fut_get_token) fut_urlfetch = ndb.Future() fut_urlfetch.set_result( test_utils.MockUrlFetchResult(200, {'foo': 'bar'}, 'yoohoo')) ctx_urlfetch = mock.Mock(return_value=fut_urlfetch) ndb.get_context().urlfetch = ctx_urlfetch res = api.do_request('http://example.com') self.assertEqual(res, (200, {'foo': 'bar'}, 'yoohoo')) ctx_urlfetch.assert_called_once_with( 'http://example.com', headers={'User-Agent': 'AppEngine-Python-GCS'}, follow_redirects=False, payload=None, method='GET', deadline=None, callback=None) def testMultipleScopes(self): api = rest_api._RestApi(['scope1', 'scope2']) 
self.assertEqual(api.scopes, ['scope1', 'scope2']) def testNegativeTimeout(self): api = rest_api._RestApi('scope') fut1 = ndb.Future() fut1.set_result(('token1', 0)) fut2 = ndb.Future() fut2.set_result(('token2', 0)) api.make_token_async = mock.create_autospec( api.make_token_async, side_effect=[fut1, fut2]) token1 = api.get_token() token2 = api.get_token() self.assertNotEqual(token1, token2) def testNoExpiredToken(self): with mock.patch('time.time') as t: t.side_effect = [2, 4, 5, 6] api = rest_api._RestApi('scope') fut1 = ndb.Future() fut1.set_result(('token1', 3 + api.expiration_headroom)) fut2 = ndb.Future() fut2.set_result(('token2', 7 + api.expiration_headroom)) api.make_token_async = mock.create_autospec( api.make_token_async, side_effect=[fut1, fut2]) token = api.get_token() self.assertEqual('token1', token) token = api.get_token() self.assertEqual('token2', token) token = api.get_token() self.assertEqual('token2', token) def testTokenMemoized(self): ndb_ctx = ndb.get_context() ndb_ctx.set_cache_policy(lambda key: False) ndb_ctx.set_memcache_policy(lambda key: False) api = rest_api._RestApi('scope') t1 = api.get_token() self.assertNotEqual(None, t1) api = rest_api._RestApi('scope') t2 = api.get_token() self.assertEqual(t2, t1) def testTokenSaved(self): retry_params = api_utils.RetryParams(save_access_token=True) api = rest_api._RestApi('scope', retry_params=retry_params) t1 = api.get_token() self.assertNotEqual(None, t1) api = rest_api._RestApi('scope', retry_params=retry_params) t2 = api.get_token() self.assertEqual(t2, t1) memcache.flush_all() ndb.get_context().clear_cache() api = rest_api._RestApi('scope', retry_params=retry_params) t3 = api.get_token() self.assertEqual(t3, t1) def testDifferentServiceAccounts(self): api1 = rest_api._RestApi('scope', 123) api2 = rest_api._RestApi('scope', 456) t1 = api1.get_token() t2 = api2.get_token() self.assertNotEqual(t1, t2) def testSameServiceAccount(self): api1 = rest_api._RestApi('scope', 123) api2 = rest_api._RestApi('scope', 123) t1 = api1.get_token() t2 = api2.get_token() self.assertEqual(t1, t2) def testCallUrlFetch(self): api = rest_api._RestApi('scope') fut = ndb.Future() fut.set_result(test_utils.MockUrlFetchResult(200, {}, 'response')) ndb.Context.urlfetch = mock.create_autospec( ndb.Context.urlfetch, return_value=fut) res = api.urlfetch('http://example.com', method='PUT', headers={'a': 'b'}) self.assertEqual(res.status_code, 200) self.assertEqual(res.content, 'response') def testPickling(self): retry_params = api_utils.RetryParams(max_retries=1000) api = rest_api._RestApi('scope', service_account_id=1, retry_params=retry_params) self.assertNotEqual(None, api.get_token()) pickled_api = pickle.loads(pickle.dumps(api)) self.assertEqual(0, len(set(api.__dict__.keys()) ^ set(pickled_api.__dict__.keys()))) for k, v in api.__dict__.iteritems(): if not hasattr(v, '__call__'): self.assertEqual(v, pickled_api.__dict__[k]) pickled_api.token = None fut_urlfetch = ndb.Future() fut_urlfetch.set_result( test_utils.MockUrlFetchResult(200, {'foo': 'bar'}, 'yoohoo')) pickled_api.urlfetch_async = mock.create_autospec( pickled_api.urlfetch_async, return_value=fut_urlfetch) res = pickled_api.do_request('http://example.com') self.assertEqual(res, (200, {'foo': 'bar'}, 'yoohoo')) def testUrlFetchCalledWithUserProvidedDeadline(self): retry_params = api_utils.RetryParams(urlfetch_timeout=90) api = rest_api._RestApi('scope', retry_params=retry_params) resp_fut1 = ndb.Future() resp_fut1.set_exception(urlfetch.DownloadError()) resp_fut2 = ndb.Future() 
resp_fut2.set_result(test_utils.MockUrlFetchResult(httplib.ACCEPTED, None, None)) ndb.Context.urlfetch = mock.create_autospec( ndb.Context.urlfetch, side_effect=[resp_fut1, resp_fut2]) self.assertEqual(httplib.ACCEPTED, api.do_request('foo')[0]) self.assertEqual( 90, ndb.Context.urlfetch.call_args_list[0][1]['deadline']) self.assertEqual( 90, ndb.Context.urlfetch.call_args_list[1][1]['deadline']) def testRetryAfterDoRequestUrlFetchTimeout(self): api = rest_api._RestApi('scope') resp_fut1 = ndb.Future() resp_fut1.set_exception(urlfetch.DownloadError()) resp_fut2 = ndb.Future() resp_fut2.set_result(test_utils.MockUrlFetchResult(httplib.ACCEPTED, None, None)) ndb.Context.urlfetch = mock.create_autospec( ndb.Context.urlfetch, side_effect=[resp_fut1, resp_fut2]) self.assertEqual(httplib.ACCEPTED, api.do_request('foo')[0]) self.assertEqual(2, ndb.Context.urlfetch.call_count) def testRetryAfterDoRequestResponseTimeout(self): api = rest_api._RestApi('scope') resp_fut1 = ndb.Future() resp_fut1.set_result(test_utils.MockUrlFetchResult(httplib.REQUEST_TIMEOUT, None, None)) resp_fut2 = ndb.Future() resp_fut2.set_result(test_utils.MockUrlFetchResult(httplib.ACCEPTED, None, None)) ndb.Context.urlfetch = mock.create_autospec( ndb.Context.urlfetch, side_effect=[resp_fut1, resp_fut2]) self.assertEqual(httplib.ACCEPTED, api.do_request('foo')[0]) self.assertEqual(2, ndb.Context.urlfetch.call_count) def testRetryAfterAppIdentityError(self): api = rest_api._RestApi('scope') token_fut = ndb.Future() token_fut.set_result('token1') api.get_token_async = mock.create_autospec( api.get_token_async, side_effect=[app_identity.InternalError, app_identity.InternalError, token_fut]) resp_fut = ndb.Future() resp_fut.set_result(test_utils.MockUrlFetchResult(httplib.ACCEPTED, None, None)) ndb.Context.urlfetch = mock.create_autospec( ndb.Context.urlfetch, side_effect=[resp_fut]) self.assertEqual(httplib.ACCEPTED, api.do_request('foo')[0]) self.assertEqual( 'OAuth token1', ndb.Context.urlfetch.call_args[1]['headers']['authorization']) self.assertEqual(3, api.get_token_async.call_count) if __name__ == '__main__': unittest.main()
If you are in search of an Authorized Heartland RV Dealer, J & R MARBLE is here to serve you. J & R MARBLE and Heartland RV are committed to helping with your Heartland RV Product needs for you and your family. J & R MARBLE is your authorized ATHENS, IL Heartland RV Dealer for the following Heartland brands: Bighorn, Bighorn Traveler, and Torque. You can find this Heartland RV dealership, J & R MARBLE, located at 14148 STATE HWY 29 or call them at 217-636-8536. With Heartland’s strong commitment to its customers and cutting-edge innovation of quality Heartland products, we are certain you will find the Toy Hauler, Travel Trailer or Fifth Wheel you are looking for today.
"""v3/mail/send response body builder""" from .personalization import Personalization from .header import Header class Mail(object): """A request to be sent with the SendGrid v3 Mail Send API (v3/mail/send). Use get() to get the request body. """ def __init__( self, from_email=None, subject=None, to_email=None, content=None): """Create a Mail object. If parameters are supplied, all parameters must be present. :param from_email: Email address to send from. :type from_email: Email, optional :param subject: Subject line of emails. :type subject: string, optional :param to_email: Email address to send to. :type to_email: Email, optional :param content: Content of the message. :type content: Content, optional """ self._from_email = None self._subject = None self._template_id = None self._send_at = None self._batch_id = None self._asm = None self._ip_pool_name = None self._mail_settings = None self._tracking_settings = None self._reply_to = None self._personalizations = [] self._contents = [] self._attachments = [] self._sections = [] self._headers = [] self._categories = [] self._custom_args = [] # Minimum required to send an email if from_email and subject and to_email and content: self.from_email = from_email self.subject = subject personalization = Personalization() personalization.add_to(to_email) self.add_personalization(personalization) self.add_content(content) def __str__(self): """Get a JSON representation of this Mail request. :rtype: string """ return str(self.get()) def get(self): """Get a response body for this Mail. :rtype: dict """ mail = {} if self.from_email is not None: mail["from"] = self.from_email.get() if self.subject is not None: mail["subject"] = self.subject if self.personalizations: mail["personalizations"] = [ personalization.get() for personalization in self.personalizations ] if self.contents: mail["content"] = [ob.get() for ob in self.contents] if self.attachments: mail["attachments"] = [ob.get() for ob in self.attachments] if self.template_id is not None: mail["template_id"] = self.template_id if self.sections: sections = {} for key in self.sections: sections.update(key.get()) mail["sections"] = sections if self.headers: headers = {} for key in self.headers: headers.update(key.get()) mail["headers"] = headers if self.categories: mail["categories"] = [category.get() for category in self.categories] if self.custom_args: custom_args = {} for key in self.custom_args: custom_args.update(key.get()) mail["custom_args"] = custom_args if self.send_at is not None: mail["send_at"] = self.send_at if self.batch_id is not None: mail["batch_id"] = self.batch_id if self.asm is not None: mail["asm"] = self.asm.get() if self.ip_pool_name is not None: mail["ip_pool_name"] = self.ip_pool_name if self.mail_settings is not None: mail["mail_settings"] = self.mail_settings.get() if self.tracking_settings is not None: mail["tracking_settings"] = self.tracking_settings.get() if self.reply_to is not None: mail["reply_to"] = self.reply_to.get() return mail @property def from_email(self): """The email from which this Mail will be sent. :rtype: string """ return self._from_email @from_email.setter def from_email(self, value): self._from_email = value @property def subject(self): """The global, or "message level", subject of this Mail. This may be overridden by personalizations[x].subject. :rtype: string """ return self._subject @subject.setter def subject(self, value): self._subject = value @property def template_id(self): """The id of a template that you would like to use. 
If you use a template that contains a subject and content (either text or html), you do not need to specify those at the personalizations nor message level. :rtype: int """ return self._template_id @template_id.setter def template_id(self, value): self._template_id = value @property def send_at(self): """A unix timestamp allowing you to specify when you want your email to be delivered. This may be overridden by the personalizations[x].send_at parameter. Scheduling more than 72 hours in advance is forbidden. :rtype: int """ return self._send_at @send_at.setter def send_at(self, value): self._send_at = value @property def batch_id(self): """An ID for this batch of emails. This represents a batch of emails sent at the same time. Including a batch_id in your request allows you include this email in that batch, and also enables you to cancel or pause the delivery of that batch. For more information, see https://sendgrid.com/docs/API_Reference/Web_API_v3/cancel_schedule_send.html :rtype: int """ return self._batch_id @batch_id.setter def batch_id(self, value): self._batch_id = value @property def asm(self): """The ASM for this Mail. :rtype: ASM """ return self._asm @asm.setter def asm(self, value): self._asm = value @property def mail_settings(self): """The MailSettings for this Mail. :rtype: MailSettings """ return self._mail_settings @mail_settings.setter def mail_settings(self, value): self._mail_settings = value @property def tracking_settings(self): """The TrackingSettings for this Mail. :rtype: TrackingSettings """ return self._tracking_settings @tracking_settings.setter def tracking_settings(self, value): self._tracking_settings = value @property def ip_pool_name(self): """The IP Pool that you would like to send this Mail email from. :rtype: string """ return self._ip_pool_name @ip_pool_name.setter def ip_pool_name(self, value): self._ip_pool_name = value @property def reply_to(self): """The email address to use in the Reply-To header. :rtype: Email """ return self._reply_to @reply_to.setter def reply_to(self, value): self._reply_to = value @property def personalizations(self): """The Personalizations applied to this Mail. Each object within personalizations can be thought of as an envelope - it defines who should receive an individual message and how that message should be handled. A maximum of 1000 personalizations can be included. :rtype: list """ return self._personalizations def add_personalization(self, personalizations): """Add a new Personalization to this Mail. :type personalizations: Personalization """ self._personalizations.append(personalizations) @property def contents(self): """The Contents of this Mail. Must include at least one MIME type. :rtype: list(Content) """ return self._contents def add_content(self, content): """Add a new Content to this Mail. Usually the plaintext or HTML message contents. :type content: Content """ if self._contents is None: self._contents = [] # Text content should be before HTML content if content._type == "text/plain": self._contents.insert(0, content) else: self._contents.append(content) @property def attachments(self): """The attachments included with this Mail. :returns: List of Attachment objects. :rtype: list(Attachment) """ return self._attachments def add_attachment(self, attachment): """Add an Attachment to this Mail. :type attachment: Attachment """ self._attachments.append(attachment) @property def sections(self): """The sections included with this Mail. :returns: List of Section objects. 
:rtype: list(Section) """ return self._sections def add_section(self, section): """Add a Section to this Mail. :type attachment: Section """ self._sections.append(section) @property def headers(self): """The Headers included with this Mail. :returns: List of Header objects. :rtype: list(Header) """ return self._headers def add_header(self, header): """Add a Header to this Mail. The header provided can be a Header or a dictionary with a single key-value pair. :type header: object """ if isinstance(header, dict): (k, v) = list(header.items())[0] self._headers.append(Header(k, v)) else: self._headers.append(header) @property def categories(self): """The Categories applied to this Mail. Must not exceed 10 items :rtype: list(Category) """ return self._categories def add_category(self, category): """Add a Category to this Mail. Must be less than 255 characters. :type category: string """ self._categories.append(category) @property def custom_args(self): """The CustomArgs attached to this Mail. Must not exceed 10,000 characters. :rtype: list(CustomArg) """ return self._custom_args def add_custom_arg(self, custom_arg): if self._custom_args is None: self._custom_args = [] self._custom_args.append(custom_arg)
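A minimal usage sketch of the Mail class above, assuming the companion Email and Content helpers that normally sit alongside this module (the import paths and addresses are illustrative, not taken from this file):

from .email import Email        # assumed sibling helper
from .content import Content    # assumed sibling helper

mail = Mail(
    from_email=Email("sender@example.com"),
    subject="Hello from the v3 mail helper",
    to_email=Email("recipient@example.com"),
    content=Content("text/plain", "Plain-text body goes here"),
)
# get() returns the dict that is serialized and POSTed to v3/mail/send.
request_body = mail.get()

Passing all four constructor arguments builds the single Personalization and Content entry automatically; further personalizations, attachments, or settings can then be attached with the add_* methods before calling get().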
Mama Dis is Pan: ‘Mr Ray’ delights. Virtuoso pannist and arranger Ray Holman showcased his well-honed skills in his recent concert series. The following report on the event was written by Gillian Moore and published in the Trinidad Guardian. Ray Holman’s recent concert series—Mama Dis Is Pan—staged at the Little Carib Theatre in Woodbrook February 23-25, was more than a presentation of the sweetness of the instrument; it was a showcase of a formidable body of work by the pan master, a nostalgic journey through his life in the steelband, and a celebration of pan hits through the years —all rendered in a style all his own. In his unassuming manner, the white-haired veteran took the stage before his band of musicians on opening night, to lead them on pan through two laid-back yet vibesy sets. Unlike many steelband virtuosos, his playing was fully integrated with the band. Although he took several lovely solos, he never dominated the sound. The musicians— including Anthony Woodroffe sax; Dean Williams guitar; Brian Perkins percussion; Kenneth Clarke congas Joey Samuels drums; Mike Germaine bass; and, Dereck Cadogan keyboards—did well, keeping the sound smooth, without letting it get boring. Also on the cast were several vocalists, including De Alberto, in his first local performance in 20 years, Kenny J and Jerelle Forde. Holman got things going with his Pan On the Move, the first “pan tune” ever written. London-based De Alberto joined in for pleasing renditions of Pan Woman and Scrunter’s Woman on the Bass. His performance was well appreciated by the audience. Holman changed the pace with I’ll Always Love You (Taylor Dane), which he said was one of his favourite songs, before introducing young vocalist Jerelle Forde, whom he called “the best-kept secret in Trinidad.” She sang Oh Trinidad, the last of many songs on which Holman collaborated with the late Merchant. The band then did a Merchant medley, including Barataria Sweet (but Morne Diablo sweeter) and Taxi Driver. The opening bars of Steel Band Clash were enough for the audience to chime in with “A-ha!” Alternating between the mic and his spot behind his double drums, Holman sang the lyrics with the crowd joining him for the refrain: “Never me again to jump in a steelband in Port-of-Spain.” The pan ace recounted how as a youth in Woodbrook, he was mentored by pan pioneer Ellie Mannette at Invaders panyard. He said he recalled, in 1955, first hearing Manette play double seconds on the radio. He said it was “like something out of this world”. Sweetly, he played Melody’s Michael, a simple song harking back to those early days. A Sparrow medley was next, Holman hailing the Birdie, along with Kithchener as one of his great musical influences. The band did Rose, Memories, Congo Man, Jane and Melda. The second half offered more memorable gems, including a delightful arrangement of Pan in Harmony; Jerelle Forde with Carnival is For the Woman and a lively performance of Shadow’s I Come Out to Play; De Alberto doing My Band; Kitchener’s Margie and Iron Man; and Kenny J doing Plenty Loving and Baron’s Somebody, which was a big hit with the crowd. The band got whimsical with I Feel Pretty, giving it a nice calypso swing. They segued into Penny Lane to close the show. The audience rose to their feet to give Holman a rousing ovation, and the emotion in the room was palpable. He had delivered a show that was nostalgic and sweet, romantic and musical. The band performed a short classical piece as an encore, topping off a beautiful show. 
For the original report: Mama Dis is Pan: ‘Mr Ray’ delights with musical nostalgia | The Trinidad Guardian Newspaper. Trinidad Guardian’s New York Correspondent, Dr. Glenville Ashby, reports on the event held at the Trinidad Consulate New York offices to commemorate Black History month. Drummers ignite the large audience. The Trinidad and Tobago Consulate in New York was transformed into a virtual palais as members of the Orisa and Shouter faith, backed by drummers, ignited the packed audience. The occasion was Black History Month celebration, the first of its kind at the downtown Manhattan consular offices. In her opening remarks, consul General Rudrawatee Nan Ramgoolam stressed the importance of culture in the lives of people the world over. She lamented the cultural disconnect by many of the nation’s youths, and beckoned them to re-examine their rich cultural heritage and the people who have excelled in every field despite struggles and obstacles. “Too many of our youths are without positive mentors and even heroes and we feel that celebrating the great persons in our past and present will offer a point of reference,” she noted. She identified the unparalleled contribution of Dr Eric Williams, ANR Robinson, CLR James, Rudolph Charles, Boscoe Holder, Len Boogsie Sharpe, Giselle La Ronde, Janelle Commissiong, Hasley Crawford and a host of others prominent Afro-Trinidadians. She also made mention of the distinctly Afro-centric faiths that make up the twin island’s religious mosaic, and the contribution of the community to the unique island cuisine. For the original report: T&T celebrates Black History Month in NY | The Trinidad Guardian Newspaper. The Caribbean is a fertile environment that fosters complex identities created through the fusion of cultures brought to the islands, identities that Caribbean peoples then take with them as they leave their nations and settle into new homes. The traditions transmitted within these communities are continually subject to loss, gain and reinterpretation. Communication practices play a role in this process as they help to maintain, express, transfer, and challenge the diasporic identities of Caribbean. “Re-Constructing Place and Space: Media, Culture, Discourse and the Constitution of a Caribbean Diaspora” examines the role of cultural performances and mediated expressions in the construction and maintenance of Caribbean diasporic identities. The objectives for the book are two-fold. The general objective is to contribute to discourse on diasporic identity and performativity. The more specific aim of the book is to highlight the diversity and complexity of Caribbean people’s production of and engagement with cultural forms. Though much work has been done to debunk the exoticized images of Caribbean nations, people from these countries are often perceived as an essentialized, undifferentiated category, and as technologically and intellectually backward, incapable of sophisticated cultural production, interaction and interpretation. “Re-Constructing Place and Space: Media, Culture, Discourse And the Constitution of a Caribbean Diaspora” seeks to present a more complex representation of people in the Caribbean diaspora, one that highlights their complicated and dynamic relationship to mediated material. The volume emerged from the 2009 New Media and the Global Diaspora Symposium: Exploring Media in Caribbean Diasporas held at Roger Williams University. 
The event sought to encourage academic discourse focused on Caribbean migratory populations, foregrounding the role of communicative practices in transmitting and sustaining their traditions. It was also designed as an interdisciplinary forum for Caribbean researchers who study the nature, significance and consequence of Caribbean migration. In keeping with the spirit of the symposium then, this volume applies a transdisciplinary lens to understanding the diversity and complexity of peoples from the Caribbean region and their diasporic communities. KAMILLE GENTLES-PEART (PH.D.) is Assistant Professor of Global Communication at Roger Williams University. She received a B.A. in Mass Communication, with a focus on multicultural journalism, from Lehman College of the City University of New York, and holds a Ph.D. in Communication from the University of Michigan, Ann Arbor. Her general research interests include the relationship between diasporic identity construction, particularly of West Indian women in the U.S., and media engagement. MAURICE L. HALL (PH.D.) is Chair and Associate Professor in the Communication Department at Villanova University, Pennsylvania where he teaches courses on communication in organizations, research methods, and organizational research and consulting. Dr. Hall has also worked as a consultant with a variety of organizations over the past ten years. He specializes in facilitating strategic planning sessions for non-profit organizations, and working with organizations on issues ranging from diversity training and strategic diversity management to conflict management, team building, and organizational communication management. “John Gray, master bibliographer of the Afro-Atlantic world has done it again. His powerful new work, Afro-Cuban Music: A Bibliographic Guide, covers the subject in all its facets, all its glory. I wandered happily through this wondrous text, learning, learning, learning. Gray makes you aware of what an amazing cultural machine black Cuba is, from the habanera to orisha rap and back again. A landmark publication in Black Studies. Despite its relatively small size Cuba has had an inordinately large musical influence both inside the Caribbean and abroad. From the “rhumba” craze of the 1920s and ’30s to mambo and cha-cha-cha in the 1950s and ’60s and the Buena Vista Social Club phenomenon of the late ’90s, Cuba has been central to popular music developments in Latin America, Europe, and the United States. Unfortunately, no one has ever attempted to survey the extensive literature on the island’s music, in particular the vernacular contributions of its Afro-Cuban population. This unprecedented bibliographic guide, the third in ADP’s critically acclaimed Black Music Reference Series, attempts to do just that. Ranging from the 19th century to early 2009 Afro-Cuban Music offers almost 5000 annotated entries on the island’s various festival and Carnival traditions as well as each of its main musical families-Cancion Cubana, Danzon, Jazz, Son, Rumba, and Sacred Musics (Santeria, Palo, Abakua, and Arara)-along with more recent developments such as timba, rap and regueton. It also provides sections on Afro-Cuban musical instruments, the music’s influence abroad, and a biographical and critical component covering the lives and careers of some 800 individual artists and ensembles. 
Spanish-language sources are covered comprehensively, in particular dozens of locally published journals, along with a sizable cross-section of the international literature in English, French, German, and other European languages. The work concludes with an extensive reference section offering lists of Sources Consulted, a guide to relevant Libraries and Archives, an appendix listing artists and individuals by idiom/occupation, and separate Author and Subject indexes. An essential tool for students, scholars and librarians seeking a window into Afro-Cuban expressive culture-its music and dance, religion, language, literature, aesthetics, and more-both on the island and abroad. The author is veteran bibliographer John Gray whose previous works include Blacks in Classical Music, African Music, Fire Music: a bibliography of the New Jazz, 1959-1990, From Vodou to Zouk, and, Jamaican Popular Music. To order please visit the ADP website: www.african-diaspora-press.com<http://www.african-diaspora-press.com/>. The book is also available through most library wholesalers. Afro-Cuban Music: A Bibliographic Guide is an impressive accomplishment that will prove an invaluable resource for researchers. It is well-organized and offers comprehensive coverage of the available literature, particularly periodical sources which would otherwise be very difficult to find. Users will also appreciate the many annotations included for the details they provide on each work’s contents. Marcia Rowe, Jamaica Gleaner writer, reports on the observation of the 50th anniversary of the Jamaica’s National Dance Theatre Company. In 1961, at the invitation of Norman Manley, 18 leading dancers from different dance schools were thrown together to form the Jamaica Dance Company. They had no repertoire. The following year, 1962, under the same name, they danced at Jamaica’s Independence celebration. But, in September of that year, all the dancers left the original schools that they were part of; and decided to establish themselves as the National Dance Theatre Company (NDTC). Fast forward 50 years later with an extensive repertoire, many international tours, a studio of their own, a feeder school, many new faces, the NDTC celebrates its 50th year of existence. “None among them [the founding members] could have predicted that the company would have accomplished all that it has,” said Barry Moncriefe, NDTC artistic director, at the Company’s 50th anniversary launch last Thursday. “The company has managed to sustain interest in and support for Jamaican dance theatre over five decades, and is revered as one of Jamaica’s most loved cultural treasures. It is for this reason that we remain resolute to our vision to forge out of the Jamaican and Caribbean culture and life, an art form faithful to reality while being a part of a wider world and universal landscape of the creative arts,” continued the longstanding company member. “The LTM and NDTC relationship speaks for itself, we need each other,” she said, before continuing to creatively embroider her personal experiences with the NDTC into her speech. She recounted her first overseas tour with NDTC to Canada as a young journalist at The Gleaner. “The first tour was a birth of what happened after – to travel the world.” There were to be numerous adventures along the way including a “nice time in Atlanta”. 
With the dance floor of the NDTC studio as the stage, the delightful evening’s programme flowed with a message from the chairman of the Rex Nettleford Foundation, Carlton Davis, a vote of thanks from NDTC Musical Director, Marjorie Whylie, and excerpts from the company’s’ repertoire. The dances were performed by the now generation of dancers. The dances ranged from works from the young choreographer to the old, from the classic to the contemporary. The entertainment package danced off with Oneil Pryce-choreographed Barre Talk. It was followed by Clive Thompson’s Phases of the Moon. Sandwiched between two of Netleford’s works, The Crossing and Odyssey, was a lovely presentation from the NDTC Singers. Some time later, after the formalities, The Gleaner caught up with one of the seven living, founding members of NDTC, Bert Rose. He described the journey of the company as wonderful. “Nettleford had a vision. When we started, we never knew that we would have celebrated 50 years. Only seven of us are still alive out of 18,” said Rose. “Eddy Thomas’ first choreography was called Legendary Lovers Leap, and Rex did a piece called Plantation Revelry,” Rose said, of the beginnings of the company. Rose also went on to explain some of the challenges of development the company faced. “We swept the stage, we painted the backdrop, ironed our clothes, ran box office,” Rose said. As the company developed, they got someone to wash the costumes and hired a costume mistress. Other noted changes over the years are the shift in the choice of musical genre and the incorporation of international choreographers like Cuban-born Edwardo Rivero Walker. Davis was the MC for the evening. Just a couple weeks into the position, he explained that one of his reasons for accepting the post was his belief that this sort of cultural organisation should be supported. It represents the best of Jamaica. And what is to be expected under his stewardship? The year-long NDTC 50th anniversary celebration will continue with the usual calendar events as well as some new ones. The evening was also used to launch the NDTC website. For original report: NDTC begins celebration of its 50th – Entertainment – Jamaica Gleaner – Tuesday | February 7, 2012. David Lewis reviews the release of Ifetayo album of the 1970s Trinidadian musical aggregation, Black Truth Rhythm Band, for Black Grooves. Soundway Records, a label that is invested in re-releasing vintage music from the Global South, has found a gem in this 1976 album that, at the time, made very little impact. The Black Truth Rhythm Band, an Afro-centric Trinidadian group, was formed in 1971 but made only one recording before disbanding, though Oluko Imo?the band leader who plays multiple instruments here?went on to record with Fela Kuti. The band formed and released Ifetayo in the midst of the Black Power Movement in Trinidad and, along with performers like Lancelot Layne and Cheryl Byron, responded to the African-centered vibe of the movement in their music, making liberal use of African-style drumming and instruments like the mbira. The album is not simply an African fusion album, though; the band skillfully weaves music from many traditions through a soul and funk-tinged set of tracks. The title track, “Ifetayo,” is deliciously funky and falls into a drum and flute break that sounds decidedly West African. “Kilimanjaro” ends with an up-tempo Latin American guitar break. 
The band also incorporates steelpan, the national instrument of Trinidad, as an integral part of the group (not simply a flourish to pander to tourists) in "Save D Musician" and "Umbala." Further emphasizing their Trinidadian roots, "Aspire" rides a decidedly funked-up calypso beat, and "Save D Musician" has a calypso-like shuffle and lyrics that address social issues, like any good calypso song. Since this album is a re-issue, listeners have to be invested in the early 1970s sound, but those who are will find a strong album that deserved much more attention than it received initially. The album also prefigured a number of developments in Trinidadian music, including the African-influenced music of rapso artists like Brother Resistance and 3canal, and the current Trinidad-based world fusion band Terrenaisance. Ifetayo is currently available from Soundway in three formats: an MP3 download and CD, both of which contain a bonus track, or, for purists, an LP with a 7" disc containing the bonus track. For the original post: Ifetayo | blackgrooves.org.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2007 Philippe LAWRENCE
#
# This file is part of pyBar.
#   pyBar is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
#   pyBar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyBar; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA

from gi.repository import Gtk, GLib


class Singleton(object):

    def __new__(cls, *args, **kwargs):
        if '_inst' not in vars(cls):
            # object.__new__ must not receive the extra constructor arguments
            # (Python 3 raises TypeError otherwise)
            cls._inst = object.__new__(cls)
        return cls._inst


class Message(Singleton):

    def __init__(self):
        # Do not initialize anything here: as a Singleton, __init__ runs on
        # every instantiation and would reset the shared state.
        pass

    def set_message(self, content):
        """Format the message (keep only its first line) and schedule its
        display if it has changed."""
        if self._content == content:
            return
        if content is None:
            self._content = None
        else:
            text, ind = content
            pos = text.find('\n')
            if not pos == -1:
                text = text[:pos]
            self._content = (text, ind)
        if self.has_changed is False:
            self.has_changed = True
            GLib.idle_add(self._print_message)

    def ini_message(self, box):
        self.has_changed = False
        self.box = box
        self._content = None

    def _print_message(self):
        """type = 0 : error; 1 : warning; 2 : info"""
        self.has_changed = False
        box = self.box
        if box is None:
            return
        for elem in box.get_children():
            box.remove(elem)
        if self._content is None:
            return
        text, msg_type = self._content
        # icon
        image = Gtk.Image()
        if msg_type == 0:
            image.set_from_icon_name('dialog-error', Gtk.IconSize.BUTTON)
        elif msg_type == 1:
            image.set_from_icon_name('dialog-warning', Gtk.IconSize.BUTTON)
        elif msg_type in (2, 3):
            image.set_from_icon_name("dialog-information", Gtk.IconSize.BUTTON)
        image.show()
        box.pack_start(image, False, True, 0)
        box.set_spacing(10)
        label = Gtk.Label()
        label.set_text(text)
        label.set_use_markup(True)
        label.show()
        box.pack_start(label, False, True, 0)


class Dialog:

    def __init__(self, errors):
        text = '\n'.join(errors)
        if text == '':
            return
        dialog = Gtk.Dialog("Erreur", None,
                            Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,
                            (Gtk.STOCK_CLOSE, Gtk.ResponseType.CLOSE))
        dialog.set_icon_from_file("glade/logo.png")
        box = dialog.get_content_area()
        box.set_border_width(80)
        hbox = Gtk.HBox()
        image = Gtk.Image()
        image.set_from_icon_name('dialog-error', Gtk.IconSize.DIALOG)
        hbox.pack_start(image, False, False, 0)
        label = Gtk.Label(label=text)
        label.set_margin_start(10)
        label.show()
        hbox.pack_start(label, False, False, 0)
        hbox.show_all()
        box.add(hbox)
        dialog.run()
        dialog.destroy()
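A minimal usage sketch of the Message singleton follows. It is not part of pyBar itself: the surrounding Gtk setup (a running main loop and the box used as a status area) is assumed, and the message text is illustrative.

# Minimal sketch, assuming a Gtk main loop is running elsewhere in the application
# and that `Message` above has been imported into the current namespace.
status_box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)

msg = Message()                  # Singleton: every call returns the same instance
msg.ini_message(status_box)      # attach the box that will display messages
msg.set_message(("Model solved successfully", 2))   # type 2 -> info icon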
from __future__ import absolute_import, division, print_function from collections import Mapping from keyword import iskeyword import re import datashape from datashape import ( dshape, DataShape, Record, Var, Fixed, promote, Option, Null, ) from datashape.predicates import ( isscalar, iscollection, isboolean, isrecord, istabular, ) import numpy as np from odo.utils import copydoc import toolz from toolz import concat, memoize, partial, first, unique, merge from toolz.curried import map, filter from ..compatibility import _strtypes, builtins, boundmethod, PY2 from .core import ( Node, _setattr, common_subexpression, path, resolve_args, subs, ) from .method_dispatch import select_functions from ..dispatch import dispatch from .utils import hashable_index, replace_slices, maxshape from ..utils import attribute, as_attribute __all__ = [ 'Apply', 'Cast', 'Coalesce', 'Coerce', 'ElemWise', 'Expr', 'Field', 'Label', 'Map', 'Projection', 'ReLabel', 'Selection', 'SimpleSelection', 'Slice', 'Symbol', 'apply', 'cast', 'coalesce', 'coerce', 'discover', 'drop_field', 'label', 'ndim', 'projection', 'relabel', 'selection', 'shape', 'symbol', ] def isvalid_identifier(s): """Check whether a string is a valid Python identifier Examples -------- >>> isvalid_identifier('Hello') True >>> isvalid_identifier('Hello world') False >>> isvalid_identifier('Helloworld!') False >>> isvalid_identifier('1a') False >>> isvalid_identifier('a1') True >>> isvalid_identifier('for') False >>> isvalid_identifier(None) False """ # the re module compiles and caches regexs so no need to compile it return (s is not None and not iskeyword(s) and re.match(r'^[_a-zA-Z][_a-zA-Z0-9]*$', s) is not None) def valid_identifier(s): """Rewrite a string to be a valid identifier if it contains >>> valid_identifier('hello') 'hello' >>> valid_identifier('hello world') 'hello_world' >>> valid_identifier('hello.world') 'hello_world' >>> valid_identifier('hello-world') 'hello_world' >>> valid_identifier(None) >>> valid_identifier('1a') """ if isinstance(s, _strtypes): if not s or s[0].isdigit(): return return s.replace(' ', '_').replace('.', '_').replace('-', '_') return s class Expr(Node): """ Symbolic expression of a computation All Blaze expressions (Join, By, Sort, ...) descend from this class. It contains shared logic and syntax. 
It in turn inherits from ``Node`` which holds all tree traversal logic """ def __repr__(self): return str(self) def _get_field(self, fieldname): if not isinstance(self.dshape.measure, (Record, datashape.Map)): if fieldname == self._name: return self raise ValueError( "Can not get field '%s' of non-record expression %s" % (fieldname, self)) return Field(self, fieldname) def __getitem__(self, key): if isinstance(key, _strtypes) and key in self.fields: return self._get_field(key) elif isinstance(key, Expr) and iscollection(key.dshape): return self._select(key) elif (isinstance(key, list) and builtins.all(isinstance(k, _strtypes) for k in key)): if set(key).issubset(self.fields): return self._project(key) else: raise ValueError('Names %s not consistent with known names %s' % (key, self.fields)) elif (isinstance(key, tuple) and all(isinstance(k, (int, slice, type(None), list, np.ndarray)) for k in key)): return sliceit(self, key) elif isinstance(key, (slice, int, type(None), list, np.ndarray)): return sliceit(self, (key,)) raise ValueError("Not understood %s[%s]" % (self, key)) def map(self, func, schema=None, name=None): return Map(self, func, schema, name) @attribute def schema(self): try: m = self._schema except AttributeError: schema = datashape.dshape(self.dshape.measure) else: schema = m() return _setattr(self, 'schema', schema) @attribute def dshape(self): return _setattr(self, 'dshape', self._dshape()) @property def fields(self): measure = self.dshape.measure if isinstance(self.dshape.measure, Option): measure = measure.ty if isinstance(measure, Record): return measure.names elif isinstance(measure, datashape.Map): if not isrecord(self.dshape.measure.value): raise TypeError('Foreign key must reference a ' 'Record datashape') return measure.value.names name = getattr(self, '_name', None) if name is not None: return [self._name] return [] def _len(self): try: return int(self.dshape[0]) except TypeError: raise ValueError('Can not determine length of table with the ' 'following datashape: %s' % self.dshape) def __len__(self): # pragma: no cover return self._len() def __iter__(self): raise NotImplementedError( 'Iteration over expressions is not supported.\n' 'Iterate over computed result instead, e.g. 
\n' "\titer(expr) # don't do this\n" "\titer(compute(expr)) # do this instead") def __dir__(self): result = dir(type(self)) if (isrecord(self.dshape.measure) or isinstance(self.dshape.measure, datashape.Map) and self.fields): result.extend(map(valid_identifier, self.fields)) result.extend(toolz.merge(schema_methods(self.dshape.measure), dshape_methods(self.dshape))) return sorted(set(filter(isvalid_identifier, result))) def __getattr__(self, key): assert key != '_hash', \ '%s should set _hash in _init' % type(self).__name__ try: result = object.__getattribute__(self, key) except AttributeError: fields = dict(zip(map(valid_identifier, self.fields), self.fields)) measure = self.dshape.measure if isinstance(measure, datashape.Map): # Foreign key measure = measure.key # prefer the method if there's a field with the same name methods = toolz.merge( schema_methods(measure), dshape_methods(self.dshape) ) if key in methods: func = methods[key] if func in method_properties: result = func(self) elif getattr(func, '__get__', None): result = func.__get__(self, type(self)) else: result = boundmethod(func, self) elif self.fields and key in fields: if isscalar(self.dshape.measure): # t.foo.foo is t.foo result = self else: result = self[fields[key]] else: raise # cache the attribute lookup, getattr will not be invoked again. _setattr(self, key, result) return result @attribute def _name(self): measure = self.dshape.measure if len(self._inputs) == 1 and isscalar(getattr(measure, 'key', measure)): child_measure = self._child.dshape.measure if isscalar(getattr(child_measure, 'key', child_measure)): # memoize the result return _setattr(self, '_name', self._child._name) def __enter__(self): """ Enter context """ return self def __exit__(self, *args): """ Exit context Close any open resource if we are called in context """ for value in self._resources().values(): try: value.close() except AttributeError: pass return True # Add some placeholders to help with refactoring. If we forget to attach # these methods later we will get better errors. # To find the real definition, look for usage of ``@as_attribute`` for method in ('_project', '_select', 'cast'): @attribute def _(self): raise AssertionError('method added after class definition') locals()[method] = _ del _ del method def sanitized_dshape(dshape, width=50): pretty_dshape = datashape.pprint(dshape, width=width).replace('\n', '') if len(pretty_dshape) > width: pretty_dshape = "{}...".format(pretty_dshape[:width]) return pretty_dshape class Symbol(Expr): """ Symbolic data. The leaf of a Blaze expression Examples -------- >>> points = symbol('points', '5 * 3 * {x: int, y: int}') >>> points <`points` symbol; dshape='5 * 3 * {x: int32, y: int32}'> >>> points.dshape dshape("5 * 3 * {x: int32, y: int32}") """ _arguments = '_name', 'dshape', '_token' _input_attributes = () def __repr__(self): fmt = "<`{}` symbol; dshape='{}'>" return fmt.format(self._name, sanitized_dshape(self.dshape)) def __str__(self): return self._name or '' def _resources(self): return {} @copydoc(Symbol) def symbol(name, dshape, token=None): return Symbol(name, datashape.dshape(dshape), token or 0) @dispatch(Symbol, Mapping) def _subs(o, d): """ Subs symbols using symbol function Supports caching""" newargs = (subs(arg, d) for arg in o._args) return symbol(*newargs) class ElemWise(Expr): """ Elementwise operation. The shape of this expression matches the shape of the child. 
""" def _dshape(self): return datashape.DataShape( *(self._child.dshape.shape + tuple(self.schema)) ) class Field(ElemWise): """ A single field from an expression. Get a single field from an expression with record-type schema. We store the name of the field in the ``_name`` attribute. Examples -------- >>> points = symbol('points', '5 * 3 * {x: int32, y: int32}') >>> points.x.dshape dshape("5 * 3 * int32") For fields that aren't valid Python identifiers, use ``[]`` syntax: >>> points = symbol('points', '5 * 3 * {"space station": float64}') >>> points['space station'].dshape dshape("5 * 3 * float64") """ _arguments = '_child', '_name' def __str__(self): fmt = '%s.%s' if isvalid_identifier(self._name) else '%s[%r]' return fmt % (self._child, self._name) @property def _expr(self): return symbol(self._name, datashape.DataShape(self.dshape.measure)) def _dshape(self): shape = self._child.dshape.shape measure = self._child.dshape.measure # TODO: is this too special-case-y? schema = getattr(measure, 'value', measure).dict[self._name] shape = shape + schema.shape schema = (schema.measure,) return DataShape(*(shape + schema)) class Projection(ElemWise): """Select a subset of fields from data. Examples -------- >>> accounts = symbol('accounts', ... 'var * {name: string, amount: int, id: int}') >>> accounts[['name', 'amount']].schema dshape("{name: string, amount: int32}") >>> accounts[['name', 'amount']] accounts[['name', 'amount']] See Also -------- blaze.expr.expressions.Field """ _arguments = '_child', '_fields' @property def fields(self): return list(self._fields) def _schema(self): measure = self._child.schema.measure d = getattr(measure, 'value', measure).dict return DataShape(Record((name, d[name]) for name in self.fields)) def __str__(self): return '%s[%s]' % (self._child, self.fields) def _project(self, key): if isinstance(key, list) and set(key).issubset(set(self.fields)): return self._child[key] raise ValueError("Column Mismatch: %s" % key) def _get_field(self, fieldname): if fieldname in self.fields: return Field(self._child, fieldname) raise ValueError("Field %s not found in columns %s" % (fieldname, self.fields)) @as_attribute(Expr, '_project') @copydoc(Projection) def projection(expr, names): if not names: raise ValueError("Projection with no names") if not isinstance(names, (tuple, list)): raise TypeError("Wanted list of strings, got %s" % names) if not set(names).issubset(expr.fields): raise ValueError("Mismatched names. Asking for names %s " "where expression has names %s" % (names, expr.fields)) return Projection(expr, tuple(names)) def sanitize_index_lists(ind): """ Handle lists/arrays of integers/bools as indexes >>> sanitize_index_lists([2, 3, 5]) [2, 3, 5] >>> sanitize_index_lists([True, False, True, False]) [0, 2] >>> sanitize_index_lists(np.array([1, 2, 3])) [1, 2, 3] >>> sanitize_index_lists(np.array([False, True, True])) [1, 2] """ if not isinstance(ind, (list, np.ndarray)): return ind if isinstance(ind, np.ndarray): ind = ind.tolist() if isinstance(ind, list) and ind and isinstance(ind[0], bool): ind = [a for a, b in enumerate(ind) if b] return ind def sliceit(child, index): index2 = tuple(map(sanitize_index_lists, index)) index3 = hashable_index(index2) s = Slice(child, index3) hash(s) return s class Slice(Expr): """Elements `start` until `stop`. On many backends, a `step` parameter is also allowed. 
Examples -------- >>> from blaze import symbol >>> accounts = symbol('accounts', 'var * {name: string, amount: int}') >>> accounts[2:7].dshape dshape("5 * {name: string, amount: int32}") >>> accounts[2:7:2].dshape dshape("3 * {name: string, amount: int32}") """ _arguments = '_child', '_index' def _dshape(self): return self._child.dshape.subshape[self.index] @property def index(self): return replace_slices(self._index) def __str__(self): if isinstance(self.index, tuple): index = ', '.join(map(str, self._index)) else: index = str(self._index) return '%s[%s]' % (self._child, index) class Selection(Expr): """ Filter elements of expression based on predicate Examples -------- >>> accounts = symbol('accounts', ... 'var * {name: string, amount: int, id: int}') >>> deadbeats = accounts[accounts.amount < 0] """ _arguments = '_child', 'predicate' _input_attributes = '_child', 'predicate' @property def _name(self): return self._child._name def __str__(self): return "%s[%s]" % (self._child, self.predicate) def _dshape(self): shape = list(self._child.dshape.shape) shape[0] = Var() return DataShape(*(shape + [self._child.dshape.measure])) class SimpleSelection(Selection): """Internal selection class that does not treat the predicate as an input. """ _arguments = Selection._arguments _input_attributes = '_child', @as_attribute(Expr, '_select') @copydoc(Selection) def selection(table, predicate): subexpr = common_subexpression(table, predicate) if not builtins.all( isinstance(node, (VarArgsExpr, ElemWise, Symbol)) or node.isidentical(subexpr) for node in concat([path(predicate, subexpr), path(table, subexpr)])): raise ValueError("Selection not properly matched with table:\n" "child: %s\n" "apply: %s\n" "predicate: %s" % (subexpr, table, predicate)) if not isboolean(predicate.dshape): raise TypeError("Must select over a boolean predicate. Got:\n" "%s[%s]" % (table, predicate)) return table._subs({subexpr: Selection(subexpr, predicate)}) class Label(ElemWise): """An expression with a name. Examples -------- >>> accounts = symbol('accounts', 'var * {name: string, amount: int}') >>> expr = accounts.amount * 100 >>> expr._name 'amount' >>> expr.label('new_amount')._name 'new_amount' See Also -------- blaze.expr.expressions.ReLabel """ _arguments = '_child', 'label' def _schema(self): return self._child.schema @property def _name(self): return self.label def _get_field(self, key): if key[0] == self.fields[0]: return self raise ValueError("Column Mismatch: %s" % key) def __str__(self): return 'label(%s, %r)' % (self._child, self.label) @copydoc(Label) def label(expr, lab): if expr._name == lab: return expr return Label(expr, lab) class ReLabel(ElemWise): """ Table with same content but with new labels Examples -------- >>> accounts = symbol('accounts', 'var * {name: string, amount: int}') >>> accounts.schema dshape("{name: string, amount: int32}") >>> accounts.relabel(amount='balance').schema dshape("{name: string, balance: int32}") >>> accounts.relabel(not_a_column='definitely_not_a_column') Traceback (most recent call last): ... ValueError: Cannot relabel non-existent child fields: {'not_a_column'} >>> s = symbol('s', 'var * {"0": int64}') >>> s.relabel({'0': 'foo'}) s.relabel({'0': 'foo'}) >>> s.relabel(0='foo') # doctest: +SKIP Traceback (most recent call last): ... SyntaxError: keyword can't be an expression Notes ----- When names are not valid Python names, such as integers or string with spaces, you must pass a dictionary to ``relabel``. For example .. 
code-block:: python >>> s = symbol('s', 'var * {"0": int64}') >>> s.relabel({'0': 'foo'}) s.relabel({'0': 'foo'}) >>> t = symbol('t', 'var * {"whoo hoo": ?float32}') >>> t.relabel({"whoo hoo": 'foo'}) t.relabel({'whoo hoo': 'foo'}) See Also -------- blaze.expr.expressions.Label """ _arguments = '_child', 'labels' def _schema(self): subs = dict(self.labels) param = self._child.dshape.measure.parameters[0] return DataShape(Record([[subs.get(name, name), dtype] for name, dtype in param])) def __str__(self): labels = self.labels if all(map(isvalid_identifier, map(first, labels))): rest = ', '.join('%s=%r' % l for l in labels) else: rest = '{%s}' % ', '.join('%r: %r' % l for l in labels) return '%s.relabel(%s)' % (self._child, rest) @copydoc(ReLabel) def relabel(child, labels=None, **kwargs): labels = {k: v for k, v in toolz.merge(labels or {}, kwargs).items() if k != v} label_keys = set(labels) fields = child.fields if not label_keys.issubset(fields): non_existent_fields = label_keys.difference(fields) raise ValueError("Cannot relabel non-existent child fields: {%s}" % ', '.join(map(repr, non_existent_fields))) if not labels: return child if isinstance(labels, Mapping): # Turn dict into tuples labels = tuple(sorted(labels.items())) if isscalar(child.dshape.measure): if child._name == labels[0][0]: return child.label(labels[0][1]) else: return child return ReLabel(child, labels) class Map(ElemWise): """ Map an arbitrary Python function across elements in a collection Examples -------- >>> from datetime import datetime >>> t = symbol('t', 'var * {price: real, time: int64}') # times as integers >>> datetimes = t.time.map(datetime.utcfromtimestamp) Optionally provide extra schema information >>> datetimes = t.time.map(datetime.utcfromtimestamp, ... schema='{time: datetime}') See Also -------- blaze.expr.expresions.Apply """ _arguments = '_child', 'func', '_asschema', '_name0' def _schema(self): if self._asschema: return dshape(self._asschema) else: raise NotImplementedError("Schema of mapped column not known.\n" "Please specify datashape keyword in " ".map method.\nExample: " "t.columnname.map(function, 'int64')") def label(self, name): assert isscalar(self.dshape.measure) return Map(self._child, self.func, self.schema, name) @property def shape(self): return self._child.shape @property def ndim(self): return self._child.ndim @property def _name(self): if self._name0: return self._name0 else: return self._child._name if PY2: copydoc(Map, Expr.map.im_func) else: copydoc(Map, Expr.map) class Apply(Expr): """ Apply an arbitrary Python function onto an expression Examples -------- >>> t = symbol('t', 'var * {name: string, amount: int}') >>> h = t.apply(hash, dshape='int64') # Hash value of resultant dataset You must provide the datashape of the result with the ``dshape=`` keyword. 
For datashape examples see http://datashape.pydata.org/grammar.html#some-simple-examples If using a chunking backend and your operation may be safely split and concatenated then add the ``splittable=True`` keyword argument >>> t.apply(f, dshape='...', splittable=True) # doctest: +SKIP See Also -------- blaze.expr.expressions.Map """ _arguments = '_child', 'func', '_asdshape', '_splittable' def _schema(self): if iscollection(self.dshape): return self.dshape.subshape[0] else: raise TypeError("Non-tabular datashape, %s" % self.dshape) def _dshape(self): return self._asdshape @copydoc(Apply) def apply(expr, func, dshape, splittable=False): return Apply(expr, func, datashape.dshape(dshape), splittable) class Coerce(ElemWise): """Coerce an expression to a different type. Examples -------- >>> t = symbol('t', '100 * float64') >>> t.coerce(to='int64') t.coerce(to='int64') >>> t.coerce('float32') t.coerce(to='float32') >>> t.coerce('int8').dshape dshape("100 * int8") """ _arguments = '_child', 'to' def _schema(self): return self.to def __str__(self): return '%s.coerce(to=%r)' % (self._child, str(self.schema)) @copydoc(Coerce) def coerce(expr, to): return Coerce(expr, dshape(to) if isinstance(to, _strtypes) else to) class Cast(Expr): """Cast an expression to a different type. This is only an expression time operation. Examples -------- >>> s = symbol('s', '?int64') >>> s.cast('?int32').dshape dshape("?int32") # Cast to correct mislabeled optionals >>> s.cast('int64').dshape dshape("int64") # Cast to give concrete dimension length >>> t = symbol('t', 'var * float32') >>> t.cast('10 * float32').dshape dshape("10 * float32") """ _arguments = '_child', 'to' def _dshape(self): return self.to def __str__(self): return 'cast(%s, to=%r)' % (self._child, str(self.to)) @as_attribute(Expr) @copydoc(Cast) def cast(expr, to): return Cast(expr, dshape(to) if isinstance(to, _strtypes) else to) def binop_name(expr): if not isscalar(expr.dshape.measure): return None l = getattr(expr.lhs, '_name', None) r = getattr(expr.rhs, '_name', None) if bool(l) ^ bool(r): return l or r elif l == r: return l return None def binop_inputs(expr): if isinstance(expr.lhs, Expr): yield expr.lhs if isinstance(expr.rhs, Expr): yield expr.rhs class Coalesce(Expr): """SQL like coalesce. .. 
code-block:: python coalesce(a, b) = { a if a is not NULL b otherwise } Examples -------- >>> coalesce(1, 2) 1 >>> coalesce(1, None) 1 >>> coalesce(None, 2) 2 >>> coalesce(None, None) is None True """ _arguments = 'lhs', 'rhs', 'dshape' _input_attributes = 'lhs', 'rhs' def __str__(self): return 'coalesce(%s, %s)' % (self.lhs, self.rhs) _name = property(binop_name) @property def _inputs(self): return tuple(binop_inputs(self)) @copydoc(Coalesce) def coalesce(a, b): a_dshape = discover(a) a_measure = a_dshape.measure isoption = isinstance(a_measure, Option) if isoption: a_measure = a_measure.ty isnull = isinstance(a_measure, Null) if isnull: # a is always null, this is just b return b if not isoption: # a is not an option, this is just a return a b_dshape = discover(b) return Coalesce(a, b, DataShape(*( maxshape((a_dshape.shape, b_dshape.shape)) + (promote(a_measure, b_dshape.measure),) ))) dshape_method_list = list() schema_method_list = list() method_properties = set() dshape_methods = memoize(partial(select_functions, dshape_method_list)) schema_methods = memoize(partial(select_functions, schema_method_list)) @dispatch(DataShape) def shape(ds): s = ds.shape s = tuple(int(d) if isinstance(d, Fixed) else d for d in s) return s @dispatch(object) def shape(expr): """ Shape of expression >>> symbol('s', '3 * 5 * int32').shape (3, 5) Works on anything discoverable >>> shape([[1, 2], [3, 4]]) (2, 2) """ s = list(discover(expr).shape) for i, elem in enumerate(s): try: s[i] = int(elem) except TypeError: pass return tuple(s) def ndim(expr): """ Number of dimensions of expression >>> symbol('s', '3 * var * int32').ndim 2 """ return len(shape(expr)) def drop_field(expr, field, *fields): """Drop a field or fields from a tabular expression. Parameters ---------- expr : Expr A tabular expression to drop columns from. *fields The names of the fields to drop. Returns ------- dropped : Expr The new tabular expression with some columns missing. Raises ------ TypeError Raised when ``expr`` is not tabular. ValueError Raised when a column is not in the fields of ``expr``. See Also -------- :func:`blaze.expr.expressions.projection` """ to_remove = set((field,)).union(fields) new_fields = [] for field in expr.fields: if field not in to_remove: new_fields.append(field) else: to_remove.remove(field) if to_remove: raise ValueError( 'fields %r were not in the fields of expr (%r)' % ( sorted(to_remove), expr.fields ), ) return expr[new_fields] dshape_method_list.extend([ (lambda ds: True, {apply}), (iscollection, {shape, ndim}), (lambda ds: iscollection(ds) and isscalar(ds.measure), {coerce}), (istabular, {drop_field}), ]) schema_method_list.extend([ (isscalar, {label, relabel, coerce}), (isrecord, {relabel}), (lambda ds: isinstance(ds, Option), {coalesce}), ]) method_properties.update([shape, ndim]) @dispatch(Expr) def discover(expr): return expr.dshape class VarArgsExpr(Expr): """An expression used for collecting variadic arguments into a single, typed container. Parameters ---------- _inputs : tuple[any] The arguments that this expression will compute. """ _arguments = '_inputs', @attribute def _inputs(self): raise NotImplementedError('overridden in _init') def _dshape(self): return DataShape(datashape.void) def varargsexpr(args): """Create a varargs expr which will be materialzed as a ``VarArgs`` """ # lazy import to break cycle from blaze.compute.varargs import register_varargs_arity args = tuple(args) register_varargs_arity(len(args)) return VarArgsExpr(args)
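The docstrings above already sketch the expression API piece by piece; a short end-to-end example may help tie them together. This is a usage sketch only, built from the `symbol`, selection, and projection examples shown in the docstrings; the `accounts` schema is illustrative, and it assumes the full blaze package (which attaches comparison operators to expressions) is importable.

# Usage sketch based on the docstring examples above; the schema is illustrative.
accounts = symbol('accounts', 'var * {name: string, amount: int, id: int}')

# Selection and projection compose into a single expression tree without
# touching any data; blaze's compute(...) would evaluate it against a backend.
deadbeats = accounts[accounts.amount < 0][['name', 'amount']]

print(deadbeats.dshape)   # var * {name: string, amount: int32}
print(deadbeats.fields)   # ['name', 'amount']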
When it comes to moving into a new home, there are many tasks to do and things to plan. After you hire NYC movers for your relocation and NYC professional packers to pack and unpack your household, you can easily forget the things you will need for the first night in your NYC home. So add a first-night plan to your NYC moving checklist. Last-minute NYC moving can be very stressful, but don't forget to prepare for that first night. You don't want to spend it rummaging through all of your boxes in search of the things you need: underwear, toothbrushes, medications, toiletries and so on. That's why we bring you tips on how to prepare for the first night in your NYC home. A first-night checklist is one of the most important parts of your moving day, so if you are moving to NYC, prepare well and avoid the biggest NYC moving mistakes. During the packing process, it is really important to organize and pack your first-night essentials into one of the NYC moving boxes. Packing your first-night things in a dedicated box, bag or suitcase will make the whole move less stressful. If you are moving from another state to NYC and your mover will only arrive next week, pack all the essentials you will need for that week; your "first night box" becomes a "first-week" suitcase. After you pack a first-night bag for yourself, you also need to pack a home essentials box for the first night in your NYC home. Label it "first night home essentials", and we recommend that you bring this box with you. That way, you don't need to wait for the movers to unload the truck to find this important box, and you won't waste time searching for toilet paper or anything else you need immediately. That's why we bring you a list of what to pack in the home essentials box for the first night in your NYC home. If you are moving to NYC with kids, it is really important to set aside time to prepare them and their things for the first night. To free up that time, use NYC moving services. Make it easier on yourself by preparing all family essentials in an accessible box; prepping a "first-night" bag with familiar items will also make the children's experience fun and less stressful. It is a good idea to plan which items to unpack that first night. Moving can be really exhausting and you will be tired, so unpack only the items you really need first. Start with sheets for the bed and then move on to forks, knives, plates, mugs and the like. Don't expect to unpack everything in a short time; you don't even need to unpack everything in the first week. Take your time, and have a plan for what to unpack first on the first night in your NYC home. How to enjoy the first night in your NYC home? After you unpack your essentials, it is time to enjoy your new NYC home. There will be time to unpack the other boxes later; on the first night you should celebrate your successful move. Instead of stressing yourself with cooking, order take-out or delivery, then enjoy dinner with your family and play a game together, or just relax and read a book. If you insist on unpacking boxes the first night, at least make sure that each box is in the right room.
On your first day in your NYC home you can introduce yourself to the new neighbors; if you have kids, look for neighbors with children so they can meet. Plan in advance what you will need on the first night in your NYC home. Mark your box of essentials so you know which box to unpack first; there is nothing more frustrating than searching for the right thing in the middle of the night. Unpack all the cooking and eating items you'll need for the next couple of days, and keep electrical appliances close at hand so you can easily prepare what you need, for example tea, coffee or toast. Don't forget to unpack your medical bag if you have any prescription drugs or painkillers for a stress headache. Also think about the essential items you'll need for breakfast in the morning, such as cereal, butter or margarine, tea bags, coffee, milk and bread. This also means keeping an area of the kitchen clear so you can prepare that first breakfast. Good luck and happy first night in NYC!
import tensorflow as tf


def conv(inputs, kernel_size, output_num, stride_size=1, init_bias=0.0, conv_padding='SAME',
         stddev=0.01, activation_func=tf.nn.relu):
    """2-D convolution layer with bias and optional activation."""
    input_size = inputs.get_shape().as_list()[-1]
    conv_weights = tf.Variable(
        tf.random_normal([kernel_size, kernel_size, input_size, output_num],
                         dtype=tf.float32, stddev=stddev),
        name='weights')
    # The bias variable must be named via the `name` keyword; passing the string
    # positionally would set the `trainable` argument instead.
    conv_biases = tf.Variable(tf.constant(init_bias, shape=[output_num], dtype=tf.float32),
                              name='biases')
    conv_layer = tf.nn.conv2d(inputs, conv_weights, [1, stride_size, stride_size, 1],
                              padding=conv_padding)
    conv_layer = tf.nn.bias_add(conv_layer, conv_biases)
    if activation_func:
        conv_layer = activation_func(conv_layer)
    return conv_layer


def fc(inputs, output_size, init_bias=0.0, activation_func=tf.nn.relu, stddev=0.01):
    """Fully connected layer; flattens 4-D inputs coming from a conv stack."""
    input_shape = inputs.get_shape().as_list()
    if len(input_shape) == 4:
        fc_weights = tf.Variable(
            tf.random_normal([input_shape[1] * input_shape[2] * input_shape[3], output_size],
                             dtype=tf.float32, stddev=stddev),
            name='weights')
        inputs = tf.reshape(inputs, [-1, fc_weights.get_shape().as_list()[0]])
    else:
        fc_weights = tf.Variable(
            tf.random_normal([input_shape[-1], output_size], dtype=tf.float32, stddev=stddev),
            name='weights')

    fc_biases = tf.Variable(tf.constant(init_bias, shape=[output_size], dtype=tf.float32),
                            name='biases')
    fc_layer = tf.matmul(inputs, fc_weights)
    fc_layer = tf.nn.bias_add(fc_layer, fc_biases)
    if activation_func:
        fc_layer = activation_func(fc_layer)
    return fc_layer


def lrn(inputs, depth_radius=2, alpha=0.0001, beta=0.75, bias=1.0):
    """Local response normalization, as used in AlexNet-style networks."""
    return tf.nn.local_response_normalization(inputs, depth_radius=depth_radius, alpha=alpha,
                                              beta=beta, bias=bias)
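A brief composition example may clarify how these helpers are meant to be stacked. This is a sketch only, written in the same TF1 graph style as the module above; the input shape and layer sizes are assumptions (loosely AlexNet-like), not part of the original code.

# Sketch: building a small graph with the helpers above (graph construction only;
# running it would additionally need a tf.Session and variable initialization).
images = tf.placeholder(tf.float32, shape=[None, 227, 227, 3])

net = conv(images, kernel_size=11, output_num=96, stride_size=4, conv_padding='VALID')
net = lrn(net)
net = tf.nn.max_pool(net, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID')
net = fc(net, 4096)                            # fc() flattens the 4-D activation itself
logits = fc(net, 1000, activation_func=None)   # no ReLU on the output layer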
Knocked off her feet after twenty years in public health nursing, Iris Graville quit her job and convinced her husband and their thirteen-year-old twin son and daughter to move to Stehekin, a remote mountain village in Washington State’s North Cascades. They sought adventure; she yearned for the quiet and respite of this community of eighty-five residents accessible only by boat, float plane, or hiking. Hiking Naked chronicles Graville’s journey through questions about work and calling as well as how she coped with ordering groceries by mail, black bears outside her kitchen window, a forest fire that threatened the valley, and a flood that left her and her family stranded for three days. This memoir of “seeking, not escaping” speaks to the hearts of those longing to be free from modern constraints—work, money, ambition, stress of all sorts—to find their bliss, wherever it might be. For Graville, in 1993, that means listening to the urgings of her heart and leaving her job as a public health nurse in Bellingham, WA, and moving her family to Stehekin, a remote village near North Cascades National Park. What resonates throughout is her deep connection to Quakerism; here a gentle, quiet spirituality that encourages places and periods of silence rather than imposing rigid external demands. As her husband and children agree to this experiment, over the two years, all come in their own way to say, “I thought I knew about powerlessness,” only to find that the rigors of living life simply require letting go of much more than they ever could have imagined. Graville concludes that “Far from feeling deprived, we found over and over again the riches of attending to what’s truly important.” VERDICT: Reading this expressive and beautifully written memoir is to experience one’s own quest toward self-discovery. In this lovely memoir of a time in transition, Iris Graville looks at the themes of what it means to unearth and follow an inner calling while yet responding to the demands of daily living and providing. In her own words she “attends to what is important”, stripping down to a naked awareness that “the smallest touch, the briefest contact, the quietest diligence can make a difference – can change the course of a river.” The reader will experience the difficulties and joys of living in the wilds in the remote village of Stehekin and will perhaps undergo with Iris the rich and nourishing journey of coming to clearness and balance.
import logging
import ConfigParser
import io
import sys

import utils
from zest.releaser.git import Git as OGGit

logger = logging.getLogger(__name__)


class Git(OGGit):
    """ Command proxy for Git enhanced with gitflow commands. """

    def cmd_gitflow_release_start(self, version, base=''):
        return 'git flow release start %s %s' % (version, base)

    def cmd_gitflow_release_finish(self, version):
        return 'git flow release finish -m "Release-%s" %s' % (version, version)

    def cmd_gitflow_hotfix_start(self, version, basename=''):
        return "git flow hotfix start %s %s" % (version, basename)

    def cmd_gitflow_hotfix_finish(self, version):
        return "git flow hotfix finish %s" % version

    def _config(self):
        """ Parse the git config into a ConfigParser object. """
        # ConfigParser doesn't like tabs, so strip them before parsing
        config = open('./.git/config', 'r').read().replace('\t', '')
        parser = ConfigParser.ConfigParser()
        parser.readfp(io.BytesIO(config))
        return parser

    @property
    def extensions(self):
        config = self._config()
        return ['gitflow'] if 'gitflow "branch"' in config.sections() else []

    def cmd_create_tag(self, version, base=''):
        if 'gitflow' in self.extensions:
            msg = "Release-%s" % version
            _start_cmd = 'git flow release start %s %s' % (version, base)
            _finish_cmd = 'git flow release finish -m "%s" %s' % (msg, version)
            return '; '.join([_start_cmd, _finish_cmd])
        else:
            # Fall back to the plain git implementation
            return super(Git, self).cmd_create_tag(version)

    def gitflow_branches(self):
        config = self._config()
        return dict(config.items('gitflow "branch"'))

    def gitflow_get_branch(self, branch):
        branches = self.gitflow_branches()
        if branch in branches:
            return branches.get(branch)
        else:
            logger.critical('"%s" is not a valid gitflow branch.' % branch)
            sys.exit(1)

    def gitflow_prefixes(self):
        config = self._config()
        return dict(config.items('gitflow "prefix"'))

    def gitflow_get_prefix(self, prefix):
        prefixes = self.gitflow_prefixes()
        if prefix in prefixes:
            return prefixes.get(prefix)
        else:
            logger.critical('"%s" is not a valid gitflow prefix.' % prefix)
            sys.exit(1)

    def gitflow_check_prefix(self, prefix):
        prefix = self.gitflow_get_prefix(prefix)
        current = self.current_branch()
        return current.startswith(prefix)

    def gitflow_check_branch(self, branch, switch=False):
        branch = self.gitflow_get_branch(branch)
        current = self.current_branch()
        if current != branch:
            if switch:
                self.gitflow_switch_to_branch(branch, silent=False)
            else:
                logger.critical('You are not on the "%s" branch.' % branch)
                sys.exit(1)

    def gitflow_switch_to_branch(self, branch, silent=True):
        if not silent:
            logger.info('You are not on the "%s" branch, switching now.' % branch)
        utils.execute_command(self.cmd_checkout_from_tag(branch, '.'))

    def current_branch(self):
        return utils.execute_command("git rev-parse --abbrev-ref HEAD").strip()


def enhance_with_gitflow(vcs):
    """ Return the vcs determined by the original function, unless we are
    dealing with git, in which case we return our gitflow enhanced Git(). """
    return Git() if isinstance(vcs, OGGit) else vcs
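A short usage sketch of the module follows. It is illustrative only: it assumes the working directory is a git repository with git-flow initialised, and the branch name and version string are placeholders.

# Sketch: wrap the plain zest.releaser Git vcs; when git-flow is configured,
# tagging goes through `git flow release` instead of a plain `git tag`.
vcs = enhance_with_gitflow(OGGit())
if 'gitflow' in vcs.extensions:
    # make sure we are on the gitflow 'develop' branch before releasing
    vcs.gitflow_check_branch('develop', switch=True)
print(vcs.cmd_create_tag('1.2.0'))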
The fate of the arena is up in the air after celebrated architect Frank Gehry stepped out of the project. Frank Gehry is bowing out as the architect of the planned Nets arena at the controversial Atlantic Yards project in downtown Brooklyn. Developer Bruce Ratner’s group announced today a “mutual agreement” in which “starchitect” Gehry would relinquish his role as chief designer of the Barclays Center, which would house the Brooklyn Nets. "I have an immense gratitude toward Frank Gehry for his amazing vision, unparalleled talent and steadfast partnership," Ratner said. Forest City Ratner had landed $400 million from Barclays Bank for naming rights to the Gehry-designed arena. From the very beginning, Ratner's plan to build a huge complex of residential towers, commercial space and an arena for the Nets in the middle of Brooklyn has been controversial. Local residents rallied against it, because of its use of taxpayer subsidies and its use of eminent domain to allow the state to seize land for the project's use. They also argued that it sacrificed the character of Brownstone Brooklyn. Support for the project came from lawmakers including Mayor Michael Bloomberg. Until the bottom fell out of the economy, it looked like the project would go through. Recently, construction has been beset by delays as the developers try to line up financing. Ratner has said that the project at the intersection of Atlantic and Pacific Streets in Brooklyn would be scaled back, but would still materialize. Now, analysts question whether Barclays and other investors will stay on board without the involvement of the famous architect.
# -*- coding: utf-8 -*- """ Local settings - Run in Debug mode - Use console backend for emails - Add Django Debug Toolbar - Add django-extensions as app """ from .common import * # noqa # DEBUG # ------------------------------------------------------------------------------ DEBUG = env.bool('DJANGO_DEBUG', default=True) TEMPLATES[0]['OPTIONS']['debug'] = DEBUG # SECRET CONFIGURATION # ------------------------------------------------------------------------------ # See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key # Note: This key only used for development and testing. SECRET_KEY = env('DJANGO_SECRET_KEY', default='r%-xf7+jh0o_!8-f3x&c#2iil!3-g!=anoo!m=_yge1io#bv3)') # Mail settings # ------------------------------------------------------------------------------ DATABASE_URL="postgres://postgres:[email protected]:5432/twitter" EMAIL_PORT = 1025 EMAIL_HOST = 'localhost' EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.console.EmailBackend') # CACHING # ------------------------------------------------------------------------------ CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': '' } } # django-debug-toolbar # ------------------------------------------------------------------------------ MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',) INSTALLED_APPS += ('debug_toolbar', ) INTERNAL_IPS = ('127.0.0.1', '10.0.2.2',) DEBUG_TOOLBAR_CONFIG = { 'DISABLE_PANELS': [ 'debug_toolbar.panels.redirects.RedirectsPanel', ], 'SHOW_TEMPLATE_CONTEXT': True, } # django-extensions # ------------------------------------------------------------------------------ INSTALLED_APPS += ('django_extensions', ) # TESTING # ------------------------------------------------------------------------------ TEST_RUNNER = 'django.test.runner.DiscoverRunner' # Your local stuff: Below this line define 3rd party library settings
With the 1955 debut of the new Rolls-Royce Silver Cloud and its Bentley S-type sibling, British magazine “The Autocar” raved, “The latest Bentley model offers a degree of safety, comfort and performance that is beyond the experience and perhaps even the imagination of the majority of the world’s motorists.” Praise indeed, but considering its price at nearly twice that of a top-range Cadillac, it really had to deliver. Larger and roomier than the preceding R-type, with a longer all-new chassis and front suspension setup, the five- to six-seater S-type washed away any prewar influences with fully integrated front fenders and a tapering profile that satisfied modern tastes without offending traditionalists. While the 4-speed GM Hydra-Matic automatic transmission used in the preceding R-type was retained, performance was enhanced by adopting the enlarged 4887cc 6-cylinder block employed in the final R-type Continentals. The S-type was road tested at 103 MPH, sprinting from 0-60 MPH in 13.1 seconds. The lavish appointments, including picnic tables and a leather interior set off with burr walnut, were taken as granted, as well as refinements such as an electric ride control switch to vary the rear shock absorber settings. This 1956 right-hand drive example tastefully liveried in two-tone silver and blue features the GM automatic transmission fitted to all but a very few S1s, and it rides on radial whitewall tires. The S1 is a fitting final flourish of the illustrious 6-cylinder Bentley theme that began in the 1920s, a fact appreciated by many owners who consider the S1 easier to live with than the S2 which followed. Moreover, although differing only in detail from its Rolls-Royce Silver Cloud counterpart, the S1 was considerably more popular. Perhaps that’s because Bentley owners generally preferred to drive rather than let chauffeurs have all the fun.
# Copyright (c) Members of the EGEE Collaboration. 2004. # See http://www.eu-egee.org/partners/ for details on the copyright # holders. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys from GLUEInfoProvider import CommonUtils # # This module should replace glite-ce-glue2-computingservice-static # and the related YAIM function config_cream_gip_glue2 # def process(siteDefs, out=sys.stdout): now = CommonUtils.getNow() srvType = 'org.glite.ce.CREAM' endpointCount = 2 # CREAM + RTEPublisher (CEMon ?) shareCount = siteDefs.ruleTable.getShareCount() resourceCount = len(siteDefs.resourceTable) out.write("dn: GLUE2ServiceID=%s,GLUE2GroupID=resource,o=glue\n" % siteDefs.compServiceID) out.write("objectClass: GLUE2Entity\n") out.write("objectClass: GLUE2Service\n") out.write("objectClass: GLUE2ComputingService\n") out.write("GLUE2EntityCreationTime: %s\n" % now) out.write("GLUE2EntityName: Computing Service %s\n" % siteDefs.compServiceID) out.write("GLUE2EntityOtherInfo: InfoProviderName=%s\n" % CommonUtils.providerName) out.write("GLUE2EntityOtherInfo: InfoProviderVersion=%s\n" % CommonUtils.providerVersion) out.write("GLUE2EntityOtherInfo: InfoProviderHost=%s\n" % siteDefs.ceHost) out.write("GLUE2ServiceID: %s\n" % siteDefs.compServiceID) out.write("GLUE2ServiceType: %s\n" % srvType) out.write("GLUE2ServiceCapability: executionmanagement.jobexecution\n") out.write("GLUE2ServiceQualityLevel: production\n") out.write("GLUE2ServiceComplexity: endpointType=%d, share=%d, resource=%d\n" % (endpointCount, shareCount, resourceCount)) out.write("GLUE2ServiceAdminDomainForeignKey: %s\n" % siteDefs.siteName) out.write("\n")
Kia is one of the most successful car brands in the United States, thanks to a great marketing campaign backed by excellent quality and competitive pricing. The remarkable achievement here is that Kia has increased its market share by more than 300 percent since the early 2000s. So what happened that made Kia so popular? Kia Motors introduced a strategy many foreign car companies have since implemented for the U.S. market: specific models tailored for North America alone. This proved to be the right move, and the latest Kia models keep their finger on the pulse of American car owners better than some domestic car companies. Everything from design to mechanical solutions, from marketing to pricing, is geared towards the needs and habits of drivers in North America, creating a line of vehicles that offer comfort, reliability, style, and competitive pricing. Used car dealerships in Las Vegas owe plenty of their business to Kia models, and this is mainly because of the build quality. Since Kia keeps updating its models every couple of years, used Kia vehicles can often be found in excellent shape and at a very reasonable price. We at Reliable Auto Sales offer a number of certified pre-owned Kia models, many of which are still under factory warranty. Seeing as many of the Kia dealerships in Las Vegas are booming with sales of new models, used Kias are affordable and offer excellent value for money. We have an excellent selection of used Kia models, and the best way to assess the advantages of these cars is to test drive them and get to know the specific details of the car and the offer itself. Visit us at our Las Vegas location and schedule a test drive, where our courteous staff can walk you through the car's specs and the details of the offer.
# -*- coding: utf-8 -*- ################################################################################ ################# Representational Similarity Analysis ######################### ################################################################################ # Amelie Haugg # Julia Brehm # Pia Schröder import os from os.path import dirname, abspath import numpy as np from scipy.spatial.distance import cdist from scipy.stats import spearmanr import matplotlib.pyplot as plt from mpl_toolkits.axes_grid1 import make_axes_locatable import datetime import markdown import webbrowser # Global variables and their default values matrix_plot1 = True matrix_plot2 = False bar_plot = False correlations1 = False correlations2 = False pvalues = False no_relabelings = 10000 dist_metric = 1 output_first = True output_second = False scale_to_max = False now = datetime.datetime.now() def import_data(paths): """ Import header and data matrix from VOM files specified in paths. Returns dictionary DATA containing data set names as keys.""" DATA = dict() # Iterate through files and save data in dictionary for set_no, file_name in enumerate(paths): header = dict() # Read header and store last line with open(file_name, 'r') as file: for index, line in enumerate(file): string_list = line.split() item_list = [int(i) if i.isdigit() else i for i in string_list] # For non empty list save first element as key and rest as value in # header dictionary if item_list: key = item_list.pop(0) if len(item_list) > 1: header[key] = item_list else: header[key] = item_list.pop() # Use 'NrOfVoxels' as indicator for the end of the header if 'NrOfVoxels:' in line: break header_end = index + 1 # Read data into array data = np.loadtxt(file_name, skiprows = header_end) # Save data set in DATA dictionary key = "data_set_" + str(set_no + 1) DATA[key] = {'header': header, 'data': data} return DATA def extract_data(DATA): """ Get voxel data from data matrices in DATA. One matrix per area, rows = voxels, columns = conditions. """ # Extracts those columns in data that contain measurements (excluding voxel coordinates) data = [] for i in range(1,len(DATA)+1): data.append(DATA['data_set_' + str(i)]['data'][:,3:]) return data def first_order_rdm(condition_data): """ Return Specified distance matrices (1 = Pearson correlation, 2 = Euclidian distance, 3 = Absolute activation difference) of data in input matrices. One matrix per area/subject/method/... 
Number of rows/columns = number of conditions = number of columns in each matrix in condition_data""" RDMs = list() # Iterate through matrices in condition_data and save one RDM per matrix for i in range(len(condition_data)): if dist_metric == 1: # Use correlation distance RDM = 1-np.corrcoef(condition_data[i],rowvar=0) elif dist_metric == 2: # Use Eucledian distance RDM = cdist(condition_data[i].T,condition_data[i].T,'euclidean') elif dist_metric == 3: # Use absolute activation difference means = np.mean(condition_data[i], axis=0) # Determine mean activation per condition m, n = np.meshgrid(means,means) # Create all possible combinations RDM = abs(m-n) # Calculate difference for each combination RDMs.append(RDM) return RDMs def get_pvalue(matrix1, matrix2): """ Randomize condition labels to test significance """ order = range(0,len(matrix2)) dist = np.zeros(no_relabelings) # First, determine actual correlation flat1 = matrix1.flatten(1).transpose() flat2 = matrix2.flatten(1).transpose() corr = spearmanr(flat1,flat2)[0] # Relabel N times to obtain distribution of correlations for i in range(0,no_relabelings): np.random.shuffle(order) dummy = matrix2.take(order, axis=1).take(order, axis=0) flat2 = dummy.flatten(1).transpose() dist[i] = spearmanr(flat1,flat2)[0] # Determine p value of actual correlation from distribution p = float((dist >= corr).sum()) / len(dist) # Mit dieser Methode braucht man mindestens 4 conditions, also 4!=24 mögliche # Reihenfolgen um auf p < 0.05 zu kommen. Nicht gut! return p def bootstrap(data): """ computes the variability of the obtained second-order RDM (i.e. distance between areas, models, ...) for the same experiment with different stimuli by bootstrapping 100 times from the condition set. """ all_RDMs = list() # Iterate through 100 resamplings for ind in range(100): index = np.random.random_integers(0, high=len(data[0].T)-1, size=(1,len(data[0].T)))[0] new_data = np.array(data) # Reorder columns in data (conditions) for elem in range(len(data)): new_data[elem] = new_data[elem][:,index] # Recompute first and second-order RDMs with new conditions new_RDM1 = first_order_rdm(list(new_data)) new_RDM2 = second_order_rdm(new_RDM1, data, False)[0] # Remove off-diagonal zeros to avoid artefactually small standard deviations m_index = [new_RDM2 == 0] ident = np.invert(np.identity(len(new_RDM2), dtype=bool)) m_index = m_index & ident new_RDM2[m_index[0]] = np.nan all_RDMs.append(new_RDM2) all_RDMs = np.array(all_RDMs) # Compute standard deviation along first dimension (across RDMs) variability = np.nanstd(all_RDMs,0) return variability def second_order_rdm(RDMs, data, firstcall): """ Returns representational dissimilarity matrix computed with Spearman rank correlations between variable number of equally sized input matrices. 
""" # Flatten input matrices flat = [m.flatten(1) for m in RDMs] flat = np.array(flat).transpose() # Compute Spearman rank correlation matrix c_matrix = spearmanr(flat)[0] # In case only two conditions are compared, spearmanr returns single correlation # coefficient and c_matrix has to be built manually if not(isinstance(c_matrix, np.ndarray)): c_matrix = np.array([[1,c_matrix],[c_matrix,1]]) # Compute RDM (distance matrix) with correlation distance: 1 - correlation RDM = np.ones(c_matrix.shape) - c_matrix p_values = [] variability = [] if firstcall: if bar_plot: # Determine variability of distance estimates for different stimuli # Bootstrap from condition set (100 times, with replacement) variability = bootstrap(data) if pvalues or bar_plot: # Determine significance of second order RDM p_values = np.zeros(RDM.shape) # Iterate through pvalue matrix and fill in p-values but only for upper # triangle to improve performance for i in range(0,len(p_values)): for j in range(i,len(p_values)): p_values[i,j] = get_pvalue(RDMs[i], RDMs[j]) # mirror matrix to obtain all p-values p_values = p_values + np.triu(p_values,1).T return [RDM, p_values, variability] def plot_RDM(RDMs, labels, names, fig): """ Create RDM plot. Creates one first-order plot for each area if fig=1 and a single second-order plot if fig=2.""" # Determine optimal arrangement for plots rows = int(np.sqrt(len(RDMs))) columns = int(np.ceil(len(RDMs)/float(rows))) ticks = np.arange(len(labels)) # Use maximum value in RDMs for scaling if desired dist_max = np.max(np.array(RDMs)) if fig == 1: f = plt.figure(fig, figsize=(18, 8)) if fig == 2: f = plt.figure(fig, figsize=(6, 6)) # New: add_subplot instead of subplots to control figure instance for index in np.arange(len(RDMs)): ax = f.add_subplot(rows,columns,index+1, xticklabels = labels, yticklabels = labels, xticks = ticks, yticks = ticks) if scale_to_max: im = ax.imshow(RDMs[index], interpolation = 'none', cmap = 'jet', vmin = 0, vmax = dist_max) else: im = ax.imshow(RDMs[index], interpolation = 'none', cmap = 'jet') for label in ax.get_xticklabels(): label.set_fontsize(6) ax.xaxis.tick_top() ax.set_title(names[index], y = 1.08) divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) dist_max = np.max(RDMs[index]) cbar = plt.colorbar(im, ticks=[0, dist_max], cax=cax) cbar.ax.set_yticklabels(['0', str(np.around(dist_max,decimals=2))]) cbar.ax.set_ylabel('Dissimilarity') f.subplots_adjust(hspace=0.1, wspace=0.3) if fig == 1: if dist_metric == 1: f.suptitle('First order distance metric: Correlation distance', y=0.9, fontsize=18) elif dist_metric == 2: f.suptitle('First order distance metric: Euclidean distance', y=0.9, fontsize=18) elif dist_metric == 3: f.suptitle('First order distance metric: Absolute activation difference', y=0.9, fontsize=18) figure_name = "Figure%d_%d-%d-%d-%d-%d-%d.png" % (fig, now.day, now.month, now.year, now.hour, now.minute, now.second) plt.savefig(figure_name, transparent=True) return figure_name def plot_bars(RDM, pvalues, variability, names): """ Creates bar plot depicticting the distances between different areas. 
def plot_bars(RDM, pvalues, variability, names):
    """ Creates bar plot depicting the distances between different areas.
    Bars are sorted by significance, errorbars indicate the standard error of the
    distance estimate (estimated as the standard deviation of 100 distance
    estimates obtained from bootstrapping of the condition labels)."""

    length = len(RDM)

    f = plt.figure(3, figsize=(14,6))

    for index in np.arange(length):
        maxim = np.max(RDM[index])
        xticks = np.arange(length-1)+1

        d_values = RDM[index,:]
        plot_dvalues = d_values[d_values != 0]

        v_values = variability[index,:]
        plot_vvalues = v_values[d_values != 0]

        p_values = pvalues[index,:]
        plot_pvalues = np.around(p_values[d_values != 0], decimals=4)

        plot_names = np.array(names)[d_values != 0]

        sort = np.argsort(plot_pvalues)

        ax = f.add_subplot(1, length, index+1, xticks = xticks, xticklabels = plot_pvalues[sort])
        ax.set_ylabel('Correlation distance (1-Spearman rank correlation)')
        ax.set_xlabel('P-values')
        ax.bar(xticks, plot_dvalues[sort], 0.5, yerr = plot_vvalues[sort],
               error_kw=dict(ecolor='black', lw=2), align = 'center')

        scale_y = max(plot_dvalues + plot_vvalues)+maxim*0.1
        plt.axis([0.5, length-0.5, 0, scale_y])
        ax.set_title(names[index])

        for ind in np.arange(length-1):
            ax.text(xticks[ind], scale_y*0.1, plot_names[sort][ind], rotation='vertical',
                    horizontalalignment='center', backgroundcolor='w', color='k', visible=True)

    f.subplots_adjust(hspace=0.1, wspace=0.3)

    figure_name = "Figure3_%d-%d-%d-%d-%d-%d.png" % (now.day, now.month, now.year, now.hour, now.minute, now.second)

    plt.savefig(figure_name, transparent=True)

    return figure_name


def generate_output(*args):
    """ Generates text file including all output and converts it into an html
    (markdown) file """

    if len(args) > 3:
        [withinRDMs, betweenRDM, names, labels] = args
    else:
        [withinRDMs, names, labels] = args

    # Produce text file
    filename = "RSA_output_%d-%d-%d-%d-%d-%d.txt" % (now.day, now.month, now.year, now.hour, now.minute, now.second)
    output = "RSA_output_%d-%d-%d-%d-%d-%d.html" % (now.day, now.month, now.year, now.hour, now.minute, now.second)

    with open(filename, 'w') as fid:

        fid.write("#Representational similarity analysis\n\n")
        fid.write("###Areas: "+str(', '.join(names))+"\n")
        fid.write("###Conditions: "+str(', '.join(labels))+"\n\n\n\n")

        # First-order RDMs
        if output_first:
            fid.write("##First-order analysis\n\n")

            # Numerical correlations
            if correlations1:
                distances = {1:'Correlation distance', 2:'Euclidean distance', 3:'Absolute activation difference'}
                fid.write("###Dissimilarity between conditions: "+distances[dist_metric]+"\n\n")
                for ind in np.arange(len(withinRDMs)):
                    fid.write("\n###"+names[ind]+"\n")
                    np.savetxt(fid, withinRDMs[ind], fmt='%.4f')  # , header="\n"+names[ind]+"\n")
                    fid.write("\n")

            # RDM plot
            if matrix_plot1:
                figure_name = plot_RDM(withinRDMs, labels, names, 1)
                fid.write("![Figure1](%s)" % figure_name)

        # Second-order RDM
        if output_second:
            fid.write("\n")
            fid.write("##Second-order analysis\n\n")

            # Numerical correlations
            if correlations2:
                fid.write("###Dissimilarity between areas: 1-Spearman rank correlation\n\n")
                np.savetxt(fid, betweenRDM[0], fmt='%.4f')
                fid.write("\n\n")

            # P-values
            if pvalues:
                fid.write("###Statistical significance of Dissimilarity between areas\n")
                fid.write("P-values are obtained by random relabeling of conditions.\nNo. of relabelings = %d \n\n" % (no_relabelings))
                np.savetxt(fid, betweenRDM[1], fmt='%.4f')
                fid.write("\n\n")

            # RDM plot
            if matrix_plot2:
                figure_name = plot_RDM([betweenRDM[0]], names, ['Second order RDM'], 2)
                fid.write("\n")
                fid.write("![Figure2](%s)" % figure_name)
                fid.write("\n")

            # Bar plot
            if bar_plot:
                figure_name = plot_bars(betweenRDM[0], betweenRDM[1], betweenRDM[2], names)
                fid.write("\n")
                fid.write("![Figure3](%s)" % figure_name)
                fid.write("\n")

    with open(output, 'w') as output_file:
        html = markdown.markdownFromFile(filename, output_file, extensions=['markdown.extensions.nl2br'])

    os.remove(filename)

    webbrowser.open(output, new=2)


def RSA(paths, files, labels):
    ''' Imports input files, extracts relevant data, computes first and second
    order RDMs and plots them '''

    data = import_data(paths)
    data = extract_data(data)

    withinRDMs = first_order_rdm(data)

    names = [file[0:-4] for file in files]

    if output_second:
        betweenRDM = second_order_rdm(withinRDMs, data, True)

    if output_second:
        generate_output(withinRDMs, betweenRDM, names, labels)
    else:
        generate_output(withinRDMs, names, labels)
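# Minimal usage sketch, not part of the original script: the paths, file names
# and condition labels below are made-up placeholders. Real VOM files with one
# column per condition (after the three voxel-coordinate columns) are required,
# and the module-level switches (output_second, dist_metric, bar_plot, ...)
# control which analyses and plots are produced.
if __name__ == "__main__":
    example_paths = ["/data/example/area_V1.vom", "/data/example/area_FFA.vom"]
    example_files = ["area_V1.vom", "area_FFA.vom"]           # used to derive area names
    example_labels = ["faces", "houses", "tools", "words"]    # one label per condition column

    RSA(example_paths, example_files, example_labels)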
As the media attacks against our President intensify, millions of Trump supporters become even more resolute in defending and supporting him. It’s interesting to note that some of the most notable attacks on President Trump over the past week don’t have anything to do with Russia. It’s almost as though, ever since President Trump demanded that the FISA warrant for Carter Page and the FBI text messages be declassified without redaction, the media has moved away from the phony Trump-Russia collusion narrative?

Just in the past 3 weeks, an anonymous op-ed was published in the Trump-hating New York Times from a “senior official in the White House”. Last week, Dianne Feinstein dropped a bombshell about an anonymous letter from a woman who turned out to be a Democrat activist. In her letter, which was delivered to Senator Feinstein in July, Christine Blasey Ford accused Trump’s US Supreme Court Justice candidate, Judge Brett Kavanaugh, of sexual assault 36 years ago. And only yesterday, the New York Times dropped another bombshell, claiming Deputy AG Rod Rosenstein wanted to wear a wire to reveal “the chaos” inside President Trump’s White House, to use as evidence to help invoke the 25th Amendment, which is used to remove a sitting President who is deemed “unfit” to serve.

Last night, however, over 11,000 Americans showed their allegiance to President Trump and his steadfast resolve to “drain the swamp”, as they flocked to the Missouri State University campus to support our President and to hear Trump speak. It was a sea of red. The number of people who wore “Make America Great Again” caps and pro-Trump shirts far outweighed the few hundred protesters gathered near the entrance of the arena.

John Brown, a reporter who attended the incredible event, tweeted an image of the line. In his tweet, Brown claimed that the line was 2 miles long and that the first people in line had started lining up at about 5:30 am. “AnythingWithWheels” tweeted a couple of great pictures showing the crowd standing in the rain as they waited to get into Trump’s rally. He claims the photo only reveals a “small portion” of the crowd, and that the rally was “impossible to get into” because of the massive line of Trump supporters.

Americans are traveling from all over the United States to see President Trump at his high-energy, rock-star-like rallies. The Springfield News Leader was at the MAGA event. Another Trump supporter who said she’d found her people was Brenda Rounds.

Today’s joint rally with President Trump and AG Josh Hawley began with a prayer, the Pledge of Allegiance and the National Anthem.
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------------
# Name:        isbnutils
# Purpose:     Utility functions for checking ISBNs
#
# Author:      Michael Amrhein ([email protected])
#
# Copyright:   (c) 2016 Michael Amrhein
# License:     This program is part of a larger application. For license
#              details please read the file LICENSE.TXT provided together
#              with the application.
# ---------------------------------------------------------------------------
# $Source$
# $Revision$

"""Utility functions for checking ISBNs"""

import os.path
from bisect import bisect
from typing import Iterator, Tuple
from xml.etree import ElementTree as ETree


def _iter_rules(root: ETree.Element) -> Iterator:
    for elem in root.findall('RegistrationGroups/Group'):
        prefix = elem.findtext('Prefix').replace('-', '')
        prefix_length = len(prefix)
        for subelem in elem.findall('Rules/Rule'):
            number_range = subelem.findtext('Range')
            lower, upper = number_range.split('-')
            lower_prefix = prefix + lower
            upper_prefix = prefix + upper
            length = int(subelem.findtext('Length'))
            if length > 0:
                item_idx = prefix_length + length
            else:
                item_idx = 0
            yield lower_prefix, upper_prefix, prefix_length, item_idx


file_name = os.path.join(os.path.dirname(__file__), "ISBN_Ranges.xml")
etree = ETree.parse(file_name)
root = etree.getroot()
rule_list = list(_iter_rules(root))


def lookup_isbn_prefix(digits: str) -> Tuple[int, int]:
    """Check ISBN prefix in `digits`."""
    idx = max(bisect(rule_list, (digits,)) - 1, 0)
    lower_prefix, upper_prefix, registrant_idx, item_idx = rule_list[idx]
    if lower_prefix <= digits <= upper_prefix:
        if item_idx > 0:
            return registrant_idx, item_idx
        raise ValueError(f"Excluded prefix range: '{lower_prefix}' - "
                         f"'{upper_prefix}'.")
    if lower_prefix[:3] != digits[:3]:
        raise ValueError("Undefined prefix.")
    raise ValueError("Undefined registration group or registrant.")
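# Hypothetical usage sketch, not part of the original module: the helper below
# only illustrates how the two indices returned by lookup_isbn_prefix() could
# be used to split a 13-digit ISBN into its parts; the concrete split depends
# on the rules defined in the bundled ISBN_Ranges.xml file.
def split_isbn(digits: str) -> Tuple[str, str, str, str]:
    """Split a 13-digit ISBN string into prefix+group, registrant, item number
    and check digit, using lookup_isbn_prefix()."""
    registrant_idx, item_idx = lookup_isbn_prefix(digits)
    return (digits[:registrant_idx],           # EAN prefix + registration group
            digits[registrant_idx:item_idx],   # registrant
            digits[item_idx:-1],               # item number
            digits[-1])                        # check digit

# For the common example ISBN '9783161484100' this would typically yield
# ('9783', '16', '148410', '0'), provided the range file contains the usual
# rules for the '978-3' registration group.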
I’ve been out of the loop the last couple of weeks. Wizard World Comic Con was great; our table was about ten feet away from Amy Acker’s, I met a lot of really nice people, sold a bunch of books, and was even interviewed a couple of times. Unfortunately, during all of my glad-handing, I picked up an extremely unpleasant flu virus that hit me with five days of fever. Even now, a week later, I’m on the mend but still very weak.

It’s taken me a little while, but I’m finally starting to catch up on things. Pictures from Comic Con are now available on my Facebook page. One of the two interviews I gave is now online, over at Press +1. The two best things about this con: seeing people come up to the table, point at one of the Felix Renn chapbooks and say, “I already have that one!” And a couple of people who bought chapbooks coming back to the table later in the day to say they had already started reading them and couldn’t put them down.

I found out that my Felix Renn story, “My Body,” and another short story, “The Candle” (which appeared in Shadows & Tall Trees), received honourable mentions in Ellen Datlow’s Best Horror of the Year, Volume 4.

In other news, I have seen the almost-final draft of the cover artwork for my forthcoming ChiZine collection, Every House Is Haunted, and it is absolutely fantastic. Ever since my book was accepted, I’ve been imagining what my cover was going to look like, because the CZP covers are always so incredible. Artist Erik Mohr didn’t disappoint, and I think you’re really going to dig his take on haunted houses. I should be able to post it here in the next day or two, and the pre-order info for the limited edition hardcover should be online around the same time.

A new Felix Renn story, “Out of the Blue,” has been sold to an anthology called Fungi, edited by Silvia Moreno-Garcia and Orrin Grey, to be published by Innsmouth Free Press later this fall. I remember meeting Orrin Grey a few years ago at Readercon, and he told me about his fungus fascination. It was one of the first things he told me, in fact, and I recall smiling, nodding, and backing away slowly. Then he explained that he was interested in fungus-related stories (Hodgson’s “The Voice in the Night,” Lovecraft’s Fungi from Yuggoth, Jeff VanderMeer’s Ambergris books) and that it was his hope to one day put together a collection of them for a themed anthology. Well, it finally happened, and I’m very happy to be a part of it.

I’m particularly excited about “Out of the Blue” because it introduces a new character to the Black Lands universe. Jerry Baldwin is a smooth-talking real estate agent (and Felix’s sometime lawyer) who only represents haunted properties. Haunted houses, haunted apartments, haunted condos, haunted farms, etc. In addition to providing a bit of comic relief, Jerry is an example of the way ordinary people have adapted to live in a world where the supernatural exists as a part of their daily lives. In Jerry’s case, he tries to make a few bucks out of the situation. Fungi will be published in hardcover, paperback, and as an e-book. The hardcover will feature some bonus material not included in the other editions. Very excited about this one!

When I say standalone, I mean that while this story takes place in the Black Lands universe, it does not feature Toronto p.i. Felix Renn. I love writing about Felix, and I plan to do so for many years to come, but you only get a small view of the world through the eyes of a single character.
This new story is one of many that explore how other people in other parts of the world are dealing with the existence of the Black Lands. “Day Pass” will appear in the EDGE books anthology Chilling Tales 2, edited by Michael Kelly. The book isn’t scheduled to come out until February 2013, but I’ve got a couple of other announcements coming up for some things that you’ll be able to get your mitts on a bit sooner. Details to come.