from flask import render_template

from . import main


@main.route('/')
@main.route('/name/<name>/')
@main.route('/<one>/name/<name>')
@main.route('/<one>/<two>/name/<name>')
@main.route('/<one>/<two>/<three>/name/<name>')
@main.route('/<one>/<two>/<three>/<four>/name/<name>')
@main.route('/<one>/<two>/<three>/<four>/<five>/name/<name>')
@main.route('/<one>/<two>/<three>/<four>/<five>/<six>/name/<name>')
@main.route('/<one>/<two>/<three>/<four>/<five>/<six>/<seven>/name/<name>')
@main.route('/<one>/<two>/<three>/<four>/<five>/<six>/<seven>/<eight>/name/<name>')
@main.route('/<one>/<two>/<three>/<four>/<five>/<six>/<seven>/<eight>/<nine>/name/<name>')
@main.route('/<one>/<two>/<three>/<four>/<five>/<six>/<seven>/<eight>/<nine>/<ten>/name/<name>')
@main.route('/<one>')
@main.route('/<one>/<two>')
@main.route('/<one>/<two>/<three>')
@main.route('/<one>/<two>/<three>/<four>')
@main.route('/<one>/<two>/<three>/<four>/<five>')
@main.route('/<one>/<two>/<three>/<four>/<five>/<six>')
@main.route('/<one>/<two>/<three>/<four>/<five>/<six>/<seven>')
@main.route('/<one>/<two>/<three>/<four>/<five>/<six>/<seven>/<eight>')
@main.route('/<one>/<two>/<three>/<four>/<five>/<six>/<seven>/<eight>/<nine>')
@main.route('/<one>/<two>/<three>/<four>/<five>/<six>/<seven>/<eight>/<nine>/<ten>')
def index(**kwargs):
    name = None
    directive = ""
    # Path segments arrive as keyword arguments named after these keys.
    possible_keys = ("one", "two", "three", "four", "five",
                     "six", "seven", "eight", "nine", "ten")
    if kwargs:
        if 'name' in kwargs:
            name = kwargs.pop('name')
        if kwargs:
            # Join the remaining segments, in path order, into one string.
            directive = ' '.join(kwargs[possible_keys[i]]
                                 for i in range(len(kwargs)))
    return render_template('index.html', name=name, directive=directive)
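To see how these overlapping rules resolve, here is a minimal sketch; it assumes the blueprint above is registered on an ordinary Flask app (the app wiring is illustrative, not part of the original module):

from flask import Flask

app = Flask(__name__)
app.register_blueprint(main)

# With the rules above, a GET to /foo/bar/name/alice calls
# index(one='foo', two='bar', name='alice'), so the template receives
# name='alice' and directive='foo bar'; a GET to /foo/bar yields
# name=None and directive='foo bar'.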
The SOFTWARE PRODUCT (CLUSTER) is protected by copyright laws and international copyright treaties, as well as other intellectual property laws and treaties. The SOFTWARE PRODUCT is licensed, not sold. CLUSTER may be installed and used for free on CHROME; a license must be purchased to use extended features such as syncing saved tabs across devices. Licenses are per user account and valid for use with CLUSTER extended services. A licensed account may be used on multiple computers with CHROME. Maintenance of Copyright Notices. You must not remove or alter any copyright notices on any copy of CLUSTER. Distribution. DAG HOLMBERG expressly disclaims any warranty for CLUSTER, which is provided 'as is' without any express or implied warranty of any kind, including but not limited to any warranties of merchantability, non-infringement, or fitness for a particular purpose. In no event shall DAG HOLMBERG be liable for any damages arising from the use of CLUSTER, to the maximum extent permitted by law. This includes, without limitation, lost profits, business interruption, or lost information. In no event will DAG HOLMBERG be liable for loss of data or for indirect, special, incidental, consequential (including lost profit), or other damages. DAG HOLMBERG shall have no liability with respect to the content of CLUSTER or any part thereof, including but not limited to errors or omissions contained therein, libel, trademark rights, business interruption, loss of privacy or the disclosure of confidential information. DAG HOLMBERG reserves the right to cancel support in the event of increased operating costs resulting in monetary loss. Cluster is a Tab Manager that helps you and your projects stay organized.
# -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.

from __future__ import division  # just to be safe...

import numpy as np


###############################################################################
# Utility functions

def _check_color_dim(val):
    """Ensure val is Nx(n_col), usually Nx3"""
    val = np.atleast_2d(val)
    if val.shape[1] not in (3, 4):
        raise RuntimeError('Value must have second dimension of size 3 or 4')
    return val, val.shape[1]


###############################################################################
# RGB<->HEX conversion

def _hex_to_rgba(hexs):
    """Convert hex to rgba, permitting alpha values in hex"""
    hexs = np.atleast_1d(np.array(hexs, '|U9'))
    out = np.ones((len(hexs), 4), np.float32)
    for hi, h in enumerate(hexs):
        assert isinstance(h, str)
        off = 1 if h[0] == '#' else 0
        assert len(h) in (6 + off, 8 + off)
        e = (len(h) - off) // 2
        out[hi, :e] = [int(h[i:i + 2], 16) / 255.
                       for i in range(off, len(h), 2)]
    return out


def _rgb_to_hex(rgbs):
    """Convert rgb to hex triplet"""
    rgbs, n_dim = _check_color_dim(rgbs)
    return np.array(['#%02x%02x%02x' % tuple((255 * rgb[:3]).astype(np.uint8))
                     for rgb in rgbs], '|U7')


###############################################################################
# RGB<->HSV conversion

def _rgb_to_hsv(rgbs):
    """Convert Nx3 or Nx4 rgb to hsv"""
    rgbs, n_dim = _check_color_dim(rgbs)
    hsvs = list()
    for rgb in rgbs:
        rgb = rgb[:3]  # don't use alpha here
        idx = np.argmax(rgb)
        val = rgb[idx]
        c = val - np.min(rgb)
        if c == 0:
            hue = 0
            sat = 0
        else:
            if idx == 0:  # R == max
                hue = ((rgb[1] - rgb[2]) / c) % 6
            elif idx == 1:  # G == max
                hue = (rgb[2] - rgb[0]) / c + 2
            else:  # B == max
                hue = (rgb[0] - rgb[1]) / c + 4
            hue *= 60
            sat = c / val
        hsv = [hue, sat, val]
        hsvs.append(hsv)
    hsvs = np.array(hsvs, dtype=np.float32)
    if n_dim == 4:
        # Carry the alpha channel through unchanged (slice keeps it 2D so
        # the concatenation is valid).
        hsvs = np.concatenate((hsvs, rgbs[:, 3:4]), axis=1)
    return hsvs


def _hsv_to_rgb(hsvs):
    """Convert Nx3 or Nx4 hsv to rgb"""
    hsvs, n_dim = _check_color_dim(hsvs)
    # In principle, we *might* be able to vectorize this, but might as well
    # wait until a compelling use case appears
    rgbs = list()
    for hsv in hsvs:
        c = hsv[1] * hsv[2]
        m = hsv[2] - c
        hp = hsv[0] / 60
        x = c * (1 - abs(hp % 2 - 1))
        if 0 <= hp < 1:
            r, g, b = c, x, 0
        elif hp < 2:
            r, g, b = x, c, 0
        elif hp < 3:
            r, g, b = 0, c, x
        elif hp < 4:
            r, g, b = 0, x, c
        elif hp < 5:
            r, g, b = x, 0, c
        else:
            r, g, b = c, 0, x
        rgb = [r + m, g + m, b + m]
        rgbs.append(rgb)
    rgbs = np.array(rgbs, dtype=np.float32)
    if n_dim == 4:
        rgbs = np.concatenate((rgbs, hsvs[:, 3:4]), axis=1)
    return rgbs


###############################################################################
# RGB<->CIELab conversion

# These numbers are adapted from MIT-licensed MATLAB code for
# Lab<->RGB conversion. They provide XYZ<->RGB conversion matrices
# with D65 white point normalization built in.
# _rgb2xyz = np.array([[0.412453, 0.357580, 0.180423],
#                      [0.212671, 0.715160, 0.072169],
#                      [0.019334, 0.119193, 0.950227]])
# _white_norm = np.array([0.950456, 1.0, 1.088754])
# _rgb2xyz /= _white_norm[:, np.newaxis]
# _rgb2xyz_norm = _rgb2xyz.T
_rgb2xyz_norm = np.array([[0.43395276, 0.212671, 0.01775791],
                          [0.37621941, 0.71516, 0.10947652],
                          [0.18982783, 0.072169, 0.87276557]])

# _xyz2rgb = np.array([[3.240479, -1.537150, -0.498535],
#                      [-0.969256, 1.875992, 0.041556],
#                      [0.055648, -0.204043, 1.057311]])
# _white_norm = np.array([0.950456, 1., 1.088754])
# _xyz2rgb *= _white_norm[np.newaxis, :]
_xyz2rgb_norm = np.array([[3.07993271, -1.53715, -0.54278198],
                          [-0.92123518, 1.875992, 0.04524426],
                          [0.05289098, -0.204043, 1.15115158]])


def _rgb_to_lab(rgbs):
    rgbs, n_dim = _check_color_dim(rgbs)
    # convert RGB->XYZ
    xyz = rgbs[:, :3].copy()  # a misnomer for now but will end up being XYZ
    over = xyz > 0.04045
    xyz[over] = ((xyz[over] + 0.055) / 1.055) ** 2.4
    xyz[~over] /= 12.92
    xyz = np.dot(xyz, _rgb2xyz_norm)
    over = xyz > 0.008856
    xyz[over] = xyz[over] ** (1. / 3.)
    xyz[~over] = 7.787 * xyz[~over] + 0.13793103448275862

    # Convert XYZ->LAB
    L = (116. * xyz[:, 1]) - 16
    a = 500 * (xyz[:, 0] - xyz[:, 1])
    b = 200 * (xyz[:, 1] - xyz[:, 2])
    labs = [L, a, b]

    # Append alpha if necessary
    if n_dim == 4:
        labs.append(np.atleast_1d(rgbs[:, 3]))
    labs = np.array(labs, order='F').T  # Becomes 'C' order b/c of .T
    return labs


def _lab_to_rgb(labs):
    """Convert Nx3 or Nx4 lab to rgb"""
    # adapted from BSD-licensed work in MATLAB by Mark Ruzon
    # Based on ITU-R Recommendation BT.709 using the D65 white point
    labs, n_dim = _check_color_dim(labs)

    # Convert Lab->XYZ (silly indexing used to preserve dimensionality)
    y = (labs[:, 0] + 16.) / 116.
    x = (labs[:, 1] / 500.) + y
    z = y - (labs[:, 2] / 200.)
    xyz = np.concatenate(([x], [y], [z]))  # 3xN
    over = xyz > 0.2068966
    xyz[over] = xyz[over] ** 3.
    xyz[~over] = (xyz[~over] - 0.13793103448275862) / 7.787

    # Convert XYZ->RGB
    rgbs = np.dot(_xyz2rgb_norm, xyz).T
    over = rgbs > 0.0031308
    rgbs[over] = 1.055 * (rgbs[over] ** (1. / 2.4)) - 0.055
    rgbs[~over] *= 12.92
    if n_dim == 4:
        rgbs = np.concatenate((rgbs, labs[:, 3:4]), axis=1)
    rgbs = np.clip(rgbs, 0., 1.)
    return rgbs
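A quick round-trip check of the converters above; this snippet is illustrative (it just assumes the functions are in scope) and is not part of the original module:

rgb = np.array([[0.2, 0.4, 0.6]], dtype=np.float32)

hsv = _rgb_to_hsv(rgb)
assert np.allclose(_hsv_to_rgb(hsv), rgb, atol=1e-3)

lab = _rgb_to_lab(rgb)
# Lab round-trips are approximate: the transfer curves and clipping
# introduce small errors, so use a loose tolerance.
assert np.allclose(_lab_to_rgb(lab), rgb, atol=1e-2)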
You’re looking for a property in Fairy Meadow and you’re not sure of its condition? It’s a huge amount of coin and you don’t want to make a mistake. So how do you find a good building inspection business in Fairy Meadow 2519?
#
# Copyright 2008-2014 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#

from operator import itemgetter
import xml.dom
import xml.dom.minidom
import xml.etree.ElementTree as etree

from vdsm import constants
from vdsm import utils

import caps


def has_channel(domXML, name):
    domObj = etree.fromstring(domXML)
    devices = domObj.findall('devices')

    if len(devices) == 1:
        for chan in devices[0].findall('channel'):
            targets = chan.findall('target')
            if len(targets) == 1:
                if targets[0].attrib['name'] == name:
                    return True

    return False


def all_devices(domXML):
    domObj = xml.dom.minidom.parseString(domXML)
    devices = domObj.childNodes[0].getElementsByTagName('devices')[0]

    for deviceXML in devices.childNodes:
        if deviceXML.nodeType == xml.dom.Node.ELEMENT_NODE:
            yield deviceXML


def filter_devices_with_alias(devices):
    for deviceXML in devices:
        aliasElement = deviceXML.getElementsByTagName('alias')
        if aliasElement:
            alias = aliasElement[0].getAttribute('name')
            yield deviceXML, alias


class Device(object):
    # since we're inheriting all VM devices from this class, __slots__ must
    # be initialized here in order to avoid __dict__ creation
    __slots__ = ()

    def createXmlElem(self, elemType, deviceType, attributes=()):
        """
        Create domxml device element according to passed in params
        """
        elemAttrs = {}
        element = Element(elemType)

        if deviceType:
            elemAttrs['type'] = deviceType

        for attrName in attributes:
            if not hasattr(self, attrName):
                continue

            attr = getattr(self, attrName)
            if isinstance(attr, dict):
                element.appendChildWithArgs(attrName, **attr)
            else:
                elemAttrs[attrName] = attr

        element.setAttrs(**elemAttrs)
        return element


class Element(object):

    def __init__(self, tagName, text=None, **attrs):
        self._elem = xml.dom.minidom.Document().createElement(tagName)
        self.setAttrs(**attrs)
        if text is not None:
            self.appendTextNode(text)

    def __getattr__(self, name):
        return getattr(self._elem, name)

    def setAttrs(self, **attrs):
        for attrName, attrValue in attrs.iteritems():
            self._elem.setAttribute(attrName, attrValue)

    def appendTextNode(self, text):
        textNode = xml.dom.minidom.Document().createTextNode(text)
        self._elem.appendChild(textNode)

    def appendChild(self, element):
        self._elem.appendChild(element)

    def appendChildWithArgs(self, childName, text=None, **attrs):
        child = Element(childName, text, **attrs)
        self._elem.appendChild(child)
        return child


class Domain(object):

    def __init__(self, conf, log, arch):
        """
        Create the skeleton of a libvirt domain xml

        <domain type="kvm">
            <name>vmName</name>
            <uuid>9ffe28b6-6134-4b1e-8804-1185f49c436f</uuid>
            <memory>262144</memory>
            <currentMemory>262144</currentMemory>
            <vcpu current='smp'>160</vcpu>
            <devices>
            </devices>
        </domain>
        """
        self.conf = conf
        self.log = log

        self.arch = arch

        self.doc = xml.dom.minidom.Document()

        if utils.tobool(self.conf.get('kvmEnable', 'true')):
            domainType = 'kvm'
        else:
            domainType = 'qemu'

        domainAttrs = {'type': domainType}

        # Hack around libvirt issue BZ#988070, this is going to be removed as
        # soon as the domain XML format supports the specification of USB
        # keyboards
        if self.arch == caps.Architecture.PPC64:
            domainAttrs['xmlns:qemu'] = \
                'http://libvirt.org/schemas/domain/qemu/1.0'

        self.dom = Element('domain', **domainAttrs)
        self.doc.appendChild(self.dom)

        self.dom.appendChildWithArgs('name', text=self.conf['vmName'])
        self.dom.appendChildWithArgs('uuid', text=self.conf['vmId'])
        if 'numOfIoThreads' in self.conf:
            self.dom.appendChildWithArgs('iothreads',
                                         text=str(self.conf['numOfIoThreads']))
        memSizeKB = str(int(self.conf.get('memSize', '256')) * 1024)
        self.dom.appendChildWithArgs('memory', text=memSizeKB)
        self.dom.appendChildWithArgs('currentMemory', text=memSizeKB)
        vcpu = self.dom.appendChildWithArgs('vcpu', text=self._getMaxVCpus())
        vcpu.setAttrs(**{'current': self._getSmp()})

        self._devices = Element('devices')
        self.dom.appendChild(self._devices)

    def appendClock(self):
        """
        Add <clock> element to domain:

        <clock offset="variable" adjustment="-3600">
            <timer name="rtc" tickpolicy="catchup"/>
        </clock>

        for hyperv:
        <clock offset="variable" adjustment="-3600">
            <timer name="hypervclock"/>
        </clock>
        """

        m = Element('clock', offset='variable',
                    adjustment=str(self.conf.get('timeOffset', 0)))
        if utils.tobool(self.conf.get('hypervEnable', 'false')):
            m.appendChildWithArgs('timer', name='hypervclock')
        else:
            m.appendChildWithArgs('timer', name='rtc', tickpolicy='catchup')
        m.appendChildWithArgs('timer', name='pit', tickpolicy='delay')

        if self.arch == caps.Architecture.X86_64:
            m.appendChildWithArgs('timer', name='hpet', present='no')

        self.dom.appendChild(m)

    def appendOs(self):
        """
        Add <os> element to domain:

        <os>
            <type arch="x86_64" machine="pc">hvm</type>
            <boot dev="cdrom"/>
            <kernel>/tmp/vmlinuz-2.6.18</kernel>
            <initrd>/tmp/initrd-2.6.18.img</initrd>
            <cmdline>ARGs 1</cmdline>
            <smbios mode="sysinfo"/>
        </os>
        """

        oselem = Element('os')
        self.dom.appendChild(oselem)

        DEFAULT_MACHINES = {caps.Architecture.X86_64: 'pc',
                            caps.Architecture.PPC64: 'pseries',
                            caps.Architecture.PPC64LE: 'pseries'}

        machine = self.conf.get('emulatedMachine',
                                DEFAULT_MACHINES[self.arch])

        oselem.appendChildWithArgs('type', text='hvm', arch=self.arch,
                                   machine=machine)

        qemu2libvirtBoot = {'a': 'fd', 'c': 'hd', 'd': 'cdrom', 'n': 'network'}
        for c in self.conf.get('boot', ''):
            oselem.appendChildWithArgs('boot', dev=qemu2libvirtBoot[c])

        if self.conf.get('initrd'):
            oselem.appendChildWithArgs('initrd', text=self.conf['initrd'])

        if self.conf.get('kernel'):
            oselem.appendChildWithArgs('kernel', text=self.conf['kernel'])

        if self.conf.get('kernelArgs'):
            oselem.appendChildWithArgs('cmdline', text=self.conf['kernelArgs'])

        if self.arch == caps.Architecture.X86_64:
            oselem.appendChildWithArgs('smbios', mode='sysinfo')

        if utils.tobool(self.conf.get('bootMenuEnable', False)):
            oselem.appendChildWithArgs('bootmenu', enable='yes')

    def appendSysinfo(self, osname, osversion, serialNumber):
        """
        Add <sysinfo> element to domain:

        <sysinfo type="smbios">
          <bios>
            <entry name="vendor">QEmu/KVM</entry>
            <entry name="version">0.13</entry>
          </bios>
          <system>
            <entry name="manufacturer">Fedora</entry>
            <entry name="product">Virt-Manager</entry>
            <entry name="version">0.8.2-3.fc14</entry>
            <entry name="serial">32dfcb37-5af1-552b-357c-be8c3aa38310</entry>
            <entry name="uuid">c7a5fdbd-edaf-9455-926a-d65c16db1809</entry>
          </system>
        </sysinfo>
        """
        sysinfoelem = Element('sysinfo', type='smbios')
        self.dom.appendChild(sysinfoelem)

        syselem = Element('system')
        sysinfoelem.appendChild(syselem)

        def appendEntry(k, v):
            syselem.appendChildWithArgs('entry', text=v, name=k)

        appendEntry('manufacturer', constants.SMBIOS_MANUFACTURER)
        appendEntry('product', osname)
        appendEntry('version', osversion)
        appendEntry('serial', serialNumber)
        appendEntry('uuid', self.conf['vmId'])

    def appendFeatures(self):
        """
        Add machine features to domain xml.

        Currently only
        <features>
            <acpi/>
        </features>

        for hyperv:
        <features>
            <acpi/>
            <hyperv>
                <relaxed state='on'/>
            </hyperv>
        </features>
        """
        if (utils.tobool(self.conf.get('acpiEnable', 'true')) or
                utils.tobool(self.conf.get('hypervEnable', 'false'))):
            features = self.dom.appendChildWithArgs('features')

            if utils.tobool(self.conf.get('acpiEnable', 'true')):
                features.appendChildWithArgs('acpi')

            if utils.tobool(self.conf.get('hypervEnable', 'false')):
                hyperv = Element('hyperv')
                features.appendChild(hyperv)

                hyperv.appendChildWithArgs('relaxed', state='on')
                # turns off an internal Windows watchdog, and by doing so
                # avoids some high load BSODs.
                hyperv.appendChildWithArgs('vapic', state='on')

                # magic number taken from recommendations. References:
                # https://bugzilla.redhat.com/show_bug.cgi?id=1083529#c10
                # https://bugzilla.redhat.com/show_bug.cgi?id=1053846#c0
                hyperv.appendChildWithArgs(
                    'spinlocks', state='on', retries='8191')

    def appendCpu(self):
        """
        Add guest CPU definition.

        <cpu match="exact">
            <model>qemu64</model>
            <topology sockets="S" cores="C" threads="T"/>
            <feature policy="require" name="sse2"/>
            <feature policy="disable" name="svm"/>
        </cpu>
        """

        cpu = Element('cpu')

        # Note: the membership test needs a real tuple; a bare
        # parenthesized string would do a substring check instead.
        if self.arch in (caps.Architecture.X86_64,):
            cpu.setAttrs(match='exact')

            features = self.conf.get('cpuType', 'qemu64').split(',')
            model = features[0]

            if model == 'hostPassthrough':
                cpu.setAttrs(mode='host-passthrough')
            elif model == 'hostModel':
                cpu.setAttrs(mode='host-model')
            else:
                cpu.appendChildWithArgs('model', text=model)

                # This hack is for backward compatibility as the libvirt
                # does not allow 'qemu64' guest on intel hardware
                if model == 'qemu64' and '+svm' not in features:
                    features += ['-svm']

                for feature in features[1:]:
                    # convert Linux name of feature to libvirt
                    if feature[1:6] == 'sse4_':
                        feature = feature[0] + 'sse4.' + feature[6:]

                    featureAttrs = {'name': feature[1:]}
                    if feature[0] == '+':
                        featureAttrs['policy'] = 'require'
                    elif feature[0] == '-':
                        featureAttrs['policy'] = 'disable'
                    cpu.appendChildWithArgs('feature', **featureAttrs)

        if ('smpCoresPerSocket' in self.conf or
                'smpThreadsPerCore' in self.conf):
            maxVCpus = int(self._getMaxVCpus())
            cores = int(self.conf.get('smpCoresPerSocket', '1'))
            threads = int(self.conf.get('smpThreadsPerCore', '1'))
            cpu.appendChildWithArgs('topology',
                                    sockets=str(maxVCpus / cores / threads),
                                    cores=str(cores), threads=str(threads))

        # CPU-pinning support
        # see http://www.ovirt.org/wiki/Features/Design/cpu-pinning
        if 'cpuPinning' in self.conf:
            cputune = Element('cputune')
            cpuPinning = self.conf.get('cpuPinning')
            for cpuPin in cpuPinning.keys():
                cputune.appendChildWithArgs('vcpupin', vcpu=cpuPin,
                                            cpuset=cpuPinning[cpuPin])
            self.dom.appendChild(cputune)

        # Guest numa topology support
        # see http://www.ovirt.org/Features/NUMA_and_Virtual_NUMA
        if 'guestNumaNodes' in self.conf:
            numa = Element('numa')
            guestNumaNodes = sorted(
                self.conf.get('guestNumaNodes'), key=itemgetter('nodeIndex'))
            for vmCell in guestNumaNodes:
                nodeMem = int(vmCell['memory']) * 1024
                numa.appendChildWithArgs('cell',
                                         cpus=vmCell['cpus'],
                                         memory=str(nodeMem))
            cpu.appendChild(numa)

        self.dom.appendChild(cpu)

    # Guest numatune support
    def appendNumaTune(self):
        """
        Add guest numatune definition.

        <numatune>
            <memory mode='strict' nodeset='0-1'/>
        </numatune>
        """

        if 'numaTune' in self.conf:
            numaTune = self.conf.get('numaTune')
            if 'nodeset' in numaTune.keys():
                mode = numaTune.get('mode', 'strict')
                numatune = Element('numatune')
                numatune.appendChildWithArgs('memory', mode=mode,
                                             nodeset=numaTune['nodeset'])
                self.dom.appendChild(numatune)

    def _appendAgentDevice(self, path, name):
        """
        <channel type='unix'>
            <target type='virtio' name='org.linux-kvm.port.0'/>
            <source mode='bind' path='/tmp/socket'/>
        </channel>
        """
        channel = Element('channel', type='unix')
        channel.appendChildWithArgs('target', type='virtio', name=name)
        channel.appendChildWithArgs('source', mode='bind', path=path)
        self._devices.appendChild(channel)

    def appendInput(self):
        """
        Add input device.

        <input bus="ps2" type="mouse"/>
        """
        if utils.tobool(self.conf.get('tabletEnable')):
            inputAttrs = {'type': 'tablet', 'bus': 'usb'}
        else:
            if self.arch == caps.Architecture.PPC64:
                mouseBus = 'usb'
            else:
                mouseBus = 'ps2'

            inputAttrs = {'type': 'mouse', 'bus': mouseBus}
        self._devices.appendChildWithArgs('input', **inputAttrs)

    def appendKeyboardDevice(self):
        """
        Add keyboard device for ppc64 using a QEMU argument directly.
        This is a workaround to the issue BZ#988070 in libvirt

        <qemu:commandline>
            <qemu:arg value='-usbdevice'/>
            <qemu:arg value='keyboard'/>
        </qemu:commandline>
        """
        commandLine = Element('qemu:commandline')
        commandLine.appendChildWithArgs('qemu:arg', value='-usbdevice')
        commandLine.appendChildWithArgs('qemu:arg', value='keyboard')
        self.dom.appendChild(commandLine)

    def appendEmulator(self):
        emulatorPath = '/usr/bin/qemu-system-' + self.arch

        emulator = Element('emulator', text=emulatorPath)
        self._devices.appendChild(emulator)

    def appendDeviceXML(self, deviceXML):
        self._devices.appendChild(
            xml.dom.minidom.parseString(deviceXML).firstChild)

    def toxml(self):
        return self.doc.toprettyxml(encoding='utf-8')

    def _getSmp(self):
        return self.conf.get('smp', '1')

    def _getMaxVCpus(self):
        return self.conf.get('maxVCpus', self._getSmp())
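For orientation, here is an illustrative way to drive the builder above. The conf keys are the ones the class actually reads, but the values (and the None logger) are made up for the sketch:

conf = {'vmName': 'demo',
        'vmId': '9ffe28b6-6134-4b1e-8804-1185f49c436f',
        'memSize': '512',
        'smp': '2'}
dom = Domain(conf, log=None, arch=caps.Architecture.X86_64)
dom.appendOs()
dom.appendClock()
dom.appendFeatures()
dom.appendInput()
print(dom.toxml())  # pretty-printed libvirt domain XML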
Members of the Scotland Supporter’s Club should be given a say over the Scottish Football Association’s choice of the next FIFA President, says SNP MP Patrick Grady. The call comes as the Scottish Government is carrying out a survey on supporter involvement in local football clubs, and as sports TV network ESPN has invited candidates for the presidency of the global football federation to take part in a televised debate. Patrick Grady, SNP MP for Glasgow North, whose constituency office is next to Partick Thistle’s Firhill Stadium, says giving members of the Scotland Supporter’s Club, known as the Tartan Army, a say in who the SFA delegate should vote for in the election of a replacement for Sepp Blatter, due later this year, could help improve transparency and accountability in football's scandal-hit world governing body. Patrick Grady MP said: “Global football has been hit by too many corruption scandals in recent years. Involving fans in the decisions of how the global game is run could help improve confidence and transparency in FIFA. “Scotland has already set a gold standard for democratic engagement with the independence referendum. Now the SFA could lead the way in global football by involving fans in the decision over how its delegates vote. Giving the Tartan Army a vote or a say in who should be the next FIFA president could encourage other national football associations to do the same - and shed some much-needed light and accountability into FIFA governance. “The Scottish Government is consulting on how supporters can get actively involved in their local clubs - this idea would mean fans of the national team also have a say on how national and global football is run.”
from .. import tables
from .functions import AD, V, W, H_

import numpy as n

H = H_
T = tables.Basic()
V_ = V
n_ = n


def ADV(note_dict={}, adsr_dict={}):
    return AD(sonic_vector=V_(**note_dict), **adsr_dict)


class Being:
    def __init__(self):
        rhythm = [1.]  # repetition of one second
        rhythm2 = [1/2, 1/2]  # repetition of one second
        rhythm3 = [1/3, 1/3, 1/3]  # repetition of one second
        rhythm4 = [1/4, 1/4, 1/4, 1/4]  # repetition of one second
        # assume duration = 1 (be 1 second, minute or whatnot):
        rhythmic_spectrum = [[1./i]*i for i in range(1, 300)]

        # pitch or frequency sequences (to be used at will)
        f = 110
        freqs = [220]
        freq_spectrum = [i*f for i in range(1, 300)]
        neg_spec = [f/i for i in range(2, 300)]

        freq_sym = [[f*2**((i*j)/12) for i in range(j)]
                    for j in [2, 3, 4, 6]]
        freq_sym_ = [[f*2**((i*j)/12) for i in range(300)]
                     for j in [2, 3, 4, 6]]

        dia = [2, 2, 1, 2, 2, 2, 1]
        notes_diatonic = [[dia[(j+i) % 7] for i in range(7)]
                          for j in range(7)]
        notes_diatonic_ = [sum(notes_diatonic[i]) for i in range(7)]
        freq_diatonic = [[f*2**((12*i + notes_diatonic_[j])/12)
                          for i in range(30)] for j in range(7)]

        intensity_octaves = [[10**((i*10)/(j*20)) for i in range(300)]
                             for j in range(1, 20)]  # steps of 10dB - 1/2 dB
        db0 = 10**(-120/20)
        intensity_spec = [[db0*i for i in j] for j in intensity_octaves]

        # diatonic noise, noises derived from the symmetric scales etc.:
        # one sinusoid or other basic waveform in each note.
        # Synth on the freq domain to optimize and simplify the process.
        # Make music of the spheres using ellipses and relations recalling
        # gravity.
        self.resources = locals()
        self.startBeing()

    def walk(self, n, method='straight'):
        # walk n steps up (n < 0 => walk |n| steps down,
        # n == 0 => don't move, return [])
        if method == 'straight':
            # ** TTM
            sequence = [self.grid[self.pointer + i] for i in range(n)]
            self.pointer += n
        elif method == 'low-high':
            sequence = [
                self.grid[self.pointer + i % (self.seqsize + 1)
                          + i // self.seqsize]
                for i in range(n*self.seqsize)
            ]
        elif method == 'perm-walk':
            # restore walk from 02peal
            pass
        self.addSeq(sequence)

    def setPar(self, par='f'):
        # set parameter to be developed in walks and stays
        if par == 'f':
            self.grid = self.fgrid
            self.pointer = self.fpointer

    def setSize(self, ss):
        self.seqsize = ss

    def setPerms(self, perms):
        self.perms = perms

    def stay(self, n, method='perm'):
        # stay somewhere for n notes (n < 0 => stay for n cycles or
        # n permutations)
        if method == 'straight':
            sequence = [self.grid[(self.pointer + i) % self.seqsize]
                        for i in range(n)]
        elif method == 'perm':
            # ** TTM
            sequence = []
            if type(self.domain) != n_.ndarray:
                if not self.domain:
                    domain = self.grid[self.pointer:
                                       self.pointer + self.seqsize]
                else:
                    domain = n_.array(self.domain)
                    print("Implemented OK?? TTM")
            else:
                domain = self.domain
            # nel = self.perms[0].size  # should match self.seqsize ?
            count = 0
            while len(sequence) < n:
                perm = self.perms[count % len(self.perms)]
                seq = perm(domain)
                sequence.extend(seq)
                count += 1
            sequence = sequence[:n]
        self.addSeq(sequence)
        self.total_notes += n

    def addSeq(self, sequence):
        if type(self.__dict__[self.curseq]) == list:
            self.__dict__[self.curseq].extend(sequence)
        else:
            self.__dict__[self.curseq] = H(self.__dict__[self.curseq],
                                           sequence)

    def render(self, nn, fn=False):
        # Render nn notes of the Being!
        # Render with legato, with V__ or whatever it is called.
        self.mkArray()
        ii = n.arange(nn)
        d = self.d_[ii % len(self.d_)]*self.dscale
        f = self.f_[ii % len(self.f_)]
        tab = self.tab_[ii % len(self.tab_)]
        fv = self.fv_[ii % len(self.fv_)]
        nu = self.nu_[ii % len(self.nu_)]
        A = self.A_[ii % len(self.A_)]
        D = self.D_[ii % len(self.D_)]
        S = self.S_[ii % len(self.S_)]
        R = self.R_[ii % len(self.R_)]
        notes = [ADV({'f': ff, 'd': dd, 'fv': fvv, 'nu': nuu, 'tab': tabb},
                     {'A': AA, 'D': DD, 'S': SS, 'R': RR})
                 for ff, dd, fvv, nuu, tabb, AA, DD, SS, RR
                 in zip(f, d, fv, nu, tab, A, D, S, R)]
        if fn:
            if type(fn) != str:
                fn = 'abeing.wav'
            if fn[-4:] != '.wav':
                fn += '.wav'
            W(H(*notes), fn)
        else:
            return H(*notes)

    def startBeing(self):
        self.dscale = 1
        self.d_ = [1]
        self.f_ = [220]
        self.fv_ = [3]
        self.nu_ = [1]
        self.tab_ = [T.triangle]
        self.A_ = [20]
        self.D_ = [20]
        self.S_ = [-5]
        self.R_ = [50]
        self.mkArray()
        self.total_notes = 0

    def mkArray(self):
        self.d_ = n.array(self.d_)
        self.f_ = n.array(self.f_)
        self.fv_ = n.array(self.fv_)
        self.nu_ = n.array(self.nu_)
        self.tab_ = n.array(self.tab_)
        self.A_ = n.array(self.A_)
        self.D_ = n.array(self.D_)
        self.S_ = n.array(self.S_)
        self.R_ = n.array(self.R_)

    def howl(self):
        # some sound resembling a toki pona "mu", a growl, or any other
        # animal noise.
        pass

    def freeze(self):
        # a long sound/note with the parameters set into the being
        pass

    # use sequences of parameters to be iterated through with or without
    # permutations.
    # use the fact that sequences of different sizes might yield longer
    # cycles
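A minimal sketch of driving the class above, assuming the package imports at the top resolve (the filename is arbitrary):

b = Being()             # starts with the default single-note parameter arrays
b.f_ = [220, 330, 440]  # cycle through three pitches
b.render(6, 'being_demo.wav')  # renders six notes and writes a WAV file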
1 And he spake a parable unto them to this end, that men ought always to pray, and not to faint; 2 Saying, There was in a city a judge, which feared not God, neither regarded man: 3 And there was a widow in that city; and she came unto him, saying, Avenge me of mine adversary. 4 And he would not for a while: but afterward he said within himself, Though I fear not God, nor regard man; 5 Yet because this widow troubleth me, I will avenge her, lest by her continual coming she weary me. 6 And the Lord said, "Hear what the unjust judge saith. 7 And shall not God avenge his own elect, which cry day and night unto him, though he bear long with them? 8 I tell you that he will avenge them speedily. Nevertheless, when the Son of man cometh, shall he find faith on the earth?" These verses include two of Jesus' parables about prayer. Note that one was addressed to the disciples (vv. 1-8) and the other (vv. 9-14) to "some who were confident of their own righteousness." 18:1-8. Jesus told the Parable of the Unjust Judge to teach persistence in prayer: that they, His disciples, should always pray and not give up. Verses 2-5 contain the parable itself: A widow continued to go before an unjust judge to plead for justice in her case. He continually refused to "hear" her case, but finally he decided to give her justice so that she would not wear him out with complaining. Jesus interpreted the parable (vv. 6-8), pointing out that if the unjust judge would give justice, then imagine how God (the just Judge) would see that they get justice, and quickly. Jesus' question, When the Son of Man comes, will He find faith on the earth? was not spoken out of ignorance, nor was He questioning whether all believers would be gone when He returns. Instead, He asked the question to exhort the disciples on to faithfulness in prayer, challenging them to keep on praying. 9 And he spake this parable unto some, which trusted in themselves that they were righteous, and despised others. 10 Two men went up into the temple to pray; the one a Pharisee and the other a publican. 11 The Pharisee stood and prayed thus with himself, God, I thank thee, that I am not as other men are, extortioners, unjust, adulterers, or even as this publican. 12 I fast twice in the week, I give tithes of all that I possess. 13 And the publican, standing afar off, would not lift up so much as his eyes unto heaven, but smote upon his breast, saying, God be merciful to me a sinner. 14 I tell you, this man went down to his house justified rather than the other: for every one that exalteth himself shall be abased; and he that humbleth himself shall be exalted. 18:9-14. The purpose of the Parable of the Pharisee and the Tax Collector was to show that one cannot trust in himself for righteousness and should not view others with contempt (v. 9). The Pharisee's prayer was concerned with telling God what a good man he was, for not only did he keep the Law by fasting and tithing (v. 12), but also he considered himself better than other people (v. 11). He was using other people as his standard for measuring righteousness. On the other hand, the tax collector used God as his standard for measuring rightness. He realized that he had to throw himself on the mercy of God for forgiveness. Jesus' application of the parable echoed His teaching in 13:30. It is necessary for people to humble themselves before God to gain forgiveness, and those who are proud (everyone who exalts himself) will be brought low (humbled) by God.
The widow's persistence in prayer (vv. 1-8) and the publican's humble heart (v. 13) are both laudable, whereas the Pharisee's self-righteousness (vv. 11, 12) is sadly laughable.
"""Sequence-to-sequence models.""" # EDIT: Modified inheritance strucutre (see _models.py) so these *should* work again. from __future__ import absolute_import from __future__ import division from __future__ import print_function import logging import numpy as np import tensorflow as tf from tensorflow.contrib.legacy_seq2seq import embedding_attention_seq2seq from tensorflow.contrib.legacy_seq2seq import model_with_buckets #from tensorflow.contrib.rnn.python.ops import core_rnn from tensorflow.contrib.rnn.python.ops import core_rnn_cell from tensorflow.python.ops import embedding_ops from chatbot._models import BucketModel class ChatBot(BucketModel): """Sequence-to-sequence model with attention and for multiple buckets. The input-to-output path can be thought of (on a high level) as follows: 1. Inputs: Batches of integer lists, where each integer is a word ID to a pre-defined vocabulary. 2. Embedding: each input integer is mapped to an embedding vector. Each embedding vector is of length 'layer_size', an argument to __init__. The encoder and decoder have their own distinct embedding spaces. 3. Encoding: The embedded batch vectors are fed to a multi-layer cell containing GRUs. 4. Attention: At each timestep, the output of the multi-layer cell is saved, so that the decoder can access them in the manner specified in the paper on jointly learning to align and translate. (should give a link to paper...) 5. Decoding: The decoder, the same type of embedded-multi-layer cell as the encoder, is initialized with the last output of the encoder, the "context". Thereafter, we either feed it a target sequence (when training) or we feed its previous output as its next input (chatting). """ def __init__(self, buckets, dataset, params): logging.basicConfig(level=logging.INFO) logger = logging.getLogger('ChatBotLogger') super(ChatBot, self).__init__( logger=logger, buckets=buckets, dataset=dataset, params=params) if len(buckets) > 1: self.log.error("ChatBot requires len(buckets) be 1 since tensorflow's" " model_with_buckets function is now deprecated and BROKEN. The only" "workaround is ensuring len(buckets) == 1. ChatBot apologizes." "ChatBot also wishes it didn't have to be this way. " "ChatBot is jealous that DynamicBot does not have these issues.") raise ValueError("Not allowed to pass buckets with len(buckets) > 1.") # ========================================================================================== # Define basic components: cell(s) state, encoder, decoder. # ========================================================================================== #cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.GRUCell(state_size)for _ in range(num_layers)]) cell = tf.contrib.rnn.GRUCell(self.state_size) self.encoder_inputs = ChatBot._get_placeholder_list("encoder", buckets[-1][0]) self.decoder_inputs = ChatBot._get_placeholder_list("decoder", buckets[-1][1] + 1) self.target_weights = ChatBot._get_placeholder_list("weight", buckets[-1][1] + 1, tf.float32) target_outputs = [self.decoder_inputs[i + 1] for i in range(len(self.decoder_inputs) - 1)] # If specified, sample from subset of full vocabulary size during training. softmax_loss, output_proj = None, None if 0 < self.num_samples < self.vocab_size: softmax_loss, output_proj = ChatBot._sampled_loss(self.num_samples, self.state_size, self.vocab_size) # ========================================================================================== # Combine the components to construct desired model architecture. 
# ========================================================================================== # The seq2seq function: we use embedding for the input and attention. def seq2seq_f(encoder_inputs, decoder_inputs): # Note: the returned function uses separate embeddings for encoded/decoded sets. # Maybe try implementing same embedding for both. # Question: the outputs are projected to vocab_size NO MATTER WHAT. # i.e. if output_proj is None, it uses its own OutputProjectionWrapper instead # --> How does this affect our model?? A bit misleading imo. #with tf.variable_scope(scope or "seq2seq2_f") as seq_scope: return embedding_attention_seq2seq(encoder_inputs, decoder_inputs, cell, num_encoder_symbols=self.vocab_size, num_decoder_symbols=self.vocab_size, embedding_size=self.state_size, output_projection=output_proj, feed_previous=self.is_chatting, dtype=tf.float32) # Note that self.outputs and self.losses are lists of length len(buckets). # This allows us to identify which outputs/losses to compute given a particular bucket. # Furthermore, \forall i < j, len(self.outputs[i]) < len(self.outputs[j]). (same for loss) self.outputs, self.losses = model_with_buckets( self.encoder_inputs, self.decoder_inputs, target_outputs, self.target_weights, buckets, seq2seq_f, softmax_loss_function=softmax_loss) # If decoding, append _projection to true output to the model. if self.is_chatting and output_proj is not None: self.outputs = ChatBot._get_projections(len(buckets), self.outputs, output_proj) with tf.variable_scope("summaries"): self.summaries = {} for i, loss in enumerate(self.losses): name = "loss{}".format(i) self.summaries[name] = tf.summary.scalar("loss{}".format(i), loss) def step(self, encoder_inputs, decoder_inputs, target_weights, bucket_id, forward_only=False): """Run a step of the model. Args: encoder_inputs: list of numpy int vectors to feed as encoder inputs. decoder_inputs: list of numpy int vectors to feed as decoder inputs. target_weights: list of numpy float vectors to feed as target weights. bucket_id: which bucket of the model to use. Returns: [summary, gradient_norms, loss, outputs] """ encoder_size, decoder_size = self.buckets[bucket_id] super(ChatBot, self).check_input_lengths( [encoder_inputs, decoder_inputs, target_weights], [encoder_size, decoder_size, decoder_size]) input_feed = {} for l in range(encoder_size): input_feed[self.encoder_inputs[l].name] = encoder_inputs[l] for l in range(decoder_size): input_feed[self.decoder_inputs[l].name] = decoder_inputs[l] input_feed[self.target_weights[l].name] = target_weights[l] input_feed[self.decoder_inputs[decoder_size].name] = np.zeros([self.batch_size], dtype=np.int32) if not forward_only: # Not just for decoding; also for validating in training. fetches = [self.summaries["loss{}".format(bucket_id)], self.apply_gradients[bucket_id], # Update Op that does SGD. self.losses[bucket_id]] # Loss for this batch. outputs = self.sess.run(fetches=fetches, feed_dict=input_feed) return outputs[0], None, outputs[2], None # Summary, no gradients, loss, outputs. else: fetches = [self.losses[bucket_id]] # Loss for this batch. for l in range(decoder_size): # Output logits. fetches.append(self.outputs[bucket_id][l]) outputs = self.sess.run(fetches=fetches, feed_dict=input_feed) return None, None, outputs[0], outputs[1:] # No summary, no gradients, loss, outputs. @staticmethod def _sampled_loss(num_samples, hidden_size, vocab_size): """Defines the samples softmax loss op and the associated output _projection. 
Args: num_samples: (context: importance sampling) size of subset of outputs for softmax. hidden_size: number of units in the individual recurrent states. vocab_size: number of unique output words. Returns: sampled_loss, apply_projection - function: sampled_loss(labels, inputs) - apply_projection: transformation to full vocab space, applied to decoder output. """ assert(0 < num_samples < vocab_size) # Define the standard affine-softmax transformation from hidden_size -> vocab_size. # True output (for a given bucket) := tf.matmul(decoder_out, w) + b w_t = tf.get_variable("proj_w", [vocab_size, hidden_size], dtype=tf.float32) w = tf.transpose(w_t) b = tf.get_variable("proj_b", [vocab_size], dtype=tf.float32) output_projection = (w, b) def sampled_loss(labels, inputs): labels = tf.reshape(labels, [-1, 1]) return tf.nn.sampled_softmax_loss( weights=w_t, biases=b, labels=labels, inputs=inputs, num_sampled=num_samples, num_classes=vocab_size) return sampled_loss, output_projection @staticmethod def _get_projections(num_buckets, unprojected_vals, projection_operator): """Apply _projection operator to unprojected_vals, a tuple of length num_buckets. :param num_buckets: the number of projections that will be applied. :param unprojected_vals: tuple of length num_buckets. :param projection_operator: (in the mathematical meaning) tuple of shape unprojected_vals.shape[-1]. :return: tuple of length num_buckets, with entries the same shape as entries in unprojected_vals, except for the last dimension. """ projected_vals = unprojected_vals for b in range(num_buckets): projected_vals[b] = [tf.matmul(output, projection_operator[0]) + projection_operator[1] for output in unprojected_vals[b]] return projected_vals @staticmethod def _get_placeholder_list(name, length, dtype=tf.int32): """ Args: name: prefix of name of each tf.placeholder list item, where i'th name is [name]i. length: number of items (tf.placeholders) in the returned list. Returns: list of tensorflow placeholder of dtype=tf.int32 and unspecified shape. """ return [tf.placeholder(dtype, shape=[None], name=name+str(i)) for i in range(length)] class SimpleBot(BucketModel): """Primitive implementation from scratch, for learning purposes. 1. Inputs: same as ChatBot. 2. Embedding: same as ChatBot. 3. BasicEncoder: Single GRUCell. 4. DynamicDecoder: Single GRUCell. """ def __init__(self, dataset, params): # SimpleBot allows user to not worry about making their own buckets. # SimpleBot does that for you. SimpleBot cares. max_seq_len = dataset.max_seq_len buckets = [(max_seq_len // 2, max_seq_len // 2), (max_seq_len, max_seq_len)] logging.basicConfig(level=logging.INFO) logger = logging.getLogger('SimpleBotLogger') super(SimpleBot, self).__init__( logger=logger, buckets=buckets, dataset=dataset, params=params) # ========================================================================================== # Create placeholder lists for encoder/decoder sequences. 
# ========================================================================================== with tf.variable_scope("placeholders"): self.encoder_inputs = [tf.placeholder(tf.int32, shape=[None], name="encoder"+str(i)) for i in range(self.max_seq_len)] self.decoder_inputs = [tf.placeholder(tf.int32, shape=[None], name="decoder"+str(i)) for i in range(self.max_seq_len+1)] self.target_weights = [tf.placeholder(tf.float32, shape=[None], name="weight"+str(i)) for i in range(self.max_seq_len+1)] # ========================================================================================== # Before bucketing, need to define the underlying model(x, y) -> outputs, state(s). # ========================================================================================== def seq2seq(encoder_inputs, decoder_inputs, scope=None): """Builds basic encoder-decoder model and returns list of (2D) output tensors.""" with tf.variable_scope(scope or "seq2seq"): encoder_cell = tf.contrib.rnn.GRUCell(self.state_size) encoder_cell = tf.contrib.rnn.EmbeddingWrapper(encoder_cell, self.vocab_size, self.state_size) # BasicEncoder(raw_inputs) -> Embed(raw_inputs) -> [be an RNN] -> encoder state. _, encoder_state = tf.contrib.rnn.static_rnn(encoder_cell, encoder_inputs, dtype=tf.float32) with tf.variable_scope("decoder"): def loop_function(x): with tf.variable_scope("loop_function"): params = tf.get_variable("embed_tensor", [self.vocab_size, self.state_size]) return embedding_ops.embedding_lookup(params, tf.argmax(x, 1)) _decoder_cell = tf.contrib.rnn.GRUCell(self.state_size) _decoder_cell = tf.contrib.rnn.EmbeddingWrapper(_decoder_cell, self.vocab_size, self.state_size) # Dear TensorFlow: you should replace the 'reuse' param in # OutputProjectionWrapper with 'scope' and just do scope.reuse in __init__. # sincerely, programming conventions. decoder_cell = tf.contrib.rnn.OutputProjectionWrapper( _decoder_cell, self.vocab_size, reuse=tf.get_variable_scope().reuse) decoder_outputs = [] prev = None decoder_state = None for i, dec_inp in enumerate(decoder_inputs): if self.is_chatting and prev is not None: dec_inp = loop_function(tf.reshape(prev, [1, 1])) if i == 0: output, decoder_state = decoder_cell(dec_inp, encoder_state, scope=tf.get_variable_scope()) else: tf.get_variable_scope().reuse_variables() output, decoder_state = decoder_cell(dec_inp, decoder_state, scope=tf.get_variable_scope()) decoder_outputs.append(output) return decoder_outputs # ==================================================================================== # Now we can build a simple bucketed seq2seq model. # ==================================================================================== self.losses = [] self.outputs = [] values = self.encoder_inputs + self.decoder_inputs + self.decoder_inputs with tf.name_scope("simple_bucket_model", values): for idx_b, bucket in enumerate(buckets): # Reminder: you should never explicitly set reuse=False. It's a no-no. with tf.variable_scope(tf.get_variable_scope(), reuse=True if idx_b > 0 else None)\ as bucket_scope: # The outputs for this bucket are defined entirely by the seq2seq function. self.outputs.append(seq2seq( self.encoder_inputs[:bucket[0]], self.decoder_inputs[:bucket[1]], scope=bucket_scope)) # Target outputs are just the inputs time-shifted by 1. target_outputs = [self.decoder_inputs[i + 1] for i in range(len(self.decoder_inputs) - 1)] # Compute loss by comparing outputs and target outputs. 
self.losses.append(SimpleBot._simple_loss(self.batch_size, self.outputs[-1], target_outputs[:bucket[1]], self.target_weights[:bucket[1]])) with tf.variable_scope("summaries"): self.summaries = {} for i, loss in enumerate(self.losses): name = "loss{}".format(i) self.summaries[name] = tf.summary.scalar("loss{}".format(i), loss) @staticmethod def _simple_loss(batch_size, logits, targets, weights): """Compute weighted cross-entropy loss on softmax(logits).""" # Note: name_scope only affects names of ops, # while variable_scope affects both ops AND variables. with tf.name_scope("simple_loss", values=logits+targets+weights): log_perplexities = [] for l, t, w in zip(logits, targets, weights): cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=t, logits=l) log_perplexities.append(cross_entropy * w) # Reduce via elementwise-sum. log_perplexities = tf.add_n(log_perplexities) # Get weighted-averge by dividing by sum of the weights. log_perplexities /= tf.add_n(weights) + 1e-12 return tf.reduce_sum(log_perplexities) / tf.cast(batch_size, tf.float32) def step(self, encoder_inputs, decoder_inputs, target_weights, bucket_id, forward_only=False): """Run a step of the model. Args: encoder_inputs: list of numpy int vectors to feed as encoder inputs. decoder_inputs: list of numpy int vectors to feed as decoder inputs. target_weights: list of numpy float vectors to feed as target weights. bucket_id: which bucket of the model to use. Returns: [summary, gradient_norms, loss, outputs]: """ encoder_size, decoder_size = self.buckets[bucket_id] super(SimpleBot, self).check_input_lengths( [encoder_inputs, decoder_inputs, target_weights], [encoder_size, decoder_size, decoder_size]) input_feed = {} for l in range(encoder_size): input_feed[self.encoder_inputs[l].name] = encoder_inputs[l] for l in range(decoder_size): input_feed[self.decoder_inputs[l].name] = decoder_inputs[l] input_feed[self.target_weights[l].name] = target_weights[l] input_feed[self.decoder_inputs[decoder_size].name] = np.zeros([self.batch_size], dtype=np.int32) # Fetches: the Operations/Tensors we want executed/evaluated during session.run(...). if not forward_only: # Not just for decoding; also for validating in training. fetches = [self.summaries["loss{}".format(bucket_id)], self.apply_gradients[bucket_id], # Update Op that does SGD. self.losses[bucket_id]] # Loss for this batch. outputs = self.sess.run(fetches=fetches, feed_dict=input_feed) return outputs[0], None, outputs[2], None # summaries, No gradient norm, loss, no outputs. else: fetches = [self.losses[bucket_id]] # Loss for this batch. for l in range(decoder_size): # Output logits. fetches.append(self.outputs[bucket_id][l]) outputs = self.sess.run(fetches=fetches, feed_dict=input_feed) return None, None, outputs[0], outputs[1:] #No summary, No gradient norm, loss, outputs.
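As a back-of-envelope check of what _simple_loss computes, here is the same weighted average in plain numpy; shapes and values are made up for illustration:

import numpy as np

# Two timesteps, batch of two; the second example is padded at step 2.
ce = [np.array([0.5, 1.0]), np.array([0.7, 0.2])]  # per-step cross-entropy
w = [np.array([1.0, 1.0]), np.array([1.0, 0.0])]   # zero weight masks padding
weighted = sum(c * m for c, m in zip(ce, w))  # elementwise sum over time
avg = weighted / (sum(w) + 1e-12)             # weight-normalised per example
print(avg.mean())  # batch mean, matching reduce_sum / batch_size in the code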
The TWIST Scholarship recognizes Wallingford high school seniors for their excellence in the classroom as well as their community service to both the WYSL and TWIST. To date, TWIST has awarded $179,150 to 252 deserving students from our community. For the scholarship application, click here.
# coding=utf-8
import random

import numpy as np
import pygame
from numpy import array as vector

WIDTH = 800
HEIGHT = 600

BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)

mousepos = np.array([WIDTH / 2, HEIGHT / 2])
is_mouse_down = False
is_rmouse_down = False


class Particle:
    def __init__(self, pos):
        spread = 1.0
        self.position = pos
        self.velocity = np.array([random.uniform(-spread, spread),
                                  random.uniform(-spread, spread)])
        self.acceleration = np.array([0.0, 0.0])
        self.mover = None
        self.mass = 1.0
        # self.mass = random.uniform(0.5, 2.0)
        self.radius = 5 * self.mass
        self.lifespan = 75
        self.size = vector([random.random() * 10 + 10,
                            random.random() * 5 + 5])

    def apply(self, force):
        self.acceleration += force / self.mass

    def update(self):
        self.velocity += self.acceleration
        self.position += self.velocity
        self.acceleration = np.array([0.0, 0.0])
        self.lifespan -= 1

    def draw(self, scr):
        # per-pixel alpha surface (pygame wants integer dimensions)
        s = pygame.Surface((int(self.size[0]), int(self.size[1])),
                           pygame.SRCALPHA)
        # notice the alpha value in the color
        s.fill((127, 127, 127, 128 - self.lifespan))
        pygame.draw.rect(s, (0, 0, 0, int((255 / 75) * self.lifespan)),
                         [0, 0, self.size[0], self.size[1]], 3)
        # s = pygame.transform.rotate(s, 45)
        scr.blit(s, self.position)


class ParticleSystem:
    def __init__(self, pos):
        self.pos = pos.copy()
        self.particles = []

    def draw(self, c):
        for particle in self.particles:
            particle.draw(c)

    def update(self):
        self.particles.append(Particle(self.pos.copy()))
        for particle in self.particles:
            particle.update()
        # Rebuild the list rather than removing while iterating,
        # which would skip elements.
        self.particles = [p for p in self.particles if p.lifespan > 0]

    def run(self, c):
        self.update()
        self.draw(c)

    def apply(self, force):
        for particle in self.particles:
            particle.apply(force)


pygame.init()
screen = pygame.display.set_mode((WIDTH, HEIGHT))
done = False
clock = pygame.time.Clock()

ps = ParticleSystem(vector([WIDTH / 2, 50]))


def main():
    global ps
    screen.fill(WHITE)

    ps.run(screen)
    for system in pss:
        system.run(screen)

    gravity = np.array([0, 0.1])
    ps.apply(gravity)
    for system in pss:
        system.apply(gravity)

    pygame.display.flip()
    clock.tick(60)

    # drag_coeff = -0.005
    # drag = drag_coeff * bob.velocity * np.linalg.norm(bob.velocity)
    # bob.apply(drag)


pss = []
while not done:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            done = True
        elif event.type == pygame.MOUSEMOTION:
            # keep this a float array so Particle.position stays a vector
            mousepos = np.array(event.pos, dtype=float)
        elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
            is_mouse_down = True
            pss.append(ParticleSystem(mousepos.copy()))
        elif event.type == pygame.MOUSEBUTTONUP and event.button == 1:
            is_mouse_down = False
        elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 3:
            is_rmouse_down = True
        elif event.type == pygame.MOUSEBUTTONUP and event.button == 3:
            is_rmouse_down = False
    main()
# root.mainloop()
A checklist can be an important source of information to ensure there are no unanswered questions when you or someone you care for is discharged from hospital.
- Do you know how you will be getting home?
- Have you provided the correct contact details, including a forwarding address for any post?
- Have you enquired as to whether you need to collect your hospital discharge letter for your GP, or if it is to be sent directly to your GP?
- Do you have all the medication you need? Do you understand what your medication is for, how to take it, and any associated side effects?
- Do you know how to manage your condition, if you have ongoing care needs?
- Do you need a follow-up appointment?
- Do you have all your belongings, including any cash or valuables?
- Do you have contact names and numbers for organisations and services, if you require further support?
- Do you have any information leaflets about your condition, if needed?
This checklist has been taken from the BMA Patient Liaison Group publication Hospital discharge: the patient, carer and doctor perspective, January 2014.
#!/usr/bin/env python
"""Minimal linear algebra operations on matrices."""

import ast

import numpy as np

input_vars = {}


def save_namespace(filename='.workspace.mat'):
    """Save the current workspace."""
    namespace_file = open(filename, 'w')
    namespace_file.write(str(input_vars) + '\n')
    namespace_file.close()


def load_namespace(filename='.workspace.mat'):
    """Load saved workspace if any."""
    try:
        namespace_file = open(filename, 'r')
        v = namespace_file.read()
        namespace_file.close()
        vars = ast.literal_eval(v)
        input_vars.update(vars)
    except IOError:
        pass


class Matrix(object):  # noqa
    """Perform basic matrix operations."""

    def __init__(self):
        """Initialize the matrix class."""
        pass

    def to_matrix(self, tokens):
        """Transform list or string to matrix."""
        tokens = str(tokens)
        try:
            # str.replace returns a new string, so assign the result back
            # (otherwise the brackets are never actually stripped).
            tokens = tokens.replace('[', '')
            tokens = tokens.replace(']', '')
            # np.matrix parses both row vectors ("1 2 3") and
            # ';'-separated rows ("1 2; 3 4").
            return np.matrix(tokens)
        except ValueError:
            return None

    def var_assigner(self, var_dict):
        """Keep track of assigned variables."""
        input_vars[str(var_dict[0])] = str(var_dict[1])

    def find_variable(self, var):
        """Find the variable value or raise error."""
        value = input_vars.get(str(var), None)
        return value

    def transpose(self, mx):
        """Perform a transpose."""
        return np.transpose(mx)

    def arith(self, operands):
        """Perform arithmetic operations."""
        try:
            result = ''
            if operands[2] == '+':
                result = operands[0] + operands[1]
            elif operands[2] == '-':
                result = operands[0] - operands[1]
            elif operands[2] == '*':
                result = np.dot(operands[0], operands[1])
            elif operands[0].startswith('inv'):
                result = np.linalg.inv(operands[1])
            return result
        except (TypeError, ValueError, np.linalg.LinAlgError):
            return None
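An illustrative session with the helper above, using only the method names defined in the class:

m = Matrix()
a = m.to_matrix('[1 2; 3 4]')
print(m.transpose(a))        # [[1 3] [2 4]]
print(m.arith((a, a, '+')))  # elementwise sum
m.var_assigner(('a', '1 2; 3 4'))
print(m.find_variable('a'))  # '1 2; 3 4'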
WHAT HAPPENS TO A CITY WHEN INDUSTRY LEAVES? The textile city – its past, present and future – is prototypical of cities shaped by departed industries. In America, and worldwide, textiles profoundly altered physical and social landscapes. Entire cities grew around rivers where mills could draw power, and slave populations swelled to cultivate vast territories of cotton. Like many industries that once underpinned the United States’ economy, textiles gradually faded. And yet the cities and people remain, often disenfranchised and struggling. Urban Fabric brings together independent analysis with lectures by academic, practice and policy experts from across the country. The project presents case studies from around the world that offer successful strategies for elevating textile cities, and other cities that are looking to fill the void left by failing or relocated industries. Urban Fabric is on the road. Follow the latest updates from all of our upcoming events on our NEWS page, as well as our Twitter feed and Facebook page.
#!/usr/bin/env python
# setup
# Setup script for the actors simulation (gvas)
#
# Author:   Benjamin Bengfort <[email protected]>
# Created:  Thu Nov 05 15:13:02 2015 -0500
#
# Copyright (C) 2015 University of Maryland
# For license information, see LICENSE.txt
#
# ID: setup.py [] [email protected] $

"""
Setup script for the actors simulation (gvas)
"""

##########################################################################
## Imports
##########################################################################

import os

try:
    from setuptools import setup
    from setuptools import find_packages
except ImportError:
    raise ImportError("Could not import \"setuptools\"."
                      "Please install the setuptools package.")

##########################################################################
## Package Information
##########################################################################

# Read the __init__.py file for version info
version = None
namespace = {}
versfile = os.path.join(os.path.dirname(__file__), "gvas", "__init__.py")
with open(versfile, 'r') as versf:
    exec(versf.read(), namespace)
    version = namespace['get_version']()

## Discover the packages
packages = find_packages(where=".", exclude=("tests", "bin", "docs",
                                             "fixtures", "register",))

## Load the requirements
requires = []
with open('requirements.txt', 'r') as reqfile:
    for line in reqfile:
        requires.append(line.strip())

## Define the classifiers
classifiers = (
    'Development Status :: 4 - Beta',
    'Environment :: Console',
    'License :: OSI Approved :: MIT License',
    'Natural Language :: English',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2.7',
)

## Define the keywords
keywords = ('simulation', 'actors', 'distributed computing')

## Define the description
long_description = ""

## Define the configuration
config = {
    "name": "GVAS Actors Simulation",
    "version": version,
    "description": "A simulation of the Actor model of communication"
                   " for a variety of applications.",
    "long_description": long_description,
    "license": "MIT",
    "author": "Benjamin Bengfort, Allen Leis, Konstantinos Xirogiannopoulos",
    "author_email": "[email protected], [email protected], [email protected]",
    "url": "https://github.com/tipsybear/actors-simulation",
    "download_url": 'https://github.com/tipsybear/actors-simulation/tarball/v%s' % version,
    "packages": packages,
    "install_requires": requires,
    "classifiers": classifiers,
    "keywords": keywords,
    "zip_safe": True,
    "scripts": ['simulate.py'],
}

##########################################################################
## Run setup script
##########################################################################

if __name__ == '__main__':
    setup(**config)
I am so excited to share this recipe with you. It is simple, nutritious and will satisfy anyone's craving for lemon pie. I recently wanted to make a simple treat to share at a brunch. I knew I wanted to use my new 4oz mason jars, because they are just so darn cute. So I thought, lemon pies would be perfect. This recipe is gluten + dairy free, and with its perfect portion size it won't leave anyone feeling guilty about its decadence. Place 1 jar + 1/4 of a second jar of coconut cream in a blender or food processor with the sugar, lemon zest, juice, and 1 tsp. vanilla extract, and blend until combined (make sure your mixture is lemony and sweet enough for you; you can always adjust the ingredients to your liking). Place graham crackers in a baggie and crush until crumbled. Line your jars up on a cookie sheet and fill the bottom fourth of each jar with cookie crumbs. Fill the jars 3/4 full with the lemon filling mixture. Cover with lids and place in the fridge until ready to serve. To prepare your whipped topping, place the remaining coconut cream in a bowl. Add the powdered sugar, salt and extract to it and blend with a hand mixer until you reach the desired consistency. Again, taste the cream to make sure it is sweet enough for your liking, and add more powdered sugar if desired. When ready to serve, top with a dollop of whipped cream and toasted coconut! I hope you enjoy this as much as my family and friends do!
import csv, os

print('GCSE Controlled Assessment A453\nThomas Bass 4869\nTask 2')

database = []

def start(database, orderGtin, orderPos, orderQty, orderName, items):
    print('Reading File...')
    filename = 'task2.csv'
    filepath = os.path.join(os.getcwd(), filename)
    database = []
    with open(filepath, 'r') as csvfile:
        reader = csv.reader(csvfile)
        for row in reader:
            database.append(row)
    find(database, orderGtin, orderPos, orderQty, orderName, items)

def find(database, orderGtin, orderPos, orderQty, orderName, items):
    print('Enter the GTIN Number of the product you wish to find')
    gtin = input(':')
    for value in database:
        if gtin in value[0]:
            print('Product Found!')
            product = database.index(value)
            name = database[product][1]
            volume = database[product][2]
            price = database[product][3]
            stock = database[product][4]
            if volume.isnumeric() == True:
                full = volume + 'ml'
            else:
                full = ''
            print('Product Name =', full, name)
            print(stock, 'in stock')
            print('Enter the quantity you wish to buy')
            qty = input(': ')
            print(qty, full, name, '@', price)
            print('Add to order? Y/N (This cannot be undone)')
            add = input(': ')
            if add == 'y' or add == 'Y':
                orderGtin.append(gtin)
                orderPos.append(product)
                orderQty.append(qty)
                orderName.append(qty + 'x' + full + ' ' + name + ' @ £' + price)
                items = items + 1
                print('Current order')
                print(orderName)
                editStock(gtin, product, stock, qty)
                print('Add another item?(Y/N)')
                again = input(': ')
                if again == 'y' or again == 'Y':
                    find(database, orderGtin, orderPos, orderQty, orderName, items)
                else:
                    print('Final order')
                    print(orderName)
                    print('Order Shipped!')
            else:
                print('Order Cancelled')
                # Pass the full order state through; calling find(database)
                # with one argument would crash here
                find(database, orderGtin, orderPos, orderQty, orderName, items)

def editStock(gtin, product, stock, qty):
    changeTo = int(stock) - int(qty)
    changeTo = str(changeTo)
    data = open("task2.csv").read()
    data = data.split("\n")
    for i, s in enumerate(data):
        data[i] = data[i].split(",")
    data[product][4] = changeTo
    for i, s in enumerate(data):
        data[i] = str(",".join(data[i]))
    data = "\n".join(data)
    o = open("task2.csv", "w")
    o.write(data)
    o.close()

orderGtin = []
orderPos = []
orderQty = []
orderName = []
items = 0
start(database, orderGtin, orderPos, orderQty, orderName, items)
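The editStock routine above rewrites the CSV by hand with split and join, which breaks if any field ever contains a comma. A safer variant, offered here only as a suggested refactor and not part of the original coursework, uses the csv module for both reading and writing; edit_stock_csv is a hypothetical helper name.

import csv

def edit_stock_csv(path, row_index, new_stock):
    # Read all rows, update the stock column (index 4) of one row,
    # then write the whole file back. Assumes the task2.csv layout
    # used above: GTIN, name, volume, price, stock.
    with open(path, 'r', newline='') as f:
        rows = list(csv.reader(f))
    rows[row_index][4] = str(new_stock)
    with open(path, 'w', newline='') as f:
        csv.writer(f).writerows(rows)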
To all of our friends, family, and loyal guests, after 42 years of serving the Colorado Springs area, we will be closing our doors on Sunday, February 5th. Sitting on top of Rattlesnake Hill, the Sunbird has been a destination for local cuisine, special events, Sunday brunch and amazing views. Our guests will have a few more weeks to dine and reminisce with us before we close. We hope you'll stop by, raise a last glass with us and send the Sunbird off in style! Don't miss out on Drink the Bar Dry, featuring drink and food specials going on now! Well Drinks $3, Domestic Drafts $3, Premium Beer $5, Premium Drinks $5, & More. The Sunbird will be closing its doors on February 5th. Sun-Thurs 4 p.m.-8 p.m.; Fri-Sat 3 p.m.-9 p.m. Dining Room: 4 p.m.-8 p.m. Brunch: Sun: 9:30 a.m.-2:30 p.m. Happy Hour: Mon-Fri 4-7 p.m. *Excludes Happy Hour and cannot be combined with any other offer.
import os
from codecs import open
from collections import OrderedDict

class FunctionRegistry(object):
    def __init__(self):
        self.functions = self.__parse_cpp()

    def __parse_cpp(self):
        # Parse the C++ registry source and collect (name, description)
        # pairs from consecutive add(...) / add_trusted(...) calls.
        root = os.path.dirname(__file__)
        src = os.path.join(root, "../function_registry.cpp")
        with open(src, encoding="utf-8") as f:
            lines = [line.strip() for line in f]
        start = lines.index("// definition start")
        end = lines.index("// definition end")
        definitions = lines[start + 1:end]
        i = 0
        L = OrderedDict()
        while i < len(definitions):
            line = definitions[i]
            if line.startswith("add_trusted("):
                name = line[len("add_trusted("):][1:-2]
                description = definitions[i+1][1:-2]
                L[name] = description
                i += 2
            elif line.startswith("add("):
                name = line[len("add("):][1:-2]
                description = definitions[i+1][1:-2]
                L[name] = description
                i += 2
            else:
                i += 1
        return L
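A short usage sketch for the class above; the loop is illustrative and assumes the module sits next to a function_registry.cpp one directory up, as the relative path in __parse_cpp expects.

registry = FunctionRegistry()
for name, description in registry.functions.items():
    print("%s: %s" % (name, description))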
Reflection Graphics was founded in Northern California in 1998 and was built upon the Graphic Recording practice of founder, Mary Brake. In 2002 the firm moved to Devonport, Auckland, New Zealand where Chris Keenan joined the organization, bringing advanced 3D graphics capabilities to the firm. Mary was part of a group of San Francisco Bay Area professionals who pioneered the graphic recording field. In the US she accumulated extensive experience working with many of the world's largest organizations. She returned to New Zealand at the end of 2001 and has introduced Graphic Recording to scores of organizations throughout Australasia and Asia Pacific. She is called to work at meetings around the world and brings the benefit of emerging techniques in organizational change to every meeting. Mary has astute listening skills and focuses on accurately reflecting meeting content into easily understood and memorable graphics with text. Her graphic recording skills encourage whole brain thinking and, ultimately, help individuals, teams and organisations become more effective and successful. Mary has particular expertise in strategic planning, culture change and engagement, stakeholder consultation, and process planning. She has a law degree from Canterbury University, New Zealand and her initial experience was in criminal justice and social work. She was involved with alternative dispute resolution for the University of Auckland and the NZ Housing Corporation. After moving to California, she joined California Lawyers for the Arts. Transitioning into Graphic Recording, Mary studied art and design. She is an experienced offshore sailor and competes each year in the Mahurangi Classic Boat Regatta. Chris is a computer industry veteran with 20 years' experience as president of successful technology firms. His California company provided 3D software and solutions for architecture, civil engineering, mechanical engineering, and multimedia markets. Chris gained recognition with interactive software and has served on the software reseller advisory boards of AutoDesk, Macromedia and Softdesk. He has engaged with many of the world's largest corporations including AT&T, Lucas Film, PG&E, Pacific Bell, Bechtel, Electronic Arts, United Airlines, Ford Motor Company, ABB, 3M and the State of California. In 2001 Chris sold the business, moved to New Zealand and created the 3D division of Reflection Graphics, Ltd. In New Zealand Chris applied his 3D knowledge to animation, filmmaking and organizational graphics. A major client has been the NZ Department of Conservation, which presented Chris with a conservation achievement award. He is an advocate of sustainability and has filmed parts of the Hauraki Gulf island restoration program for the DOC. He is passionate about renewable energy and his views on energy reform have been published in the New Zealand Herald. Chris also consults at UNITEC's Architecture School as a senior lecturer on 3D Animation, Film Design and Renewable Energy. He has a keen interest in naval architecture and, using 3D software, he designed and built a new 12-meter yacht. In addition to a background in offshore sailing, his experience includes an early career as a professional pilot. He holds a Bachelor of Science in Aviation Administration and is US FAA rated as a Multiengine Airline Transport Pilot. Reflection Graphics utilizes talented associates to provide graphic design, illustration and graphic recording services.
Our pool of artists, cartoonists and digital designers can develop engaging graphics for your organization.
class User:
    def __init__(self, uid, age, gender, country, songs_played):
        # Python 2 code: relies on the reduce builtin and __cmp__ ordering
        self.__uid = uid
        self.__age = age
        self.__gender = gender
        self.__country = country
        self.__songs_played = songs_played
        #print max(songs_played) == songs_played[0]#debug
        self.__most_played = songs_played[0]#assume they are already sorted (checked beforehand)
        #if we don't assume that artists are already sorted...
        #self.__most_played = max(songs_played)
        sum_times = reduce(lambda x, y: x + y.times, songs_played, 0)#sum of times across all artists
        coef = 1.0 * self.__most_played.times / sum_times#share of the most-played artist relative to the total
        self.__relevance = coef

    def __str__(self):
        out = ""
        out += "User: " + self.__uid[:16] + "..." + "\n"#id is elided; it is too long!
        out += "Country: " + self.__country + "\n"
        out += "Age: " + str(self.__age) + "\n"
        out += "Gender: " + str(self.__gender) + "\n"
        out += "Most Played: " + self.__most_played.name + "\n"
        out += "Relevance: " + '{0:.2%}'.format(self.__relevance) + "\n"#percentage formatting
        return out

    def __cmp__(self, other):
        return cmp(self.__uid, other.uid())

    def uid(self): #uid getter
        return self.__uid

class Artist:
    __slots__ = ('name', 'times')

    def __init__(self, *args):
        self.name = args[0]
        #I found corruption in the big.dat file: an artist parsed without times (i.e. no "::" to split)
        if len(args) > 1:#times is provided
            self.times = int(args[1])
        else:#times is not provided
            self.times = 0

    def __str__(self):
        return self.name + ' ' + str(self.times)

    def __cmp__(self, other):
        return cmp(self.times, other.times)
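A minimal usage sketch for the two classes, in the same Python 2 style they are written in. The values are made up; the artist list is passed pre-sorted by play count, since User assumes the first element is the most played.

artists = [Artist('Radiohead', '120'), Artist('Portishead', '30')]
user = User('b80344d063b5ccb3212f76538f3d9e43', 24, 'm', 'NZ', artists)
print user  # prints the formatted block from __str__, relevance 80.00%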
Hiya! Today I have a different kind of post….a crochet post! After Christmas, I had the desire for a big crochet project. Crocheting for me is normally just rectangles, squares, hearts, and other random shapes, but I really wanted to learn some new skills. I started off with two skeins of yarn, and ended up buying 7 skeins. I was inspired by this blanket. Well, 7 skeins, one month, 100 BBC Merlin episodes, and 64 granny squares later, I came up with this. Charlie wanted to be in the shot again. He got so much love last time, so I figured I might as well include him again. I'm so happy with the result. Most of the work was making the squares, which made a huge stack, but when I actually crocheted them together the blanket was smaller. If I didn't know, I never would have guessed 64 squares went into this! The rows have been coming apart at the corners. I crocheted a long rectangle of 8 granny squares together and then crocheted the rows together. NOTE TO SELF: Darn your ends in. I never thought that darning/sewing in the crocheted end pieces of yarn could make a difference. I was lazy and I knotted the end of the rows and cut it off. Well, I wish I hadn't done that, because I have been fixing my falling-apart blanket all week! If you would like to learn to crochet, I would definitely recommend it. Crochet is great for something to do in the car, or something to carry in your purse. It's very portable, which is something I always liked about it. Bella Coco has tons of great YouTube videos for the beginner. I would make it again. It ended up a little bit smaller than I planned, but it's really good as a lap blanket or a decorative blanket for my bed. I also think I would use this technique again for a baby blanket. I also added a small single crochet border for effect. Well, I suppose that's all! Thoughts anyone? I've learned from my mistakes this past year of sewing, and I wish I knew some of the stuff I know now. Here are my tips. 1. Make sure you have enough fabric for your project. Sometimes I'm so caught up in sewing something that I forget to check how much fabric I need. This has happened to me a million times, and it's frustrating! 2. Clean your machine often. Need I say more? Check out this link where I am reminded painfully of this. 3. When cutting a pattern, make sure you have cut all the pieces before you throw away the scraps. Last year, I was sewing a dress, and I cut all the pieces without checking for the back bodice piece. It either didn't come in the pattern package, or I threw it out, but it caused a lot of trauma and stress. Eventually, I drafted my own pattern with my mom. This was the dress I made–it's hard to tell that I drafted my own back bodice pattern. 4. When buying fabric at a store, be sure to check how much the fabric is per yard. I think everybody knows this, but once I get to the checkout counter, it's shocking how many fabric supplies I buy! Just walking into Joann's instantly makes me want to buy stuff. 5. When buying fabric, have an idea of what you're actually going to do with it. This is similar to the 4th and 1st tips. I always buy fabric and don't actually have an idea of what to do with it. Then when I want to make something out of it, I either don't have enough or it's the wrong type of fabric, etc. 6. Remember to change out your needle for the right fabric. I'm always so lazy about this, and the other day I was sewing really thick fabric with the standard needle. And what do you think happened?
The needle got caught when sewing, and it wouldn't sew or come up. I eventually had to take the bobbin out, turn the machine off, and cut the fabric out until the needle finally came up bent. So I had to throw it away! You would have thought I learned my lesson, but then the next week the same thing happened! 7. Use the right stitch length and tension for the right project. I can't tell you how many times this has happened, and again, I was lazy and didn't change it. Oh well. 8. Don't use a million pins. I'm pin-crazy, and I use tons of pins when I don't really need them. Once it's time to sew, though, I get annoyed at having to pull out the pins. 9. Don't sew over the pin. Okay, so maybe this one is like the 8th tip, but it's happened to all of us. 10. Last, but not least, read all the instructions on the pattern. More often than not, I skim over the instructions, and it comes back to me later when I sew the wrong pieces together. I would LOVE to hear your funny sewing stories. Either let me know in the comments or tell me here. I'll post all the stories on the blog in the next post. Thanks! DUST. It was clogged up with dust! I last opened it up in February, and it was clean, but I've sewed considerably more since then. I took 15 minutes cleaning out all the loose thread and dust. My manual didn't say anything about oiling it, except for saying not to oil the bobbin case. I'll have to look into that more. Once I was done, I put back all the screws, the presser foot, and the other things I needed to take off, threaded the machine and bobbin, and what happens when I sew? The thread gets tangled. I cleaned out the thread, and then what happens? The stitches are tight and aren't sewing properly. Note that the tension is fine, the stitch length is at the largest setting…and it won't sew. I give up! I love my Brother machine, but I think it's time for me to take a break for a week or so. Tell me in the comments about your most frustrating sewing problems! Hello Freckled Fashionistas! I'm sitting on my back porch right now in a sundress soaking in the nice breeze and fresh air. My sewing room used to be in my bedroom until my mother surprised me with a separate space for me to sew. Today I'm going to give you a tour. I had 2 yards of this beautiful fabric and I ended up using it to make a couple throw pillows and pillowcases. It may not have been the best thing I could do with it, but I'm pleased with the outcome. You might recognize this dress from last year. This basket is just for extra fabric and bits and bobs. This fabric shelf is pretty much just filled with children's prints (I'm working on a children's collection, but more on that later), because I keep my clothing fabric in a separate container in my room. Sneak peek at a vest I'm working on! How's your sewing room (if you have one)? Let me know in the comments! Happy Sewing!
#####################################################################
##### IMPORT STANDARD MODULES
#####################################################################

#Python 3 support:
from __future__ import absolute_import, division
from __future__ import print_function, unicode_literals

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# import pydot
import os
from scipy.stats.mstats import chisquare, mode

from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold, GridSearchCV
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier, GradientBoostingClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn import metrics, model_selection
from sklearn.feature_selection import RFE, RFECV

from abc import ABCMeta, abstractmethod
# from StringIO import StringIO
# import xgboost as xgb
# from xgboost.sklearn import XGBClassifier

from .genericmodelclass import GenericModelClass
from .data import DataBlock

#Python 2/3 compatible string type for datatype checks:
try:
    string_types = basestring  # Python 2
except NameError:
    string_types = str  # Python 3

#####################################################################
##### GENERIC MODEL CLASS
#####################################################################

class base_classification(GenericModelClass):
    """ A base class which defines the generic classification functions
    and variable definitions.

    Parameters
    ----------
    alg : object
        An sklearn-style estimator

    data_block : object
        An object of easyML's DataBlock class. You should first create an
        object of that class and then pass it as a parameter.

    predictors : list of strings, default []
        A list of columns which are to be used as predictors (also called
        independent variables or features).
        The default value is an empty list because these need not always
        be defined at the time of class initialization. The set_predictors
        method can be used later but before creating any predictive model.

    cv_folds : int, default 5
        The number of folds to be created while performing CV.
        This parameter can be adjusted later using the set_parameters
        method

    scoring_metric : str, default 'accuracy'
        The scoring metric to be used for evaluating the model across the
        different functions available. The available options are
        - 'accuracy'
        - 'auc'
        - 'log_loss'
        - 'f1'
        - 'average_precision'

    additional_display_metrics : list of strings, default []
        A list of additional display metrics to be shown for the test and
        train dataframes in data_block. Note:
        - These will be just shown for user reference and not actually
          used for model evaluation
        - The same available options as scoring_metric apply
    """

    #Define as a meta class to disable direct instances
    __metaclass__ = ABCMeta

    # Map possible inputs to functions in sklearn.metrics.
    # Each value of the dictionary is a tuple of 3:
    # (function, multi-class support, requires-probabilities)
    # function: the sklearn metrics function
    # multi-class support: if True, function allows multi-class support
    # requires-probabilities: if True, the function requires
    # probabilities to be passed as arguments
    metrics_map = {
        'accuracy': (metrics.accuracy_score, True, False),
        'auc': (metrics.roc_auc_score, False, True),
        'log_loss': (metrics.log_loss, True, True),
        'f1': (metrics.f1_score, True, False),
        'average_precision': (metrics.average_precision_score, False, True)
    }

    def __init__(
        self, alg, data_block, predictors=[], cv_folds=5,
        scoring_metric='accuracy', additional_display_metrics=[]
    ):
        GenericModelClass.__init__(
            self, alg=alg, data_block=data_block, predictors=predictors,
            cv_folds=cv_folds, scoring_metric=scoring_metric,
            additional_display_metrics=additional_display_metrics)

        #Run input datatype checks:
        self.check_datatype(data_block, 'data_block', DataBlock)
        self.subset_check(predictors)
        self.check_datatype(cv_folds, 'cv_folds', int)
        #string_types covers both Python 2 (basestring) and Python 3 (str):
        self.check_datatype(scoring_metric, 'scoring_metric', string_types)
        self.check_datatype(
            additional_display_metrics, 'additional_display_metrics', list)

        #Store predicted probabilities in a dictionary with keys as the
        # name of the dataset (train/test/predict) and values as the actual
        # predictions.
        self.predictions_probabilities = {}

        #Boolean to store whether the estimator chosen allows probability
        # predictions
        self.probabilities_available = True

        #Define number of classes in target.
        self.num_target_class = len(
            self.datablock.train[self.datablock.target].unique())

        #A Series object to store generic classification model outcomes.
        self.classification_output = pd.Series(
            index=['ModelID', 'CVScore_mean', 'CVScore_std', 'AUC',
                   'ActualScore (manual entry)', 'CVMethod', 'Predictors']
        )

        #Get the dictionary of available dataframes
        self.dp = self.datablock.data_present()

        #Check all the entered metrics. Note that this check has to be
        #placed after declaration of num_target_class attribute
        for metric in [scoring_metric] + additional_display_metrics:
            self.check_metric(metric, self.num_target_class)

    @classmethod
    def check_metric(cls, metric, num_target_class):
        #Note: cls, not self, inside a classmethod
        if metric not in cls.metrics_map:
            raise cls.InvalidInput(
                "The input '%s' is not a valid scoring metric for this module" % metric)

        if num_target_class > 2:
            if not cls.metrics_map[metric][1]:
                raise cls.InvalidInput(
                    "The %s metric does not support the multi-class classification case" % metric)

    def fit_model(
        self, performCV=True, printResults=True,
        printTopN=None, printConfusionMatrix=True,
        printModelParameters=True):
        """An advanced model fit function which fits the model on the
        training data and performs cross-validation.
        It prints a model report containing the following:
        - The parameters being used to fit the model
        - Confusion matrix for the train and test data
        - Scoring metrics for the train and test data
        - CV mean and std scores for the scoring metric
        - Additional scoring metrics on train and test data, if specified

        Note that you can decide which details are to be printed using
        method arguments.

        Parameters
        ----------
        performCV : bool, default True
            if True, the model performs cross-validation using the number
            of folds as the cv_folds parameter of the model

        printResults : bool, default True
            if True, prints the report of the model. This should be kept
            as True unless the module is being used in a background script

        printTopN : int, default None
            The number of top scored features to be displayed in the
            feature importance or coefficient plot of the model.
            If None, all the features will be displayed by default.
            Note:
            - For algorithms supporting real coefficients, the features
              will be sorted by their magnitudes (absolute values).
            - For algorithms supporting positive feature importance
              scores, features are sorted on the score itself.
            This will be ignored if printResults is False.

        printConfusionMatrix : bool, default True
            if True, the confusion matrices for the train and test
            dataframes are printed, otherwise they are omitted.
            This will be ignored if printResults is False.

        printModelParameters : bool, default True
            if True, the parameters being used to run the model are
            printed. It helps in validating the parameters and also makes
            jupyter notebooks more informative if used
        """
        self.check_datatype(performCV, 'performCV', bool)
        self.check_datatype(printResults, 'printResults', bool)
        self.check_datatype(printConfusionMatrix, 'printConfusionMatrix', bool)
        self.check_datatype(printModelParameters, 'printModelParameters', bool)
        if printTopN:
            self.check_datatype(printTopN, 'printTopN', int)

        self.alg.fit(
            self.datablock.train[self.predictors],
            self.datablock.train[self.datablock.target])

        #Get algo_specific_values
        self.algo_specific_fit(printTopN)

        #Get predictions:
        for key, data in self.dp.items():
            self.predictions_class[key] = self.alg.predict(
                data[self.predictors])

        if self.probabilities_available:
            for key, data in self.dp.items():
                self.predictions_probabilities[key] = self.alg.predict_proba(
                    data[self.predictors])

        self.calc_model_characteristics(performCV)
        if printResults:
            self.printReport(printConfusionMatrix, printModelParameters)

    def calc_model_characteristics(self, performCV=True):
        # Determine key metrics to analyze the classification model. These
        # are stored in the classification_output series object belonging
        # to this class.
        for metric in [self.scoring_metric] + self.additional_display_metrics:
            #Compute for both test and train, but not predict:
            for key, data in self.dp.items():
                if key != 'predict':
                    name = '%s_%s' % (metric, key)
                    #Case where probabilities are passed as arguments
                    if base_classification.metrics_map[metric][2]:
                        self.classification_output[name] = \
                            base_classification.metrics_map[metric][0](
                                data[self.datablock.target],
                                self.predictions_probabilities[key])
                    #Case where class predictions are passed as arguments
                    else:
                        self.classification_output[name] = \
                            base_classification.metrics_map[metric][0](
                                data[self.datablock.target],
                                self.predictions_class[key])

        #Determine confusion matrices (independent of the metrics above):
        for key, data in self.dp.items():
            if key != 'predict':
                name = 'ConfusionMatrix_%s' % key
                self.classification_output[name] = pd.crosstab(
                    data[self.datablock.target],
                    self.predictions_class[key]
                ).to_string()

        if performCV:
            cv_score = self.KFold_CrossValidation(
                scoring_metric=self.scoring_metric)
        else:
            cv_score = {
                'mean_error': 0.0,
                'std_error': 0.0
            }

        self.classification_output['CVMethod'] = \
            'KFold - ' + str(self.cv_folds)
        self.classification_output['CVScore_mean'] = cv_score['mean_error']
        self.classification_output['CVScore_std'] = cv_score['std_error']
        self.classification_output['Predictors'] = str(self.predictors)

    def printReport(self, printConfusionMatrix, printModelParameters):
        # Print the metrics determined in the previous function.
print("\nModel Report") #Outpute the parameters used for modeling if printModelParameters: print('\nModel being built with the following parameters:') print(self.alg.get_params()) if printConfusionMatrix: for key,data in self.dp.items(): if key!='predict': print("\nConfusion Matrix for %s data:"%key) print(pd.crosstab( data[self.datablock.target], self.predictions_class[key]) ) print('Note: rows - actual; col - predicted') print("\nScoring Metric:") for key,data in self.dp.items(): if key!='predict': name = '%s_%s'%(self.scoring_metric,key) print("\t%s (%s): %s" % ( self.scoring_metric, key, "{0:.3%}".format(self.classification_output[name]) ) ) print("\nCV Score for Scoring Metric (%s):"%self.scoring_metric) print("\tMean - %f | Std - %f" % ( self.classification_output['CVScore_mean'], self.classification_output['CVScore_std']) ) if self.additional_display_metrics: print("\nAdditional Scoring Metrics:") for metric in self.additional_display_metrics: for key,data in self.dp.items(): if key!='predict': name = '%s_%s'%(metric,key) print("\t%s (%s): %s" % ( metric, key, "{0:.3%}".format( self.classification_output[name]) ) ) def plot_feature_importance(self, printTopN): num_print = len(self.feature_imp) if printTopN is not None: num_print = min(printTopN,len(self.feature_imp)) self.feature_imp.iloc[:num_print].plot( kind='bar', title='Feature Importances') plt.ylabel('Feature Importance Score') plt.show(block=False) def plot_abs_coefficients(self,coeff,printTopN): num_print = len(coeff) if printTopN is not None: num_print = min(printTopN,num_print) coeff_abs_sorted = sorted( abs(coeff).index, key=lambda x: abs(coeff_abs[x]), reverse=True ) coeff[coeff_abs_sorted].iloc[:num_print,].plot( kind='bar', title='Feature Coefficients (Sorted by Magnitude)' ) plt.ylabel('Magnitute of Coefficients') plt.show(block=False) def submission_proba( self, IDcol, proba_colnames,filename="Submission.csv"): """ """ submission = pd.DataFrame({ x: self.datablock.predict[x] for x in list(IDcol) }) if len(list(proba_colnames))>1: for i in range(len(proba_colnames)): submission[proba_colnames[i]] = self.test_pred_prob[:,i] else: submission[list(proba_colnames)[0]] = self.test_pred_prob[:,1] submission.to_csv(filename, index=False) def set_parameters(self, param=None, cv_folds=None, set_default=False): """ Set the parameters of the model. Only the parameters to be updated are required to be passed. Parameters __________ param : dict, default None A dictionary of key,value pairs where the keys are the parameters to be updated and values as the new value of those parameters. If None, no update performed Ignored if set_default iss True. cv_folds : int, default None Pass the number of CV folds to be used in the model. If None, no update performed. set_default : bool, default True if True, the model will be set to default parameters as defined in model definition by scikit-learn. Note that this will not affect the cv_folds parameter. 
""" #Check input self.check_datatype(param,'param',dict) self.check_datatype(set_default,'set_default',bool) if param: if set(param.keys()).issubset( set(base_classification.default_parameters.keys()) ): raise self.InvalidInput("""The parameters passed should be a subset of the model parameters""") if set_default: param = self.default_parameters self.alg.set_params(**param) self.model_output.update(pd.Series(param)) if cv_folds: self.cv_folds = cv_folds def export_model_base(self, IDcol, mstr): self.create_ensemble_dir() filename = os.path.join(os.getcwd(),'ensemble/%s_models.csv'%mstr) comb_series = self.classification_output.append( self.model_output, verify_integrity=True) if os.path.exists(filename): models = pd.read_csv(filename) mID = int(max(models['ModelID'])+1) else: mID = 1 models = pd.DataFrame(columns=comb_series.index) comb_series['ModelID'] = mID models = models.append(comb_series, ignore_index=True) models.to_csv(filename, index=False, float_format="%.5f") model_filename = os.path.join( os.getcwd(), 'ensemble/%s_%s.csv'%(mstr,str(mID)) ) self.submission(IDcol, model_filename) @abstractmethod def algo_specific_fit(self,printTopN): #Run algo-specific commands pass @abstractmethod def export_model(self,IDcol): #Export models pass ##################################################################### ##### LOGISTIC REGRESSION ##################################################################### class logistic_regression(base_classification): """ Create a Logistic Regression model using implementation from scikit-learn. Parameters ---------- data_block : object of type easyML.DataBlock An object of easyML's DataBlock class. You should first create an object of that class and then pass it as a parameter. predictors : list of strings, default [] A list of columns which are to be used as predictors (also called independent variables or features). The default value is an empty list because these need not always be defined at the time of class initialization. The set_predictors method can be used later but before creating any predictive model. cv_folds : int, default 5 The number of folds to be created while performing CV. This parameter can be adjusted later by passing using the set_parameters method scoring_metric : str, default 'accuracy' The scoring metric to be used for evaluating the model across the different functions available. The available options are - 'accuracy' - 'auc' - 'log_loss' - 'f1' - 'average_precision' additional_display_metrics : list of string, default [] A list of additional display metrics to be shown for the test and train dataframes in data_block. 
        Note:
        - These will be just shown for user reference and not actually
          used for model evaluation
        - The same available options as scoring_metric apply
    """
    default_parameters = {
        'C': 1.0,
        'tol': 0.0001,
        'solver': 'liblinear',
        'multi_class': 'ovr',
        'class_weight': 'balanced'
    }

    def __init__(
            self, data_block, predictors=[], cv_folds=10,
            scoring_metric='accuracy', additional_display_metrics=[]):

        base_classification.__init__(
            self, alg=LogisticRegression(), data_block=data_block,
            predictors=predictors, cv_folds=cv_folds,
            scoring_metric=scoring_metric,
            additional_display_metrics=additional_display_metrics
        )

        self.model_output = pd.Series(self.default_parameters)
        self.model_output['Coefficients'] = "-"

        #Set parameters to default values:
        self.set_parameters(set_default=True)

    def algo_specific_fit(self, printTopN):
        if self.num_target_class == 2:
            coeff = pd.Series(
                np.concatenate(
                    (self.alg.intercept_, self.alg.coef_[0])),
                index=["Intercept"] + self.predictors
            )
            self.plot_abs_coefficients(coeff, printTopN)
        else:
            cols = ['coef_class_%d' % i
                    for i in range(0, self.num_target_class)]
            coeff = pd.DataFrame(
                self.alg.coef_.T,
                columns=cols,
                index=self.predictors
            )
            print('\nCoefficients:')
            print(coeff)

        self.model_output['Coefficients'] = coeff.to_string()

    def export_model(self, IDcol):
        #Export the model into the model file as well as create a
        #submission with model index. This will be used for creating an
        #ensemble.
        self.export_model_base(IDcol, 'logistic_reg')

#####################################################################
##### DECISION TREE
#####################################################################

class decision_tree(base_classification):
    """ Create a Decision Tree model using the implementation from
    scikit-learn.

    Parameters
    ----------
    data_block : object of type easyML.DataBlock
        An object of easyML's DataBlock class. You should first create an
        object of that class and then pass it as a parameter.

    predictors : list of strings, default []
        A list of columns which are to be used as predictors (also called
        independent variables or features).
        The default value is an empty list because these need not always
        be defined at the time of class initialization. The set_predictors
        method can be used later but before creating any predictive model.

    cv_folds : int, default 10
        The number of folds to be created while performing CV.
        This parameter can be adjusted later using the set_parameters
        method

    scoring_metric : str, default 'accuracy'
        The scoring metric to be used for evaluating the model across the
        different functions available. The available options are
        - 'accuracy'
        - 'auc'
        - 'log_loss'
        - 'f1'
        - 'average_precision'

    additional_display_metrics : list of strings, default []
        A list of additional display metrics to be shown for the test and
        train dataframes in data_block.
        Note:
        - These will be just shown for user reference and not actually
          used for model evaluation
        - The same available options as scoring_metric apply
    """
    default_parameters = {
        'criterion': 'gini',
        'max_depth': None,
        'min_samples_split': 2,
        'min_samples_leaf': 1,
        'max_features': None,
        'random_state': None,
        'max_leaf_nodes': None,
        'class_weight': 'balanced'
    }

    def __init__(
            self, data_block, predictors=[], cv_folds=10,
            scoring_metric='accuracy', additional_display_metrics=[]):

        base_classification.__init__(
            self, alg=DecisionTreeClassifier(), data_block=data_block,
            predictors=predictors, cv_folds=cv_folds,
            scoring_metric=scoring_metric,
            additional_display_metrics=additional_display_metrics
        )

        self.model_output = pd.Series(self.default_parameters)
        self.model_output['Feature_Importance'] = "-"

        #Set parameters to default values:
        self.set_parameters(set_default=True)

    def algo_specific_fit(self, printTopN):
        #Print the feature importance scores table
        self.feature_imp = pd.Series(
            self.alg.feature_importances_,
            index=self.predictors
        ).sort_values(ascending=False)

        self.plot_feature_importance(printTopN)

        self.model_output['Feature_Importance'] = \
            self.feature_imp.to_string()

    def export_model(self, IDcol):
        #Export the model into the model file as well as create a
        #submission with model index. This will be used for creating an
        #ensemble.
        self.export_model_base(IDcol, 'decision_tree')

    ## UNDER DEVELOPMENT CODE FOR PRINTING TREES
    # def get_tree(self):
    #     return self.alg.tree_

    # Print the tree in visual format
    # Inputs:
    #     export_pdf - if True, a pdf will be exported with the
    #                  filename as specified in the pdf_name argument
    #     pdf_name - name of the pdf file if export_pdf is True
    # def printTree(self, export_pdf=True, file_name="Decision_Tree.pdf"):
    #     dot_data = StringIO()
    #     export_graphviz(
    #         self.alg, out_file=dot_data, feature_names=self.predictors,
    #         filled=True, rounded=True, special_characters=True)
    #     export_graphviz(
    #         self.alg, out_file='data.dot', feature_names=self.predictors,
    #         filled=True, rounded=True, special_characters=True
    #     )
    #     graph = pydot.graph_from_dot_data(dot_data.getvalue())
    #     if export_pdf:
    #         graph.write_pdf(file_name)
    #     return graph

#####################################################################
##### RANDOM FOREST
#####################################################################

class random_forest(base_classification):
    """ Create a Random Forest model using the implementation from
    scikit-learn.

    Parameters
    ----------
    data_block : object of type easyML.DataBlock
        An object of easyML's DataBlock class. You should first create an
        object of that class and then pass it as a parameter.

    predictors : list of strings, default []
        A list of columns which are to be used as predictors (also called
        independent variables or features).
        The default value is an empty list because these need not always
        be defined at the time of class initialization. The set_predictors
        method can be used later but before creating any predictive model.

    cv_folds : int, default 10
        The number of folds to be created while performing CV.
        This parameter can be adjusted later using the set_parameters
        method

    scoring_metric : str, default 'accuracy'
        The scoring metric to be used for evaluating the model across the
        different functions available. The available options are
        - 'accuracy'
        - 'auc'
        - 'log_loss'
        - 'f1'
        - 'average_precision'

    additional_display_metrics : list of strings, default []
        A list of additional display metrics to be shown for the test and
        train dataframes in data_block.
        Note:
        - These will be just shown for user reference and not actually
          used for model evaluation
        - The same available options as scoring_metric apply
    """
    default_parameters = {
        'n_estimators': 10,
        'criterion': 'gini',
        'max_depth': None,
        'min_samples_split': 2,
        'min_samples_leaf': 1,
        'max_features': 'auto',
        'max_leaf_nodes': None,
        'oob_score': False,
        'random_state': None,
        'class_weight': 'balanced',
        'n_jobs': 1
    }

    def __init__(
            self, data_block, predictors=[], cv_folds=10,
            scoring_metric='accuracy', additional_display_metrics=[]):

        base_classification.__init__(
            self, alg=RandomForestClassifier(), data_block=data_block,
            predictors=predictors, cv_folds=cv_folds,
            scoring_metric=scoring_metric,
            additional_display_metrics=additional_display_metrics
        )

        self.model_output = pd.Series(self.default_parameters)
        self.model_output['Feature_Importance'] = "-"
        self.model_output['OOB_Score'] = "-"

        #Set parameters to default values:
        self.set_parameters(set_default=True)

    def algo_specific_fit(self, printTopN):
        #Print the feature importance scores table
        self.feature_imp = pd.Series(
            self.alg.feature_importances_,
            index=self.predictors
        ).sort_values(ascending=False)

        self.plot_feature_importance(printTopN)

        self.model_output['Feature_Importance'] = \
            self.feature_imp.to_string()

        if self.model_output['oob_score']:
            print('OOB Score : %f' % self.alg.oob_score_)
            self.model_output['OOB_Score'] = self.alg.oob_score_

    def export_model(self, IDcol):
        #Export the model into the model file as well as create a
        #submission with model index. This will be used for creating an
        #ensemble.
        self.export_model_base(IDcol, 'random_forest')

#####################################################################
##### EXTRA TREES FOREST
#####################################################################

class extra_trees(base_classification):
    """ Create an Extra Trees Forest model using the implementation from
    scikit-learn.

    Parameters
    ----------
    data_block : object of type easyML.DataBlock
        An object of easyML's DataBlock class. You should first create an
        object of that class and then pass it as a parameter.

    predictors : list of strings, default []
        A list of columns which are to be used as predictors (also called
        independent variables or features).
        The default value is an empty list because these need not always
        be defined at the time of class initialization. The set_predictors
        method can be used later but before creating any predictive model.

    cv_folds : int, default 10
        The number of folds to be created while performing CV.
        This parameter can be adjusted later using the set_parameters
        method

    scoring_metric : str, default 'accuracy'
        The scoring metric to be used for evaluating the model across the
        different functions available. The available options are
        - 'accuracy'
        - 'auc'
        - 'log_loss'
        - 'f1'
        - 'average_precision'

    additional_display_metrics : list of strings, default []
        A list of additional display metrics to be shown for the test and
        train dataframes in data_block.
        Note:
        - These will be just shown for user reference and not actually
          used for model evaluation
        - The same available options as scoring_metric apply
    """
    default_parameters = {
        'n_estimators': 10,
        'criterion': 'gini',
        'max_depth': None,
        'min_samples_split': 2,
        'min_samples_leaf': 1,
        'max_features': 'auto',
        'max_leaf_nodes': None,
        'oob_score': False,
        'random_state': None,
        'class_weight': 'balanced',
        'n_jobs': 1
    }

    def __init__(
            self, data_block, predictors=[], cv_folds=10,
            scoring_metric='accuracy', additional_display_metrics=[]):

        base_classification.__init__(
            self, alg=ExtraTreesClassifier(), data_block=data_block,
            predictors=predictors, cv_folds=cv_folds,
            scoring_metric=scoring_metric,
            additional_display_metrics=additional_display_metrics)

        self.model_output = pd.Series(self.default_parameters)
        self.model_output['Feature_Importance'] = "-"
        self.model_output['OOB_Score'] = "-"

        #Set parameters to default values:
        self.set_parameters(set_default=True)

    def algo_specific_fit(self, printTopN):
        #Print the feature importance scores table
        self.feature_imp = pd.Series(
            self.alg.feature_importances_,
            index=self.predictors
        ).sort_values(ascending=False)

        self.plot_feature_importance(printTopN)

        self.model_output['Feature_Importance'] = \
            self.feature_imp.to_string()

        if self.model_output['oob_score']:
            print('OOB Score : %f' % self.alg.oob_score_)
            self.model_output['OOB_Score'] = self.alg.oob_score_

    def export_model(self, IDcol):
        #Export the model into the model file as well as create a
        #submission with model index. This will be used for creating an
        #ensemble.
        self.export_model_base(IDcol, 'extra_trees')

#####################################################################
##### ADABOOST CLASSIFICATION
#####################################################################

class adaboost(base_classification):
    """ Create an AdaBoost model using the implementation from
    scikit-learn.

    Parameters
    ----------
    data_block : object of type easyML.DataBlock
        An object of easyML's DataBlock class. You should first create an
        object of that class and then pass it as a parameter.

    predictors : list of strings, default []
        A list of columns which are to be used as predictors (also called
        independent variables or features).
        The default value is an empty list because these need not always
        be defined at the time of class initialization. The set_predictors
        method can be used later but before creating any predictive model.

    cv_folds : int, default 10
        The number of folds to be created while performing CV.
        This parameter can be adjusted later using the set_parameters
        method

    scoring_metric : str, default 'accuracy'
        The scoring metric to be used for evaluating the model across the
        different functions available. The available options are
        - 'accuracy'
        - 'auc'
        - 'log_loss'
        - 'f1'
        - 'average_precision'

    additional_display_metrics : list of strings, default []
        A list of additional display metrics to be shown for the test and
        train dataframes in data_block.
        Note:
        - These will be just shown for user reference and not actually
          used for model evaluation
        - The same available options as scoring_metric apply
    """
    default_parameters = {
        'n_estimators': 50,
        'learning_rate': 1.0
    }

    def __init__(
            self, data_block, predictors=[], cv_folds=10,
            scoring_metric='accuracy', additional_display_metrics=[]):

        base_classification.__init__(
            self, alg=AdaBoostClassifier(), data_block=data_block,
            predictors=predictors, cv_folds=cv_folds,
            scoring_metric=scoring_metric,
            additional_display_metrics=additional_display_metrics
        )

        self.model_output = pd.Series(self.default_parameters)
        self.model_output['Feature_Importance'] = "-"

        #Set parameters to default values:
        self.set_parameters(set_default=True)

    def algo_specific_fit(self, printTopN):
        #Print the feature importance scores table
        self.feature_imp = pd.Series(
            self.alg.feature_importances_,
            index=self.predictors
        ).sort_values(ascending=False)

        self.plot_feature_importance(printTopN)

        self.model_output['Feature_Importance'] = \
            self.feature_imp.to_string()

        #Plot the estimator errors and weights across boosting iterations
        plt.xlabel("AdaBoost Estimator")
        plt.ylabel("Estimator Error")
        plt.plot(
            range(1, int(self.model_output['n_estimators']) + 1),
            self.alg.estimator_errors_
        )
        plt.plot(
            range(1, int(self.model_output['n_estimators']) + 1),
            self.alg.estimator_weights_
        )
        plt.legend(
            ['estimator_errors', 'estimator_weights'],
            loc='upper left'
        )
        plt.show(block=False)

    def export_model(self, IDcol):
        #Export the model into the model file as well as create a
        #submission with model index. This will be used for creating an
        #ensemble.
        self.export_model_base(IDcol, 'adaboost')

#####################################################################
##### GRADIENT BOOSTING MACHINE
#####################################################################

class gradient_boosting_machine(base_classification):
    """ Create a GBM (Gradient Boosting Machine) model using the
    implementation from scikit-learn.

    Parameters
    ----------
    data_block : object of type easyML.DataBlock
        An object of easyML's DataBlock class. You should first create an
        object of that class and then pass it as a parameter.

    predictors : list of strings, default []
        A list of columns which are to be used as predictors (also called
        independent variables or features).
        The default value is an empty list because these need not always
        be defined at the time of class initialization. The set_predictors
        method can be used later but before creating any predictive model.

    cv_folds : int, default 10
        The number of folds to be created while performing CV.
        This parameter can be adjusted later using the set_parameters
        method

    scoring_metric : str, default 'accuracy'
        The scoring metric to be used for evaluating the model across the
        different functions available. The available options are
        - 'accuracy'
        - 'auc'
        - 'log_loss'
        - 'f1'
        - 'average_precision'

    additional_display_metrics : list of strings, default []
        A list of additional display metrics to be shown for the test and
        train dataframes in data_block.
        Note:
        - These will be just shown for user reference and not actually
          used for model evaluation
        - The same available options as scoring_metric apply
    """
    default_parameters = {
        'loss': 'deviance',
        'learning_rate': 0.1,
        'n_estimators': 100,
        'subsample': 1.0,
        'min_samples_split': 2,
        'min_samples_leaf': 1,
        'max_depth': 3,
        'init': None,
        'random_state': None,
        'max_features': None,
        'verbose': 0,
        'max_leaf_nodes': None,
        'warm_start': False,
        'presort': 'auto'
    }

    def __init__(
            self, data_block, predictors=[], cv_folds=10,
            scoring_metric='accuracy', additional_display_metrics=[]):

        base_classification.__init__(
            self, alg=GradientBoostingClassifier(), data_block=data_block,
            predictors=predictors, cv_folds=cv_folds,
            scoring_metric=scoring_metric,
            additional_display_metrics=additional_display_metrics
        )

        self.model_output = pd.Series(self.default_parameters)
        self.model_output['Feature_Importance'] = "-"

        #Set parameters to default values:
        self.set_parameters(set_default=True)

    def algo_specific_fit(self, printTopN):
        #Print the feature importance scores table
        self.feature_imp = pd.Series(
            self.alg.feature_importances_,
            index=self.predictors
        ).sort_values(ascending=False)

        self.plot_feature_importance(printTopN)

        self.model_output['Feature_Importance'] = \
            self.feature_imp.to_string()

        #Plot OOB estimates if subsample < 1:
        if self.model_output['subsample'] < 1:
            plt.xlabel("GBM Iteration")
            plt.ylabel("Score")
            plt.plot(
                range(1, self.model_output['n_estimators'] + 1),
                self.alg.oob_improvement_
            )
            #Plot the training score as well so the legend matches the
            #plotted series (the original only plotted oob_improvement_):
            plt.plot(
                range(1, self.model_output['n_estimators'] + 1),
                self.alg.train_score_
            )
            plt.legend(['oob_improvement_', 'train_score_'],
                       loc='upper left')
            plt.show(block=False)

    def export_model(self, IDcol):
        #Export the model into the model file as well as create a
        #submission with model index. This will be used for creating an
        #ensemble.
        self.export_model_base(IDcol, 'gbm')

#####################################################################
##### Support Vector Classifier
#####################################################################

class linear_svm(base_classification):
    """ Create a Linear Support Vector Machine model using the
    implementation from scikit-learn.

    Parameters
    ----------
    data_block : object of type easyML.DataBlock
        An object of easyML's DataBlock class. You should first create an
        object of that class and then pass it as a parameter.

    predictors : list of strings, default []
        A list of columns which are to be used as predictors (also called
        independent variables or features).
        The default value is an empty list because these need not always
        be defined at the time of class initialization. The set_predictors
        method can be used later but before creating any predictive model.

    cv_folds : int, default 10
        The number of folds to be created while performing CV.
        This parameter can be adjusted later using the set_parameters
        method

    scoring_metric : str, default 'accuracy'
        The scoring metric to be used for evaluating the model across the
        different functions available. The available options are
        - 'accuracy'
        - 'auc'
        - 'log_loss'
        - 'f1'
        - 'average_precision'

    additional_display_metrics : list of strings, default []
        A list of additional display metrics to be shown for the test and
        train dataframes in data_block.
        Note:
        - These will be just shown for user reference and not actually
          used for model evaluation
        - The same available options as scoring_metric apply
    """
    default_parameters = {
        'C': 1.0,
        'kernel': 'linear',  #modified, not the sklearn default
        'degree': 3,
        'gamma': 'auto',
        'coef0': 0.0,
        'shrinking': True,
        'probability': False,
        'tol': 0.001,
        'cache_size': 200,
        'class_weight': None,
        'verbose': False,
        'max_iter': -1,
        'decision_function_shape': None,
        'random_state': None
    }

    def __init__(
            self, data_block, predictors=[], cv_folds=10,
            scoring_metric='accuracy', additional_display_metrics=[]):

        base_classification.__init__(
            self, alg=SVC(), data_block=data_block, predictors=predictors,
            cv_folds=cv_folds, scoring_metric=scoring_metric,
            additional_display_metrics=additional_display_metrics
        )

        self.model_output = pd.Series(self.default_parameters)
        self.model_output['Coefficients'] = "-"

        #Set parameters to default values:
        self.set_parameters(set_default=True)

        #Check if probabilities are enabled:
        if not self.alg.get_params()['probability']:
            self.probabilities_available = False

    def algo_specific_fit(self, printTopN):
        if self.num_target_class == 2:
            coeff = pd.Series(
                np.concatenate((self.alg.intercept_, self.alg.coef_[0])),
                index=["Intercept"] + self.predictors
            )
            #Plot the chart of coefficient magnitudes
            self.plot_abs_coefficients(coeff, printTopN)
        else:
            cols = ['coef_class_%d' % i
                    for i in range(0, self.num_target_class)]
            coeff = pd.DataFrame(
                self.alg.coef_.T,
                columns=cols,
                index=self.predictors
            )
            print('\nCoefficients:')
            print(coeff)

        self.model_output['Coefficients'] = coeff.to_string()

    def export_model(self, IDcol):
        #Export the model into the model file as well as create a
        #submission with model index. This will be used for creating an
        #ensemble.
        self.export_model_base(IDcol, 'linear_svm')

#####################################################################
##### XGBOOST ALGORITHM (UNDER DEVELOPMENT)
#####################################################################

"""
#Define the class similar to the overall classification class
class XGBoost(base_classification):
    def __init__(self,data_block, predictors, cv_folds=5,scoring_metric_skl='accuracy', scoring_metric_xgb='error'):
        base_classification.__init__(self, alg=XGBClassifier(), data_block=data_block, predictors=predictors,cv_folds=cv_folds,scoring_metric=scoring_metric_skl)

        #Define default parameters on your own:
        self.default_parameters = {
            'max_depth':3, 'learning_rate':0.1,
            'n_estimators':100, 'silent':True,
            'objective':"binary:logistic",
            'nthread':1, 'gamma':0, 'min_child_weight':1,
            'max_delta_step':0, 'subsample':1, 'colsample_bytree':1,
            'colsample_bylevel':1, 'reg_alpha':0, 'reg_lambda':1,
            'scale_pos_weight':1, 'base_score':0.5, 'seed':0, 'missing':None
        }
        self.model_output = pd.Series(self.default_parameters)

        #create DMatrix with nan as missing by default. If this is changed later, the matrices are re-calculated.
        #If missing is not set, an error is raised when NaN is present in the data.
        self.xgtrain = xgb.DMatrix(self.datablock.train[self.predictors].values, label=self.datablock.train[self.datablock.target].values, missing=np.nan)
        self.xgtest = xgb.DMatrix(self.datablock.predict[self.predictors].values, missing=np.nan)

        self.num_class = 2
        self.n_estimators = 10
        self.eval_metric = 'error'

        self.train_predictions = []
        self.train_pred_prob = []
        self.test_predictions = []
        self.test_pred_prob = []

        self.num_target_class = len(data_train[target].unique())

        #define scoring metric:
        self.scoring_metric_skl = scoring_metric_skl
        # if scoring_metric_xgb=='f1':
        #     self.scoring_metric_xgb = self.xg_f1
        # else:
        self.scoring_metric_xgb = scoring_metric_xgb

        #Define a Series object to store generic classification model outcomes:
        self.classification_output=pd.Series(index=['ModelID','Accuracy','CVScore_mean','CVScore_std','SpecifiedMetric',
            'ActualScore (manual entry)','CVMethod','ConfusionMatrix','Predictors'])

        #feature importance (g_scores)
        self.feature_imp = None
        self.model_output['Feature_Importance'] = "-"

        #Set parameters to default values:
        # self.set_parameters(set_default=True)

    #Define custom f1 score metric:
    def xg_f1(self,y,t):
        t = t.get_label()
        y_bin = [1. if y_cont > 0.5 else 0. for y_cont in y] # binarizing the output
        return 'f1',metrics.f1_score(t,y_bin)

    # Set the parameters of the model.
    # Note:
    #     > only the parameters to be updated are required to be passed
    #     > if set_default is True, the passed parameters are ignored and the default parameters, as defined in the scikit-learn module, are set
    def set_parameters(self, param=None, set_default=False):
        if set_default:
            param = self.default_parameters
        self.alg.set_params(**param)
        self.model_output.update(pd.Series(param))

        if 'missing' in param:
            #update DMatrix with missing:
            self.xgtrain = xgb.DMatrix(self.datablock.train[self.predictors].values, label=self.datablock.train[self.datablock.target].values, missing=param['missing'])
            self.xgtest = xgb.DMatrix(self.datablock.predict[self.predictors].values, missing=param['missing'])

        if 'num_class' in param:
            self.num_class = param['num_class']

        if 'cv_folds' in param:
            self.cv_folds = param['cv_folds']

    # def set_feature_importance(self):
    #     fs = self.alg.booster().get_fscore()
    #     ftimp = pd.DataFrame({
    #         'feature': fs.keys(),
    #         'importance_Score': fs.values()
    #     })
    #     ftimp['predictor'] = ftimp['feature'].apply(lambda x: self.predictors[int(x[1:])])
    #     self.feature_imp = pd.Series(ftimp['importance_Score'].values, index=ftimp['predictor'].values)

    #Fit the model using predictors and parameters specified before.
    # Inputs:
    #     performCV - if True, CV is performed
    def modelfit(self, performCV=True, useTrainCV=False, TrainCVFolds=5, early_stopping_rounds=20, show_progress=True, printTopN='all'):

        if useTrainCV:
            xgb_param = self.alg.get_xgb_params()
            if self.num_class>2:
                xgb_param['num_class']=self.num_class
            if self.scoring_metric_xgb=='f1':
                cvresult = xgb.cv(xgb_param,self.xgtrain, num_boost_round=self.alg.get_params()['n_estimators'], nfold=self.cv_folds,
                    metrics=['auc'],feval=self.xg_f1,early_stopping_rounds=early_stopping_rounds, show_progress=show_progress)
            else:
                cvresult = xgb.cv(xgb_param,self.xgtrain, num_boost_round=self.alg.get_params()['n_estimators'], nfold=self.cv_folds,
                    metrics=self.scoring_metric_xgb, early_stopping_rounds=early_stopping_rounds, show_progress=show_progress)
            self.alg.set_params(n_estimators=cvresult.shape[0])

        print(self.alg.get_params())
        obj = self.alg.fit(self.datablock.train[self.predictors], self.datablock.train[self.datablock.target], eval_metric=self.eval_metric)

        #Print feature importance
        # self.set_feature_importance()
        self.feature_imp = pd.Series(self.alg.booster().get_fscore()).sort_values(ascending=False)
        num_print = len(self.feature_imp)
        if printTopN is not None:
            if printTopN != 'all':
                num_print = min(printTopN,len(self.feature_imp))
            self.feature_imp.iloc[:num_print].plot(kind='bar', title='Feature Importances')
            plt.ylabel('Feature Importance Score')
            plt.show(block=False)
        self.model_output['Feature_Importance'] = self.feature_imp.to_string()

        #Get train predictions:
        self.train_predictions = self.alg.predict(self.datablock.train[self.predictors])
        self.train_pred_prob = self.alg.predict_proba(self.datablock.train[self.predictors])

        #Get test predictions:
        self.test_predictions = self.alg.predict(self.datablock.predict[self.predictors])
        self.test_pred_prob = self.alg.predict_proba(self.datablock.predict[self.predictors])

        self.calc_model_characteristics(performCV)
        self.printReport()

    #Export the model into the model file as well as create a submission with model index. This will be used for creating an ensemble.
    def export_model(self, IDcol):
        self.create_ensemble_dir()
        filename = os.path.join(os.getcwd(),'ensemble/xgboost_models.csv')
        comb_series = self.classification_output.append(self.model_output, verify_integrity=True)

        if os.path.exists(filename):
            models = pd.read_csv(filename)
            mID = int(max(models['ModelID'])+1)
        else:
            mID = 1
            models = pd.DataFrame(columns=comb_series.index)

        comb_series['ModelID'] = mID
        models = models.append(comb_series, ignore_index=True)

        models.to_csv(filename, index=False, float_format="%.5f")
        model_filename = os.path.join(os.getcwd(),'ensemble/xgboost_'+str(mID)+'.csv')
        self.submission(IDcol, model_filename)
"""

#####################################################################
##### ENSEMBLE (UNDER DEVELOPMENT)
#####################################################################

"""
#Class for creating an ensemble model using the exported files from previous classes
class Ensemble_Classification(object):
    #initialize the object with target variable
    def __init__(self, target, IDcol):
        #store the target column name directly (no DataBlock is used here)
        self.target = target
        self.data = None
        self.relationMatrix_chi2 = None
        self.relationMatrix_diff = None
        self.IDcol = IDcol

    #create the ensemble data
    # Inputs:
    #     models - dictionary with key as the model name and values as a list containing the model numbers to be ensembled
    # Note: all the models in the list specified should be present in the ensemble folder.
    # Please cross-check once.
    def create_ensemble_data(self, models):
        self.data = None
        for key, value in models.items():
            # print(key, value)
            for i in value:
                fname = key + '_' + str(i)
                fpath = os.path.join(os.getcwd(), 'ensemble', fname + '.csv')
                tempdata = pd.read_csv(fpath)
                tempdata = tempdata.rename(columns={self.target: fname})
                if self.data is None:
                    self.data = tempdata
                else:
                    self.data = self.data.merge(tempdata, on=self.data.columns[0])

    # Get the data being used for the ensemble
    def get_ensemble_data(self):
        return self.data

    # Run a chi-square test of independence between different model outputs to check which
    # combination of models will make a better ensemble.
    # Note: models with high correlation should not be combined together.
    def chisq_independence(self, col1, col2, verbose=False):
        contingencyTable = pd.crosstab(col1, col2, margins=True)

        if len(col1) / ((contingencyTable.shape[0] - 1) * (contingencyTable.shape[1] - 1)) <= 5:
            return "TMC"

        expected = contingencyTable.copy()
        total = contingencyTable.loc["All", "All"]
        for m in contingencyTable.index:
            for n in contingencyTable.columns:
                expected.loc[m, n] = contingencyTable.loc[m, "All"] * contingencyTable.loc["All", n] / float(total)

        if verbose:
            print('\n\nAnalysis of models: %s and %s' % (col1.name, col2.name))
            print('Contingency Table:')
            print(contingencyTable)
            # print('\nExpected Frequency Table:')
            # print(expected)

        observed_frq = contingencyTable.iloc[:-1, :-1].values.ravel()
        expected_frq = expected.iloc[:-1, :-1].values.ravel()

        numless1 = len(expected_frq[expected_frq < 1])
        perless5 = len(expected_frq[expected_frq < 5]) / len(expected_frq)

        # Adjust the DOF so the 1D chi-square applies to matrix-shaped data;
        # -1 in rows and columns because of the 'All' row and column
        matrixadj = (contingencyTable.shape[0] - 1) + (contingencyTable.shape[1] - 1) - 2
        pval = np.round(chisquare(observed_frq, expected_frq, ddof=matrixadj)[1], 3)

        if numless1 > 0 or perless5 >= 0.2:
            return str(pval) + "*"
        else:
            return pval

    # Create the relational matrix between models
    def check_ch2(self, verbose=False):
        col = self.data.columns[1:]
        self.relationMatrix_chi2 = pd.DataFrame(index=col, columns=col)
        for i in range(len(col)):
            for j in range(i, len(col)):
                if i == j:
                    self.relationMatrix_chi2.loc[col[i], col[j]] = 1
                else:
                    pval = self.chisq_independence(self.data.iloc[:, i + 1], self.data.iloc[:, j + 1], verbose=verbose)
                    self.relationMatrix_chi2.loc[col[j], col[i]] = pval
                    self.relationMatrix_chi2.loc[col[i], col[j]] = pval

        print('\n\n Relational Matrix (based on Chi-square test):')
        print(self.relationMatrix_chi2)

    def check_diff(self):
        col = self.data.columns[1:]
        self.relationMatrix_diff = pd.DataFrame(index=col, columns=col)
        nrow = self.data.shape[0]
        for i in range(len(col)):
            for j in range(i, len(col)):
                if i == j:
                    self.relationMatrix_diff.loc[col[i], col[j]] = '-'
                else:
                    pval = "{0:.2%}".format(sum(np.abs(self.data.iloc[:, i + 1] - self.data.iloc[:, j + 1])) / float(nrow))
                    self.relationMatrix_diff.loc[col[j], col[i]] = pval
                    self.relationMatrix_diff.loc[col[i], col[j]] = pval

        print('\n\n Relational Matrix (based on percentage difference):')
        print(self.relationMatrix_diff)

    # Generate the submission for the ensembled model by combining the mentioned models.
    # Inputs:
    #   models_to_use - list of model names to use; if None, all models will be used
    #   filename - the filename of the final submission
    # Note: the number of models should be odd to allow a clear winner in terms of mode;
    # otherwise the first element will be chosen
    def submission(self, models_to_use=None, filename="Submission_ensemble.csv"):

        # If models_to_use is None then use all (excluding the ID column), else filter:
        if models_to_use is None:
            data_ens = self.data.iloc[:, 1:]
        else:
            data_ens = self.data[models_to_use]

        def mode_ens(x):
            return int(mode(x).mode[0])

        ensemble_output = data_ens.apply(mode_ens, axis=1)
        submission = pd.DataFrame({
            self.IDcol: self.data.iloc[:, 0],
            self.target: ensemble_output
        })
        submission.to_csv(filename, index=False)
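# For reference, a minimal self-contained sketch of the majority-vote rule that
# Ensemble_Classification.submission() applies. The frame and column names are
# made up for illustration, and the .mode[0] indexing assumes the same older
# scipy behavior the class itself relies on. An odd number of models avoids ties.
import pandas as pd
from scipy.stats import mode

preds = pd.DataFrame({
    'xgboost_1': [0, 1, 1, 0],
    'xgboost_2': [0, 1, 0, 0],
    'xgboost_3': [1, 1, 1, 0],
})

# Majority vote per row, exactly like mode_ens() above
majority = preds.apply(lambda row: int(mode(row).mode[0]), axis=1)
print(majority.tolist())  # [0, 1, 1, 0]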
Autumn isn't the prettiest season in the Bay Area. The hot, cloudless days of summer have given way to parched hills, crackling brush and dusty trails that mimic a sheet of Teflon coated in corn starch. The leaves of oak trees turn a sickly shade of green; maples might change yellow in November if the leaves don't dry up and fall off first. Even the poison oak, which turns a stunning shade of crimson in the late summer, has started to drop singed leaves that will soon disintegrate to itchy dust. Grass has withered, creeks have dried up, and even the redwoods seem to sag with a certain weariness. Thirst. Autumn is a thirsty season. We're all just waiting for the winter rains to revitalize the trees, green up the hills, and add some tack to these slippery chunder trails. Revitalization. It feels far away, yet inevitable, like the waning daylight and winter clouds. In September my spirit felt withered; sometimes I'd go to quiet places and ponder the reasons why. The motions were there but the zest for the activities I love was missing. "I'm still recovering," I'd tell myself, but I knew it cut deeper than that. "I met demons out there, in the shadows of those beautiful Alps. They were heartless and cruel, they showed me the worst sides of myself, the apathy and hopelessness and unfocused fear. They brought out the worst in me and I did not vanquish them. No, they won." So I went through the motions. I did some work. I did some writing. I did some oblivious staring off into space. I'd get out when I could. Some nights when I woke up at odd hours, I'd walk out on the deck, lean over the railing and gaze at stars. Maybe you recall when I wrote a few weeks ago about seeing a shooting star in France and believing that it was a person who had fallen off the mountain? That memory still haunts me. Even though I know it wasn't real. The mind is strange like that. In the afternoons, I'd embark on my daily exercise — mellow rides or runs. I didn't feel great, and I didn't push it, but I had a strong sense that couch sitting was not going to aid in my recovery. After all, most of the damage was not in my body, but my mind. Going outside for at least a short time every day was the best course of action. So I cranked out some heavy-legged rides, and plodded through the most basic newbie running pains, like IT band soreness and side stitches. The weekend came, and Beat installed a blingy new drivetrain on the Fatback, so of course I had to go for a fat-bike ride. Beat and I rode four and a half hours over the parched hills, churning up dust and tentatively reintroducing ourselves to loose descents. I cranked Fatback's new teeny-tiny granny gear up a steep hill until I felt dizzy and pukey and had to get off and hike. I made it a little farther than Beat did on his medium-geared singlespeed ... but not much. "I'm out of shape," I'd shrug. "I'll get it back." "2013," I remarked to Beat, "has not been my best year for racing." I'd think about this year's races and wonder where they'd left me. I'd chat with friends about training rides and remark that I sometimes regretted having such a thorough record of my routine activities. "All of my best times are more than two years old," I'd lament. "I really was a better cyclist back in 2011, and not that much worse of a runner." What's happening? Age? Too much racing? Or am I just losing heart? Today after a productive but mentally exhausting morning of writing, I decided to head out an hour early on the road bike. 
With about three hours to burn, I opted to pedal a favorite loop, from home up Highway 9, along Skyline Drive to Page Mill and back. The ride is about 34 miles with 3,700 feet of climbing. It's a climby loop with bone-shaking chipseal and a hairpin descent, and consistently beautiful even amid the bland hues of autumn. I used to ride this route frequently when I first moved to California in 2011, but it's become more of a once-a-month-or-less outing these days.

As I spun along Foothill Boulevard, I noticed my legs felt peppy today. Maybe peppy is not the right word, but they felt a bit less like chunks of cement. I rolled along the shoreline of what was once Stevens Creek Reservoir but is now a stagnant puddle amid a cracked mudflat, and reflected on memories of the route. "Back in 2011 I used to ride in the rain. There was sleet, actual sleet, on Skyline, remember that? Oh, I miss the rain here. It needs to rain."

My best time on the whole loop was something in the low 2:20s. I remembered that, and I wondered how much longer it would take me to ride the route today. But as I spun up the Mount Eden climb, that tiny little voice that I so seldom listen to — I'll call her my competitive spirit — said, "Screw 2011 Jill and her strong cycling legs. We could ride faster if we wanted to." And that was that. It was on!

I had soft-pedaled most of the first six miles, so I'd have to make up some time. The Mount Eden descent is mostly broken pavement, but it was as good a place as any to lay on the throttle. There was one bucking bounce that nearly launched my body skyward, but soon enough I was settling into the 2,500-foot climb to the top of Skyline Drive. I tried to hit that sweet spot of efficient climbing, where a bit of bile burns in my throat but I don't have to resort to open-mouth breathing. It felt like I reached the crest in no time, and then there was the chipseal to contend with. My wrists won't soon forgive me, and there were two pavement crack bounces that convinced me I'd squeezed all of the air out of my tubeless rear tire (thankfully I did not). But I ignored the rough surface and throttled that rolling traverse before turning onto Page Mill.

Last weekend there was tons of loose gravel on Page Mill, and rangers have told me horror stories about peeling injured and bloody road bikers off the pavement, so I took the descent easy. But back on Foothill it was on again, cranking the big ring past a long line of backed-up rush hour traffic. Back at my home intersection, I hit stop on my watch and looked at it for the first time since I consciously started "racing." 2:17:41!

At home I did some digging in Garmin Connect and concluded that 2:17 is a new PR, possibly my first "frequently ridden cycling route" PR of 2013. Of course I had to go upload my track to Strava to check my status against the geeky Strava'ing subgroup of the Bay Area road cyclist community. Moved up to ninth on the popular Highway 9 climb segment. That's definitely an improvement over 2011 Jill's standing, I'm certain. Yay.

Another small victory in the battle of matter over mind. Sometimes all it takes is acting strong to feel strong, which in turn leads to becoming strong.

I'd like to suggest another challenge for you: Randonneuring and Paris-Brest-Paris (PBP) in 2015. PBP only occurs once every four years so it's not an event where you can say: "Oh, I'll do it next year." PBP is the "Olympics" of brevets.
And it requires qualifiers in the PBP year: complete a series consisting of 200km, 300km, 400km, and 600km before June, me thinks. One of the challenges of a brevet is that 50% of it is 90% mental. Right up your alley. Instead of "Being John Malkovich", I'd enjoy your writing about PBP so I could "Be Jill Outside" on the journey. Plus, it's France, and the experience of the helpful countryside folks is a treasure to wrap your arms and heart around.

My running times are so much slower now. I don't know if it is age or really because I don't care. Probably a combination.

I really think you should do triathlons!

I would take dry hills over ten days of cold rain any day.

Jill - have you thought about riding this route in the opposite direction? Warm up on Foothill, the Page Mill climb (which I liked more than descending it), the Skyline rollers and down on 9, which is much safer going down at 45mph when cars do 35, IMHO, than climbing. And it is a great surface for a fast downhill. Then the Pierce and Mt. Eden climbs as a cherry before cruising down from the reservoir?

Randonneuring — I've considered it, but for whatever reason haven't made the leap to organized events yet. I go through phases with my enthusiasm for riding long distances on pavement, but right now the idea seems awesome. Just as long as there are lots of hills thrown in. It's like fast touring.

With triathlon, I'm not a swimmer. I can chop through water but I have no technique, and just enough pathological fear of water to struggle with putting my face in the water. I realize I can get over this fear and learn technique. But dirt triathlons are more limited and I have no interest in running on pavement. It's fine for "commuting" or a quick outing in places where there's limited trail access, but for the most part road running is just not for me.

Jan — I have ridden that loop in both directions. It's a great workout because the climb up Page Mill is tough, and then you're generally climbing Skyline rather than descending. But I don't like the descent down Highway 9 because I never go the same speed as cars and there are a lot more cars on Highway 9. Climbing Highway 9, on the other hand, is very enjoyable because it's a more even grade and there's a nice, wide shoulder, unlike climbing narrow Page Mill alongside cars. There's never much traffic on Page Mill, at least on weekdays, so I feel more comfortable descending that road even though it's slower. When I do descend Highway 9, I always cheat and drop down Redwood Gulch rather than make the climb over Mount Eden. I climb that way too when I'm feeling saucy, but it's a crazy steep grade for the first half mile, as I'm sure you remember.

And I do remember what weeks of cold rain is like. I sympathize with you.

You said: "..haven't made the leap to organized events..." Please ejukate the ignorant me, but what's the difference tween PBP (self supported, control times) and an event like PTL (organized, cut-off times at controls, etc)?

I meant organized brevets, specifically. I've looked into them, including some fantastic-looking 200K and 300K routes north of San Francisco. But I haven't showed up at the start line yet.

P.S. ooops me thinks you were talkn about "paved road" organized events. Yah? Got it.. our posts collided. Also Santa Cruz and Davis area brevets. Hope you add em to your to-do list. PBP is awesome.
from feeluown.library import AbstractProvider from feeluown.models import SongModel, ArtistModel, \ AlbumModel, PlaylistModel, UserModel, SearchModel from .base import try_cast_model_to_v1 class ModelSerializerMixin: def _get_items(self, model): model = try_cast_model_to_v1(model) # initialize fields that need to be serialized # if as_line option is set, we always use fields_display if self.opt_as_line or self.opt_brief: fields = model.meta.fields_display else: fields = self._declared_fields items = [("provider", model.source), ("identifier", model.identifier), ("uri", str(model))] if self.opt_fetch: for field in fields: items.append((field, getattr(model, field))) else: for field in fields: items.append((field, getattr(model, field + '_display'))) return items class SongSerializerMixin: class Meta: types = (SongModel, ) # since url can be too long, we put it at last fields = ('title', 'duration', 'album', 'artists', 'url') line_fmt = '{uri:{uri_length}}\t# {title:_18} - {artists_name:_20}' class ArtistSerializerMixin: class Meta: types = (ArtistModel, ) fields = ('name', 'songs') line_fmt = '{uri:{uri_length}}\t# {name:_40}' class AlbumSerializerMixin: class Meta: types = (AlbumModel, ) fields = ('name', 'artists', 'songs') line_fmt = '{uri:{uri_length}}\t# {name:_18} - {artists_name:_20}' class PlaylistSerializerMixin: class Meta: types = (PlaylistModel, ) fields = ('name', ) line_fmt = '{uri:{uri_length}}\t# {name:_40}' class UserSerializerMixin: class Meta: types = (UserModel, ) fields = ('name', 'playlists') line_fmt = '{uri:{uri_length}}\t# {name:_40}' class SearchSerializerMixin: """ .. note:: SearchModel isn't a standard model, it does not have identifier, the uri of SearchModel instance is also not so graceful, so we handle it as a normal object temporarily. """ class Meta: types = (SearchModel, ) def _get_items(self, result): fields = ('songs', 'albums', 'artists', 'playlists',) items = [] for field in fields: value = getattr(result, field) if value: # only append if it is not empty items.append((field, value)) return items class ProviderSerializerMixin: class Meta: types = (AbstractProvider, ) def _get_items(self, provider): """ :type provider: AbstractProvider """ return [ ('identifier', provider.identifier), ('uri', 'fuo://{}'.format(provider.identifier)), ('name', provider.name), ]
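# A toy, self-contained sketch of the Meta-driven pattern above: a mixin reads
# the serializer's declared fields and turns a model into (field, value) pairs.
# DummySong and its values are invented for illustration; the real feeluown
# serializers add display fields and option handling on top of this idea.
class DummySong:
    source = 'dummy'
    identifier = '42'
    title = 'Hello'
    duration = 180

    def __str__(self):
        return 'fuo://dummy/songs/42'


class ItemsMixin:
    class Meta:
        fields = ('title', 'duration')

    def get_items(self, model):
        # the provider/identifier/uri triple mirrors ModelSerializerMixin
        items = [('provider', model.source),
                 ('identifier', model.identifier),
                 ('uri', str(model))]
        for field in self.Meta.fields:
            items.append((field, getattr(model, field)))
        return items


print(ItemsMixin().get_items(DummySong()))
# [('provider', 'dummy'), ('identifier', '42'), ('uri', 'fuo://dummy/songs/42'),
#  ('title', 'Hello'), ('duration', 180)]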
Keep your data secure and protected with Auto Backup & Offsite Storage. Switch between languages (screen prompts and report labels) while running ResourceMate®. A great addition to libraries that are not English-based and just as great for bilingual libraries!
#
# fulla -- work with Digital Ocean
#
# Copyright (C) 2015 John H. Dulaney <[email protected]>
#
# Licensed under the GNU General Public License Version 2
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
"""Interact with Digital Ocean account"""

try:
    from io import BytesIO
except ImportError:
    from StringIO import StringIO as BytesIO

import pycurl
import json

from fulla import settings

Null = json.dumps(None)


def get_info(location):
    """Retrieve data from the Digital Ocean API"""
    buff = BytesIO()
    auth = 'Authorization: Bearer ' + settings.token
    curler = pycurl.Curl()
    curler.setopt(curler.URL, settings.api_url + location)
    curler.setopt(curler.HTTPHEADER, [auth])
    curler.setopt(curler.WRITEDATA, buff)
    try:
        curler.perform()
    except:
        raise
    curler.close()

    results = buff.getvalue()
    results = results.decode('iso-8859-1')
    results = json.loads(results)
    return results


def send_request(location, request):
    location = settings.api_url + location

    class _Buffer(object):
        def __init__(self):
            self.data = ''

        def incoming(self, buff):
            self.data += buff.decode('iso-8859-1')

    auth = 'Authorization: Bearer ' + settings.token
    post_request = json.dumps(request)
    try:
        buff = _Buffer()
        curler = pycurl.Curl()
        curler.setopt(curler.HTTPHEADER, [auth, "Content-type: application/json"])
        curler.setopt(curler.URL, location)
        curler.setopt(curler.POSTFIELDS, post_request)
        curler.setopt(curler.WRITEFUNCTION, buff.incoming)
        curler.perform()
        curler.close()
        return buff.data
    except:
        raise


def send_delete(location):
    location = settings.api_url + location
    buff = BytesIO()
    auth = 'Authorization: Bearer ' + settings.token
    try:
        curler = pycurl.Curl()
        curler.setopt(curler.HTTPHEADER, [auth, "Content-type: application/json"])
        curler.setopt(curler.URL, location)
        curler.setopt(curler.CUSTOMREQUEST, "DELETE")
        curler.setopt(curler.WRITEDATA, buff)
        curler.perform()
        curler.close()
        result = json.loads(buff.getvalue().decode('iso-8859-1'))
        return result
    except:
        raise


class Account(object):
    """Digital Ocean Account object"""
    def __init__(self):
        self.droplet_limit = 0
        self.email = ''
        self.uuid = ''
        self.email_verified = None
        self.status = ''
        self.status_message = ''

    def get_data(self):
        """Retrieve user data from Digital Ocean"""
        results = get_info('account')
        try:
            results = results['account']
            self.droplet_limit = results['droplet_limit']
            self.email = results['email']
            self.uuid = results['uuid']
            self.email_verified = results['email_verified']
            self.status = results['status']
            self.status_message = results['status_message']
        except:
            print(results['id'], results['message'])
            raise
        return 0


def get_droplets():
    """Retrieve Droplet data from Digital Ocean"""
    results = get_info('droplets')
    try:
        droplets = results['droplets']
        num_droplets = results['meta']['total']
    except:
        print(results['id'], results['message'])
        raise
    return droplets, num_droplets


def get_imagelist():
    """Get list of available images"""
    results = get_info('images?page=1')
    try:
        num_pages = int(results['links']['pages']['last'].rsplit('=', 1)[1])
    except:
        print(results['id'], results['message'])
        raise

    image_list = results['images']
    for page in range(2, num_pages + 1):
        results = get_info('images?page=' + str(page))
        image_list += results['images']
    return image_list


def get_keys():
    results = get_info('account/keys')
    try:
        num_keys = int(results['meta']['total'])
        keys = results['ssh_keys']
    except:
        print(results['id'], results['message'])
        raise
    return keys, num_keys


def create_droplet(name, region, size, image_slug, ssh_keys, user_data=Null,
                   private_networking=Null, ipv6=Null, backups=Null):
    """Create new droplet

    Note: ssh_keys *must* be a list
    """
    images = get_imagelist()
    droplet = None
    for image in images:
        if (image_slug == image['slug'] or image_slug == image['id']):
            droplet = {"name": name, "region": region, "size": size,
                       "image": image_slug, "ssh_keys": ssh_keys,
                       "backups": backups, "ipv6": ipv6,
                       "user_data": user_data,
                       "private_networking": private_networking}
    if droplet is not None:
        result = send_request('droplets', droplet)
        try:
            result = json.loads(result)
        except ValueError:
            # the response was not valid JSON
            print(result)
            raise
        return result
    else:
        raise ValueError("Image does not exist")


def delete_droplet(droplet_id):
    send_delete('droplets/' + str(droplet_id))
    return 0


def reboot_droplet(droplet_id):
    """Reboot droplet"""
    request = 'droplets/' + str(droplet_id) + '/actions'
    # pass a dict; send_request() json-encodes it (a JSON string here would be
    # double-encoded)
    result = send_request(request, {"type": "reboot"})
    return result
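# A hypothetical usage sketch of the functions above. It assumes this module
# is importable as `fulla.fulla` (the actual module path is a guess) and that
# `fulla.settings` exposes `token` and `api_url`, as the calls above imply.
# The droplet name, region, size and image slugs are illustrative values only.
from fulla import settings
from fulla import fulla  # assumption: this file is fulla/fulla.py

settings.token = 'YOUR_DO_API_TOKEN'
settings.api_url = 'https://api.digitalocean.com/v2/'

droplets, count = fulla.get_droplets()
print('You have %d droplets' % count)

keys, _ = fulla.get_keys()
fulla.create_droplet('test-box', 'nyc3', '1gb', 'ubuntu-14-04-x64',
                     ssh_keys=[key['id'] for key in keys])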
Biography & Genealogy: John Rufus Pearson, Class of 1865: Genealogy: Born- March 31, 1846, probably in Salisbury, NC. Father- Giles W. Pearson; Mother- Elizabeth Ellis. Pat. Grandfather- Richmond Pearson, a Col. in the Revolutionary War; Pat. Grandmother- Eliza Mumford. Mat. Grandfather- Anderson Ellis; Mat. Grandmother- Judith Bailey. Never married, no children. VMI Record: Entered VMI- Jan. 1, 1862; Resigned after one year to join the C.S.A. Civil War Record: Appointed 2nd Lieut. Co. F 7th NC Infantry Oct. 15, 1863. Died- Sept. 30, 1864, killed in action at Jones's Farm. Buried in Salisbury, NC.
# -*- coding: utf-8 -*-
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2012-2013 Elanz (<http://www.openelanz.fr>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__author__ = 'vchemiere'

from openerp.osv import osv, fields


class sale_order(osv.osv):
    _inherit = 'sale.order'

    def create(self, cr, uid, vals, context=None):
        usr_obj = self.pool.get('res.users')
        group_obj = self.pool.get('res.groups')
        ir_model_data = self.pool.get('ir.model.data')

        adv_group_id = ir_model_data.get_object_reference(cr, uid, 'sale', 'adv')[1]
        adv_users = group_obj.browse(cr, uid, adv_group_id).users
        if not vals.get('message_follower_ids', False):
            vals['message_follower_ids'] = []
        if adv_users:
            for adv_user_id in adv_users:
                adv_id = usr_obj.browse(cr, uid, adv_user_id.id).partner_id.id
                # many2many 'link' command must be a tuple: (4, id)
                vals['message_follower_ids'] += [(4, adv_id)]

        mrp_group_id = ir_model_data.get_object_reference(cr, uid, 'mrp', 'team')[1]
        mrp_users = group_obj.browse(cr, uid, mrp_group_id).users
        if mrp_users:
            for mrp_user_id in mrp_users:
                mrp_id = usr_obj.browse(cr, uid, mrp_user_id.id).partner_id.id
                vals['message_follower_ids'] += [(4, mrp_id)]

        new_id = super(sale_order, self).create(cr, uid, vals, context)

        # NOTE: this loop only reads the followers back; it has no side effect
        follower_ids = self.pool.get('mail.followers').search(cr, uid, [('res_id', '=', new_id)])
        for follower_id in follower_ids:
            follower = self.pool.get('mail.followers').browse(cr, uid, follower_id)
        return new_id

    def action_button_confirm(self, cr, uid, ids, context=None):
        if context is None:
            context = {}
        res = super(sale_order, self).action_button_confirm(cr, uid, ids, context)
        self.pool.get('mail.proxy').send_mail(cr, uid, ids, 'sale.order', 'Sales Order - Send by Email', context)
        return res
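# For reference, the OpenERP/Odoo ORM encodes relational-field updates as
# command tuples; the (4, id) used above links an existing record. A short
# cheat-sheet (the ids below are illustrative):
#
#   (0, 0, {...})   create a new record from the dict and link it
#   (1, id, {...})  update the linked record `id` with the dict
#   (3, id)         unlink record `id` (keep the record itself)
#   (4, id)         link existing record `id`
#   (5,)            unlink all
#   (6, 0, [ids])   replace the whole set with `ids`
vals = {'message_follower_ids': [(4, 7), (4, 9)]}  # follow partners 7 and 9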
A Complete Guide To The Shar Pei Dog Breed Regarding Chinese Puppy Design 0
Chinese Shar Pei Puppies Portrait Greeting Card For Sale By Waldek Inside Puppy Designs 1
Chinese Shar Pei Puppies Are Displayed For Sale Photographic Print Pertaining To Chinese Puppy Ideas 2
The Chinese Shar Pei Puppy Wow Amazing For Inspirations 3
Shar Pei Puppies For Sale Breed Profile Greenfield With Chinese Puppy Inspirations 4
Chinese Shar Pei Puppies Breed Information For Sale Within Puppy Idea 5
Chinese Shar Pei Dog Breed Information Pertaining To Puppy Remodel 6
Puppies Shar Pei For Sale In Michigan Classifieds Buy And Sell Within Chinese Puppy Design 7
Puppies For Sale From Beaux Art Chinese Shar Pei Member Since Within Puppy Plans 8
Chinese Shar Pei Puppy Photograph By Rita Boyd Intended For Designs 9
Cute Chinese Shar Pei Puppy Animals Pinterest Inside Inspirations 10
8 Fun Facts About The Chinese Shar Pei Within Puppy Plans 11
Chinese Shar Pei Dog Breed Selector Animal Planet For Puppy Idea 12
Chinese Shar Pei With Puppy Plan 13
Caramel Chinese Shar Pei Puppy For Sale Near Seoul Korea With Regard To Decorations 14

On this website (dott2.org) we feature many designs about the Chinese Shar Pei Puppy that we have collected from various sites, and what we recommend is the best of these images. If you like the designs on our website, please do not hesitate to visit again and get inspiration from our collection.
""" Django settings for ticketsystem project. Generated by 'django-admin startproject' using Django 1.11.3. For more information on this file, see https://docs.djangoproject.com/en/1.11/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.11/ref/settings/ """ ######## SETTINGS SUITABLE FOR DEVELOPMENT ######## from .settings_dev import * # SECURITY WARNING: don't run with debug turned on in production! DEBUG = False ALLOWED_HOSTS = ['localhost'] #DIRECTORY AND URL FOR MEDIA (FileField in Models) MEDIA_ROOT = os.path.join(BASE_DIR, 'C:/#DjangoApp/ticketsystem/media/') MEDIA_URL = '/' #LOGOUT USER ON BROWSER CLOSE SESSION_EXPIRE_AT_BROWSER_CLOSE = True #SET EXPIRATION AGE IN SECONDS COOKIE_EXP_AGE = 30*60 # Database # https://docs.djangoproject.com/en/1.11/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.mysql', 'NAME': 'djangodatabase', 'USER': 'DjangoApp', 'PASSWORD': 'testDjango', 'HOST': 'localhost', 'PORT': '', } } SECURE_CONTENT_TYPE_NOSNIFF = True SECURE_BROWSER_XSS_FILTER = True CSRF_COOKIE_SECURE = True SESSION_COOKIE_SECURE = True X_FRAME_OPTIONS = 'DENY' STATIC_ROOT = os.path.join(BASE_DIR, 'C:/#DjangoApp/ticketsystem/static/') STATICFILES_DIRS = [ BASE_DIR+'/static/', ]
The CCU science building project involved BIM services. An hour-based billing model was used for this project, which lasted for a period of 3 months. Advenser's scope of work was modeling and coordinating the MEP services, including the HVAC and plumbing models. The modeling was carried out in Revit, while coordination and clash detection were carried out using Navisworks Manage and Revit. The project at the CCU science building, Conway, SC involved the addition of a 71,150 sq. ft. second annex to CCU's existing building, which was constructed in 1980. The proposed annex comprises offices for 40 faculty members, 8 teaching labs, a classroom with a capacity of 460 seats, 2 classrooms with 245 seats, one ecology teaching lab, and six small labs for ecology research. Using the input design drawings, 3D models of the MEP elements were developed. Once the 3D models were developed, coordination and clash detection were done against the architectural and structural 3D models.
# _*_ coding:utf-8 _*_
import urllib, urllib2


def loadPage(url, filename):
    # Send a request to the url and return the server's response
    print 'Downloading ' + filename
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36'}
    request = urllib2.Request(url, headers=headers)
    content = urllib2.urlopen(request).read()
    return content


def writePage(html, filename):
    # Write the html content to a local file
    print 'Saving ' + filename
    with open(unicode(filename, 'utf-8'), 'w') as f:
        f.write(html)
    print '_' * 30


def tiebaSpider(url, beginPage, endPage):
    # Spider scheduler for the tieba forum: builds and processes the url of each page
    for page in range(beginPage, endPage + 1):
        pn = (page - 1) * 50
        filename = 'page_' + str(page) + '.html'
        fullurl = url + '&pn=' + str(pn)
        # print fullurl
        html = loadPage(fullurl, filename)
        writePage(html, filename)


if __name__ == '__main__':
    kw = raw_input('Enter the tieba (forum) name: ')
    beginPage = int(raw_input('Enter the start page: '))
    endPage = int(raw_input('Enter the end page: '))

    url = 'https://tieba.baidu.com/f?'
    key = urllib.urlencode({'kw': kw})
    fullurl = url + key
    tiebaSpider(fullurl, beginPage, endPage)
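# The script above targets Python 2 (urllib2, raw_input, print statements).
# A rough Python 3 equivalent of its fetch/save core, assuming the same Baidu
# Tieba URL scheme, might look like this:
from urllib.parse import urlencode
from urllib.request import Request, urlopen


def load_page(url):
    headers = {'User-Agent': 'Mozilla/5.0'}
    return urlopen(Request(url, headers=headers)).read()


def tieba_spider(kw, begin_page, end_page):
    base = 'https://tieba.baidu.com/f?' + urlencode({'kw': kw})
    for page in range(begin_page, end_page + 1):
        # each tieba page holds 50 posts, paged via the pn parameter
        html = load_page(base + '&pn=' + str((page - 1) * 50))
        with open('page_%d.html' % page, 'wb') as f:
            f.write(html)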
In keeping with the philosophy of student-centered learning, the teaching methodology at IIKM™ emphasizes the importance of learning through direct experience, which promotes the development of independent learning and key transferable skills. A combination of teacher-supervised and teacher-independent learning activities at the individual and group levels is integrated into the system of learning, such that students are optimally involved in assessing their own progress through faculty and peer feedback.

In the current system of education, where the knowledge economy is taking priority, the quality of education has to conform to global standards. Employers seek skills and competency beyond basic knowledge from their employees. They also expect their employees to be effective from day one. Realizing that the teaching and learning methods employed by faculty members have a profound impact on the progression of students, we have introduced an intensive teaching-learning methodology at IIKM™. We expect that the system will develop a deep sense of learning in a relaxed environment and inculcate in students a lasting desire to learn. We also expect that the teaching system will develop skills and competencies along with values among the students.

Mentoring is another tool used by IIKM™ to nurture and grow its students. The mentor's job is to promote intentional learning, which includes capacity building through methods such as instructing, coaching, profiling experiences, modeling and advising. An important benefit of this approach is that students are equipped for lifelong career progression.

In order to gain a better understanding of management, a number of industry visits are organized for students in consultation with a mentor or faculty member. Students identify the companies as per the course needs, submit their reports and make presentations. Further, a student is expected to maintain an organizer, which is an attempt to cultivate the habit of documentation and to develop the student's own thought process and reasoning abilities.

Seminars are organized by students on a regular basis. Such seminars help students to interact with renowned personalities in different fields, learn from their experiences and imbibe the inspiration required to move ahead in their professional lives.

Workshops on various management skills help our students learn how to lead, coach and be an integral part of their work team. Strategic tools for improving team effectiveness are learned and practiced in these hands-on workshops. This effort provides the students and the corporate participants an opportunity to enhance their skills. Taking a step further to give exhaustive coverage to the programmes, the students have initiated workshops on specific subjects and issues which may not be covered in their regular curriculum.

Special English language training is provided for all the students of IIKM™ to build effective communication and writing skills. Speaking English is mandatory on the IIKM campuses as a part of language development. All IIKM™ students have to undergo a compulsory Soft Skill Development module covering grooming, etiquette, leadership attributes, effective communication, resume writing, interview handling, etc. The focus is on enabling students to develop an independent and reflective personality that will help them in their careers and their lives. Public speaking is another effective tool in the teaching methodologies adopted at IIKM.
The motive is to prepare students to speak before a wide audience. This practice instills the confidence to communicate effectively for various purposes, such as motivation, influence, persuasion, or entertainment. Thus the students develop the confidence to face an audience.

The minimum attendance requirement for all individual subjects is 80 percent. Students with less than the prescribed attendance will not be allowed to appear for the University examinations.

It is a mass event that helps students to uncover their extra capacity in areas beyond the programs. By participating in debates, quizzes on management and other cultural activities, students contribute to their own excitement and add to their own learning. Finally, it is the annual convocation that holds the majestic pride of an institute. It is the grand finale of all the hard work put in by each student, and the most promising professionals win laurels in the form of awards.

Placement of students is the top priority at IIKM™, and we strive to maintain a 100% placement record. We at IIKM™ conduct extensive research on what gets students the best careers. Accordingly, students are groomed in the Life Skills program, which gives them an edge when they join any company. We are committed to a successful future and better placement opportunities for all our students. We ensure that students get the best jobs available at the national and international levels. IIKM™ has a placement committee that coordinates all the placement activities and also acts as a liaison between organizations and students. The committee takes care of correspondence with prospective employers, arranging pre-placement talks and personal campus interviews, and providing other facilities that may be required by visiting organizations. Some of the companies and organizations where our alumni have found their moorings include ICICI, HDFC, Airtel, Bharti AXA, HCL, TCS, Xansa, Verizon, Satyam, Reliance, Union Bank, IOB, RBI, Standard Chartered, Axis Bank, UK Trade and Genpact.
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import logging import argparse from train import train from utils import generate_machine_list_file, generate_train_conf_file logger = logging.getLogger(__name__) def main(args, extra_args): master_addr = os.environ["MASTER_ADDR"] master_port = os.environ["MASTER_PORT"] worker_addrs = os.environ["WORKER_ADDRS"] worker_port = os.environ["WORKER_PORT"] world_size = int(os.environ["WORLD_SIZE"]) rank = int(os.environ["RANK"]) logger.info( "extract cluster info from env variables \n" f"master_addr: {master_addr} \n" f"master_port: {master_port} \n" f"worker_addrs: {worker_addrs} \n" f"worker_port: {worker_port} \n" f"world_size: {world_size} \n" f"rank: {rank} \n" ) if args.job_type == "Predict": logging.info("starting the predict job") elif args.job_type == "Train": logging.info("starting the train job") logging.info(f"extra args:\n {extra_args}") machine_list_filepath = generate_machine_list_file( master_addr, master_port, worker_addrs, worker_port ) logging.info(f"machine list generated in: {machine_list_filepath}") local_port = worker_port if rank else master_port config_file = generate_train_conf_file( machine_list_file=machine_list_filepath, world_size=world_size, output_model="model.txt", local_port=local_port, extra_args=extra_args, ) logging.info(f"config generated in: {config_file}") train(config_file) logging.info("Finish distributed job") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--job_type", help="Job type to execute", choices=["Train", "Predict"], required=True, ) logging.basicConfig(format="%(message)s") logging.getLogger().setLevel(logging.INFO) args, extra_args = parser.parse_known_args() main(args, extra_args)
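# The `utils` helpers imported above are not shown here. The config keys used
# (machine list, local port, output_model="model.txt") suggest a LightGBM-style
# distributed setup, where the machine list file is conventionally one
# "ip port" pair per line. A minimal generate_machine_list_file under that
# assumption (the output filename is also an assumption) might look like:
def generate_machine_list_file(master_addr, master_port, worker_addrs,
                               worker_port, path="mlist.txt"):
    # the master comes first, then every worker on the shared worker port
    lines = ["%s %s" % (master_addr, master_port)]
    lines += ["%s %s" % (addr, worker_port) for addr in worker_addrs.split(",")]
    with open(path, "w") as f:
        f.write("\n".join(lines) + "\n")
    return path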
We examine the impact of coastal wind turbines on coastal tourism and recreation for residents of the northern CAMA counties in North Carolina. A combination of telephone and web survey data is used to assess the impact of coastal wind farms on trip behavior and site choice. Most of the respondents to our telephone survey claim to support offshore wind energy development, and independent survey data suggest that the observed levels of support may be indicative of the broader population in this region. Overall, we find very little impact of coastal wind turbines on aggregate recreational visitation; the loss in consumer surplus associated with widespread wind development in the coastal zone is insignificant at $17 (or about 1.5%). Results suggest that NC coastal residents are averse to wind farms in the near-shore zone; average compensating variation for wind farms one mile from the shore is estimated at $55 per household. On average, we find no evidence of aversion to wind farms 4 miles out in the ocean, or to wind farms located in coastal estuaries. For all wind farm scenarios, we find evidence of preference heterogeneity: some respondents find wind farms appealing while others find them aversive.
"""Unit tests for the ``usergroups`` paths. Each ``APITestCase`` subclass tests a single URL. A full list of URLs to be tested can be found here: http://theforeman.org/api/1.11/apidoc/v2/usergroups.html @Requirement: Usergroup @CaseAutomation: Automated @CaseLevel: Acceptance @CaseComponent: API @TestType: Functional @CaseImportance: High @Upstream: No """ from fauxfactory import gen_string from nailgun import entities from random import randint from requests.exceptions import HTTPError from robottelo.datafactory import ( invalid_values_list, valid_data_list, valid_usernames_list, ) from robottelo.decorators import tier1, tier2 from robottelo.test import APITestCase class UserGroupTestCase(APITestCase): """Tests for the ``usergroups`` path.""" @tier1 def test_positive_create_with_name(self): """Create new user group using different valid names @id: 3a2255d9-f48d-4f22-a4b9-132361bd9224 @Assert: User group is created successfully. """ for name in valid_data_list(): with self.subTest(name): user_group = entities.UserGroup(name=name).create() self.assertEqual(user_group.name, name) @tier1 def test_positive_create_with_user(self): """Create new user group using valid user attached to that group. @id: ab127e09-31d2-4c5b-ae6c-726e4b11a21e @Assert: User group is created successfully. """ for login in valid_usernames_list(): with self.subTest(login): user = entities.User(login=login).create() user_group = entities.UserGroup(user=[user]).create() self.assertEqual(len(user_group.user), 1) self.assertEqual(user_group.user[0].read().login, login) @tier1 def test_positive_create_with_users(self): """Create new user group using multiple users attached to that group. @id: b8dbbacd-b5cb-49b1-985d-96df21440652 @Assert: User group is created successfully and contains all expected users. """ users = [entities.User().create() for _ in range(randint(3, 5))] user_group = entities.UserGroup(user=users).create() self.assertEqual( sorted([user.login for user in users]), sorted([user.read().login for user in user_group.user]) ) @tier1 def test_positive_create_with_role(self): """Create new user group using valid role attached to that group. @id: c4fac71a-9dda-4e5f-a5df-be362d3cbd52 @Assert: User group is created successfully. """ for role_name in valid_data_list(): with self.subTest(role_name): role = entities.Role(name=role_name).create() user_group = entities.UserGroup(role=[role]).create() self.assertEqual(len(user_group.role), 1) self.assertEqual(user_group.role[0].read().name, role_name) @tier1 def test_positive_create_with_roles(self): """Create new user group using multiple roles attached to that group. @id: 5838fcfd-e256-49cf-aef8-b2bf215b3586 @Assert: User group is created successfully and contains all expected roles """ roles = [entities.Role().create() for _ in range(randint(3, 5))] user_group = entities.UserGroup(role=roles).create() self.assertEqual( sorted([role.name for role in roles]), sorted([role.read().name for role in user_group.role]) ) @tier1 def test_positive_create_with_usergroup(self): """Create new user group using another user group attached to the initial group. @id: 2a3f7b1a-7411-4c12-abaf-9a3ca1dfae31 @Assert: User group is created successfully. 
""" for name in valid_data_list(): with self.subTest(name): sub_user_group = entities.UserGroup(name=name).create() user_group = entities.UserGroup( usergroup=[sub_user_group], ).create() self.assertEqual(len(user_group.usergroup), 1) self.assertEqual(user_group.usergroup[0].read().name, name) @tier2 def test_positive_create_with_usergroups(self): """Create new user group using multiple user groups attached to that initial group. @id: 9ba71288-af8b-4957-8413-442a47057634 @Assert: User group is created successfully and contains all expected user groups @CaseLevel: Integration """ sub_user_groups = [ entities.UserGroup().create() for _ in range(randint(3, 5))] user_group = entities.UserGroup(usergroup=sub_user_groups).create() self.assertEqual( sorted([usergroup.name for usergroup in sub_user_groups]), sorted( [usergroup.read().name for usergroup in user_group.usergroup]) ) @tier1 def test_negative_create_with_name(self): """Attempt to create user group with invalid name. @id: 1a3384dc-5d52-442c-87c8-e38048a61dfa @Assert: User group is not created. """ for name in invalid_values_list(): with self.subTest(name): with self.assertRaises(HTTPError): entities.UserGroup(name=name).create() @tier1 def test_negative_create_with_same_name(self): """Attempt to create user group with a name of already existent entity. @id: aba0925a-d5ec-4e90-86c6-404b9b6f0179 @Assert: User group is not created. """ user_group = entities.UserGroup().create() with self.assertRaises(HTTPError): entities.UserGroup(name=user_group.name).create() @tier1 def test_positive_update(self): """Update existing user group with different valid names. @id: b4f0a19b-9059-4e8b-b245-5a30ec06f9f3 @Assert: User group is updated successfully. """ user_group = entities.UserGroup().create() for new_name in valid_data_list(): with self.subTest(new_name): user_group.name = new_name user_group = user_group.update(['name']) self.assertEqual(new_name, user_group.name) @tier1 def test_positive_update_with_new_user(self): """Add new user to user group @id: e11b57c3-5f86-4963-9cc6-e10e2f02468b @Assert: User is added to user group successfully. """ user = entities.User().create() user_group = entities.UserGroup().create() user_group.user = [user] user_group = user_group.update(['user']) self.assertEqual(user.login, user_group.user[0].read().login) @tier2 def test_positive_update_with_existing_user(self): """Update user that assigned to user group with another one @id: 71b78f64-867d-4bf5-9b1e-02698a17fb38 @Assert: User group is updated successfully. @CaseLevel: Integration """ users = [entities.User().create() for _ in range(2)] user_group = entities.UserGroup(user=[users[0]]).create() user_group.user[0] = users[1] user_group = user_group.update(['user']) self.assertEqual(users[1].login, user_group.user[0].read().login) @tier1 def test_positive_update_with_new_role(self): """Add new role to user group @id: 8e0872c1-ae88-4971-a6fc-cd60127d6663 @Assert: Role is added to user group successfully. """ new_role = entities.Role().create() user_group = entities.UserGroup().create() user_group.role = [new_role] user_group = user_group.update(['role']) self.assertEqual(new_role.name, user_group.role[0].read().name) @tier1 def test_positive_update_with_new_usergroup(self): """Add new user group to existing one @id: 3cb29d07-5789-4f94-9fd9-a7e494b3c110 @Assert: User group is added to existing group successfully. 
""" new_usergroup = entities.UserGroup().create() user_group = entities.UserGroup().create() user_group.usergroup = [new_usergroup] user_group = user_group.update(['usergroup']) self.assertEqual( new_usergroup.name, user_group.usergroup[0].read().name) @tier1 def test_negative_update(self): """Attempt to update existing user group using different invalid names. @id: 03772bd0-0d52-498d-8259-5c8a87e08344 @Assert: User group is not updated. """ user_group = entities.UserGroup().create() for new_name in invalid_values_list(): with self.subTest(new_name): user_group.name = new_name with self.assertRaises(HTTPError): user_group.update(['name']) self.assertNotEqual(user_group.read().name, new_name) @tier1 def test_negative_update_with_same_name(self): """Attempt to update user group with a name of already existent entity. @id: 14888998-9282-4d81-9e99-234d19706783 @Assert: User group is not updated. """ name = gen_string('alphanumeric') entities.UserGroup(name=name).create() new_user_group = entities.UserGroup().create() new_user_group.name = name with self.assertRaises(HTTPError): new_user_group.update(['name']) self.assertNotEqual(new_user_group.read().name, name) @tier1 def test_positive_delete(self): """Create user group with valid name and then delete it @id: c5cfcc4a-9177-47bb-8f19-7a8930eb7ca3 @assert: User group is deleted successfully """ user_group = entities.UserGroup().create() user_group.delete() with self.assertRaises(HTTPError): user_group.read()
This is a stunning bronze example of the Winged Victory of Samothrace, also called the Nike of Samothrace. The original, made of marble in the Hellenistic Period, 2nd century BC, graces the stairway entrance to the Antiquities Gallery of the Louvre. The patina on the bronze is superb. The sculpture stands on a granite base. We have a marble pedestal that will raise the height to 72". This gorgeous pair of bronze griffins was designed after the antique in the Regency period in England, ca. 1820. They are standing on red marble bases. Orientalist bronze sculpture of a girl with removable skirt. Signed E. Villains.
# Copyright 2020 The StackStorm Authors. # Copyright 2019 Extreme Networks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import import logging from st2client.models import core LOG = logging.getLogger(__name__) class KeyValuePair(core.Resource): _alias = "Key" _display_name = "Key Value Pair" _plural = "Keys" _plural_display_name = "Key Value Pairs" _repr_attributes = ["name", "value"] # Note: This is a temporary hack until we refactor client and make it support non id PKs def get_id(self): return self.name def set_id(self, value): pass id = property(get_id, set_id)
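# A small illustration of the primary-key hack above: `id` is aliased to
# `name`, so callers written against id-keyed resources keep working. The
# keyword-argument construction below assumes core.Resource assigns kwargs to
# attributes, as st2client resources conventionally do; treat this as a sketch
# rather than exact st2client usage.
kvp = KeyValuePair(name='db_password', value='s3cret')
print(kvp.id)        # -> 'db_password'
kvp.id = 'ignored'   # the setter is deliberately a no-op
print(kvp.id)        # still 'db_password'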
"This is a message I hoped I would never have to send. Today (Sunday, June 27, 2010), just before 2:45 PM, Barbara Moore died. She had been in failing health for several years and had become rather frail. A recent bout with bronchitis seemed to add to a few other health problems and the other health problems brought about her death. Barbara was a wonderful wife for 48 years and a wonderful mother for 36 years. I know of no one who would say anything but good things about her--she loved her friends and they loved her. She is truly a person who cannot be replaced, and Russell and I will always miss her. Barbara Hyder Moore - Melbourne - Barbara Hyder Moore passed away Sunday, June 27, 2010. She was born July 8, 1938 in East Flat Rock, NC, moved to Melbourne, FL in 1967, and was currently residing in Suntree. She also lived in Hendersonville, Charlotte and Gastonia, NC. She was preceded in death by her mother, Eileen Bailey Hyder, and her father, Lecter L. Hyder, Sr., of Bat Cave NC. Surviving family members include her husband of 48 years, Gene W. Moore; her son, Russell B. Moore of Maitland, FL; her brother, Lecter L. Hyder, Jr. (Anna) of Ada, MI; her sister, Ann H. Hall (Joe H.) of Yakima, WA; and six nieces and nephews. Barbara's ashes will be interred in the Memorial Grove on the campus of the University of North Carolina, Chapel Hill, NC. No flowers, please.
import feedparser
import json
import urllib.request
from urllib.parse import quote
from flask import Flask
from flask import render_template
from flask import request
from pprint import pprint

app = Flask(__name__)

RSS_FEEDS = {
    'bbc': 'http://feeds.bbci.co.uk/news/rss.xml',
    'cnn': 'http://rss.cnn.com/rss/edition.rss',
    'fox': 'http://feeds.foxnews.com/foxnews/latest',
    'iol': 'http://www.iol.co.za/cmlink/1.640'
}

DEFAULTS = {
    'publication': 'cnn',
    'city': 'Calcutta, IN',
    'currency_from': 'USD',
    'currency_to': 'INR'
}

WEATHER_URL = "http://api.openweathermap.org/data/2.5/weather?q={}&units=metric&appid=65b8831d1736fe05836815097ae4a457"
CURRENCY_URL = "https://openexchangerates.org//api/latest.json?app_id=09f8ae338add4275a341e3c556444eae"


@app.route("/")
def home():
    publication = request.args.get("publication")
    if not publication:
        publication = DEFAULTS['publication']
    articles = get_news(publication)

    city = request.args.get('city')
    if not city:
        city = DEFAULTS['city']
    weather = get_weather(city)

    currency_from = request.args.get("currency_from")
    currency_to = request.args.get("currency_to")
    if not currency_from:
        currency_from = DEFAULTS['currency_from']
    if not currency_to:
        currency_to = DEFAULTS['currency_to']
    rate, currencies = get_rates(currency_from, currency_to)

    return render_template(
        "home2.html",
        articles=articles,
        weather=weather,
        feeds=RSS_FEEDS,
        publication=publication,
        city=city,
        currency_from=currency_from,
        currency_to=currency_to,
        rate=rate,
        currencies=sorted(currencies)
    )


def get_news(query):
    if not query or query.lower() not in RSS_FEEDS:
        publication = DEFAULTS['publication']
    else:
        publication = query.lower()
    feed = feedparser.parse(RSS_FEEDS[publication])
    return feed['entries']


def get_weather(query):
    query = quote(query)
    url = WEATHER_URL.format(query)
    data = urllib.request.urlopen(url).read().decode("utf-8")
    parsed = json.loads(data)
    weather = None
    if parsed.get("weather"):
        weather = {
            "description": parsed["weather"][0]["description"],
            "temperature": parsed["main"]["temp"],
            "city": parsed["name"],
            "country": parsed["sys"]["country"]
        }
    return weather


def get_rates(from_rate, to_rate):
    all_currency = urllib.request.urlopen(CURRENCY_URL).read().decode("utf-8")
    parsed = json.loads(all_currency).get('rates')
    parsed_from_rate = parsed.get(from_rate.upper())
    parsed_to_rate = parsed.get(to_rate.upper())
    return (parsed_to_rate / parsed_from_rate, parsed.keys())


if __name__ == "__main__":
    app.run(port=5000, debug=True)
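# Note: the API keys above are hard-coded into the URL templates. A common
# alternative is to read them from the environment. A minimal sketch, assuming
# OWM_KEY and OXR_KEY are exported before the app starts:
import os

WEATHER_URL = ("http://api.openweathermap.org/data/2.5/weather"
               "?q={}&units=metric&appid=" + os.environ["OWM_KEY"])
CURRENCY_URL = ("https://openexchangerates.org/api/latest.json"
                "?app_id=" + os.environ["OXR_KEY"])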
Novatech is a Hampshire-based technology company that makes computers for our customers' individual needs. We give our customers all the technology, software and support that they need from one trustworthy source. We build our own desktops, laptops and servers for home, for school, the office and far beyond. That is what we call worry-free computing. Over 200 people work at Novatech and every one of them is involved in giving our customers amazing machines, each with the power to do remarkable things. We build computers capable of everything from school work to 3D rendering and from share trading to sports car tuning. Novatech machines edited an Oscar-winning film and helped process the data that cracked complex genetic codes. Just tell us what you need to do and if we don't have a machine that will do it, ready and waiting, we will build one - usually for much less than you'd expect. Over seven thousand of the UK's leading businesses and education establishments trust Novatech technology and service.

Novatech: Get free next day delivery on everything by applying a Novatech coupon code.
""" This script generates a mock/random catalogue from a lightcone python3 generate_mock.py [--random] <n> Args: n: index of lightcone Options: --param [=param.json]: parameter file --random: generate random catalogue Input: halo_lightcone/lightcone_<n>.h5 rand_lightcone/lightcone_<n>.h5 Output: mocks/mock_<n>.txt rands/random_<n>.txt """ import os import argparse import json import signal import numpy as np import mockgallib as mock signal.signal(signal.SIGINT, signal.SIG_DFL) # stop with ctrl-c # # Command-line options # parser = argparse.ArgumentParser() parser.add_argument('n', help='index of lightcone') parser.add_argument('--reg', default='w1', help='region w1/w4') parser.add_argument('--dir', default='.', help='base data directory') parser.add_argument('--param', default='param.json', help='parameter json file') parser.add_argument('--mock', help='generate mock catalogue', action="store_true") parser.add_argument('--rand', help='generate random catalogue', action="store_true") arg = parser.parse_args() data_dir = '/workplace/wp2e/como5/data' # # Read parameter file # print('Parameter file: %s' % arg.param) with open(arg.param, 'r') as f: param = json.load(f) omega_m = param['omega_m'] print('Setting cosmology: omega_m= %.4f' % omega_m) # # Initilise # mock.set_loglevel(0) mock.cosmology.set(omega_m) mock.power.init(arg.dir + '/' + param['power_spectrum']) # # redshift range # z_min = 0.39 z_max = 1.21 print('redshift range %f %f' % (z_min, z_max)) # nz # nbar_obs= mock.array.loadtxt(arg.dir + '/' + param['nz']) # sky sky = {} for reg in param['reg']: sky[reg['name']] = mock.Sky(reg['ra'], reg['dec'], [z_min, z_max]) # # Set HOD parameters # hod = mock.Hod() hod_param = [11.632682100874081, -0.5706390738948128, 4.904043697780981, -1.0126352684312565, 0.45, 0.9, 1.05, 0.0, 0.9, 0.0, 4.0, 2.0] hod.set_coef(hod_param) lightcones = mock.LightCones() cats = mock.Catalogues() n = int(arg.n) def write_catalogue(filename, a): with open(filename, 'w') as f: for i in range(a.shape[0]): f.write('%d %e %e %e %e %e %e %e %e %e %e\n' % ( i, a[i, 0], a[i, 1], a[i, 2], a[i, 4], a[i, 3], a[i, 5], a[i, 6], a[i, 7], a[i, 10], a[i, 11])) reg = arg.reg # mock if arg.mock: halo_lightcones = mock.LightCones() halo_lightcones.load_h5( ['%s/halo_lightcone/%s/lightcone_%05d.h5' % (arg.dir, reg, n)]) galaxy_catalogues = mock.Catalogues() galaxy_catalogues.generate_galaxies(hod, halo_lightcones, sky[reg], z_min, z_max) write_catalogue('mocks/%s/mock_%s_%05d.txt' % (reg, reg, n), galaxy_catalogues[0]) if arg.rand: rand_lightcones = mock.LightCones() rand_lightcones.load_h5( ['%s/rand_lightcone/%s/lightcone_%05d.h5' % (arg.dir, reg, n)]) random_catalogues = mock.Catalogues() random_catalogues.generate_randoms(hod, rand_lightcones, sky[reg], z_min, z_max) write_catalogue('rands/%s/rand_%s_%05d.txt' % (reg, reg, n), random_catalogues[0]) # Column 0: index # Column 1: x realspace [1/h Mpc] # Column 2: y # Column 3: z # Column 4: vr [km/s] # Column 5: redshift # Column 6: RA # Column 7: Dec # Column 8: M_host_halo # Column 9: r_satellite # Column 10: vr_satellite
This is a one-page logic puzzle in Spanish that centers around sports. There are six kids who play six different sports. Who plays what sport? There are eight clues in Spanish that students will use to decide. A working knowledge of dates and seasons will help, but if your students don't know these words you can add a text box with useful vocabulary. Logic puzzles are good for students' brains and they will ask for more of them after doing one! The document is editable.
import os
from setuptools import setup, find_packages

README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()

# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))

setup(
    name = 'cmsplugin-rt',
    version = '0.5.1',
    packages = find_packages(),
    include_package_data = True,
    license = 'BSD License',
    description = 'This package contains a number of basic plugins to kick start your DjangoCMS project, such as Twitter Bootstrap navbar and buttons, Facebook and Twitter buttons, a Style Modifier, Google Analytics tracking code, Google fonts, meta tags and resizable pictures.',
    long_description = README,
    keywords = "button meta twitter bootstrap style modifier racing tadpole",
    url = 'https://github.com/RacingTadpole/cmsplugin-rt',
    author = 'Art Street',
    author_email = '[email protected]',
    classifiers = [
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
    ],
    zip_safe = False,
)
Electronic weigh system ensures uniform pre-weighed bales for maximum legal loading. Variable compaction control adjusts material density and bale length to fit your trailer or container fleet. Full length constant density compaction force provides equal weight distribution throughout payload length. Ejection system accurately positions loads to achieve optimum and legal weight distribution. Free standing compacted loads allow you to use lighter tare weight hauling equipment, resulting in increased net payloads.
# -*- coding: utf-8 -*-
import os
import click
import pandas as pd


@click.command()
@click.argument('file_path', type=click.Path(exists=True))
@click.option('output_path', '--output', '-o', help='Path to save files to.',
              type=click.Path(), required=True, prompt="Output path")
def main(file_path, output_path):
    field_a = "hs"
    field_b = "cnae"
    # Read both code columns as strings so leading zeros are preserved.
    df = pd.read_csv(file_path, converters={field_a: str, field_b: str})
    # Drop rows where either code is missing.
    df = df[(df[field_a].str.len() > 0) & (df[field_b].str.len() > 0)]
    df = df[[field_a, field_b]]
    if field_a == "hs":
        # Trim 6-digit HS codes down to the 4-digit level.
        df.hs = df.hs.str.slice(2, 6)
    df = df.drop_duplicates()
    print(df)
    # Write the deduplicated crosswalk to the requested output path.
    df.to_csv(os.path.join(output_path, "pi_crosswalk.csv"), index=False)


if __name__ == "__main__":
    main()
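For concreteness, the script above would be run as follows (the input file and output directory names are illustrative):

# python crosswalk.py hs_cnae_raw.csv --output ./out

which reads the raw HS-to-CNAE concordance, trims the HS codes to the 4-digit level, deduplicates the pairs, and writes ./out/pi_crosswalk.csv.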
Fiscal sponsorship provides administrative and programmatic platforms that enable you to develop, launch and promote high-impact projects around the globe. Many individuals, foundations, corporations, or government organizations restrict funding of independent projects to tax-exempt organizations. Fiscal Sponsorship allows AMIA to serve as a conduit for financial resources, and provides some support and oversight for project grants. Projects fiscally sponsored by AMIA are able to offer the incentive of a charitable deduction for income tax purposes, a service individuals couldn’t otherwise offer. It also offers grant seekers the ability to access foundation and government grant funding that may only be accessible to non-profit organizations or fiscally sponsored projects. Establishing a fiscal sponsorship under AMIA’s 501(c)(3) public charity designation substantially reduces the costs typically associated with developing nonprofit governance, management and operational infrastructure. It also can be a useful interim step while your project makes the transition to independent charitable status. Your project must be a non-commercial work – not intended as a for-profit business opportunity – and fall within AMIA’s mission, which is dedicated to media preservation and access. AMIA Membership is required for all projects accepted into the program at either the Individual or Institutional level. Membership must be maintained throughout the life of the fiscal sponsorship account.
from sqlalchemy import event
from ggrc import db
from sqlalchemy.ext.declarative import declared_attr
from .mixins import deferred
from .reflection import PublishOnly


class HasObjectState(object):

    _publish_attrs = [
        PublishOnly('os_state'),
    ]

    def __init__(self, *args, **kwargs):
        self._skip_os_state_update = False
        super(HasObjectState, self).__init__(*args, **kwargs)

    @declared_attr
    def os_state(cls):
        return deferred(db.Column(db.String, nullable=False,
                                  default=ObjectStates.DRAFT), cls.__name__)

    def skip_os_state_update(self):
        self._skip_os_state_update = True


class ObjectStates:
    DRAFT = 'Draft'
    APPROVED = 'Approved'
    DECLINED = 'Declined'
    MODIFIED = 'Modified'


# Tables whose model classes should have their object state tracked.
class ObjectStateTables:
    table_names = [
        'programs', 'objectives', 'controls', 'sections',
        'systems', 'data_assets', 'facilities',
        'markets', 'products', 'projects', 'directives',
        'org_groups', 'vendors'
    ]


def state_before_insert_listener(mapper, connection, target):
    # Newly created objects always start out as drafts.
    if hasattr(target, 'os_state'):
        target.os_state = ObjectStates.DRAFT


def state_before_update_listener(mapper, connection, target):
    if hasattr(target, 'os_state'):
        # Allow callers to opt out of the automatic state bump.
        if getattr(target, '_skip_os_state_update', False):
            return
        target.os_state = ObjectStates.MODIFIED


def track_state_for_class(object_class):
    event.listen(object_class, 'before_insert', state_before_insert_listener)
    event.listen(object_class, 'before_update', state_before_update_listener)
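To show how these hooks are meant to be used, here is a minimal wiring sketch; the `Program` class and its import path are assumptions for illustration (chosen to match the 'programs' entry in ObjectStateTables), and only `track_state_for_class` and the listener behaviour come from the code above.

# Hypothetical wiring sketch: `Program` and its import path are assumed.
from ggrc.models.program import Program

track_state_for_class(Program)

# From here on, SQLAlchemy mapper events stamp the state column automatically:
#   INSERT -> os_state = ObjectStates.DRAFT
#   UPDATE -> os_state = ObjectStates.MODIFIED,
#             unless instance.skip_os_state_update() was called first.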
High school graduation is a very special time in the lives of seniors, their friends and family. It is the time to recognize all of the achievements you’ve made at Pittsburgh Milliones University Prep. As your school’s official provider, Herff Jones is here at every step, to help you celebrate all of your high school accomplishments and the many memories you’ve made along the way. From high school class rings and yearbooks, to graduation caps and gowns, graduation announcements and diploma frames, Herff Jones products enrich the lives of the students at Pittsburgh Milliones University Prep by celebrating the experiences that our products make memorable.
#!/usr/bin/pythonTest # -*- coding: utf-8 -*- # # string functions Eat # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Library General Public License for more details. # # The GNU General Public License is available from: # The Free Software Foundation, Inc. # 51 Franklin Street, Fifth Floor # Boston MA 02110-1301 USA # # http://www.gnu.org/licenses/gpl.html # # Copyright 2004-2016 Rick Graves # from String.Test import isAsciiAlpha, isAsciiDigit def _eatOffOneEnd( sText, sEatThese = '', fEatThese = None, bEatOffFront = True, bEatOffBoth = False ): # """ This is the generic program, it is normally only called by specific implementations below. """ # from String.Get import getTheseCharsOffOneEnd # iEat = len( getTheseCharsOffOneEnd( sText, sEatThese, fEatThese, bEatOffFront ) ) # if bEatOffFront or bEatOffBoth: # sText = sText[ iEat : ] # if bEatOffBoth: # iEat = len( getTheseCharsOffOneEnd( sText, sEatThese, fEatThese, False ) ) # # if bEatOffBoth or not bEatOffFront: # if iEat: sText = sText[ : - iEat ] # # return sText def eatCharsOffBeg( sText, sEatThese = '', fEatThese = None ): # return _eatOffOneEnd( sText, sEatThese, fEatThese, bEatOffFront = True ) def eatCharsOffEnd( sText, sEatThese = '', fEatThese = None ): # return _eatOffOneEnd( sText, sEatThese, fEatThese, bEatOffFront = False ) def eatCharsOffBothEnds( sText, sEatThese = '', fEatThese = None ): # return _eatOffOneEnd( sText, sEatThese, fEatThese, bEatOffFront = False, bEatOffBoth = True ) def eatPunctuationBegAndEnd( sFrag ): # from String.Test import isPunctuation # return eatCharsOffEnd( eatCharsOffBeg( sFrag, fEatThese = isPunctuation ), fEatThese = isPunctuation ) def eatPunctuationEnd( sFrag ): # from String.Test import isPunctuation # return eatCharsOffEnd( sFrag, fEatThese = isPunctuation ) def eatPunctAndSpacesOffEnd( sFrag ): # from String.Test import isPunctOrSpace # return eatCharsOffEnd( sFrag, fEatThese = isPunctOrSpace ) def eatPunctAndSpacesOffBegAndEnd( sFrag ): # from String.Test import isPunctOrSpace # return eatCharsOffEnd( eatCharsOffBeg( sFrag, fEatThese = isPunctOrSpace ), fEatThese = isPunctOrSpace ) def eatFrontNonAlpha( sText ): # def fEatThese( sChar ): return not isAsciiAlpha( sChar ) # return _eatOffOneEnd( sText, fEatThese = fEatThese ) def eatFrontNonDigits( sText ): # from String.Test import isNotDigit # return _eatOffOneEnd( sText, fEatThese = isNotDigit ) def eatBackNonDigits( sText ): # from String.Test import isNotDigit # return _eatOffOneEnd( sText, fEatThese = isNotDigit, bEatOffFront = False ) def eatFrontNonAlphaNum( sText ): # def fEatThese( sChar ): return not ( isAsciiAlpha( sChar ) or isAsciiDigit( sChar ) ) # return _eatOffOneEnd( sText, fEatThese = fEatThese ) def eatFrontNonAlphaNumButKeepLF( sText ): # def fEatThese( sChar ): return not ( isAsciiAlpha( sChar ) or isAsciiDigit( sChar ) or sChar == '\n' ) # return _eatOffOneEnd( sText, fEatThese = fEatThese ) def eatEndNonAlphaNum( sText ): # #def fEatThese( sChar ): return not ( sChar.isalpha() or sChar.isdigit() ) def fEatThese( sChar ): return not ( isAsciiAlpha( sChar ) or isAsciiDigit( sChar ) ) # return _eatOffOneEnd( sText, 
fEatThese = fEatThese, bEatOffFront = False ) def eatNonAlphaNumBothEnds( sText ): # return eatEndNonAlphaNum( eatFrontNonAlphaNum( sText ) ) def eatNonAlphaBothEnds( sText ): # return eatEndNonAlpha( eatFrontNonAlpha( sText ) ) def eatAlphaOffEnd( sText ): # return eatCharsOffEnd( sText, fEatThese = isAsciiAlpha ) setCRLF = frozenset( ( '\n', '\r' ) ) def _gotCRLF( sChar ): return sChar in setCRLF def eatEndCRLF( sText ): # return _eatOffOneEnd( sText, fEatThese = _gotCRLF, bEatOffFront = False ) def eatBegCRLF( sText ): # return _eatOffOneEnd( sText, fEatThese = _gotCRLF ) def eatEndAlpha( sText ): # def fEatThese( sChar ): return sChar.isalpha() # return _eatOffOneEnd( sText, fEatThese = fEatThese, bEatOffFront = False ) def eatEndNonAlpha( sText ): # def fEatThese( sChar ): return not isAsciiAlpha( sChar ) # return _eatOffOneEnd( sText, fEatThese = fEatThese, bEatOffFront = False ) def eatFrontDigits( sText ): # from String.Test import isDigit # return _eatOffOneEnd( sText, fEatThese = isDigit ) def eatEndDigits( sText ): # from String.Test import isDigit # return _eatOffOneEnd( sText, fEatThese = isDigit, bEatOffFront = False ) def eatWhiteSpaceBothEnds( sText ): # from string import whitespace # return _eatOffOneEnd( _eatOffOneEnd( sText, whitespace ), whitespace, bEatOffFront = False ) def eatWhiteSpaceFront( sText ): # from string import whitespace # return _eatOffOneEnd( sText, whitespace ) def eatEndSpaces( sText ): # from String.Test import isSpace # return _eatOffOneEnd( sText, fEatThese = isSpace, bEatOffFront = False ) def _getFrontCharOff( s, sDigit ): # while s.startswith( sDigit ): # s = s[ 1 : ] # # return s def eatFrontZeros( s ): # return _getFrontCharOff( s, '0' ) def eatFrontOnes( s ): # return _getFrontCharOff( s, '1' ) _setZeroOne = frozenset( ( '0', '1' ) ) def eatFrontZerosOnes( s ): # while s and s[0] in _setZeroOne: # s = s[ 1 : ] # # return s def eatFrontOneByOne( sOrig, sEat ): # from String.Get import getTextAfter # sRest = sOrig # for c in sEat: # sRest = getTextAfter( sRest, c ) # # return sRest if __name__ == "__main__": # from string import digits, whitespace from string import ascii_lowercase as lowercase from string import ascii_uppercase as uppercase # from six import print_ as print3 # from Iter.AllVers import iZip, iMap, tMap from String.Get import getStringInRange from Utils.Result import sayTestResult # lProblems = [] # def fEatThese( s ): return not s.isalpha() # sLeft = _eatOffOneEnd( lowercase + digits, fEatThese = fEatThese, bEatOffFront = False ) # if sLeft != lowercase: # print3( 'sLeft:', sLeft ) lProblems.append( '_eatOffOneEnd()' ) # # if eatCharsOffBeg( lowercase, 'lkjihgfedcba' ) != 'mnopqrstuvwxyz': # lProblems.append( 'eatTheseCharsOffBeg()' ) # if eatCharsOffEnd( lowercase, 'zyxwvutsrqponm' ) != 'abcdefghijkl': # lProblems.append( 'eatTheseCharsOffEnd()' ) # # if eatCharsOffBothEnds( '/abc/', '/' ) != 'abc': # # print3( eatCharsOffBothEnds( '/abc/', '/' ) ) lProblems.append( 'eatCharsOffBothEnds() remove' ) # # if eatCharsOffBothEnds( 'abc', '/' ) != 'abc': # lProblems.append( 'eatCharsOffBothEnds() nothing to remove' ) # # # if eatPunctuationBegAndEnd( ',-./0123456789:;' ) != '0123456789': # lProblems.append( 'RemovePunctuationBegAndEnd()' ) # # # getStringInRange( 32, 91 ) = # ' !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ' # if eatPunctuationEnd( ',-./0123456789:;' ) != ',-./0123456789': # lProblems.append( 'eatPunctuationEnd()' ) # # if eatPunctAndSpacesOffEnd( ',-./0123456789: ; ' ) != ',-./0123456789': # 
lProblems.append( 'eatPunctAndSpacesOffEnd()' ) # # if eatPunctAndSpacesOffBegAndEnd( ', -./0123456789: ; ' ) != '0123456789': # lProblems.append( 'eatPunctAndSpacesOffBegAndEnd()' ) # # s32to90 = getStringInRange( 32, 91 ) # if eatFrontNonAlpha( s32to90 ) != uppercase: # lProblems.append( 'eatFrontNonAlpha()' ) # if eatFrontNonAlphaNum( s32to90 ) != '0123456789:;<=>?@' + uppercase: # lProblems.append( 'eatFrontNonAlphaNum()' ) # if eatFrontNonAlphaNumButKeepLF( '\n' + s32to90 ) != '\n' + s32to90 or \ eatFrontNonAlphaNumButKeepLF( '\r' + s32to90 ) != '0123456789:;<=>?@' + uppercase: # lProblems.append( 'eatFrontNonAlphaNumButKeepLF()' ) # # # getStringInRange( 48, 65 ) = '0123456789:;<=>?@' # if eatEndNonAlphaNum( lowercase + whitespace ) != lowercase: # print3( eatEndNonAlphaNum( lowercase + whitespace ) ) lProblems.append( 'eatEndNonAlphaNum( lowercase + whitespace )' ) # # if eatEndNonAlphaNum( getStringInRange( 97, 256 ) ) != lowercase: # s = eatEndNonAlphaNum( getStringInRange( 97, 256 ) ) # # print3( tMap( str, iMap( ord, ( s[0], s[-1] ) ) ) ) lProblems.append( 'eatEndNonAlphaNum( getStringInRange( 97, 256 ) )' ) # # if eatEndNonAlphaNum( getStringInRange( 48, 65 ) ) != digits: # print3( eatEndNonAlphaNum( getStringInRange( 48, 65 ) ) ) lProblems.append( 'eatEndNonAlphaNum( getStringInRange( 48, 65 ) )' ) # # # print3( 'getStringInRange( 65, 123 )', '= ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz' ) # print3( 'getStringInRange( 32, 97 )', '''= !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`''' ) # if eatNonAlphaNumBothEnds( getStringInRange( 32, 256 ) ) != getStringInRange( 48, 123 ) or \ eatNonAlphaNumBothEnds( getStringInRange( 32, 97 ) ) != getStringInRange( 48, 91 ): # #print3( eatNonAlphaNumBothEnds( getStringInRange( 32, 256 ) ) ) #print3( eatNonAlphaNumBothEnds( getStringInRange( 32, 97 ) ) ) lProblems.append( 'eatNonAlphaNumBothEnds()' ) # if eatNonAlphaBothEnds( getStringInRange( 32, 97 ) ) != uppercase: # lProblems.append( 'eatNonAlphaBothEnds()' ) # if eatAlphaOffEnd( '1234abcd' ) != '1234': # lProblems.append( 'eatAlphaOffEnd()' ) # # if eatEndCRLF( '\r\n' + uppercase + '\r\n' ) != '\r\n' + uppercase: # lProblems.append( 'eatEndCRLF()' ) # if eatBegCRLF( '\r\n' + uppercase + '\r\n' ) != uppercase + '\r\n': # lProblems.append( 'eatBegCRLF()' ) # # # getStringInRange( 65, 123 ) = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz' # if eatEndAlpha( getStringInRange( 65, 123 ) ) != uppercase + '[\\]^_`': # lProblems.append( 'eatEndAlpha()' ) # if eatEndNonAlpha( getStringInRange( 97, 256 ) ) != lowercase: # print3( eatEndNonAlpha( getStringInRange( 97, 256 ) ) ) lProblems.append( 'eatEndNonAlpha()' ) # # # getStringInRange( 48, 65 ) = '0123456789:;<=>?@' # if eatFrontDigits( getStringInRange( 48, 65 ) ) != ':;<=>?@': # lProblems.append( 'eatFrontDigits()' ) # # # getStringInRange( 32, 58 ) = ' !"#$%&\'()*+,-./0123456789' # if eatEndDigits( getStringInRange( 32, 58 ) ) != ' !"#$%&\'()*+,-./': # lProblems.append( 'eatEndDigits()' ) # # if eatWhiteSpaceBothEnds( whitespace + lowercase + whitespace ) != lowercase: # lProblems.append( 'eatWhiteSpaceBothEnds()' ) # # if eatWhiteSpaceFront( whitespace + lowercase + whitespace ) != \ lowercase + whitespace: # lProblems.append( 'eatWhiteSpaceFront()' ) # # if eatEndSpaces( '\t\n\x0b\x0c\r ' ) != '\t\n\x0b\x0c\r' or \ eatEndSpaces( 'abc' ) != 'abc': # lProblems.append( 'eatEndSpaces()' ) # # if eatFrontNonDigits( '-206-632-9929' ) != '206-632-9929': # lProblems.append( 'eatFrontNonDigits()' 
) # # if eatBackNonDigits( '123xzy' ) != '123': # lProblems.append( 'eatBackNonDigits()' ) # # sOrig = '1-2-3-4-5-6-7-8-9' sEat = '123' # if eatFrontOneByOne( sOrig, sEat ) != '-4-5-6-7-8-9': # lProblems.append( 'eatFrontOneByOne()' ) # # # # # sayTestResult( lProblems )
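A short usage sketch of the helpers above, mirroring the module's own self-test expectations (the `String.Eat` import path is an assumption inferred from the package layout implied by the module's imports):

# Hypothetical import path; the expected results mirror the self-tests above.
from String.Eat import eatCharsOffBothEnds, eatFrontNonDigits, eatWhiteSpaceBothEnds

print(eatCharsOffBothEnds('/abc/', '/'))    # -> 'abc'
print(eatFrontNonDigits('-206-632-9929'))   # -> '206-632-9929'
print(eatWhiteSpaceBothEnds('  abc  '))     # -> 'abc'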
Truly eloquent individuals hardly go unnoticed, as they instantly give the impression of being engaging speakers. Attaining such status is essential if you want to be heard, whether you are giving a speech, informally debating a topic with partners, or expressing your perspective on Twitter. However, as we approach the end of a decade marked by abbreviated communication (e.g. informal writing through e-mail and chat) and the start of another that seems to be dominated by even shorter messages (think SMS text messages and micro-blogging), it's easy to forget to practice coming up with longer, thoughtful remarks. Here is a tutorial on public speaking with 5 tips on improving how you deliver a speech or message.

Help your audience stay focused by choosing topics they can relate to and working them into your message carefully, especially at the beginning. Doing so can make the difference in whether someone becomes engaged, then believes and shares your perspective. Make a "tour" of your potential audience's minds. Sure, you may have a lot of opinionated content you are eager to share, but speaking about topics outside of the audience's interests means they are less likely to see your point. To pick the right combination of topics so your audience becomes interested at once, research what the audience cares about. Give the audience something they can take away and do for themselves. What is the impact on the audience's lives?

Stand out from the rest and avoid topics that people have already heard or read. Aside from giving the audience hints of what they really want to hear about, other appealing factors include positive messages that dwell less on failures. In a University of Pennsylvania study of presidential nomination speeches from 1900 to 1984, it was found that American voters choose candidates who express optimism and ruminate less. In other words, using a pessimistic approach to explain the causes of problems, and offering ever more explanations for failures, made candidates look more helpless and less appealing to the public. Susceptibility to helplessness is a psychologically discouraging sign to the public, who is looking for confident candidates to boost their trust in government officials. Even if you are not delivering a presidential campaign message, you should aim to make optimistic remarks and minimize talk of unresolved problems or failures, unless you are discussing how to attack them in your speech.

When delivering a speech, be concise and end earlier than the time you are given (the audience will thank you). Sometimes adding relevant metaphors may seem appropriate, but don't waste time including too many. Every single part of the message is a moment for you to hold your audience's interest and to be remembered, so you should craft each sentence to reflect something uniquely brilliant. Piling on metaphors strains your audience's trust that they should stay focused on your topic.

Dedicating time to craft the body of your message is as important as creating an original name for your speech or message. This is never stressed enough: your title should spark the audience's interest so they anticipate reading or listening to your message. Think of the title as your red carpet to grab your audience's attention.
This statement must summarize your main point in a unique way. As you figure out a concise title, try to be more general than overly specific. Think about your audience when you name your work and how it relates to them. Compare "The Psychology of eloquent speeches" with "Tips on giving convincing messages." The former includes the name of a scientific field that may not interest a number of people.

In closing, practicing building a concise, positive, invaluable, and appropriately named message around your audience can ensure that your point gets across and stays fresh in people's minds. Want to seriously improve your public speaking skills? Check out these free resources, such as Toastmasters, a non-profit organization that helps individuals grow comfortable giving speeches; SpeechTips, which offers a free speechwriting course; PsychologyToday's confidence-building article; From Speech Worrier to Speech Warrior, a set of Scholastic tips gathered from political speechwriters; and Six Minutes, a blog dedicated to public speaking and presentation skills. Want samples of excellent speeches? Art of Manliness features thirty-five excerpts and full speeches. Don't have time to read all these resources? Take the speeches with you by downloading the podcasts to your iPod or mobile phone so you can be constantly educated.

What audience-engaging techniques do you recommend? Delight us in the comments.

Public speaking is a big issue that a lot of people have trouble with. Instead of letting it cause you fear and hinder your career path, address it! There are wonderful public speaking and executive communications courses out here. Here's an example of one in my area that I've done before: ellisstrategies.com/public-speaking-rhode-island/. It was fantastic, and I continue to work closely with my communications coach!

@Darren That's a good, practical way to look at it!

@Simon True. In a particular group, there might be the possibility of people going off on a tangent, so it's up to you to decide what advice suits your needs.

99% of Toastmasters are fantastic organisations and you'll get a lot of support from your peers. Therein lies the rub, however, because you'll only be able to get support from your *peers*, rather than experts. At the occasional Toastmasters club I've found some of the advice given was a little counter-productive. (And a little more often, not very specific or helpful.) Don't get me wrong though, TM is a great place to practice and great 99% of the time. Just be aware that you might need to shop around a bit.

Being concise is probably the greatest point. Very few sinners are saved after the first 20 minutes of a sermon. No one has ever been boo'ed for getting off early.

@Preston Thank you and best of luck!

@Sue Thank you for elaborating on the benefits of joining Toastmasters. I can attest to that. Some nice fellows from a local Toastmasters club took about 3 hours to help me and a few other orientation advisors at my University become less nervous about speaking in front of a lot of fellow classmates. I didn't become an expert like them, but I felt much more prepared with their advice. I imagine a few months of attending the Toastmasters sessions would help a ton.

Thanks for pointing people to Toastmasters. It is really a great, supportive, helpful organization. I have seen people change from nervous wrecks to competent speakers in just a few months. Great post!
I have always frozen up when it comes to public speaking, but now I can start to refocus my efforts and get as good as any celebrity speaker making the circuit these days.
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Estimators that combine explicit kernel mappings with linear models.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import six from tensorflow.contrib import layers from tensorflow.contrib.kernel_methods.python.mappers import dense_kernel_mapper as dkm from tensorflow.contrib.learn.python.learn.estimators import estimator from tensorflow.contrib.learn.python.learn.estimators import head as head_lib from tensorflow.contrib.learn.python.learn.estimators import linear from tensorflow.contrib.learn.python.learn.estimators import prediction_key from tensorflow.python.ops import array_ops from tensorflow.python.platform import tf_logging as logging _FEATURE_COLUMNS = "feature_columns" _KERNEL_MAPPERS = "kernel_mappers" _OPTIMIZER = "optimizer" def _check_valid_kernel_mappers(kernel_mappers): """Checks that the input kernel_mappers are valid.""" if kernel_mappers is None: return True for kernel_mappers_list in six.itervalues(kernel_mappers): for kernel_mapper in kernel_mappers_list: if not isinstance(kernel_mapper, dkm.DenseKernelMapper): return False return True def _check_valid_head(head): """Returns true if the provided head is supported.""" if head is None: return False # pylint: disable=protected-access return isinstance(head, head_lib._BinaryLogisticHead) or isinstance( head, head_lib._MultiClassHead) # pylint: enable=protected-access def _update_features_and_columns(features, feature_columns, kernel_mappers_dict): """Updates features and feature_columns based on provided kernel mappers. Currently supports the update of `RealValuedColumn`s only. Args: features: Initial features dict. The key is a `string` (feature column name) and the value is a tensor. feature_columns: Initial iterable containing all the feature columns to be consumed (possibly after being updated) by the model. All items should be instances of classes derived from `FeatureColumn`. kernel_mappers_dict: A dict from feature column (type: _FeatureColumn) to objects inheriting from KernelMapper class. Returns: updated features and feature_columns based on provided kernel_mappers_dict. """ if kernel_mappers_dict is None: return features, feature_columns # First construct new columns and features affected by kernel_mappers_dict. mapped_features = dict() mapped_columns = set() for feature_column in kernel_mappers_dict: column_name = feature_column.name # Currently only mappings over RealValuedColumns are supported. if not isinstance(feature_column, layers.feature_column._RealValuedColumn): # pylint: disable=protected-access logging.warning( "Updates are currently supported on RealValuedColumns only. Metadata " "for FeatureColumn {} will not be updated.".format(column_name)) continue mapped_column_name = column_name + "_MAPPED" # Construct new feature columns based on provided kernel_mappers. 
column_kernel_mappers = kernel_mappers_dict[feature_column] new_dim = sum([mapper.output_dim for mapper in column_kernel_mappers]) mapped_columns.add( layers.feature_column.real_valued_column(mapped_column_name, new_dim)) # Get mapped features by concatenating mapped tensors (one mapped tensor # per kernel mappers from the list of kernel mappers corresponding to each # feature column). output_tensors = [] for kernel_mapper in column_kernel_mappers: output_tensors.append(kernel_mapper.map(features[column_name])) tensor = array_ops.concat(output_tensors, 1) mapped_features[mapped_column_name] = tensor # Finally update features dict and feature_columns. features = features.copy() features.update(mapped_features) feature_columns = set(feature_columns) feature_columns.update(mapped_columns) return features, feature_columns def _kernel_model_fn(features, labels, mode, params, config=None): """model_fn for the Estimator using kernel methods. Args: features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`). labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of dtype `int32` or `int64` in the range `[0, n_classes)`. mode: Defines whether this is training, evaluation or prediction. See `ModeKeys`. params: A dict of hyperparameters. The following hyperparameters are expected: * head: A `Head` instance. * feature_columns: An iterable containing all the feature columns used by the model. * optimizer: string, `Optimizer` object, or callable that defines the optimizer to use for training. If `None`, will use a FTRL optimizer. * kernel_mappers: Dictionary of kernel mappers to be applied to the input features before training. config: `RunConfig` object to configure the runtime settings. Returns: A `ModelFnOps` instance. Raises: ValueError: If mode is not any of the `ModeKeys`. """ feature_columns = params[_FEATURE_COLUMNS] kernel_mappers = params[_KERNEL_MAPPERS] updated_features, updated_columns = _update_features_and_columns( features, feature_columns, kernel_mappers) params[_FEATURE_COLUMNS] = updated_columns return linear._linear_model_fn( # pylint: disable=protected-access updated_features, labels, mode, params, config) class _KernelEstimator(estimator.Estimator): """Generic kernel-based linear estimator.""" def __init__(self, feature_columns=None, model_dir=None, weight_column_name=None, head=None, optimizer=None, kernel_mappers=None, config=None): """Constructs a `_KernelEstimator` object.""" if not feature_columns and not kernel_mappers: raise ValueError( "You should set at least one of feature_columns, kernel_mappers.") if not _check_valid_kernel_mappers(kernel_mappers): raise ValueError("Invalid kernel mappers.") if not _check_valid_head(head): raise ValueError( "head type: {} is not supported. Supported head types: " "_BinaryLogisticHead, _MultiClassHead.".format(type(head))) params = { "head": head, _FEATURE_COLUMNS: feature_columns or [], _OPTIMIZER: optimizer, _KERNEL_MAPPERS: kernel_mappers } super(_KernelEstimator, self).__init__( model_fn=_kernel_model_fn, model_dir=model_dir, config=config, params=params) class KernelLinearClassifier(_KernelEstimator): """Linear classifier using kernel methods as feature preprocessing. It trains a linear model after possibly mapping initial input features into a mapped space using explicit kernel mappings. Due to the kernel mappings, training a linear classifier in the mapped (output) space can detect non-linearities in the input space. 
The user can provide a list of kernel mappers to be applied to all or a subset of existing feature_columns. This way, the user can effectively provide 2 types of feature columns: * those passed as elements of feature_columns in the classifier's constructor * those appearing as a key of the kernel_mappers dict. If a column appears in feature_columns only, no mapping is applied to it. If it appears as a key in kernel_mappers, the corresponding kernel mappers are applied to it. Note that it is possible that a column appears in both places. Currently kernel_mappers are supported for _RealValuedColumns only. Example usage: ``` real_column_a = real_valued_column(name='real_column_a',...) sparse_column_b = sparse_column_with_hash_bucket(...) kernel_mappers = {real_column_a : [RandomFourierFeatureMapper(...)]} optimizer = ... # real_column_a is used as a feature in both its initial and its transformed # (mapped) form. sparse_column_b is not affected by kernel mappers. kernel_classifier = KernelLinearClassifier( feature_columns=[real_column_a, sparse_column_b], model_dir=..., optimizer=optimizer, kernel_mappers=kernel_mappers) # real_column_a is used as a feature in its transformed (mapped) form only. # sparse_column_b is not affected by kernel mappers. kernel_classifier = KernelLinearClassifier( feature_columns=[sparse_column_b], model_dir=..., optimizer=optimizer, kernel_mappers=kernel_mappers) # Input builders def train_input_fn: # returns x, y ... def eval_input_fn: # returns x, y ... kernel_classifier.fit(input_fn=train_input_fn) kernel_classifier.evaluate(input_fn=eval_input_fn) kernel_classifier.predict(...) ``` Input of `fit` and `evaluate` should have following features, otherwise there will be a `KeyError`: * if `weight_column_name` is not `None`, a feature with `key=weight_column_name` whose value is a `Tensor`. * for each `column` in `feature_columns`: - if `column` is a `SparseColumn`, a feature with `key=column.name` whose `value` is a `SparseTensor`. - if `column` is a `WeightedSparseColumn`, two features: the first with `key` the id column name, the second with `key` the weight column name. Both features' `value` must be a `SparseTensor`. - if `column` is a `RealValuedColumn`, a feature with `key=column.name` whose `value` is a `Tensor`. """ def __init__(self, feature_columns=None, model_dir=None, n_classes=2, weight_column_name=None, optimizer=None, kernel_mappers=None, config=None): """Construct a `KernelLinearClassifier` estimator object. Args: feature_columns: An iterable containing all the feature columns used by the model. All items in the set should be instances of classes derived from `FeatureColumn`. model_dir: Directory to save model parameters, graph etc. This can also be used to load checkpoints from the directory into an estimator to continue training a previously saved model. n_classes: number of label classes. Default is binary classification. Note that class labels are integers representing the class index (i.e. values from 0 to n_classes-1). For arbitrary label values (e.g. string labels), convert to class indices first. weight_column_name: A string defining feature column name representing weights. It is used to down weight or boost examples during training. It will be multiplied by the loss of the example. optimizer: The optimizer used to train the model. If specified, it should be an instance of `tf.Optimizer`. If `None`, the Ftrl optimizer is used by default. 
kernel_mappers: Dictionary of kernel mappers to be applied to the input features before training a (linear) model. Keys are feature columns and values are lists of mappers to be applied to the corresponding feature column. Currently only _RealValuedColumns are supported and therefore all mappers should conform to the `DenseKernelMapper` interface (see ./mappers/dense_kernel_mapper.py). config: `RunConfig` object to configure the runtime settings. Returns: A `KernelLinearClassifier` estimator. Raises: ValueError: if n_classes < 2. ValueError: if neither feature_columns nor kernel_mappers are provided. ValueError: if mappers provided as kernel_mappers values are invalid. """ super(KernelLinearClassifier, self).__init__( feature_columns=feature_columns, model_dir=model_dir, weight_column_name=weight_column_name, head=head_lib.multi_class_head( n_classes=n_classes, weight_column_name=weight_column_name), kernel_mappers=kernel_mappers, config=config) def predict_classes(self, input_fn=None): """Runs inference to determine the predicted class per instance. Args: input_fn: The input function providing features. Returns: A generator of predicted classes for the features provided by input_fn. Each predicted class is represented by its class index (i.e. integer from 0 to n_classes-1) """ key = prediction_key.PredictionKey.CLASSES predictions = super(KernelLinearClassifier, self).predict( input_fn=input_fn, outputs=[key]) return (pred[key] for pred in predictions) def predict_proba(self, input_fn=None): """Runs inference to determine the class probability predictions. Args: input_fn: The input function providing features. Returns: A generator of predicted class probabilities for the features provided by input_fn. """ key = prediction_key.PredictionKey.PROBABILITIES predictions = super(KernelLinearClassifier, self).predict( input_fn=input_fn, outputs=[key]) return (pred[key] for pred in predictions)
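For context, here is a minimal end-to-end sketch of the classifier above paired with the contrib RandomFourierFeatureMapper; the toy input function, dimensions, and optimizer settings are illustrative assumptions, and it presumes a TensorFlow 1.x environment where tf.contrib is available.

import numpy as np
import tensorflow as tf


def input_fn():
  # Toy batch: 8 random 784-dim "images" with random labels in [0, 10).
  images = tf.constant(np.random.rand(8, 784), dtype=tf.float32)
  labels = tf.constant(np.random.randint(0, 10, size=8), dtype=tf.int64)
  return {'images': images}, labels


image_column = tf.contrib.layers.real_valued_column('images', dimension=784)
kernel_mapper = tf.contrib.kernel_methods.RandomFourierFeatureMapper(
    input_dim=784, output_dim=2000, stddev=5.0, name='rffm')

# The 'images' column is mapped into a 2000-dim feature space before the
# linear model is trained on it, approximating an RBF-kernel classifier.
classifier = tf.contrib.kernel_methods.KernelLinearClassifier(
    n_classes=10,
    optimizer=tf.train.FtrlOptimizer(learning_rate=50.0),
    kernel_mappers={image_column: [kernel_mapper]})

classifier.fit(input_fn=input_fn, steps=10)
print(classifier.evaluate(input_fn=input_fn, steps=1))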
Today I wrote an email to everyone in my office that began, "To my utter horror..." Which made Creative Me happy but made Sensible Me cringe later in the realization that people are probably rolling their eyes while reading my email via blackberry. Awesome job on the steps and very cool news about Brendan's company! But what did the rest of that office email say?
import logging import time import threading from coapthon import defines __author__ = 'Giacomo Tanganelli' logger = logging.getLogger(__name__) class ObserveItem(object): def __init__(self, timestamp, non_counter, allowed, transaction, serv=None): """ Data structure for the Observe option :param timestamp: the timestamop of last message sent :param non_counter: the number of NON notification sent :param allowed: if the client is allowed as observer :param transaction: the transaction :param serv: reference to CoAP object """ self.timestamp = timestamp self.non_counter = non_counter self.allowed = allowed self.transaction = transaction # parameters for dynamic resource observing self.conditional = False self.conditions = {} self.last_notify = time.time() self.timer = None self.coap = serv # timer for notification procedure is set at (pmax - pmin)/2 def pmax_timer(self): self.coap.notify(self.transaction.resource) def start_timer(self): pmin = 0 pmax = 0 for cond in self.conditions: if cond == "pmin": pmin = self.conditions[cond] elif cond == "pmax": pmax = self.conditions[cond] if pmax == 0: return else: self.timer = threading.Timer((pmax-pmin)/2, self.pmax_timer) self.timer.start() class ObserveLayer(object): """ Manage the observing feature. It store observing relationships. """ def __init__(self, server=None): self._relations = {} self._server = server def send_request(self, request): """ Add itself to the observing list :param request: the request :return: the request unmodified """ if request.observe == 0: # Observe request host, port = request.destination key_token = hash(str(host) + str(port) + str(request.token)) self._relations[key_token] = ObserveItem(time.time(), None, True, None) if request.observe == 1: # Cancelling observe explicitly self.remove_subscriber(request) return request def receive_response(self, transaction): """ Sets notification's parameters. :type transaction: Transaction :param transaction: the transaction :rtype : Transaction :return: the modified transaction """ host, port = transaction.response.source key_token = hash(str(host) + str(port) + str(transaction.response.token)) if key_token in self._relations and transaction.response.type == defines.Types["CON"]: transaction.notification = True return transaction def send_empty(self, message): """ Eventually remove from the observer list in case of a RST message. :type message: Message :param message: the message :return: the message unmodified """ host, port = message.destination key_token = hash(str(host) + str(port) + str(message.token)) if key_token in self._relations and message.type == defines.Types["RST"]: del self._relations[key_token] return message def receive_request(self, transaction): """ Manage the observe option in the request end eventually initialize the client for adding to the list of observers or remove from the list. 
:type transaction: Transaction :param transaction: the transaction that owns the request :rtype : Transaction :return: the modified transaction """ if transaction.request.observe == 0: # Observe request host, port = transaction.request.source key_token = hash(str(host) + str(port) + str(transaction.request.token)) non_counter = 0 if key_token in self._relations: # Renew registration allowed = True else: allowed = False self._relations[key_token] = ObserveItem(time.time(), non_counter, allowed, transaction, self._server) # check if the observing request has dynamic parameters (sent inside uri_query field) if transaction.request.uri_query is not None: logger.info("Dynamic Observing registration") self._relations[key_token].conditional = True self._relations[key_token].conditions = ObserveLayer.parse_uri_query(transaction.request.uri_query) self._relations[key_token].start_timer() elif transaction.request.observe == 1: host, port = transaction.request.source key_token = hash(str(host) + str(port) + str(transaction.request.token)) logger.info("Remove Subscriber") try: del self._relations[key_token] except KeyError: pass return transaction def receive_empty(self, empty, transaction): """ Manage the observe feature to remove a client in case of a RST message receveide in reply to a notification. :type empty: Message :param empty: the received message :type transaction: Transaction :param transaction: the transaction that owns the notification message :rtype : Transaction :return: the modified transaction """ if empty.type == defines.Types["RST"]: host, port = transaction.request.source key_token = hash(str(host) + str(port) + str(transaction.request.token)) logger.info("Remove Subscriber") try: del self._relations[key_token] except KeyError: pass transaction.completed = True return transaction def send_response(self, transaction): """ Finalize to add the client to the list of observer. :type transaction: Transaction :param transaction: the transaction that owns the response :return: the transaction unmodified """ host, port = transaction.request.source key_token = hash(str(host) + str(port) + str(transaction.request.token)) if key_token in self._relations: if transaction.response.code == defines.Codes.CONTENT.number: if transaction.resource is not None and transaction.resource.observable: transaction.response.observe = transaction.resource.observe_count self._relations[key_token].allowed = True self._relations[key_token].transaction = transaction self._relations[key_token].timestamp = time.time() else: del self._relations[key_token] elif transaction.response.code >= defines.Codes.ERROR_LOWER_BOUND: del self._relations[key_token] return transaction def notify(self, resource, root=None): """ Prepare notification for the resource to all interested observers. 
        :rtype: list
        :param resource: the resource for which to send a new notification
        :param root: deprecated
        :return: the list of transactions to be notified
        """
        ret = []
        if root is not None:
            resource_list = root.with_prefix_resource(resource.path)
        else:
            resource_list = [resource]
        for key in list(self._relations.keys()):
            if self._relations[key].transaction.resource in resource_list:
                # checking dynamic resource parameters
                if self._relations[key].conditional:
                    if self.verify_conditions(self._relations[key]) is False:
                        continue
                    # updating relation timestamp and resetting timer
                    self._relations[key].last_notify = time.time()
                    if self._relations[key].timer is not None:
                        self._relations[key].timer.cancel()
                    self._relations[key].start_timer()
                if self._relations[key].non_counter > defines.MAX_NON_NOTIFICATIONS \
                        or self._relations[key].transaction.request.type == defines.Types["CON"]:
                    self._relations[key].transaction.response.type = defines.Types["CON"]
                    self._relations[key].non_counter = 0
                elif self._relations[key].transaction.request.type == defines.Types["NON"]:
                    self._relations[key].non_counter += 1
                    self._relations[key].transaction.response.type = defines.Types["NON"]
                self._relations[key].transaction.resource = resource
                del self._relations[key].transaction.response.mid
                del self._relations[key].transaction.response.token
                ret.append(self._relations[key].transaction)
        return ret

    def remove_subscriber(self, message):
        """
        Remove a subscriber based on token.

        :param message: the message
        """
        logger.debug("Remove Subscriber")
        host, port = message.destination
        key_token = hash(str(host) + str(port) + str(message.token))
        try:
            self._relations[key_token].transaction.completed = True
            del self._relations[key_token]
        except AttributeError:
            logger.warning("No Transaction")
        except KeyError:
            logger.warning("No Subscriber")

    @staticmethod
    def parse_uri_query(uri_query):
        """
        Parse the conditional parameters for conditional observing.

        :return: a dict mapping parameter names to (typed) values
        """
        dict_att = {}
        logger.debug("parse_uri_query: %s", uri_query)
        attributes = uri_query.split(";")
        for att in attributes:
            a = att.split("=")
            if len(a) > 1:
                if str(a[0]) == "band":
                    # "band" is a boolean flag; any non-empty value counts as True
                    a[1] = bool(a[1])
                elif a[1].isdigit():
                    a[1] = int(a[1])
                dict_att[str(a[0])] = a[1]
            else:
                dict_att[str(a[0])] = a[0]
        logger.debug("parsed conditions: %s", dict_att)
        return dict_att

    @staticmethod
    def verify_conditions(item):
        """
        Check if the changed resource requires a notification.

        :param item: ObserveItem
        :return: Boolean
        """
        for cond in item.conditions:
            if cond == "pmin":
                # CURRENT TIME - TIMESTAMP < PMIN
                t = int(time.time() - item.last_notify)
                if t < int(item.conditions[cond]):
                    return False
        return True
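As a small illustration of the conditional-observe plumbing above (a sketch; the query string is just an example of what a client might send at registration time):

# Parsing mirrors ObserveLayer.parse_uri_query above: numeric values are
# converted to int, so the conditions arrive typed.
conditions = ObserveLayer.parse_uri_query("pmin=5;pmax=20")
assert conditions == {"pmin": 5, "pmax": 20}

# With these conditions, start_timer() schedules pmax_timer() after
# (pmax - pmin) / 2 = 7.5 seconds (Python 3 division), forcing a
# notification even if the resource itself has not changed.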
Silva’s Concrete & Interlocking has been in operation for 14 years now. Our owner brings over 30 years of concrete experience to the business. He provides a wealth of knowledge on the types of concrete products and tools that produce the best results for your landscape project. Ernesto has always worked in the concrete industry. With a hands-on knowledge of the industry’s ups and downs, he can provide solid design and installation advice on what works best for your home and property. Very professional and a great teacher, he can explain in detail the steps we will take in creating and installing your concrete products. Ernesto makes you feel like family, taking every job as a new personal challenge. He always treats your home like his own. David is going for his ARZ License for operation of dump trucks. Very friendly, David continues his father’s tradition of making every customer feel like family. David is your go-to guy when it comes to problems and questions you have. Additionally, he runs the site when his father is not there. Similar to his father, David loves to work with his hands, gaining experience in a wide variety of concrete types. He’s learned the theory, and now he’s doing the hands-on work needed to get the job done.
#!/usr/bin/env python from distutils.core import setup, Extension, Distribution, Command import distutils.sysconfig import sys import os import os.path from translate import __version__ from translate import __doc__ try: import py2exe build_exe = py2exe.build_exe.py2exe Distribution = py2exe.Distribution except ImportError: py2exe = None build_exe = Command # TODO: check out installing into a different path with --prefix/--home join = os.path.join PRETTY_NAME = 'Translate Toolkit' translateversion = __version__.sver packagesdir = distutils.sysconfig.get_python_lib() sitepackages = packagesdir.replace(sys.prefix + os.sep, '') infofiles = [(join(sitepackages,'translate'), [join('translate',filename) for filename in 'ChangeLog', 'COPYING', 'LICENSE', 'README'])] initfiles = [(join(sitepackages,'translate'),[join('translate','__init__.py')])] subpackages = [ "convert", "filters", "lang", "misc", join("misc", "typecheck"), "storage", join("storage", "placeables"), join("storage", "versioncontrol"), join("storage", "xml_extract"), "search", join("search", "indexing"), "services", "tools", ] # TODO: elementtree doesn't work in sdist, fix this packages = ["translate"] translatescripts = [apply(join, ('translate', ) + script) for script in ('convert', 'pot2po'), ('convert', 'moz2po'), ('convert', 'po2moz'), ('convert', 'oo2po'), ('convert', 'po2oo'), ('convert', 'oo2xliff'), ('convert', 'xliff2oo'), ('convert', 'prop2po'), ('convert', 'po2prop'), ('convert', 'csv2po'), ('convert', 'po2csv'), ('convert', 'txt2po'), ('convert', 'po2txt'), ('convert', 'ts2po'), ('convert', 'po2ts'), ('convert', 'html2po'), ('convert', 'po2html'), ('convert', 'ical2po'), ('convert', 'po2ical'), ('convert', 'ini2po'), ('convert', 'po2ini'), ('convert', 'json2po'), ('convert', 'po2json'), ('convert', 'tiki2po'), ('convert', 'po2tiki'), ('convert', 'php2po'), ('convert', 'po2php'), ('convert', 'rc2po'), ('convert', 'po2rc'), ('convert', 'xliff2po'), ('convert', 'po2xliff'), ('convert', 'sub2po'), ('convert', 'po2sub'), ('convert', 'symb2po'), ('convert', 'po2symb'), ('convert', 'po2tmx'), ('convert', 'po2wordfast'), ('convert', 'csv2tbx'), ('convert', 'odf2xliff'), ('convert', 'xliff2odf'), ('convert', 'web2py2po'), ('convert', 'po2web2py'), ('filters', 'pofilter'), ('tools', 'pocompile'), ('tools', 'poconflicts'), ('tools', 'pocount'), ('tools', 'podebug'), ('tools', 'pogrep'), ('tools', 'pomerge'), ('tools', 'porestructure'), ('tools', 'posegment'), ('tools', 'poswap'), ('tools', 'poclean'), ('tools', 'poterminology'), ('tools', 'pretranslate'), ('services', 'tmserver'), ('tools', 'build_tmdb')] translatebashscripts = [apply(join, ('tools', ) + (script, )) for script in [ 'pomigrate2', 'pocompendium', 'posplit', 'popuretext', 'poreencode', 'pocommentclean', 'junitmsgfmt', ]] def addsubpackages(subpackages): for subpackage in subpackages: initfiles.append((join(sitepackages, 'translate', subpackage), [join('translate', subpackage, '__init__.py')])) for infofile in ('README', 'TODO'): infopath = join('translate', subpackage, infofile) if os.path.exists(infopath): infofiles.append((join(sitepackages, 'translate', subpackage), [infopath])) packages.append("translate.%s" % subpackage) class build_exe_map(build_exe): """distutils py2exe-based class that builds the exe file(s) but allows mapping data files""" def reinitialize_command(self, command, reinit_subcommands=0): if command == "install_data": install_data = build_exe.reinitialize_command(self, command, reinit_subcommands) install_data.data_files = 
self.remap_data_files(install_data.data_files) return install_data return build_exe.reinitialize_command(self, command, reinit_subcommands) def remap_data_files(self, data_files): """maps the given data files to different locations using external map_data_file function""" new_data_files = [] for f in data_files: if type(f) in (str, unicode): f = map_data_file(f) else: datadir, files = f datadir = map_data_file(datadir) if datadir is None: f = None else: f = datadir, files if f is not None: new_data_files.append(f) return new_data_files class InnoScript: """class that builds an InnoSetup script""" def __init__(self, name, lib_dir, dist_dir, exe_files = [], other_files = [], install_scripts = [], version = "1.0"): self.lib_dir = lib_dir self.dist_dir = dist_dir if not self.dist_dir.endswith(os.sep): self.dist_dir += os.sep self.name = name self.version = version self.exe_files = [self.chop(p) for p in exe_files] self.other_files = [self.chop(p) for p in other_files] self.install_scripts = install_scripts def getcompilecommand(self): try: import _winreg compile_key = _winreg.OpenKey(_winreg.HKEY_CLASSES_ROOT, "innosetupscriptfile\\shell\\compile\\command") compilecommand = _winreg.QueryValue(compile_key, "") compile_key.Close() except: compilecommand = 'compil32.exe "%1"' return compilecommand def chop(self, pathname): """returns the path relative to self.dist_dir""" assert pathname.startswith(self.dist_dir) return pathname[len(self.dist_dir):] def create(self, pathname=None): """creates the InnoSetup script""" if pathname is None: self.pathname = os.path.join(self.dist_dir, self.name + os.extsep + "iss").replace(' ', '_') else: self.pathname = pathname # See http://www.jrsoftware.org/isfaq.php for more InnoSetup config options. ofi = self.file = open(self.pathname, "w") print >> ofi, "; WARNING: This script has been created by py2exe. Changes to this script" print >> ofi, "; will be overwritten the next time py2exe is run!" 
print >> ofi, r"[Setup]" print >> ofi, r"AppName=%s" % self.name print >> ofi, r"AppVerName=%s %s" % (self.name, self.version) print >> ofi, r"DefaultDirName={pf}\%s" % self.name print >> ofi, r"DefaultGroupName=%s" % self.name print >> ofi, r"OutputBaseFilename=%s-%s-setup" % (self.name, self.version) print >> ofi, r"ChangesEnvironment=yes" print >> ofi print >> ofi, r"[Files]" for path in self.exe_files + self.other_files: print >> ofi, r'Source: "%s"; DestDir: "{app}\%s"; Flags: ignoreversion' % (path, os.path.dirname(path)) print >> ofi print >> ofi, r"[Icons]" print >> ofi, r'Name: "{group}\Documentation"; Filename: "{app}\doc\index.html";' print >> ofi, r'Name: "{group}\Translate Toolkit Command Prompt"; Filename: "cmd.exe"' print >> ofi, r'Name: "{group}\Uninstall %s"; Filename: "{uninstallexe}"' % self.name print >> ofi print >> ofi, r"[Registry]" # TODO: Move the code to update the Path environment variable to a Python script which will be invoked by the [Run] section (below) print >> ofi, r'Root: HKCU; Subkey: "Environment"; ValueType: expandsz; ValueName: "Path"; ValueData: "{reg:HKCU\Environment,Path|};{app};"' print >> ofi if self.install_scripts: print >> ofi, r"[Run]" for path in self.install_scripts: print >> ofi, r'Filename: "{app}\%s"; WorkingDir: "{app}"; Parameters: "-install"' % path print >> ofi print >> ofi, r"[UninstallRun]" for path in self.install_scripts: print >> ofi, r'Filename: "{app}\%s"; WorkingDir: "{app}"; Parameters: "-remove"' % path print >> ofi ofi.close() def compile(self): """compiles the script using InnoSetup""" shellcompilecommand = self.getcompilecommand() compilecommand = shellcompilecommand.replace('"%1"', self.pathname) result = os.system(compilecommand) if result: print "Error compiling iss file" print "Opening iss file, use InnoSetup GUI to compile manually" os.startfile(self.pathname) class build_installer(build_exe_map): """distutils class that first builds the exe file(s), then creates a Windows installer using InnoSetup""" description = "create an executable installer for MS Windows using InnoSetup and py2exe" user_options = getattr(build_exe, 'user_options', []) + \ [('install-script=', None, "basename of installation script to be run after installation or before deinstallation")] def initialize_options(self): build_exe.initialize_options(self) self.install_script = None def run(self): # First, let py2exe do it's work. build_exe.run(self) lib_dir = self.lib_dir dist_dir = self.dist_dir # create the Installer, using the files py2exe has created. exe_files = self.windows_exe_files + self.console_exe_files install_scripts = self.install_script if isinstance(install_scripts, (str, unicode)): install_scripts = [install_scripts] script = InnoScript(PRETTY_NAME, lib_dir, dist_dir, exe_files, self.lib_files, version=self.distribution.metadata.version, install_scripts=install_scripts) print "*** creating the inno setup script***" script.create() print "*** compiling the inno setup script***" script.compile() # Note: By default the final setup.exe will be in an Output subdirectory. 
def import_setup_module(modulename, modulepath): import imp modfile, pathname, description = imp.find_module(modulename, [modulepath]) return imp.load_module(modulename, modfile, pathname, description) def map_data_file (data_file): """remaps a data_file (could be a directory) to a different location This version gets rid of Lib\\site-packages, etc""" data_parts = data_file.split(os.sep) if data_parts[:2] == ["Lib", "site-packages"]: data_parts = data_parts[2:] if data_parts: data_file = os.path.join(*data_parts) else: data_file = "" if data_parts[:1] == ["translate"]: data_parts = data_parts[1:] if data_parts: data_file = os.path.join(*data_parts) else: data_file = "" return data_file def getdatafiles(): datafiles = initfiles + infofiles def listfiles(srcdir): return join(sitepackages, srcdir), [join(srcdir, f) for f in os.listdir(srcdir) if os.path.isfile(join(srcdir, f))] docfiles = [] for subdir in ['doc', 'share']: docwalk=os.walk(os.path.join('translate', subdir)) for docs in docwalk: if not '.svn' in docs[0]: docfiles.append(listfiles(docs[0])) datafiles += docfiles return datafiles def buildinfolinks(): linkfile = getattr(os, 'symlink', None) linkdir = getattr(os, 'symlink', None) import shutil if linkfile is None: linkfile = shutil.copy2 if linkdir is None: linkdir = shutil.copytree basedir = os.path.abspath(os.curdir) os.chdir("translate") if os.path.exists("LICENSE") or os.path.islink("LICENSE"): os.remove("LICENSE") linkfile("COPYING", "LICENSE") os.chdir(basedir) for infofile in ["COPYING", "README", "LICENSE"]: if os.path.exists(infofile) or os.path.islink(infofile): os.remove(infofile) linkfile(os.path.join("translate", infofile), infofile) def buildmanifest_in(file, scripts): """This writes the required files to a MANIFEST.in file""" print >>file, "# MANIFEST.in: the below autogenerated by setup.py from translate %s" % translateversion print >>file, "# things needed by translate setup.py to rebuild" print >>file, "# informational files" for infofile in ("README", "TODO", "ChangeLog", "COPYING", "LICENSE", "*.txt"): print >>file, "global-include %s" % infofile print >>file, "# C programs" print >>file, "global-include *.c" print >> file, "# scripts which don't get included by default in sdist" for scriptname in scripts: print >>file, "include %s" % scriptname print >> file, "# include our documentation" print >> file, "graft translate/doc" print >> file, "graft translate/share" # wordlist, portal are in the source tree but unconnected to the python code print >>file, "prune wordlist" print >>file, "prune spelling" print >>file, "prune lingua" print >>file, "prune Pootle" print >>file, "prune pootling" print >>file, "prune virtaal" print >>file, "prune spelt" print >>file, "prune corpuscatcher" print >>file, "prune amagama" print >>file, "prune .svn" print >>file, "# MANIFEST.in: the above autogenerated by setup.py from translate %s" % translateversion class TranslateDistribution(Distribution): """a modified distribution class for translate""" def __init__(self, attrs): baseattrs = {} py2exeoptions = {} py2exeoptions["packages"] = ["translate", "encodings"] py2exeoptions["compressed"] = True py2exeoptions["excludes"] = ["PyLucene", "Tkconstants", "Tkinter", "tcl", "enchant", #We need to do more to support spell checking on Windows # strange things unnecessarily included with some versions of pyenchant: "win32ui", "_win32sysloader", "win32pipe", "py2exe", "win32com", "pywin", "isapi", "_tkinter", "win32api", ] version = attrs.get("version", translateversion) 
py2exeoptions["dist_dir"] = "translate-toolkit-%s" % version py2exeoptions["includes"] = ["lxml", "lxml._elementpath", "psyco"] options = {"py2exe": py2exeoptions} baseattrs['options'] = options if py2exe: baseattrs['console'] = translatescripts baseattrs['zipfile'] = "translate.zip" baseattrs['cmdclass'] = {"py2exe": build_exe_map, "innosetup": build_installer} options["innosetup"] = py2exeoptions.copy() options["innosetup"]["install_script"] = [] baseattrs.update(attrs) Distribution.__init__(self, baseattrs) def standardsetup(name, version, custompackages=[], customdatafiles=[]): buildinfolinks() # TODO: make these end with .py ending on Windows... try: manifest_in = open("MANIFEST.in", "w") buildmanifest_in(manifest_in, translatescripts + translatebashscripts) manifest_in.close() except IOError, e: print >> sys.stderr, "warning: could not recreate MANIFEST.in, continuing anyway. Error was %s" % e addsubpackages(subpackages) datafiles = getdatafiles() ext_modules = [] dosetup(name, version, packages + custompackages, datafiles + customdatafiles, translatescripts+ translatebashscripts, ext_modules) classifiers = [ "Development Status :: 5 - Production/Stable", "Environment :: Console", "Intended Audience :: Developers", "License :: OSI Approved :: GNU General Public License (GPL)", "Programming Language :: Python", "Topic :: Software Development :: Localization", "Topic :: Software Development :: Libraries :: Python Modules", "Operating System :: OS Independent", "Operating System :: Microsoft :: Windows", "Operating System :: Unix" ] def dosetup(name, version, packages, datafiles, scripts, ext_modules=[]): long_description = __doc__ description = __doc__.split("\n", 1)[0] setup(name=name, version=version, license="GNU General Public License (GPL)", description=description, long_description=long_description, author="Translate.org.za", author_email="[email protected]", url="http://translate.sourceforge.net/wiki/toolkit/index", download_url="http://sourceforge.net/project/showfiles.php?group_id=91920&package_id=97082", platforms=["any"], classifiers=classifiers, packages=packages, data_files=datafiles, scripts=scripts, ext_modules=ext_modules, distclass=TranslateDistribution ) if __name__ == "__main__": standardsetup("translate-toolkit", translateversion)
The Center for Early Childhood Professional Development (CECPD) reflects Oklahoma's commitment to quality early care and education by providing professional development that produces results! Created in 1998 using federal child development block grant funds, the Center supports the individuals who work in licensed child care facilities throughout Oklahoma. The settings include Head Start programs, family child care homes, child care centers, and pre-Kindergarten. Major funding is provided by the Oklahoma Department of Human Services, Oklahoma Child Care Services (OCCS), and supports numerous professional development opportunities for child care providers and teachers. Opportunities include the Leadership Academy (for directors and administrators), specialized training, Child Care Careers, online classes, a professional development registry, Entry Level Child Care Training, Directors’ Entry Level Training, Quest for Quality: Early Learning Guidelines Infants, Toddlers, and Two’s, Quest for Quality: Early Learning Guidelines for Preschoolers, Environment Rating Scales Assessment Services, and a Video Lending Library. The professional development assists teachers and directors as they work toward success in the Reaching for the Stars tiered reimbursement program, which enables programs that seek two-star or three-star status to receive a higher reimbursement rate for children whose families receive child care assistance through the Oklahoma Department of Human Services (OKDHS). In addition, over $12 million in Early Reading First funds has been awarded to CECPD by the U.S. Department of Education since 2002. These funds have been used to 1) create centers and classrooms of educational excellence, 2) prepare 3- and 4-year-olds for Kindergarten with the language and literacy skills necessary for academic success, and 3) develop and implement a model of professional development that emphasizes five components: appropriate environment, scientifically based curriculum, progress monitoring, professional development, and relational coaching. Over 250 teachers and thousands of preschool-age children and their families have been positively affected by the Early Reading First program. The research data from this successful intervention program suggest that our Early Reading First children are not only ready for Kindergarten but exceed state standards and requirements. Early Reading First has been implemented in Head Start sites, private child care centers, and public schools throughout Oklahoma. New to CECPD are the Instructional Coaching Institute, Coaching Innovations, and Quest for Quality: Early Learning Guidelines (Q2: ELG). The Instructional Coaching Institute is a three-day, intensive professional development program specifically for instructional classroom coaches and includes strategies and techniques designed to take coaching to the next level. The Coaching Institute has been presented nationwide and is available on DVD. Coaching Innovations is a continuation of the Coaching Institute and provides targeted, individualized, on-site professional development for instructional coaches. In addition, classroom teachers receive professional development in early literacy foundations for children from birth to Kindergarten. The literacy sessions are also available online. Training programs meet DHS requirements, and specific training can be applied toward the Child Development Associate and Certified Child Care Professional national child care credentials.
# Copyright (c) 2017-2021 Fumito Hamamura <[email protected]> # This library is free software: you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation version 3. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see <http://www.gnu.org/licenses/>. # The source code in this file is modified from: # https://github.com/baoboa/pyqt5/blob/master/examples/itemviews/simpletreemodel/simpletreemodel.py # See below for the original copyright notice. ############################################################################# ## ## Copyright (C) 2013 Riverbank Computing Limited. ## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies). ## All rights reserved. ## ## This file is part of the examples of PyQt. ## ## $QT_BEGIN_LICENSE:BSD$ ## You may use this file under the terms of the BSD license as follows: ## ## "Redistribution and use in source and binary forms, with or without ## modification, are permitted provided that the following conditions are ## met: ## * Redistributions of source code must retain the above copyright ## notice, this list of conditions and the following disclaimer. ## * Redistributions in binary form must reproduce the above copyright ## notice, this list of conditions and the following disclaimer in ## the documentation and/or other materials provided with the ## distribution. ## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor ## the names of its contributors may be used to endorse or promote ## products derived from this software without specific prior written ## permission. ## ## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE." 
## $QT_END_LICENSE$ ## ############################################################################# import itertools from qtpy.QtCore import QAbstractItemModel, QModelIndex, Qt class BaseItem(object): """Base Item class for all tree item classes.""" def __init__(self, data, parent=None): self.colType = 1 self.colParam = 2 self.parentItem = parent self.itemData = None self.childItems = [] self.updateData(data) def updateData(self, data): if self.itemData != data: self.itemData = data self.updateChild() else: self.itemData = data def updateChild(self): raise NotImplementedError def changeParent(self, parent): self.parentItem = parent def appendChild(self, item): item.changeParent(self) self.childItems.append(item) def insertChild(self, index, item): item.changeParent(self) self.childItems.insert(index, item) def child(self, row): return self.childItems[row] def childCount(self): return len(self.childItems) def columnCount(self): return 3 def data(self, column): if column == 0: return self.itemData["name"] elif column == self.colType: return self.getType() elif column == self.colParam: return self.getParams() else: raise IndexError def parent(self): return self.parentItem def row(self): if self.parentItem: return self.parentItem.childItems.index(self) return 0 def getType(self): raise NotImplementedError def getParams(self): raise NotImplementedError class InterfaceItem(BaseItem): """Object item, such as Model, Space, Cells""" @property def objid(self): return self.itemData["id"] def __eq__(self, other): if isinstance(other, InterfaceItem): return self.objid == other.objid else: return False def __hash__(self): return hash(self.objid) class ViewItem(BaseItem): @property def attrid(self): return self.getType() def __eq__(self, other): if isinstance(other, ViewItem): return ( self.parent() == other.parent() and self.attrid == other.attrid ) def __hash__(self): return hash((self.parent().objid, self.attrid)) class SpaceContainerItem(InterfaceItem): """Base Item class for Models and Spaces which inherit SpaceContainer.""" def updateChild(self): self.childItems = self.newChildItems(self.itemData) def newChildItems(self, data): return [ SpaceItem(space, self) for space in data["spaces"]["items"].values() ] class ModelItem(SpaceContainerItem): """Item class for a Model (root item)""" def __init__(self, data): super(ModelItem, self).__init__(data, parent=None) def getType(self): return "Model" def getParams(self): return "" class SpaceItem(SpaceContainerItem): """Item class for Space objects.""" def updateChild(self): self.childItems.clear() for space in self.itemData["named_spaces"]["items"].values(): self.childItems.append(SpaceItem(space, self)) dynspaces = self.itemData["named_itemspaces"]["items"] if len(dynspaces) > 0: self.childItems.append(DynamicSpaceMapItem(dynspaces, self)) cellsmap = self.itemData["cells"]["items"] for cells in cellsmap.values(): self.childItems.append(CellsItem(cells, self)) def getType(self): return "Space" def getParams(self): if "argvalues" in self.itemData: args = self.itemData["argvalues"] if args is not None: return args else: return "" else: return "" class DynamicSpaceMapItem(ViewItem): """Item class for parent nodes of dynamic spaces of a space.""" def updateChild(self): self.childItems.clear() for space in self.itemData.values(): self.childItems.append(SpaceItem(space, self)) def data(self, column): if column == 0: return "Dynamic Spaces" else: return BaseItem.data(self, column) def getType(self): return "" def getParams(self): return 
self.parent().itemData["params"] class CellsItem(InterfaceItem): """Item class for cells objects.""" def updateChild(self): pass def getType(self): return "Cells" def getParams(self): return self.itemData["params"] class ModelTreeModel(QAbstractItemModel): def __init__(self, data, parent=None): super(ModelTreeModel, self).__init__(parent) self.rootItem = ModelItem(data) def updateRoot(self, data): newmodel = ModelItem(data) self.updateItem(QModelIndex(), newmodel) def getItem(self, index): if not index.isValid(): return self.rootItem else: return index.internalPointer() def updateItem(self, index, newitem, recursive=True): if not index.isValid(): item = self.rootItem else: item = index.internalPointer() if item.itemData != newitem.itemData: item.itemData = newitem.itemData # self.dataChanged.emit(index, index) delItems = set(item.childItems) - set(newitem.childItems) if delItems: delRows = sorted([item.row() for item in delItems]) delRows = [ list(g) for _, g in itertools.groupby( delRows, key=lambda n, c=itertools.count(): n - next(c) ) ] for rows in delRows: self.removeRows(rows[0], len(rows), index) addItems = set(newitem.childItems) - set(item.childItems) if addItems: addRows = sorted([item.row() for item in addItems]) addRows = [ list(g) for _, g in itertools.groupby( addRows, key=lambda n, c=itertools.count(): n - next(c) ) ] for rows in addRows: self.insertRows(rows, newitem, index) self.reorderChild(index, newitem) if recursive: for row, child in enumerate(item.childItems): child_index = self.index(row, 0, index) self.updateItem(child_index, newitem.childItems[row]) def insertRows(self, rows, newitem, parent): # Signature is different from the base method. item = self.getItem(parent) self.beginInsertRows(parent, rows[0], rows[-1]) for row in rows: item.insertChild(row, newitem.childItems[row]) self.endInsertRows() def removeRows(self, position, rows, parent=QModelIndex()): item = self.getItem(parent) self.beginRemoveRows(parent, position, position + rows - 1) for row in range(position, position + rows): item.childItems.pop(row) self.endRemoveRows() def reorderChild(self, parent, newitem): """Reorder a list to match target by moving a sequence at a time. Written for QtAbstractItemModel.moveRows. 
""" source = self.getItem(parent).childItems target = newitem.childItems i = 0 while i < len(source): if source[i] == target[i]: i += 1 continue else: i0 = i j0 = source.index(target[i0]) j = j0 + 1 while j < len(source): if source[j] == target[j - j0 + i0]: j += 1 continue else: break self.moveRows(parent, i0, j0, j - j0) i += j - j0 def moveRows(self, parent, index_to, index_from, length): """Move a sub sequence in a list index_to must be smaller than index_from """ source = self.getItem(parent).childItems self.beginMoveRows( parent, index_from, index_from + length - 1, parent, index_to ) sublist = [source.pop(index_from) for _ in range(length)] for _ in range(length): source.insert(index_to, sublist.pop()) self.endMoveRows() @property def modelid(self): if self.rootItem: return self.rootItem.objid else: return None def columnCount(self, parent): if parent.isValid(): return parent.internalPointer().columnCount() else: return self.rootItem.columnCount() def data(self, index, role): if not index.isValid(): return None if role != Qt.DisplayRole: return None item = index.internalPointer() return item.data(index.column()) def flags(self, index): if not index.isValid(): return Qt.NoItemFlags return Qt.ItemIsEnabled | Qt.ItemIsSelectable def headerData(self, section, orientation, role): if orientation == Qt.Horizontal and role == Qt.DisplayRole: # TODO: Refactor hard-coding column indexes if section == 0: return "Objects" elif section == 1: return "Type" elif section == 2: return "Parameters" return None def index(self, row, column, parent): if not self.hasIndex(row, column, parent): return QModelIndex() if not parent.isValid(): parentItem = self.rootItem else: parentItem = parent.internalPointer() childItem = parentItem.child(row) if childItem: return self.createIndex(row, column, childItem) else: return QModelIndex() def parent(self, index): if not index.isValid(): return QModelIndex() childItem = index.internalPointer() parentItem = childItem.parent() if parentItem is None or parentItem == self.rootItem: return QModelIndex() return self.createIndex(parentItem.row(), 0, parentItem) def rowCount(self, parent): if parent.column() > 0: return 0 if not parent.isValid(): parentItem = self.rootItem else: parentItem = parent.internalPointer() return parentItem.childCount()
def mode():
    # Note: grabSetting(), mode2() and file_name are referenced here but
    # defined elsewhere in the surrounding script.
    mode = raw_input(
        "The three available classification modes you can select:\n" +
        "1. set all grabbed tweets as category 1 and grab category 2 by a new (opposite) tag \n" +
        "2. classify grabbed tweets by selecting key words \n" +
        "3. classify grabbed tweets one by one manually \n" +
        "select the mode you want by typing the corresponding number: "
    ) or "2"
    if mode == "1":
        file_name2, pw2 = grabSetting()
    if mode == "2":
        words_category1 = raw_input("Type in the key words required for a tweet to be classified as category 1 "
                                    "(separate alternative words by \"or\"): ") or "good"
        words_category2 = raw_input("Type in the key words required for a tweet to be classified as category 2 "
                                    "(separate alternative words by \"or\"): ") or "bad"
        mode2(file_name, words_category1, words_category2)
    if mode == "3":
        judge = raw_input("For each tweet displayed, type \"1\" for category 1, \"2\" for category 2, "
                          "\"0\" to skip and \"q\" to stop labeling: ")
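# Hypothetical sketch of the missing pieces (names and behavior assumed, not
# from the original script), just to show how mode() would be driven:
#
#   def grabSetting():
#       return "tweets.txt", "password"   # source file and credentials
#
#   def mode2(file_name, words1, words2):
#       pass  # label each tweet in file_name by keyword match
#
#   if __name__ == "__main__":
#       mode()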
Democratic Leader Rep. Darren Jackson spoke against a constitutional amendment Wednesday that would reallocate power to the General Assembly to appoint judicial vacancies. Exactly three-fifths of House lawmakers passed the second reading of a constitutional amendment Wednesday that would give them more power to appoint judicial vacancies. Senate Bill 814 has already passed the Senate, and if it passes a third reading in the House, it will be put before the voters on the November election ballot. Lawmakers debated the bill on the House floor Wednesday and then voted 72-48 to pass it. Democratic Leader Rep. Darren Jackson (D-Wake) objected to the third reading and it remained on the calendar to be taken up at a later time. He and other Democrats called the amendment a sham and said the wording was crafted to be attractive to the public but doesn’t actually have any teeth to take politics out of the vacancy appointment process. “You can use the language to mislead the public in this state, but the truth is, this amendment requires nothing,” Jackson said. The constitutional amendment takes away the current power of the Governor to appoint judicial vacancies and gives it mostly to the legislature. Members of the public could nominate people to be vetted by a judicial “merit” commission, but lawmakers could determine the composition and function of that commission — after voters approve the amendment and without them knowing the details first. Rep. Jonathan Jordan (R-Ashe) claimed multiple times on the floor that the amendment would not affect North Carolinians’ right to vote on judges. Jackson refuted him, noting that the amendment actually gives judicial vacancy appointments two additional years on the bench, which bolsters their incumbency title. “This is a legislative thunderbolt,” she said. She added that the local and state Bars are the ones who know how best to evaluate the qualifications of judges, and questioned what would happen if individuals appointed to the commission had their own run ins with the justice system and were dissatisfied. Rep. Robert Reives II (D-Chatham, Lee) also brought up a number of concerns with the amendment. He countered the Republican argument that lawmakers were more accountable to the people than the governor and said the current process was not as undemocratic as they make it out to be. “I need you to take this seriously about this, about what we’re doing … we are obliterating the separation of powers clause in the constitution,” he said. Ultimately, Rep. Justin Burr (R-Stanly, Montgomery), who has been the architect behind most of the legislation in the past year affecting the courts, got the last word and said he disagreed with his colleagues on the other side of the aisle. “This is setting up an open and transparent process,” he said. The House did vote last night 107-9 to pass Marsy’s Law, a constitutional amendment to expand the definition of victim and give victims of more crimes more rights to be heard in their cases, as well as a mechanism to intervene when they don’t feel satisfied with the process. The amendment will be put before North Carolinians for a vote on the November ballot. If they approve it, lawmakers can promulgate enacting legislation later that sorts out the details of how the amendment will work in practice. A new fiscal note released this week shows that the amendment will cost the state $8.8 million in Fiscal Year 2021-22 and $11.2 million the following year.
import re

def graphs_create_integers(input_dict):
    intStr = input_dict['intStr']
    intList = []
    for i in re.findall(r'\w+', intStr):
        try:
            intList.append(int(i))
        except ValueError:
            # skip tokens that are not integers
            pass
    if input_dict['sort'].lower() == "true":
        intList.sort()
    return {'intList': intList}

def graphs_sum_integers(input_dict):
    intList = input_dict['intList']
    return {'sum': sum(intList)}

def graphs_pre_filter_integers(input_dict):
    return input_dict

def graphs_post_filter_integers(postdata, input_dict, output_dict):
    intListOut = postdata['intListOut']
    intList = []
    for i in intListOut:
        try:
            intList.append(int(i))
        except ValueError:
            pass
    return {'intList': intList}

def graphs_pre_display_summation(input_dict):
    return {}

###########################################

def graphs_visualize_visjs(input_dict):
    return {}

def graphs_json2networkx(input_dict):
    from json import loads
    from networkx.readwrite import json_graph
    gtext = loads(input_dict['graph'])
    g = json_graph.node_link_graph(gtext)
    return {'nxgraph': g}
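# Quick example of the widget round trip defined above:
if __name__ == "__main__":
    out = graphs_create_integers({'intStr': '7, 3, 5', 'sort': 'true'})
    print(out)                       # {'intList': [3, 5, 7]}
    print(graphs_sum_integers(out))  # {'sum': 15}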
Sesame Seeds Roasting Machine: easy to operate. The microwave power and the conveyor speed can be steplessly adjusted; there is no thermal inertia, so the machine can be started and stopped at any time, is easy to control, and improves the production environment. The microwave equipment produces no waste heat radiation, no dust, no noise, and no pollution, making it easy to meet food hygiene testing standards. Low-temperature sterilization means less loss of nutrients: conventional heat treatment of fruits and vegetables retains 46%-50% of vitamin C, while microwave treatment can retain 60%-90%; for vitamin A, conventional heating retains 58% while microwave treatment can reach 84%, without affecting the original flavor, making it a good method for deep processing of fruits and vegetables into green foods. The Shandong Leader Machinery Co., Ltd. multi-purpose drum roasting machine is mainly used for baking or roasting peanuts, chestnuts, walnuts, almonds, swallow beans, coffee beans, seeds and other granular materials. It uses electric heating, fuel, gas, or coal as the heat source; the rotary drum works by heat conduction and heat radiation, and with coal as fuel the production costs are low. The material does not come into contact with the fire during roasting, so product quality, hygiene, and taste are good and can reach export standards. Temperature: up to 300 degrees, with automatic constant-temperature control. Packing: waterproof packing to international export standards in 20 ft, 40 ft, or 40 HP containers. Equipment fittings, the electric motor, and the power cabinet are packed in wooden cases or iron boxes; other equipment is packed in colour-striped plastic cloth.
from django.contrib.staticfiles.management.commands.runserver import \
    Command as RunServerCommand
from django.utils.autoreload import reloader_thread
from http import server as BaseHTTPServer
import threading
import os
import sys
import subprocess

# default port number where we would run the change reporting server
REFRESH_PORT = 32000

# prompts of our supported PDBs
PDB_PROMPTS = ["(Pdb) ", "ipdb> "]

# Global counter that will be incremented whenever a refresh is required
_needs_refresh = 0

# to hold the last _needs_refresh counter sent to the client
# we compare this against _needs_refresh to determine if the client
# needs to refresh itself
_last_refresh = 0

_refresh_port = REFRESH_PORT


class SilentHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """
    HTTP response handler, adapted from sample code in the Python Wiki.
    Suppresses the connection message which interferes with the default
    Django server messages. Probably can be made better, but I'm not
    going to bother for now.
    """
    def do_HEAD(s):
        s.send_response(200)
        s.send_header("Content-type", "text/json")
        s.end_headers()

    def do_GET(s):
        # GET returns a boolean indicating if browser page needs
        # to be refreshed
        global _needs_refresh
        global _last_refresh
        s.send_response(200)
        s.send_header("Content-type", "text/json")
        s.send_header("Access-Control-Allow-Origin", "*")
        s.end_headers()
        s.wfile.write(bytes('{ "changed": %d }\n' % s.needs_refresh(), 'utf-8'))
        _last_refresh = _needs_refresh

    def do_POST(s):
        '''POST can be used to force a refresh externally'''
        global _needs_refresh
        s.send_response(200)
        s.send_header("Content-type", "text/json")
        s.end_headers()
        _needs_refresh += 1
        # encode before writing; wfile expects bytes, not str
        s.wfile.write(bytes('{ "POST": 1, "changed": %d }\n'
                            % s.needs_refresh(), 'utf-8'))

    def needs_refresh(self):
        '''returns a boolean indicating if a refresh is required'''
        global _needs_refresh
        global _last_refresh
        return _needs_refresh != _last_refresh

    def log_request(self, *args, **kwargs):
        pass


def refresh_state_server():
    """
    A simple HTTP server that does just one thing: serves a JSON object
    with a single attribute indicating whether the development server has
    been reloaded and therefore the browser page requires refreshing.

    Extended to accept a POST request which forces the refresh flag.
    """
    httpd = BaseHTTPServer.HTTPServer(("127.0.0.1", _refresh_port),
                                      SilentHandler)
    try:
        sys.stdout.write("Starting auto refresh state server at 127.0.0.1:%d\n"
                         % _refresh_port)
        httpd.serve_forever()
    except KeyboardInterrupt:
        pass
    httpd.server_close()


class Command(RunServerCommand):
    """
    A customized version of the runserver command that spawns a secondary
    HTTP server which can be queried to check if the Django development
    server has been reloaded (and therefore the browser page needs refresh)
    """
    help = "Starts a lightweight Web server for development that serves static files and provides refresh status through a secondary HTTP server running at %d." % _refresh_port

    def add_arguments(self, parser):
        super(Command, self).add_arguments(parser)
        parser.add_argument('--refreshport', action='store', default=32000,
                            type=int,
                            help='Port number where the refresh server listens. Defaults to 32000')

    def run(self, **options):
        use_reloader = options.get('use_reloader')
        global _refresh_port
        _refresh_port = options.get('refreshport', REFRESH_PORT)

        if use_reloader:
            self.autoreload()
        else:
            self.inner_run(None, **options)

    def autoreload(self):
        """Copied from django.core.autoload.python_reloader"""
        if os.environ.get("RUN_MAIN") == "true":
            threading.Thread(target=self.inner_run).start()  # start http server
            try:
                #sys.stdout.write("Starting reloader_thread...\n")
                reloader_thread()  # poll source files for modifications
                                   # if modified, kill self
            except KeyboardInterrupt:
                pass
        else:
            try:
                exit_code = self.restart_with_reloader()
                if exit_code < 0:
                    os.kill(os.getpid(), -exit_code)
                else:
                    sys.exit(exit_code)
            except KeyboardInterrupt:
                pass

    def restart_with_reloader(self):
        """
        Differs from django.core.autoreload in that the _needs_refresh
        counter is incremented every time the development server is
        reloaded owing to detected file changes.
        """
        global _needs_refresh
        global _last_refresh

        # start the internal HTTP server that will serve the refresh
        # poll requests from our Chrome extension
        threading.Thread(target=refresh_state_server).start()

        while True:
            args = [sys.executable] + ['-u'] + \
                   ['-W%s' % o for o in sys.warnoptions] + sys.argv
            if sys.platform == "win32":
                args = ['"%s"' % arg for arg in args]
            new_environ = os.environ.copy()
            new_environ["RUN_MAIN"] = 'true'

            proc = subprocess.Popen(args, bufsize=1,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT,
                                    stdin=subprocess.PIPE,
                                    env=new_environ, close_fds=True)

            # We loop reading all the output of the child process
            # until it prints 'Quit the server with CONTROL'.
            # When that's done, we can be certain that the server
            # is fully initialized and ready to serve pages.
            while proc.returncode is None:
                line = proc.stdout.readline()
                if line:
                    # decode before printing so we don't emit bytes reprs
                    sys.stdout.write(line.decode('utf-8', errors='replace'))
                    if b'Quit the server with CONTROL' in line:
                        break
                proc.poll()

            # Since the development server is fully initialized, we can
            # now set the refresh state flag.
            sys.stdout.write("Development server reinitialized, setting refresh flag\n")
            _needs_refresh += 1

            # Here we're reading the output character by character rather
            # than line by line as done previously.
            # This is necessary for us to integrate with python debuggers
            # such as pdb and ipdb. When the child process is interrupted
            # by one of these two debuggers, it shows a prompt
            # and waits for user input. Since these prompts do not have a
            # terminating \n, readline would never return and we get a
            # non-intuitive user experience where inputs do not correspond
            # to pdb> prompts. Reading one character at a time allows us to
            # detect these prompts and then ask the user for input which we
            # can write back to the child process' stdin.
            line = ''
            while proc.returncode is None:
                char = proc.stdout.read(1).decode('utf-8', errors='replace')
                if char:
                    sys.stdout.write(char)
                    # Buffer the character until we hit newline or one of
                    # the recognized pdb prompts (PDB_PROMPTS)
                    if char != '\n':
                        line += char
                        if line in PDB_PROMPTS:  # keep checking if we hit pdb
                            # Child process has hit a pdb breakpoint.
                            # Read a command from stdin and write it to the
                            # child process' stdin
                            line = ''
                            command = input()
                            proc.stdin.write((command + '\n').encode('utf-8'))
                            proc.stdin.flush()
                    else:
                        line = ''
                proc.poll()  # will set proc.returncode if proc terminates

            sys.stdout.write("Development server terminated with exit code %d\n"
                             % proc.returncode)
            if proc.returncode != 3:
                return proc.returncode
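# ---------------------------------------------------------------------------
# Usage sketch (assumption, not part of the original module): installed as a
# Django management command, the development server plus the companion
# refresh-state server would be started with something like:
#
#   python manage.py runserver --refreshport 32001
#
# A client such as a browser extension can then poll the JSON flag:
#
#   curl http://127.0.0.1:32001/
#   { "changed": 0 }
#
# and POST to the same URL to force the flag on.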
Nancy Hadsell, PhD, was Professor of Music, with a focus on music therapy. She taught at Texas Woman’s University since 1984 and served as Coordinator of the Music Therapy Program from that time until 2016. Prior to her tenure at TWU, she served as Director of Music Therapy for Tennessee Technological University (1977-1981) and as a Music Therapist for the DeKalb-Rockdale Psycho-educational Center in DeKalb County, Georgia (1974-1977). She taught courses in the music therapy program at the graduate and undergraduate levels, along with Music and World Cultures, and Music Appreciation (distance learning). She served as a member of the Board of Directors for the Certification Board for Music Therapists (2000-2003 and 2005-2011), acting as Secretary/Treasurer for that organization. Hadsell is a past-president of the Southwestern Region of the American Music Therapy Association (2003-2005) and served as the SWAMTA representative to the AMTA Research Committee. She also represented the Southwestern, Midwestern, and Southeastern Regions on the AMTA Assembly of Delegates for many years and was a member of the Editorial Board for the Journal of Music Therapy from 2000 through 2006. Hadsell published articles on the effects of music listening on musicians and non-musicians, the uses of structure in music therapy practice, music therapy practicum, Autoharp® skills for music therapy, music therapy practice with children with Rett Syndrome, and private practice in music therapy. She presented at professional conferences on many topics related to the practice of music therapy, the CBMT Scope of Practice, legislative/regulatory affairs, clinical techniques in music therapy, behavior management strategies, and exam preparation using the CBMT Self Assessment Examination. She is listed in Who’s Who Among America’s Teachers (1996) and has been a member of Phi Beta Kappa since 1972. She was awarded the prestigious Humphries Award (2016) for Dedication to Texas Woman’s University. The Southwestern Region of AMTA awarded her the Lifetime Leadership Award at its regional conference in Austin in Spring 2016.
# Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved. import os.path import tempfile import shutil from cStringIO import StringIO import unittest import platform import Queue from collections import Counter import shutil import nose.tools import mock import PIL.Image import numpy as np from . import create_db as _ class BaseTest(): """ Provides some helpful files and utilities """ @classmethod def setUpClass(cls): cls.empty_file = tempfile.mkstemp() cls.empty_dir = tempfile.mkdtemp() # Create one good textfile cls.good_file = tempfile.mkstemp() # Create a color image cls.color_image_file = tempfile.mkstemp(suffix='.png') cls.numpy_image_color = np.ones((8,10,3), dtype='uint8') cls.pil_image_color = PIL.Image.fromarray(cls.numpy_image_color) cls.pil_image_color.save(cls.color_image_file[1]) # Create a grayscale image cls.gray_image_file = tempfile.mkstemp(suffix='.png') cls.numpy_image_gray = np.ones((8,10), dtype='uint8') cls.pil_image_gray = PIL.Image.fromarray(cls.numpy_image_gray) cls.pil_image_gray.save(cls.gray_image_file[1]) cls.image_count = 0 for i in xrange(3): for j in xrange(3): os.write(cls.good_file[0], '%s %s\n' % (cls.color_image_file[1], i)) os.write(cls.good_file[0], '%s %s\n' % (cls.gray_image_file[1], i)) cls.image_count += 2 @classmethod def tearDownClass(cls): for f in cls.empty_file, cls.good_file, cls.color_image_file, cls.gray_image_file: try: os.close(f[0]) os.remove(f[1]) except OSError: pass try: shutil.rmtree(cls.empty_dir) except OSError: raise class TestFillLoadQueue(BaseTest): def test_valid_file(self): for shuffle in True, False: yield self.check_valid_file, shuffle def check_valid_file(self, shuffle): queue = Queue.Queue() result = _._fill_load_queue(self.good_file[1], queue, shuffle) assert result == self.image_count, 'lines not added' assert queue.qsize() == self.image_count, 'queue not full' def test_empty_file(self): for shuffle in True, False: yield self.check_empty_file, shuffle def check_empty_file(self, shuffle): queue = Queue.Queue() nose.tools.assert_raises( _.BadInputFileError, _._fill_load_queue, self.empty_file[1], queue, shuffle) class TestParseLine(): def test_good_lines(self): for label, line in [ (0, '/path/image.jpg 0'), (1, 'image.jpg 1'), (2, 'image.jpg 2\n'), (3, 'image.jpg 3'), (4, 'spaces in filename.jpg 4'), ]: yield self.check_good_line, line, label def check_good_line(self, line, label): c = Counter() p, l = _._parse_line(line, c) assert l == label, 'parsed label wrong' assert c[l] == 1, 'distribution is wrong' def test_bad_lines(self): for line in [ 'nolabel.jpg', 'non-number.jpg five', 'negative.jpg -1', ]: yield self.check_bad_line, line def check_bad_line(self, line): nose.tools.assert_raises( _.ParseLineError, _._parse_line, line, Counter() ) class TestCalculateBatchSize(): def test(self): for count, batch_size in [ (1, 1), (50, 50), (100, 100), (200, 100), ]: yield self.check, count, batch_size def check(self, count, batch_size): assert _._calculate_batch_size(count) == batch_size class TestCalculateNumThreads(): def test(self): for batch_size, shuffle, num in [ (1000, True, 10), (1000, False, 1), (100, True, 10), (100, False, 1), (50, True, 7), (4, True, 2), (1, True, 1), ]: yield self.check, batch_size, shuffle, num def check(self, batch_size, shuffle, num): assert _._calculate_num_threads( batch_size, shuffle) == num class TestInitialImageSum(): def test_color(self): s = _._initial_image_sum(10, 10, 3) assert s.shape == (10, 10, 3) assert s.dtype == 'float64' def test_grayscale(self): s = 
_._initial_image_sum(10, 10, 1) assert s.shape == (10, 10) assert s.dtype == 'float64' class TestImageToDatum(BaseTest): def test(self): for compression in None, 'png', 'jpg': yield self.check_color, compression yield self.check_grayscale, compression def check_color(self, compression): d = _._array_to_datum(self.numpy_image_color, 1, compression) assert d.height == self.numpy_image_color.shape[0] assert d.width == self.numpy_image_color.shape[1] assert d.channels == 3 assert d.encoded == bool(compression) def check_grayscale(self, compression): d = _._array_to_datum(self.numpy_image_gray, 1, compression) assert d.height == self.numpy_image_gray.shape[0] assert d.width == self.numpy_image_gray.shape[1] assert d.channels == 1 assert d.encoded == bool(compression) class TestSaveMeans(): def test(self): for color in True, False: d = tempfile.mkdtemp() for filename in 'mean.jpg', 'mean.png', 'mean.npy', 'mean.binaryproto': yield self.check, d, filename, color shutil.rmtree(d) def check(self, directory, filename, color): filename = os.path.join(directory, filename) if color: s = np.ones((8,10,3),dtype='float64') else: s = np.ones((8,10),dtype='float64') _._save_means(s, 2, [filename]) assert os.path.exists(filename) class BaseCreationTest(BaseTest): def test_image_sizes(self): for width in 8, 12: for channels in 1, 3: yield self.check_image_sizes, width, channels, False def check_image_sizes(self, width, channels, shuffle): _.create_db(self.good_file[1], os.path.join(self.empty_dir, 'db'), width, 10, channels, self.BACKEND) def test_no_shuffle(self): _.create_db(self.good_file[1], os.path.join(self.empty_dir, 'db'), 10, 10, 1, self.BACKEND, shuffle=False) def test_means(self): mean_files = [] for suffix in 'jpg','npy','png','binaryproto': mean_files.append(os.path.join(self.empty_dir, 'mean.%s' % suffix)) _.create_db(self.good_file[1], os.path.join(self.empty_dir, 'db'), 10, 10, 1, self.BACKEND, mean_files=mean_files) class TestLmdbCreation(BaseCreationTest): BACKEND = 'lmdb' class TestHdf5Creation(BaseCreationTest): BACKEND = 'hdf5' def test_dset_limit(self): _.create_db(self.good_file[1], os.path.join(self.empty_dir, 'db'), 10, 10, 1, 'hdf5', hdf5_dset_limit=10*10)
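# Running these tests (sketch): they use nose-style yield-based test
# generators, so they are collected with the nose runner rather than plain
# unittest, e.g.:
#
#   nosetests -v <path-to-this-module>
#
# The relative import "from . import create_db as _" also means the module
# must be discovered as part of its package, not executed as a lone file.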
Retain maximum potency, as the spices are ground manually, which also ensures a fuller flavor. Hygienic: the ladies wear proper hand gloves, masks and aprons to clean and grind the spices. Chakki-made spices are ground at less than 100 RPM, retaining the micronutrients and aroma. No contamination, as only the best quality spices are used and they are cleaned manually. The chakki is made of traditional stones, ensuring the medicinal properties of the spices stay intact. High nutrient value: the spices are cold pressed, as everything is done manually and no electricity is used, so the spices don't lose their nutrient value. A machine, by contrast, is blind; it can grind anything and everything mixed in with the spices, like bones, dirt, metal pieces, mold, excreta, dead insects, rat hairs, wire, string and a list of other 'foreign matter'. Machine-ground spices may have other ingredients such as salt, rice or flour mixed in. They are ground at about 2,800 RPM (rotations per minute), which destroys the micronutrients and aroma, and they are not 'fresh', as they are produced in bulk and kept in grocery stores for months. The spices are also always in contact with metal, which can be hazardous to health. Get the best out of your food with ready-made chakki masala mixes for pav bhaji, paani puri and sambar, along with many other spices.
# -*- coding: utf-8 -*- from django.conf import settings try: from django.core.context_processors import csrf except ImportError: from django.template.context_processors import csrf from django.utils.translation import ugettext_lazy as _ from django.template import RequestContext from django.shortcuts import redirect from email_extras.utils import send_mail_template from forms_builder.forms.settings import EMAIL_FAIL_SILENTLY from forms_builder.forms.signals import form_invalid, form_valid from forms_builder.forms.utils import split_choices from fluent_contents.extensions import ContentPlugin, plugin_pool from .forms import FormForForm from .models import FormItem, Form @plugin_pool.register class FormPlugin(ContentPlugin): model = FormItem category = _('Form') render_template = "fluentcms_forms_builder/form.html" cache_output = False def get_context(self, request, instance, **kwargs): context = super(FormPlugin, self).get_context( request, instance, **kwargs) context.update(form=instance.form, **csrf(request)) return context def render(self, request, instance, **kwargs): context = self.get_context(request, instance, **kwargs) form = context['form'] if request.method == 'POST': form_for_form = FormForForm( form, RequestContext(request), request.POST, request.FILES or None) if not form_for_form.is_valid(): form_invalid.send(sender=request, form=form_for_form) else: attachments = [] for f in form_for_form.files.values(): f.seek(0) attachments.append((f.name, f.read())) entry = form_for_form.save() form_valid.send(sender=request, form=form_for_form, entry=entry) self.send_emails(request, form_for_form, form, entry, attachments) if not request.is_ajax() and form.redirect_url: return redirect(str(form.redirect_url)) return self.render_to_string(request, "fluentcms_forms_builder/form_sent.html", context) else: form_for_form = FormForForm(form, RequestContext(request)) context.update(form_for_form=form_for_form) return self.render_to_string(request, self.render_template, context) def send_emails(self, request, form_for_form, form, entry, attachments): subject = form.email_subject if not subject: subject = "%s - %s" % (form.title, entry.entry_time) fields = [] for (k, v) in form_for_form.fields.items(): value = form_for_form.cleaned_data[k] if isinstance(value, list): value = ", ".join([i.strip() for i in value]) fields.append((v.label, value)) context = { "fields": fields, "message": form.email_message, "request": request, } email_from = form.email_from or settings.DEFAULT_FROM_EMAIL email_to = form_for_form.email_to() if email_to and form.send_email: send_mail_template(subject, "form_response", email_from, email_to, context=context, fail_silently=EMAIL_FAIL_SILENTLY) headers = None if email_to: headers = {"Reply-To": email_to} email_copies = split_choices(form.email_copies) if email_copies: send_mail_template(subject, "form_response_copies", email_from, email_copies, context=context, attachments=attachments, fail_silently=EMAIL_FAIL_SILENTLY, headers=headers)
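# Integration sketch (assumption): with fluent_contents, a FormItem is placed
# into a page placeholder through the admin; fluent_contents then invokes
# FormPlugin.render() on each request, so a GET renders the bound form
# template and a POST validates, saves the entry, sends the notification
# mails, and redirects to form.redirect_url when one is set.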
Object Description Nisei Evacuees -- Their Challenge to Education by O.D. Richardson. Junior College Journal, published monthly by The American Association of Junior Colleges, Washington D.C. 11 pages, single sided. Paper, ink, metal. Notes Donated by Tashi Hori.
from collections import namedtuple from pybliometrics.scopus.superclasses import Retrieval from pybliometrics.scopus.utils import check_parameter_value, get_link class SerialTitle(Retrieval): @property def aggregation_type(self): """The type of the source.""" return self._entry['prism:aggregationType'] @property def citescoreyearinfolist(self): """A list of two tuples of the form (year, cite-score). The first tuple represents the current cite-score, the second tuple represents the tracker cite-score.""" try: d = self._entry['citeScoreYearInfoList'] except KeyError: return None current = (d['citeScoreCurrentMetricYear'], d['citeScoreCurrentMetric']) tracker = (d['citeScoreTrackerYear'], d['citeScoreTracker']) return [current, tracker] @property def eissn(self): """The electronic ISSN of the source.""" return self._entry.get('prism:eIssn') @property def issn(self): """The ISSN of the source.""" return self._entry.get('prism:issn') @property def oaallowsauthorpaid(self): """Whether under the Open-Access policy authors are allowed to pay.""" return self._entry.get('oaAllowsAuthorPaid') @property def openaccess(self): """Open Access status (0 or 1).""" return self._entry.get('openaccess') @property def openaccessstartdate(self): """Starting availability date.""" return self._entry.get('openaccessStartDate') @property def openaccesstype(self): """Open Archive status (full or partial).""" return self._entry.get('openaccessType') @property def openaccessarticle(self): """Open Access status (boolean).""" return self._entry.get('openaccessArticle') @property def openarchivearticle(self): """Open Archive status (boolean).""" return self._entry.get('openArchiveArticle') @property def openaccesssponsorname(self): """The name of the Open Access sponsor.""" return self._entry.get('openaccessSponsorName') @property def openaccesssponsortype(self): """The type of the Open Access sponsor.""" return self._entry.get('openaccessSponsorType') @property def openaccessuserlicense(self): """The User license.""" return self._entry.get('openaccessUserLicense') @property def publisher(self): """The publisher of the source.""" return self._entry['dc:publisher'] @property def scopus_source_link(self): """URL to info site on scopus.com.""" return get_link(self._entry, 0, ["link"]) @property def self_link(self): """URL to the source's API page.""" return get_link(self._json, 0, ["link"]) @property def sjrlist(self): """The SCImago Journal Rank (SJR) indicator as list of (year, indicator)-tuples. See https://www.scimagojr.com/journalrank.php. """ return _parse_list(self._entry, "SJR") @property def sniplist(self): """The Source-Normalized Impact per Paper (SNIP) as list of (year, indicator)-tuples. See https://blog.scopus.com/posts/journal-metrics-in-scopus-source-normalized-impact-per-paper-snip. """ return _parse_list(self._entry, "SNIP") @property def source_id(self): """The Scopus ID of the source.""" return self._entry['source-id'] @property def subject_area(self): """List of named tuples of subject areas in the form (area, abbreviation, code) of the source. """ area = namedtuple('Subjectarea', 'area abbreviation code') areas = [area(area=item['$'], code=item['@code'], abbreviation=item['@abbrev']) for item in self._entry["subject-area"]] return areas or None @property def title(self): """The title of the source.""" return self._entry['dc:title'] def __init__(self, issn, refresh=False, view="ENHANCED", years=None): """Interaction with the Serial Title API. 
Parameters ---------- issn : str or int The ISSN or the E-ISSN of the source. refresh : bool or int (optional, default=False) Whether to refresh the cached file if it exists or not. If int is passed, cached file will be refreshed if the number of days since last modification exceeds that value. view : str (optional, default="ENHANCED") The view of the file that should be downloaded. Allowed values: BASIC, STANDARD, ENHANCED. For details see https://dev.elsevier.com/sc_serial_title_views.html. years : str (optional, default=None) A string specifying a year or range of years (combining two years with a hyphen) for which yearly metric data (SJR, SNIP, yearly-data) should be looked up for. If None, only the most recent metric data values are provided. Note: If not None, refresh will always be True. Examples -------- See https://pybliometrics.readthedocs.io/en/stable/examples/SerialTitle.html. Notes ----- The directory for cached results is `{path}/{view}/{source_id}`, where `path` is specified in `~/.scopus/config.ini`. """ # Checks check_parameter_value(view, ('BASIC', 'STANDARD', 'ENHANCED'), "view") # Load json self._id = str(issn) self._years = years # Force refresh when years is specified if years: refresh = True Retrieval.__init__(self, identifier=self._id, view=view, date=years, api='SerialTitle', refresh=refresh) self._json = self._json['serial-metadata-response'] self._entry = self._json['entry'][0] def __str__(self): """Print a summary string.""" date = self.get_cache_file_mdate().split()[0] areas = [e.area for e in self.subject_area] if len(areas) == 1: areas = areas[0] else: areas = " and ".join([", ".join(areas[:-1]), areas[-1]]) s = f"'{self.title}', {self.aggregation_type} published by "\ f"'{self.publisher}', is active in {areas}\n" metrics = [] if self.sjrlist: metrics.append(f"SJR: year value") for rec in self.sjrlist: metrics.append(f" {rec[0]} {rec[1]}") if self.sniplist: metrics.append(f"SNIP: year value") for rec in self.sniplist: metrics.append(f" {rec[0]} {rec[1]}") if metrics: s += f"Metrics as of {date}:\n " + "\n ".join(metrics) + "\n" s += f" ISSN: {self.issn or '-'}, E-ISSN: {self.eissn or '-'}, "\ f"Scopus ID: {self.source_id}" return s def _parse_list(d, metric): """Auxiliary function to parse SNIP and SJR lists.""" try: values = [(r['@year'], r['$']) for r in d[metric + "List"][metric]] return sorted(set(values)) except (KeyError, TypeError): return None
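# Usage sketch (requires pybliometrics configured with a Scopus API key;
# the ISSN below, for Science magazine, is only an illustrative value):
if __name__ == "__main__":
    source = SerialTitle("00368075")
    print(source.title, "-", source.publisher)
    print(source.sjrlist)   # list of (year, SJR) tuples, or None
    print(source.sniplist)  # list of (year, SNIP) tuples, or None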
These are the legal pages and notices for officialgabbee.com, which is the owner/operator of the website https://officialgabbee.com. The following describes the Anti-Spam Policy for our https://officialgabbee.com website which is owned/operated by officialgabbee.com. Spam is unsolicited email, also known as junk mail (received via email), or UCE (Unsolicited Commercial Email). Virtually all of us have opened the inbox of an email account and found emails from an unknown sender. By sending email only to those who have requested to receive it, we at officialgabbee.com are following accepted permission-based email guidelines. Beyond that, we protect you by ensuring that you are 100% in control of whether or not you ever hear from officialgabbee.com by email initially or in the future, as detailed in our “No Tolerance” policy below. WE HAVE A NO TOLERANCE SPAM POLICY. We do not email unless someone has filled out an "opt in" form or "webform" expressing an interest in our information or products and/or services, or otherwise directly and proactively requesting it. News of the features and benefits of Membership is spread through advertising, joint venture marketing, and word of mouth, so we are only building relationship with folks who wish to learn more about what we have to offer and willingly subscribe to our content and contact through email. You are always completely in control of whether you receive email communication from officialgabbee.com, and can terminate at any time. NOTE - Every auto-generated email contains a mandatory unsubscribe link that cannot be removed. Therefore, each communication generated by officialgabbee.com carries with it the option to "unsubscribe" and never receive another email communication from officialgabbee.com. CHANGE NOTICE: As with any of our administrative and legal notice pages, the contents of this page can and will change over time. Accordingly, this page could read differently as of your very next visit. These changes are necessitated, and carried out by officialgabbee.com, in order to protect you and our officialgabbee.com website. If this page is important to you, you should check back frequently as no other notice of changed content will be provided either before or after the change takes effect. COPYRIGHT WARNING: The legal notices and administrative pages on this website, including this one, have been diligently drafted by an attorney. We at officialgabbee.com have paid to license the use of these legal notices and administrative pages on https://officialgabbee.com for your protection and ours. This material may not be used in any way for any reason and unauthorized use is policed via Copyscape to detect violators. You do not own rights to any article, book, ebook, document, blog post, software, application, add-on, plugin, art, graphics, images, photos, video, webinar, recording or other materials viewed or listened to through or from our officialgabbee.com website or via email or by way of protected content in a membership site. The posting of data on our website, such as a blog comment, does not change this fact and does not give you any right in the data. You surrender any rights to your content once it becomes part of our website. You are granted a nonexclusive, nontransferable, revocable license to use our officialgabbee.com website only for private, personal, noncommercial reasons. 
You may print and download portions of material from the different areas of the website solely for your own non-commercial use, provided that you agree not to change the content from its original form. Moreover, you agree not to modify or delete any copyright or proprietary notices from the materials you print or download from officialgabbee.com. Also note that any notice on any portion of our website that forbids printing & downloading trumps all prior statements and controls. As a user at officialgabbee.com, you agree to use the products and services offered by our website in a manner consistent with all applicable local, state and federal laws and regulations. No material shall be stored or transmitted which infringes or violates the rights of others, which is unlawful, obscene, profane, indecent or otherwise objectionable, threatening, defamatory, or invasive of privacy or publicity rights. Our website prohibits conduct that might constitute a criminal offense, give rise to civil liability or otherwise violate any law. Any activity that restricts or inhibits any other officialgabbee.com user from using the services of our website is also prohibited. Unless allowed by a written agreement, you may not post or transmit advertising or commercial solicitation on our website. We at officialgabbee.com are committed to responding to any alleged copyright violations, should they occur. Notice of any alleged violation should take the form proposed by the U.S. Digital Millennium Copyright Act as revealed at http://www.copyright.gov. If any material infringes on the copyright of any offended party, we may remove the content from officialgabbee.com, prevent access to it, terminate or block access for those responsible for the content, and/or any other action deemed appropriate. We may also pass along record of the incident for documentation and/or publication by third parties at our discretion. For your convenience and to speed resolution, notice of alleged infringement may be tendered to officialgabbee.com via email, using the email address and/or contact information provided on this website. We warn that you will be liable for any and all statutory and common law damages, as well as court costs and attorney fees, if you falsify a claim that your copyrights have been violated. Six figure awards have already been granted for bogus complaints, so seeking the help of competent counsel is advised. We make every effort to ensure that we accurately represent these products and services and their potential for income. Earning and Income statements made by officialgabbee.com and its customers are estimates of what we think you can possibly earn. There is no guarantee that you will make these levels of income and you accept the risk that the earnings and income statements differ by individual. We are not responsible for your actions. The use of our information, products and services should be based on your own due diligence and you agree that officialgabbee.com is not liable for any success or failure of your business that is directly or indirectly related to the purchase and use of our information, products and services. The following describes the Federal Trade Commission Compliance for our https://officialgabbee.com website which is owned and operated by officialgabbee.com. We make every effort regarding any products or services we use, recommend, or otherwise make mention of at officialgabbee.com. 
We strive to clearly differentiate between our own products or services versus those of third parties, to facilitate inquiries, support, and customer care. Likewise, just as we (and any other legitimate business) may profit from the sale of our own products or services, we may also profit from the sale of others’ products or services (like any retailer) at officialgabbee.com. Additionally, wherever products or services may give rise to income generation, we endeavor to provide realistic and factual data, but highlight the fact that the variables impacting results are so numerous and uncontrollable that no guarantees are in any way made. It is our goal to embrace the guidelines and requirements of the Federal Trade Commission (FTC) for the benefit of all, and with that in mind provide the following disclosures regarding compensation and disclaimer regarding earnings & income. One or more parties affiliated or associated with our officialgabbee.com website in some way may be an Amazon affiliate. This means that links to products on Amazon.com, as well as reviews leading to purchases, can result in a commission being earned. Again, disclosure of this material connection and the potential for compensation may not be made at every single possible opportunity. To be safe, simply assume there is a material connection and potential for compensation at all times. While this does not imply skewed or unduly biased reviews, full disclosure calls for this warning. You should assume that we may be compensated for purchases of products or services mentioned on this officialgabbee.com website that are not created, owned, licensed, or otherwise materially controlled by us. Stated differently, while most people obviously understand that individuals make a living by way of the profit that remains after the costs associated with providing their product or service are covered, at least theoretically there may be someone out there who does not understand that a third party can "affiliate" someone else's products or services and be compensated by the product or service creator/owner for helping spread the word about their offering. Just compare it to retailers. They seldom produce anything, but rather make their money connecting product and service creators with end users. First, just always operate from the position that any website proprietor, including us at officialgabbee.com, will have a material connection to the product or service provider, and may be compensated as a result of your purchase, unless expressly stated otherwise. Aside from your purchases, note that even you actions could result in earnings for this website. For instance, there could be ads displayed on this officialgabbee.com website that we are compensated for displaying whenever a website visitor clicks on them. Third, despite the fact that it would be counterproductive to mention products or services that you'll find disappointing or inferior, not only are people different, but it’s also possible for us to have a lapse in judgment. Thus, to be extra cautious, even if you believe in our good faith motives, you may as well go ahead and keep in mind that we could be at least partially influenced by the monetization factor of listing various products or services on our officialgabbee.com website. 
Furthermore, in that vein, the reality is that there are sometimes other connections between parties that are not monetary, such as personal capital, goodwill, or otherwise, that could be an underlying undercurrent swaying the decision to promote a particular offering. Due to this hypothetical possibility, you should again simply not rely solely on what we have to say, but rather form your own independent opinion, just to be safe. Finally, bear in mind that we might also receive free products or services, gifts, or review copies of items too. Testimonials regarding the outcome or performance of using any product or service are provided to enrich your understanding of the offering. While great effort is made to ensure that they are factually honest, we at officialgabbee.com are not liable for errors and omissions. Aside from human error, some information may be provided by third parties, such as customers or product/service providers. The best results are not uncommonly correlated with the best efforts, discipline, diligence, and so on, and thus the results depicted cannot, in any way, be construed as common, typical, expected, normal, or associated with the average user’s experience with any given product or service. Exceptional results may be depicted by our website as highlights, but you are responsible for understanding that atypical outcomes may not reflect your experience. Aside from market conditions, products and services change over time. Older products may lose effectiveness. Newer products may not have a reliable track record. Many products and services are designed to solve problems. Common problem areas include legal, financial, and medical. We are in no way purporting to counsel you on issues related to law, finances, or health. If you require guidance in these arenas, you should consider securing your own counsel from lawyers, accountants, tax professionals, investment advisors, or medical professionals before taking any action. Nothing we may ever communicate at officialgabbee.com, in print or spoken word, will ever be intended to constitute any such counsel, as we do not claim to be professionals in any of those disciplines. You assume all risk for actions taken, losses incurred, damages sustained, or other issues stemming from your use of any product or service in any way connected with or mentioned on this website. Indeed, such a decision is solely your own, or else determined in conjunction with the professional guidance of the advisor of your choosing. Income-producing products & services are likewise subject to the above cautions. In addition, however, there are further factors we like to point out at officialgabbee.com. Unlike weight loss products or self-help materials, income-producing methods are influenced by the overall health of the economy in which one operates. In times of liquidity, money flows freely and commerce is easier. In times of perceived scarcity, fear, recession, depression, or otherwise, commerce is stymied. Results can be influenced by market sentiment, just as stock market indices around the world are swayed heavily by news. Income-producing products & services purchased should be viewed as just that – purchases. Though they can be investments in one’s business, it is not unreasonable to expect that there may not be an express return on that investment, per se. Often, business success is the convergence of a number of factors, methods, strategies, and so on. It can be hard to peg success to one method or machination.
This does not necessarily undermine the value of any given product or service, as it can have an additive effect. Or, it may have no effect. Since it can be difficult to tell, you should operate on the assumption that your outcome could be zero. We make no guarantees and you should only risk what you can afford to lose on any purchases on or through officialgabbee.com. It should also be noted that we only have control over, and thus only accept responsibility for, the content of this officialgabbee.com website authored by us. Any representations made by others should be considered prima facie unauthorized. You may also read, hear, or otherwise come into contact with commentary about any of our products & services or offerings, and should assume those have likewise not been authorized. You should not construe a third-party offer as an endorsement by that third party of any product or service. You should, more conservatively, view it as an offer to buy something. Likewise, as alluded to previously, note that we cannot fully control all marketing practices by all parties. With the use of “mirror” sites, indirect or unauthorized affiliates, “tiered” affiliate structures, and so on, policing the world wide web with any modicum of thoroughness is simply not feasible. We make reasonable efforts to ensure our affiliates comply with our policies and represent our products & services consistent with our guidelines. However, at officialgabbee.com we cannot always guarantee they will do so. You are always free to report concerns or abuses via our Contact information. The author and publisher of officialgabbee.com and related communications, materials and/or products have used their best efforts in preparing such materials. The author and publisher make no representations or warranties with respect to the accuracy, applicability, fitness, or completeness of the contents of this website and related communications, materials and/or products. The information contained in this website and related communications, materials and/or products is strictly for educational purposes. Therefore, if you wish to apply ideas contained in this website and related communications, materials and/or products, you are taking full responsibility for your actions. The information on officialgabbee.com and related communications, materials and/or products should not be construed as specific advice; it is a limited review of the available scientific and empirical evidence. It is presented for the sole purpose of stimulating awareness and further investigation of important information that may help the reader achieve better health. This notice provides you with details of how we collect and process your personal data through your use of our site https://officialgabbee.com, including any information you may provide through our site when you purchase a product or service, sign up to our newsletter or take part in a prize draw or contest. officialgabbee.com is the data controller and we are responsible for your personal data (referred to as “we”, “us” or “our” in this privacy notice). It is very important that the information we hold about you is accurate and up to date. Please let us know if at any time your personal information changes by emailing us at [email protected] or by using the contact form at the foot of this page. officialgabbee.com uses Google Analytics to help analyse how users use the site.
The tool uses “cookies,” which are text files placed on your computer, to collect standard Internet log information and visitor behaviour information in an anonymous form. The information generated by the cookie about your use of the website (including your IP address) is transmitted to Google. This information is then used to evaluate visitors’ use of this website and to compile statistical reports on website activity. We may process your personal data for more than one lawful reason, depending on the specific purpose for which we are using your data. Please email us at [email protected] if you need details about the specific legal basis we are relying on to process your personal data where more than one purpose has been detailed below. You can ask us or third parties to stop sending you marketing messages at any time by following the opt-out links on any marketing message sent to you OR by emailing us at [email protected] at any time, or by using the contact form on this page. Please email us at [email protected] if you want further information on the specific mechanism used by us when transferring your personal data out of the EEA. You may post follow-up questions. If you have a question, chances are you are not alone. Others are likely thinking similarly. Therefore, I would rather receive your comments on officialgabbee.com than via email. It is a better use of my time to address everyone at once rather than answer several similar emails. officialgabbee.com reserves the right to delete your comments. This is my blog. I don’t have an obligation to publish your comments. The First Amendment gives you the right to express your opinions on your blog, not mine. You grant officialgabbee.com a license to post your comments. This license is worldwide, irrevocable, non-exclusive, and royalty-free. You grant me the right to store, use, transmit, display, publish, reproduce, and distribute your comments in any format, including but not limited to a blog, a book, a video, or a presentation. If you wish to exercise any of the rights set out above, please email us at [email protected]. The following describes the Social Media Disclosure for our officialgabbee.com website. We live in an interesting time when privacy rights are championed alongside an unprecedented voluntary willingness of people to share their most intimate and superfluous life details with the world, even in places such as our officialgabbee.com website. While apparently benign on the surface, the dangers of unrestrained public disclosure of sensitive information are beginning to surface. As social media rapidly advances to allow more technologically sophisticated and easy dissemination, the simultaneous fallout of revelation without boundaries is mounting. Thus, a sober approach to the benefits of social media, while sidestepping the perils of imprudent disclosure, can facilitate an enjoyable online experience, without the consequences of excess, in settings such as our own officialgabbee.com website. You should assume that social media is in use on our officialgabbee.com website. A simple click of a button to endorse a person, product, or service is building a cumulative profile about you, which you should always assume can be discovered by others.
When attempting to share a website with someone, whether by the direct press of a button or by email forwarding facilitated on a website, you should assume that this may not stop with the intended recipient, and that this can generate information about you that could be seen by a virtually infinite number of people. Such a domino effect could begin right here on our officialgabbee.com website. Something as simple as a blog comment provides the opportunity for knee-jerk reactions that can become public and may not truly represent a position (at least in strength or severity) that you might hold after a period of more reasoned contemplation. You should also note that the ease of accessing one site through the login credentials of another, or the use of a global login for access to multiple sites, can accumulate a dossier on you and your online behavior that may reveal more information to unintended parties than you might realize or want. Any or all of these features could exist on our officialgabbee.com website at one time or another. Ideal use of social media on our website would confine your disclosures primarily to matters pertaining to you, not others. If in doubt, it's best to err on the side of non-disclosure. It's doubtful the disclosure is so meaningful that it cannot be offset by the precaution of acting to protect the best interests of someone who is involuntarily being exposed by your decision to disclose something on our officialgabbee.com website (or another). You should likewise pause to consider the long-term effects of a split-second decision to publicly share private information about yourself on our officialgabbee.com website. Opinions, likes, dislikes, preferences, and otherwise can change. Perspectives that you openly divulge today may conflict with your developing views in the future. Yet, the "new you" will always stand juxtaposed against the prior declarations you made that are now concretized as part of your public profile. While the contents of your breakfast may hold little long-term impact, other data just as readily shared can have consequences that could conceivably impact your ability to obtain certain employment or hinder other life experiences and ambitions. You, as a visitor to our officialgabbee.com website, are not permitted to "mine" social media or other platforms contained herein for personal information related to others. Even where people have publicly displayed data, you should not construe that to mean you have the liberty to capture, reproduce, or reuse that information. Any use of social media or related platforms on our website is for interactive use only, relevant only during the website visit. The following describes the Terms of Service Conditions of Use for the https://officialgabbee.com website, which is owned and operated by officialgabbee.com. Our officialgabbee.com website (and other “internal” websites stemming from it, such as specific membership sites or webpages pertinent to the main website or weblog) is an online (and, periodically, offline) information service and is subject to your compliance with the terms and conditions set forth below (all parts and parties collectively referred to as our website). You agree to obey all applicable laws and regulations regarding your use of our officialgabbee.com website and the content and materials provided in it. Our officialgabbee.com website prohibits conduct that might constitute a criminal offense, give rise to civil liability or otherwise violate any law.
Any activity that restricts or inhibits any other user from using the services of our website is also prohibited. Unless allowed by a written agreement, you may not post or transmit advertising or commercial solicitation on our website. Publications, products, content or services referenced herein or on our website are the exclusive trademarks or service marks of our officialgabbee.com website or related parties. Other product and company names mentioned in our website may be the trademarks of their respective owners. Data contained on or made available through our officialgabbee.com website is not intended to be, and does not constitute, legal advice. Our website, and your use of it, does not create an attorney-client relationship. We do not warrant or guarantee the accuracy, adequacy, or recency of the data contained in or linked to our website. Your use of our officialgabbee.com website or materials linked to our website is completely at your own risk. You should not act or depend on any data on our website, where applicable, without seeking the counsel of a competent lawyer licensed to practice in your jurisdiction for your particular legal issues. You should not act or depend on any data on our website, where applicable, without seeking the counsel of a competent physician licensed to practice in your jurisdiction for your particular medical issues. You should not act or depend on any data on our website, where applicable, without seeking the counsel of a competent financial advisor licensed to practice in your jurisdiction for your particular financial needs and issues. Access to certain areas of our officialgabbee.com website is restricted. We reserve the right to restrict access to other areas of our website, or indeed our whole website, at our discretion. We may disable your user ID and password at our sole discretion or if you breach any of the policies or terms governing your use of our officialgabbee.com website or any other contractual obligation you owe to us. You also understand that our officialgabbee.com website cannot and does not guarantee or warrant that files available for downloading through our website will be free of infection or viruses, worms, Trojan horses or other code that manifests contaminating or destructive properties. You are responsible for implementing sufficient procedures and checkpoints to satisfy your particular requirements for accuracy of data input and output, and for maintaining a means external to our website for the reconstruction of any lost data. The content may contain inaccuracies or typographical errors. Our officialgabbee.com website makes no representations about the accuracy, reliability, completeness, or timeliness of the content or about the results to be obtained from using our website or the content on it. Use of our website and the content is at your own risk. Changes are periodically made to our website, and may be made at any time. Our website contains links to third-party websites. Our officialgabbee.com website makes no representations whatsoever about any other website which you may access through this one or which may link to this website. When you access a website from our website, please understand that it is independent from our website, and that our website has no control over the content on that website. These links are provided solely as a convenience to you and not as an endorsement by our website of the contents on such third-party websites.
Our website is not responsible for the content of linked third-party websites and does not make any representations regarding the content or accuracy of material on such third-party websites. If you decide to access linked third-party websites, you do so at your own risk. We do not necessarily endorse, recommend, suggest or otherwise make any overture or prompt for action regarding any product or service offered. You should assume we are compensated for any purchases you make. Again, any income claims should be construed as atypical results, and you assume the risk that inferior results obtain, including losses, for which we carry no responsibility or liability. As a user of our officialgabbee.com website, you are responsible for your own communications and are responsible for the consequences of their posting. You must not do the following things: post material that is copyrighted, unless you are the copyright owner or have the permission of the copyright owner to post it; post material that reveals trade secrets, unless you own them or have the permission of the owner; post material that infringes on any other intellectual property rights of others or on the privacy or publicity rights of others; post material that is obscene, profane, defamatory, threatening, harassing, abusive, hateful, or embarrassing to another user of our website or any other person or entity; post a sexually explicit image; post advertisements or solicitations of business; post chain letters or pyramid schemes; or impersonate another person. Our officialgabbee.com website does not necessarily screen communications in advance and is not responsible for screening or monitoring material posted by users of our website. If our website observes, or is notified by a user of, communications which allegedly do not conform to this agreement, our website may investigate the allegation and determine in good faith and in its sole discretion whether to remove or request the removal of the communication. Our website has no liability or responsibility to users of our website for performance or nonperformance of such activities. Our website reserves the right to expel users of our website and prevent their further access to our website for violating this agreement or any law or regulation, and also reserves the right to remove communications which are abusive, illegal, or disruptive. You agree to indemnify, defend and hold harmless our officialgabbee.com website, its members, officers, directors, employees, agents, licensors, suppliers and any third-party information providers to our website from and against all losses, expenses, damages and costs, including reasonable attorneys' fees, resulting from any use of our website or violation of this Agreement (including negligent or wrongful conduct) by you or any other person accessing our website. This Agreement, in whole or in part, may be terminated by officialgabbee.com without notice at any time for any reason. The provisions of paragraphs 1 (Copyright, Licenses and Idea Submissions), 2 (Use of the Service), 3 (Indemnification), 4 (Third Party Rights), 6 (Hiring an Attorney / No Attorney-Client Relationship), and 7 (Miscellaneous) shall survive any termination of this Agreement, in whole or in part. Any cause of action by you with respect to our officialgabbee.com website must be instituted within one (1) year after the cause of action arose or be forever waived and barred. All actions shall be subject to the limitations set forth in these Terms of Service and Conditions of Use.
Any legal claim arising out of or relating to these Terms of Service and Conditions of Use or our website, excluding intellectual property right infringement and other claims by us, shall be settled confidentially through mandatory binding arbitration per the American Arbitration Association commercial arbitration rules. The arbitration shall be conducted in California. Each party shall bear one half of the arbitration fees and costs incurred, and each party shall bear its own lawyer fees. All claims shall be arbitrated on an individual basis, and shall not be consolidated in any arbitration with any claim or controversy of any other party. Any rights not expressly granted herein are reserved to officialgabbee.com.
from django.core.exceptions import ValidationError
from django.db import models


def ncsu_semester(date):
    """
    An algorithm for estimating NC State University semester start dates.

    * Spring is January 1-May 14.
    * Summer is May 15-August 14.
    * Fall is August 15-December 31.
    """
    if date.month < 5:
        return "Spring"
    elif date.month == 5 and date.day < 15:
        return "Spring"
    elif date.month < 8:
        return "Summer"
    elif date.month == 8 and date.day < 15:
        return "Summer"
    else:
        return "Fall"


class EventKind(models.Model):
    singular = models.CharField(max_length=32,
        help_text="What we call this kind of event, title case. "
                  "Examples: Hack Day, Technical Meeting, "
                  "Social Dinner, Business Meeting")
    plural = models.CharField(max_length=32,
        help_text="Pluralize the name above.")
    description = models.CharField(max_length=128,
        help_text="A tooltip description for this event kind. "
                  "This should be a noun phrase capitalized "
                  "and punctuated as a sentence.")

    class Meta:
        ordering = ['plural']

    def __unicode__(self):
        return self.plural


class Event(models.Model):
    name = models.CharField(max_length=64,
        help_text="The event's name, to go on the calendar. "
                  "Repeating names is OK.")
    kind = models.ForeignKey(EventKind, null=False)
    start_time = models.DateTimeField()
    end_time = models.DateTimeField()
    speaker = models.CharField(max_length=48, blank=True,
        help_text="The name of the speaker or sponsor, if applicable. "
                  "Examples: \"Matthew Frazier\", "
                  "\"Jim Whitehurst of Red Hat\"")
    location = models.CharField(max_length=64,
        help_text="The event's location. Examples: "
                  "\"Engineering Building II 1227\", "
                  "\"2426 Hillsborough St\", "
                  "\"Location TBD\"")
    pitch = models.TextField(blank=True,
        help_text="A quick paragraph describing the event and "
                  "encouraging people to attend. "
                  "For full details, use the URL below. "
                  "Plain text.")
    custom_url = models.URLField("Custom URL", blank=True,
        help_text="A custom URL for the event, to use instead "
                  "of a wiki page.")
    advisory = models.CharField(max_length=32, blank=True,
        help_text="Some sort of notice that needs to be advertised "
                  "for the event. It will be displayed prominently "
                  "and with a sense of urgency. "
                  "Example: Cancelled due to inclement weather")
    on_website = models.BooleanField("display on Web site", default=True,
        help_text="Whether to display this event in the events "
                  "lineup on the homepage and the history page.")
    on_billboard = models.BooleanField("display on Billboard", default=True,
        help_text="Whether to display this event on the "
                  "Billboard slides.")

    class Meta:
        get_latest_by = 'start_time'
        ordering = ['-start_time']

    def __unicode__(self):
        return self.name

    def clean(self):
        if self.start_time >= self.end_time:
            raise ValidationError("Events must end after they start.")

    @property
    def semester(self):
        return ncsu_semester(self.start_time) + self.start_time.strftime(" %Y")

    @property
    def has_link(self):
        return bool(self.custom_url)

    def get_absolute_url(self):
        if self.custom_url:
            return self.custom_url
        else:
            return '/events/'  # FIXME
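A quick boundary check of ncsu_semester can catch off-by-one mistakes like the month/day mix-up fixed above. A minimal sketch, assuming the function is importable as written; the dates are arbitrary examples chosen to straddle the documented cutoffs:

import datetime

# May 15 is the documented Spring -> Summer cutoff;
# August 15 is the Summer -> Fall cutoff.
assert ncsu_semester(datetime.date(2020, 5, 14)) == "Spring"
assert ncsu_semester(datetime.date(2020, 5, 15)) == "Summer"
assert ncsu_semester(datetime.date(2020, 8, 14)) == "Summer"
assert ncsu_semester(datetime.date(2020, 8, 15)) == "Fall"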
Parent and writer bringing disability issues to light. 1. You use the word "emasculate" in reference to the entire Democratic party without thinking twice. The actual quote, during a conversation about Hillary Clinton, was: "Bill Clinton really emasculated the Democratic Party..." I snorted audibly, to which the man responded, "What? What's wrong with that?" I replied, "Can we start with why emasculation of the Democratic party would be a bad thing? Can we start there?" Really, we'd need to start before that, with assigning a male gender to an entire political party in the first place. 2. You think Bernie Bros can only be men. I haven't had any lengthy conversations with female Bernie Bros in person, but I know they exist. Ladies and gentlemen, writing in Bernie's name or voting for a third party in November is a vote for a serial liar, rampant xenophobe, racist, misogynist, birther and bully -- even if the GOP candidate ends up being someone other than Donald Trump. The GOP is currently attacking women's health and women's pay. Our government is a two-party system, like it or not. Don't make the same mistake Nader supporters made in 2000. 3. You're still tweeting #BernieOrBust like bonkers. 5. You think that when someone refers to sexism, they are only referring to "typical wife-beater types." Sexism is not confined to domestic violence, although domestic violence certainly doesn't get enough attention either. 6. You've told a female Hillary supporter that she needs to have "thicker skin." Yes, this was spoken to my face as an argument for being more sensitive to the feelings of Bernie Bros so we can recruit their votes. Bros, I want you to vote for Hillary, don't get me wrong. But I won't accept harassment or silence myself about sexism to achieve that end. Please notice: nothing I'm posting here is critical of Bernie himself. I think he is a very fine candidate. While it is hard to watch him lose, his supporters aren't victims. The small subset of Bernie supporters I am critiquing here need to be called on their behavior. 7. You've mansplained feminism... to a woman. 8. You think Hillary's policies will be the same as Bill Clinton's were, "because they live under the same roof." Spouses are distinct individuals who can and do think for themselves. My spouse and I agree on some things, but disagree on others. This is hardly revolutionary. 9. You don't count Hillary's years as first lady as experience because she was married to Bill. This is my favorite example of the no-win situations women face every day. What, exactly, was Hillary supposed to do? NOT be first lady during that time? 10. You tend to end up at discussion groups dominated by men. Our group yesterday began with four women and three men. After some of these real-life "jokes" reared their ugly heads, women started excusing themselves for such pressing errands as "picking up a few groceries," or "preparing for guests to come into town this weekend." (It was Tuesday.) I eventually excused myself, too. It wasn't funny anymore.
import os

from BungieDatabase import BungieDatabase

outputPath = "../stl"


def main():
    print("Welcome to the Destiny stl generator")

    # Create a Bungie Database object and connect to it
    db = BungieDatabase()
    db.connect()

    if not os.path.exists(outputPath):
        print("Creating stl output directory " + outputPath)
        os.makedirs(outputPath)

    while True:
        # Get user request
        command = input("Enter an item name or id: ")

        # Break if q, quit or exit was typed
        if command in ("q", "quit", "exit"):
            break

        # Update the database if requested
        elif command == "update":
            db.update()

        # Assume the entered text was an item name or id
        else:
            # Download the model data for this item
            item = command
            model = db.getModel(item)

            # If the model is not null generate the stl file
            if model is not None:
                model.generate(outputPath + "/" + item + ".stl")

    # Close the database and exit
    db.close()
    print("Bye.")


if __name__ == '__main__':
    main()
Rewards Canada: Jan 29 Update: 25% off Turkish Airlines Toronto-Istanbul award flights, 2x Petro-Points on vacation packages and more! Petro-Points members can earn double points on Sunwing or Signature Vacations packages booked through itravel2000. Simply book by Feb 4 for travel by Apr 30. Find out more here. Air Miles members out West can earn 25x AIR MILES Reward Miles for shopping at Rexall this weekend (Fri-Sun). See the flyer here. Our tip of the day is our post on When and why secondary loyalty programs can prove to be valuable. In the bonus offers section you'll notice a discounted award flight offer from Turkish Airlines, and the offer includes Toronto-Istanbul. If you have enough Miles&Smiles miles, now may be the time to redeem and save some miles, as long as you can travel by March 31st!
# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.api.openstack import extensions class Createserverext(extensions.ExtensionDescriptor): """Extended support to the Create Server v1.1 API.""" name = "Createserverext" alias = "os-create-server-ext" namespace = ("http://docs.openstack.org/compute/ext/" "createserverext/api/v1.1") updated = "2011-07-19T00:00:00Z" def get_resources(self): res = extensions.ResourceExtension('os-create-server-ext', inherits='servers') return [res]
A farmer carries his texel cross sheep after it has been judged on the first day of the Great Yorkshire Show near Harrogate, northern England on July 14, 2015. In the previous photo, farmers check the display of their texel cross sheep as they are judged on the first day of the show. The agricultural show, which was first held in 1838, showcases all aspects of country life and is set to attract around 130,000 visitors over its three days. Two red figures with holes in their hearts. Why? A pedestrian crossing signal showing a female homosexual couple at a junction on July 14, 2015 in Munich, Germany. The city, taking a cue from a similar project in the Austrian city of Vienna, introduced the new signals at a limited number of traffic lights in the city center for the recent CSD gay pride march and has since decided to keep them. The figures glow in red and green at pedestrian crosswalks and show both female and male couples. Justice is not only blind, but lazy. Do something, Judge! Actor Kirk Barge as Shylock lifts a knife to Alan Steele as Antonio, watched by Nicole Copper as Portia, as they act out a scene from Shakespeare's "The Merchant of Venice" on July 15, 2015 in Glasgow, Scotland. The Merchant of Venice, one of Shakespeare's most well known and controversial plays, runs from the 18th of July to the 2nd of August in The Botanic Gardens in Glasgow. A small book requires smaller fingers. Indian Muslim devotee Moulana Mohammed Fasihuddin Nizami examines a large-print Quran at Islamic University in Hyderabad during the holy month of Ramadan on July 12, 2015. In the previous photo, Nizami examines a 100-year-old one-inch Quran. As well as abstinence and fasting, Muslims are encouraged to pray and read the Quran during Islam's holiest month. Help, we can't get down! Youths pose for a photograph in an optical illusion installation at the Trick Art exhibition in Tel Aviv, Israel, July 14, 2015. In the previous photo, girls pose for a photograph on an optical illusion installation at the exhibition. The exhibit is held at the Tel Aviv Fair Grounds and runs through August 2015. Marooned on this island and my bed is at the top of a pole - that's not fair. French artist Abraham Poincheval waves from atop a platform on a pole in Esquibien, western France on July 14, 2015, where he plans to remain for one week as part of a performance called "La Vigie" (The Look-out Post). In previous art performances, Poincheval has lived inside a hole and inside a bear carcass. What in the world is he yelling at? Fernando Saraiva Reis of Brazil competes in the men's +105 weightlifting final on Day 5 of the Toronto 2015 Pan Am Games on July 15, 2015 in Toronto, Canada. The games run through July 26, 2015. Is that your floatie? No that's my floatie. Visitors crowd the Daying Dead Sea tourist resort in Daying County to escape high temperatures on July 11, 2015 in Suining, Sichuan Province of China. The pool has a capacity of 10,000. Now that's a party. I am not, repeat not, a hedgehog. Models walk the runway during Jennifer Thevenaz-Burdet fashion show as part of the International Talent Support 2015 Samsung Galaxy Award Fashion Show on July 11, 2015 in Trieste, Italy. Tiny red balls are attacking. Run! Spectators watch as thousands of Jaffa candies race down Baldwin Street on July 17, 2015 in Dunedin, New Zealand. The residential street is the steepest in the world and hosts the annual race to raise money for charity.
# Copyright (C) 2013- Takafumi Arakaki

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import os


def daemon_run(no_error, restart, record_path, keep_json, check_duplicate,
               use_polling, log_level):
    """
    Run RASH index daemon.

    This daemon watches the directory ``~/.config/rash/data/record``
    and translates the JSON files dumped by the ``record`` command into
    the sqlite3 DB at ``~/.config/rash/data/db.sqlite``.

    ``rash init`` will start RASH automatically by default, but there
    are alternative ways to start the daemon.

    If you want to organize background processes in one place, such as
    supervisord_, it is good to add the `--restart` option to force-stop
    another daemon process if you accidentally started one elsewhere.
    Here is an example of a supervisord_ setup::

        [program:rash-daemon]
        command=rash daemon --restart

    .. _supervisord: http://supervisord.org/

    Alternatively, you can call ``rash index`` in a cron job to avoid
    using the daemon.  It is useful if you want to use RASH on NFS, as
    it looks like watchdog does not work on NFS::

        # Refresh RASH DB every 10 minutes
        */10 * * * * rash index

    """
    # Probably it makes sense to use this daemon to provide search
    # API, so that this daemon is going to be the only process that
    # is connected to the DB?
    from .config import ConfigStore
    from .indexer import Indexer
    from .log import setup_daemon_log_file, LogForTheFuture
    from .watchrecord import watch_record, install_sigterm_handler

    install_sigterm_handler()
    cfstore = ConfigStore()
    if log_level:
        cfstore.daemon_log_level = log_level
    flogger = LogForTheFuture()

    # SOMEDAY: make PID checking/writing atomic if possible
    flogger.debug('Checking old PID file %r.', cfstore.daemon_pid_path)
    if os.path.exists(cfstore.daemon_pid_path):
        flogger.debug('Old PID file exists.  Reading from it.')
        with open(cfstore.daemon_pid_path, 'rt') as f:
            pid = int(f.read().strip())
        flogger.debug('Checking if old process with PID=%d is alive', pid)
        try:
            os.kill(pid, 0)  # check if `pid` is alive
        except OSError:
            flogger.info(
                'Process with PID=%d is already dead.  '
                'So just go on and use this daemon.', pid)
        else:
            if restart:
                flogger.info('Stopping old daemon with PID=%d.', pid)
                stop_running_daemon(cfstore, pid)
            else:
                message = ('There is already a running daemon (PID={0})!'
                           .format(pid))
                if no_error:
                    flogger.debug(message)
                    # FIXME: Setup log handler and flogger.dump().
                    # Note that using the default log file is not safe
                    # since it has already been used.
                    return
                else:
                    raise RuntimeError(message)
    else:
        flogger.debug('Daemon PID file %r does not exist.  '
                      'So just go on and use this daemon.',
                      cfstore.daemon_pid_path)
    with open(cfstore.daemon_pid_path, 'w') as f:
        f.write(str(os.getpid()))
    try:
        setup_daemon_log_file(cfstore)
        flogger.dump()
        indexer = Indexer(cfstore, check_duplicate, keep_json, record_path)
        indexer.index_all()
        watch_record(indexer, use_polling)
    finally:
        os.remove(cfstore.daemon_pid_path)


def stop_running_daemon(cfstore, pid):
    import time
    import signal
    os.kill(pid, signal.SIGTERM)
    for _ in range(30):
        time.sleep(0.1)
        if not os.path.exists(cfstore.daemon_pid_path):
            break
    else:
        raise RuntimeError(
            'Failed to stop running daemon process (PID={0})'
            .format(pid))


def start_daemon_in_subprocess(options, outpath=os.devnull):
    """
    Run `rash daemon --no-error` in background.

    :type options: list of str
    :arg  options: options for the "rash daemon" command
    :type outpath: str
    :arg  outpath: path to redirect daemon output

    """
    import subprocess
    import sys
    from .utils.py3compat import nested
    from .utils.pathutils import mkdirp
    if outpath != os.devnull:
        mkdirp(os.path.dirname(outpath))
    with nested(open(os.devnull), open(outpath, 'w')) as (stdin, stdout):
        subprocess.Popen(
            [os.path.abspath(sys.executable), '-m', 'rash.cli',
             'daemon', '--no-error'] + options,
            preexec_fn=os.setsid,
            stdin=stdin, stdout=stdout, stderr=subprocess.STDOUT)


def daemon_add_arguments(parser):
    parser.add_argument(
        '--no-error', action='store_true', default=False,
        help="""
        Do nothing if a daemon is already running.
        """)
    parser.add_argument(
        '--restart', action='store_true', default=False,
        help="""
        Kill the already running daemon process if one exists.
        """)
    parser.add_argument(
        '--record-path',
        help="""
        Specify the directory that has the JSON records.
        """)
    parser.add_argument(
        '--keep-json', default=False, action='store_true',
        help="""
        Do not remove old JSON files.  It turns on --check-duplicate.
        """)
    parser.add_argument(
        '--check-duplicate', default=False, action='store_true',
        help='Do not store already existing history in the DB.')
    parser.add_argument(
        '--use-polling', default=False, action='store_true',
        help="""
        Use polling instead of system-specific notification.
        This is useful, for example, when your $HOME is on NFS where
        inotify does not work.
        """)
    parser.add_argument(
        '--log-level',
        choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG'],
        help='Logging level.')


commands = [
    ('daemon', daemon_add_arguments, daemon_run),
]
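The PID-file check above leans on a classic POSIX idiom: os.kill(pid, 0) delivers no signal at all, but still performs the existence and permission checks, so it doubles as a liveness probe. A minimal standalone sketch of that idiom follows; the is_alive helper name is hypothetical and not part of RASH, and it uses Python 3's OSError subclasses where the code above catches plain OSError for Python 2 compatibility:

import os


def is_alive(pid):
    """Return True if a process with the given PID exists (POSIX only)."""
    try:
        os.kill(pid, 0)  # signal 0: nothing is delivered, checks only
    except ProcessLookupError:
        return False     # no such process
    except PermissionError:
        return True      # process exists but belongs to another user
    return True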
Bestselling author Linda Castillo is the recipient of numerous writing awards, including the Daphne du Maurier Award of Excellence, the Holt Medallion and a nomination for the Rita. She lives in Texas with her husband and is currently at work on her next thriller, also set in Amish Country and featuring Chief of Police Kate Burkholder. A rainy night, a father returning home with his three children, a speeding car hurtling toward them out of nowhere. What at first seems like a tragic, but routine, car accident suddenly takes on a more sinister cast as evidence emerges that nothing about the crash is accidental. But who would want to kill an Amish deacon and his children? He leaves behind a grieving widow and a young boy who clings to life in the intensive-care wing of a hospital, unable to communicate. He may be the only one who knows what happened that night. Desperate to find out who killed her best friend's husband and why, Kate Burkholder begins to suspect she is not looking for a reckless drunk, but instead is on the trail of a coldblooded killer. It is a search that takes her on a chilling journey into the darkest reaches of the human psyche and strikes at the heart of everything she has ever believed about the Amish culture into which she was born. Solly and Rachel Slabaugh, along with Solly's brother Abel, have drowned in the hog pit, leaving the four children as orphans. As the investigation progresses, it seems that the Slabaugh deaths were not an accident, and the case suddenly becomes a murder enquiry. As the case deepens, Kate develops a bond with the children, particularly the 15-year-old daughter, Salome. Maybe she is reminded of herself at that age, and maybe there's something about this case which stirs up memories for her. The events surrounding the deaths puzzle her; something doesn't feel right. As more information comes to light, a tragic incident turns into something much more shocking. July 2010 Good Housekeeping selection. In Pray for Silence by Linda Castillo, a gruesome murder on an Amish farm leads police chief Katie Burkholder to discover that someone close to home has been leading a lurid double life. December 2009 Book of the Month. A well-devised plot with Amish and ‘English’ communities facing the prospect that a serial killer from the past has returned to their now peaceful lives. With plenty of characters harbouring a secret, the plot unfolds tantalisingly and moves towards a suspense-filled ending. A gripping thriller with another in the series lined up. Kate's long-time love interest, State Agent John Tomasetti, is dead set against her taking on such an unorthodox assignment, knowing she'll have limited communication and even less in the way of backup. But Kate can't turn her back, especially when the rumour mill boils with disturbing accounts of children in danger. She travels to New York where she's briefed and assumes her new identity as a lone widow seeking a new life. Kate infiltrates the community and goes deep under cover. In the coming days, she unearths a world built on secrets, a series of shocking crimes, and finds herself alone... trapped in a fight for her life. From the New York Times bestselling author of THE DEAD WILL TELL comes a new short story offering a glimpse into Chief of Police Kate Burkholder's past and her Amish roots. It's autumn in Painter's Mill, and fourteen-year-old Katie Burkholder has been tasked with picking apples in Zimmerman's Orchard with her brother. It's just another day filled with chores, until her best friend Mattie arrives to help.
Somehow, boredom transforms into fun and games whenever the girls are together. The innocent fun comes to an end when Billy Marquardt and his gang of friends interrupt. Katie is no prude, but she knows better than to associate with the older English boys, especially since they're known troublemakers. Mattie has no such compunction. Thumbing her nose at the Ordnung and all of the Amish rules, she disappears into the old barn with Billy. Moments later, the Zimmermans' barn is consumed by fire. Katie suspects Billy had something to do with the blaze, but he denies it. When the facts don't add up, Katie begins her own investigation, and she doesn't like what she finds. Will her friendship with Mattie survive the truth? IN THE NEWS: Ex-navy search and rescue pilot Drew Evans. FAMILY HISTORY: His adopted brother is one of the genetically engineered Extraordinary Five. DEEPEST SECRET: Haunted by the only mission he failed. Hardened hero Drew Evans had thought his tragic past was behind him, until the woman he'd secretly loved, his best friend's widow, stepped back into his solitary life. But single mom Alison Myers was in need of his help to save her ailing son, and to quench her own hidden longings. Alison had never expected to see devastatingly handsome Drew again, the one man who made her feel the forbidden stirrings of passion. But could she trust him with the two most precious things in her life... her son and her heart? When the ghosts of the past speak. 1979: An Amish father and four of his children die in a botched robbery. His wife is abducted by the perpetrators and never seen again. Only the fourteen-year-old son, Billy Hochstetler, survives that gruesome night. 2014: Everyone in Painters Mill knows that the abandoned Hochstetler family farm is haunted. But only a very few know what actually happened that night. And now, one by one, people are being brutally murdered. Who has uncovered their secret? Who has seen Angela Blaine? The young woman vanished without a trace twenty years ago. Only her bloodied clothes were found on the riverbank. Kate Burkholder and John Tomasetti investigate and stumble upon a long and well-kept secret. Gripping and moving: Lange Vermisst (Long Lost) is a suspenseful and entertaining short story set in Amish country. In this electrifying thriller by New York Times bestseller Linda Castillo, Kate Burkholder must uncover a family's long-hidden past to solve a brutal murder. When a tornado tears through Painters Mill and unearths human remains, Chief of Police Kate Burkholder finds herself tasked with the responsibility of identifying the bones, and notifying the family. Evidence quickly emerges that the death was no accident and Kate finds herself plunged into a thirty-year-old case that takes her deep into the Amish community to which she once belonged. Meanwhile, turmoil of an emotional and personal nature strikes at the very heart of Kate's budding relationship with state agent John Tomasetti, a reality that strains their fragile new love to the breaking point and threatens the refuge they've built for themselves, and their future. Under siege from an unknown assailant, and her own personal demons, Kate digs deep into the case only to discover proof of an unimaginable atrocity, a plethora of family secrets and the lengths to which people will go to protect their own.
New York Times bestselling author Linda Castillo immerses readers in the world of the Amish in this chilling series that is equal parts fast-paced thriller and intriguing psychological puzzle. Sworn to Silence: Sixteen years ago, a brutal serial killer left a trail of victims before vanishing into thin air. Now, he may have returned, and only one woman can stop him, but can she solve the case without revealing a terrible secret that links her to the original murders? Pray for Silence: In the quiet town of Painters Mill an Amish family of seven has been found slaughtered on their farm. Police chief Kate Burkholder and her small force have few clues, no motive, and no suspect. Breaking Silence: What appears to be a gruesome accident that left three people dead turns more sinister when evidence of foul play emerges. Together with agent John Tomasetti, Kate searches for answers and uncovers a dark secret at work beneath the placid surface of this idyllic Amish community. When a baby, only hours old, is discovered on the Amish bishop's front porch in Painter's Mill, Ohio, Chief of Police Kate Burkholder is called in to investigate. The newborn is swaddled in an Amish crib quilt, and the only other item found with the child is a hand-carved wood rattle, which Kate also recognizes as Amish. The little girl seems healthy and whole; but who would abandon her and why? Though the quilt and rattle could be purchased, Kate suspects the mother is Amish, possibly young and unmarried, both of which would be powerful motives for such a desperate act. With the rattle and the baby quilt as the only clues, Kate must call upon her own Amish roots, and with the help of state agent John Tomasetti, search the Amish and "English" communities of Painters Mill for clues to unravel the poignant, puzzling mystery. When a baby, only hours old, is discovered on the Amish bishop's front porch in Painter's Mill, Ohio, Chief of Police Kate Burkholder is called in to investigate. The newborn is swaddled in an Amish crib quilt, and the only other item found with the child is a hand-carved wood rattle, which Kate also recognizes as Amish. The little girl seems healthy and whole, but who would abandon her and why? Though the quilt and rattle could have been purchased, Kate suspects the mother is Amish, possibly young and unmarried, both of which would be powerful motives for such a desperate act. With the rattle and the baby quilt as the only clues, Kate must call upon her own Amish roots and, with the help of state agent John Tomasetti, search the Amish and "English" communities of Painters Mill for clues to unravel the poignant, puzzling mystery. Everyone in Painters Mill knows the abandoned Hochstetler farm is haunted. Ghost stories abound, but no one knows what really happened that terrible night thirty-five years ago when an Amish father and his four children perished, and his young wife disappeared without a trace. When Chief of Police Kate Burkholder is called to the scene of an apparent suicide, a man found hanging from the rafters in his barn, evidence quickly points to murder. She finds herself following an elusive trail that points back to the tragedy of that long-ago incident. Meanwhile, Kate has moved in with state agent John Tomasetti and for the first time in a long time, she's happy. Their newfound bliss is shattered when one of the men responsible for the murders of Tomasetti's family four years ago is found not guilty and walks away a free man.
When a second man turns up dead, Kate discovers a link that sends the investigation in a direction no one could imagine and uncovers a horrifying truth that reaches deep into the past and strikes at the very heart of Painters Mill. As Kate draws a bead on a stone-cold killer, a murderer vows to right old wrongs and kill anyone who stands in the way. After nine years as one of Chicago's finest, Erin McNeal had come to sleepy Logan Falls for a second chance, not to be watched over by a man as infuriatingly overprotective, and disarmingly attractive, as Nick Ryan. She was no damsel in distress, but someone wanted her dead, and Nick knew better than to entrust a guilt-driven daredevil with her own safety. It was his duty to protect her, whether she liked it or not. It seemed the only thing the chief of police and his newest deputy could agree on was the one thing they couldn't resist: each other. Could the cautious single father and the reckless beauty be cops... and lovers? Agent: Robert Davidson. Mission: Pinpoint the exact location of the missing agent in Rebelia. Deepest Secret: He's never recovered from the horror of watching the woman he loved die. Robert Davidson thought he could handle returning to Rebelia, the war-ravaged country that nearly cost him his life, and had cost him Lily Scott, the passionate journalist he'd fallen in love with. But nothing could have prepared him for the shock of finding Lily still alive, with a child. With Lily's and her son's lives in danger, Robert must set aside his jealousy, and his desire, in order to protect the only woman he's ever loved. But Lily has one more secret, one that will change his life forever.
# coding: utf-8
import random

import numpy as np


def fake_adjacency_list(node_size):
    """Generate a random adjacency list over `node_size` nodes."""
    adjacency_list = {}
    for node_src in range(node_size):
        adjacency_list[node_src] = []
        threshold = random.random()
        for node_dst in range(node_size):
            p_jump = random.random()
            if p_jump >= threshold:
                adjacency_list[node_src].append(node_dst)
    return adjacency_list


def page_rank(p, adjacency_list):
    """Compute PageRank by power iteration with damping factor `p`."""

    def adjacency_list_to_table(adjacency_list):
        # Entry [src, dst] is the probability of jumping from src to dst
        # (uniform over src's out-links), so each row sums to 1 for
        # nodes that have at least one out-link.
        node_size = len(adjacency_list)
        adjacency_table = np.zeros([node_size, node_size])
        for src_node, dst_nodes in adjacency_list.items():
            cnt_dst_nodes = len(dst_nodes)
            for dst_node in dst_nodes:
                adjacency_table[src_node, dst_node] = 1.0 / cnt_dst_nodes
        return adjacency_table

    node_size = len(adjacency_list)
    adjacency_table = adjacency_list_to_table(adjacency_list)
    init_state = np.array([[1.0 / node_size for _ in range(node_size)]]).T

    # Power iteration.  Note the transpose: rank flows along the edges,
    # i.e. rank[dst] accumulates rank[src] / outdegree(src).  Comparing
    # floats for exact equality may never terminate, so stop when the
    # state vector is unchanged up to a small tolerance.
    last_state = init_state
    while True:
        state = p * adjacency_table.T.dot(last_state) + (1 - p) * init_state
        if np.allclose(state, last_state):
            break
        last_state = state
    return state


if __name__ == '__main__':
    adjacency_list = fake_adjacency_list(6)
    p = 0.8
    page_rank_value = page_rank(p, adjacency_list)
    print(page_rank_value)
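As a sanity check, the iteration can be run on a small hand-checkable graph (a hypothetical example, not part of the original script): nodes 0 and 1 both link to node 2, so node 2 should accumulate the highest rank, while node 1 has no in-links and receives only the teleportation share (1 - p) / 3.

toy_graph = {0: [2], 1: [2], 2: [0]}  # 0 -> 2, 1 -> 2, 2 -> 0
ranks = page_rank(0.85, toy_graph)
print(ranks.ravel())  # expect roughly [0.46, 0.05, 0.49]: node 2 is largest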
WASHINGTON, November 22, 2016—The Marine Toys for Tots Program, with support from HASBRO, FIRST BOOK, The Walt Disney Company, Houghton Mifflin Harcourt, and the Navy Blue Angels Flight Demonstration Team, will deliver toys and books on November 22 to families impacted by the severe flooding from Hurricane Matthew. In October 2016, flooding in North Carolina from Hurricane Matthew caused well over $1 billion in damage to more than 100,000 homes in the southeastern portion of the state, leaving many without homes and basic needs. Now, with the holiday season rapidly approaching, many families continue to struggle as they attempt to return some normalcy to their lives, and are finding themselves in need of support for their children during Christmas, Chanukah and other holidays. With help from Hasbro, their long-time partner and sponsor, the Marine Toys for Tots Program will distribute over $250,000 worth of new toys to the North Carolina communities affected by the flooding. In their 20-year history of supporting Toys for Tots, Hasbro has donated over 3 million toys valued at over $36 million, fulfilling the Christmas holiday dreams of millions of less fortunate children who may otherwise have gone without. Joining in the effort is Toys for Tots’ newest partner, First Book, the nonprofit social enterprise that has distributed more than 150 million books to children in need, which will be contributing 30,000 children’s books. The books will be provided by Disney and Houghton Mifflin Harcourt. Since this large shipment of toys and books comes well prior to Christmas, Santa’s reindeer are busy grazing in the North Pole and unable to deliver the goods. Santa, through his U.S. Marine Corps connections, reached out to the Navy Blue Angels Flight Demonstration Team to seek alternative transportation. The Blue Angels have graciously offered their C-130 “Fat Albert” and crew to fly these much-needed toys to ensure no child is forgotten this holiday season. On November 22nd, 2016, the Blue Angels’ C-130 will fly the toys into the Fayetteville airport, where Marines from Marine Forces Reserve units along with a host of volunteers will unload the toys and deliver them to the local Toys for Tots Coordinators in the surrounding counties, who will then distribute those toys and books to families in need for the 2016 holiday season. Established in 1947, Toys for Tots is the Marine Corps’ premier community action program and an official activity of the U.S. Marine Corps as well as an official mission of Marine Forces Reserve. The mission of the program is to collect new, unwrapped toys, and distribute those toys as Christmas gifts to less fortunate children. The goal is to deliver, through a new toy at Christmas, a message of hope to less fortunate youngsters that will assist them in becoming responsible, productive, patriotic citizens. Since 1947, the Marine Toys for Tots Program has distributed 512 million toys to 237 million children. The Marine Toys for Tots Foundation is an IRS-recognized 501(c)(3) not-for-profit charity established in 1991 at the behest of the Marine Corps to support the Marine Toys for Tots Program. The Foundation’s mission is to provide a tangible sign of hope to economically disadvantaged children at Christmas time. Since its inception in 1991, the Foundation has supplemented local toy collections with over 117 million toys valued at over $920 million. Hasbro is a global play and entertainment company committed to Creating the World’s Best Play Experiences.
From toys and games to television, movies, digital gaming and consumer products, Hasbro offers a variety of ways for audiences to experience its iconic brands, including NERF, MY LITTLE PONY, TRANSFORMERS, PLAY-DOH, MONOPOLY, LITTLEST PET SHOP and MAGIC: THE GATHERING, as well as premier partner brands. The Company’s Hasbro Studios and its film label, Allspark Pictures, are building its brands globally through great storytelling and content on all screens. Through its commitment to corporate social responsibility and philanthropy, Hasbro is helping to make the world a better place for children and their families. Learn more at hasbro.com, and follow us on Twitter (@Hasbro & @HasbroNews) and Instagram (@Hasbro).
# -*- coding: utf-8 -*- # See LICENSE file for full copyright and licensing details. import time import datetime from odoo.exceptions import UserError, ValidationError from odoo.osv import expression from odoo.tools import misc, DEFAULT_SERVER_DATETIME_FORMAT from odoo import models, fields, api, _ from decimal import Decimal def _offset_format_timestamp1(src_tstamp_str, src_format, dst_format, ignore_unparsable_time=True, context=None): """ Convert a source timeStamp string into a destination timeStamp string, attempting to apply the correct offset if both the server and local timeZone are recognized,or no offset at all if they aren't or if tz_offset is false (i.e. assuming they are both in the same TZ). @param src_tstamp_str: the STR value containing the timeStamp. @param src_format: the format to use when parsing the local timeStamp. @param dst_format: the format to use when formatting the resulting timeStamp. @param server_to_client: specify timeZone offset direction (server=src and client=dest if True, or client=src and server=dest if False) @param ignore_unparsable_time: if True, return False if src_tstamp_str cannot be parsed using src_format or formatted using dst_format. @return: destination formatted timestamp, expressed in the destination timezone if possible and if tz_offset is true, or src_tstamp_str if timezone offset could not be determined. """ if not src_tstamp_str: return False res = src_tstamp_str if src_format and dst_format: try: # dt_value needs to be a datetime.datetime object\ # (so notime.struct_time or mx.DateTime.DateTime here!) dt_value = datetime.datetime.strptime(src_tstamp_str, src_format) if context.get('tz', False): try: import pytz src_tz = pytz.timezone(context['tz']) dst_tz = pytz.timezone('UTC') src_dt = src_tz.localize(dt_value, is_dst=True) dt_value = src_dt.astimezone(dst_tz) except Exception: pass res = dt_value.strftime(dst_format) except Exception: # Normal ways to end up here are if strptime or strftime failed if not ignore_unparsable_time: return False pass return res class HotelFloor(models.Model): _name = "hotel.floor" _description = "Floor" name = fields.Char('Floor Name', size=64, required=True, index=True) sequence = fields.Integer('Sequence', size=64, index=True) class HotelRoomType(models.Model): _name = "hotel.room.type" _description = "Room Type" name = fields.Char('Name', size=64, required=True) categ_id = fields.Many2one('hotel.room.type', 'Category') child_id = fields.One2many('hotel.room.type', 'categ_id', 'Child Categories') @api.multi def name_get(self): def get_names(cat): """ Return the list [cat.name, cat.categ_id.name, ...] 
""" res = [] while cat: res.append(cat.name) cat = cat.categ_id return res return [(cat.id, " / ".join(reversed(get_names(cat)))) for cat in self] @api.model def name_search(self, name, args=None, operator='ilike', limit=100): if not args: args = [] if name: # Be sure name_search is symetric to name_get category_names = name.split(' / ') parents = list(category_names) child = parents.pop() domain = [('name', operator, child)] if parents: names_ids = self.name_search(' / '.join(parents), args=args, operator='ilike', limit=limit) category_ids = [name_id[0] for name_id in names_ids] if operator in expression.NEGATIVE_TERM_OPERATORS: categories = self.search([('id', 'not in', category_ids)]) domain = expression.OR([[('categ_id', 'in', categories.ids)], domain]) else: domain = expression.AND([[('categ_id', 'in', category_ids)], domain]) for i in range(1, len(category_names)): domain = [[('name', operator, ' / '.join(category_names[-1 - i:]))], domain] if operator in expression.NEGATIVE_TERM_OPERATORS: domain = expression.AND(domain) else: domain = expression.OR(domain) categories = self.search(expression.AND([domain, args]), limit=limit) else: categories = self.search(args, limit=limit) return categories.name_get() class ProductProduct(models.Model): _inherit = "product.product" isroom = fields.Boolean('Is Room') iscategid = fields.Boolean('Is categ id') isservice = fields.Boolean('Is Service id') class HotelRoomAmenitiesType(models.Model): _name = 'hotel.room.amenities.type' _description = 'amenities Type' name = fields.Char('Name', size=64, required=True) amenity_id = fields.Many2one('hotel.room.amenities.type', 'Category') child_id = fields.One2many('hotel.room.amenities.type', 'amenity_id', 'Child Categories') @api.multi def name_get(self): def get_names(cat): """ Return the list [cat.name, cat.amenity_id.name, ...] 
""" res = [] while cat: res.append(cat.name) cat = cat.amenity_id return res return [(cat.id, " / ".join(reversed(get_names(cat)))) for cat in self] @api.model def name_search(self, name, args=None, operator='ilike', limit=100): if not args: args = [] if name: # Be sure name_search is symetric to name_get category_names = name.split(' / ') parents = list(category_names) child = parents.pop() domain = [('name', operator, child)] if parents: names_ids = self.name_search(' / '.join(parents), args=args, operator='ilike', limit=limit) category_ids = [name_id[0] for name_id in names_ids] if operator in expression.NEGATIVE_TERM_OPERATORS: categories = self.search([('id', 'not in', category_ids)]) domain = expression.OR([[('amenity_id', 'in', categories.ids)], domain]) else: domain = expression.AND([[('amenity_id', 'in', category_ids)], domain]) for i in range(1, len(category_names)): domain = [[('name', operator, ' / '.join(category_names[-1 - i:]))], domain] if operator in expression.NEGATIVE_TERM_OPERATORS: domain = expression.AND(domain) else: domain = expression.OR(domain) categories = self.search(expression.AND([domain, args]), limit=limit) else: categories = self.search(args, limit=limit) return categories.name_get() class HotelRoomAmenities(models.Model): _name = 'hotel.room.amenities' _description = 'Room amenities' product_id = fields.Many2one('product.product', 'Product Category', required=True, delegate=True, ondelete='cascade') categ_id = fields.Many2one('hotel.room.amenities.type', string='Amenities Category', required=True) product_manager = fields.Many2one('res.users', string='Product Manager') class FolioRoomLine(models.Model): _name = 'folio.room.line' _description = 'Hotel Room Reservation' _rec_name = 'room_id' room_id = fields.Many2one(comodel_name='hotel.room', string='Room id') check_in = fields.Datetime('Check In Date', required=True) check_out = fields.Datetime('Check Out Date', required=True) folio_id = fields.Many2one('hotel.folio', string='Folio Number') status = fields.Selection(string='state', related='folio_id.state') class HotelRoom(models.Model): _name = 'hotel.room' _description = 'Hotel Room' product_id = fields.Many2one('product.product', 'Product_id', required=True, delegate=True, ondelete='cascade') floor_id = fields.Many2one('hotel.floor', 'Floor No', help='At which floor the room is located.') max_adult = fields.Integer('Max Adult') max_child = fields.Integer('Max Child') categ_id = fields.Many2one('hotel.room.type', string='Room Category', required=True) room_amenities = fields.Many2many('hotel.room.amenities', 'temp_tab', 'room_amenities', 'rcateg_id', string='Room Amenities', help='List of room amenities. ') status = fields.Selection([('available', 'Available'), ('occupied', 'Occupied')], 'Status', default='available') capacity = fields.Integer('Capacity', required=True) room_line_ids = fields.One2many('folio.room.line', 'room_id', string='Room Reservation Line') product_manager = fields.Many2one('res.users', string='Product Manager') @api.constrains('capacity') def check_capacity(self): for room in self: if room.capacity <= 0: raise ValidationError(_('Room capacity must be more than 0')) @api.onchange('isroom') def isroom_change(self): ''' Based on isroom, status will be updated. ---------------------------------------- @param self: object pointer ''' if self.isroom is False: self.status = 'occupied' if self.isroom is True: self.status = 'available' @api.multi def write(self, vals): """ Overrides orm write method. 
@param self: The object pointer @param vals: dictionary of fields value. """ if 'isroom' in vals and vals['isroom'] is False: vals.update({'color': 2, 'status': 'occupied'}) if 'isroom'in vals and vals['isroom'] is True: vals.update({'color': 5, 'status': 'available'}) ret_val = super(HotelRoom, self).write(vals) return ret_val @api.multi def set_room_status_occupied(self): """ This method is used to change the state to occupied of the hotel room. --------------------------------------- @param self: object pointer """ return self.write({'isroom': False, 'color': 2}) @api.multi def set_room_status_available(self): """ This method is used to change the state to available of the hotel room. --------------------------------------- @param self: object pointer """ return self.write({'isroom': True, 'color': 5}) class HotelFolio(models.Model): @api.multi def name_get(self): res = [] disp = '' for rec in self: if rec.order_id: disp = str(rec.name) res.append((rec.id, disp)) return res @api.model def name_search(self, name='', args=None, operator='ilike', limit=100): if args is None: args = [] args += ([('name', operator, name)]) mids = self.search(args, limit=100) return mids.name_get() @api.model def _needaction_count(self, domain=None): """ Show a count of draft state folio on the menu badge. @param self: object pointer """ return self.search_count([('state', '=', 'draft')]) @api.model def _get_checkin_date(self): if self._context.get('tz'): to_zone = self._context.get('tz') else: to_zone = 'UTC' return _offset_format_timestamp1(time.strftime("%Y-%m-%d 12:00:00"), DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, ignore_unparsable_time=True, context={'tz': to_zone}) @api.model def _get_checkout_date(self): if self._context.get('tz'): to_zone = self._context.get('tz') else: to_zone = 'UTC' tm_delta = datetime.timedelta(days=1) return datetime.datetime.strptime(_offset_format_timestamp1 (time.strftime("%Y-%m-%d 12:00:00"), DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, ignore_unparsable_time=True, context={'tz': to_zone}), '%Y-%m-%d %H:%M:%S') + tm_delta @api.multi def copy(self, default=None): ''' @param self: object pointer @param default: dict of default values to be set ''' return super(HotelFolio, self).copy(default=default) _name = 'hotel.folio' _description = 'hotel folio new' _rec_name = 'order_id' _order = 'id' # _inherit = ['ir.needaction_mixin'] name = fields.Char('Folio Number', readonly=True, index=True, default='New') order_id = fields.Many2one('sale.order', 'Order', delegate=True, required=True, ondelete='cascade') checkin_date = fields.Datetime('Check In', required=True, readonly=True, states={'draft': [('readonly', False)]}, default=_get_checkin_date) checkout_date = fields.Datetime('Check Out', required=True, readonly=True, states={'draft': [('readonly', False)]}, default=_get_checkout_date) room_lines = fields.One2many('hotel.folio.line', 'folio_id', readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="Hotel room reservation detail.") service_lines = fields.One2many('hotel.service.line', 'folio_id', readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="Hotel services detail provide to" "customer and it will include in " "main Invoice.") hotel_policy = fields.Selection([('prepaid', 'On Booking'), ('manual', 'On Check In'), ('picking', 'On Checkout')], 'Hotel Policy', default='manual', help="Hotel policy for payment that " "either the guest has to payment at " "booking 
time or check-in " "check-out time.") duration = fields.Float('Duration in Days', help="Number of days which will automatically " "count from the check-in and check-out date. ") hotel_invoice_id = fields.Many2one('account.invoice', 'Invoice', copy=False) duration_dummy = fields.Float('Duration Dummy') @api.constrains('room_lines') def folio_room_lines(self): ''' This method is used to validate the room_lines. ------------------------------------------------ @param self: object pointer @return: raise warning depending on the validation ''' folio_rooms = [] for room in self[0].room_lines: if room.product_id.id in folio_rooms: raise ValidationError(_('You Cannot Take Same Room Twice')) folio_rooms.append(room.product_id.id) @api.onchange('checkout_date', 'checkin_date') def onchange_dates(self): ''' This method gives the duration between check in and checkout if customer will leave only for some hour it would be considers as a whole day.If customer will check in checkout for more or equal hours, which configured in company as additional hours than it would be consider as full days -------------------------------------------------------------------- @param self: object pointer @return: Duration and checkout_date ''' configured_addition_hours = 0 wid = self.warehouse_id whouse_com_id = wid or wid.company_id if whouse_com_id: configured_addition_hours = wid.company_id.additional_hours myduration = 0 chckin = self.checkin_date chckout = self.checkout_date if chckin and chckout: server_dt = DEFAULT_SERVER_DATETIME_FORMAT chkin_dt = datetime.datetime.strptime(chckin, server_dt) chkout_dt = datetime.datetime.strptime(chckout, server_dt) dur = chkout_dt - chkin_dt sec_dur = dur.seconds if (not dur.days and not sec_dur) or (dur.days and not sec_dur): myduration = dur.days else: myduration = dur.days + 1 # To calculate additional hours in hotel room as per minutes if configured_addition_hours > 0: additional_hours = abs((dur.seconds / 60) / 60) if additional_hours >= configured_addition_hours: myduration += 1 self.duration = myduration self.duration_dummy = self.duration @api.model def create(self, vals, check=True): """ Overrides orm create method. @param self: The object pointer @param vals: dictionary of fields value. @return: new record set for hotel folio. 
""" if not 'service_lines' and 'folio_id' in vals: tmp_room_lines = vals.get('room_lines', []) vals['order_policy'] = vals.get('hotel_policy', 'manual') vals.update({'room_lines': []}) folio_id = super(HotelFolio, self).create(vals) for line in (tmp_room_lines): line[2].update({'folio_id': folio_id}) vals.update({'room_lines': tmp_room_lines}) folio_id.write(vals) else: if not vals: vals = {} vals['name'] = self.env['ir.sequence'].next_by_code('hotel.folio') vals['duration'] = vals.get('duration', 0.0) or vals.get('duration_dummy', 0.0) folio_id = super(HotelFolio, self).create(vals) folio_room_line_obj = self.env['folio.room.line'] h_room_obj = self.env['hotel.room'] try: for rec in folio_id: if not rec.reservation_id: for room_rec in rec.room_lines: prod = room_rec.product_id.name room_obj = h_room_obj.search([('name', '=', prod)]) room_obj.write({'isroom': False}) vals = {'room_id': room_obj.id, 'check_in': rec.checkin_date, 'check_out': rec.checkout_date, 'folio_id': rec.id, } folio_room_line_obj.create(vals) except: for rec in folio_id: for room_rec in rec.room_lines: prod = room_rec.product_id.name room_obj = h_room_obj.search([('name', '=', prod)]) room_obj.write({'isroom': False}) vals = {'room_id': room_obj.id, 'check_in': rec.checkin_date, 'check_out': rec.checkout_date, 'folio_id': rec.id, } folio_room_line_obj.create(vals) return folio_id @api.multi def write(self, vals): """ Overrides orm write method. @param self: The object pointer @param vals: dictionary of fields value. """ product_obj = self.env['product.product'] h_room_obj = self.env['hotel.room'] folio_room_line_obj = self.env['folio.room.line'] room_lst1 = [] for rec in self: for res in rec.room_lines: room_lst1.append(res.product_id.id) room_lst = [] for folio_obj in self: if vals and vals.get('duration_dummy', False): vals['duration'] = vals.get('duration_dummy', 0.0) else: vals['duration'] = folio_obj.duration for folio_rec in folio_obj.room_lines: room_lst.append(folio_rec.product_id.id) new_rooms = set(room_lst).difference(set(room_lst1)) if len(list(new_rooms)) != 0: room_list = product_obj.browse(list(new_rooms)) for rm in room_list: room_obj = h_room_obj.search([('name', '=', rm.name)]) room_obj.write({'isroom': False}) vals = {'room_id': room_obj.id, 'check_in': folio_obj.checkin_date, 'check_out': folio_obj.checkout_date, 'folio_id': folio_obj.id, } folio_room_line_obj.create(vals) if len(list(new_rooms)) == 0: room_list_obj = product_obj.browse(room_lst1) for rom in room_list_obj: room_obj = h_room_obj.search([('name', '=', rom.name)]) room_obj.write({'isroom': False}) room_vals = {'room_id': room_obj.id, 'check_in': folio_obj.checkin_date, 'check_out': folio_obj.checkout_date, 'folio_id': folio_obj.id, } folio_romline_rec = (folio_room_line_obj.search ([('folio_id', '=', folio_obj.id)])) folio_romline_rec.write(room_vals) return super(HotelFolio, self).write(vals) @api.onchange('warehouse_id') def onchange_warehouse_id(self): ''' When you change warehouse it will update the warehouse of the hotel folio as well ---------------------------------------------------------- @param self: object pointer ''' return self.order_id._onchange_warehouse_id() @api.onchange('partner_id') def onchange_partner_id(self): ''' When you change partner_id it will update the partner_invoice_id, partner_shipping_id and pricelist_id of the hotel folio as well --------------------------------------------------------------- @param self: object pointer ''' if self.partner_id: partner_rec = 
self.env['res.partner'].browse(self.partner_id.id) order_ids = [folio.order_id.id for folio in self] if not order_ids: self.partner_invoice_id = partner_rec.id self.partner_shipping_id = partner_rec.id self.pricelist_id = partner_rec.property_product_pricelist.id raise _('Not Any Order For %s ' % (partner_rec.name)) else: self.partner_invoice_id = partner_rec.id self.partner_shipping_id = partner_rec.id self.pricelist_id = partner_rec.property_product_pricelist.id @api.multi def button_dummy(self): ''' @param self: object pointer ''' for folio in self: folio.order_id.button_dummy() return True @api.multi def action_done(self): self.state = 'done' @api.multi def action_invoice_create(self, grouped=False, final=False): ''' @param self: object pointer ''' room_lst = [] invoice_id = (self.order_id.action_invoice_create(grouped=False, final=False)) for line in self: values = {'invoiced': True, 'hotel_invoice_id': invoice_id } line.write(values) for rec in line.room_lines: room_lst.append(rec.product_id) for room in room_lst: room_obj = self.env['hotel.room' ].search([('name', '=', room.name)]) room_obj.write({'isroom': True}) return invoice_id @api.multi def action_invoice_cancel(self): ''' @param self: object pointer ''' if not self.order_id: raise UserError(_('Order id is not available')) for sale in self: for line in sale.order_line: line.write({'invoiced': 'invoiced'}) self.state = 'invoice_except' return self.order_id.action_invoice_cancel @api.multi def action_cancel(self): ''' @param self: object pointer ''' if not self.order_id: raise UserError(_('Order id is not available')) for sale in self: for invoice in sale.invoice_ids: invoice.state = 'cancel' return self.order_id.action_cancel() @api.multi def action_confirm(self): for order in self.order_id: order.state = 'sale' # order.order_line._action_procurement_create() if not order.analytic_account_id: for line in order.order_line: if line.product_id.invoice_policy == 'cost': order._create_analytic_account() break config_parameter_obj = self.env['ir.config_parameter'] if config_parameter_obj.sudo().get_param('sale.auto_done_setting'): self.order_id.action_done() @api.multi def test_state(self, mode): ''' @param self: object pointer @param mode: state of workflow ''' write_done_ids = [] write_cancel_ids = [] if write_done_ids: test_obj = self.env['sale.order.line'].browse(write_done_ids) test_obj.write({'state': 'done'}) if write_cancel_ids: test_obj = self.env['sale.order.line'].browse(write_cancel_ids) test_obj.write({'state': 'cancel'}) @api.multi def action_cancel_draft(self): ''' @param self: object pointer ''' if not len(self._ids): return False query = "select id from sale_order_line \ where order_id IN %s and state=%s" self._cr.execute(query, (tuple(self._ids), 'cancel')) cr1 = self._cr line_ids = map(lambda x: x[0], cr1.fetchall()) self.write({'state': 'draft', 'invoice_ids': [], 'shipped': 0}) sale_line_obj = self.env['sale.order.line'].browse(line_ids) sale_line_obj.write({'invoiced': False, 'state': 'draft', 'invoice_lines': [(6, 0, [])]}) return True class HotelFolioLine(models.Model): @api.multi def copy(self, default=None): ''' @param self: object pointer @param default: dict of default values to be set ''' return super(HotelFolioLine, self).copy(default=default) @api.model def _get_checkin_date(self): if 'checkin' in self._context: return self._context['checkin'] return time.strftime(DEFAULT_SERVER_DATETIME_FORMAT) @api.model def _get_checkout_date(self): if 'checkout' in self._context: return self._context['checkout'] 
return time.strftime(DEFAULT_SERVER_DATETIME_FORMAT) _name = 'hotel.folio.line' _description = 'hotel folio1 room line' order_line_id = fields.Many2one('sale.order.line', string='Order Line', required=True, delegate=True, ondelete='cascade') folio_id = fields.Many2one('hotel.folio', string='Folio', ondelete='cascade') checkin_date = fields.Datetime('Check In', required=True, default=_get_checkin_date) checkout_date = fields.Datetime('Check Out', required=True, default=_get_checkout_date) is_reserved = fields.Boolean('Is Reserved', help='True when folio line created from \ Reservation') @api.model def create(self, vals, check=True): """ Overrides orm create method. @param self: The object pointer @param vals: dictionary of fields value. @return: new record set for hotel folio line. """ if 'folio_id' in vals: folio = self.env["hotel.folio"].browse(vals['folio_id']) vals.update({'order_id': folio.order_id.id}) return super(HotelFolioLine, self).create(vals) @api.constrains('checkin_date', 'checkout_date') def check_dates(self): ''' This method is used to validate the checkin_date and checkout_date. ------------------------------------------------------------------- @param self: object pointer @return: raise warning depending on the validation ''' if self.checkin_date >= self.checkout_date: raise ValidationError(_('Room line Check In Date Should be \ less than the Check Out Date!')) if self.folio_id.date_order and self.checkin_date: if self.checkin_date <= self.folio_id.date_order: raise ValidationError(_('Room line check in date should be \ greater than the current date.')) @api.multi def unlink(self): """ Overrides orm unlink method. @param self: The object pointer @return: True/False. """ sale_line_obj = self.env['sale.order.line'] fr_obj = self.env['folio.room.line'] for line in self: if line.order_line_id: sale_unlink_obj = (sale_line_obj.browse ([line.order_line_id.id])) for rec in sale_unlink_obj: room_obj = self.env['hotel.room' ].search([('name', '=', rec.name)]) if room_obj.id: folio_arg = [('folio_id', '=', line.folio_id.id), ('room_id', '=', room_obj.id)] folio_room_line_myobj = fr_obj.search(folio_arg) if folio_room_line_myobj.id: folio_room_line_myobj.unlink() room_obj.write({'isroom': True, 'status': 'available'}) sale_unlink_obj.unlink() return super(HotelFolioLine, self).unlink() @api.onchange('product_id') def product_id_change(self): ''' - @param self: object pointer - ''' context = dict(self._context) if not context: context = {} if context.get('folio', False): if self.product_id and self.folio_id.partner_id: self.name = self.product_id.name self.price_unit = self.product_id.list_price self.product_uom = self.product_id.uom_id tax_obj = self.env['account.tax'] pr = self.product_id self.price_unit = tax_obj._fix_tax_included_price(pr.price, pr.taxes_id, self.tax_id) else: if not self.product_id: return {'domain': {'product_uom': []}} val = {} pr = self.product_id.with_context( lang=self.folio_id.partner_id.lang, partner=self.folio_id.partner_id.id, quantity=val.get('product_uom_qty') or self.product_uom_qty, date=self.folio_id.date_order, pricelist=self.folio_id.pricelist_id.id, uom=self.product_uom.id ) p = pr.with_context(pricelist=self.order_id.pricelist_id.id).price if self.folio_id.pricelist_id and self.folio_id.partner_id: obj = self.env['account.tax'] val['price_unit'] = obj._fix_tax_included_price(p, pr.taxes_id, self.tax_id) @api.onchange('checkin_date', 'checkout_date') def on_change_checkout(self): ''' When you change checkin_date or checkout_date it will checked 
it and update the qty of the hotel folio line
        -----------------------------------------------------------------
        @param self: object pointer
        '''
        configured_addition_hours = 0
        fwhouse_id = self.folio_id.warehouse_id
        # use `and` so the variable actually holds the company;
        # the original `or` returned the warehouse record itself
        fwc_id = fwhouse_id and fwhouse_id.company_id
        if fwc_id:
            configured_addition_hours = fwhouse_id.company_id.additional_hours
        myduration = 0
        if not self.checkin_date:
            self.checkin_date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        if not self.checkout_date:
            self.checkout_date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        chckin = self.checkin_date
        chckout = self.checkout_date
        if chckin and chckout:
            server_dt = DEFAULT_SERVER_DATETIME_FORMAT
            chkin_dt = datetime.datetime.strptime(chckin, server_dt)
            chkout_dt = datetime.datetime.strptime(chckout, server_dt)
            dur = chkout_dt - chkin_dt
            sec_dur = dur.seconds
            if (not dur.days and not sec_dur) or (dur.days and not sec_dur):
                myduration = dur.days
            else:
                myduration = dur.days + 1
            # To calculate additional hours in hotel room as per minutes
            if configured_addition_hours > 0:
                additional_hours = abs((dur.seconds / 60) / 60)
                if additional_hours >= configured_addition_hours:
                    myduration += 1
        self.product_uom_qty = myduration
        hotel_room_obj = self.env['hotel.room']
        hotel_room_ids = hotel_room_obj.search([])
        avail_prod_ids = []
        for room in hotel_room_ids:
            assigned = False
            for rm_line in room.room_line_ids:
                if rm_line.status != 'cancel':
                    if (self.checkin_date <= rm_line.check_in <=
                            self.checkout_date) or \
                            (self.checkin_date <= rm_line.check_out <=
                             self.checkout_date):
                        assigned = True
                    elif (rm_line.check_in <= self.checkin_date <=
                          rm_line.check_out) or \
                            (rm_line.check_in <= self.checkout_date <=
                             rm_line.check_out):
                        assigned = True
            if not assigned:
                avail_prod_ids.append(room.product_id.id)
        domain = {'product_id': [('id', 'in', avail_prod_ids)]}
        return {'domain': domain}

    @api.multi
    def button_confirm(self):
        '''
        @param self: object pointer
        '''
        for folio in self:
            line = folio.order_line_id
            line.button_confirm()
        return True

    @api.multi
    def button_done(self):
        '''
        @param self: object pointer
        '''
        # mapped() returns a sale.order.line recordset; the original built
        # a plain Python list, which has no button_done() method
        lines = self.mapped('order_line_id')
        lines.button_done()
        self.state = 'done'
        return True

    @api.multi
    def copy_data(self, default=None):
        '''
        @param self: object pointer
        @param default: dict of default values to be set
        '''
        line_id = self.order_line_id.id
        sale_line_obj = self.env['sale.order.line'].browse(line_id)
        return sale_line_obj.copy_data(default=default)


class HotelServiceLine(models.Model):

    @api.multi
    def copy(self, default=None):
        '''
        @param self: object pointer
        @param default: dict of default values to be set
        '''
        return super(HotelServiceLine, self).copy(default=default)

    @api.model
    def _service_checkin_date(self):
        if 'checkin' in self._context:
            return self._context['checkin']
        return time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)

    @api.model
    def _service_checkout_date(self):
        if 'checkout' in self._context:
            return self._context['checkout']
        return time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)

    _name = 'hotel.service.line'
    _description = 'hotel Service line'

    service_line_id = fields.Many2one('sale.order.line', 'Service Line',
                                      required=True, delegate=True,
                                      ondelete='cascade')
    folio_id = fields.Many2one('hotel.folio', 'Folio', ondelete='cascade')
    ser_checkin_date = fields.Datetime('From Date', required=True,
                                       default=_service_checkin_date)
    ser_checkout_date = fields.Datetime('To Date', required=True,
                                        default=_service_checkout_date)

    @api.model
    def create(self, vals, check=True):
        """
        Overrides orm create method.
        @param self: The object pointer
        @param vals: dictionary of fields value.
        @return: new record set for hotel service line.
        """
        if 'folio_id' in vals:
            folio = self.env['hotel.folio'].browse(vals['folio_id'])
            vals.update({'order_id': folio.order_id.id})
        return super(HotelServiceLine, self).create(vals)

    @api.multi
    def unlink(self):
        """
        Overrides orm unlink method.
        @param self: The object pointer
        @return: True/False.
        """
        s_line_obj = self.env['sale.order.line']
        for line in self:
            if line.service_line_id:
                sale_unlink_obj = s_line_obj.browse([line.service_line_id.id])
                sale_unlink_obj.unlink()
        return super(HotelServiceLine, self).unlink()

    @api.onchange('product_id')
    def product_id_change(self):
        '''
        @param self: object pointer
        '''
        if self.product_id and self.folio_id.partner_id:
            self.name = self.product_id.name
            self.price_unit = self.product_id.list_price
            self.product_uom = self.product_id.uom_id
            tax_obj = self.env['account.tax']
            prod = self.product_id
            self.price_unit = tax_obj._fix_tax_included_price(prod.price,
                                                              prod.taxes_id,
                                                              self.tax_id)

    @api.onchange('ser_checkin_date', 'ser_checkout_date')
    def on_change_checkout(self):
        '''
        When you change ser_checkin_date or ser_checkout_date it will
        update the qty of the hotel service line
        -----------------------------------------------------------------
        @param self: object pointer
        '''
        # compute the fallback once: the original assigned time_a inside
        # the first branch only, raising UnboundLocalError whenever just
        # the checkout date was missing
        now_str = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        if not self.ser_checkin_date:
            self.ser_checkin_date = now_str
        if not self.ser_checkout_date:
            self.ser_checkout_date = now_str
        if self.ser_checkout_date < self.ser_checkin_date:
            # the original raised a bare string; use ValidationError
            raise ValidationError(_('Checkout date must be greater than or '
                                    'equal to the checkin date.'))
        if self.ser_checkin_date and self.ser_checkout_date:
            date_a = time.strptime(self.ser_checkout_date,
                                   DEFAULT_SERVER_DATETIME_FORMAT)[:5]
            date_b = time.strptime(self.ser_checkin_date,
                                   DEFAULT_SERVER_DATETIME_FORMAT)[:5]
            diffDate = datetime.datetime(*date_a) - datetime.datetime(*date_b)
            qty = diffDate.days + 1
            self.product_uom_qty = qty

    @api.multi
    def button_confirm(self):
        '''
        @param self: object pointer
        '''
        for folio in self:
            line = folio.service_line_id
            x = line.button_confirm()
        return x

    @api.multi
    def button_done(self):
        '''
        @param self: object pointer
        '''
        for folio in self:
            line = folio.service_line_id
            x = line.button_done()
        return x

    @api.multi
    def copy_data(self, default=None):
        '''
        @param self: object pointer
        @param default: dict of default values to be set
        '''
        sale_line_obj = self.env['sale.order.line'
                                 ].browse(self.service_line_id.id)
        return sale_line_obj.copy_data(default=default)


class HotelServiceType(models.Model):
    _name = "hotel.service.type"
    _description = "Service Type"

    name = fields.Char('Service Name', size=64, required=True)
    service_id = fields.Many2one('hotel.service.type', 'Service Category')
    child_id = fields.One2many('hotel.service.type', 'service_id',
                               'Child Categories')

    @api.multi
    def name_get(self):
        def get_names(cat):
            """ Return the list [cat.name, cat.service_id.name, ...]
""" res = [] while cat: res.append(cat.name) cat = cat.service_id return res return [(cat.id, " / ".join(reversed(get_names(cat)))) for cat in self] @api.model def name_search(self, name, args=None, operator='ilike', limit=100): if not args: args = [] if name: # Be sure name_search is symetric to name_get category_names = name.split(' / ') parents = list(category_names) child = parents.pop() domain = [('name', operator, child)] if parents: names_ids = self.name_search(' / '.join(parents), args=args, operator='ilike', limit=limit) category_ids = [name_id[0] for name_id in names_ids] if operator in expression.NEGATIVE_TERM_OPERATORS: categories = self.search([('id', 'not in', category_ids)]) domain = expression.OR([[('service_id', 'in', categories.ids)], domain]) else: domain = expression.AND([[('service_id', 'in', category_ids)], domain]) for i in range(1, len(category_names)): domain = [[('name', operator, ' / '.join(category_names[-1 - i:]))], domain] if operator in expression.NEGATIVE_TERM_OPERATORS: domain = expression.AND(domain) else: domain = expression.OR(domain) categories = self.search(expression.AND([domain, args]), limit=limit) else: categories = self.search(args, limit=limit) return categories.name_get() class HotelServices(models.Model): _name = 'hotel.services' _description = 'Hotel Services and its charges' product_id = fields.Many2one('product.product', 'Service_id', required=True, ondelete='cascade', delegate=True) categ_id = fields.Many2one('hotel.service.type', string='Service Category', required=True) product_manager = fields.Many2one('res.users', string='Product Manager') class ResCompany(models.Model): _inherit = 'res.company' additional_hours = fields.Integer('Additional Hours', help="Provide the min hours value for \ check in, checkout days, whatever the \ hours will be provided here based on \ that extra days will be calculated.") class AccountInvoice(models.Model): _inherit = 'account.invoice' @api.model def create(self, vals): res = super(AccountInvoice, self).create(vals) if self._context.get('folio_id'): folio = self.env['hotel.folio'].browse(self._context['folio_id']) folio.write({'hotel_invoice_id': res.id, 'invoice_status': 'invoiced'}) return res
Urbane is elegant and sophisticated, impeccably styled yet inviting and functional as the name implies. Solid maple construction meets today’s heightened demand for longevity and authenticity, while clipped table-top corners are friendly to the touch. Legs are profiled on all four sides to achieve a sense of fluidity that lightens the scale of even the largest pieces. A brushed nickel pull complements the curvilinear contour of the legs.
# Copyright 2013: Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from rally.common import log as logging from rally import consts from rally import osclients from rally.task import context LOG = logging.getLogger(__name__) @context.configure(name="create_flavor", order=1000) class CreateFlavorContext(context.Context): """Create sample flavor This sample create flavor with specified options before task starts and delete it after task completion. To create your own context plugin, inherit it from rally.task.context.Context """ CONFIG_SCHEMA = { "type": "object", "$schema": consts.JSON_SCHEMA, "additionalProperties": False, "properties": { "flavor_name": { "type": "string", }, "ram": { "type": "integer", "minimum": 1 }, "vcpus": { "type": "integer", "minimum": 1 }, "disk": { "type": "integer", "minimum": 1 } } } def setup(self): """This method is called before the task start.""" try: # use rally.osclients to get necessary client instance nova = osclients.Clients(self.context["admin"]["endpoint"]).nova() # and than do what you need with this client self.context["flavor"] = nova.flavors.create( # context settings are stored in self.config name=self.config.get("flavor_name", "rally_test_flavor"), ram=self.config.get("ram", 1), vcpus=self.config.get("vcpus", 1), disk=self.config.get("disk", 1)).to_dict() LOG.debug("Flavor with id '%s'" % self.context["flavor"]["id"]) except Exception as e: msg = "Can't create flavor: %s" % e.message if logging.is_debug(): LOG.exception(msg) else: LOG.warning(msg) def cleanup(self): """This method is called after the task finish.""" try: nova = osclients.Clients(self.context["admin"]["endpoint"]).nova() nova.flavors.delete(self.context["flavor"]["id"]) LOG.debug("Flavor '%s' deleted" % self.context["flavor"]["id"]) except Exception as e: msg = "Can't delete flavor: %s" % e.message if logging.is_debug(): LOG.exception(msg) else: LOG.warning(msg)
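A context plugin like the one above is activated from the `context` section of a Rally task. The sketch below shows roughly what such a task definition might look like, written as a Python dict and dumped to the JSON Rally consumes; the scenario name, its args, and the runner are illustrative assumptions, and only the `create_flavor` key and its properties come from the plugin's CONFIG_SCHEMA.

import json

# Hypothetical Rally task referencing the "create_flavor" context above;
# the scenario and runner settings are placeholders for illustration.
task = {
    "NovaServers.boot_and_delete_server": [{
        "args": {"image": {"name": "cirros"},
                 "flavor": {"name": "rally_test_flavor"}},
        "runner": {"type": "constant", "times": 2, "concurrency": 1},
        "context": {
            # keys below match the plugin's CONFIG_SCHEMA properties
            "create_flavor": {
                "flavor_name": "rally_test_flavor",
                "ram": 512,
                "vcpus": 1,
                "disk": 1,
            },
        },
    }],
}

print(json.dumps(task, indent=2))  # the JSON document Rally would consume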
A new patent application from Google tells us how the search engine may use context to find query suggestions before a searcher has completed typing a full query. Think of Google as a decision engine, focused upon bringing searchers more information about interests they may have. After seeing this patent, I've been thinking about previous patents I've seen from Google that have similarities.

To those of us who are used to doing Search Engine Optimization (SEO), we've been looking at URLs filled with content, and links between that content, and how algorithms such as PageRank (based upon links pointing between pages) and information retrieval scores based upon the relevance of that content have been determining how well pages rank in search results in response to queries entered into search boxes by searchers. Web pages connected by links have been seen as nodes in a graph joined by edges. This was the first generation of SEO.

Chances are good that many of the methods that we have been using to do SEO will remain the same as new features appear in search, such as knowledge panels, rich results, featured snippets, structured snippets, search by photography, and expanded schema covering many more industries and features than it does at present.

My last post was Five Years of Google Ranking Signals, and I start that post by saying that there are other posts about ranking signals that have some issues. But there are other pages that you may want to look at while you are learning to rank webpages, and I didn't want to turn people away from looking at one recent post that did contain a lot of useful information. Cyrus Shepard recently published a post about Google Success Factors on Zyppy.com, which I would recommend that you also check out. Cyrus did a video with Ross Hudgins on Siege Media where they talked about those ranking signals, called Google Ranking Factors with Cyrus Shepard.

I'm keeping this post short on purpose, to make the discussion about ranking the focus and the star of this post. There is some really good information in the video and in the post from Cyrus. Cyrus takes a different approach to writing about ranking signals from what I wrote, but it's worth the time visiting and listening and watching.
# -*- coding: utf-8 -*-
#
# Copyright 2013 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This module implements SQLAlchemy-based types for dict and list
# expressed by json-strings
#

from oslo_serialization import jsonutils
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
from sqlalchemy.ext import mutable


class JsonEncoded(sa.TypeDecorator):
    """Represents an immutable structure as a json-encoded string."""
    impl = sa.Text

    def process_bind_param(self, value, dialect):
        if value is not None:
            value = jsonutils.dumps(value)
        return value

    def process_result_value(self, value, dialect):
        if value is not None:
            value = jsonutils.loads(value)
        return value


class MutableDict(mutable.Mutable, dict):
    @classmethod
    def coerce(cls, key, value):
        """Convert plain dictionaries to MutableDict."""
        if not isinstance(value, MutableDict):
            if isinstance(value, dict):
                return MutableDict(value)
            # this call will raise ValueError
            return mutable.Mutable.coerce(key, value)
        return value

    def update(self, e=None, **f):
        """Detect dictionary update events and emit change events."""
        # dict.update(None) raises TypeError, so only forward `e` when given
        if e is None:
            dict.update(self, **f)
        else:
            dict.update(self, e, **f)
        self.changed()

    def __setitem__(self, key, value):
        """Detect dictionary set events and emit change events."""
        dict.__setitem__(self, key, value)
        self.changed()

    def __delitem__(self, key):
        """Detect dictionary del events and emit change events."""
        dict.__delitem__(self, key)
        self.changed()


class MutableList(mutable.Mutable, list):
    @classmethod
    def coerce(cls, key, value):
        """Convert plain lists to MutableList."""
        if not isinstance(value, MutableList):
            if isinstance(value, list):
                return MutableList(value)
            # this call will raise ValueError
            return mutable.Mutable.coerce(key, value)
        return value

    def __iadd__(self, value):
        """Detect in-place list concatenation and emit change events."""
        # NOTE: this was originally __add__, but list.__add__ returns a new
        # list without mutating self, so that override returned None and
        # reported a phantom change; += is the mutating operation.
        list.__iadd__(self, value)
        self.changed()
        return self

    def append(self, value):
        """Detect list add events and emit change events."""
        list.append(self, value)
        self.changed()

    def __setitem__(self, key, value):
        """Detect list set events and emit change events."""
        list.__setitem__(self, key, value)
        self.changed()

    def __delitem__(self, i):
        """Detect list del events and emit change events."""
        list.__delitem__(self, i)
        self.changed()


def JsonDictType():
    """Returns an SQLAlchemy Column Type suitable to store a Json dict."""
    return MutableDict.as_mutable(JsonEncoded)


def JsonListType():
    """Returns an SQLAlchemy Column Type suitable to store a Json array."""
    return MutableList.as_mutable(JsonEncoded)


def LongText():
    # TODO(rakhmerov): Need to do for postgres.
    return sa.Text().with_variant(mysql.LONGTEXT(), 'mysql')


class JsonEncodedLongText(JsonEncoded):
    impl = LongText()


def JsonLongDictType():
    return MutableDict.as_mutable(JsonEncodedLongText)
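To show why the mutation tracking matters, here is a minimal sketch of a declarative model using JsonDictType from the module above (assumed to be in scope); the Workbook model, its table name, and the in-memory SQLite engine are assumptions for illustration. With a bare JsonEncoded column, the in-place update below would be silently lost.

import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()


class Workbook(Base):
    """Hypothetical model, for illustration only."""
    __tablename__ = 'workbook_example'
    id = sa.Column(sa.Integer, primary_key=True)
    spec = sa.Column(JsonDictType(), default=dict)  # from the module above


engine = sa.create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

wb = Workbook(spec={'version': 1})
session.add(wb)
session.commit()

# MutableDict.__setitem__ emits a change event, so this in-place update
# is flushed on commit rather than being missed by the ORM.
wb.spec['version'] = 2
session.commit()
print(session.query(Workbook).one().spec)  # {'version': 2}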
Love the idea, but Dax needs to talk less about himself and more about the guests. And fewer interruptions, please; it is rude. Otherwise charming, but maybe just too self-centered? Either way, great guests, so let them shine. That is the true art of a podcast host. Very smart, honest, and entertaining interviews and discussions. Looking forward to every new episode!
from abc import ABCMeta, abstractmethod import logging import shutil import threading import time import subprocess import multiprocessing log = logging.getLogger(__name__) class MesosTestSupport(object): """ A mixin for test cases that need a running Mesos master and slave on the local host """ def _startMesos(self, numCores=None): if numCores is None: numCores = multiprocessing.cpu_count() shutil.rmtree('/tmp/mesos', ignore_errors=True) self.master = self.MesosMasterThread(numCores) self.master.start() self.slave = self.MesosSlaveThread(numCores) self.slave.start() while self.master.popen is None or self.slave.popen is None: log.info("Waiting for master and slave processes") time.sleep(.1) def _stopMesos(self): self.slave.popen.kill() self.slave.join() self.master.popen.kill() self.master.join() class MesosThread(threading.Thread): __metaclass__ = ABCMeta # Lock is used because subprocess is NOT thread safe: http://tinyurl.com/pkp5pgq lock = threading.Lock() def __init__(self, numCores): threading.Thread.__init__(self) self.numCores = numCores self.popen = None @abstractmethod def mesosCommand(self): raise NotImplementedError def run(self): with self.lock: self.popen = subprocess.Popen(self.mesosCommand()) self.popen.wait() log.info('Exiting %s', self.__class__.__name__) class MesosMasterThread(MesosThread): def mesosCommand(self): return ['mesos-master', '--registry=in_memory', '--ip=127.0.0.1', '--allocation_interval=500ms'] class MesosSlaveThread(MesosThread): def mesosCommand(self): # NB: The --resources parameter forces this test to use a predictable number of cores, independent of how # many cores the system running the test actually has. return ['mesos-slave', '--ip=127.0.0.1', '--master=127.0.0.1:5050', '--resources=cpus(*):%i' % self.numCores]
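A hedged sketch of how a test case might wire in the mixin above (assumed to be importable or in the same file): the class name and assertions are invented for illustration, and the mesos-master and mesos-slave binaries must be on the PATH for this to run.

import unittest


class MesosSmokeTest(unittest.TestCase, MesosTestSupport):
    """Hypothetical example of using the mixin; not part of the module."""

    def setUp(self):
        self._startMesos(numCores=2)  # master on 127.0.0.1:5050 plus a slave

    def tearDown(self):
        self._stopMesos()

    def test_cluster_comes_up(self):
        # _startMesos() only returns once both daemon processes were spawned
        self.assertIsNotNone(self.master.popen)
        self.assertIsNotNone(self.slave.popen)


if __name__ == '__main__':
    unittest.main()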
Save your car's upholstery from seat depressions, dirt, spills, and more with this heavy-duty seat protector. The two-stage design allows you to use the bottom portion only for rear-facing infant seats. When it's time to convert to a forward-facing toddler car seat or booster, simply attach the back piece.
from flask_restplus import Resource, abort from pfamserver.api.v0 import api, schemas from pfamserver.services import pfam_service from pfamserver.extensions import cache from flask import request from zlib import compress from base64 import b64encode ns = api.namespace('pfams', decorators=[ api.response(200, "success"), api.response(400, "not found")]) @ns.errorhandler(pfam_service.PfamServiceError) def handle_root_exception(error): '''Return a custom message and 400 status code''' return {'message': error.message}, 400 def make_cache_key(*args, **kwargs): path = request.path args = str(request.args.items()) return (path + args).encode('utf-8') @ns.route('/<pfam>') class PfamAAPI(Resource): schema = schemas.PfamSchema() @ns.response(200, "response") @ns.doc('Obtain the pfam information.') @cache.cached(timeout=3600) def get(self, pfam): pfam = pfam_service.get_pfam(pfam) data, errors = self.schema.dump(pfam) return data, 200 @ns.route('/<pfam>/sequence_descriptions') class PfamASequenceDescriptionsAPI(Resource): @ns.response(200, "response") @ns.doc('Obtain a sequence_description list from a pfam.') @cache.cached(timeout=3600, key_prefix=make_cache_key) @ns.expect(schemas.pfam_a_query) def get(self, pfam): kwargs = schemas.pfam_a_query.parse_args() with_pdb = kwargs['with_pdb'] sequence_descriptions = pfam_service.get_sequence_descriptions_from_pfam(pfam, with_pdb) data = {'query': pfam, 'with_pdb': with_pdb, 'output': sequence_descriptions, 'size': len(sequence_descriptions)} return data, 200 @ns.route('/<pfam>/stockholm') class PfamAStockholmAPI(Resource): @ns.response(200, "response") @ns.doc('Obtain a sequence_description list from a pfam.') @cache.cached(timeout=3600, key_prefix=make_cache_key) def get(self, pfam): stockholm = pfam_service.get_stockholm_from_pfam(pfam) data = {'query': pfam, 'output': b64encode(compress(stockholm))} return data, 200
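For reference, a client might exercise these endpoints as sketched below. The base URL and the "/v0" prefix are guesses inferred from the blueprint import above, and PF00131 is just a sample accession; only the route paths and the with_pdb parameter come from the resource definitions themselves. The last two lines undo the zlib-plus-base64 encoding that the stockholm route applies.

import requests
from base64 import b64decode
from zlib import decompress

# assumed local deployment; host, port and prefix are illustrative guesses
BASE = "http://localhost:5000/v0/pfams"

print(requests.get(BASE + "/PF00131").json())  # pfam detail

resp = requests.get(BASE + "/PF00131/sequence_descriptions",
                    params={"with_pdb": "true"})
print(resp.json().get("size"))  # number of matching descriptions

stk = requests.get(BASE + "/PF00131/stockholm").json()["output"]
print(decompress(b64decode(stk))[:60])  # first bytes of the alignment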
RNESU Negotiations Committee Meeting & Session with Association. Special meeting re: Para/Bus driver negotiations. 5:30-7 p.m. RNESU Conference Room, 49 Court Drive, Brandon, 802-247-5757.
Front Porch Forum. A screening of a new independent documentary from Canadian filmmaker Peter Strauss, “The Story of Vermont’s Quiet Digital Revolution.” 6-8 p.m. Town Hall Theater, 68 South Pleasant Street, Middlebury, 802-388-1436.
Effie Jean in Tahiti. Mill River Union HS’s Stage 40 presents the comedy “Effie Jean in Tahiti” by David Johnston. A 60-minute play; a hilarious mixture of Gilligan’s Island and The Little Mermaid, based on Euripides’ “Iphigenia in Tauris.” $5, 7:30-8:30 p.m. Mill River UHS, 2321 Middle Road, North Clarendon, 802-775-3451.
Seasonal Affective Disorder: Symptoms, Causes, Treatments, and New Research on Cognitive-Behavioral Therapy. Dr. Kelly J. Rohan, professor of Psychological Science at UVM, will give an overview of adult depression, including SAD, as well as new clinical trial findings comparing cognitive-behavioral therapy and light therapy. $5 or Osher membership, 1:30-3:30 p.m. Godnick Center, 1 Deer Street, Rutland.
Gary Wade. Local songster Gary Wade returns to Bomoseen. 6-9 p.m. Lake Bomoseen Inn Taproom, RT 30, Lake Bomoseen, 802-236-7317.
Tales from Japan. Original work by Skyler Ambrose, RYT alumna. Stories of East Asian mythology showcase the meaning of kindness, love, loyalty, and bravery. 2 p.m. and 7 p.m. $6 children/seniors, $8 adults. Rutland Intermediate School, 63-65 Library Avenue, Rutland, 802-558-4177.
Sylvia. A comedy that tells the story of an empty-nester couple, Greg and Kate, who have recently moved from the suburbs to New York City. Presented by The Dorset Players. $25, 7:30-9:30 p.m. Dorset Playhouse, 104 Cheney Road, Dorset, 802-394-5570.
Damn It All. An original rock band out of Rutland, Vermont. Their mission is simple: put down the “acousticrap” and get their “grrr” on. 9:30 p.m.-1:30 a.m. Hide-A-Way Tavern, 42 Center Street, Rutland, 802-558-9580.
Vermont One Act Festival. Gathers students from schools throughout the state for a day-long celebration of theater, with performances throughout the day. $5. Mill River UHS, 2321 Middle Road, North Clarendon, 802-775-3451.
Know Your Rights Training. Workshops and a panel of recognizable and widely respected black men from Vermont, sharing their experiences, suggestions, and hopes for the future. Register ahead of time to ensure seating, materials, and meals. 8:30 a.m.-2:30 p.m. Grace Congregational Church, 8 Court Street, Rutland, 802-342-4755.
Salisbury Steak Dinner with DJ. Public welcome. $10, 5-7 p.m. VFW Post 648 Rutland, 15 Wales St, Rutland, 802-775-6892.
VSO Jukebox. This innovative chamber music series, curated by Vermont composer and VSO Creative Projects Chair Matt LaRocca, features a sliding-scale payment option and a singular concert experience. 6:30 p.m. Merchant’s Hall, 42 Merchants Row, Rutland, 802-855-8081.
The Will Patton Ensemble. Gypsy jazz and Brazilian samba music. $10; teens and kids are free. 7:30-9:30 p.m. Burnham Hall, 52 River Road, Lincoln, 802-388-6863.
Metal The Mouse March. A high-energy metal show with three bands: Blind Threat from Albany, NY; Every Enemy Alive from Bennington; and Humdinger & the Bucksnort from Rutland. $5 band donation, 8-10:30 p.m. The Howlin’ Mouse Record Store, 158 N. Main Street, Rutland, 802-772-7955.
Town Hall with Lt. Governor David Zuckerman. A discussion about the public policies and issues that affect racial justice, jobs, healthcare, education, our environment, and creating safe and just communities. Beverages and snacks provided. 2-4 p.m. Unitarian Universalist Church, 117 West Street, Rutland City, 802-353-0998.
Youth Showcase. Youth choirs Joyful Noise and Children’s Choir will offer songs, including a medley from Grease, as well as other solos and duets, raising money for the HEAL Foundation. Refreshments for sale. Free-will offering, 6-8 p.m. Grace Congregational Church, 8 Court Street, Rutland, 802-775-4301.
Silas. Folkhop, enjoyable grooves, themes of reggae and bluegrass. 7-10 p.m. Hide-A-Way Tavern, 42 Center Street, Rutland, 802-558-9580.
OVUU Planning Task Force. Special meeting re: 5-year plan. 6-8 p.m. RNESU Conference Room, 49 Court Drive, Brandon, 802-247-5757.
OVUU Hawk Hill Committee. Special meeting re: Hawk Hill discussion. 6-8 p.m. OVUHS Conference Room, 2997 Franklin St., Brandon, 802-247-5757.
# Django settings for phillypug project.

DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    ('Jason Stelzer', '[email protected]'),
    # ('Your Name', '[email protected]'),
)

MANAGERS = ADMINS

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',  # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'phillypug',       # Or path to database file if using sqlite3.
        'USER': 'pugadmin',        # Not used with sqlite3.
        'PASSWORD': 'secret',      # Not used with sqlite3.
        'HOST': 'localhost',       # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '',                # Set to empty string for default. Not used with sqlite3.
    }
}

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
# NOTE: tz database names use an underscore, not a space.
TIME_ZONE = 'America/New_York'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 3

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True

# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = '/Users/cynic/projects/phillypug/phillypug/media'

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = 'http://www.phillypug.org/static/'

# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'

# Make this unique, and don't share it with anybody.
SECRET_KEY = 'oq_dm6t5h99j7e$7vdbvh8xewt4idldki%7xsf-f#g4!#g8j0v'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

ROOT_URLCONF = 'urls'

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    "/Users/cynic/projects/phillypug/phillypug/templates",
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.admin',
    'django.contrib.admindocs',
    'memberlist',
    'about',
    'events',
)

MEETUP_CFG = "/etc/phillypug/meetup.cfg"
Many K-12 public school systems and other not-for-profit organizations allow their employees to contribute to a Section 403(b) or 403(b)(7) retirement plan through payroll deduction. These employees' plan contributions must be forwarded to each active investment provider. Without Bay Bridge's help, you may have to maintain separate records for each investment provider and write a separate monthly check to each one. We can relieve you of that burden. Using our common remitting service, you will only write one monthly check - to Bay Bridge. We will forward all employee and employer (if any) contributions to each of the respective plan vendors. Section 403(b) plans are likely not your area of expertise, so let Bay Bridge handle those compliance and reporting requirements for you. We will prepare all required IRS reports and ensure your compliance with current and future regulations.
#!/usr/bin/env python
# This file is copied from GCoder.
#
# GCoder is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GCoder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.

import sys
import re
import math
import datetime
import logging
from array import array

from printrun_utils import install_locale
install_locale('pronterface')

gcode_parsed_args = ["x", "y", "e", "f", "z", "i", "j"]
gcode_parsed_nonargs = ["g", "t", "m", "n"]
to_parse = "".join(gcode_parsed_args + gcode_parsed_nonargs)
gcode_exp = re.compile("\([^\(\)]*\)|;.*|[/\*].*\n|([%s])([-+]?[0-9]*\.?[0-9]*)" % to_parse)
m114_exp = re.compile("\([^\(\)]*\)|[/\*].*\n|([XYZ]):?([-+]?[0-9]*\.?[0-9]*)")
specific_exp = "(?:\([^\(\)]*\))|(?:;.*)|(?:[/\*].*\n)|(%s[-+]?[0-9]*\.?[0-9]*)"
move_gcodes = ["G0", "G1", "G2", "G3"]

class PyLine(object):

    __slots__ = ('x', 'y', 'z', 'e', 'f', 'i', 'j',
                 'raw', 'command', 'is_move',
                 'relative', 'relative_e',
                 'current_x', 'current_y', 'current_z',
                 'extruding', 'current_tool',
                 'gcview_end_vertex')

    def __init__(self, l):
        self.raw = l

    def __getattr__(self, name):
        return None

try:
    import gcoder_line
    Line = gcoder_line.GLine
except ImportError:
    Line = PyLine

def find_specific_code(line, code):
    exp = specific_exp % code
    bits = [bit for bit in re.findall(exp, line.raw) if bit]
    if not bits:
        return None
    else:
        return float(bits[0][1:])

def S(line):
    return find_specific_code(line, "S")

def P(line):
    return find_specific_code(line, "P")

def split(line):
    split_raw = gcode_exp.findall(line.raw.lower())
    if not split_raw:
        line.command = line.raw
        line.is_move = False
        logging.warning(_("raw G-Code line \"%s\" could not be parsed") % line.raw)
        return [line.raw]
    command = split_raw[0] if split_raw[0][0] != "n" else split_raw[1]
    line.command = command[0].upper() + command[1]
    line.is_move = line.command in move_gcodes
    return split_raw

def parse_coordinates(line, split_raw, imperial = False, force = False):
    # Not a G-line, we don't want to parse its arguments
    if not force and line.command[0] != "G":
        return
    unit_factor = 25.4 if imperial else 1
    for bit in split_raw:
        code = bit[0]
        if code not in gcode_parsed_nonargs and bit[1]:
            setattr(line, code, unit_factor * float(bit[1]))

class Layer(list):

    __slots__ = ("duration", "z")

    def __init__(self, lines, z = None):
        super(Layer, self).__init__(lines)
        self.z = z

class GCode(object):

    lines = None
    layers = None
    all_layers = None
    layer_idxs = None
    line_idxs = None
    append_layer = None
    append_layer_id = None

    imperial = False
    relative = False
    relative_e = False
    current_tool = 0
    # Home position: current absolute position counted from machine origin
    home_x = 0
    home_y = 0
    home_z = 0
    # Current position: current absolute position counted from machine origin
    current_x = 0
    current_y = 0
    current_z = 0
    # For E this is the absolute position from machine start
    current_e = 0
    # Current feedrate
    current_f = 0
    # Offset: current offset between the machine origin and the machine
    # current absolute coordinate system (as shifted by G92s)
    offset_x = 0
    offset_y = 0
    offset_z = 0
    offset_e = 0
    # Expected behavior:
    # - G28 X => X axis is homed, offset_x <- 0, current_x <- home_x
    # - G92 Xk => X axis does not move, so current_x does not change
    #   and offset_x <- current_x - k,
    # - absolute G1 Xk => X axis moves, current_x <- offset_x + k
    # How to get...
    # current abs X from machine origin: current_x
    # current abs X in machine current coordinate system: current_x - offset_x

    filament_length = None
    duration = None
    xmin = None
    xmax = None
    ymin = None
    ymax = None
    zmin = None
    zmax = None
    width = None
    depth = None
    height = None

    est_layer_height = None

    # abs_x is the current absolute X in machine current coordinate system
    # (after the various G92 transformations) and can be used to store the
    # absolute position of the head at a given time
    def _get_abs_x(self):
        return self.current_x - self.offset_x
    abs_x = property(_get_abs_x)

    def _get_abs_y(self):
        return self.current_y - self.offset_y
    abs_y = property(_get_abs_y)

    def _get_abs_z(self):
        return self.current_z - self.offset_z
    abs_z = property(_get_abs_z)

    def _get_abs_e(self):
        return self.current_e - self.offset_e
    abs_e = property(_get_abs_e)

    def _get_abs_pos(self):
        return (self.abs_x, self.abs_y, self.abs_z)
    abs_pos = property(_get_abs_pos)

    def _get_current_pos(self):
        return (self.current_x, self.current_y, self.current_z)
    current_pos = property(_get_current_pos)

    def _get_home_pos(self):
        return (self.home_x, self.home_y, self.home_z)

    def _set_home_pos(self, home_pos):
        if home_pos:
            self.home_x, self.home_y, self.home_z = home_pos
    home_pos = property(_get_home_pos, _set_home_pos)

    def __init__(self, data = None, home_pos = None):
        self.home_pos = home_pos
        if data:
            self.lines = [Line(l2) for l2 in
                          (l.strip() for l in data)
                          if l2]
            self._preprocess_lines()
            self.filament_length = self._preprocess_extrusion()
            self._create_layers()
            self._preprocess_layers()
        else:
            self.lines = []

    def __len__(self):
        return len(self.line_idxs)

    def __iter__(self):
        return self.lines.__iter__()

    def append(self, command, store = True):
        command = command.strip()
        if not command:
            return
        gline = Line(command)
        self._preprocess_lines([gline])
        self._preprocess_extrusion([gline])
        if store:
            self.lines.append(gline)
            self.append_layer.append(gline)
            self.layer_idxs.append(self.append_layer_id)
            self.line_idxs.append(len(self.append_layer))
        return gline

    def _preprocess_lines(self, lines = None):
        """Checks for imperial/relativeness settings and tool changes"""
        if not lines:
            lines = self.lines
        imperial = self.imperial
        relative = self.relative
        relative_e = self.relative_e
        current_tool = self.current_tool
        current_x = self.current_x
        current_y = self.current_y
        current_z = self.current_z
        offset_x = self.offset_x
        offset_y = self.offset_y
        offset_z = self.offset_z
        for line in lines:
            split_raw = split(line)
            if not line.command:
                continue

            # Update properties
            if line.is_move:
                line.relative = relative
                line.relative_e = relative_e
                line.current_tool = current_tool
            elif line.command == "G20":
                imperial = True
            elif line.command == "G21":
                imperial = False
            elif line.command == "G90":
                relative = False
                relative_e = False
            elif line.command == "G91":
                relative = True
                relative_e = True
            elif line.command == "M82":
                relative_e = False
            elif line.command == "M83":
                relative_e = True
            elif line.command[0] == "T":
                current_tool = int(line.command[1:])

            if line.command[0] == "G":
                parse_coordinates(line, split_raw, imperial)

            # Compute current position
            if line.is_move:
                x = line.x
                y = line.y
                z = line.z

                if line.f is not None:
                    self.current_f = line.f

                if line.relative:
                    x = current_x + (x or 0)
                    y = current_y + (y or 0)
                    z = current_z + (z or 0)
                else:
                    if x is not None:
                        x = x + offset_x
                    if y is not None:
                        y = y + offset_y
                    if z is not None:
                        z = z + offset_z

                if x is not None: current_x = x
                if y is not None: current_y = y
                if z is not None: current_z = z

            elif line.command == "G28":
                home_all = not any([line.x, line.y, line.z])
                if home_all or line.x is not None:
                    offset_x = 0
                    current_x = self.home_x
                if home_all or line.y is not None:
                    offset_y = 0
                    current_y = self.home_y
                if home_all or line.z is not None:
                    offset_z = 0
                    current_z = self.home_z

            elif line.command == "G92":
                if line.x is not None: offset_x = current_x - line.x
                if line.y is not None: offset_y = current_y - line.y
                if line.z is not None: offset_z = current_z - line.z

            line.current_x = current_x
            line.current_y = current_y
            line.current_z = current_z

        self.imperial = imperial
        self.relative = relative
        self.relative_e = relative_e
        self.current_tool = current_tool
        self.current_x = current_x
        self.current_y = current_y
        self.current_z = current_z
        self.offset_x = offset_x
        self.offset_y = offset_y
        self.offset_z = offset_z

    def _preprocess_extrusion(self, lines = None):
        if not lines:
            lines = self.lines
        current_e = self.current_e
        offset_e = self.offset_e
        total_e = 0
        max_e = 0
        for line in lines:
            if line.e is None:
                continue
            if line.is_move:
                if line.relative_e:
                    line.extruding = line.e > 0
                    total_e += line.e
                    current_e += line.e
                else:
                    new_e = line.e + offset_e
                    line.extruding = new_e > current_e
                    total_e += new_e - current_e
                    current_e = new_e
                max_e = max(max_e, total_e)
            elif line.command == "G92":
                offset_e = current_e - line.e
        self.current_e = current_e
        self.offset_e = offset_e
        return max_e

    # FIXME : looks like this needs to be tested with list Z on move
    def _create_layers(self):
        layers = {}
        all_layers = []
        layer_idxs = []
        line_idxs = []

        layer_id = 0
        layer_line = 0

        last_layer_z = None
        prev_z = None
        prev_base_z = (None, None)
        cur_z = None
        cur_lines = []
        for line in self.lines:
            if line.command == "G92" and line.z is not None:
                cur_z = line.z
            elif line.is_move:
                if line.z is not None:
                    if line.relative:
                        cur_z += line.z
                    else:
                        cur_z = line.z

            # FIXME: the logic behind this code seems to work, but it might be
            # broken
            if cur_z != prev_z:
                if prev_z is not None and last_layer_z is not None:
                    offset = self.est_layer_height if self.est_layer_height else 0.01
                    if abs(prev_z - last_layer_z) < offset:
                        if self.est_layer_height is None:
                            zs = sorted([l.z for l in all_layers if l.z is not None])
                            heights = [round(zs[i + 1] - zs[i], 3) for i in range(len(zs) - 1)]
                            if len(heights) >= 2:
                                self.est_layer_height = heights[1]
                            elif heights:
                                self.est_layer_height = heights[0]
                            else:
                                self.est_layer_height = 0.1
                        base_z = round(prev_z - (prev_z % self.est_layer_height), 2)
                    else:
                        base_z = round(prev_z, 2)
                else:
                    base_z = prev_z

                if base_z != prev_base_z:
                    all_layers.append(Layer(cur_lines, base_z))
                    old_lines = layers.get(base_z, [])
                    old_lines += cur_lines
                    layers[base_z] = old_lines
                    cur_lines = []
                    layer_id += 1
                    layer_line = 0
                    last_layer_z = base_z
                prev_base_z = base_z

            cur_lines.append(line)
            layer_idxs.append(layer_id)
            line_idxs.append(layer_line)
            layer_line += 1
            prev_z = cur_z

        if cur_lines:
            all_layers.append(Layer(cur_lines, prev_z))
            old_lines = layers.get(prev_z, [])
            old_lines += cur_lines
            layers[prev_z] = old_lines

        for zindex in layers.keys():
            cur_lines = layers[zindex]
            has_movement = False
            for l in layers[zindex]:
                if l.is_move and l.e is not None:
                    has_movement = True
                    break
            if has_movement:
                layers[zindex] = Layer(cur_lines, zindex)
            else:
                del layers[zindex]

        self.append_layer_id = len(all_layers)
        self.append_layer = Layer([])
        all_layers.append(self.append_layer)
        self.all_layers = all_layers
        self.layers = layers
        self.layer_idxs = array('I', layer_idxs)
        self.line_idxs = array('I', line_idxs)

    def idxs(self, i):
        return self.layer_idxs[i], self.line_idxs[i]

    def num_layers(self):
        return len(self.layers)

    def _preprocess_layers(self):
        xmin = float("inf")
        ymin = float("inf")
        zmin = 0
        xmax = float("-inf")
        ymax = float("-inf")
        zmax = float("-inf")
        # Count moves without extrusion if filament length is lower than 0
        count_noe = self.filament_length <= 0
        for line in self.lines:
            if line.is_move and (line.extruding or count_noe):
                if line.current_x is not None:
                    xmin = min(xmin, line.current_x)
                    xmax = max(xmax, line.current_x)
                if line.current_y is not None:
                    ymin = min(ymin, line.current_y)
                    ymax = max(ymax, line.current_y)
                if line.current_z is not None:
                    zmin = min(zmin, line.current_z)
                    zmax = max(zmax, line.current_z)
        self.xmin = xmin if not math.isinf(xmin) else 0
        self.xmax = xmax if not math.isinf(xmax) else 0
        self.ymin = ymin if not math.isinf(ymin) else 0
        self.ymax = ymax if not math.isinf(ymax) else 0
        self.zmin = zmin if not math.isinf(zmin) else 0
        self.zmax = zmax if not math.isinf(zmax) else 0
        self.width = self.xmax - self.xmin
        self.depth = self.ymax - self.ymin
        self.height = self.zmax - self.zmin

    def estimate_duration(self):
        if self.duration is not None:
            return self.duration
        lastx = lasty = lastz = laste = lastf = 0.0
        lastdx = 0
        lastdy = 0
        x = y = e = f = 0.0
        currenttravel = 0.0
        moveduration = 0.0
        totalduration = 0.0
        acceleration = 2000.0  # mm/s^2
        layerbeginduration = 0.0

        # TODO:
        # get device caps from firmware: max speed, acceleration/axis
        # (including extruder)
        # calculate the maximum move duration accounting for above ;)
        for layer in self.all_layers:
            for line in layer:
                if line.command not in ["G1", "G0", "G4"]:
                    continue
                if line.command == "G4":
                    moveduration = line.p
                    if not moveduration:
                        continue
                    else:
                        moveduration /= 1000.0
                else:
                    x = line.x if line.x is not None else lastx
                    y = line.y if line.y is not None else lasty
                    z = line.z if line.z is not None else lastz
                    e = line.e if line.e is not None else laste
                    # mm/s vs mm/m => divide by 60
                    f = line.f / 60.0 if line.f is not None else lastf

                    # given last feedrate and current feedrate calculate the
                    # distance needed to achieve current feedrate.
                    # if travel is longer than req'd distance, then subtract
                    # distance to achieve full speed, and add the time it took
                    # to get there.
                    # then calculate the time taken to complete the remaining
                    # distance

                    # FIXME: this code has been proven to be super wrong when 2
                    # subsquent moves are in opposite directions, as requested
                    # speed is constant but printer has to fully decellerate
                    # and reaccelerate
                    # The following code tries to fix it by forcing a full
                    # reacceleration if this move is in the opposite direction
                    # of the previous one
                    dx = x - lastx
                    dy = y - lasty
                    if dx * lastdx + dy * lastdy <= 0:
                        lastf = 0

                    currenttravel = math.hypot(dx, dy)
                    if currenttravel == 0:
                        if line.z is not None:
                            currenttravel = abs(line.z) if line.relative else abs(line.z - lastz)
                        elif line.e is not None:
                            currenttravel = abs(line.e) if line.relative_e else abs(line.e - laste)
                    # Feedrate hasn't changed, no acceleration/decceleration planned
                    if f == lastf:
                        moveduration = currenttravel / f if f != 0 else 0.
                    else:
                        # FIXME: review this better
                        # this looks wrong : there's little chance that the feedrate
                        # we'll decelerate to is the previous feedrate
                        # shouldn't we instead look at three consecutive moves ?
                        distance = 2 * abs(((lastf + f) * (f - lastf) * 0.5) / acceleration)  # multiply by 2 because we have to accelerate and decelerate
                        if distance <= currenttravel and lastf + f != 0 and f != 0:
                            moveduration = 2 * distance / (lastf + f)  # This is distance / mean(lastf, f)
                            moveduration += (currenttravel - distance) / f
                        else:
                            moveduration = 2 * currenttravel / (lastf + f)  # This is currenttravel / mean(lastf, f)
                            # FIXME: probably a little bit optimistic, but probably a much better estimate than the previous one:
                            # moveduration = math.sqrt(2 * distance / acceleration)  # probably buggy : not taking actual travel into account

                    lastdx = dx
                    lastdy = dy

                totalduration += moveduration

                lastx = x
                lasty = y
                lastz = z
                laste = e
                lastf = f

            layer.duration = totalduration - layerbeginduration
            layerbeginduration = totalduration

        totaltime = datetime.timedelta(seconds = int(totalduration))
        self.duration = totaltime
        return "%d layers, %s" % (len(self.layers), str(totaltime))

def main():
    if len(sys.argv) < 2:
        print "usage: %s filename.gcode" % sys.argv[0]
        return
    print "Line object size:", sys.getsizeof(Line("G0 X0"))
    gcode = GCode(open(sys.argv[1], "rU"))
    print "Dimensions:"
    xdims = (gcode.xmin, gcode.xmax, gcode.width)
    print "\tX: %0.02f - %0.02f (%0.02f)" % xdims
    ydims = (gcode.ymin, gcode.ymax, gcode.depth)
    print "\tY: %0.02f - %0.02f (%0.02f)" % ydims
    zdims = (gcode.zmin, gcode.zmax, gcode.height)
    print "\tZ: %0.02f - %0.02f (%0.02f)" % zdims
    print "Filament used: %0.02fmm" % gcode.filament_length
    print "Number of layers: %d" % gcode.num_layers()
    print "Estimated duration: %s" % gcode.estimate_duration()

if __name__ == '__main__':
    main()
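For reference, a minimal sketch of driving the parser from Python rather than the command line. The G-code lines here are made up, and the import assumes the file above is saved as gcoder.py with the Printrun sources (printrun_utils) importable; `GCode.__init__` only needs an iterable of strings it can strip line by line:

```python
from gcoder import GCode  # assumption: file saved as gcoder.py, printrun_utils available

# Hypothetical toy program: metric units, absolute mode, one travel, one extruding move
lines = ["G21", "G90", "G1 X10 Y10 F3000", "G1 X20 E1.5"]
gcode = GCode(lines)

print("Width: %.2f mm" % gcode.width)               # bounds only track extruding moves
print("Filament used: %.2f mm" % gcode.filament_length)
print(gcode.estimate_duration())                    # "<n> layers, H:MM:SS"
```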
Contains 1/4 troy ounce of .9999 pure gold. Considered legal tender by the Government of Australia (face value of $25 AUD). Obverse design: traditional bust of Queen Elizabeth II along with the date, purity (1/4 oz .9999 gold), and face value of the coin ($25 AUD).
__author__ = 'Rogelio Negrete - Weffe'
import pickle
import random
import time
from datetime import datetime


class PickleList:
    def __init__(self, nameoffile):
        self.nameoffile = nameoffile
        self.pickle_list = self.load_pickle_list()

    def load_pickle_list(self):
        # Return the stored list, or an empty list if the file is missing,
        # empty, or unreadable
        try:
            with open(self.nameoffile, "rb") as file:
                return pickle.load(file)
        except (OSError, EOFError, pickle.UnpicklingError):
            return []

    def add_to_pickle_list(self, user_input):
        # It's a list of tuples:
        # [(permalink, timeposted), (permalink, timeposted), ...]
        self.pickle_list.append(user_input)

    def save_pickle_list(self):
        # Save to the existing file
        with open(self.nameoffile, "wb") as file:
            pickle.dump(self.pickle_list, file)

    def manual_add_to_pickle_list(self):
        while True:
            user_input = input("Enter in (Nigel) Link [Enter DONE to stop]: ")
            if user_input.lower() == 'done':  # accept DONE/done/Done, as the prompt implies
                break
            self.pickle_list.append(user_input)
            print(self.pickle_list)
        self.save_pickle_list()

    def empty_pickle_file(self):
        # Cheeky way of deleting the file content:
        # just open the file and save an empty list to it, which overwrites everything
        with open(self.nameoffile, "wb") as file:
            pickle.dump([], file)

    def print_pickle(self):
        print(self.pickle_list)

    # -----------------------
    # NIGEL-related method(s)
    def choose_random_nigel_pic(self):
        # Choose a random Nigel picture from the pickle list
        return random.choice(self.pickle_list)

    # ----------------------
    # Permalink-related method(s)
    def is_link_in_list(self, permalink):
        # Each permalink entry looks like: (permalink, date_posted)
        return any(permalink == entry[0] for entry in self.pickle_list)

    def clean_up_permalink_list(self):
        day_ago = datetime.fromtimestamp(time.time() - (24 * 60 * 60))  # date for 24 hours ago
        # Iterate over a copy: removing items from a list while iterating
        # over it silently skips entries
        for tupleItem in list(self.pickle_list):
            permalink_date = tupleItem[1]
            time_delta = int((day_ago - permalink_date).days) + 1
            if time_delta >= 2:
                self.pickle_list.remove(tupleItem)
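A minimal usage sketch of the class above; the file name and the permalink are hypothetical (Reddit-style permalinks are assumed, as the method names suggest):

```python
from datetime import datetime

# Hypothetical file name; any path the bot can write to works.
links = PickleList("permalinks.pickle")

# Store (permalink, date_posted) tuples, as the cleanup/lookup methods expect.
links.add_to_pickle_list(("/r/example/comments/abc123", datetime.now()))
links.save_pickle_list()

print(links.is_link_in_list("/r/example/comments/abc123"))  # True
links.clean_up_permalink_list()  # drops entries older than roughly a day
```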
Peter Ludwig (Saul), bass-baritone, enjoys ongoing careers as a performer and as a teacher. He has been a soloist in concerts and recitals at Carnegie Hall, Weill Hall, the 92nd Street Y, and other venues in the U.S., Italy, and Switzerland. He has been a soloist in nine world or U.S. premieres of works by historical and living composers ranging from Rossini and Georg Schoenberg to Gerald Busby and Anna Dembska. Mr. Ludwig has appeared in a number of noted opera productions in New York, among them as Horace in the Encompass Theater revival of Blitzstein's Regina and as the Seneschal in the Vineyard Theater's acclaimed American premiere of Donizetti's Gianni di Parigi, and has sung principal roles with Opera Ensemble of New York, Bel Canto Opera, Pensacola Opera, Chattanooga Opera, Rockland Opera, and at the historic Smith Opera House in Geneva, NY. He maintains his own studio in New York (www.ludwigsinging.com), and many of his students have gone on to leading conservatories and successful performing careers.

Michael Ashby (David), tenor, was recently a soloist in the Mozart Requiem at West Liberty College. Previous performances include Mime in Das Rheingold as a visiting artist for The Academy of Vocal Arts and Lippo Fiorentino in Weill's Street Scene with Rutgers Opera Company. Other roles include Don Basilio and Don Curzio (Le nozze di Figaro), Ferrando (Così fan tutte), Benjamin Franklin (1776) for Actors'NET of Bucks County, and Pseudolus (A Funny Thing Happened on the Way to the Forum) for NJCU. He has also appeared with Actors'NET of Bucks County as Corporal Schultz in Stalag 17 and as Pirelli in Sweeney Todd. Prior roles with Rutgers Opera Company include Monostatos (Die Zauberflöte), Slender (The Merry Wives of Windsor), and Sergeant Duffy in the premiere of Jerome Morosse's Sorry, Wrong Number. Mr. Ashby performed the role of Midas in Die schöne Galathée for Concert Operetta Theater of Philadelphia.

Charles Schneider (Zerubbabel) is a lyric baritone who enjoys a busy schedule of both singing and teaching. Mr. Schneider currently serves as an adjunct professor of voice at New Jersey City University and at Wagner College, where he is also the director of the Opera Workshop. He has sung roles with Des Moines Metro Opera, Lyric Opera Cleveland, The Princeton Festival, Opera New York, The OK Mozart Festival, Anchorage Opera, Opera Iowa, National Opera Company, and Wildwood Park for the Performing Arts (Little Rock, Arkansas). He is currently enrolled in the Doctor of Musical Arts program at Mason Gross School of the Arts.

Lyric baritone Justin Johnson (Jonathan) has performed nationally with companies such as New Jersey Opera Theater, Amarillo Opera, Bay Area Summer Opera Theater Institute, Artistic Repertory Theater, and Westminster Opera Theater, among others. Justin will make his New York City chamber recital debut this upcoming season with Música de Cámera. His repertoire includes Bob in Menotti's The Old Maid and the Thief, Sid and the Vicar in Albert Herring, the Captain in Candide, John Brooke in Little Women, Elviro in Serse, Lord Devenaut in Der Vampyr, and Peter in Hansel and Gretel, among others. Justin made his stage directing debut in spring 2006 with the New Jersey Opera Theater in The Telephone, Trouble in Tahiti, and The Impresario.

Soprano Bethany Reeves (Hannagail) pursues a multi-faceted career as a soloist, director, voice teacher, choral conductor, and actress.
As a soprano she has worked extensively with New York's adventurous American Chamber Opera Company, including premieres of custom-written work (upcoming: Douglas Anderson's "Antigone Sings"). She recently directed "Hansel & Gretel" for the Berkshire Opera and played Viola in Twelfth Night for the Actors Shakespeare Company. Ms. Reeves is a DMA candidate in voice at Rutgers and teaches music at Stevens Institute of Technology.

Mezzo-soprano Adrienne Alexander (Hannagail) is currently pursuing a DMA in music education at the Mason Gross School of the Arts at Rutgers University. Chosen as runner-up in the 2005-2006 Concerto Competition, she recently performed Alban Berg's "Sieben frühe Lieder" with the Rutgers Orchestra.

Diverse Spanish artist Cristina Pato (pianist) enjoys an active professional career devoted both to Galician popular music as a bagpiper and to classical music as a pianist. Her dual careers have led to performances on major stages in Spain, Portugal, the United Kingdom, France, Brazil, Italy, Germany, Mexico, and the US. Ms. Pato has released three CDs as a solo bagpiper, has collaborated on international tours with artists and ensembles such as The Chieftains, Yo-Yo Ma, Hevia, the Royal Pipe Band, the Tenerife Symphony Orchestra, the Silk Road Ensemble, and the Galicia Symphony Orchestra, and has appeared on more than twenty recordings as a guest artist. As a classically trained pianist, Ms. Pato holds Master's degrees in piano, music theory, and chamber music from the Conservatorio del Liceu (Barcelona), as well as a Master's degree in digital arts (computer music) from the Universitat Pompeu Fabra (Barcelona). As the recipient of a major grant from the Fundación Barrié de la Maza, she is currently a Doctor of Musical Arts candidate in collaborative piano at Rutgers University, where she studies with Prof. Gonzalez-Palmer.
""" space.py simulate a spacecraft with no traction at all """ import pygame, math pygame.init() class Ship(pygame.sprite.Sprite): def __init__(self, screen): pygame.sprite.Sprite.__init__(self) self.screen = screen self.imageThrust = pygame.image.load("shipThrust.png") self.imageThrust = self.imageThrust.convert() self.imageCruise = pygame.image.load("shipCruise.png") self.imageCruise = self.imageCruise.convert() self.imageLeft = pygame.image.load("shipLeft.png") self.imageLeft = self.imageLeft.convert() self.imageRight = pygame.image.load("shipRight.png") self.imageRight = self.imageRight.convert() self.imageMaster = self.imageCruise self.image = self.imageMaster self.rect = self.image.get_rect() self.x = 100 self.y = 100 self.dx = 0 self.dy = 0 self.dir = 0 self.turnRate = 5 self.thrust = 0 def update(self): self.checkKeys() self.rotate() self.calcVector() self.setPos() self.checkBounds() self.rect.center = (self.x, self.y) def checkKeys(self): keys = pygame.key.get_pressed() self.imageMaster = self.imageCruise if keys[pygame.K_RIGHT]: self.dir -= self.turnRate if self.dir < 0: self.dir = 360 - self.turnRate self.imageMaster = self.imageRight if keys[pygame.K_LEFT]: self.dir += self.turnRate if self.dir > 360: self.dir = self.turnRate self.imageMaster = self.imageLeft if keys[pygame.K_UP]: self.thrust = .1 self.imageMaster = self.imageThrust else: self.thrust = 0 def rotate(self): oldCenter = self.rect.center self.image = pygame.transform.rotate(self.imageMaster, self.dir) self.rect = self.image.get_rect() self.rect.center = oldCenter def calcVector(self): radians = self.dir * math.pi / 180 thrustDx = self.thrust * math.cos(radians) thrustDy = self.thrust * math.sin(radians) thrustDy *= -1 self.dx += thrustDx self.dy += thrustDy self.speed = math.sqrt((self.dx * self.dx) + (self.dy * self.dy)) def setPos(self): self.x += self.dx self.y += self.dy def checkBounds(self): screen = self.screen if self.x > screen.get_width(): self.x = 0 if self.x < 0: self.x = screen.get_width() if self.y > screen.get_height(): self.y = 0 if self.y < 0: self.y = screen.get_height() def main(): screen = pygame.display.set_mode((640, 480)) pygame.display.set_caption("Space craft") background = pygame.Surface(screen.get_size()) background.fill((0, 0, 0)) screen.blit(background, (0, 0)) ship = Ship(screen) allSprites = pygame.sprite.Group(ship) clock = pygame.time.Clock() keepGoing = True while keepGoing: clock.tick(30) for event in pygame.event.get(): if event.type == pygame.QUIT: keepGoing = False allSprites.clear(screen, background) allSprites.update() allSprites.draw(screen) pygame.display.flip() if __name__ == "__main__": main()