import argparse

from pysnmp.hlapi import *


class SNMP:
    def __init__(self, hostname, community):
        self.hostname = hostname
        self.community = community

    def walk(self, root):
        result = []
        for errorIndication, errorStatus, errorIndex, varBinds in self.next_cmd(root):
            if errorIndication:
                print(errorIndication)
                break
            elif errorStatus:
                print("{0} at {1}".format(
                    errorStatus.prettyPrint(),
                    errorIndex and varBinds[int(errorIndex) - 1][0] or "?"))
                break
            else:
                for varBind in varBinds:
                    result.append({"oid": varBind[0].prettyPrint(),
                                   "value": varBind[1]})
        return result

    def next_cmd(self, root):
        return nextCmd(
            SnmpEngine(),
            CommunityData(self.community),
            UdpTransportTarget((self.hostname, 161)),
            ContextData(),
            ObjectType(ObjectIdentity(root)),
            lookupNames=False,
            lookupValues=False,
            lookupMib=False,
            lexicographicMode=False
        )


class MRVFiberDriver:
    def __init__(self, hostname, community):
        self.snmp = SNMP(hostname, community)
        # Keyed by slot number; populated by discover().
        self.chassis = {}
        self.discover()

    def discover(self):
        # Figure out slots. Keys are stored as ints so the slot lookups
        # in _init_slots() and _sp() match.
        for o in self.snmp.walk("1.3.6.1.4.1.629.200.7.1.1.2"):
            self.chassis[int(o["value"])] = {}

        # Initialize chassis data.
        self._init_slots()
        self._init_ports()

    def _init_slots(self):
        # slot model
        models = self.snmp.walk("1.3.6.1.4.1.629.200.7.1.1.4")
        # port count
        portcounts = self.snmp.walk("1.3.6.1.4.1.629.200.7.1.1.6")
        # hardware revisions
        hwrevs = self.snmp.walk("1.3.6.1.4.1.629.200.7.1.1.7")
        # card types
        cardtypes = self.snmp.walk("1.3.6.1.4.1.629.200.7.1.1.8")
        # serial numbers
        serials = self.snmp.walk("1.3.6.1.4.1.629.200.7.1.1.32")

        for slot in self.chassis:
            self.chassis[slot]["model"] = self._slot_value(slot, models)
            self.chassis[slot]["portcount"] = self._slot_value(slot, portcounts)
            self.chassis[slot]["hwrev"] = self._slot_value(slot, hwrevs)
            self.chassis[slot]["type"] = self._slot_value_type(slot, cardtypes)
            self.chassis[slot]["serial"] = self._slot_value(slot, serials)

    def _init_ports(self):
        # port types
        porttypes = {28: "TP", 87: "Console", 125: "SFP"}
        for i in self.snmp.walk("1.3.6.1.4.1.629.200.8.1.1.4"):
            c, s, p = self._sp(i["oid"])
            self.chassis[s]["ports"][p]["type"] = porttypes[i["value"]]

        # link status
        linkstatuses = {1: "Other", 2: "NoSignal", 3: "SignalDetected", 4: "Link"}
        for i in self.snmp.walk("1.3.6.1.4.1.629.200.8.1.1.6"):
            c, s, p = self._sp(i["oid"])
            self.chassis[s]["ports"][p]["link"] = linkstatuses[i["value"]]

        # loopback
        loopbacks = {1: "NotSupported", 2: "Off"}
        for i in self.snmp.walk("1.3.6.1.4.1.629.200.8.1.1.13"):
            c, s, p = self._sp(i["oid"])
            self.chassis[s]["ports"][p]["loopback"] = loopbacks[i["value"]]

        # enable
        enables = {1: "NotSupported", 3: "Enabled"}
        for i in self.snmp.walk("1.3.6.1.4.1.629.200.8.1.1.14"):
            c, s, p = self._sp(i["oid"])
            self.chassis[s]["ports"][p]["enabled"] = enables[i["value"]]

        # link integrity notification
        lins = {1: "NotSupported", 3: "Enabled"}
        for i in self.snmp.walk("1.3.6.1.4.1.629.200.8.1.1.16"):
            c, s, p = self._sp(i["oid"])
            self.chassis[s]["ports"][p]["lin"] = lins[int(i["value"])]

        # port names (descriptions)
        for i in self.snmp.walk("1.3.6.1.4.1.629.200.8.1.1.21"):
            c, s, p = self._sp(i["oid"])
            self.chassis[s]["ports"][p]["name"] = i["value"]

        # optics serial
        for i in self.snmp.walk("1.3.6.1.4.1.629.200.8.1.1.28"):
            c, s, p = self._sp(i["oid"])
            val = str(i["value"])
            if val == "N/A":
                val = None
            self.chassis[s]["ports"][p]["optics"]["serial"] = val

        # optics vendor info
        for i in self.snmp.walk("1.3.6.1.4.1.629.200.8.1.1.29"):
            c, s, p = self._sp(i["oid"])
            val = str(i["value"])
            if val == "N/A":
                val = None
            self.chassis[s]["ports"][p]["optics"]["vendor"] = val

        # optics model
        for i in self.snmp.walk("1.3.6.1.4.1.629.200.8.1.1.42"):
            c, s, p = self._sp(i["oid"])
            val = str(i["value"])
            if val == "N/A":
                val = None
            self.chassis[s]["ports"][p]["optics"]["model"] = val

        # optics temperature
        for i in self.snmp.walk("1.3.6.1.4.1.629.200.8.1.1.30"):
            c, s, p = self._sp(i["oid"])
            val = i["value"]
            if val < 0:
                val = None
            self.chassis[s]["ports"][p]["optics"]["temperature"] = val

        # optics txpower (reported in milli-dBm, so scale to dBm)
        for i in self.snmp.walk("1.3.6.1.4.1.629.200.8.1.1.31"):
            c, s, p = self._sp(i["oid"])
            self.chassis[s]["ports"][p]["optics"]["txpower"] = float(i["value"]) / 1000

        # optics rxpower
        for i in self.snmp.walk("1.3.6.1.4.1.629.200.8.1.1.32"):
            c, s, p = self._sp(i["oid"])
            self.chassis[s]["ports"][p]["optics"]["rxpower"] = float(i["value"]) / 1000

        # optics bias amps
        for i in self.snmp.walk("1.3.6.1.4.1.629.200.8.1.1.33"):
            c, s, p = self._sp(i["oid"])
            self.chassis[s]["ports"][p]["optics"]["bias"] = float(i["value"]) / 1000

        # optics voltage
        for i in self.snmp.walk("1.3.6.1.4.1.629.200.8.1.1.34"):
            c, s, p = self._sp(i["oid"])
            self.chassis[s]["ports"][p]["optics"]["voltage"] = float(i["value"]) / 1000

        # optics wavelength
        for i in self.snmp.walk("1.3.6.1.4.1.629.200.8.1.1.37"):
            c, s, p = self._sp(i["oid"])
            self.chassis[s]["ports"][p]["optics"]["wavelength"] = i["value"]

        # digital diagnostics status (raw value; see dom_status() in main)
        doms = {1: "NotSupported", 2: "DiagsOk"}
        for i in self.snmp.walk("1.3.6.1.4.1.629.200.8.1.1.38"):
            c, s, p = self._sp(i["oid"])
            self.chassis[s]["ports"][p]["domstatus"] = i["value"]

    def _sp(self, oid):
        # Helper function to parse chassis, slot, and port from an OID.
        pcs = oid.split(".")
        c = int(pcs[-3])
        s = int(pcs[-2])
        p = int(pcs[-1])
        if s in self.chassis and "ports" not in self.chassis[s]:
            self.chassis[s]["ports"] = {}
        if s in self.chassis and p not in self.chassis[s]["ports"]:
            self.chassis[s]["ports"][p] = {"optics": {}}
        return c, s, p

    def _slot_value(self, slot, data):
        # Helper function to look up the stored value for a slot.
        for i in data:
            pcs = i["oid"].split(".")
            if slot == int(pcs[-1]):
                if str(i["value"]) == "N/A":
                    return None
                return str(i["value"]).strip()
        return None

    def _slot_value_type(self, slot, data):
        types = {1: None, 2: "Management", 3: "Converter"}
        for i in data:
            pcs = i["oid"].split(".")
            if slot == int(pcs[-1]):
                if i["value"] in types:
                    return types[i["value"]]
                else:
                    return None

    def get_chassis(self):
        return self.chassis

    def get_slot_count(self):
        return len(self.chassis)

    def get_slot_active_count(self):
        active = 0
        for slot in self.chassis:
            if self.chassis[slot]["model"] is not None:
                active += 1
        return active


def main():
    parser = argparse.ArgumentParser(description="List info from MRV FiberDriver chassis")
    parser.add_argument("--host", "-H", help="Host for your FiberDriver chassis", required=True)
    parser.add_argument("--community", "-c", help="SNMP community", required=True)
    parser.add_argument("--list-slots", "-s", help="display a list of chassis slots", action="store_true")
    parser.add_argument("--list-ports", "-p", help="display a list of ports", action="store_true")
    parser.add_argument("--digital-diagnostics", "-d", help="display digital diagnostics information", action="store_true")
    parser.add_argument("--inventory", "-i", help="display inventory", action="store_true")
    opts = parser.parse_args()

    fd = MRVFiberDriver(opts.host, opts.community)

    if opts.list_slots:
        print("{:4} {:20} {:20} {:20}".format("Slot", "Model", "Type", "Serial"))
        for slot_id in fd.get_chassis():
            slot = fd.get_chassis()[slot_id]
            print("{:4} {:20} {:20} {:20}".format(
                slot_id, slot["model"], slot["type"], slot["serial"]))

    if opts.inventory:
        print("{:4} {:8} {:15} {:20} {:25} {:25}".format(
            "Type", "Location", "Serial", "Vendor", "Model", "Revision"))
        optics = []
        for slot_id in fd.get_chassis():
            slot = fd.get_chassis()[slot_id]
            if "ports" in slot and len(slot["ports"]) > 0:
                print("{:4} 1.{:6} {:15} {:20} {:25} {:25}".format(
                    "Slot", slot_id, slot["serial"], "MRV",
                    slot["model"], slot["hwrev"]))
                for port_id in slot["ports"]:
                    port = slot["ports"][port_id]
                    if port["optics"]["serial"] is None:
                        continue
                    optic = {
                        "location": "{}.{}".format(slot_id, port_id),
                        "type": port["type"],
                        "vendor": port["optics"]["vendor"],
                        "serial": port["optics"]["serial"],
                        "model": port["optics"]["model"],
                        "hwrev": "N/A"
                    }
                    optics.append(optic)
        for optic in optics:
            print("{:4} 1.{:6} {:15} {:20} {:25} {:25}".format(
                optic["type"], optic["location"], optic["serial"],
                optic["vendor"], optic["model"], optic["hwrev"]))

    if opts.list_ports:
        print("{:5} {:13} {:15} {:13} {:15} {:6} {:7} {:20}".format(
            "Port", "Enabled", "Link", "Lin", "DOM", "WL(nm)", "Channel", "Name"))
        for slot_id in fd.get_chassis():
            slot = fd.get_chassis()[slot_id]
            if "ports" in slot and len(slot["ports"]) > 0:
                for port_id in slot["ports"]:
                    port = slot["ports"][port_id]
                    print("1.{}.{} {:13} {:15} {:13} {:15} {:6} {:7} {:20}".format(
                        slot_id, port_id, port["enabled"], port["link"],
                        port["lin"], port["domstatus"],
                        port["optics"]["wavelength"], "Channel", port["name"]))

    if opts.digital_diagnostics:
        print("{:5} {:10} {:10} {:10} {:10} {:10} {:10}".format(
            "Port", "DDiags", "Temp(C)", "Supply(V)", "TxPower(dBm)",
            "RxPower(dBm)", "Bias(mA)"))
        for slot_id in fd.get_chassis():
            slot = fd.get_chassis()[slot_id]
            if "ports" in slot and len(slot["ports"]) > 0:
                for port_id in slot["ports"]:
                    port = slot["ports"][port_id]
                    optic = port["optics"]
                    if port["domstatus"] == 1:
                        # Don't list ports where DOM is not available.
                        continue

                    def dom_status(x):
                        return {2: "Ok"}.get(x, "N/A")

                    print("1.{}.{} {:10} {:10} {:10} {:10} {:10} {:10}".format(
                        slot_id, port_id, dom_status(port["domstatus"]),
                        optic["temperature"], optic["voltage"],
                        optic["txpower"], optic["rxpower"], optic["bias"]))


if __name__ == "__main__":
    main()
About

I'm a master's student in Applied Computing at a university of applied sciences in Berlin, Germany. I'm interested in pretty much everything, from technology and politics to pop culture and Japanese kobudo.
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Ingredient',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
                ('name', models.CharField(max_length=30)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Perk',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
                ('name', models.CharField(max_length=30)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
                ('slug', models.SlugField(unique=True, blank=True, editable=False)),
                ('title', models.CharField(max_length=40)),
                ('description', models.CharField(max_length=1024)),
                ('type', models.CharField(choices=[('MIX', 'Dip Mix'), ('RUB', 'Dry Rub'), ('MAR', 'Marinade')], max_length=3)),
                ('price', models.DecimalField(decimal_places=2, max_digits=6)),
                ('contains', models.ManyToManyField(to='products.Ingredient', null=True, blank=True)),
                ('perks', models.ManyToManyField(to='products.Perk', null=True, blank=True)),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
    ]
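For reference, a sketch of the models.py a migration like this would be generated from. The field options are read off the CreateModel operations; the 'abstract': False option on Product suggests it inherits from some abstract base model, which is omitted here as an assumption:

from django.db import models


class Ingredient(models.Model):
    name = models.CharField(max_length=30)


class Perk(models.Model):
    name = models.CharField(max_length=30)


class Product(models.Model):
    TYPE_CHOICES = [('MIX', 'Dip Mix'), ('RUB', 'Dry Rub'), ('MAR', 'Marinade')]

    slug = models.SlugField(unique=True, blank=True, editable=False)
    title = models.CharField(max_length=40)
    description = models.CharField(max_length=1024)
    type = models.CharField(choices=TYPE_CHOICES, max_length=3)
    price = models.DecimalField(decimal_places=2, max_digits=6)
    contains = models.ManyToManyField(Ingredient, blank=True)
    perks = models.ManyToManyField(Perk, blank=True)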
I now live near Philadelphia, but I lived 30 years in France: approximately 15 years in the south (Manosque, Avignon, and 8 of those years in Marseille; I spent every summer in Cassis growing up and still go every year) and approximately 15 years in Paris.
import os

from .cplfields import *
from .postproc import PostProc
from .pplexceptions import NoResultsInDir, DataNotAvailable
from .mdpostproc import MD_PostProc
from .cfdpostproc import CFD_PostProc
from .serial_cfdpostproc import Serial_CFD_PostProc
from .openfoampostproc import OpenFOAM_PostProc

# Results directory paths for each code
resultsdirs = {
    'flowmol': 'flowmol/results',
    'lammps': 'lammps/',
    'serialcouette': 'couette_serial/results/',
    'openfoam': 'openfoam/',
    'transflow': 'couette_data/'
}

# Field classes that are associated with velocity for each code
vfieldtypes = {
    'flowmol': mdfields.MD_vField,
    'lammps': lammpsfields.LAMMPS_vField,
    'serialcouette': serial_cfdfields.Serial_CFD_vField,
    'openfoam': openfoamfields.OpenFOAM_vField,
    'transflow': cfdfields.CFD_vField
}

# Field classes that are associated with momentum for each code
momfieldtypes = {
    'flowmol': mdfields.MD_momField,
    'serialcouette': serial_cfdfields.Serial_CFD_momField,
    'lammps': lammpsfields.LAMMPS_momField,
    'openfoam': openfoamfields.OpenFOAM_momField,
    'transflow': None
}

# Field classes that are associated with stress for each code
stressfieldtypes = {
    'flowmol': mdfields.MD_stressField,
    'lammps': None,
    'serialcouette': serial_cfdfields.Serial_CFD_StressField,
    'openfoam': openfoamfields.OpenFOAM_mugradvField,
    'transflow': cfdfields.CFD_mugradvField
}

# CPL Field classes that could potentially be constructed
possible_fields = {
    'CPL Velocity': CPL_vField,
    'CPL Momentum': CPL_momField,
    'CPL Stress': CPL_stressField
}

# And their associated field class dictionary
type_dicts = {
    'CPL Velocity': vfieldtypes,
    'CPL Momentum': momfieldtypes,
    'CPL Stress': stressfieldtypes
}

# All possible pairings (Surely this should be done with itertools permute?)
possible_pairs = [
    {'MD': 'flowmol', 'CFD': 'serialcouette'},
    {'MD': 'flowmol', 'CFD': 'openfoam'},
    {'MD': 'flowmol', 'CFD': 'transflow'},
    {'MD': 'lammps', 'CFD': 'openfoam'}
]


class CPL_PostProc(PostProc):
    """Post processing class for coupled runs."""

    def __init__(self, resultsdir, **kwargs):
        self.resultsdir = resultsdir

        # Check directory exists before instantiating object and check
        # which files associated with plots are in directory
        if not os.path.isdir(self.resultsdir):
            print("Directory " + self.resultsdir + " not found")
            raise IOError

        self.plotlist = {}
        try:
            fobj = open(self.resultsdir + 'cpl/coupler_header', 'r')
        except IOError:
            raise NoResultsInDir

        for pair in possible_pairs:
            MDkey = pair['MD']
            CFDkey = pair['CFD']
            for CPLkey, CPLfieldtype in list(possible_fields.items()):
                print('Attempting to construct ' + str(CPLfieldtype) +
                      ' for ' + MDkey + ' and ' + CFDkey)
                try:
                    self.plotlist[CPLkey] = CPLfieldtype(
                        self.resultsdir,
                        MDFieldType=type_dicts[CPLkey][MDkey],
                        CFDFieldType=type_dicts[CPLkey][CFDkey],
                        mddir=resultsdirs[MDkey],
                        cfddir=resultsdirs[CFDkey])
                except (AssertionError, DataNotAvailable, IOError, TypeError):
                    pass
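As the comment on possible_pairs hints, the pair list could be generated with itertools rather than written by hand. A sketch follows; note the manual list is not a full cross product (lammps is only ever paired with openfoam), so a plain itertools.product() needs a filter, and the excluded set below is inferred from the list above rather than stated anywhere in the original:

import itertools

md_codes = ['flowmol', 'lammps']
cfd_codes = ['serialcouette', 'openfoam', 'transflow']
# Pairings absent from the hand-written possible_pairs list.
excluded = {('lammps', 'serialcouette'), ('lammps', 'transflow')}

possible_pairs_generated = [
    {'MD': md, 'CFD': cfd}
    for md, cfd in itertools.product(md_codes, cfd_codes)
    if (md, cfd) not in excluded
]
# Produces the same four pairings, in the same order, as possible_pairs.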
Truman High School’s yearbook, “The Heritage,” recently earned an All-Missouri ranking from the Missouri Interscholastic Press Association’s 2010 yearbook contest. This marks the second time in three years the school’s publication has received the honor. MIPA ranks schools with a Merit, Excellent, Superior or All-Missouri classification; All-Missouri is the highest ranking a school can receive. “The Heritage” 2010 staff included – Cathy Ann Baker, editor-in-chief; Kassidy Ritchel, Kaitlyn Eikenbary, Ciara Bloss, Adolfo Vargus, Erin Waterhouse, Kirsten Wilson, Tara Garcia, Kelsey Gorbet, Cole Moore, Stephanie Tebe, Danielle Vaught, Sarah Carpenter, Holly Jackson, and Megan Tracy, reporters; Shelby Henderson, photo editor; Emily Bennett, Ciara Bloss, Kyle Burnett, Drew Colletti, Annie Frisbie, Caitlin Gilpin, Taylor Goudelock, Sara Hankins, Michelle Kabrick, Jordan Lockwood, Shanika Mason, Natalie Oddo, Kassidy Ritchel, Brandi Saluto, Mandi Thompson, Samantha Trombino and Kimberly Zans, photographers.
# Copyright (C) 2012 - 2014 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Handles all requests relating to consistency groups."""

import functools

from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import timeutils

from cinder import db
from cinder.db import base
from cinder import exception
from cinder.i18n import _, _LE, _LW
from cinder import objects
from cinder.objects import fields as c_fields
import cinder.policy
from cinder import quota
from cinder.scheduler import rpcapi as scheduler_rpcapi
from cinder.volume import api as volume_api
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as vol_utils
from cinder.volume import volume_types


CONF = cfg.CONF

LOG = logging.getLogger(__name__)
CGQUOTAS = quota.CGQUOTAS

VALID_REMOVE_VOL_FROM_CG_STATUS = (
    'available',
    'in-use',
    'error',
    'error_deleting')
VALID_ADD_VOL_TO_CG_STATUS = (
    'available',
    'in-use')


def wrap_check_policy(func):
    """Check policy corresponding to the wrapped methods prior to execution.

    This decorator requires the first 3 args of the wrapped function
    to be (self, context, consistencygroup).
    """
    @functools.wraps(func)
    def wrapped(self, context, target_obj, *args, **kwargs):
        check_policy(context, func.__name__, target_obj)
        return func(self, context, target_obj, *args, **kwargs)

    return wrapped


def check_policy(context, action, target_obj=None):
    target = {
        'project_id': context.project_id,
        'user_id': context.user_id,
    }
    target_obj = target_obj.fields if target_obj else {}
    target.update(target_obj)
    _action = 'consistencygroup:%s' % action
    cinder.policy.enforce(context, _action, target)


class API(base.Base):
    """API for interacting with the volume manager for consistency groups."""

    def __init__(self, db_driver=None):
        self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
        self.volume_rpcapi = volume_rpcapi.VolumeAPI()
        self.availability_zone_names = ()
        self.volume_api = volume_api.API()
        super(API, self).__init__(db_driver)

    def _valid_availability_zone(self, availability_zone):
        if availability_zone in self.availability_zone_names:
            return True
        if CONF.storage_availability_zone == availability_zone:
            return True

        azs = self.volume_api.list_availability_zones()
        self.availability_zone_names = [az['name'] for az in azs]
        return availability_zone in self.availability_zone_names

    def _extract_availability_zone(self, availability_zone):
        if availability_zone is None:
            if CONF.default_availability_zone:
                availability_zone = CONF.default_availability_zone
            else:
                # For backwards compatibility use the storage_availability_zone
                availability_zone = CONF.storage_availability_zone

        valid = self._valid_availability_zone(availability_zone)
        if not valid:
            msg = _LW("Availability zone '%s' is invalid") % availability_zone
            LOG.warning(msg)
            raise exception.InvalidInput(reason=msg)

        return availability_zone

    def create(self, context, name, description, cg_volume_types,
               availability_zone=None):
        check_policy(context, 'create')

        volume_type_list = cg_volume_types.split(',')

        # NOTE: Admin context is required to get extra_specs of volume_types.
        req_volume_types = (self.db.volume_types_get_by_name_or_id(
            context.elevated(), volume_type_list))

        req_volume_type_ids = ""
        for voltype in req_volume_types:
            req_volume_type_ids = (
                req_volume_type_ids + voltype.get('id') + ",")
        if len(req_volume_type_ids) == 0:
            req_volume_type_ids = None

        availability_zone = self._extract_availability_zone(availability_zone)
        kwargs = {'user_id': context.user_id,
                  'project_id': context.project_id,
                  'availability_zone': availability_zone,
                  'status': c_fields.ConsistencyGroupStatus.CREATING,
                  'name': name,
                  'description': description,
                  'volume_type_id': req_volume_type_ids}
        group = None
        try:
            group = objects.ConsistencyGroup(context=context, **kwargs)
            group.create()
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Error occurred when creating consistency group"
                              " %s."), name)

        request_spec_list = []
        filter_properties_list = []
        for req_volume_type in req_volume_types:
            request_spec = {'volume_type': req_volume_type.copy(),
                            'consistencygroup_id': group.id}
            filter_properties = {}
            request_spec_list.append(request_spec)
            filter_properties_list.append(filter_properties)

        # Update quota for consistencygroups
        self.update_quota(context, group, 1)

        self._cast_create_consistencygroup(context, group,
                                           request_spec_list,
                                           filter_properties_list)

        return group

    def create_from_src(self, context, name, description=None,
                        cgsnapshot_id=None, source_cgid=None):
        check_policy(context, 'create')

        kwargs = {
            'user_id': context.user_id,
            'project_id': context.project_id,
            'status': c_fields.ConsistencyGroupStatus.CREATING,
            'name': name,
            'description': description,
            'cgsnapshot_id': cgsnapshot_id,
            'source_cgid': source_cgid,
        }

        group = None
        try:
            group = objects.ConsistencyGroup(context=context, **kwargs)
            group.create(cg_snap_id=cgsnapshot_id, cg_id=source_cgid)
        except exception.ConsistencyGroupNotFound:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Source CG %(source_cg)s not found when "
                              "creating consistency group %(cg)s from "
                              "source."),
                          {'cg': name, 'source_cg': source_cgid})
        except exception.CgSnapshotNotFound:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("CG snapshot %(cgsnap)s not found when creating "
                              "consistency group %(cg)s from source."),
                          {'cg': name, 'cgsnap': cgsnapshot_id})
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Error occurred when creating consistency group"
                              " %(cg)s from cgsnapshot %(cgsnap)s."),
                          {'cg': name, 'cgsnap': cgsnapshot_id})

        # Update quota for consistencygroups
        self.update_quota(context, group, 1)

        if not group.host:
            msg = _("No host to create consistency group %s.") % group.id
            LOG.error(msg)
            raise exception.InvalidConsistencyGroup(reason=msg)

        if cgsnapshot_id:
            self._create_cg_from_cgsnapshot(context, group, cgsnapshot_id)
        elif source_cgid:
            self._create_cg_from_source_cg(context, group, source_cgid)

        return group

    def _create_cg_from_cgsnapshot(self, context, group, cgsnapshot_id):
        try:
            cgsnapshot = objects.CGSnapshot.get_by_id(context, cgsnapshot_id)
            snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
                context, cgsnapshot.id)
            if not snapshots:
                msg = _("Cgsnapshot is empty. No consistency group "
                        "will be created.")
                raise exception.InvalidConsistencyGroup(reason=msg)

            for snapshot in snapshots:
                kwargs = {}
                kwargs['availability_zone'] = group.availability_zone
                kwargs['cgsnapshot'] = cgsnapshot
                kwargs['consistencygroup'] = group
                kwargs['snapshot'] = snapshot
                volume_type_id = snapshot.volume_type_id
                if volume_type_id:
                    kwargs['volume_type'] = volume_types.get_volume_type(
                        context, volume_type_id)

                # Since cgsnapshot is passed in, the following call will
                # create a db entry for the volume, but will not call the
                # volume manager to create a real volume in the backend yet.
                # If error happens, taskflow will handle rollback of quota
                # and removal of volume entry in the db.
                try:
                    self.volume_api.create(context,
                                           snapshot.volume_size,
                                           None,
                                           None,
                                           **kwargs)
                except exception.CinderException:
                    with excutils.save_and_reraise_exception():
                        LOG.error(_LE("Error occurred when creating volume "
                                      "entry from snapshot in the process of "
                                      "creating consistency group %(group)s "
                                      "from cgsnapshot %(cgsnap)s."),
                                  {'group': group.id,
                                   'cgsnap': cgsnapshot.id})
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    group.destroy()
                finally:
                    LOG.error(_LE("Error occurred when creating consistency "
                                  "group %(group)s from cgsnapshot "
                                  "%(cgsnap)s."),
                              {'group': group.id,
                               'cgsnap': cgsnapshot.id})

        volumes = self.db.volume_get_all_by_group(context, group.id)

        for vol in volumes:
            # Update the host field for the volume.
            self.db.volume_update(context, vol['id'],
                                  {'host': group.get('host')})

        self.volume_rpcapi.create_consistencygroup_from_src(
            context, group, cgsnapshot)

    def _create_cg_from_source_cg(self, context, group, source_cgid):
        try:
            source_cg = objects.ConsistencyGroup.get_by_id(context,
                                                           source_cgid)
            source_vols = self.db.volume_get_all_by_group(context,
                                                          source_cg.id)
            if not source_vols:
                msg = _("Source CG is empty. No consistency group "
                        "will be created.")
                raise exception.InvalidConsistencyGroup(reason=msg)

            for source_vol in source_vols:
                kwargs = {}
                kwargs['availability_zone'] = group.availability_zone
                kwargs['source_cg'] = source_cg
                kwargs['consistencygroup'] = group
                kwargs['source_volume'] = source_vol
                volume_type_id = source_vol.get('volume_type_id')
                if volume_type_id:
                    kwargs['volume_type'] = volume_types.get_volume_type(
                        context, volume_type_id)

                # Since source_cg is passed in, the following call will
                # create a db entry for the volume, but will not call the
                # volume manager to create a real volume in the backend yet.
                # If error happens, taskflow will handle rollback of quota
                # and removal of volume entry in the db.
                try:
                    self.volume_api.create(context,
                                           source_vol['size'],
                                           None,
                                           None,
                                           **kwargs)
                except exception.CinderException:
                    with excutils.save_and_reraise_exception():
                        LOG.error(_LE("Error occurred when creating cloned "
                                      "volume in the process of creating "
                                      "consistency group %(group)s from "
                                      "source CG %(source_cg)s."),
                                  {'group': group.id,
                                   'source_cg': source_cg.id})
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    group.destroy()
                finally:
                    LOG.error(_LE("Error occurred when creating consistency "
                                  "group %(group)s from source CG "
                                  "%(source_cg)s."),
                              {'group': group.id,
                               'source_cg': source_cg.id})

        volumes = self.db.volume_get_all_by_group(context, group.id)

        for vol in volumes:
            # Update the host field for the volume.
            self.db.volume_update(context, vol['id'], {'host': group.host})

        self.volume_rpcapi.create_consistencygroup_from_src(context, group,
                                                            None, source_cg)

    def _cast_create_consistencygroup(self, context, group,
                                      request_spec_list,
                                      filter_properties_list):

        try:
            for request_spec in request_spec_list:
                volume_type = request_spec.get('volume_type', None)
                volume_type_id = None
                if volume_type:
                    volume_type_id = volume_type.get('id', None)

                specs = {}
                if volume_type_id:
                    qos_specs = volume_types.get_volume_type_qos_specs(
                        volume_type_id)
                    specs = qos_specs['qos_specs']
                if not specs:
                    # to make sure we don't pass empty dict
                    specs = None

                volume_properties = {
                    'size': 0,  # Need to populate size for the scheduler
                    'user_id': context.user_id,
                    'project_id': context.project_id,
                    'status': 'creating',
                    'attach_status': 'detached',
                    'encryption_key_id': request_spec.get('encryption_key_id',
                                                          None),
                    'display_description': request_spec.get('description',
                                                            None),
                    'display_name': request_spec.get('name', None),
                    'volume_type_id': volume_type_id,
                }

                request_spec['volume_properties'] = volume_properties
                request_spec['qos_specs'] = specs

        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    group.destroy()
                finally:
                    LOG.error(_LE("Error occurred when building "
                                  "request spec list for consistency group "
                                  "%s."), group.id)

        # Cast to the scheduler and let it handle whatever is needed
        # to select the target host for this group.
        self.scheduler_rpcapi.create_consistencygroup(
            context,
            group,
            request_spec_list=request_spec_list,
            filter_properties_list=filter_properties_list)

    def update_quota(self, context, group, num, project_id=None):
        reserve_opts = {'consistencygroups': num}
        try:
            reservations = CGQUOTAS.reserve(context,
                                            project_id=project_id,
                                            **reserve_opts)
            if reservations:
                CGQUOTAS.commit(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    group.destroy()
                finally:
                    LOG.error(_LE("Failed to update quota for "
                                  "consistency group %s."), group.id)

    @wrap_check_policy
    def delete(self, context, group, force=False):
        if not group.host:
            self.update_quota(context, group, -1, group.project_id)

            LOG.debug("No host for consistency group %s. Deleting from "
                      "the database.", group.id)
            group.destroy()

            return

        if force:
            expected = {}
        else:
            expected = {'status': (c_fields.ConsistencyGroupStatus.AVAILABLE,
                                   c_fields.ConsistencyGroupStatus.ERROR)}
        filters = [~db.cg_has_cgsnapshot_filter(),
                   ~db.cg_has_volumes_filter(attached_or_with_snapshots=force),
                   ~db.cg_creating_from_src(cg_id=group.id)]
        values = {'status': c_fields.ConsistencyGroupStatus.DELETING}

        if not group.conditional_update(values, expected, filters):
            if force:
                reason = _('Consistency group must not have attached '
                           'volumes, volumes with snapshots, or dependent '
                           'cgsnapshots')
            else:
                reason = _('Consistency group status must be available or '
                           'error and must not have volumes or dependent '
                           'cgsnapshots')
            msg = (_('Cannot delete consistency group %(id)s. %(reason)s, '
                     'and it cannot be the source for an ongoing CG or CG '
                     'Snapshot creation.')
                   % {'id': group.id, 'reason': reason})
            raise exception.InvalidConsistencyGroup(reason=msg)

        self.volume_rpcapi.delete_consistencygroup(context, group)

    def _check_update(self, group, name, description, add_volumes,
                      remove_volumes, allow_empty=False):
        if allow_empty:
            if (name is None and description is None
                    and not add_volumes and not remove_volumes):
                msg = (_("Cannot update consistency group %(group_id)s "
                         "because no valid name, description, add_volumes, "
                         "or remove_volumes were provided.")
                       % {'group_id': group.id})
                raise exception.InvalidConsistencyGroup(reason=msg)
        else:
            if not (name or description or add_volumes or remove_volumes):
                msg = (_("Cannot update consistency group %(group_id)s "
                         "because no valid name, description, add_volumes, "
                         "or remove_volumes were provided.")
                       % {'group_id': group.id})
                raise exception.InvalidConsistencyGroup(reason=msg)

    def update(self, context, group, name, description,
               add_volumes, remove_volumes, allow_empty=False):
        """Update consistency group."""
        add_volumes_list = []
        remove_volumes_list = []
        if add_volumes:
            add_volumes = add_volumes.strip(',')
            add_volumes_list = add_volumes.split(',')
        if remove_volumes:
            remove_volumes = remove_volumes.strip(',')
            remove_volumes_list = remove_volumes.split(',')

        invalid_uuids = []
        for uuid in add_volumes_list:
            if uuid in remove_volumes_list:
                invalid_uuids.append(uuid)
        if invalid_uuids:
            msg = _("UUIDs %s are in both add and remove volume "
                    "list.") % invalid_uuids
            raise exception.InvalidVolume(reason=msg)

        # Validate name.
        if name == group.name:
            name = None

        # Validate description.
        if description == group.description:
            description = None
        self._check_update(group, name, description, add_volumes,
                           remove_volumes, allow_empty)

        fields = {'updated_at': timeutils.utcnow()}

        # Update name and description in db now. No need to
        # send them over through an RPC call.
        if allow_empty:
            if name is not None:
                fields['name'] = name
            if description is not None:
                fields['description'] = description
        else:
            if name:
                fields['name'] = name
            if description:
                fields['description'] = description

        # NOTE(geguileo): We will use the updating status in the CG as a lock
        # mechanism to prevent volume add/remove races with other API, while
        # we figure out if we really need to add or remove volumes.
        if add_volumes or remove_volumes:
            fields['status'] = c_fields.ConsistencyGroupStatus.UPDATING

            # We cannot modify the members of this CG if the CG is being used
            # to create another CG or a CGsnapshot is being created
            filters = [~db.cg_creating_from_src(cg_id=group.id),
                       ~db.cgsnapshot_creating_from_src()]
        else:
            filters = []

        expected = {'status': c_fields.ConsistencyGroupStatus.AVAILABLE}
        if not group.conditional_update(fields, expected, filters):
            msg = _("Cannot update consistency group %s, status must be "
                    "available, and it cannot be the source for an ongoing "
                    "CG or CG Snapshot creation.") % group.id
            raise exception.InvalidConsistencyGroup(reason=msg)

        # Now the CG is "locked" for updating
        try:
            # Validate volumes in add_volumes and remove_volumes.
            add_volumes_new = self._validate_add_volumes(
                context, group.volumes, add_volumes_list, group)
            remove_volumes_new = self._validate_remove_volumes(
                group.volumes, remove_volumes_list, group)

            self._check_update(group, name, description, add_volumes_new,
                               remove_volumes_new, allow_empty)
        except Exception:
            # If we have an error on the volume_lists we must return status
            # to available as we were doing before removing API races
            with excutils.save_and_reraise_exception():
                group.status = c_fields.ConsistencyGroupStatus.AVAILABLE
                group.save()

        # Do an RPC call only if the update request includes
        # adding/removing volumes. add_volumes_new and remove_volumes_new
        # are strings of volume UUIDs separated by commas with no spaces
        # in between.
        if add_volumes_new or remove_volumes_new:
            self.volume_rpcapi.update_consistencygroup(
                context, group,
                add_volumes=add_volumes_new,
                remove_volumes=remove_volumes_new)
        # If there are no new volumes to add or remove and we had changed
        # the status to updating, turn it back to available
        elif group.status == c_fields.ConsistencyGroupStatus.UPDATING:
            group.status = c_fields.ConsistencyGroupStatus.AVAILABLE
            group.save()

    def _validate_remove_volumes(self, volumes, remove_volumes_list, group):
        # Validate volumes in remove_volumes.
        if not remove_volumes_list:
            return None

        remove_volumes_new = ""
        for volume in volumes:
            if volume['id'] in remove_volumes_list:
                if volume['status'] not in VALID_REMOVE_VOL_FROM_CG_STATUS:
                    msg = (_("Cannot remove volume %(volume_id)s from "
                             "consistency group %(group_id)s because volume "
                             "is in an invalid state: %(status)s. Valid "
                             "states are: %(valid)s.")
                           % {'volume_id': volume['id'],
                              'group_id': group.id,
                              'status': volume['status'],
                              'valid': VALID_REMOVE_VOL_FROM_CG_STATUS})
                    raise exception.InvalidVolume(reason=msg)
                # Volume currently in CG. It will be removed from CG.
                if remove_volumes_new:
                    remove_volumes_new += ","
                remove_volumes_new += volume['id']

        for rem_vol in remove_volumes_list:
            if rem_vol not in remove_volumes_new:
                msg = (_("Cannot remove volume %(volume_id)s from "
                         "consistency group %(group_id)s because it "
                         "is not in the group.")
                       % {'volume_id': rem_vol,
                          'group_id': group.id})
                raise exception.InvalidVolume(reason=msg)

        return remove_volumes_new

    def _validate_add_volumes(self, context, volumes, add_volumes_list,
                              group):
        if not add_volumes_list:
            return None

        add_volumes_new = ""
        for volume in volumes:
            if volume['id'] in add_volumes_list:
                # Volume already in CG. Remove from add_volumes.
                add_volumes_list.remove(volume['id'])

        for add_vol in add_volumes_list:
            try:
                add_vol_ref = self.db.volume_get(context, add_vol)
            except exception.VolumeNotFound:
                msg = (_("Cannot add volume %(volume_id)s to consistency "
                         "group %(group_id)s because volume cannot be "
                         "found.")
                       % {'volume_id': add_vol,
                          'group_id': group.id})
                raise exception.InvalidVolume(reason=msg)
            orig_group = add_vol_ref.get('consistencygroup_id', None)
            if orig_group:
                # If volume to be added is already in the group to be updated,
                # it should have been removed from the add_volumes_list in the
                # beginning of this function. If we are here, it means it is
                # in a different group.
                msg = (_("Cannot add volume %(volume_id)s to consistency "
                         "group %(group_id)s because it is already in "
                         "consistency group %(orig_group)s.")
                       % {'volume_id': add_vol_ref['id'],
                          'group_id': group.id,
                          'orig_group': orig_group})
                raise exception.InvalidVolume(reason=msg)
            if add_vol_ref:
                add_vol_type_id = add_vol_ref.get('volume_type_id', None)
                if not add_vol_type_id:
                    msg = (_("Cannot add volume %(volume_id)s to consistency "
                             "group %(group_id)s because it has no volume "
                             "type.")
                           % {'volume_id': add_vol_ref['id'],
                              'group_id': group.id})
                    raise exception.InvalidVolume(reason=msg)
                if add_vol_type_id not in group.volume_type_id:
                    msg = (_("Cannot add volume %(volume_id)s to consistency "
                             "group %(group_id)s because volume type "
                             "%(volume_type)s is not supported by the "
                             "group.")
                           % {'volume_id': add_vol_ref['id'],
                              'group_id': group.id,
                              'volume_type': add_vol_type_id})
                    raise exception.InvalidVolume(reason=msg)
                if (add_vol_ref['status'] not in
                        VALID_ADD_VOL_TO_CG_STATUS):
                    msg = (_("Cannot add volume %(volume_id)s to consistency "
                             "group %(group_id)s because volume is in an "
                             "invalid state: %(status)s. Valid states are: "
                             "%(valid)s.")
                           % {'volume_id': add_vol_ref['id'],
                              'group_id': group.id,
                              'status': add_vol_ref['status'],
                              'valid': VALID_ADD_VOL_TO_CG_STATUS})
                    raise exception.InvalidVolume(reason=msg)

                # group.host and add_vol_ref['host'] are in this format:
                # 'host@backend#pool'. Extract host (host@backend) before
                # doing comparison.
                vol_host = vol_utils.extract_host(add_vol_ref['host'])
                group_host = vol_utils.extract_host(group.host)
                if group_host != vol_host:
                    raise exception.InvalidVolume(
                        reason=_("Volume is not local to this node."))

                # Volume exists. It will be added to CG.
                if add_volumes_new:
                    add_volumes_new += ","
                add_volumes_new += add_vol_ref['id']

            else:
                msg = (_("Cannot add volume %(volume_id)s to consistency "
                         "group %(group_id)s because volume does not exist.")
                       % {'volume_id': add_vol_ref['id'],
                          'group_id': group.id})
                raise exception.InvalidVolume(reason=msg)

        return add_volumes_new

    def get(self, context, group_id):
        group = objects.ConsistencyGroup.get_by_id(context, group_id)
        check_policy(context, 'get', group)
        return group

    def get_all(self, context, filters=None, marker=None, limit=None,
                offset=None, sort_keys=None, sort_dirs=None):
        check_policy(context, 'get_all')
        if filters is None:
            filters = {}

        if filters:
            LOG.debug("Searching by: %s", filters)

        if (context.is_admin and 'all_tenants' in filters):
            del filters['all_tenants']
            groups = objects.ConsistencyGroupList.get_all(
                context, filters=filters, marker=marker, limit=limit,
                offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs)
        else:
            groups = objects.ConsistencyGroupList.get_all_by_project(
                context, context.project_id, filters=filters, marker=marker,
                limit=limit, offset=offset, sort_keys=sort_keys,
                sort_dirs=sort_dirs)
        return groups

    def create_cgsnapshot(self, context, group, name, description):
        options = {'consistencygroup_id': group.id,
                   'user_id': context.user_id,
                   'project_id': context.project_id,
                   'status': "creating",
                   'name': name,
                   'description': description}

        cgsnapshot = None
        cgsnapshot_id = None
        try:
            cgsnapshot = objects.CGSnapshot(context, **options)
            cgsnapshot.create()
            cgsnapshot_id = cgsnapshot.id

            snap_name = cgsnapshot.name
            snap_desc = cgsnapshot.description
            with group.obj_as_admin():
                self.volume_api.create_snapshots_in_db(
                    context, group.volumes, snap_name, snap_desc,
                    cgsnapshot_id)

        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    # If the cgsnapshot has been created
                    if cgsnapshot.obj_attr_is_set('id'):
                        cgsnapshot.destroy()
                finally:
                    LOG.error(_LE("Error occurred when creating cgsnapshot"
                                  " %s."), cgsnapshot_id)

        self.volume_rpcapi.create_cgsnapshot(context, cgsnapshot)

        return cgsnapshot

    def delete_cgsnapshot(self, context, cgsnapshot, force=False):
        values = {'status': 'deleting'}
        expected = {'status': ('available', 'error')}
        filters = [~db.cg_creating_from_src(cgsnapshot_id=cgsnapshot.id)]
        res = cgsnapshot.conditional_update(values, expected, filters)

        if not res:
            msg = _('CgSnapshot status must be available or error, and no CG '
                    'can be currently using it as source for its creation.')
            raise exception.InvalidCgSnapshot(reason=msg)

        self.volume_rpcapi.delete_cgsnapshot(context.elevated(), cgsnapshot)

    def update_cgsnapshot(self, context, cgsnapshot, fields):
        cgsnapshot.update(fields)
        cgsnapshot.save()

    def get_cgsnapshot(self, context, cgsnapshot_id):
        check_policy(context, 'get_cgsnapshot')
        cgsnapshots = objects.CGSnapshot.get_by_id(context, cgsnapshot_id)
        return cgsnapshots

    def get_all_cgsnapshots(self, context, search_opts=None):
        check_policy(context, 'get_all_cgsnapshots')

        search_opts = search_opts or {}

        if context.is_admin and 'all_tenants' in search_opts:
            # Need to remove all_tenants to pass the filtering below.
            del search_opts['all_tenants']
            cgsnapshots = objects.CGSnapshotList.get_all(context, search_opts)
        else:
            cgsnapshots = objects.CGSnapshotList.get_all_by_project(
                context.elevated(), context.project_id, search_opts)
        return cgsnapshots
Canadians are generally considered friendly, honest, and knowledgeable people who will do whatever it takes to get the job done. At LCWSoft, we don't make unrealistic promises and then hide behind our policies to limit your usage. Others may boast about "response time" and "rock bottom prices"; those do not matter when you receive canned responses with no solutions, or suffer from constant downtime. And as one of the first Canadian providers to use CloudLinux together with cPanel, you can rest assured that your service is as secure from outside threats as possible while remaining easy to use.

While we are known for quality website hosting, it is not the only thing we do. We also offer affordable web design services that will build you a website to be proud of: beautiful, easy to use and maintain, and created with search engine friendliness in mind, to not only impress those who are already familiar with you but bring you new business and visitors as well. We also offer an array of competitively priced, high-end SSL certificates that you can use to secure communication between you and your users, starting with a basic RapidSSL and moving up to complete domain and extended validation.

Our support goals are simple: be respectful, knowledgeable, and always use clear and concise language. In our industry, the quality of your support is paramount. To us, no issue is unimportant, and every effort is made to ensure that a resolution is reached as promptly as possible. The average ticket response and resolution time is less than one hour!

LCWSoft - web hosting built on quality service. Whatever you need, we've got you covered! Our advanced hosting solutions rival the best providers on the web in both price and performance. Our most affordable package is great for getting started, with free domain registration/transfer (.ca, .com, .net, .org) with an annual or longer purchase, and more. Our Pro package adds performance and premium features, and our top package offers the best performance for demanding websites; both also include free domain registration/transfer (.ca, .com, .net, .org) with an annual or longer purchase.

LCWSoft - Web Hosting and Design Services in St. John's, Newfoundland. Since opening in 2007 in St. John's, Newfoundland, we have provided the best Canadian web hosting on the popular Linux platform, ensuring your sites run smoothly and with 99.9% uptime. We also offer world-class web design services with a focus on user experience, responsive design (try viewing our website on any device and see how well it scales), and SEO best practices, so you don't need to pay thousands to another company to make on-site changes before working on link building and the like.

We are not the cheapest, but we are worth every penny. As the old saying goes: you get what you pay for. With us, you get reliable, worry-free service and support that is there when needed, whether it be for help with a simple password reset or a complex issue. Our hosting system is built to be as green and nature-friendly as possible. Over 150 scripts are ready to install in one click, including WordPress and Joomla. Need assistance? Our support team is here to help you when you need it. Affordable web hosting and design packages, with options to suit your needs.
from django.http import JsonResponse
from rest_framework import viewsets
from rest_framework.authtoken.models import Token
from rest_framework.permissions import IsAuthenticated

from escout.applications.dashboard.models import Application
from escout.applications.dashboard.serializers import ApplicationSerializer

# Create your views here.


class ApplicationsViewSet(viewsets.ModelViewSet):
    queryset = Application.objects.all()
    serializer_class = ApplicationSerializer
    permission_classes = (IsAuthenticated,)


# TODO: add error handling for a missing/malformed header or unknown token.
def logout(request):
    # Expects a header of the form "Token <key>".
    auth_token = request.META['HTTP_AUTHORIZATION']
    token = auth_token.split(" ")[1]
    token_record = Token.objects.get(key=token)
    token_record.delete()
    return JsonResponse({
        'status': 'OK',
        'data': {}
    })


# @login_required
def get_applications(request):
    application_records = Application.objects.all()
    # Serialize the queryset instead of returning an empty list.
    applications = ApplicationSerializer(application_records, many=True).data
    return JsonResponse({
        'status': 'OK',
        'data': {
            'applications': applications,
            'offset': 0,
            'limit': 0
        }
    })
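A hedged sketch of exercising the logout view with Django's test client; the URL path and token key below are assumptions (the actual routing lives in urls.py), and the token must already exist in the database:

from django.test import Client

client = Client()
# "Token <key>" matches the header format logout() splits on.
response = client.get("/api/logout/", HTTP_AUTHORIZATION="Token 0123abcd")
print(response.json())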
The Frame TV Wall Unit is the best solution for dressing an entire wall with class, giving a high-end touch to the living area. The big backlit panel hides electrical wires in an easy and orderly way, leaving only the hardware components visible in their specific niches. The real star of this piece is its decorative bold frame, available in endless styles: from classical ones with wood carving, handmade mosaic, Murano glass tiles, and gold/silver leaf, to contemporary ones with precious inserts like inlaid woods, embroidered leathers, marbles, and more.
import logging

from adhocracy import model
from adhocracy.lib.cache.util import clear_tag

log = logging.getLogger(__name__)


def invalidate_badge(badge):
    log.debug('invalidate_badge %s' % badge)
    clear_tag(badge)


def invalidate_userbadges(userbadges):
    clear_tag(userbadges)
    invalidate_user(userbadges.user)


def invalidate_delegateablebadges(delegateablebadges):
    clear_tag(delegateablebadges)
    invalidate_delegateable(delegateablebadges.delegateable)


def invalidate_user(user):
    clear_tag(user)


def invalidate_text(text):
    clear_tag(text)
    invalidate_page(text.page)


def invalidate_page(page):
    invalidate_delegateable(page)


def invalidate_delegateable(d, include_parents=True):
    clear_tag(d)
    if include_parents:
        for p in d.parents:
            invalidate_delegateable(p)
    if not len(d.parents):
        clear_tag(d.instance)


def invalidate_revision(rev):
    invalidate_comment(rev.comment)


def invalidate_comment(comment):
    clear_tag(comment)
    if comment.reply:
        invalidate_comment(comment.reply)
    invalidate_delegateable(comment.topic)


def invalidate_delegation(delegation):
    invalidate_user(delegation.principal)
    invalidate_user(delegation.agent)


def invalidate_vote(vote):
    clear_tag(vote)
    invalidate_user(vote.user)
    invalidate_poll(vote.poll)


def invalidate_selection(selection):
    if selection is None:
        return
    clear_tag(selection)
    if selection.page:
        invalidate_delegateable(selection.page)
    if selection.proposal:
        invalidate_delegateable(selection.proposal)


def invalidate_poll(poll):
    clear_tag(poll)
    if poll.action == poll.SELECT:
        invalidate_selection(poll.selection)
    elif isinstance(poll.subject, model.Delegateable):
        invalidate_delegateable(poll.subject)
    elif isinstance(poll.subject, model.Comment):
        invalidate_comment(poll.subject)


def invalidate_instance(instance):
    # muharhar cache epic fail
    clear_tag(instance)
    for d in instance.delegateables:
        invalidate_delegateable(d, include_parents=False)


def invalidate_tagging(tagging):
    clear_tag(tagging)
    invalidate_delegateable(tagging.delegateable)
My hair is longer than my attention span. OOPS. Sorry not sorry!
# -*- coding: utf-8 -*-
#
from rest_framework import status
from rest_framework.views import Response
from rest_framework_bulk import BulkModelViewSet

from common.permissions import IsSuperUserOrAppUser
from .models import Organization
from .serializers import OrgSerializer, OrgReadSerializer, \
    OrgMembershipUserSerializer, OrgMembershipAdminSerializer
from users.models import User, UserGroup
from assets.models import Asset, Domain, AdminUser, SystemUser, Label
from perms.models import AssetPermission
from orgs.utils import current_org
from common.utils import get_logger
from .mixins.api import OrgMembershipModelViewSetMixin

logger = get_logger(__file__)


class OrgViewSet(BulkModelViewSet):
    queryset = Organization.objects.all()
    serializer_class = OrgSerializer
    permission_classes = (IsSuperUserOrAppUser,)
    org = None

    def get_serializer_class(self):
        if self.action in ('list', 'retrieve'):
            return OrgReadSerializer
        else:
            return super().get_serializer_class()

    def get_data_from_model(self, model):
        if model == User:
            data = model.objects.filter(related_user_orgs__id=self.org.id)
        else:
            data = model.objects.filter(org_id=self.org.id)
        return data

    def destroy(self, request, *args, **kwargs):
        self.org = self.get_object()
        models = [
            User, UserGroup,
            Asset, Domain, AdminUser, SystemUser, Label,
            AssetPermission,
        ]
        # Refuse to delete an org that still owns resources; the for/else
        # only runs the delete when no model had related data.
        for model in models:
            data = self.get_data_from_model(model)
            if data:
                return Response(status=status.HTTP_400_BAD_REQUEST)
        else:
            if str(current_org) == str(self.org):
                return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)
            self.org.delete()
            return Response({'msg': True}, status=status.HTTP_200_OK)


class OrgMembershipAdminsViewSet(OrgMembershipModelViewSetMixin,
                                 BulkModelViewSet):
    serializer_class = OrgMembershipAdminSerializer
    membership_class = Organization.admins.through
    permission_classes = (IsSuperUserOrAppUser, )


class OrgMembershipUsersViewSet(OrgMembershipModelViewSetMixin,
                                BulkModelViewSet):
    serializer_class = OrgMembershipUserSerializer
    membership_class = Organization.users.through
    permission_classes = (IsSuperUserOrAppUser, )
Like many websites, Solo Lisa collects basic demographic information about site visitors. This information is aggregated and intended to help me better understand and serve site visitors. This information is not personally identifiable, sold to third parties, or used to contact individuals. Aggregate statistics about web visitors may be made available to third parties or potential advertisers upon request, but nothing that personally identifies a visitor is made available except with the visitor's explicit permission. Contests and promotions require entrants to provide some sort of contact information such as a blog, email address, or Twitter handle. This information is intended solely as a means of contact should the entrant win. By entering the contest or promotion, site visitors implicitly agree that they would like to receive communications in the context of the contest or promotion. Solo Lisa does not collect this contact information, sell it to third parties, or use it to contact individuals outside of the contest or promotion. However, because this information is provided in an open medium, there is no way to stop third parties from also accessing this information. Solo Lisa holds no liability in the latter situation. Privacy policies vary across advertisers; please check their websites for more information. If you are uncomfortable with having information collected about your user session, use your browser settings to disable cookies and trackers. To opt out, refer to your respective browser's documentation for more information about this topic, or click this link.
import ast
import types
import decimal
import unittest

a_global = 'global variable'

# You could argue that I'm too strict in looking for specific error
# values with assertRaisesRegex, but without it it's way too easy to
# make a syntax error in the test strings. Especially with all of the
# triple quotes, raw strings, backslashes, etc. I think it's a
# worthwhile tradeoff. When I switched to this method, I found many
# examples where I wasn't testing what I thought I was.


class TestCase(unittest.TestCase):
    def assertAllRaise(self, exception_type, regex, error_strings):
        for str in error_strings:
            with self.subTest(str=str):
                with self.assertRaisesRegex(exception_type, regex):
                    eval(str)

    def test__format__lookup(self):
        # Make sure __format__ is looked up on the type, not the instance.
        class X:
            def __format__(self, spec):
                return 'class'

        x = X()

        # Add a bound __format__ method to the 'y' instance, but not
        # the 'x' instance.
        y = X()
        y.__format__ = types.MethodType(lambda self, spec: 'instance', y)

        self.assertEqual(f'{y}', format(y))
        self.assertEqual(f'{y}', 'class')
        self.assertEqual(format(x), format(y))

        # __format__ is not called this way, but still make sure it
        # returns what we expect (so we can make sure we're bypassing
        # it).
        self.assertEqual(x.__format__(''), 'class')
        self.assertEqual(y.__format__(''), 'instance')

        # This is how __format__ is actually called.
        self.assertEqual(type(x).__format__(x, ''), 'class')
        self.assertEqual(type(y).__format__(y, ''), 'class')

    def test_ast(self):
        # Inspired by http://bugs.python.org/issue24975
        class X:
            def __init__(self):
                self.called = False

            def __call__(self):
                self.called = True
                return 4

        x = X()
        expr = """
a = 10
f'{a * x()}'"""
        t = ast.parse(expr)
        c = compile(t, '', 'exec')

        # Make sure x was not called.
        self.assertFalse(x.called)

        # Actually run the code.
        exec(c)

        # Make sure x was called.
        self.assertTrue(x.called)

    def test_literal_eval(self):
        # With no expressions, an f-string is okay.
        self.assertEqual(ast.literal_eval("f'x'"), 'x')
        self.assertEqual(ast.literal_eval("f'x' 'y'"), 'xy')

        # But this should raise an error.
        with self.assertRaisesRegex(ValueError, 'malformed node or string'):
            ast.literal_eval("f'x{3}'")

        # As should this, which uses a different ast node
        with self.assertRaisesRegex(ValueError, 'malformed node or string'):
            ast.literal_eval("f'{3}'")

    def test_ast_compile_time_concat(self):
        x = ['']
        expr = """x[0] = 'foo' f'{3}'"""
        t = ast.parse(expr)
        c = compile(t, '', 'exec')
        exec(c)
        self.assertEqual(x[0], 'foo3')

    def test_literal(self):
        self.assertEqual(f'', '')
        self.assertEqual(f'a', 'a')
        self.assertEqual(f' ', ' ')

    def test_unterminated_string(self):
        self.assertAllRaise(SyntaxError, 'f-string: unterminated string',
                            [r"""f'{"x'""",
                             r"""f'{"x}'""",
                             r"""f'{("x'""",
                             r"""f'{("x}'""",
                             ])

    def test_mismatched_parens(self):
        self.assertAllRaise(SyntaxError, 'f-string: mismatched',
                            ["f'{((}'",
                             ])

    def test_double_braces(self):
        self.assertEqual(f'{{', '{')
        self.assertEqual(f'a{{', 'a{')
        self.assertEqual(f'{{b', '{b')
        self.assertEqual(f'a{{b', 'a{b')
        self.assertEqual(f'}}', '}')
        self.assertEqual(f'a}}', 'a}')
        self.assertEqual(f'}}b', '}b')
        self.assertEqual(f'a}}b', 'a}b')
        self.assertEqual(f'{{}}', '{}')
        self.assertEqual(f'a{{}}', 'a{}')
        self.assertEqual(f'{{b}}', '{b}')
        self.assertEqual(f'{{}}c', '{}c')
        self.assertEqual(f'a{{b}}', 'a{b}')
        self.assertEqual(f'a{{}}c', 'a{}c')
        self.assertEqual(f'{{b}}c', '{b}c')
        self.assertEqual(f'a{{b}}c', 'a{b}c')

        self.assertEqual(f'{{{10}', '{10')
        self.assertEqual(f'}}{10}', '}10')
        self.assertEqual(f'}}{{{10}', '}{10')
        self.assertEqual(f'}}a{{{10}', '}a{10')

        self.assertEqual(f'{10}{{', '10{')
        self.assertEqual(f'{10}}}', '10}')
        self.assertEqual(f'{10}}}{{', '10}{')
        self.assertEqual(f'{10}}}a{{' '}', '10}a{}')

        # Inside of strings, don't interpret doubled brackets.
        self.assertEqual(f'{"{{}}"}', '{{}}')

        self.assertAllRaise(TypeError, 'unhashable type',
                            ["f'{ {{}} }'",  # dict in a set
                             ])

    def test_compile_time_concat(self):
        x = 'def'
        self.assertEqual('abc' f'## {x}ghi', 'abc## defghi')
        self.assertEqual('abc' f'{x}' 'ghi', 'abcdefghi')
        self.assertEqual('abc' f'{x}' 'gh' f'i{x:4}', 'abcdefghidef ')
        self.assertEqual('{x}' f'{x}', '{x}def')
        self.assertEqual('{x' f'{x}', '{xdef')
        self.assertEqual('{x}' f'{x}', '{x}def')
        self.assertEqual('{{x}}' f'{x}', '{{x}}def')
        self.assertEqual('{{x' f'{x}', '{{xdef')
        self.assertEqual('x}}' f'{x}', 'x}}def')
        self.assertEqual(f'{x}' 'x}}', 'defx}}')
        self.assertEqual(f'{x}' '', 'def')
        self.assertEqual('' f'{x}' '', 'def')
        self.assertEqual('' f'{x}', 'def')
        self.assertEqual(f'{x}' '2', 'def2')
        self.assertEqual('1' f'{x}' '2', '1def2')
        self.assertEqual('1' f'{x}', '1def')
        self.assertEqual(f'{x}' f'-{x}', 'def-def')
        self.assertEqual('' f'', '')
        self.assertEqual('' f'' '', '')
        self.assertEqual('' f'' '' f'', '')
        self.assertEqual(f'', '')
        self.assertEqual(f'' '', '')
        self.assertEqual(f'' '' f'', '')
        self.assertEqual(f'' '' f'' '', '')

        self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
                            ["f'{3' f'}'",  # can't concat to get a valid f-string
                             ])

    def test_comments(self):
        # These aren't comments, since they're in strings.
        d = {'#': 'hash'}
        self.assertEqual(f'{"#"}', '#')
        self.assertEqual(f'{d["#"]}', 'hash')

        self.assertAllRaise(SyntaxError, "f-string expression part cannot include '#'",
                            ["f'{1#}'",  # error because the expression becomes "(1#)"
                             "f'{3(#)}'",
                             "f'{#}'",
                             "f'{)#}'",  # When wrapped in parens, this becomes
                                         # '()#)'. Make sure that doesn't compile.
                             ])

    def test_many_expressions(self):
        # Create a string with many expressions in it. Note that
        # because we have a space in here as a literal, we're actually
        # going to use twice as many ast nodes: one for each literal
        # plus one for each expression.
        def build_fstr(n, extra=''):
            return "f'" + ('{x} ' * n) + extra + "'"

        x = 'X'
        width = 1

        # Test around 256.
        for i in range(250, 260):
            self.assertEqual(eval(build_fstr(i)), (x+' ')*i)

        # Test concatenating 2 large fstrings.
        self.assertEqual(eval(build_fstr(255)*256), (x+' ')*(255*256))

        s = build_fstr(253, '{x:{width}} ')
        self.assertEqual(eval(s), (x+' ')*254)

        # Test lots of expressions and constants, concatenated.
        s = "f'{1}' 'x' 'y'" * 1024
        self.assertEqual(eval(s), '1xy' * 1024)

    def test_format_specifier_expressions(self):
        width = 10
        precision = 4
        value = decimal.Decimal('12.34567')
        self.assertEqual(f'result: {value:{width}.{precision}}', 'result:      12.35')
        self.assertEqual(f'result: {value:{width!r}.{precision}}', 'result:      12.35')
        self.assertEqual(f'result: {value:{width:0}.{precision:1}}', 'result:      12.35')
        self.assertEqual(f'result: {value:{1}{0:0}.{precision:1}}', 'result:      12.35')
        self.assertEqual(f'result: {value:{ 1}{ 0:0}.{ precision:1}}', 'result:      12.35')
        self.assertEqual(f'{10:#{1}0x}', '       0xa')
        self.assertEqual(f'{10:{"#"}1{0}{"x"}}', '       0xa')
        self.assertEqual(f'{-10:-{"#"}1{0}x}', '      -0xa')
        self.assertEqual(f'{-10:{"-"}#{1}0{"x"}}', '      -0xa')
        self.assertEqual(f'{10:#{3 != {4:5} and width}x}', '       0xa')

        self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
                            ["""f'{"s"!r{":10"}}'""",
                             # This looks like a nested format spec.
                             ])

        self.assertAllRaise(SyntaxError, "invalid syntax",
                            [  # Invalid syntax inside a nested spec.
                             "f'{4:{/5}}'",
                             ])

        self.assertAllRaise(SyntaxError, "f-string: expressions nested too deeply",
                            [  # Can't nest format specifiers.
                             "f'result: {value:{width:{0}}.{precision:1}}'",
                             ])

        self.assertAllRaise(SyntaxError, 'f-string: invalid conversion character',
                            [  # No expansion inside conversion or for
                               # the : or ! itself.
                             """f'{"s"!{"r"}}'""",
                             ])

    def test_side_effect_order(self):
        class X:
            def __init__(self):
                self.i = 0

            def __format__(self, spec):
                self.i += 1
                return str(self.i)

        x = X()
        self.assertEqual(f'{x} {x}', '1 2')

    def test_missing_expression(self):
        self.assertAllRaise(SyntaxError, 'f-string: empty expression not allowed',
                            ["f'{}'",
                             "f'{ }'",
                             "f' {} '",
                             "f'{!r}'",
                             "f'{ !r}'",
                             "f'{10:{ }}'",
                             "f' { } '",

                             # Catch the empty expression before the
                             # invalid conversion.
                             "f'{!x}'",
                             "f'{ !xr}'",
                             "f'{!x:}'",
                             "f'{!x:a}'",
                             "f'{ !xr:}'",
                             "f'{ !xr:a}'",

                             "f'{!}'",
                             "f'{:}'",

                             # We find the empty expression before the
                             # missing closing brace.
                             "f'{!'",
                             "f'{!s:'",
                             "f'{:'",
                             "f'{:x'",
                             ])

    def test_parens_in_expressions(self):
        self.assertEqual(f'{3,}', '(3,)')

        # Add these because when an expression is evaluated, parens
        # are added around it. But we shouldn't go from an invalid
        # expression to a valid one. The added parens are just
        # supposed to allow whitespace (including newlines).
        self.assertAllRaise(SyntaxError, 'invalid syntax',
                            ["f'{,}'",
                             "f'{,}'",  # this is (,), which is an error
                             ])

        self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
                            ["f'{3)+(4}'",
                             ])

        self.assertAllRaise(SyntaxError, 'EOL while scanning string literal',
                            ["f'{\n}'",
                             ])

    def test_backslashes_in_string_part(self):
        self.assertEqual(f'\t', '\t')
        self.assertEqual(r'\t', '\\t')
        self.assertEqual(rf'\t', '\\t')
        self.assertEqual(f'{2}\t', '2\t')
        self.assertEqual(f'{2}\t{3}', '2\t3')
        self.assertEqual(f'\t{3}', '\t3')

        self.assertEqual(f'\u0394', '\u0394')
        self.assertEqual(r'\u0394', '\\u0394')
        self.assertEqual(rf'\u0394', '\\u0394')
        self.assertEqual(f'{2}\u0394', '2\u0394')
        self.assertEqual(f'{2}\u0394{3}', '2\u03943')
        self.assertEqual(f'\u0394{3}', '\u03943')

        self.assertEqual(f'\U00000394', '\u0394')
        self.assertEqual(r'\U00000394', '\\U00000394')
        self.assertEqual(rf'\U00000394', '\\U00000394')
        self.assertEqual(f'{2}\U00000394', '2\u0394')
        self.assertEqual(f'{2}\U00000394{3}', '2\u03943')
        self.assertEqual(f'\U00000394{3}', '\u03943')

        self.assertEqual(f'\N{GREEK CAPITAL LETTER DELTA}', '\u0394')
        self.assertEqual(f'{2}\N{GREEK CAPITAL LETTER DELTA}', '2\u0394')
        self.assertEqual(f'{2}\N{GREEK CAPITAL LETTER DELTA}{3}', '2\u03943')
        self.assertEqual(f'\N{GREEK CAPITAL LETTER DELTA}{3}', '\u03943')
        self.assertEqual(f'2\N{GREEK CAPITAL LETTER DELTA}', '2\u0394')
        self.assertEqual(f'2\N{GREEK CAPITAL LETTER DELTA}3', '2\u03943')
        self.assertEqual(f'\N{GREEK CAPITAL LETTER DELTA}3', '\u03943')

        self.assertEqual(f'\x20', ' ')
        self.assertEqual(r'\x20', '\\x20')
        self.assertEqual(rf'\x20', '\\x20')
        self.assertEqual(f'{2}\x20', '2 ')
        self.assertEqual(f'{2}\x20{3}', '2 3')
        self.assertEqual(f'\x20{3}', ' 3')

        self.assertEqual(f'2\x20', '2 ')
        self.assertEqual(f'2\x203', '2 3')
        self.assertEqual(f'\x203', ' 3')

    def test_misformed_unicode_character_name(self):
        # These tests are needed because unicode names are parsed
        # differently inside f-strings.
        self.assertAllRaise(SyntaxError, r"\(unicode error\) 'unicodeescape' codec can't decode bytes in position .*: malformed \\N character escape",
                            [r"f'\N'",
                             r"f'\N{'",
                             r"f'\N{GREEK CAPITAL LETTER DELTA'",

                             # Here are the non-f-string versions,
                             # which should give the same errors.
                             r"'\N'",
                             r"'\N{'",
                             r"'\N{GREEK CAPITAL LETTER DELTA'",
                             ])

    def test_no_backslashes_in_expression_part(self):
        self.assertAllRaise(SyntaxError, 'f-string expression part cannot include a backslash',
                            [r"f'{\'a\'}'",
                             r"f'{\t3}'",
                             r"f'{\}'",
                             r"rf'{\'a\'}'",
                             r"rf'{\t3}'",
                             r"rf'{\}'",
                             r"""rf'{"\N{LEFT CURLY BRACKET}"}'""",
                             ])

    def test_no_escapes_for_braces(self):
        # \x7b is '{'. Make sure it doesn't start an expression.
        self.assertEqual(f'\x7b2}}', '{2}')
        self.assertEqual(f'\x7b2', '{2')
        self.assertEqual(f'\u007b2', '{2')
        self.assertEqual(f'\N{LEFT CURLY BRACKET}2\N{RIGHT CURLY BRACKET}', '{2}')

    def test_newlines_in_expressions(self):
        self.assertEqual(f'{0}', '0')
        self.assertEqual(rf'''{3+
4}''', '7')

    def test_lambda(self):
        x = 5
        self.assertEqual(f'{(lambda y:x*y)("8")!r}', "'88888'")
        self.assertEqual(f'{(lambda y:x*y)("8")!r:10}', "'88888'   ")
        self.assertEqual(f'{(lambda y:x*y)("8"):10}', "88888     ")

        # lambda doesn't work without parens, because the colon
        # makes the parser think it's a format_spec
        self.assertAllRaise(SyntaxError, 'unexpected EOF while parsing',
                            ["f'{lambda x:x}'",
                             ])

    def test_yield(self):
        # Not terribly useful, but make sure the yield turns
        # a function into a generator
        def fn(y):
            f'y:{yield y*2}'

        g = fn(4)
        self.assertEqual(next(g), 8)

    def test_yield_send(self):
        def fn(x):
            yield f'x:{yield (lambda i: x * i)}'

        g = fn(10)
        the_lambda = next(g)
        self.assertEqual(the_lambda(4), 40)
        self.assertEqual(g.send('string'), 'x:string')

    def test_expressions_with_triple_quoted_strings(self):
        self.assertEqual(f"{'''x'''}", 'x')
        self.assertEqual(f"{'''eric's'''}", "eric's")

        # Test concatenation within an expression
        self.assertEqual(f'{"x" """eric"s""" "y"}', 'xeric"sy')
        self.assertEqual(f'{"x" """eric"s"""}', 'xeric"s')
        self.assertEqual(f'{"""eric"s""" "y"}', 'eric"sy')
        self.assertEqual(f'{"""x""" """eric"s""" "y"}', 'xeric"sy')
        self.assertEqual(f'{"""x""" """eric"s""" """y"""}', 'xeric"sy')
        self.assertEqual(f'{r"""x""" """eric"s""" """y"""}', 'xeric"sy')

    def test_multiple_vars(self):
        x = 98
        y = 'abc'
        self.assertEqual(f'{x}{y}', '98abc')

        self.assertEqual(f'X{x}{y}', 'X98abc')
        self.assertEqual(f'{x}X{y}', '98Xabc')
        self.assertEqual(f'{x}{y}X', '98abcX')

        self.assertEqual(f'X{x}Y{y}', 'X98Yabc')
        self.assertEqual(f'X{x}{y}Y', 'X98abcY')
        self.assertEqual(f'{x}X{y}Y', '98XabcY')

        self.assertEqual(f'X{x}Y{y}Z', 'X98YabcZ')

    def test_closure(self):
        def outer(x):
            def inner():
                return f'x:{x}'
            return inner

        self.assertEqual(outer('987')(), 'x:987')
        self.assertEqual(outer(7)(), 'x:7')

    def test_arguments(self):
        y = 2
        def f(x, width):
            return f'x={x*y:{width}}'

        self.assertEqual(f('foo', 10), 'x=foofoo    ')
        x = 'bar'
        self.assertEqual(f(10, 10), 'x=        20')

    def test_locals(self):
        value = 123
        self.assertEqual(f'v:{value}', 'v:123')

    def test_missing_variable(self):
        with self.assertRaises(NameError):
            f'v:{value}'

    def test_missing_format_spec(self):
        class O:
            def __format__(self, spec):
                if not spec:
                    return '*'
                return spec

        self.assertEqual(f'{O():x}', 'x')
        self.assertEqual(f'{O()}', '*')
        self.assertEqual(f'{O():}', '*')

        self.assertEqual(f'{3:}', '3')
        self.assertEqual(f'{3!s:}', '3')

    def test_global(self):
        self.assertEqual(f'g:{a_global}', 'g:global variable')
        self.assertEqual(f'g:{a_global!r}', "g:'global variable'")

        a_local = 'local variable'
        self.assertEqual(f'g:{a_global} l:{a_local}',
                         'g:global variable l:local variable')
        self.assertEqual(f'g:{a_global!r}',
                         "g:'global variable'")
        self.assertEqual(f'g:{a_global} l:{a_local!r}',
                         "g:global variable l:'local variable'")

        self.assertIn("module 'unittest' from", f'{unittest}')

    def test_shadowed_global(self):
        a_global = 'really a local'
        self.assertEqual(f'g:{a_global}', 'g:really a local')
        self.assertEqual(f'g:{a_global!r}', "g:'really a local'")

        a_local = 'local variable'
        self.assertEqual(f'g:{a_global} l:{a_local}',
                         'g:really a local l:local variable')
        self.assertEqual(f'g:{a_global!r}',
                         "g:'really a local'")
        self.assertEqual(f'g:{a_global} l:{a_local!r}',
                         "g:really a local l:'local variable'")
variable'") def test_call(self): def foo(x): return 'x=' + str(x) self.assertEqual(f'{foo(10)}', 'x=10') def test_nested_fstrings(self): y = 5 self.assertEqual(f'{f"{0}"*3}', '000') self.assertEqual(f'{f"{y}"*3}', '555') def test_invalid_string_prefixes(self): self.assertAllRaise(SyntaxError, 'unexpected EOF while parsing', ["fu''", "uf''", "Fu''", "fU''", "Uf''", "uF''", "ufr''", "urf''", "fur''", "fru''", "rfu''", "ruf''", "FUR''", "Fur''", "fb''", "fB''", "Fb''", "FB''", "bf''", "bF''", "Bf''", "BF''", ]) def test_leading_trailing_spaces(self): self.assertEqual(f'{ 3}', '3') self.assertEqual(f'{ 3}', '3') self.assertEqual(f'{3 }', '3') self.assertEqual(f'{3 }', '3') self.assertEqual(f'expr={ {x: y for x, y in [(1, 2), ]}}', 'expr={1: 2}') self.assertEqual(f'expr={ {x: y for x, y in [(1, 2), ]} }', 'expr={1: 2}') def test_not_equal(self): # There's a special test for this because there's a special # case in the f-string parser to look for != as not ending an # expression. Normally it would, while looking for !s or !r. self.assertEqual(f'{3!=4}', 'True') self.assertEqual(f'{3!=4:}', 'True') self.assertEqual(f'{3!=4!s}', 'True') self.assertEqual(f'{3!=4!s:.3}', 'Tru') def test_conversions(self): self.assertEqual(f'{3.14:10.10}', ' 3.14') self.assertEqual(f'{3.14!s:10.10}', '3.14 ') self.assertEqual(f'{3.14!r:10.10}', '3.14 ') self.assertEqual(f'{3.14!a:10.10}', '3.14 ') self.assertEqual(f'{"a"}', 'a') self.assertEqual(f'{"a"!r}', "'a'") self.assertEqual(f'{"a"!a}', "'a'") # Not a conversion. self.assertEqual(f'{"a!r"}', "a!r") # Not a conversion, but show that ! is allowed in a format spec. self.assertEqual(f'{3.14:!<10.10}', '3.14!!!!!!') self.assertAllRaise(SyntaxError, 'f-string: invalid conversion character', ["f'{3!g}'", "f'{3!A}'", "f'{3!3}'", "f'{3!G}'", "f'{3!!}'", "f'{3!:}'", "f'{3! s}'", # no space before conversion char ]) self.assertAllRaise(SyntaxError, "f-string: expecting '}'", ["f'{x!s{y}}'", "f'{3!ss}'", "f'{3!ss:}'", "f'{3!ss:s}'", ]) def test_assignment(self): self.assertAllRaise(SyntaxError, 'invalid syntax', ["f'' = 3", "f'{0}' = x", "f'{x}' = x", ]) def test_del(self): self.assertAllRaise(SyntaxError, 'invalid syntax', ["del f''", "del '' f''", ]) def test_mismatched_braces(self): self.assertAllRaise(SyntaxError, "f-string: single '}' is not allowed", ["f'{{}'", "f'{{}}}'", "f'}'", "f'x}'", "f'x}x'", # Can't have { or } in a format spec. "f'{3:}>10}'", "f'{3:}}>10}'", ]) self.assertAllRaise(SyntaxError, "f-string: expecting '}'", ["f'{3:{{>10}'", "f'{3'", "f'{3!'", "f'{3:'", "f'{3!s'", "f'{3!s:'", "f'{3!s:3'", "f'x{'", "f'x{x'", "f'{x'", "f'{3:s'", "f'{{{'", "f'{{}}{'", "f'{'", ]) # But these are just normal strings. self.assertEqual(f'{"{"}', '{') self.assertEqual(f'{"}"}', '}') self.assertEqual(f'{3:{"}"}>10}', '}}}}}}}}}3') self.assertEqual(f'{2:{"{"}>10}', '{{{{{{{{{2') def test_if_conditional(self): # There's special logic in compile.c to test if the # conditional for an if (and while) are constants. Exercise # that code. 
def test_fstring(x, expected): flag = 0 if f'{x}': flag = 1 else: flag = 2 self.assertEqual(flag, expected) def test_concat_empty(x, expected): flag = 0 if '' f'{x}': flag = 1 else: flag = 2 self.assertEqual(flag, expected) def test_concat_non_empty(x, expected): flag = 0 if ' ' f'{x}': flag = 1 else: flag = 2 self.assertEqual(flag, expected) test_fstring('', 2) test_fstring(' ', 1) test_concat_empty('', 2) test_concat_empty(' ', 1) test_concat_non_empty('', 1) test_concat_non_empty(' ', 1) def test_empty_format_specifier(self): x = 'test' self.assertEqual(f'{x}', 'test') self.assertEqual(f'{x:}', 'test') self.assertEqual(f'{x!s:}', 'test') self.assertEqual(f'{x!r:}', "'test'") def test_str_format_differences(self): d = {'a': 'string', 0: 'integer', } a = 0 self.assertEqual(f'{d[0]}', 'integer') self.assertEqual(f'{d["a"]}', 'string') self.assertEqual(f'{d[a]}', 'integer') self.assertEqual('{d[a]}'.format(d=d), 'string') self.assertEqual('{d[0]}'.format(d=d), 'integer') def test_invalid_expressions(self): self.assertAllRaise(SyntaxError, 'invalid syntax', [r"f'{a[4)}'", r"f'{a(4]}'", ]) def test_errors(self): # see issue 26287 self.assertAllRaise(TypeError, 'non-empty', [r"f'{(lambda: 0):x}'", r"f'{(0,):x}'", ]) self.assertAllRaise(ValueError, 'Unknown format code', [r"f'{1000:j}'", r"f'{1000:j}'", ]) def test_loop(self): for i in range(1000): self.assertEqual(f'i:{i}', 'i:' + str(i)) def test_dict(self): d = {'"': 'dquote', "'": 'squote', 'foo': 'bar', } self.assertEqual(f'''{d["'"]}''', 'squote') self.assertEqual(f"""{d['"']}""", 'dquote') self.assertEqual(f'{d["foo"]}', 'bar') self.assertEqual(f"{d['foo']}", 'bar') if __name__ == '__main__': unittest.main()
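A minimal standalone sketch (not part of the test suite above) illustrating the three behaviors these tests exercise most: nested format specifiers, conversion characters, and doubled braces. The variable names are invented for illustration.

import decimal

width = 10
precision = 4
value = decimal.Decimal('12.34567')

# Nested expressions inside the format spec are evaluated first, so this is
# equivalent to formatting with the literal spec '10.4'.
assert f'{value:{width}.{precision}}' == format(value, '10.4')

# !r, !s and !a apply repr(), str() or ascii() before formatting.
assert f'{"a"!r}' == "'a'"

# Doubled braces are literal braces, not expression delimiters.
assert f'{{literal}} {1 + 1}' == '{literal} 2'

print('f-string sketches passed')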
We are looking for talents for our Warsaw office for the position of Managerial Internship in Logistics.

Start: immediately
Work schedule: full-time/part-time with flexible working hours
Duration: 3-6 months

This is an excellent opportunity for students in their 3rd year or above who would like to work on a flexible/reduced schedule. Do you want to join us and learn from the top Logistics organization on the market?

About us: At Procter & Gamble, we are proud to say that we develop talents almost exclusively from within. This means we are not just offering you your first job out of university; we are hiring you with the expectation that you will grow into one of our future leaders - maybe even our next CEO. We are the world's largest consumer goods company, with operations in 80 countries, more than 80 billion US$ in revenues and 25 Billion-Dollar Brands, such as Gillette®, Ariel®, Pantene® and Oral-B®.

About the Logistics department: We add new value to the supply network every single day. We are uniquely positioned to handle the supply chain from end to end - starting with our materials through the touch-point of our brands with shoppers. We are known as the premier Supply Network Innovators in the market (recently recognized as Global Supply Chain Masters). We are both the voice of the customer for supply and the integrator across the Company's disciplines. This perspective, along with our deep understanding of the supply network, accelerates our ability to craft and build value for us, our customers and our suppliers. You can start your career in different and diverse work areas within P&G that comprise our supply network operations. Currently we are searching for top talents to join our Warsaw-based organization in:
• Customization Team - working closely with our third-party logistics partners (3PL), responsible for daily process management and 3PL governance, and especially for project work driving value for our customers (creative customization solutions) or net structural savings for P&G through productivity and work-process improvements and automation innovations
• Customer Logistics Team - working with our Sales department, focused on improving the quality of customer promotions and feature planning; also a key source of customer knowledge, driving the E2E supply chain synchronization program in a customer-centric way
• Warehousing Team - working with our third-party logistics partners (3PL), mainly responsible for warehouse process optimization, driving creative supply chain design solutions including cross-docking, and managing operational perfection on a daily basis

Why apply - the job scope:
• Lead significant projects on your own, with full responsibility for the business results
• Collaborate with employees from different functions, with diverse international and professional backgrounds
• Enjoy a steep personal learning curve as you absorb our way of doing business, technologies and systems, corporate culture, organizational set-up, management styles, etc.
• Develop yourself through leadership, feedback, coaching and mentoring provided by your manager and colleagues
• Use different Supply Chain analytical tools and apply operational excellence methodologies - zero loss, PDCA, Work Process Improvement, Value Stream Mapping, etc.
• Develop according to your individual training plan
• Enjoy an attractive salary and compensation package
• Participate in accelerated on-the-job and classroom trainings to develop your personal capabilities
• Be supported in independent decision-making and risk-taking
• Have the opportunity for a managerial role upon completion of the program
# This program is free software; you can redistribute it and/or modify # it under the terms of the (LGPL) GNU Lesser General Public License as # published by the Free Software Foundation; either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Library Lesser General Public License for more details at # ( http://www.gnu.org/licenses/lgpl.html ). # # You should have received a copy of the GNU Lesser General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # written by: Jeff Ortel ( [email protected] ) """ The I{2nd generation} service proxy provides access to web services. See I{README.txt} """ import suds import suds.metrics as metrics from cookielib import CookieJar from suds import * from suds.reader import DefinitionsReader from suds.transport import TransportError, Request from suds.transport.https import HttpAuthenticated from suds.servicedefinition import ServiceDefinition from suds import sudsobject from sudsobject import Factory as InstFactory from sudsobject import Object from suds.resolver import PathResolver from suds.builder import Builder from suds.wsdl import Definitions from suds.cache import ObjectCache from suds.sax.document import Document from suds.sax.parser import Parser from suds.options import Options from suds.properties import Unskin from urlparse import urlparse from copy import deepcopy from suds.plugin import PluginContainer from logging import getLogger log = getLogger(__name__) class Client(object): """ A lightweight web services client. I{(2nd generation)} API. @ivar wsdl: The WSDL object. @type wsdl:L{Definitions} @ivar service: The service proxy used to invoke operations. @type service: L{Service} @ivar factory: The factory used to create objects. @type factory: L{Factory} @ivar sd: The service definition @type sd: L{ServiceDefinition} @ivar messages: The last sent/received messages. @type messages: str[2] """ @classmethod def items(cls, sobject): """ Extract the I{items} from a suds object much like the items() method works on I{dict}. @param sobject: A suds object @type sobject: L{Object} @return: A list of items contained in I{sobject}. @rtype: [(key, value),...] """ return sudsobject.items(sobject) @classmethod def dict(cls, sobject): """ Convert a sudsobject into a dictionary. @param sobject: A suds object @type sobject: L{Object} @return: A python dictionary containing the items contained in I{sobject}. @rtype: dict """ return sudsobject.asdict(sobject) @classmethod def metadata(cls, sobject): """ Extract the metadata from a suds object. @param sobject: A suds object @type sobject: L{Object} @return: The object's metadata @rtype: L{sudsobject.Metadata} """ return sobject.__metadata__ def __init__(self, url, **kwargs): """ @param url: The URL for the WSDL. @type url: str @param kwargs: keyword arguments. 
@see: L{Options} """ options = Options() options.transport = HttpAuthenticated() self.options = options options.cache = ObjectCache(days=1) self.set_options(**kwargs) reader = DefinitionsReader(options, Definitions) self.wsdl = reader.open(url) plugins = PluginContainer(options.plugins) plugins.init.initialized(wsdl=self.wsdl) self.factory = Factory(self.wsdl) self.service = ServiceSelector(self, self.wsdl.services) self.sd = [] for s in self.wsdl.services: sd = ServiceDefinition(self.wsdl, s) self.sd.append(sd) self.messages = dict(tx=None, rx=None) def set_options(self, **kwargs): """ Set options. @param kwargs: keyword arguments. @see: L{Options} """ p = Unskin(self.options) p.update(kwargs) def add_prefix(self, prefix, uri): """ Add I{static} mapping of an XML namespace prefix to a namespace. This is useful for cases when a wsdl and referenced schemas make heavy use of namespaces and those namespaces are subject to changed. @param prefix: An XML namespace prefix. @type prefix: str @param uri: An XML namespace URI. @type uri: str @raise Exception: when prefix is already mapped. """ root = self.wsdl.root mapped = root.resolvePrefix(prefix, None) if mapped is None: root.addPrefix(prefix, uri) return if mapped[1] != uri: raise Exception('"%s" already mapped as "%s"' % (prefix, mapped)) def last_sent(self): """ Get last sent I{soap} message. @return: The last sent I{soap} message. @rtype: L{Document} """ return self.messages.get('tx') def last_received(self): """ Get last received I{soap} message. @return: The last received I{soap} message. @rtype: L{Document} """ return self.messages.get('rx') def clone(self): """ Get a shallow clone of this object. The clone only shares the WSDL. All other attributes are unique to the cloned object including options. @return: A shallow clone. @rtype: L{Client} """ class Uninitialized(Client): def __init__(self): pass clone = Uninitialized() clone.options = Options() cp = Unskin(clone.options) mp = Unskin(self.options) cp.update(deepcopy(mp)) clone.wsdl = self.wsdl clone.factory = self.factory clone.service = ServiceSelector(clone, self.wsdl.services) clone.sd = self.sd clone.messages = dict(tx=None, rx=None) return clone def __str__(self): return unicode(self) def __unicode__(self): s = ['\n'] build = suds.__build__.split() s.append('Suds ( https://fedorahosted.org/suds/ )') s.append(' version: %s' % suds.__version__) s.append(' %s build: %s' % (build[0], build[1])) for sd in self.sd: s.append('\n\n%s' % unicode(sd)) return ''.join(s) class Factory: """ A factory for instantiating types defined in the wsdl @ivar resolver: A schema type resolver. @type resolver: L{PathResolver} @ivar builder: A schema object builder. @type builder: L{Builder} """ def __init__(self, wsdl): """ @param wsdl: A schema object. @type wsdl: L{wsdl.Definitions} """ self.wsdl = wsdl self.resolver = PathResolver(wsdl) self.builder = Builder(self.resolver) def create(self, name): """ create a WSDL type by name @param name: The name of a type defined in the WSDL. @type name: str @return: The requested object. 
@rtype: L{Object} """ timer = metrics.Timer() timer.start() type = self.resolver.find(name) if type is None: raise TypeNotFound(name) if type.enum(): result = InstFactory.object(name) for e, a in type.children(): setattr(result, e.name, e.name) else: try: result = self.builder.build(type) except Exception, e: log.error("create '%s' failed", name, exc_info=True) raise BuildError(name, e) timer.stop() metrics.log.debug('%s created: %s', name, timer) return result def separator(self, ps): """ Set the path separator. @param ps: The new path separator. @type ps: char """ self.resolver = PathResolver(self.wsdl, ps) class ServiceSelector: """ The B{service} selector is used to select a web service. In most cases, the wsdl only defines (1) service in which access by subscript is passed through to a L{PortSelector}. This is also the behavior when a I{default} service has been specified. In cases where multiple services have been defined and no default has been specified, the service is found by name (or index) and a L{PortSelector} for the service is returned. In all cases, attribute access is forwarded to the L{PortSelector} for either the I{first} service or the I{default} service (when specified). @ivar __client: A suds client. @type __client: L{Client} @ivar __services: A list of I{wsdl} services. @type __services: list """ def __init__(self, client, services): """ @param client: A suds client. @type client: L{Client} @param services: A list of I{wsdl} services. @type services: list """ self.__client = client self.__services = services def __getattr__(self, name): """ Request to access an attribute is forwarded to the L{PortSelector} for either the I{first} service or the I{default} service (when specified). @param name: The name of a method. @type name: str @return: A L{PortSelector}. @rtype: L{PortSelector}. """ default = self.__ds() if default is None: port = self.__find(0) else: port = default return getattr(port, name) def __getitem__(self, name): """ Provides selection of the I{service} by name (string) or index (integer). In cases where only (1) service is defined or a I{default} has been specified, the request is forwarded to the L{PortSelector}. @param name: The name (or index) of a service. @type name: (int|str) @return: A L{PortSelector} for the specified service. @rtype: L{PortSelector}. """ if len(self.__services) == 1: port = self.__find(0) return port[name] default = self.__ds() if default is not None: port = default return port[name] return self.__find(name) def __find(self, name): """ Find a I{service} by name (string) or index (integer). @param name: The name (or index) of a service. @type name: (int|str) @return: A L{PortSelector} for the found service. @rtype: L{PortSelector}. """ service = None if not len(self.__services): raise Exception, 'No services defined' if isinstance(name, int): try: service = self.__services[name] name = service.name except IndexError: raise ServiceNotFound, 'at [%d]' % name else: for s in self.__services: if name == s.name: service = s break if service is None: raise ServiceNotFound, name return PortSelector(self.__client, service.ports, name) def __ds(self): """ Get the I{default} service if defined in the I{options}. @return: A L{PortSelector} for the I{default} service. @rtype: L{PortSelector}. """ ds = self.__client.options.service if ds is None: return None else: return self.__find(ds) class PortSelector: """ The B{port} selector is used to select a I{web service} B{port}. 
In cases where multiple ports have been defined and no default has been specified, the port is found by name (or index) and a L{MethodSelector} for the port is returned. In all cases, attribute access is forwarded to the L{MethodSelector} for either the I{first} port or the I{default} port (when specified). @ivar __client: A suds client. @type __client: L{Client} @ivar __ports: A list of I{service} ports. @type __ports: list @ivar __qn: The I{qualified} name of the port (used for logging). @type __qn: str """ def __init__(self, client, ports, qn): """ @param client: A suds client. @type client: L{Client} @param ports: A list of I{service} ports. @type ports: list @param qn: The name of the service. @type qn: str """ self.__client = client self.__ports = ports self.__qn = qn def __getattr__(self, name): """ Request to access an attribute is forwarded to the L{MethodSelector} for either the I{first} port or the I{default} port (when specified). @param name: The name of a method. @type name: str @return: A L{MethodSelector}. @rtype: L{MethodSelector}. """ default = self.__dp() if default is None: m = self.__find(0) else: m = default return getattr(m, name) def __getitem__(self, name): """ Provides selection of the I{port} by name (string) or index (integer). In cases where only (1) port is defined or a I{default} has been specified, the request is forwarded to the L{MethodSelector}. @param name: The name (or index) of a port. @type name: (int|str) @return: A L{MethodSelector} for the specified port. @rtype: L{MethodSelector}. """ default = self.__dp() if default is None: return self.__find(name) else: return default def __find(self, name): """ Find a I{port} by name (string) or index (integer). @param name: The name (or index) of a port. @type name: (int|str) @return: A L{MethodSelector} for the found port. @rtype: L{MethodSelector}. """ port = None if not len(self.__ports): raise Exception, 'No ports defined: %s' % self.__qn if isinstance(name, int): qn = '%s[%d]' % (self.__qn, name) try: port = self.__ports[name] except IndexError: raise PortNotFound, qn else: qn = '.'.join((self.__qn, name)) for p in self.__ports: if name == p.name: port = p break if port is None: raise PortNotFound, qn qn = '.'.join((self.__qn, port.name)) return MethodSelector(self.__client, port.methods, qn) def __dp(self): """ Get the I{default} port if defined in the I{options}. @return: A L{MethodSelector} for the I{default} port. @rtype: L{MethodSelector}. """ dp = self.__client.options.port if dp is None: return None else: return self.__find(dp) class MethodSelector: """ The B{method} selector is used to select a B{method} by name. @ivar __client: A suds client. @type __client: L{Client} @ivar __methods: A dictionary of methods. @type __methods: dict @ivar __qn: The I{qualified} name of the method (used for logging). @type __qn: str """ def __init__(self, client, methods, qn): """ @param client: A suds client. @type client: L{Client} @param methods: A dictionary of methods. @type methods: dict @param qn: The I{qualified} name of the port. @type qn: str """ self.__client = client self.__methods = methods self.__qn = qn def __getattr__(self, name): """ Get a method by name and return it in an I{execution wrapper}. @param name: The name of a method. @type name: str @return: An I{execution wrapper} for the specified method name. @rtype: L{Method} """ return self[name] def __getitem__(self, name): """ Get a method by name and return it in an I{execution wrapper}. @param name: The name of a method. 
@type name: str @return: An I{execution wrapper} for the specified method name. @rtype: L{Method} """ m = self.__methods.get(name) if m is None: qn = '.'.join((self.__qn, name)) raise MethodNotFound, qn return Method(self.__client, m) class Method: """ The I{method} (namespace) object. @ivar client: A client object. @type client: L{Client} @ivar method: A I{wsdl} method. @type I{wsdl} Method. """ def __init__(self, client, method): """ @param client: A client object. @type client: L{Client} @param method: A I{raw} method. @type I{raw} Method. """ self.client = client self.method = method def __call__(self, *args, **kwargs): """ Invoke the method. """ clientclass = self.clientclass(kwargs) client = clientclass(self.client, self.method) if not self.faults(): try: return client.invoke(args, kwargs) except WebFault, e: return (500, e) else: return client.invoke(args, kwargs) def faults(self): """ get faults option """ return self.client.options.faults def clientclass(self, kwargs): """ get soap client class """ if SimClient.simulation(kwargs): return SimClient else: return SoapClient class SoapClient: """ A lightweight soap based web client B{**not intended for external use} @ivar service: The target method. @type service: L{Service} @ivar method: A target method. @type method: L{Method} @ivar options: A dictonary of options. @type options: dict @ivar cookiejar: A cookie jar. @type cookiejar: libcookie.CookieJar """ def __init__(self, client, method): """ @param client: A suds client. @type client: L{Client} @param method: A target method. @type method: L{Method} """ self.client = client self.method = method self.options = client.options self.cookiejar = CookieJar() def invoke(self, args, kwargs): """ Send the required soap message to invoke the specified method @param args: A list of args for the method invoked. @type args: list @param kwargs: Named (keyword) args for the method invoked. @type kwargs: dict @return: The result of the method invocation. @rtype: I{builtin}|I{subclass of} L{Object} """ timer = metrics.Timer() timer.start() result = None binding = self.method.binding.input soapenv = binding.get_message(self.method, args, kwargs) timer.stop() metrics.log.debug( "message for '%s' created: %s", self.method.name, timer) timer.start() result = self.send(soapenv) timer.stop() metrics.log.debug( "method '%s' invoked: %s", self.method.name, timer) return result def send(self, soapenv): """ Send soap message. @param soapenv: A soap envelope to send. @type soapenv: L{Document} @return: The reply to the sent message. 
@rtype: I{builtin} or I{subclass of} L{Object} """ result = None location = self.location() binding = self.method.binding.input transport = self.options.transport retxml = self.options.retxml nosend = self.options.nosend prettyxml = self.options.prettyxml timer = metrics.Timer() log.debug('sending to (%s)\nmessage:\n%s', location, soapenv) try: self.last_sent(soapenv) plugins = PluginContainer(self.options.plugins) plugins.message.marshalled(envelope=soapenv.root()) if prettyxml: soapenv = soapenv.str() else: soapenv = soapenv.plain() soapenv = soapenv.encode('utf-8') ctx = plugins.message.sending(envelope=soapenv) soapenv = ctx.envelope if nosend: return RequestContext(self, binding, soapenv) request = Request(location, soapenv) request.headers = self.headers() timer.start() reply = transport.send(request) timer.stop() metrics.log.debug('waited %s on server reply', timer) ctx = plugins.message.received(reply=reply.message) reply.message = ctx.reply if retxml: result = reply.message else: result = self.succeeded(binding, reply.message) except TransportError, e: if e.httpcode in (202,204): result = None else: log.error(self.last_sent()) result = self.failed(binding, e) return result def headers(self): """ Get http headers or the http/https request. @return: A dictionary of header/values. @rtype: dict """ action = self.method.soap.action if isinstance(action, unicode): action = action.encode('utf-8') stock = { 'Content-Type' : 'text/xml; charset=utf-8', 'SOAPAction': action } # At this point the action was encoded, but the vanilla suds code takes all injected headers as they are, # potentially implicitly decoding the whole request into a unicode string, if there's any unicode in the # headers (e.g. because you're like me and trying to be clever and Python 3 compatible by using # unicode_literals. This causes all kinds of horrible pains, as I've had to repeatedly notice. We could # silently encode everything here, but I'll go the safer(?) route and just reject all unicode strings. for k, v in self.options.headers.items(): if type(k) != str: raise ValueError("'%s' header has a non-string name, but only (encoded/non-unicode) strings are allowed" % repr(k)) if type(v) != str: raise ValueError("'%s' header has a non-string value, but only (encoded/non-unicode) strings are allowed: %s" % (k, repr(v))) result = dict(stock, **self.options.headers) log.debug('headers = %s', result) return result def succeeded(self, binding, reply): """ Request succeeded, process the reply @param binding: The binding to be used to process the reply. @type binding: L{bindings.binding.Binding} @param reply: The raw reply text. @type reply: str @return: The method result. @rtype: I{builtin}, L{Object} @raise WebFault: On server. """ log.debug('http succeeded:\n%s', reply) plugins = PluginContainer(self.options.plugins) if len(reply) > 0: reply, result = binding.get_reply(self.method, reply) self.last_received(reply) else: result = None ctx = plugins.message.unmarshalled(reply=result) result = ctx.reply if self.options.faults: return result else: return (200, result) def failed(self, binding, error): """ Request failed, process reply based on reason @param binding: The binding to be used to process the reply. 
@type binding: L{suds.bindings.binding.Binding} @param error: The http error message @type error: L{transport.TransportError} """ status, reason = (error.httpcode, tostr(error)) reply = error.fp.read() log.debug('http failed:\n%s', reply) if status == 500: if len(reply) > 0: r, p = binding.get_fault(reply) self.last_received(r) return (status, p) else: return (status, None) if self.options.faults: raise TransportError(reason, status) else: return (status, None) def location(self): p = Unskin(self.options) return p.get('location', self.method.location) def last_sent(self, d=None): key = 'tx' messages = self.client.messages if d is None: return messages.get(key) else: messages[key] = d def last_received(self, d=None): key = 'rx' messages = self.client.messages if d is None: return messages.get(key) else: messages[key] = d class SimClient(SoapClient): """ Loopback client used for message/reply simulation. """ injkey = '__inject' @classmethod def simulation(cls, kwargs): """ get whether loopback has been specified in the I{kwargs}. """ return kwargs.has_key(SimClient.injkey) def invoke(self, args, kwargs): """ Send the required soap message to invoke the specified method @param args: A list of args for the method invoked. @type args: list @param kwargs: Named (keyword) args for the method invoked. @type kwargs: dict @return: The result of the method invocation. @rtype: I{builtin} or I{subclass of} L{Object} """ simulation = kwargs[self.injkey] msg = simulation.get('msg') reply = simulation.get('reply') fault = simulation.get('fault') if msg is None: if reply is not None: return self.__reply(reply, args, kwargs) if fault is not None: return self.__fault(fault) raise Exception('(reply|fault) expected when msg=None') sax = Parser() msg = sax.parse(string=msg) return self.send(msg) def __reply(self, reply, args, kwargs): """ simulate the reply """ binding = self.method.binding.input msg = binding.get_message(self.method, args, kwargs) log.debug('inject (simulated) send message:\n%s', msg) binding = self.method.binding.output return self.succeeded(binding, reply) def __fault(self, reply): """ simulate the (fault) reply """ binding = self.method.binding.output if self.options.faults: r, p = binding.get_fault(reply) self.last_received(r) return (500, p) else: return (500, None) class RequestContext: """ A request context. Returned when the ''nosend'' options is specified. @ivar client: The suds client. @type client: L{Client} @ivar binding: The binding for this request. @type binding: I{Binding} @ivar envelope: The request soap envelope. @type envelope: str """ def __init__(self, client, binding, envelope): """ @param client: The suds client. @type client: L{Client} @param binding: The binding for this request. @type binding: I{Binding} @param envelope: The request soap envelope. @type envelope: str """ self.client = client self.binding = binding self.envelope = envelope def succeeded(self, reply): """ Re-entry for processing a successful reply. @param reply: The reply soap envelope. @type reply: str @return: The returned value for the invoked method. @rtype: object """ options = self.client.options plugins = PluginContainer(options.plugins) ctx = plugins.message.received(reply=reply) reply = ctx.reply return self.client.succeeded(self.binding, reply) def failed(self, error): """ Re-entry for processing a failure reply. @param error: The error returned by the transport. @type error: A suds I{TransportError}. """ return self.client.failed(self.binding, error)
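A hypothetical usage sketch for the Client defined above. The WSDL URL and the getPrice method are placeholders, not part of the suds API; the option names (faults, timeout, nosend) come from the code in this module.

import logging
from suds.client import Client

logging.basicConfig(level=logging.INFO)

url = 'http://example.com/service?wsdl'  # placeholder WSDL location
client = Client(url)

# Options are applied through set_options(), which updates the Options
# object via Unskin (see Client.set_options above).
client.set_options(faults=False, timeout=90)

# Attribute access on client.service is routed through ServiceSelector ->
# PortSelector -> MethodSelector -> Method.__call__. With faults=False,
# the result is an (http status, value) tuple.
status, result = client.service.getPrice('ACME')

# With nosend=True, invoking a method returns a RequestContext instead of
# sending, so the soap envelope can be inspected or sent by other means.
client.set_options(nosend=True)
ctx = client.service.getPrice('ACME')
print(ctx.envelope)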
The child sits alone on the second lowest stair. His heart is open wide to the feeling of despair. Incongruous and obsolete, ridiculous and wrong. He sings the song for comfort but it doesn’t fill the void. He wasn’t who they’d wanted and he ought to be destroyed. Maybe he should say that he is sorry to them now. Sorry for existing and for causing such a row. Or maybe that would make it worse, to show his little face. After all, he doesn’t understand the human race. Maybe if he died he’d be invisible, unseen. And that is so much worse than having parents who aren’t kind. With nobody to comfort him; with nobody to share. And one day soon I’m hoping he will let me in, at last. I’ve built a lot of bridges to him. Now I see the way. But I’m scared he doesn’t want me so I cannot cross today. Waiting for the shadow of the woman I called ‘mummy’. Shadows can’t support him to be trusting; to be wild. They’re made of broke down castles which have all but turned to dust. In the centre of my heart beyond the reach of time and space. Sometimes, in the mirror, I can see his little face.
from nltk.classify import DecisionTreeClassifier, MaxentClassifier, NaiveBayesClassifier, megam from nltk_trainer import basestring from nltk_trainer.classification.multi import AvgProbClassifier classifier_choices = ['NaiveBayes', 'DecisionTree', 'Maxent'] + MaxentClassifier.ALGORITHMS dense_classifiers = set(['ExtraTreesClassifier', 'GradientBoostingClassifier', 'RandomForestClassifier', 'GaussianNB', 'DecisionTreeClassifier']) verbose_classifiers = set(['RandomForestClassifier', 'SVC']) try: import svmlight # do this first since svm module makes ugly errors from nltk.classify.svm import SvmClassifier classifier_choices.append('Svm') except: pass try: from nltk.classify import scikitlearn from sklearn.feature_extraction.text import TfidfTransformer from sklearn.pipeline import Pipeline from sklearn import ensemble, feature_selection, linear_model, naive_bayes, neighbors, svm, tree classifiers = [ ensemble.ExtraTreesClassifier, ensemble.GradientBoostingClassifier, ensemble.RandomForestClassifier, linear_model.LogisticRegression, #linear_model.SGDClassifier, # NOTE: this seems terrible, but could just be the options naive_bayes.BernoulliNB, naive_bayes.GaussianNB, naive_bayes.MultinomialNB, neighbors.KNeighborsClassifier, # TODO: options for nearest neighbors svm.LinearSVC, svm.NuSVC, svm.SVC, tree.DecisionTreeClassifier, ] sklearn_classifiers = {} for classifier in classifiers: sklearn_classifiers[classifier.__name__] = classifier classifier_choices.extend(sorted(['sklearn.%s' % c.__name__ for c in classifiers])) except ImportError as exc: sklearn_classifiers = {} def add_maxent_args(parser): maxent_group = parser.add_argument_group('Maxent Classifier', 'These options only apply when a Maxent classifier is chosen.') maxent_group.add_argument('--max_iter', default=10, type=int, help='maximum number of training iterations, defaults to %(default)d') maxent_group.add_argument('--min_ll', default=0, type=float, help='stop classification when average log-likelihood is less than this, default is %(default)d') maxent_group.add_argument('--min_lldelta', default=0.1, type=float, help='''stop classification when the change in average log-likelihood is less than this. 
default is %(default)f''') def add_decision_tree_args(parser): decisiontree_group = parser.add_argument_group('Decision Tree Classifier', 'These options only apply when the DecisionTree classifier is chosen') decisiontree_group.add_argument('--entropy_cutoff', default=0.05, type=float, help='default is 0.05') decisiontree_group.add_argument('--depth_cutoff', default=100, type=int, help='default is 100') decisiontree_group.add_argument('--support_cutoff', default=10, type=int, help='default is 10') sklearn_kwargs = { # ensemble 'ExtraTreesClassifier': ['criterion', 'max_feats', 'depth_cutoff', 'n_estimators'], 'GradientBoostingClassifier': ['learning_rate', 'max_feats', 'depth_cutoff', 'n_estimators'], 'RandomForestClassifier': ['criterion', 'max_feats', 'depth_cutoff', 'n_estimators'], # linear_model 'LogisticRegression': ['C','penalty'], # naive_bayes 'BernoulliNB': ['alpha'], 'MultinomialNB': ['alpha'], # svm 'LinearSVC': ['C', 'loss', 'penalty'], 'NuSVC': ['nu', 'kernel'], 'SVC': ['C', 'kernel'], # tree 'DecisionTreeClassifier': ['criterion', 'max_feats', 'depth_cutoff'], } def add_sklearn_args(parser): if not sklearn_classifiers: return sklearn_group = parser.add_argument_group('sklearn Classifiers', 'These options are used by one or more sklearn classification algorithms.') sklearn_group.add_argument('--alpha', type=float, default=1.0, help='smoothing parameter for naive bayes classifiers, default is %(default)s') sklearn_group.add_argument('--C', type=float, default=1.0, help='penalty parameter, default is %(default)s') sklearn_group.add_argument('--criterion', choices=['gini', 'entropy'], default='gini', help='Split quality function, default is %(default)s') sklearn_group.add_argument('--kernel', default='rbf', choices=['linear', 'poly', 'rbf', 'sigmoid', 'precomputed'], help='kernel type for support vector machine classifiers, default is %(default)s') sklearn_group.add_argument('--learning_rate', type=float, default=0.1, help='learning rate, default is %(default)s') sklearn_group.add_argument('--loss', choices=['l1', 'l2'], default='l2', help='loss function, default is %(default)s') sklearn_group.add_argument('--n_estimators', type=int, default=10, help='Number of trees for Decision Tree ensembles, default is %(default)s') sklearn_group.add_argument('--nu', type=float, default=0.5, help='upper bound on fraction of training errors & lower bound on fraction of support vectors, default is %(default)s') sklearn_group.add_argument('--penalty', choices=['l1', 'l2'], default='l2', help='norm for penalization, default is %(default)s') sklearn_group.add_argument('--tfidf', default=False, action='store_true', help='Use TfidfTransformer') # for mapping existing args to sklearn args sklearn_keys = { 'max_feats': 'max_features', 'depth_cutoff': 'max_depth' } def make_sklearn_classifier(algo, args): name = algo.split('.', 1)[1] kwargs = {} for key in sklearn_kwargs.get(name, []): val = getattr(args, key, None) if val: kwargs[sklearn_keys.get(key, key)] = val if args.trace and kwargs: print('training %s with %s' % (algo, kwargs)) if args.trace and name in verbose_classifiers: kwargs['verbose'] = True return sklearn_classifiers[name](**kwargs) def make_classifier_builder(args): if isinstance(args.classifier, basestring): algos = [args.classifier] else: algos = args.classifier for algo in algos: if algo not in classifier_choices: raise ValueError('classifier %s is not supported' % algo) classifier_train_args = [] for algo in algos: classifier_train_kwargs = {} if algo == 'DecisionTree': 
classifier_train = DecisionTreeClassifier.train classifier_train_kwargs['binary'] = False classifier_train_kwargs['entropy_cutoff'] = args.entropy_cutoff classifier_train_kwargs['depth_cutoff'] = args.depth_cutoff classifier_train_kwargs['support_cutoff'] = args.support_cutoff classifier_train_kwargs['verbose'] = args.trace elif algo == 'NaiveBayes': classifier_train = NaiveBayesClassifier.train elif algo == 'Svm': classifier_train = SvmClassifier.train elif algo.startswith('sklearn.'): # TODO: support many options for building an estimator pipeline pipe = [('classifier', make_sklearn_classifier(algo, args))] tfidf = getattr(args, 'tfidf', None) penalty = getattr(args, 'penalty', None) if tfidf and penalty: if args.trace: print('using tfidf transformer with norm %s' % penalty) pipe.insert(0, ('tfidf', TfidfTransformer(norm=penalty))) sparse = pipe[-1][1].__class__.__name__ not in dense_classifiers if not sparse and args.trace: print('using dense matrix') value_type = getattr(args, 'value_type', 'bool') if value_type == 'bool' and not tfidf: dtype = bool elif value_type == 'int' and not tfidf: dtype = int else: dtype = float if args.trace: print('using dtype %s' % dtype.__name__) classifier_train = scikitlearn.SklearnClassifier(Pipeline(pipe), dtype=dtype, sparse=sparse).train else: if algo != 'Maxent': classifier_train_kwargs['algorithm'] = algo if algo == 'MEGAM': megam.config_megam() classifier_train = MaxentClassifier.train classifier_train_kwargs['max_iter'] = args.max_iter classifier_train_kwargs['min_ll'] = args.min_ll classifier_train_kwargs['min_lldelta'] = args.min_lldelta classifier_train_kwargs['trace'] = args.trace classifier_train_args.append((algo, classifier_train, classifier_train_kwargs)) def trainf(train_feats): classifiers = [] for algo, classifier_train, train_kwargs in classifier_train_args: if args.trace: print('training %s classifier' % algo) classifiers.append(classifier_train(train_feats, **train_kwargs)) if len(classifiers) == 1: return classifiers[0] else: return AvgProbClassifier(classifiers) return trainf #return lambda(train_feats): classifier_train(train_feats, **classifier_train_kwargs)
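A hypothetical wiring sketch for the helpers above: build an argparse parser, attach the option groups, and use make_classifier_builder to train a single NaiveBayes classifier. The toy featuresets are invented sample data, not from nltk_trainer.

import argparse

parser = argparse.ArgumentParser(description='train a classifier')
parser.add_argument('--classifier', nargs='+', default=['NaiveBayes'],
	choices=classifier_choices, help='classifier algorithm(s) to train')
parser.add_argument('--trace', default=1, type=int)
parser.add_argument('--value_type', default='bool')
add_maxent_args(parser)
add_decision_tree_args(parser)
add_sklearn_args(parser)

args = parser.parse_args(['--classifier', 'NaiveBayes'])
trainf = make_classifier_builder(args)

# toy featuresets: (feature dict, label) pairs
train_feats = [({'word': 'good'}, 'pos'), ({'word': 'bad'}, 'neg')]
classifier = trainf(train_feats)
print(classifier.classify({'word': 'good'}))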
# Project: utility library
# Module: logging module
# Author: Huang Tao
# License: GPL
# Email: [email protected]
# Created: 2019-07-20 22:26

import logging
import sys
import os
from orange import Path
from .datetime_ import datetime

today = datetime.now() % '%F'
name = sys.argv[0] or 'test'
logger = logging.getLogger(name)

if os.name == 'nt':
    path = Path(f'%localappdata%/logs/{today}')
else:
    path = Path(f'~/.logs/{today}')
path.ensure()
path = (path / name.split(os.sep)[-1]).with_suffix('.log')

log = logger.log
debug = logger.debug
info = logger.info
warning = logger.warning
error = logger.error
fatal = logger.fatal
critical = logger.critical
warn = logger.warn

logging.basicConfig(format='%(asctime)s %(levelname)-8s: %(message)s',
                    filename=str(path), datefmt='%F %T')


def set_debug(level=logging.DEBUG):
    logger.setLevel(level)


def set_verbose(fmt='%(message)s'):
    if logger.level == 0 or logger.level > logging.INFO:
        logger.setLevel(logging.INFO)
    handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(logging.INFO)
    formatter = logging.Formatter(fmt=fmt)
    handler.setFormatter(formatter)
    logger.addHandler(handler)
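A minimal usage sketch, assuming the module above is importable as orange.logging_ (the import path is a guess based on the package imports; adjust to the real location).

from orange.logging_ import info, warning, set_verbose, set_debug  # hypothetical import path

set_verbose()   # echo INFO-level messages to stdout in addition to the log file
info('service started')
warning('disk usage at %d%%', 91)
set_debug()     # lower the logger threshold to DEBUG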
What more could you ask for from a licensed plumber in Marina? We provide top-quality plumbing services because we want to have a hand in the efficiency of every home and business. We are detail-oriented plumbers who strive for perfection. With our efforts you can be sure that your appliances are installed correctly, your lines are flowing smoothly and your drains are clean and clear. Running a business? You are in luck! We have commercial plumbers ready to deliver the best commercial plumbing services in all of Salinas and its surrounding areas. We provide plumbing services to commercial properties no matter the size of the business, and we treat your business with the same care and attention as a single-family home. We understand the demands of running a business and the daily effort it takes to be successful. We want to serve your business so you can focus your attention on the rest of your responsibilities. Leave the plumbing problems to Salinas Plumber Pros!
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.http import JsonResponse
from django.views.generic import TemplateView

from .mixins import JSONView
from .models import Profile, Note, Game
from .serializer import activity_serializer
from activity.models import Activity, Verb


class HomeView(TemplateView):
    template_name = 'extra/index.html'


class ActivityResourceView(JSONView):
    serializer = activity_serializer

    def get_context_data(self, **kwargs):
        context = super(ActivityResourceView, self).get_context_data(**kwargs)
        context['objects'] = []
        filters = self.request.GET.dict()
        query_set = Activity.objects.filter(**filters)
        fields = ['pk', 'actor', 'object_ref', 'target', 'verb',
                  'published_verbose', 'title', 'summary']
        for obj in query_set:
            # Serialize each activity so the JSON response carries plain
            # data rather than model instances.
            obj = self.serializer.serialize(obj, fields)
            context['objects'].append(obj)
        return context


class DrumKitView(TemplateView):
    template_name = 'drumkit/index.html'

    def post(self, request, *args, **kwargs):
        note = request.POST.get('note')
        ip_address = self.get_client_ip()
        verb = Verb.objects.get(name='play')
        game = Game.objects.get(title='Drumkit')
        note, _ = Note.objects.get_or_create(title=note)
        profiles = Profile.objects.filter(ip_address=ip_address)
        if profiles.count() == 0:
            user = Profile.objects.create_anon_user(ip_address).owner
        else:
            user = profiles.first().owner
        activity = Activity.objects.create(
            actor=user,
            object_ref=note,
            verb=verb,
            target=game,
            title='{} {} {} {}'.format(
                user.username, verb.name, note.title, game.title)
        )
        return JsonResponse({'activity': activity.pk})

    def get_client_ip(self, **kwargs):
        request = self.request
        x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
        if x_forwarded_for:
            ip = x_forwarded_for.split(',')[0]
        else:
            ip = request.META.get('REMOTE_ADDR')
        return ip
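A hypothetical urls.py wiring for the three views above; the URL patterns, names, and the extra.views module path are assumptions, not taken from the project.

from django.conf.urls import url

from extra.views import HomeView, ActivityResourceView, DrumKitView  # assumed module path

urlpatterns = [
    url(r'^$', HomeView.as_view(), name='home'),
    url(r'^activity/$', ActivityResourceView.as_view(), name='activity'),
    url(r'^drumkit/$', DrumKitView.as_view(), name='drumkit'),
]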
It's not every day that Green Day gets referenced in the hip-hop world, though 2018 has yielded a couple of notable instances. For starters, J. Cole boasted he'd "get the bread like I am Green Day" on "Motiv8." More recently, Vince Staples looked to the influential punk-rock band for inspiration for his FM! album cover. Upon release, many noted the similarities between Vince's album cover and that of Green Day's 1994 album Dookie. The FM! artwork, designed by Verdy, has now caught the attention of Green Day's frontman, the iconic Billie Joe Armstrong. Clearly he is a fan of Staples' work, as he chose to view the creative similarity for what it was: an homage. Taking to Instagram, Armstrong gave Staples the official stamp of approval. "I really like this," he writes, a man of few words. As for Staples, his thoughts on Green Day remain unclear for now, though given the eclectic sound of his music, it wouldn't be surprising to count the band among his myriad influences.
# -*- coding: utf-8 -*-
"""
    templatetricks.timesince_filter
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    timesince filter

    http://flask.pocoo.org/snippets/33/
"""
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.dirname(__file__))))

from datetime import datetime

from app import app


@app.template_filter()
def timesince(dt, default="just now"):
    """
    Returns string representing "time since"
    e.g. 3 days ago, 5 hours ago etc.
    """
    now = datetime.utcnow()
    diff = now - dt

    # Use floor division so the periods stay integers on Python 3 as well.
    periods = (
        (diff.days // 365, "year", "years"),
        (diff.days // 30, "month", "months"),
        (diff.days // 7, "week", "weeks"),
        (diff.days, "day", "days"),
        (diff.seconds // 3600, "hour", "hours"),
        (diff.seconds // 60, "minute", "minutes"),
        (diff.seconds, "second", "seconds"),
    )

    for period, singular, plural in periods:
        if period:
            return "%d %s ago" % (period, singular if period == 1 else plural)

    return default
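A quick usage sketch for the filter, reusing the app object imported above; the template string and timestamp are invented.

from datetime import datetime, timedelta
from flask import render_template_string

with app.app_context():
    stamp = datetime.utcnow() - timedelta(hours=5)
    # The filter is already registered on app via @app.template_filter().
    print(render_template_string('posted {{ ts|timesince }}', ts=stamp))  # posted 5 hours ago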
The dust is settling on Sauerkrautathon; the sauerkraut we made weighed in on 26th October at a stonking 359.6kg, and every last shred was given away by the end of the following week. Jo and I are sorting through photos and documents to provide Guinness with the information they require to prove our claim to have made the largest recorded amount of bacterially fermented cabbage. There are pictures on our Sauerkrautathon.com website of the day itself and the weigh-in, but I think one of my favourites is this one. Jo met Mara when she attended Sandor Katz's fermentation residential in Tennessee in April. Mara runs her own fermentation business in Denver and was sufficiently intrigued by our idea to join us for the week. She cooked us some amazing food, briefed our fermenting facilitators and encouraged our choppers. She stayed calm, thoughtful and good-humoured throughout, and then sealed the vats at the end of the day. Next to her is Pao, who runs her own fermenting business, PaoPickles; they had only met a few days before, when Mara taught a fermenting class in Hackney. We all share a fascination with fermentation and love to share our enthusiasm. This spirit of collaboration, together with the other fermenters who came and helped on the day, made Sauerkrautathon so much more than a race for a record. We were all the richer for taking part.
# XMPP server class from twisted.application import service from twisted.python import components from twisted.internet import reactor from twisted.words.xish import domish, xpath, xmlstream from twisted.words.protocols.jabber import jid from punjab.xmpp import ns SASL_XMLNS = 'urn:ietf:params:xml:ns:xmpp-sasl' COMP_XMLNS = 'http://jabberd.jabberstudio.org/ns/component/1.0' STREAMS_XMLNS = 'urn:ietf:params:xml:ns:xmpp-streams' from zope.interface import Interface, implements # interfaces class IXMPPServerService(Interface): pass class IXMPPServerFactory(Interface): pass class IXMPPFeature(Interface): pass class IXMPPAuthenticationFeature(IXMPPFeature): pass class IQAuthFeature(object): """ XEP-0078 : http://www.xmpp.org/extensions/xep-0078.html""" implements(IXMPPAuthenticationFeature) IQ_GET_AUTH = xpath.internQuery(ns.IQ_GET_AUTH) IQ_SET_AUTH = xpath.internQuery(ns.IQ_SET_AUTH) def associateWithStream(self, xs): """Add a streamm start event observer. And do other things to associate with the xmlstream if necessary. """ self.xmlstream = xs self.xmlstream.addOnetimeObserver(xmlstream.STREAM_START_EVENT, self.streamStarted) def disassociateWithStream(self, xs): self.xmlstream.removeObserver(self.IQ_GET_AUTH, self.authRequested) self.xmlstream.removeObserver(self.IQ_SET_AUTH, self.auth) self.xmlstream = None def streamStarted(self, elm): """ Called when client sends stream:stream """ self.xmlstream.addObserver(self.IQ_GET_AUTH, self.authRequested) self.xmlstream.addObserver(self.IQ_SET_AUTH, self.auth) def authRequested(self, elem): """Return the supported auth type. """ resp = domish.Element(('iq', ns.NS_CLIENT)) resp['type'] = 'result' resp['id'] = elem['id'] q = resp.addElement("query", ns.NS_AUTH) q.addElement("username", content=str(elem.query.username)) q.addElement("digest") q.addElement("password") q.addElement("resource") self.xmlstream.send(resp) def auth(self, elem): """Do not auth the user, anyone can log in""" username = elem.query.username.__str__() resource = elem.query.resource.__str__() user = jid.internJID(username+'@'+self.xmlstream.host+'/'+resource) resp = domish.Element(('iq', ns.NS_CLIENT)) resp['type'] = 'result' resp['id'] = elem['id'] self.xmlstream.send(resp) self.xmlstream.authenticated(user) class XMPPServerProtocol(xmlstream.XmlStream): """ Basic dummy server protocol """ host = "localhost" user = None initialized = False id = 'Punjab123' features = [IQAuthFeature()] delay_features = 0 def connectionMade(self): """ a client connection has been made """ xmlstream.XmlStream.connectionMade(self) self.bootstraps = [ (xmlstream.STREAM_CONNECTED_EVENT, self.streamConnected), (xmlstream.STREAM_START_EVENT, self.streamStarted), (xmlstream.STREAM_END_EVENT, self.streamEnded), (xmlstream.STREAM_ERROR_EVENT, self.streamErrored), ] for event, fn in self.bootstraps: self.addObserver(event, fn) # load up the authentication features for f in self.features: if IXMPPAuthenticationFeature.implementedBy(f.__class__): f.associateWithStream(self) def send(self, obj): if not self.initialized: self.transport.write("""<?xml version="1.0"?>\n""") self.initialized = True xmlstream.XmlStream.send(self, obj) def streamConnected(self, elm): print "stream connected" def streamStarted(self, elm): """stream has started, we need to respond """ if self.delay_features == 0: self.send("""<stream:stream xmlns='%s' xmlns:stream='http://etherx.jabber.org/streams' from='%s' id='%s' version='1.0' xml:lang='en'><stream:features><register 
xmlns='http://jabber.org/features/iq-register'/></stream:features>""" % (ns.NS_CLIENT, self.host, self.id,)) else: self.send("""<stream:stream xmlns='%s' xmlns:stream='http://etherx.jabber.org/streams' from='%s' id='%s' version='1.0' xml:lang='en'>""" % (ns.NS_CLIENT, self.host, self.id,)) reactor.callLater(self.delay_features, self.send, """<stream:features><register xmlns='http://jabber.org/features/iq-register'/></stream:features>""") def streamEnded(self, elm): self.send("""</stream:stream>""") def streamErrored(self, elm): self.send("""<stream:error/></stream:stream>""") def authenticated(self, user): """User has authenticated. """ self.user = user def onElement(self, element): try: xmlstream.XmlStream.onElement(self, element) except Exception, e: print "Exception!", e raise e def onDocumentEnd(self): pass def connectionLost(self, reason): xmlstream.XmlStream.connectionLost(self, reason) pass def triggerChallenge(self): """ send a fake challenge for testing """ self.send("""<challenge xmlns='urn:ietf:params:xml:ns:xmpp-sasl'>cmVhbG09ImNoZXNzcGFyay5jb20iLG5vbmNlPSJ0YUhIM0FHQkpQSE40eXNvNEt5cFlBPT0iLHFvcD0iYXV0aCxhdXRoLWludCIsY2hhcnNldD11dGYtOCxhbGdvcml0aG09bWQ1LXNlc3M=</challenge>""") def triggerInvalidXML(self): """Send invalid XML, to trigger a parse error.""" self.send("""<parse error=>""") self.streamEnded(None) def triggerStreamError(self): """ send a stream error """ self.send(""" <stream:error xmlns:stream='http://etherx.jabber.org/streams'> <policy-violation xmlns='urn:ietf:params:xml:ns:xmpp-streams'/> <text xmlns='urn:ietf:params:xml:ns:xmpp-streams' xml:lang='langcode'>Error text</text> <arbitrary-extension val='2'/> </stream:error>""") self.streamEnded(None) class XMPPServerFactoryFromService(xmlstream.XmlStreamFactory): implements(IXMPPServerFactory) protocol = XMPPServerProtocol def __init__(self, service): xmlstream.XmlStreamFactory.__init__(self) self.service = service def buildProtocol(self, addr): self.resetDelay() xs = self.protocol() xs.factory = self for event, fn in self.bootstraps: xs.addObserver(event, fn) return xs components.registerAdapter(XMPPServerFactoryFromService, IXMPPServerService, IXMPPServerFactory) class XMPPServerService(service.Service): implements(IXMPPServerService)
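For context, a minimal launch sketch for the dummy server defined above (assumptions: the module is importable as-is and port 5222, the conventional XMPP client port, is free; everything else comes from the code above, including the registerAdapter() call that lets the interface return a factory):

# Hypothetical launcher, Python 2 like the module itself.
from twisted.internet import reactor

service = XMPPServerService()
factory = IXMPPServerFactory(service)  # resolved through the registerAdapter() call above
reactor.listenTCP(5222, factory)
reactor.run()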
import sys sys.path.insert(1,"../../../") import h2o from tests import pyunit_utils def distribution_behaviorGBM(): #Log.info("==============================") #Log.info("Default Behavior - Gaussian") #Log.info("==============================") eco = h2o.import_file(path=pyunit_utils.locate("smalldata/gbm_test/ecology_model.csv")) # 0/1 response: expect gaussian eco_model = h2o.gbm(x=eco[2:13], y=eco["Angaus"]) # more than 2 integers for response: expect gaussian cars = h2o.import_file(path=pyunit_utils.locate("smalldata/junit/cars.csv")) cars_model = h2o.gbm(x=cars[3:7], y=cars["cylinders"]) #Log.info("==============================") #Log.info("Gaussian Behavior") #Log.info("==============================") # 0/1 response: expect gaussian eco_model = h2o.gbm(x=eco[2:13], y=eco["Angaus"], distribution="gaussian") # character response: expect error try: eco_model = h2o.gbm(x=eco[1:8], y=eco["Method"], distribution="gaussian") assert False, "expected an error" except EnvironmentError: assert True #Log.info("==============================") #Log.info("Bernoulli Behavior") #Log.info("==============================") # 0/1 response: expect bernoulli eco_model = h2o.gbm(x=eco[2:13], y=eco["Angaus"].asfactor(), distribution="bernoulli") # 2 level character response: expect bernoulli tree = h2o.import_file(path=pyunit_utils.locate("smalldata/junit/test_tree_minmax.csv")) tree_model = h2o.gbm(x=tree[0:3], y=tree["response"], distribution="bernoulli", min_rows=1) # more than two integers for response: expect error try: cars_mod = h2o.gbm(x=cars[3:7], y=cars["cylinders"], distribution="bernoulli") assert False, "expected an error" except EnvironmentError: assert True # more than two character levels for response: expect error try: eco_model = h2o.gbm(x=eco[0:8], y=eco["Method"], distribution="bernoulli") assert False, "expected an error" except EnvironmentError: assert True #Log.info("==============================") #Log.info("Multinomial Behavior") #Log.info("==============================") # more than two integers for response: expect multinomial cars_model = h2o.gbm(x=cars[3:7], y=cars["cylinders"].asfactor(), distribution="multinomial") # more than two character levels for response: expect multinomial eco_model = h2o.gbm(x=eco[0:8], y=eco["Method"], distribution="multinomial") if __name__ == "__main__": pyunit_utils.standalone_test(distribution_behaviorGBM) else: distribution_behaviorGBM()
import argparse import struct import sys try: from elftools.elf.elffile import ELFFile except ImportError: print('pytelftools missing, install to run this script', file=sys.stderr) print('https://github.com/eliben/pyelftools#installing', file=sys.stderr) sys.exit(1) class Colors: RED = '\033[91m' BLUE = '\033[94m' GREEN = '\033[92m' END = '\033[0m' CORE = 0x20 param_type_to_str_dict = { 0x0 | 0x0 << 2 | 0x1 << 3: 'PARAM_UINT8', 0x0 | 0x0 << 2 | 0x0 << 3: 'PARAM_INT8', 0x1 | 0x0 << 2 | 0x1 << 3: 'PARAM_UIN16', 0x1 | 0x0 << 2 | 0x0 << 3: 'PARAM_INT16', 0x2 | 0x0 << 2 | 0x1 << 3: 'PARAM_UINT32', 0x2 | 0x0 << 2 | 0x0 << 3: 'PARAM_INT32', 0x2 | 0x1 << 2 | 0x0 << 3: 'PARAM_FLOAT' } def param_type_to_str(t: int) -> str: extra = str() if t & (1 << 5): # PARAM_CORE set extra = ' | PARAM_CORE' if t & (1 << 6): # PARAM_RONLY set extra += ' | PARAM_RONLY' int_type = t & ~(1 << 5 | 1 << 6) return '{:12}{}'.format(param_type_to_str_dict[int_type], extra) log_type_to_str_dict = { 0x1: 'LOG_UINT8', 0x2: 'LOG_INT8', 0x3: 'LOG_UIN16', 0x4: 'LOG_INT16', 0x5: 'LOG_UINT32', 0x6: 'LOG_INT32', 0x7: 'LOG_FLOAT', 0x8: 'LOG_FP16' } def log_type_to_str(t: int) -> str: extra = str() if t & (1 << 5): # LOG_CORE set extra = ' | LOG_CORE' if t & (1 << 6): # BY_FUNCTION set extra += ' | BY_FUNCTION' int_type = t & ~(1 << 5 | 1 << 6) return '{:12}{}'.format(log_type_to_str_dict[int_type], extra) def process_file(filename, list_params: bool, list_logs: bool, core: bool): with open(filename, 'rb') as f: parameters = check_structs(f, 'param', core) if list_params: for key in sorted(parameters.keys()): t = parameters[key] print('{:25}\t{}'.format(key, param_type_to_str(t))) logs = check_structs(f, 'log', core) if list_logs: for key in sorted(logs.keys()): t = logs[key] print('{:25}\t{}'.format(key, log_type_to_str(t))) n_logs = Colors.GREEN + str(len(logs.keys())) + Colors.END n_params = Colors.BLUE + str(len(parameters.keys())) + Colors.END print('{} parameters and {} log vars in elf'.format(n_params, n_logs)) def get_offset_of(elf, addr): for seg in elf.iter_segments(): if seg.header['p_type'] != 'PT_LOAD': continue # If the symbol is inside the range of a LOADed segment, calculate the # file offset by subtracting the virtual start address and adding the # file offset of the loaded section(s) if addr >= seg['p_vaddr'] and addr < seg['p_vaddr'] + seg['p_filesz']: return addr - seg['p_vaddr'] + seg['p_offset'] return None def get_offset_of_symbol(elf, name): section = elf.get_section_by_name('.symtab') sym = section.get_symbol_by_name(name)[0] if not sym: print('symbol %s not found' % name, file=sys.stderr) sys.exit(1) return get_offset_of(elf, sym['st_value']) def check_structs(stream, what: str, core: bool) -> dict: elf = ELFFile(stream) offset = get_offset_of_symbol(elf, '_{}_start'.format(what)) stop_offset = get_offset_of_symbol(elf, '_{}_stop'.format(what)) name_type_dict = {} name_maxlen = 25 struct_len = 12 group_bit = 0x1 << 7 start_bit = 0x1 while offset < stop_offset: elf.stream.seek(offset) # # Parsing log or param, first unpack the struct: # struct [param_s|log_s] { # uint8_t type; # char * name; # void * address; # }; # # We want the type and the name. 
# buffer = elf.stream.read(struct_len) t, addr = struct.unpack('@Bxxxixxxx', buffer) # # Next, convert address of name to offset in elf # addr = get_offset_of(elf, addr) # # And read the name from that offset # elf.stream.seek(addr) name = ''.join(iter(lambda: stream.read(1).decode('ascii'), '\x00')) # # Check if this is start of a group # if t & group_bit != 0 and t & start_bit != 0: current_group = name elif t & group_bit == 0: name = '%s.%s' % (current_group, name) if name in name_type_dict: print('%sDuplicate parameter detected!%s (%s)' % (Colors.RED, Colors.END, name), file=sys.stderr) sys.exit(1) else: # # If core only is specified we check if the core flag is set # if not core or (t & CORE) != 0: name_type_dict[name] = t if len(name) > name_maxlen: print('%sName too long!%s (%s > %d)' % (Colors.RED, Colors.END, name, name_maxlen), file=sys.stderr) sys.exit(1) offset += struct_len return name_type_dict if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--list-params', action='store_true') parser.add_argument('--list-logs', action='store_true') parser.add_argument('--core', action='store_true') parser.add_argument('filename', nargs=argparse.REMAINDER) args = parser.parse_args() if args.filename: process_file(args.filename[0], args.list_params, args.list_logs, args.core) else: sys.exit(1)
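A quick self-check of the '@Bxxxixxxx' record layout used above, as a sketch (assumption: native alignment yields the same 12-byte layout as the 32-bit target, which holds on common platforms since the pad bytes are explicit):

import struct

# One param/log record: uint8 type, 3 pad bytes, int32 name pointer, 4 pad bytes.
fmt = '@Bxxxixxxx'
record = struct.pack(fmt, 0x81, 0x08001234)  # 0x81 = group_bit | start_bit
t, addr = struct.unpack(fmt, record)
assert (t, addr) == (0x81, 0x08001234)
assert struct.calcsize(fmt) == 12  # matches struct_len in check_structs()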
A full-bodied and bold coffee for Keurig® single-serve coffee makers. Order Emeril's® coffee K-Cup® pods online, a Rainforest Alliance Certified™ coffee. A chef's schedule sometimes starts as early as 6:00am. Receiving product, analyzing the menu and meetings with staff all happen before my first cup of coffee. Those who know me best know that I like to start my day with a robust, sumptuous, flavourful cup of coffee. Good coffee is very important to me, which is why I wanted to share this super-premium coffee with you. Sourced from all over the world, these coffees represent the highest standard of excellence. Each bean is skillfully blended and roasted to perfection so that we can deliver the best-tasting cup possible. Enjoy! Rated 5 out of 5 by Coffee time from Definitely coffee A good cup of Joe caffeine. Arrived sooner than listed. Rated 5 out of 5 by Gayle from Excellent flavour, never bitter. I have tried other top brands, but always return to Emeril's Bold. It has a smooth, dark flavour that is never bitter or harsh. Rated 5 out of 5 by Reynald from Fantastic Very strong coffee and great taste, I recommend it! Rated 5 out of 5 by Keeeez from Probably my favorite Very good but increasingly hard to find in stores. Rated 5 out of 5 by JavaQueen from Delicious!!! When the Keurig machine first came out I tried many trays of different brands of coffee samples looking for just the right one. Then I tried Emeril's Big Easy Bold!!! I knew as soon as it hit my taste buds I had hit the jackpot!!!! Big Easy Bold is aptly named because of its bold presence yet smooth, mellow flavor...a combination of perfect proportions that make it very easy to love. Rated 5 out of 5 by NanaC from Best dark roast coffee ever!!!! Absolutely love, love, love Emeril's Big Easy Bold coffee!! It's the only dark roast coffee I buy! Very hard to find in stores???
''' Created on Jan 10, 2016 @author: Julia M. Winchester ''' import plython import DNE import OPC import RFI import implicitfair from collections import defaultdict class TopoMesh(plython.PlythonMesh): """A class for creating and interacting with triangulated polygon meshes and topographic variables. Class inherits from plython.PlythonMesh. Creates a list of Numpy ndarray objects containing triangulated polygon mesh data if provided with a path to a .ply file. Topographic variables are instanced as None and take the data types specified below when generated using the ProcessSurface method. Args: filepath (str): Path to a .ply polygon mesh file Attributes: mesh (list): Triangulated polygon mesh data. Contains three ndarrays: vertex XYZ points, polygons with component vertex XYZ points, and polygons with component vertex indices. nvert (int): Number of vertices in mesh. nface (int): Number of polygons in mesh. vertices (ndarray): Vertex XYZ points for mesh. faces (ndarray): Polygons with component vertex indices for mesh. triverts (ndarray): Polygons with component vertex XYZ points for mesh. DNE (float): Total Dirichlet normal energy of mesh. DNEscalars (ndarray): Scalars for visualizing DNE. conditionfaces (list): List of polygon face indices with high matrix condition numbers. boundaryfaces (list): List of polygon face indices forming mesh edges. outlierfaces (list): List of polygon face indices removed as outliers, with DNE values and face areas. RFI (float): Relief index of mesh (surface area/projected area). surfarea (float): 3D surface area of mesh. projarea (float): 2D surface area of mesh projected on XY plane. OPCR (float): Orientation patch count rotated for mesh. OPClist (list): Orientation patch counts at 8 rotations for mesh. OPCscalars: Scalars for visualizing OPC. """ def __init__(self, filepath=""): super(TopoMesh,self).__init__(filepath) self.DNE = None self.DNEscalars = None self.conditionfaces = None self.boundaryfaces = None self.outlierfaces = None self.RFI = None self.surfarea = None self.projarea = None self.linelen = None self.bluepixie = None self.redpixie = None self.pixelratio = None self.OPCR = None self.OPClist = None self.OPCscalars = None def GenerateDNE(self, dosmooth, smoothit, smoothstep, docondition, dooutlier, outlierperc, outliertype, filename): """Calculates Dirichlet normal energy (surface bending) from mesh data. For details on args, see DNE.MeshDNE class. Args: doSmooth (bool): If true, do implicit fair smooth. SmoothIt (int): Iterations of smoothing SmoothStep (float): Smoothing step size. doCondition (bool): If true, do polygon condition number control. doOutlier (bool): If true, do outlier removal. OutlierPerc (float): Outlier percentile. OutlierType (bool): If true, outliers as energy*area. If false, outliers as energy. 
""" self.check_for_mesh(self.GenerateDNE) surfcurv = DNE.MeshDNE(self, dosmooth, smoothit, smoothstep, docondition, dooutlier, outlierperc, outliertype, filename) self.DNE = surfcurv.DNE self.DNEscalars = surfcurv.equantity self.conditionfaces = surfcurv.high_condition_faces self.boundaryfaces = surfcurv.boundary_faces self.outlierfaces = surfcurv.outlier_faces def GenerateRFI(self): """Calculates relief index (surface relief) from mesh data.""" self.check_for_mesh(self.GenerateRFI) surfrelf = RFI.MeshRFI(self) self.RFI = surfrelf.RFI self.surfarea = surfrelf.surfarea self.projarea = surfrelf.projarea self.linelen = surfrelf.linelen self.bluepixie = surfrelf.bluepixie self.redpixie = surfrelf.redpixie self.pixelratio = surfrelf.pixelratio def GenerateOPCR(self, minpatch): """Calculates orientation patch count rotated (surface complexity) from mesh data. For details on args see OPC.MeshOPCR class. Args: minpatch (int): Minimum size for counting patches. """ self.check_for_mesh(self.GenerateOPCR) surfcomp = OPC.MeshOPCR(self, minpatch) self.OPCR = surfcomp.OPCR self.OPClist = surfcomp.opc_list self.OPCscalars = surfcomp.colormap_list[0] def implicit_fair_mesh(self, iterations, step): self.get_vert_tri_dict() faired_vertices = implicitfair.smooth(self.vertices, self.faces, iterations, step, self.vert_tri_dict) self.vertices = faired_vertices self.mesh[0] = faired_vertices for i in range(len(self.triverts)): self.triverts[i] = self.vertices[self.faces[i]] self.mesh[1] = self.triverts def get_vert_tri_dict(self): """Generates dictionary associating vertex index keys with related polygon index values.""" self.vert_tri_dict = defaultdict(list) for findex, face in enumerate(self.faces): for vertex in face: self.vert_tri_dict[vertex].append(findex) def check_for_mesh(self, function="function"): if self.mesh == None: raise ValueError('A mesh has not been imported, %s cannot proceed.' % function)
A simple aïoli pairs perfectly with Fish Chowder Croquettes. Combine garlic and 1 cup oil in a small saucepan over medium-low heat. Cook 5 minutes or until garlic starts to sizzle. Remove from heat; let cool completely. Combine vinegar, salt, and egg yolks in a blender. Process until combined. Combine garlic oil and remaining 2 cups oil in a 4-cup glass measuring cup. With blender running, slowly drizzle oil mixture into blender; process 2 minutes. Transfer mixture to a large bowl; stir in rum.
#!/usr/bin/env python #-*- coding: utf-8 -*- ########################################################################### ## ## ## Copyrights Frédéric Rodrigo 2014 ## ## ## ## This program is free software: you can redistribute it and/or modify ## ## it under the terms of the GNU General Public License as published by ## ## the Free Software Foundation, either version 3 of the License, or ## ## (at your option) any later version. ## ## ## ## This program is distributed in the hope that it will be useful, ## ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## ## GNU General Public License for more details. ## ## ## ## You should have received a copy of the GNU General Public License ## ## along with this program. If not, see <http://www.gnu.org/licenses/>. ## ## ## ########################################################################### from Analyser_Merge import Analyser_Merge, Source, SHP, Load, Mapping, Select, Generate class Analyser_Merge_Bicycle_Rental_FR_bm(Analyser_Merge): def __init__(self, config, logger = None): self.missing_official = {"item":"8160", "class": 1, "level": 3, "tag": ["merge", "public equipment", "cycle"], "desc": T_(u"BM bicycle rental not integrated") } self.possible_merge = {"item":"8161", "class": 3, "level": 3, "tag": ["merge", "public equipment", "cycle"], "desc": T_(u"BM bicycle rental integration suggestion") } self.update_official = {"item":"8162", "class": 4, "level": 3, "tag": ["merge", "public equipment", "cycle"], "desc": T_(u"BM bicycle update") } Analyser_Merge.__init__(self, config, logger, "http://data.bordeaux-metropole.fr/data.php?themes=10", u"Station VCUB", SHP(Source(attribution = u"Bordeaux Métropole", millesime = "08/2016", fileUrl = "http://data.bordeaux-metropole.fr/files.php?gid=43&format=2", zip = "TB_STVEL_P.shp", encoding = "ISO-8859-15")), Load(("ST_X(geom)",), ("ST_Y(geom)",), srid = 2154), Mapping( select = Select( types = ["nodes"], tags = {"amenity": "bicycle_rental"}), osmRef = "ref", conflationDistance = 100, generate = Generate( static1 = { "amenity": "bicycle_rental", "network": "VCUB"}, static2 = {"source": self.source}, mapping1 = { "name": "NOM", "ref": "NUMSTAT", "capacity": "NBSUPPOR", "vending": lambda res: "subscription" if res["TERMBANC"] == "OUI" else None, "description": lambda res: "VCUB+" if res["TARIF"] == "VLS PLUS" else None} )))
Crime doesn't pay. Well, unless you're a scammy DJ, that is. Following a massive investigation by a joint venture of the FBI and Scotland Yard's Central E-Crime Unit, nine UK DJs have been arrested for allegedly using stolen credit cards to buy their own music via Apple's iTunes and Amazon, which not only gave the tricksters a boost in income but in their chart standings as well. Here's how the scam worked: a group of DJs recorded 19 compilations, put them on the service providers' sites, then downloaded them 65,000 times on accounts set up with stolen cards. Recognizing something was amiss last December, a number of credit card companies warned Apple that the accounts were bogus. That prompted a deeper probe that led to nine people being arrested across the UK yesterday (June 10). They are charged with conspiracy to commit fraud and money laundering. The investigation spanned London and New York. Various reports put the sum of illegal royalty earnings at between £200,000 and £400,000 (almost $729,000 CDN). The false boost in sales also placed the artists higher in the chart ratings, thereby rendering many recent charts null and void. After the arrests, detective chief inspector Terry Wilson was quoted as stating "This has been a complex investigation to establish what we believe to be an international conspiracy to defraud Apple and Amazon. This investigation, with its national and international dimension, exemplifies why we have set up this national response to e-crime. It shows the success that can be achieved through our close working relationship with the FBI."
DATE_FORMAT = "%Y-%m-%d %H:%M:%S" SENSOR_DATA_KEYSPACE = "sensor_data" CASSANDRA_ADDRESS = 'cassandra.cloudbrain.rocks' RABBITMQ_ADDRESS = 'rabbitmq.cloudbrain.rocks' WEBSERVER_ADDRESS = 'webserver.cloudbrain.rocks' WEBSERVER_PORT = 8080 MOCK_DEVICE_ID = "mock" REGISTERED_DEVICES_TABLE_NAME = "registered_device_ids" # Metric metadata of all wearable devices accepted by CloudBrain. DEVICE_METADATA = [ {'device_name': 'openbci', 'device_type': 'eeg_headset', 'metrics': [ { 'metric_name': 'eeg', 'num_channels': 8, 'metric_description': 'Raw eeg data coming from the OpenBCI channels' } ] }, { 'device_name': 'muse', 'device_type': 'eeg_headset', 'metrics': [ { 'metric_name': 'eeg', 'num_channels': 4, 'metric_description': 'Raw eeg data coming from the 4 channels of the Muse' }, { 'metric_name': 'horseshoe', 'num_channels': 4, 'metric_description': 'Status indicator for each channel (1 = good, 2 = ok, >=3 bad)' }, { 'metric_name': 'concentration', 'num_channels': 1, 'metric_description': None }, { 'metric_name': 'mellow', 'num_channels': 1, 'metric_description': None }, { 'metric_name': 'acc', 'num_channels': 3, 'metric_description': None }, { 'metric_name': 'delta_absolute', 'num_channels': 4, 'metric_description': None }, { 'metric_name': 'theta_absolute', 'num_channels': 4, 'metric_description': None }, { 'metric_name': 'beta_absolute', 'num_channels': 4, 'metric_description': None }, { 'metric_name': 'alpha_absolute', 'num_channels': 4, 'metric_description': None }, { 'metric_name': 'gamma_absolute', 'num_channels': 4, 'metric_description': None }, { 'metric_name': 'delta_relative', 'num_channels': 4, 'metric_description': None }, { 'metric_name': 'theta_relative', 'num_channels': 4, 'metric_description': None }, { 'metric_name': 'beta_relative', 'num_channels': 4, 'metric_description': None }, { 'metric_name': 'alpha_relative', 'num_channels': 4, 'metric_description': None }, { 'metric_name': 'gamma_relative', 'num_channels': 4, 'metric_description': None }, { 'metric_name': 'is_good', 'num_channels': 4, 'metric_description': 'Strict data quality indicator for each channel, 0= bad, 1 = good.' }, { 'metric_name': 'blink', 'num_channels': 1, 'metric_description': None }, { 'metric_name': 'jaw_clench', 'num_channels': 1, 'metric_description': None }, ] }, { 'device_name': 'neurosky', 'device_type': 'eeg_headset', 'metrics': [ { 'metric_name': 'concentration', 'num_channels': 1, 'metric_description': None }, { 'metric_name': 'meditation', 'num_channels': 1, 'metric_description': None }, { 'metric_name': 'signal_strength', 'num_channels': 1, 'metric_description': None }, ] }, { 'device_name': 'pulsesensor', 'device_type': 'heart_rate_monitor', 'metrics': [ { 'metric_name': 'raw', 'num_channels': 1, 'metric_description': None } ] } ]
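An illustrative lookup helper (my own addition, not part of the CloudBrain API) for pulling one device's metric list out of DEVICE_METADATA:

def get_metrics(device_name):
    # A linear scan is fine here: DEVICE_METADATA holds only a handful of devices.
    for device in DEVICE_METADATA:
        if device['device_name'] == device_name:
            return device['metrics']
    raise ValueError("unknown device: %s" % device_name)

# e.g. get_metrics('muse') returns the 18 metric descriptors listed above.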
NO PURCHASE IS REQUIRED :). The Country Hearth Breads Holiday Sweeps will run until 11:59 PM CT on December 3, 2017. In order to participate you must be 18 years of age or older and a legal resident of one of the following states: Iowa, Illinois, Indiana, Michigan, Minnesota, Montana, North Dakota, Nebraska, South Dakota, and Wisconsin. To enter, please complete the entry form found at www.CountryHearthBreads.com/sweeps. If you're not sure whether you live within 25 miles of a Country Hearth Breads location, please use the Country Hearth Store Finder tool (you will want to enter your zip code in order to determine how many miles you are from a Country Hearth store). If you enter the Country Hearth Breads promotion, you agree to receive promotional email notifications from the sponsor. You can choose to opt out of the emails after the first notification is received by clicking the opt-out link within the first email. A list of winners associated with the Country Hearth Breads Sweepstakes Promotion can be requested via US mail by writing to: 2017 Home For The Holidays Sweepstakes, PO Box 240, Clear Lake, MN 55319. Please include your return address and proper postage. You must make the request by January 31, 2018 (do not make the request until after the grand prize drawing date). The sponsor of the $1,000 Country Hearth Home for the Holidays Sweepstakes is the Pan-O-Gold Baking Company, which is based at 444 E St Germain St, St Cloud, MN 56304. If you're looking for coupons for a Country Hearth Breads product, please visit their Facebook page. Each coupon may be printed twice per device, and check back often, as they update their coupons monthly. Country Hearth's Facebook fans can get FREE bread! See the Country Hearth Facebook page for details on how to win.
#!/usr/bin/env python3 # Copyright (c) 2019 The Navcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. from test_framework.test_framework import NavCoinTestFramework from test_framework.cfund_util import * import time class LightVotingTest(NavCoinTestFramework): """Tests the voting from light wallets""" def __init__(self): super().__init__() self.setup_clean_chain = True self.num_nodes = 3 def setup_network(self, split=False): self.nodes = [] self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [["-debug=dao","-dandelion=0"]]*3) connect_nodes(self.nodes[0], 1) connect_nodes(self.nodes[1], 2) connect_nodes(self.nodes[2], 0) def run_test(self): # Get cfund parameters blocks_per_voting_cycle = self.nodes[0].cfundstats()["consensus"]["blocksPerVotingCycle"] self.nodes[0].staking(False) self.nodes[1].staking(False) self.nodes[2].staking(False) activate_softfork(self.nodes[0], "coldstaking_v2") votingkey = self.nodes[2].getnewaddress() coldstaking = self.nodes[0].getcoldstakingaddress(self.nodes[1].getnewaddress(),self.nodes[1].getnewaddress(),votingkey) self.nodes[0].sendtoaddress(votingkey, 100) self.nodes[0].sendtoaddress(coldstaking, 3000000) self.nodes[0].sendtoaddress(coldstaking, 3000000) self.nodes[0].sendtoaddress(coldstaking, 3000000) self.nodes[0].sendtoaddress(coldstaking, 3000000) self.nodes[0].sendtoaddress(coldstaking, 3000000) self.nodes[0].sendtoaddress(coldstaking, 3000000) slow_gen(self.nodes[0], 10) time.sleep(3) consultation_hash = self.nodes[0].createconsultation("range", 300, 600, True)['hash'] slow_gen(self.nodes[0], 1) start_new_cycle(self.nodes[0]) reversed_hash = reverse_byte_str(consultation_hash) support_str = '6a' + 'c9' + 'c4' + '20' + reversed_hash supportrm_str = '6a' + 'c9' + 'c8' + '20' + reversed_hash rawtx=self.nodes[2].createrawtransaction([],{support_str:0,'6ac1':0.1}) rawtx = "08" + rawtx[2:] fundedrawtx=self.nodes[2].fundrawtransaction(rawtx,{'changeAddress':votingkey})['hex'] signedrawtx=self.nodes[2].signrawtransaction(fundedrawtx)['hex'] self.nodes[2].sendrawtransaction(signedrawtx) self.nodes[2].generatetoaddress(1, votingkey) sync_blocks(self.nodes) self.stake_block(self.nodes[1], False) self.stake_block(self.nodes[1], False) sync_blocks(self.nodes) time.sleep(3) assert_equal(self.nodes[0].getconsultation(consultation_hash)['support'], 2) rawtx=self.nodes[2].createrawtransaction([],{supportrm_str:0,'6ac1':0.1}) rawtx = "08" + rawtx[2:] fundedrawtx=self.nodes[2].fundrawtransaction(rawtx,{'changeAddress':votingkey})['hex'] signedrawtx=self.nodes[2].signrawtransaction(fundedrawtx)['hex'] self.nodes[2].sendrawtransaction(signedrawtx) self.nodes[2].generatetoaddress(1, votingkey) sync_blocks(self.nodes) self.stake_block(self.nodes[1], False) self.stake_block(self.nodes[1], False) sync_blocks(self.nodes) time.sleep(3) assert_equal(self.nodes[0].getconsultation(consultation_hash)['support'], 2) start_new_cycle(self.nodes[0]) start_new_cycle(self.nodes[0]) start_new_cycle(self.nodes[0]) start_new_cycle(self.nodes[0]) vote_str_500 = '6a' + 'cb' + 'ca' + '20' + reversed_hash + '02f401' vote_str_400 = '6a' + 'cb' + 'ca' + '20' + reversed_hash + '029001' voteabs_str = '6a' + 'cb' + 'c7' + '20' + reversed_hash voterm_str = '6a' + 'cb' + 'c8' + '20' + reversed_hash rawtx=self.nodes[2].createrawtransaction([],{vote_str_500:0,'6ac1':0.1}) rawtx = "08" + rawtx[2:] fundedrawtx=self.nodes[2].fundrawtransaction(rawtx,{'changeAddress':votingkey})['hex'] 
signedrawtx=self.nodes[2].signrawtransaction(fundedrawtx)['hex'] self.nodes[2].sendrawtransaction(signedrawtx) self.nodes[2].generatetoaddress(1, votingkey) sync_blocks(self.nodes) self.stake_block(self.nodes[1], False) self.stake_block(self.nodes[1], False) sync_blocks(self.nodes) time.sleep(3) assert_equal(self.nodes[0].getconsultation(consultation_hash)['answers'][0]['500'], 2) assert_equal(self.nodes[0].getconsultation(consultation_hash)['abstain'], 0) rawtx=self.nodes[2].createrawtransaction([],{voterm_str:0,'6ac1':0.1}) rawtx = "08" + rawtx[2:] fundedrawtx=self.nodes[2].fundrawtransaction(rawtx,{'changeAddress':votingkey})['hex'] signedrawtx=self.nodes[2].signrawtransaction(fundedrawtx)['hex'] self.nodes[2].sendrawtransaction(signedrawtx) self.nodes[2].generatetoaddress(1, votingkey) sync_blocks(self.nodes) self.stake_block(self.nodes[1], False) self.stake_block(self.nodes[1], False) sync_blocks(self.nodes) time.sleep(3) assert_equal(self.nodes[0].getconsultation(consultation_hash)["answers"][0]['500'], 2) assert_equal(self.nodes[0].getconsultation(consultation_hash)['abstain'], 0) rawtx=self.nodes[2].createrawtransaction([],{voteabs_str:0,'6ac1':0.1}) rawtx = "08" + rawtx[2:] fundedrawtx=self.nodes[2].fundrawtransaction(rawtx,{'changeAddress':votingkey})['hex'] signedrawtx=self.nodes[2].signrawtransaction(fundedrawtx)['hex'] self.nodes[2].sendrawtransaction(signedrawtx) self.nodes[2].generatetoaddress(1, votingkey) sync_blocks(self.nodes) self.stake_block(self.nodes[1], False) self.stake_block(self.nodes[1], False) sync_blocks(self.nodes) time.sleep(3) assert_equal(self.nodes[0].getconsultation(consultation_hash)["answers"][0]['500'], 2) assert_equal(self.nodes[0].getconsultation(consultation_hash)['abstain'], 2) start_new_cycle(self.nodes[0]) sync_blocks(self.nodes) # 1 abstain vote time.sleep(3) self.stake_block(self.nodes[1], False) sync_blocks(self.nodes) time.sleep(3) # remove abstain vote rawtx=self.nodes[2].createrawtransaction([],{voterm_str:0,'6ac1':0.1}) rawtx = "08" + rawtx[2:] fundedrawtx=self.nodes[2].fundrawtransaction(rawtx,{'changeAddress':votingkey})['hex'] signedrawtx=self.nodes[2].signrawtransaction(fundedrawtx)['hex'] self.nodes[2].sendrawtransaction(signedrawtx) self.nodes[2].generatetoaddress(1, votingkey) sync_blocks(self.nodes) time.sleep(3) # switch to vote 400 rawtx=self.nodes[2].createrawtransaction([],{vote_str_400:0,'6ac1':0.1}) rawtx = "08" + rawtx[2:] fundedrawtx=self.nodes[2].fundrawtransaction(rawtx,{'changeAddress':votingkey})['hex'] signedrawtx=self.nodes[2].signrawtransaction(fundedrawtx)['hex'] self.nodes[2].sendrawtransaction(signedrawtx) self.nodes[2].generatetoaddress(1, votingkey) sync_blocks(self.nodes) self.stake_block(self.nodes[1], False) self.stake_block(self.nodes[1], False) sync_blocks(self.nodes) time.sleep(3) assert_equal(self.nodes[0].getconsultation(consultation_hash)["answers"][0]['400'], 2) assert_equal(self.nodes[0].getconsultation(consultation_hash)['abstain'], 1) # switch to vote 500 rawtx=self.nodes[2].createrawtransaction([],{voterm_str:0,'6ac1':0.1}) rawtx = "08" + rawtx[2:] fundedrawtx=self.nodes[2].fundrawtransaction(rawtx,{'changeAddress':votingkey})['hex'] signedrawtx=self.nodes[2].signrawtransaction(fundedrawtx)['hex'] self.nodes[2].sendrawtransaction(signedrawtx) rawtx=self.nodes[2].createrawtransaction([],{vote_str_500:0,'6ac1':0.1}) rawtx = "08" + rawtx[2:] fundedrawtx=self.nodes[2].fundrawtransaction(rawtx,{'changeAddress':votingkey})['hex'] signedrawtx=self.nodes[2].signrawtransaction(fundedrawtx)['hex'] 
self.nodes[2].sendrawtransaction(signedrawtx) self.nodes[2].generatetoaddress(1, votingkey) sync_blocks(self.nodes) self.stake_block(self.nodes[1], False) self.stake_block(self.nodes[1], False) sync_blocks(self.nodes) time.sleep(3) assert_equal(self.nodes[0].getconsultation(consultation_hash)["answers"][0]['500'], 2) assert_equal(self.nodes[0].getconsultation(consultation_hash)["answers"][0]['400'], 2) assert_equal(self.nodes[0].getconsultation(consultation_hash)['abstain'], 1) def stake_block(self, node, mature = True): # Get the current block count to check against while we wait for a stake blockcount = node.getblockcount() # Turn staking on node.staking(True) # wait for a new block to be mined while node.getblockcount() == blockcount: #print("waiting for a new block...") time.sleep(1) # We got one #print("found a new block...") # Turn staking off node.staking(False) # Get the staked block block_hash = node.getbestblockhash() # Only mature the blocks if we asked for it if (mature): # Make sure the blocks are mature before we check the report slow_gen(node, 5, 0.5) self.sync_all() # return the block hash to the function caller return block_hash if __name__ == '__main__': LightVotingTest().main()
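The raw hex strings in this test all follow one pattern; here is a hedged helper sketch that could have built them (assumptions about the encoding: 0x6a is OP_RETURN, 0xc9/0xcb select support vs. vote commands, the following byte is the sub-command, 0x20 pushes the 32-byte reversed consultation hash, and range answers are length-prefixed little-endian pushes):

def build_dao_script(command, subcommand, consultation_hash, answer=None):
    # e.g. build_dao_script('cb', 'ca', consultation_hash, 500) == vote_str_500
    script = '6a' + command + subcommand + '20' + reverse_byte_str(consultation_hash)
    if answer is not None:
        script += '02' + answer.to_bytes(2, 'little').hex()  # 0x02 = push two bytes
    return script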
The Rutledge, Georgia dance community is a beautiful thing. With so many different personalities and backgrounds, the people who love dance find meaning in one another's arms as they rhythmically move across the dance floor of life. My boyfriend and I are looking to learn something new. My cousin dances professionally, but is pregnant and not currently doing any teaching.
from build_filtered_chunks import process_chunk_list from experiment import Experiment from innovations import measure_innovations, analyze_innovations from latex import latex_corpora, latex_performance ########################################################################### # # Experiments # ########################################################################### experiment_windows_273_150_100 = Experiment( # Paths to tools r"D:\git\BitFunnel\build-msvc\tools\BitFunnel\src\Release\BitFunnel.exe", r"D:\git\mg4j-workbench", r"/home/mhop/git/partitioned_elias_fano/bin", # The directory containing all indexes and the basename for this index r"D:\temp\indexes", 100, 150, r"273-150-100", # The directory with the gov2 chunks and the regular expression pattern # used to determine which chunks will be used for this experiment. r"d:\sigir\chunks-100-150", r"GX.*", # Use all chunks # The query log to be used for this experiment. r"D:\sigir\queries\06.efficiency_topics.all", # BitFunnel density 0.15, # Min and max thread counts 8, 1, 8 ) experiment_windows_273_64_127 = Experiment( # Paths to tools r"D:\git\BitFunnel\build-msvc\tools\BitFunnel\src\Release\BitFunnel.exe", r"D:\git\mg4j-workbench", r"/home/mhop/git/partitioned_elias_fano/bin", # The directory containing all indexes and the basename for this index r"D:\temp\indexes", 64, 127, r"273-64-127", # The directory with the gov2 chunks and the regular expression pattern # used to determine which chunks will be used for this experiment. r"d:\sigir\chunks-64-127", r"GX.*", # Use all chunks # The query log to be used for this experiment. r"D:\sigir\queries\06.efficiency_topics.all", # BitFunnel density 0.15, # Min and max thread counts 8, 1, 8 ) experiment_windows_273_128_255 = Experiment( # Paths to tools r"D:\git\BitFunnel\build-msvc\tools\BitFunnel\src\Release\BitFunnel.exe", r"D:\git\mg4j-workbench", r"/home/mhop/git/partitioned_elias_fano/bin", # The directory containing all indexes and the basename for this index r"D:\temp\indexes", 128, 255, r"273_128_255", # The directory with the gov2 chunks and the regular expression pattern # used to determine which chunks will be used for this experiment. r"d:\sigir\chunks-128-255", r"GX.*", # Use all chunks # The query log to be used for this experiment. r"D:\sigir\queries\06.efficiency_topics.all", # BitFunnel density 0.15, # Min and max thread counts 8, 1, 8 ) experiment_windows_273_256_511 = Experiment( # Paths to tools r"D:\git\BitFunnel\build-msvc\tools\BitFunnel\src\Release\BitFunnel.exe", r"D:\git\mg4j-workbench", r"/home/mhop/git/partitioned_elias_fano/bin", # The directory containing all indexes and the basename for this index r"D:\temp\indexes", 256, 511, r"273-256-511", # The directory with the gov2 chunks and the regular expression pattern # used to determine which chunks will be used for this experiment. r"d:\sigir\chunks-256-511", r"GX.*", # Use all chunks # The query log to be used for this experiment. r"D:\sigir\queries\06.efficiency_topics.all", # BitFunnel density 0.15, # Min and max thread counts 8, 1, 8 ) experiment_windows_273_1000_1500 = Experiment( # Paths to tools r"D:\git\BitFunnel\build-msvc\tools\BitFunnel\src\Release\BitFunnel.exe", r"D:\git\mg4j-workbench", r"/home/mhop/git/partitioned_elias_fano/bin", # The directory containing all indexes and the basename for this index r"D:\temp\indexes", 1000, 1500, r"273-1000-1500", # The directory with the gov2 chunks and the regular expression pattern # used to determine which chunks will be used for this experiment. 
r"d:\sigir\chunks-1000-1500", r"GX.*", # Use all chunks # The query log to be used for this experiment. r"D:\sigir\queries\06.efficiency_topics.all", # BitFunnel density 0.15, # Min and max thread counts 8, 1, 8 ) experiment_windows_273_1024_2047 = Experiment( # Paths to tools r"D:\git\BitFunnel\build-msvc\tools\BitFunnel\src\Release\BitFunnel.exe", r"D:\git\mg4j-workbench", r"/home/mhop/git/partitioned_elias_fano/bin", # The directory containing all indexes and the basename for this index r"D:\temp\indexes", 1024, 2047, r"273-1024-2047", # The directory with the gov2 chunks and the regular expression pattern # used to determine which chunks will be used for this experiment. r"d:\sigir\chunks-1024-2047", r"GX.*", # Use all chunks # The query log to be used for this experiment. r"D:\sigir\queries\06.efficiency_topics.all", # BitFunnel density 0.15, # Min and max thread counts 8, 1, 8 ) experiment_windows_273_2048_4095 = Experiment( # Paths to tools r"D:\git\BitFunnel\build-msvc\tools\BitFunnel\src\Release\BitFunnel.exe", r"D:\git\mg4j-workbench", r"/home/mhop/git/partitioned_elias_fano/bin", # The directory containing all indexes and the basename for this index r"D:\temp\indexes", 2048, 4095, r"273-2048-4095", # The directory with the gov2 chunks and the regular expression pattern # used to determine which chunks will be used for this experiment. r"d:\sigir\chunks-2048-4095", r"GX.*", # Use all chunks # The query log to be used for this experiment. r"D:\sigir\queries\06.efficiency_topics.all", # BitFunnel density 0.15, # Min and max thread counts 8, 1, 8 ) experiment_linux_273_64_127 = Experiment( # Paths to tools r"/home/mhop/git/BitFunnel/build-make/tools/BitFunnel/src/BitFunnel", r"/home/mhop/git/mg4j-workbench", r"/home/mhop/git/partitioned_elias_fano/bin", # The directory containing all indexes and the basename for this index r"/mnt/d/temp/indexes", 64, 127, r"273-64-127", # The directory with the gov2 chunks and the regular expression pattern # used to determine which chunks will be used for this experiment. r"/mnt/d/sigir/chunks-64-127", r"GX.*", # Use all chunks # The query log to be used for this experiment. r"/mnt/d/sigir/queries/06.efficiency_topics.all", # BitFunnel density 0.15, # Min and max thread counts 8, 1, 8 ) experiment_linux_273_128_255 = Experiment( # Paths to tools r"/home/mhop/git/BitFunnel/build-make/tools/BitFunnel/src/BitFunnel", r"/home/mhop/git/mg4j-workbench", r"/home/mhop/git/partitioned_elias_fano/bin", # The directory containing all indexes and the basename for this index r"/mnt/d/temp/indexes", 128, 255, r"273_128_255", # The directory with the gov2 chunks and the regular expression pattern # used to determine which chunks will be used for this experiment. r"/mnt/d/sigir/chunks-128-255", r"GX.*", # Use all chunks # The query log to be used for this experiment. r"/mnt/d/sigir/queries/06.efficiency_topics.all", # BitFunnel density 0.15, # Min and max thread counts 8, 1, 8 ) experiment_linux_273_256_511 = Experiment( # Paths to tools r"/home/mhop/git/BitFunnel/build-make/tools/BitFunnel/src/BitFunnel", r"/home/mhop/git/mg4j-workbench", r"/home/mhop/git/partitioned_elias_fano/bin", # The directory containing all indexes and the basename for this index r"/mnt/d/temp/indexes", 256, 511, r"273-256-511", # The directory with the gov2 chunks and the regular expression pattern # used to determine which chunks will be used for this experiment. r"/mnt/d/sigir/chunks-256-511", r"GX.*", # Use all chunks # The query log to be used for this experiment. 
r"/mnt/d/sigir/queries/06.efficiency_topics.all", # BitFunnel density 0.15, # Min and max thread counts 8, 1, 8 ) experiment_linux_273_1024_2047 = Experiment( # Paths to tools r"/home/mhop/git/BitFunnel/build-make/tools/BitFunnel/src/BitFunnel", r"/home/mhop/git/mg4j-workbench", r"/home/mhop/git/partitioned_elias_fano/bin", # The directory containing all indexes and the basename for this index r"/mnt/d/temp/indexes", 1024, 2047, r"273-1024-2047", # The directory with the gov2 chunks and the regular expression pattern # used to determine which chunks will be used for this experiment. r"/mnt/d/sigir/chunks-100-150", r"GX.*", # Use all chunks # The query log to be used for this experiment. r"/mnt/d/sigir/queries/06.efficiency_topics.all", # BitFunnel density 0.15, # Min and max thread counts 8, 1, 8 ) experiment_linux_273_2048_4095 = Experiment( # Paths to tools r"/home/mhop/git/BitFunnel/build-make/tools/BitFunnel/src/BitFunnel", r"/home/mhop/git/mg4j-workbench", r"/home/mhop/git/partitioned_elias_fano/bin", # The directory containing all indexes and the basename for this index r"/mnt/d/temp/indexes", 2048, 4095, r"273-2048-4095", # The directory with the gov2 chunks and the regular expression pattern # used to determine which chunks will be used for this experiment. r"/mnt/d/sigir/chunks-2048-4095", r"GX.*", # Use all chunks # The query log to be used for this experiment. r"/mnt/d/sigir/queries/06.efficiency_topics.all", # BitFunnel density 0.15, # Min and max thread counts 8, 1, 8 ) def runxxx(experiment): pass # experiment.fix_query_log() # experiment.build_chunk_manifest() # # # Must build the mg4j index before filtering the query log # # Must also build mg4j before building PEF which takes the MG4J export # # as input. # experiment.build_mg4j_index() # # # Build the other indexes at this point # experiment.build_bf_index() # experiment.build_lucene_index() # experiment.build_pef_collection() # # # experiment.build_pef_index() # # # # Must filter the query log before running any queries. # experiment.filter_query_log() # # # Now we're ready to run queries. 
# # experiment.run_bf_queries() # experiment.run_lucene_queries() # experiment.run_mg4j_queries() # # experiment.run_pef_queries() # experiment.summarize(7) # print() def run_windows(experiment): experiment.run_bf_queries() experiment.run_lucene_queries() experiment.run_mg4j_queries() def run_linux(experiment): experiment.run_pef_queries() def linux(experiment): experiment.build_pef_index() experiment.run_pef_queries() def finish(experiment): experiment.summarize(7) process_chunk_list(r"d:\data\gov2", "*", r"d:\temp\chunks", r"D:\git\mg4j-workbench", r"D:\git\BitFunnel\build-msvc\tools\BitFunnel\src\Release\BitFunnel.exe", 0, 100000, 8) # process_chunk_list(r"d:\data\gov2", # r"d:\temp\chunks", # r"D:\git\mg4j-workbench", # r"D:\git\BitFunnel\build-msvc\tools\BitFunnel\src\Release\BitFunnel.exe", # 64, # 127, # 8) # process_chunk_list(r"d:\data\gov2", # r"d:\temp\chunks", # r"D:\git\mg4j-workbench", # r"D:\git\BitFunnel\build-msvc\tools\BitFunnel\src\Release\BitFunnel.exe", # 512, # 1023, # 8) # process_chunk_list(r"d:\data\gov2", # r"d:\temp\chunks", # r"D:\git\mg4j-workbench", # r"D:\git\BitFunnel\build-msvc\tools\BitFunnel\src\Release\BitFunnel.exe", # 2048, # 4095, # 7) # process_chunk_list(r"d:\data\gov2", # r"d:\temp\chunks", # r"D:\git\mg4j-workbench", # r"D:\git\BitFunnel\build-msvc\tools\BitFunnel\src\Release\BitFunnel.exe", # 1024, # 2047, # 7) # process_chunk_list(r"/home/danluu/dev/gov2", # r"/home/danluu/dev/what-is-this", # r"/home/danluu/dev/mg4j-workbench", # r"/home/danluu/dev/BitFunnel/build-ninja/tools/BitFunnel/src/BitFunnel", # 128, # 255, # 7) # runxxx(experiment_windows_273_64_127) # runxxx(experiment_windows_273_128_255) # runxxx(experiment_windows_273_150_100) # runxxx(experiment_windows_273_1000_1500) # runxxx(experiment_windows_273_1024_2047) # runxxx(experiment_linux_273_1024_2047) # runxxx(experiment_windows_273_256_511) # linux(experiment_linux_273_256_511) # runxxx(experiment_windows_273_2048_4095) # linux(experiment_linux_273_2048_4095) # finish(experiment_windows_273_2048_4095) # print() # runxxx(experiment_windows_273_64_127) # linux(experiment_linux_273_64_127) # experiment_windows_273_64_127.run_lucene_queries() # finish(experiment_windows_273_64_127) def run_innovations(experiments): labels = ["BSS", "BSS-FC", "BTFNL"] treatments = ["ClassicBitsliced", "PrivateSharedRank0", "Optimal"] densities = [0.05, 0.10, 0.15, 0.20, 0.25, 0.3, 0.35] # for experiment in experiments: # measure_innovations(experiment, treatments, densities) for experiment in experiments: analyze_innovations(experiment, labels, treatments, densities) experiments = [ experiment_windows_273_64_127, experiment_windows_273_128_255, experiment_windows_273_256_511, experiment_windows_273_1024_2047, experiment_windows_273_2048_4095] # latex_corpora(experiments) # latex_performance(experiments) # run_innovations(experiments) # run_windows(experiment_windows_273_64_127) # run_windows(experiment_windows_273_128_255) # run_windows(experiment_windows_273_1024_2047) # run_windows(experiment_windows_273_2048_4095) # run_linux(experiment_linux_273_64_127) # run_linux(experiment_linux_273_128_255) # run_linux(experiment_linux_273_1024_2047) # run_linux(experiment_linux_273_2048_4095)
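Since the end-to-end flow only survives here as the commented-out body of runxxx, a consolidated sketch of it (assumption: the Experiment methods behave exactly as those comments describe):

def run_full_pipeline(experiment):
    experiment.fix_query_log()
    experiment.build_chunk_manifest()
    # mg4j must be built first: the filtered query log and the PEF
    # collection both depend on its output.
    experiment.build_mg4j_index()
    experiment.build_bf_index()
    experiment.build_lucene_index()
    experiment.build_pef_collection()
    experiment.build_pef_index()
    experiment.filter_query_log()
    # Only after filtering is it safe to run queries.
    experiment.run_bf_queries()
    experiment.run_lucene_queries()
    experiment.run_mg4j_queries()
    experiment.run_pef_queries()
    experiment.summarize(7)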
Generate even more realistic Minecraft worlds. Moreover, as you can see in the picture below, this mod supports another mod, Biomes O’ Plenty, which adds a greater quantity and variety of biomes, so the two can work together for better results. How to install Realistic World Gen Mod for Minecraft 1.7.10? Play in a realistic Minecraft!
# -*- coding: utf-8 -*- from setuptools import setup, find_packages setup( name='pynagmailplugins', packages=find_packages(exclude=['docs', 'samples', 'tests']), namespace_packages=['snowpenguin'], version='0.0.8', scripts=['bin/check_mailq.py', 'bin/pydnswl_check.py'], install_requires=[ 'nagiosplugin>=1.2', ], description='Nagios plugins that detect unusual mail flow.', author='Andrea Briganti', author_email='[email protected]', url='https://github.com/kbytesys/pynagmailplugins', download_url='https://github.com/kbytesys/pynagmailplugins/tarball/v0.0.8', keywords=['nagios', 'systemd', 'postfix', 'mail'], license='GNU GPL v2', classifiers=[ 'Development Status :: 2 - Pre-Alpha', 'Environment :: Plugins', 'Intended Audience :: System Administrators', 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)', 'Programming Language :: Python', 'Programming Language :: Python :: 3 :: Only', 'Operating System :: POSIX :: Linux', 'Topic :: System :: Networking :: Monitoring' ], )
Join Michaela for an extended yin practice paired with myofascial release, pranayama and meditation to restore a sense of vitality to the body and clarity to the mind. Held at Yoga Mandali, Saturday March 2nd, 6-8:30 pm, $35. Preregistration strongly encouraged.
from JumpScale.clients.racktivity.energyswitch.common.GUIDTable import Value from JumpScale.clients.racktivity.energyswitch.modelfactory.models.common.Power_0_0_5_20 import Model as Power import struct import time class Model(Power): def __init__(self, parent): super(Model, self).__init__(parent) self._guidTable.update({ 50: Value(u"type='TYPE_UNSIGNED_NUMBER'\nsize=2\nlength=2\nunit='%'\nscale=1") }) self._pointerGuids = [ (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 8), (7, 8), (8, 8), (9, 8), (10, 8), (11, 1), (24, 1), (31, 8), (50, 8), (5000, 8), (5001, 8), (5002, 1), (5003, 1), (5004, 1), (5005, 1), (5006, 1), (5007, 1), (5010, 8), (5011, 8), (5012, 8), (5013, 8), (5014, 1), (5015, 1), (5016, 1), (5017, 1), (15, 8), (16, 8), (17, 1), (18, 1), (19, 1), (20, 1), (21, 1), (22, 1) ] # Attribute 'THD' GUID 50 Data type TYPE_UNSIGNED_NUMBER # Total Harmonic Distortion def getTHD(self, moduleID, portnumber=1): guid = 50 length = 1 valDef = self._guidTable[guid] data = self._parent.client.getAttribute( moduleID, guid, portnumber, length) return self._parent.getObjectFromData(data, valDef, count=length) def getOscilloscopeTimeData(self, moduleID, portnumber=1): Ioffset = 258 result = {'voltage': [], 'current': []} # Get 516 bytes of raw data from device: rawData = self._parent.client.getOscData( module=moduleID, outlet=portnumber, dataType="T") if b'failed' in rawData: time.sleep(0.1) rawData = self._parent.client.getOscData( module=moduleID, outlet=portnumber, dataType="T") if len(rawData) < 516: # something is wrong, not enough data return (101, rawData) # Extracting values from raw binary data: voltageCalibration = float( (struct.unpack('<H', rawData[:2]))[0]) / 12800.0 voltageValues = struct.unpack('<256b', rawData[2:Ioffset]) # the current values is returned in miliampers currentCalibration = float( (struct.unpack('<H', rawData[Ioffset:Ioffset + 2]))[0]) / 128.0 currentValues = struct.unpack( '<256b', rawData[Ioffset + 2:2 * Ioffset]) # Calculate the values based on calibration: for i in range(256): result['voltage'].append(voltageValues[i] * voltageCalibration) result['current'].append(currentValues[i] * currentCalibration) return (0, result) def getOscilloscopeFrequencyData(self, moduleID, portnumber=1, dataType="current"): # pylint: disable=W0221 result = { 'current': {'amplitudes': [], 'phases': []}, 'voltage': {'amplitudes': [], 'phases': []} } dataType = "FC" if dataType == "current" else "FV" numSamples = 64 rawData = self._parent.client.getOscData( module=moduleID, outlet=portnumber, dataType=dataType) if b'failed' in rawData: time.sleep(0.1) rawData = self._parent.client.getOscData( module=moduleID, outlet=portnumber, dataType=dataType) if len(rawData) < 516: # something is wrong, not enough data return (101, rawData) if dataType == "FC": # Calculate the values based on calibration: currentCalibration = float( (struct.unpack('<H', rawData[:2]))[0]) / 4096.0 / 1000 for i in range(6, 2 + 4 * numSamples, 4): # do not take DC (0th harmonic) currentAmplitude = struct.unpack('<H', rawData[i:i + 2])[0] result['current']['amplitudes'].append( currentAmplitude * currentCalibration) # if first harmonic is below 0.01 A it makes no sense to read # as on 0 load, there will be useful information if len(result['current']['amplitudes']) == 1 and result['current']['amplitudes'][0] < 0.01: return (100, None) result['current']['phases'].append( struct.unpack('<h', rawData[i + 2:i + 4])[0]) else: length = 256 VOffset = 2 + length voltageCalibration = float( (struct.unpack('<H', 
rawData[VOffset:VOffset + 2]))[0]) * 10 / 4096.0 / 1000 # Calculate the values based on calibration: # do not take DC (0th harmonic) for i in range(VOffset + 6, VOffset + 4 * numSamples, 4): result['voltage']['amplitudes'].append(struct.unpack( '<H', rawData[i:i + 2])[0] * voltageCalibration) result['voltage']['phases'].append( struct.unpack('<h', rawData[i + 2:i + 4])[0]) return (0, result)
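An illustrative consumer of getOscilloscopeTimeData (assumptions: `model` is a Model instance bound to a reachable device and module/outlet 1 exist; the RMS arithmetic is generic, not part of this class):

import math

status, data = model.getOscilloscopeTimeData(moduleID=1, portnumber=1)
if status == 0:
    # 256 calibrated samples per channel; per the comment above, current is in milliamps.
    vrms = math.sqrt(sum(v * v for v in data['voltage']) / len(data['voltage']))
    irms = math.sqrt(sum(i * i for i in data['current']) / len(data['current']))
    print('Vrms = %.2f V, Irms = %.1f mA' % (vrms, irms))
else:
    print('oscilloscope read failed: %r' % (data,))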
NEW YORK — Bruce Springsteen, Jon Stewart, Jim Gaffigan and Seth Meyers are headlining this year's Stand Up for Heroes fundraiser, which benefits injured veterans and their families. Jimmy Carr and Eric Church are also on the bill for the Nov. 5 comedy event at the Hulu Theater at Madison Square Garden in New York City. Stand Up for Heroes was first held in 2007. The event produced by the New York Comedy Festival is the brainchild of festival founders Caroline Hirsch, Andrew Fox and ABC News correspondent Bob Woodruff, who was nearly killed during a 2006 attack in Iraq while embedded with U.S. troops. He calls the event a chance to "put aside our differences" to "honor those who have and continue to sacrifice so much." Tickets go on sale Thursday at noon Eastern.
import urlparse import BaseHTTPServer import webbrowser ACCESS_TOKEN = None auth_url = 'http://www.youcandoitvfx.com/fb/' server_host = '127.0.0.1' server_port = 80 class MyHandler(BaseHTTPServer.BaseHTTPRequestHandler): def do_GET(self): global ACCESS_TOKEN self.send_response(301) self.send_header('Location', auth_url + 'close.html') self.end_headers() parsed_path = urlparse.urlparse(self.path) try: params = dict([p.split('=') for p in parsed_path[4].split('&')]) except: params = {} _access_token = params.get('access_token', 'error') if (_access_token != 'error') and (len(_access_token) != 0): ACCESS_TOKEN = _access_token def getAccessToken(): global ACCESS_TOKEN ACCESS_TOKEN = None server_class = BaseHTTPServer.HTTPServer httpd = server_class((server_host, server_port), MyHandler) webbrowser.open(auth_url) while ACCESS_TOKEN is None: httpd.handle_request() httpd.server_close() return ACCESS_TOKEN
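Usage is a single blocking call; a sketch (Python 2, like the module; assumptions: port 80 is free, which typically needs elevated privileges, and a browser is available):

if __name__ == '__main__':
    token = getAccessToken()  # blocks until the redirect delivers a token
    print 'access token: %s' % token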
Not quite a cracker, not quite a flatbread…you can imagine the amount of restraint it required not to entitle this post “crack bread”, especially considering how good they taste. Although this is not exactly bread, and therefore not really fulfilling one of my new year’s food resolutions, I think this is a good intro to the world of unleavened, savory baked goods at the very least. Give me some credit – it’s only been two weeks. This recipe went through three evolutions: the little-bit-naughty one, an extremely healthy, birdseed-style version, and what I am presenting you with today: the very happy medium. The inspiration for the recipe came from the humble flax seed – one of those food items that has received massive attention in the past couple of years for its explosively high content of omega-3 fatty acids. However, there is yet another powerful aspect to this seed, a component in the shell itself, called a lignan. What in the world is a lignan? Lignans are special compounds found in flax and in other seeds, grains, and legumes that are converted by beneficial gut flora (re: probiotics) into two hormone-like substances called enterolactone and enterodiol. These hormone-like agents demonstrate a number of protective effects against breast cancer and are believed to be one reason a vegetarian diet is associated with a lower risk for breast cancer. In animal studies conducted to evaluate lignans’ beneficial effect, supplementing a high-fat diet with flaxseed flour (like in the recipe below, for example) reduced early markers for mammary (breast) cancer in laboratory animals by more than 55%. What else do lignans do, Sarah? I’m glad you asked that. The lignan-rich fiber found in flax meal (ground flax seeds) has also been shown to decrease insulin resistance, which, in turn, reduces bio-available estrogen, which also lessens breast cancer risk. And, as insulin resistance is an early warning sign for type 2 diabetes, flaxseed may also provide protection against this disease. Groovy. So here’s the recipe that I’ve been working on, adapted from an old issue of Gourmet. If you choose to ignore all that incredibly valuable information I just spewed out, by all means omit the flax meal for a real treat (a.k.a. crack bread). However, if you would like to take advantage of all the benefits flax seeds have to offer, I suggest that you grind your flax fresh, as the delicate fats (those good ol’ omega-3’s) are extremely sensitive to heat and light and go rancid super fast. If you must buy it pre-ground, find it in the refrigerated/freezer section of your grocer, preferably in a lightproof vacuum-sealed bag, and check the expiry date. Making your own at home when you need it will ensure absolute freshness. Whole flax seeds on their own last a very long time, but once you crack them open their shelf life declines considerably. Store any extra in the freezer. You can purchase golden or brown flax seeds (I used golden in this version) – they are nutritionally equal. As for the salt, I went with a delectable smoked sea salt (pictured. Yes, that is the real colour) to enhance the richness, but any high-quality sea salt is fine. You can find various salts at specialty gourmet shops and health food stores – this is a great opportunity to try something new! These flatbreads / crackers / crisps are rich and nutty-tasting with a seriously luscious flakiness. These would be superb as a bed for black olive tapenade, or roasted red pepper hummus. If you’re into dairy, a soft-ripened cheese would be delightful I bet.
The rustic, free-form shapes, created simply by breaking the large rounds into pieces, lend a certain informality to a distinctively sophisticated flavour. Oh my.
1. Preheat oven to 450°F with a heavy baking sheet on a rack in the middle.
2. Grind flax seeds in a coffee grinder or blender until all seeds are pulverized. It should resemble flour.
3. Stir together flour, flax meal, chopped rosemary, baking powder, and salt in a medium bowl. Make a well in the center, then add water and oil and gradually stir into flour with a wooden spoon until a dough forms. Knead dough gently in the bowl 4 or 5 times until it comes together.
4. Divide dough into 3 pieces and roll out 1 piece (keep remaining pieces covered with plastic wrap) on a sheet of parchment paper into a 10-inch round (shape can be rustic; dough should be thin).
5. Scatter small clusters of rosemary leaves and a little salt over the top, pressing in slightly, then lightly brush with additional oil. Slide round (still on parchment) onto preheated baking sheet and bake until pale golden and browned in spots, 8 to 10 minutes. Transfer flatbread to a rack to cool, then make 2 more rounds (1 at a time) on parchment (do not oil or salt until just before baking). Break into pieces and serve.
I recently graduated college and after grocery shopping for the first time on my own, I have started caring a lot about my health and what I put into my body. I was definitely a soy-and-pasta vegetarian for a few years. Your blog has been such an inspiration and has helped me come up with countless ways to use ingredients and feed myself! I was wondering if I could substitute whole wheat flour for spelt? Is there a big difference in how these will come out? I’ve heard that baking flax seeds ruins the lovely oils which boast so many benefits – is this true? I’ve been wondering the very same thing…..??? Not sure if anyone is monitoring this comment forum, as I see few answers to questions posted. Does anyone else have informed insight about this relevant question? I’m a huge sucker for baked goods, but that usually means I feel pretty terrible after over-indulging on carbs – yuck. This is by far the most delicious bread(?) I have ever made and quite possibly the best I have ever eaten. The very best part? I felt great and was satisfied, too! I wish I had doubled the recipe because it was gone in literally 10 minutes (thanks, loving husband). I had some almond hummus on hand so we used it as flatbread… an interesting mix, but still delicious. As always, thank you for your incredible recipes and inspirations!!! I just made these and they’re wonderful. Thank you for sharing this recipe! I made these and they were de-lish!
from django import template from ..models import Post from ..conf import settings register = template.Library() class LatestBlogPostsNode(template.Node): def __init__(self, context_var): self.context_var = context_var def render(self, context): latest_posts = Post.objects.current()[:5] context[self.context_var] = latest_posts return "" @register.tag def latest_blog_posts(parser, token): bits = token.split_contents() return LatestBlogPostsNode(bits[2]) class LatestBlogPostNode(template.Node): def __init__(self, context_var): self.context_var = context_var def render(self, context): try: latest_post = Post.objects.current()[0] except IndexError: latest_post = None context[self.context_var] = latest_post return "" @register.tag def latest_blog_post(parser, token): bits = token.split_contents() return LatestBlogPostNode(bits[2]) class LatestSectionPostNode(template.Node): def __init__(self, section, context_var): self.section = template.Variable(section) self.context_var = context_var def render(self, context): section = self.section.resolve(context) post = Post.objects.section(section, queryset=Post.objects.current()) try: post = post[0] except IndexError: post = None context[self.context_var] = post return "" @register.tag def latest_section_post(parser, token): """ {% latest_section_post "articles" as latest_article_post %} """ bits = token.split_contents() return LatestSectionPostNode(bits[1], bits[3]) class BlogSectionsNode(template.Node): def __init__(self, context_var): self.context_var = context_var def render(self, context): sections = [(settings.PINAX_BLOG_ALL_SECTION_NAME, "All")] sections += settings.PINAX_BLOG_SECTIONS context[self.context_var] = sections return "" @register.tag def blog_sections(parser, token): """ {% blog_sections as blog_sections %} """ bits = token.split_contents() return BlogSectionsNode(bits[2])
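A hedged usage sketch for the tags above, following the syntax shown in their own docstrings (the {% load %} name assumes the module file is called pinax_blog_tags.py, which the snippet itself does not confirm):

{% load pinax_blog_tags %}
{% latest_blog_posts as posts %}
{% latest_blog_post as post %}
{% latest_section_post "articles" as latest_article_post %}
{% blog_sections as sections %}

Each tag stores its result in the named context variable and renders nothing itself, which is why every render() method returns an empty string.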
Clare Buckley, the practice manager at St Andrews NHS medical centre, reached out to us because she wanted to move her practice away from the generic website templates on offer across the medical profession. After an initial short telephone call, a discovery meeting was soon set up and Daniel from Forty3 met Clare at St Andrews medical centre in Eccles, Manchester. At the discovery meeting Clare was quick to lay out a design brief for a logo identity and showed Daniel some examples of websites whose functions she liked. It was clear that user experience was the most important factor in the operation of the new website project. The patients of the practice had to be able to book appointments and order prescriptions with ease. We carefully crafted the logo identity for the practice through a series of presentations to Clare for her feedback. Once it was agreed upon, we moved on to the next stage: building the website. Clare chose the business package, which involved a multi-page development focused on user experience, and the website was made fully responsive across all platforms, from desktop to tablets and mobile devices. Building our working relationship over the 12 weeks of the design process, Clare and I found that we share a passion for motorsport and Formula One, and that we have many friends in common on a personal level. Most recently we met for drinks at the British Grand Prix at Silverstone, once again showing that we love building relationships in our communities. Clients that become friends is what we are about. "Forty3 has created a new website for my practice, bringing St Andrews Medical Centre into the modern digital age. Our new website is easy to navigate and both staff and patients are very impressed at how easy it is to use. Forty3 also created our new logo, which really encompasses the image we were trying to portray. The whole process has been seamless and efficient and I can't recommend them enough."
#!/usr/bin/python # This file is part of PARPG. # PARPG is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # PARPG is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with PARPG. If not, see <http://www.gnu.org/licenses/>. import unittest from scripts.objects.base import * class TestObjectsBase(unittest.TestCase): def testWildcard(self): class Wildcard (GameObject, Lockable, Container, Living, Scriptable, CharStats, Wearable, Usable, Weapon, Destructable, Trappable, Carryable, ): def __init__ (self, ID, *args, **kwargs): self.name = 'All-purpose carry-all' self.text = 'What is this? I dont know' GameObject. __init__( self, ID, **kwargs ) Lockable. __init__( self, **kwargs ) Container. __init__( self, **kwargs ) Living. __init__( self, **kwargs ) Scriptable. __init__( self, **kwargs ) CharStats. __init__( self, **kwargs ) Wearable. __init__( self, **kwargs ) Usable. __init__( self, **kwargs ) Weapon. __init__( self, **kwargs ) Destructable.__init__( self, **kwargs ) Trappable. __init__( self, **kwargs ) Carryable. __init__( self, **kwargs ) wc = Wildcard (2) # TODO: need to fill the rest of these tests out attrs = dict( is_openable = True, is_open = True, is_lockable = True, locked = False, is_carryable = True, weight = 1.0, is_container = True, items = [], is_living = True, is_scriptable = True ) for attr in attrs: self.assertEqual(getattr(wc, attr), attrs[attr])
Every time a new project started, we picked a new "empty" base theme. It was supposed to be the basis for building our own. But almost no project looked the same. We always had to go in and change everything in the templates. Sometimes HTML5 was not allowed, sometimes the structure had to be totally different and functionality was not needed. Sometimes it was grid960, sometimes Boilerplate, with our implementation again changing, e.g. using SCSS or not. So have fun cleaning and recoding all the stupid WordPress templates that your theme came along with. We ended up using almost nothing from the initial template but were stuck with 100 functions within functions.php to which we had to add 10 more. So I started from scratch until I reached a point contemplating get_template_part. If you don't know what that is (lucky you), it's basically including a different file with a fallback to a default file. So you can get_template_part('loop', get_post_type()) and if there's a loop-$postType.php file found it will be loaded. No other configuration can happen. Also all those template parts have to reside in your base theme directory. I couldn't care less about theme inheritance. There's no point if your theme has no functionality you really need. So the best theme for me would be a theme with nothing in functions.php (or at most a few commented-out lines of code so I don't have to look up things like how to add custom thumbnail sizes). I would not want my theme to decide what kind of CSS framework I'm going to use, and would not like 100 stupid functions printing HTML that I would have to edit or never use. I would love to at least be able to configure my template parts. Pass some parameters in cleanly so I can skip putting logic in them or splitting them into more template parts. Also I would love the ability to group and structure them differently. I want my preview templates (those that do not show the full content) in a preview folder. Template parts are not templates. In combination with the above, how cool would it be if I could pass the excerpt length as a parameter to the template part? Then maybe, with more parametrization, I wouldn't need more than 1-2 template parts. Your functions.php will be empty apart from a function allowing Skellie to provide the view architecture and take over from WordPress. Your layout will be minimal. Your default partials will be non-existent apart from examples, so you can test how it works. The header is HTML5, but together with the footer it's just 20 lines, so feel free to remove it. Skellie provides partials and layouts and redesigns the way templates work. The template is still the main part of the application. A layout is the wrapper around your page content. Calling the header, footer and sidebars, amongst other common things, should be in the layout. You can have several layouts like 3-columns or 2-columns-1-sidebar along with your default, but you shouldn't need that many. All of them will be in the same directory (layouts/) in any case. You can still select a template for your pages as usual. However, your template can now select its layout using a special comment (or assume default.php). The template should now only contain the main content of the page and call upon partials to help it render stuff. $args is an optional associative array. Everything you pass into it, along with some default arguments, will be accessible using $this. E.g. if $args is array('someVar' => 'someValue') then $this->someVar will be 'someValue' within the partial.
I am aware that using Closures with included files is weird. If something still feels superfluous, it will be removed. The code is experimental and everything can change. Hi Thanos, this is a very good idea — thanks.
import re import datetime import lxml.html import requests from billy.utils.fulltext import text_after_line_numbers from .bills import IABillScraper from .legislators import IALegislatorScraper from .events import IAEventScraper from .votes import IAVoteScraper try: # Silencing unverified HTTPS request warnings. requests.packages.urllib3.disable_warnings() except AttributeError: pass settings = dict(SCRAPELIB_TIMEOUT=240) metadata = dict( name = 'Iowa', abbreviation = 'ia', capitol_timezone = 'America/Chicago', legislature_name = 'Iowa General Assembly', legislature_url = 'https://www.legis.iowa.gov/', chambers = { 'upper': {'name': 'Senate', 'title': 'Senator'}, 'lower': {'name': 'House', 'title': 'Representative'}, }, terms = [ { 'name': '2011-2012', 'start_year': 2011, 'end_year': 2012, 'sessions': ['2011-2012'], }, { 'name': '2013-2014', 'start_year': 2013, 'end_year': 2014, 'sessions': ['2013-2014'], }, { 'name': '2015-2016', 'start_year': 2015, 'end_year': 2016, 'sessions': ['2015-2016'], }, { 'name': '2017-2018', 'start_year': 2017, 'end_year': 2018, 'sessions': ['2017-2018'], }, ], session_details = { '2011-2012': { 'display_name': '2011-2012 Regular Session', '_scraped_name': 'General Assembly: 84', 'number': '84', 'start_date': datetime.date(2011, 1, 10), 'end_date': datetime.date(2013, 1, 13), }, '2013-2014': { 'display_name': '2013-2014 Regular Session', '_scraped_name': 'General Assembly: 85', 'number': '85', }, '2015-2016': { 'display_name': '2015-2016 Regular Session', '_scraped_name': 'General Assembly: 86', 'number': '86', }, '2017-2018': { 'display_name': '2017-2018 Regular Session', '_scraped_name': 'General Assembly: 87', 'number': '87', }, }, feature_flags = ['events', 'influenceexplorer'], _ignored_scraped_sessions = [ 'Legislative Assembly: 86', 'General Assembly: 83', 'General Assembly: 82', 'General Assembly: 81', 'General Assembly: 80', 'General Assembly: 79', 'General Assembly: 79', 'General Assembly: 78', 'General Assembly: 78', 'General Assembly: 77', 'General Assembly: 77', 'General Assembly: 76', ] ) def session_list(): def url_xpath(url, path): doc = lxml.html.fromstring(requests.get(url, verify=False).text) return doc.xpath(path) sessions = url_xpath( 'https://www.legis.iowa.gov/legislation/findLegislation', "//section[@class='grid_6']//li/a/text()[normalize-space()]" ) sessions = [x[0] for x in filter(lambda x: x != [], [ re.findall(r'^.*Assembly: [0-9]+', session) for session in sessions ])] return sessions def extract_text(doc, data): doc = lxml.html.fromstring(data) text = doc.xpath('//pre')[0].text_content() # strip two sets of line numbers return text_after_line_numbers(text_after_line_numbers(text))
Popularity - Super high amount of bids. 91 views, 3.0 views per day, 30 days on eBay. High amount of views. 1 sold, 0 available. Seller - 139,292+ items sold. 0.3% negative feedback. Top-Rated Seller! Ships on time with tracking, 0 problems with past sales.
# Four-In-A-Row (a Connect Four clone) # By Al Sweigart [email protected] # http://inventwithpython.com/pygame # Released under a "Simplified BSD" license import random, copy, sys, pygame from pygame.locals import * BOARDWIDTH = 7 # how many spaces wide the board is BOARDHEIGHT = 6 # how many spaces tall the board is assert BOARDWIDTH >= 4 and BOARDHEIGHT >= 4, 'Board must be at least 4x4.' DIFFICULTY = 2 # how many moves to look ahead. (>2 is usually too much) SPACESIZE = 50 # size of the tokens and individual board spaces in pixels FPS = 30 # frames per second to update the screen WINDOWWIDTH = 640 # width of the program's window, in pixels WINDOWHEIGHT = 480 # height in pixels XMARGIN = int((WINDOWWIDTH - BOARDWIDTH * SPACESIZE) / 2) YMARGIN = int((WINDOWHEIGHT - BOARDHEIGHT * SPACESIZE) / 2) BRIGHTBLUE = (0, 50, 255) WHITE = (255, 255, 255) BGCOLOR = BRIGHTBLUE TEXTCOLOR = WHITE RED = 'red' BLACK = 'black' EMPTY = None HUMAN = 'human' COMPUTER = 'computer' def main(): global FPSCLOCK, DISPLAYSURF, REDPILERECT, BLACKPILERECT, REDTOKENIMG global BLACKTOKENIMG, BOARDIMG, ARROWIMG, ARROWRECT, HUMANWINNERIMG global COMPUTERWINNERIMG, WINNERRECT, TIEWINNERIMG pygame.init() FPSCLOCK = pygame.time.Clock() DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT)) pygame.display.set_caption('Four in a Row') REDPILERECT = pygame.Rect(int(SPACESIZE / 2), WINDOWHEIGHT - int(3 * SPACESIZE / 2), SPACESIZE, SPACESIZE) BLACKPILERECT = pygame.Rect(WINDOWWIDTH - int(3 * SPACESIZE / 2), WINDOWHEIGHT - int(3 * SPACESIZE / 2), SPACESIZE, SPACESIZE) REDTOKENIMG = pygame.image.load('4row_red.png') REDTOKENIMG = pygame.transform.smoothscale(REDTOKENIMG, (SPACESIZE, SPACESIZE)) BLACKTOKENIMG = pygame.image.load('4row_black.png') BLACKTOKENIMG = pygame.transform.smoothscale(BLACKTOKENIMG, (SPACESIZE, SPACESIZE)) BOARDIMG = pygame.image.load('4row_board.png') BOARDIMG = pygame.transform.smoothscale(BOARDIMG, (SPACESIZE, SPACESIZE)) HUMANWINNERIMG = pygame.image.load('4row_humanwinner.png') COMPUTERWINNERIMG = pygame.image.load('4row_computerwinner.png') TIEWINNERIMG = pygame.image.load('4row_tie.png') WINNERRECT = HUMANWINNERIMG.get_rect() WINNERRECT.center = (int(WINDOWWIDTH / 2), int(WINDOWHEIGHT / 2)) ARROWIMG = pygame.image.load('4row_arrow.png') ARROWRECT = ARROWIMG.get_rect() ARROWRECT.left = REDPILERECT.right + 10 ARROWRECT.centery = REDPILERECT.centery isFirstGame = True while True: runGame(isFirstGame) isFirstGame = False def runGame(isFirstGame): if isFirstGame: # Let the computer go first on the first game, so the player # can see how the tokens are dragged from the token piles. turn = COMPUTER showHelp = True else: # Randomly choose who goes first. if random.randint(0, 1) == 0: turn = COMPUTER else: turn = HUMAN showHelp = False # Set up a blank board data structure. mainBoard = getNewBoard() while True: # main game loop if turn == HUMAN: # Human player's turn. getHumanMove(mainBoard, showHelp) if showHelp: # turn off help arrow after the first move showHelp = False if isWinner(mainBoard, RED): winnerImg = HUMANWINNERIMG break turn = COMPUTER # switch to other player's turn else: # Computer player's turn. column = getComputerMove(mainBoard) animateComputerMoving(mainBoard, column) makeMove(mainBoard, BLACK, column) if isWinner(mainBoard, BLACK): winnerImg = COMPUTERWINNERIMG break turn = HUMAN # switch to other player's turn if isBoardFull(mainBoard): # A completely filled board means it's a tie. winnerImg = TIEWINNERIMG break while True: # Keep looping until player clicks the mouse or quits. 
drawBoard(mainBoard) DISPLAYSURF.blit(winnerImg, WINNERRECT) pygame.display.update() FPSCLOCK.tick() for event in pygame.event.get(): # event handling loop if event.type == QUIT or (event.type == KEYUP and event.key == K_ESCAPE): pygame.quit() sys.exit() elif event.type == MOUSEBUTTONUP: return def makeMove(board, player, column): lowest = getLowestEmptySpace(board, column) if lowest != -1: board[column][lowest] = player def drawBoard(board, extraToken=None): DISPLAYSURF.fill(BGCOLOR) # draw tokens spaceRect = pygame.Rect(0, 0, SPACESIZE, SPACESIZE) for x in range(BOARDWIDTH): for y in range(BOARDHEIGHT): spaceRect.topleft = (XMARGIN + (x * SPACESIZE), YMARGIN + (y * SPACESIZE)) if board[x][y] == RED: DISPLAYSURF.blit(REDTOKENIMG, spaceRect) elif board[x][y] == BLACK: DISPLAYSURF.blit(BLACKTOKENIMG, spaceRect) # draw the extra token if extraToken != None: if extraToken['color'] == RED: DISPLAYSURF.blit(REDTOKENIMG, (extraToken['x'], extraToken['y'], SPACESIZE, SPACESIZE)) elif extraToken['color'] == BLACK: DISPLAYSURF.blit(BLACKTOKENIMG, (extraToken['x'], extraToken['y'], SPACESIZE, SPACESIZE)) # draw board over the tokens for x in range(BOARDWIDTH): for y in range(BOARDHEIGHT): spaceRect.topleft = (XMARGIN + (x * SPACESIZE), YMARGIN + (y * SPACESIZE)) DISPLAYSURF.blit(BOARDIMG, spaceRect) # draw the red and black tokens off to the side DISPLAYSURF.blit(REDTOKENIMG, REDPILERECT) # red on the left DISPLAYSURF.blit(BLACKTOKENIMG, BLACKPILERECT) # black on the right def getNewBoard(): board = [] for x in range(BOARDWIDTH): board.append([EMPTY] * BOARDHEIGHT) return board def getHumanMove(board, isFirstMove): draggingToken = False tokenx, tokeny = None, None while True: for event in pygame.event.get(): # event handling loop if event.type == QUIT: pygame.quit() sys.exit() elif event.type == MOUSEBUTTONDOWN and not draggingToken and REDPILERECT.collidepoint(event.pos): # start of dragging on red token pile. draggingToken = True tokenx, tokeny = event.pos elif event.type == MOUSEMOTION and draggingToken: # update the position of the red token being dragged tokenx, tokeny = event.pos elif event.type == MOUSEBUTTONUP and draggingToken: # let go of the token being dragged if tokeny < YMARGIN and tokenx > XMARGIN and tokenx < WINDOWWIDTH - XMARGIN: # let go at the top of the screen. column = int((tokenx - XMARGIN) / SPACESIZE) if isValidMove(board, column): animateDroppingToken(board, column, RED) board[column][getLowestEmptySpace(board, column)] = RED drawBoard(board) pygame.display.update() return tokenx, tokeny = None, None draggingToken = False if tokenx != None and tokeny != None: drawBoard(board, {'x':tokenx - int(SPACESIZE / 2), 'y':tokeny - int(SPACESIZE / 2), 'color':RED}) else: drawBoard(board) if isFirstMove: # Show the help arrow for the player's first move. 
DISPLAYSURF.blit(ARROWIMG, ARROWRECT) pygame.display.update() FPSCLOCK.tick() def animateDroppingToken(board, column, color): x = XMARGIN + column * SPACESIZE y = YMARGIN - SPACESIZE dropSpeed = 1.0 lowestEmptySpace = getLowestEmptySpace(board, column) while True: y += int(dropSpeed) dropSpeed += 0.5 if int((y - YMARGIN) / SPACESIZE) >= lowestEmptySpace: return drawBoard(board, {'x':x, 'y':y, 'color':color}) pygame.display.update() FPSCLOCK.tick() def animateComputerMoving(board, column): x = BLACKPILERECT.left y = BLACKPILERECT.top speed = 1.0 # moving the black tile up while y > (YMARGIN - SPACESIZE): y -= int(speed) speed += 0.5 drawBoard(board, {'x':x, 'y':y, 'color':BLACK}) pygame.display.update() FPSCLOCK.tick() # moving the black tile over y = YMARGIN - SPACESIZE speed = 1.0 while x > (XMARGIN + column * SPACESIZE): x -= int(speed) speed += 0.5 drawBoard(board, {'x':x, 'y':y, 'color':BLACK}) pygame.display.update() FPSCLOCK.tick() # dropping the black tile animateDroppingToken(board, column, BLACK) def getComputerMove(board): potentialMoves = getPotentialMoves(board, BLACK, DIFFICULTY) # get the best fitness from the potential moves bestMoveFitness = -1 for i in range(BOARDWIDTH): if potentialMoves[i] > bestMoveFitness and isValidMove(board, i): bestMoveFitness = potentialMoves[i] # find all potential moves that have this best fitness bestMoves = [] for i in range(len(potentialMoves)): if potentialMoves[i] == bestMoveFitness and isValidMove(board, i): bestMoves.append(i) return random.choice(bestMoves) def getPotentialMoves(board, tile, lookAhead): if lookAhead == 0 or isBoardFull(board): return [0] * BOARDWIDTH if tile == RED: enemyTile = BLACK else: enemyTile = RED # Figure out the best move to make. potentialMoves = [0] * BOARDWIDTH for firstMove in range(BOARDWIDTH): dupeBoard = copy.deepcopy(board) if not isValidMove(dupeBoard, firstMove): continue makeMove(dupeBoard, tile, firstMove) if isWinner(dupeBoard, tile): # a winning move automatically gets a perfect fitness potentialMoves[firstMove] = 1 break # don't bother calculating other moves else: # do other player's counter moves and determine best one if isBoardFull(dupeBoard): potentialMoves[firstMove] = 0 else: for counterMove in range(BOARDWIDTH): dupeBoard2 = copy.deepcopy(dupeBoard) if not isValidMove(dupeBoard2, counterMove): continue makeMove(dupeBoard2, enemyTile, counterMove) if isWinner(dupeBoard2, enemyTile): # a losing move automatically gets the worst fitness potentialMoves[firstMove] = -1 break else: # do the recursive call to getPotentialMoves() results = getPotentialMoves(dupeBoard2, tile, lookAhead - 1) potentialMoves[firstMove] += (sum(results) / BOARDWIDTH) / BOARDWIDTH return potentialMoves def getLowestEmptySpace(board, column): # Return the row number of the lowest empty row in the given column. for y in range(BOARDHEIGHT-1, -1, -1): if board[column][y] == EMPTY: return y return -1 def isValidMove(board, column): # Returns True if there is an empty space in the given column. # Otherwise returns False. if column < 0 or column >= (BOARDWIDTH) or board[column][0] != EMPTY: return False return True def isBoardFull(board): # Returns True if there are no empty spaces anywhere on the board. 
for x in range(BOARDWIDTH): for y in range(BOARDHEIGHT): if board[x][y] == EMPTY: return False return True def isWinner(board, tile): # check horizontal spaces for x in range(BOARDWIDTH - 3): for y in range(BOARDHEIGHT): if board[x][y] == tile and board[x+1][y] == tile and board[x+2][y] == tile and board[x+3][y] == tile: return True # check vertical spaces for x in range(BOARDWIDTH): for y in range(BOARDHEIGHT - 3): if board[x][y] == tile and board[x][y+1] == tile and board[x][y+2] == tile and board[x][y+3] == tile: return True # check / diagonal spaces for x in range(BOARDWIDTH - 3): for y in range(3, BOARDHEIGHT): if board[x][y] == tile and board[x+1][y-1] == tile and board[x+2][y-2] == tile and board[x+3][y-3] == tile: return True # check \ diagonal spaces for x in range(BOARDWIDTH - 3): for y in range(BOARDHEIGHT - 3): if board[x][y] == tile and board[x+1][y+1] == tile and board[x+2][y+2] == tile and board[x+3][y+3] == tile: return True return False if __name__ == '__main__': main()
We love our lemurs, and so we thought we'd give them a special treat this Valentine's Day in the shape of some heart-shaped beetroot. We hung them from the trees in their enclosure and they happily grabbed and devoured the juicy snacks. The black and white ruffed lemurs are listed as critically endangered, and in the wilds of Madagascar, where they originate, the biggest threat to their survival is the destruction of their habitat. Ring-tailed lemurs are considered a vulnerable species, as the Madagascan forest in which they live is being destroyed by slash-and-burn agriculture, charcoal production and mining for gemstones and minerals.
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *


class RYarn(RPackage):
    """Expedite large RNA-Seq analyses using a combination of previously
       developed tools. YARN is meant to make it easier for the user in
       performing basic mis-annotation quality control, filtering, and
       condition-aware normalization. YARN leverages many Bioconductor tools
       and statistical techniques to account for the large heterogeneity and
       sparsity found in very large RNA-seq experiments."""

    homepage = "https://www.bioconductor.org/packages/yarn/"
    url = "https://git.bioconductor.org/packages/yarn"
    list_url = homepage

    version('1.2.0', git='https://git.bioconductor.org/packages/yarn',
            commit='28af616ef8c27dcadf6568e276dea8465486a697')

    depends_on('r-biobase', type=('build', 'run'))
    depends_on('r-biomart', type=('build', 'run'))
    depends_on('r-downloader', type=('build', 'run'))
    depends_on('r-edger', type=('build', 'run'))
    depends_on('r-gplots', type=('build', 'run'))
    depends_on('r-limma', type=('build', 'run'))
    depends_on('r-matrixstats', type=('build', 'run'))
    depends_on('r-preprocesscore', type=('build', 'run'))
    depends_on('r-readr', type=('build', 'run'))
    depends_on('r-rcolorbrewer', type=('build', 'run'))
    depends_on('r-quantro', type=('build', 'run'))
    depends_on('[email protected]:3.4.9', when='@1.2.0')
CICC US SECURITIES, INC. is an entity registered in NEW YORK County with company number 5319327. CORPORATION SERVICE COMPANY is listed at the address 80 State Street, Albany, New York, 12207-2543. The company was incorporated on April 9, 2018, and its current status is active. Related entries: CICCHETTI & GOMEZ REALTY CORP.; CICC INVESTMENT MANAGEMENT (USA), INC.
# !/usr/bin/env python # Bulk of this document is based on code from here: http://code.google.com/appengine/articles/rpc.html import os import logging from django.utils import simplejson from google.appengine.ext import webapp from google.appengine.ext.webapp import template from google.appengine.ext.webapp import util from google.appengine.ext import db # import zlib class MainPage(webapp.RequestHandler): """Renders the main template.""" def get(self): template_values = { } self.response.headers['Content-Type'] = "application/xhtml+xml" path = os.path.join(os.path.dirname(__file__), "index.xhtml") self.response.out.write(template.render(path, template_values)) class SaveHandler(webapp.RequestHandler): """ Allows the functions defined in the RPCMethods class to be RPCed.""" def __init__(self): webapp.RequestHandler.__init__(self) # self.methods = RPCMethods() def post(self): data = simplejson.loads(self.request.body) length = self.request.headers['content-length'] # data = simplejson.loads(args) # blockcountlen = len(data[2]) dbdata = BlokField( author = data[0], title = data[1], field = simplejson.dumps(data[2]), revision = float(data[3]), blockcount = int(len(data[2])), repo = data[4] ) # ["CryptoQuick", "TestBlock", [[20, 12, 0, 0], [19, 11, 0, 0], [18, 11, 0, 0]], 0.01] dbdata.put() # if func[0] == '_': # self.error(403) # access denied # return # func = getattr(self.methods, func, None) # if not func: # self.error(404) # file not found # return # # result = func(*args) # self.response.out.write(simplejson.dumps(result)) self.response.out.write(str(length) + ' bytes of data saved to the server.') # class RPCMethods: # """ Defines the methods that can be RPCed. # NOTE: Do not allow remote callers access to private/protected "_*" methods. # """ # # def Save(self, *args): # # # #return len(args[0]) # return ''.join(args) + ' bytes of data saved to server.' class BlokField(db.Model): author = db.StringProperty(required=True)# db.UserProperty() title = db.StringProperty(required=True) field = db.StringProperty(required=True) datetime = db.DateTimeProperty(required=True, auto_now_add=True) revision = db.FloatProperty(required=True) blockcount = db.IntegerProperty(required=True) repo = db.StringProperty(required=True) def main(): app = webapp.WSGIApplication([ ('/', MainPage), ('/save', SaveHandler), # ('/load', LoadHandler), ], debug=True) util.run_wsgi_app(app) if __name__ == '__main__': main()
I continue to find the Puffin a delightful source of inspiration. Its comical and wistful clown's face tops a small, plump body packed with energy. One day I may find a different way of depicting the Puffin other than in a standing pose, but the charm of this little bird seems best expressed through a static pose. An edition of twelve of this bronze Puffin sculpture.
# -*- coding: utf-8 -*- # Copyright (C) 2014-present Taiga Agile LLC # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from django.db.models import Q from django.apps import apps from datetime import timedelta from django.utils import timezone from taiga.front.templatetags.functions import resolve from .base import Sitemap class MilestonesSitemap(Sitemap): def items(self): milestone_model = apps.get_model("milestones", "Milestone") # Get US of public projects OR private projects if anon user can view them and us and tasks queryset = milestone_model.objects.filter(Q(project__is_private=False) | Q(project__is_private=True, project__anon_permissions__contains=["view_milestones", "view_us", "view_tasks"])) queryset = queryset.exclude(name="") # Exclude blocked projects queryset = queryset.filter(project__blocked_code__isnull=True) # Project data is needed queryset = queryset.select_related("project") return queryset def location(self, obj): return resolve("taskboard", obj.project.slug, obj.slug) def lastmod(self, obj): return obj.modified_date def changefreq(self, obj): if (timezone.now() - obj.modified_date) > timedelta(days=90): return "monthly" return "weekly" def priority(self, obj): return 0.1
This celebration was everything an elopement should be. Exciting, emotional and above all else fun. These two laughed together and enjoyed every single moment of the day with each other. When we sat down for a little post ceremony prosecco, Danna told me that in every great relationship there is one person who is the kite, and the other who is the anchor. The kite is a little crazier & likes to be out there "flying" and needs to be grounded by the anchor. The anchor person on the other hand, is more stable and needs the "kite" to let loose and fly. Kite and anchor or not I don't think I have ever met two people who balance and complement each other as perfectly as the two of you.
from error_utils import check_is_unit_vector from numpy import pi, array, dot, dtype, concatenate from numpy.linalg import norm class Ray(object): """A ray, free from paraxial assumptions. """ def __init__(self, position, wavevector_unit, wavelength, energy=1): self.position = array(position, dtype=dtype(float)) self.wavevector_unit = array(wavevector_unit, dtype=dtype(float)) self.wavelength_vac = wavelength self.energy = energy @property def wavevector_unit(self): return self._wavevector_unit @wavevector_unit.setter def wavevector_unit(self, v): check_is_unit_vector(v) # useful for error checking but slow! self._wavevector_unit = array(v, dtype=dtype(float)) @property def wavevector_vac_mag(self): return 2 * pi / self.wavelength_vac @wavevector_vac_mag.setter def wavevector_vac_mag(self, v): self.wavelength_vac = 2 * pi / v @property def wavevector_vac(self): return self.wavevector_vac_mag * self.wavevector_unit @wavevector_vac.setter def wavevector_vac(self, v): self.wavevector_vac_mag = norm(v) self.wavevector_unit = array(v, dtype=dtype(float)) / self.wavevector_vac_mag def propagate_free_space(self, distance): self.position += self.wavevector_unit * distance def propagate_to_plane(self, point_on_plane, normal_to_plane): from_ray_to_point = point_on_plane - self.position distance = dot(from_ray_to_point, normal_to_plane) / dot(self.wavevector_unit, normal_to_plane) self.propagate_free_space(distance) def propagate_from_plane_to_plane(self, plane_z_separation, normal_to_first, normal_to_second): """Move ray from one plane to the next. Useful for moving between AOD surfaces in AolFull. """ point_on_first_plane = self.position z_displacement_from_point_to_origin = dot(point_on_first_plane[0:2], normal_to_first[0:2]) / normal_to_first[2] displacement_from_point_to_origin = concatenate( (-point_on_first_plane[0:2], [z_displacement_from_point_to_origin]) ) # assumes all AODs are rotated about (x,y)=(0,0), in future would be faster and more realistic to use an AOD centre property point_on_second_plane = point_on_first_plane + displacement_from_point_to_origin + [0,0,plane_z_separation] self.propagate_to_plane(point_on_second_plane, normal_to_second) def propagate_free_space_z(self, distance): """Move ray a given distance in the z-direction. Used only in AolSimple. """ self.propagate_to_plane(self.position + [0,0,distance], [0,0,1])
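A minimal usage sketch of the Ray class above (the numbers are illustrative, not from the original source):

# Launch a ray along +z, advance it 1 mm, then land it on the plane z = 5 mm.
ray = Ray(position=[0., 0., 0.], wavevector_unit=[0., 0., 1.], wavelength=800e-9)
ray.propagate_free_space(1e-3)
ray.propagate_to_plane([0., 0., 5e-3], [0., 0., 1.])
print(ray.position)  # -> [0.    0.    0.005]

Because propagate_to_plane uses a ratio of dot products, the plane normal does not strictly have to be a unit vector; wavevector_unit, by contrast, is checked on assignment by check_is_unit_vector.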
Engage the enemy with the LEGO Star Wars 75211 Imperial TIE Fighter! This brick-built version of the Galactic Empire’s iconic attack craft has a highly detailed design, including sturdy wings, opening minifigure cockpit and access hatch, and 2 spring-loaded shooters. This great building toy Star Wars gift also includes 4 LEGO Star Wars figures, including Han Solo and his accomplice Tobias Beckett. Build the iconic Star Wars Imperial TIE Fighter from the Solo: A Star Wars Story movie! 519 pieces – LEGO brick building set for boys and girls between the ages of 9 and 14.
#!/usr/bin/env python # -*- encoding: iso-8859-15 -*- # Use of this software is subject to the terms specified in the LICENCE # file included in the distribution package, and also available via # https://github.com/purcell/dbdoc # # # Provides support for reading and writing Java-style properties files # __author__ = 'Steve Purcell <stephen_purcell at yahoo dot com>' __version__ = "$Revision: 1.2 $"[11:-2] import UserDict, re, string, os class Properties(UserDict.UserDict): """All-purpose wrapper for properties files. Handles most sanely- formatted property entries. Does not support special characters in property names, but does support them in values. """ PROPERTY_RE = re.compile(r'^\s*([\w\.\-]+)\s*=\s*(\"?)(.*)\2$') ATTR_KEYS = () # keys that will look like instance attributes # translations for \-escaped characters _LOAD_TRANSLATIONS = {'=':'=', ':':':', ' ':' ', 't':'\t', 'r':'\r', 'n':'\n', 'f':'\f', '#':'#', '!':'!', '\\':'\\'} _SAVE_TRANSLATIONS = {} for k, v in _LOAD_TRANSLATIONS.items(): _SAVE_TRANSLATIONS[v] = k known_keys = {} # forward def to stop setattr and getattr complaining def __init__(self): self.data = {} self.known_keys = {} for key in self.ATTR_KEYS: self.known_keys[key] = 1 def save(self, stream): items = self.items() items.sort() for key, value in items: stream.write("%s=%s%s" % (key, self.escape_value(value), os.linesep)) def __getattr__(self, attr): if self.known_keys.has_key(attr): try: return self[attr] except KeyError: pass raise AttributeError, attr def __setattr__(self, attr, value): if self.known_keys.has_key(attr): self[attr] = value else: self.__dict__[attr] = value def unescape_value(self, value): chars = [] i = 0 while i < len(value): c = value[i] if c == '\\': i = i + 1 c = value[i] replacement = self._LOAD_TRANSLATIONS.get(c, None) if replacement: chars.append(replacement) i = i + 1 elif c == 'u': code = value[i+1:i+5] if len(code) != 4: raise ValueError, "illegal unicode escape sequence" chars.append(chr(string.atoi(code, 16))) i = i + 5 else: raise ValueError, "unknown escape \\%s" % c else: chars.append(c) i = i + 1 return string.join(chars, '') def escape_value(self, value): chars = [] for c in value: replacement = self._SAVE_TRANSLATIONS.get(c, None) if replacement: chars.append("\\%s" % replacement) elif ord(c) < 0x20 or ord(c) > 0x7e: chars.append("\\u%04X" % ord(c)) else: chars.append(c) return string.join(chars, '') def load(self, stream): while 1: line = stream.readline() if not line: break m = self.PROPERTY_RE.match(line) if m: name, quote, value = m.groups() self[name] = self.unescape_value(value) ############################################################################## # A sprinkling of test code that runs when the module is imported or executed ############################################################################## def test(): def checkmatch(regex, s, groups): match = regex.match(s) assert match, "failed on %s" % s assert (match.groups() == groups), str(match.groups()) regex = Properties.PROPERTY_RE checkmatch(regex, 'blah=foo\n', ('blah','','foo')) checkmatch(regex, ' blah = "foo"\n', ('blah','"','foo')) checkmatch(regex, ' blah = "foo "\n', ('blah','"','foo ')) checkmatch(regex, ' blah = "foo "\n', ('blah','"','foo ')) ## Trailing comments are not legal #checkmatch(regex, ' blah = "foo" # blah\n', ('blah','"','foo')) #checkmatch(regex, ' blah = "fo\\"o" # blah\n', ('blah','"','fo\\"o')) #checkmatch(regex, ' blah = fo\\"o # blah\n', ('blah','','fo\\"o')) p = Properties() from StringIO import StringIO unquoted = 
'!"§$%&/()=?ßµ' quoted = '\!"\u00A7$%&/()\=?\u00DF\u00B5' i = StringIO('key=%s\n' % quoted) p.load(i) assert p['key'] == unquoted o = StringIO() p.save(o) assert o.getvalue() == 'key=%s\n' % quoted if __name__ == '__main__': test()
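A round-trip usage sketch for the Properties class (Python 2, mirroring the embedded test; the key and value are illustrative):

from StringIO import StringIO
props = Properties()
props.load(StringIO('greeting=hello\n'))
print props['greeting']  # -> hello
out = StringIO()
props.save(out)
print out.getvalue()     # -> 'greeting=hello' terminated with os.linesep

save() sorts entries by key and escapes specials on the way out, so values containing spaces, '=' or non-ASCII characters are written back with backslash or \uXXXX escapes rather than verbatim.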
Elegant & contemporary corner home on a quiet street in the desirable SW community of Glenbrook! A well-thought-out, luminous & spacious floor plan w/ numerous special features: 9’ & 10’ ceilings, custom millwork, plank high-grade laminate flooring, unique light fixtures & abundance of pot lighting, excellent soundproofing (never hear the neighbours), gourmet kitchen w/ gas stove, wine fridge & quartz countertops, spacious mudroom, 3 bdrms up w/ luxurious master including large walk-in closet (plenty of storage), ensuite w/ free-standing tub/skylights/dbl sinks (all bdrms have blackout blinds), convenient upper flr laundry w/ useful cabinetry, unwind in lower level media room (w/ wet bar & beverage fridge), appreciate the huge storage rm & 4th bdrm, potential to install A/C if so desired, detached garage w/ commercial grade epoxy floor, workbench, ski & bike racks. Private WEST-facing yard w/ beautiful tulip bed. Outstanding location: minutes to Downtown, walk to LRT/parks/restaurants/schools & more!
"""rlocus_test.py - unit test for root locus diagrams RMM, 1 Jul 2011 """ import matplotlib.pyplot as plt import numpy as np from numpy.testing import assert_array_almost_equal import pytest import control as ct from control.rlocus import root_locus, _RLClickDispatcher from control.xferfcn import TransferFunction from control.statesp import StateSpace from control.bdalg import feedback @pytest.mark.usefixtures("mplcleanup") class TestRootLocus: """These are tests for the feedback function in rlocus.py.""" @pytest.fixture(params=[pytest.param((sysclass, sargs + (dt, )), id=f"{systypename}-{dtstring}") for sysclass, systypename, sargs in [ (TransferFunction, 'TF', ([1, 2], [1, 2, 3])), (StateSpace, 'SS', ([[1., 4.], [3., 2.]], [[1.], [-4.]], [[1., 0.]], [[0.]])), ] for dt, dtstring in [(0, 'ctime'), (True, 'dtime')] ]) def sys(self, request): """Return some simple LTI systems for testing""" # avoid construction during collection time: prevent unfiltered # deprecation warning sysfn, args = request.param return sysfn(*args) def check_cl_poles(self, sys, pole_list, k_list): for k, poles in zip(k_list, pole_list): poles_expected = np.sort(feedback(sys, k).pole()) poles = np.sort(poles) np.testing.assert_array_almost_equal(poles, poles_expected) def testRootLocus(self, sys): """Basic root locus (no plot)""" klist = [-1, 0, 1] roots, k_out = root_locus(sys, klist, plot=False) np.testing.assert_equal(len(roots), len(klist)) np.testing.assert_allclose(klist, k_out) self.check_cl_poles(sys, roots, klist) def test_without_gains(self, sys): roots, kvect = root_locus(sys, plot=False) self.check_cl_poles(sys, roots, kvect) @pytest.mark.parametrize('grid', [None, True, False]) def test_root_locus_plot_grid(self, sys, grid): rlist, klist = root_locus(sys, grid=grid) ax = plt.gca() n_gridlines = sum([int(line.get_linestyle() in [':', 'dotted', '--', 'dashed']) for line in ax.lines]) if grid is False: assert n_gridlines == 2 else: assert n_gridlines > 2 # TODO check validity of grid def test_root_locus_warnings(self): sys = TransferFunction([1000], [1, 25, 100, 0]) with pytest.warns(FutureWarning, match="Plot.*deprecated"): rlist, klist = root_locus(sys, Plot=True) with pytest.warns(FutureWarning, match="PrintGain.*deprecated"): rlist, klist = root_locus(sys, PrintGain=True) def test_root_locus_neg_false_gain_nonproper(self): """ Non proper TranferFunction with negative gain: Not implemented""" with pytest.raises(ValueError, match="with equal order"): root_locus(TransferFunction([-1, 2], [1, 2])) # TODO: cover and validate negative false_gain branch in _default_gains() def test_root_locus_zoom(self): """Check the zooming functionality of the Root locus plot""" system = TransferFunction([1000], [1, 25, 100, 0]) plt.figure() root_locus(system) fig = plt.gcf() ax_rlocus = fig.axes[0] event = type('test', (object,), {'xdata': 14.7607954359, 'ydata': -35.6171379864, 'inaxes': ax_rlocus.axes})() ax_rlocus.set_xlim((-10.813628105112421, 14.760795435937652)) ax_rlocus.set_ylim((-35.61713798641108, 33.879716621220311)) plt.get_current_fig_manager().toolbar.mode = 'zoom rect' _RLClickDispatcher(event, system, fig, ax_rlocus, '-') zoom_x = ax_rlocus.lines[-2].get_data()[0][0:5] zoom_y = ax_rlocus.lines[-2].get_data()[1][0:5] zoom_y = [abs(y) for y in zoom_y] zoom_x_valid = [ -5., - 4.61281263, - 4.16689986, - 4.04122642, - 3.90736502] zoom_y_valid = [0., 0., 0., 0., 0.] 
        assert_array_almost_equal(zoom_x, zoom_x_valid)
        assert_array_almost_equal(zoom_y, zoom_y_valid)

    @pytest.mark.timeout(2)
    def test_rlocus_default_wn(self):
        """Check that default wn calculation works properly"""
        # System that triggers use of y-axis as basis for wn (for coverage)
        #
        # This system generates a root locus plot that used to cause the
        # creation (and subsequent deletion) of a large number of natural
        # frequency contours within the `_default_wn` function in `rlocus.py`.
        # This unit test makes sure that is fixed by generating a test case
        # that will take a long time to do the calculation (minutes).
        import scipy as sp
        import scipy.signal  # ensure sp.signal actually resolves

        # Define a system that exhibits this behavior
        sys = ct.tf(*sp.signal.zpk2tf(
            [-1e-2, 1-1e7j, 1+1e7j], [0, -1e7j, 1e7j], 1))
        ct.root_locus(sys)
We all want to develop our community in one way or another. Be part of this professional and responsible setting: by connecting with the WF Business Networking community you can draw on service providers from other disciplines who have already joined the network, and benefit in turn from referrals by existing members. Join us in actively promoting the use of community business within the community, by way of advertisements, publicity and a web-based index.
from __future__ import division
import numpy as np
from numpy.random import rand, poisson
from sys import argv

# based on:
# Hanley & MacGibbon (2006) (http://www.ncbi.nlm.nih.gov/pubmed/16730851)
# and
# http://www.unofficialgoogledatascience.com/2015/08/an-introduction-to-poisson-bootstrap_26.html
# see also: https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Weighted_incremental_algorithm

def increment(x, reps, ns, ms):
    counts = poisson(1, reps)
    temp = ns + counts
    deltas = x - ms
    # Guard on the updated count t rather than the old count n: t == 0 only
    # when a replicate has seen no samples at all, whereas guarding on n
    # would silently drop the first observation in every replicate.
    Rs = [d * c / t if t > 0 else 0
          for n, c, t, d in zip(ns, counts, temp, deltas)]
    return (Rs, deltas, temp)

def onlineMeanVarBoot(xs, reps):
    ns = np.zeros(reps, dtype=np.int)
    ms = np.zeros(reps)
    M2s = np.zeros(reps)
    for x in xs:
        Rs, deltas, temp = increment(x, reps, ns, ms)
        ms += Rs
        M2s += ns * deltas * Rs
        ns = temp
    if np.min(ns) < 2:
        return np.nan
    else:
        return M2s / ns

if __name__ == "__main__":
    test = rand(500)
    testBoot = onlineMeanVarBoot(test, 4000)
    print "numpy est: %s, boot est: %s" % (np.var(test), np.mean(testBoot))
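As a quick sanity check (hypothetical, not part of the original script), the Poisson bootstrap can be compared against a classic resample-with-replacement bootstrap on the same data; since Poisson(1) counts approximate multinomial resampling weights, the two variance distributions should agree closely:

import numpy as np
from numpy.random import rand, choice

data = rand(500)
naive = [np.var(choice(data, size=len(data), replace=True)) for _ in range(4000)]
print "naive bootstrap mean of variances: %s" % np.mean(naive)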
I love Fridays! American Apparel sweatshirt from the “days of the week”. Today’s face is a Happy Friday! This is a unisex size so women should order a size smaller. This days of the week sweatshirt is super comfortable and cosy for winter. Good quality material that needs no ironing.
import urllib2
import json
import MySQLdb

# Function to fetch json of reddit front page
def fetch():
    link = "https://www.reddit.com/.json"
    # Get the text version
    text = urllib2.urlopen(link).read()
    # Turn it into a dictionary
    data = json.loads(text)
    return data

# Returns a list of tuples of titles and links
def extract_links(data):
    data = data["data"]['children']
    output = []
    for post in data:
        link = post['data']['url']
        title = post['data']['title']
        output.append((title, link))
    return output

# Puts the data into the MySQL database defined in tables.sql
def store(data):
    host = "localhost"
    user = "root"
    passwd = "adsauiuc"
    db = "adsa"
    db = MySQLdb.connect(host=host, user=user, passwd=passwd, db=db)
    # Creates a cursor that can execute SQL commands
    cursor = db.cursor()
    table = "reddit"
    columns = "link, title"
    # The tuples are (title, link) but the column order is (link, title),
    # so bind the parameters accordingly.
    for title, link in data:
        sql = """ INSERT INTO reddit ( link, title ) VALUES ( %s, %s ) """
        cursor.execute(sql, (link.encode("latin-1", "replace"),
                             title.encode("latin-1", "replace")))
    # Commit the changes only after all have succeeded without errors
    db.commit()
    # Always close the connection
    db.close()

if __name__ == "__main__":
    data = fetch()
    links = extract_links(data)
    store(links)
Lackawanna Cutoff project ready to roll? This map shows the route of the proposed Lackawanna Cutoff project to reinstitute passenger rail service for Warren and Sussex counties. New Jersey Transit and Andover Township have reached an agreement for the reimbursement of any land acquisition and legal costs associated with employing eminent domain of private property to push the Lackawanna Cutoff project forward. ANDOVER TOWNSHIP -- An agreement has been reached between New Jersey Transit and the township for reimbursement related to any land acquisition and legal costs associated with employing eminent domain of private property in order to advance the Lackawanna Cutoff railroad extension project. In March, the Township Committee unanimously approved an ordinance that authorized the acquisition of private property through purchase or eminent domain in order to execute a temporary construction order. A memorandum of understanding obtained through an Open Public Records Act request was executed on Nov. 10 and signed by Andover Township Mayor Tom Walsh, New Jersey Transit Assistant Executive Director Eric Daleo and Deputy Attorney General Martin Gill. According to the memorandum, NJ Transit will be responsible for "all costs and fees that the township may incur as a result" of acquiring the land. Last week, Walsh and other township officials, including the engineer and legal counsel, participated in a closed-door meeting with representatives from NJ Transit, the state Attorney General's office, the 24th District Legislative office and the attorney for the private property owner, AIT Reinsurance, which operates Hudson Farm West off Roseville Road. The state Department of Environmental Protection is requiring work on a culvert under the driveway of Hudson Farm West because computer models show the train station could be in danger of flooding in the event of a "hundred-year storm." A spokesperson for NJ Transit said that the board of directors approved an additional $800,000 in August for the Lackawanna Cutoff project to be used for "design and the DEP permit amendment" on the culvert. "The general approach is to work with the (state) DEP to develop a plan that will minimize the impact on private property," said Nancy Snyder, spokesperson for NJ Transit. "We continue to have discussions with the town and a private landowner to move the project forward. The goal for completion of construction is still 2020." Walsh said last week's meeting was productive and he is hopeful that an agreement can be reached with Hudson Farm West without having to resort to drastic measures such as eminent domain. A teleconference meeting is tentatively scheduled for early next year, Walsh said. "There's (still) a couple of small hurdles, but everybody seems to be pulling in the same direction," the mayor said. "I'm feeling very positive about this right now." Walsh said discussions at the Nov. 14 meeting involved, among other aspects, including a settlement for Hudson Farm to construct a stone wall, fence or similar facade at the driveway entrance. The culvert project would result in the removal of several decades-old oak trees, which currently stand near the property's entrance. "It makes sense," Walsh said of the very preliminary settlement discussion. "They'll have a nice entrance. And it's fair. But I like that (they're) willing to work (with us)." John Ursin, an attorney with Schenck, Price, Smith and King in Sparta who represents Hudson Farm, declined comment on either the memorandum or the closed door meeting. 
In 2011, NJ Transit, the largest statewide public transit system in the United States, launched the Lackawanna Cutoff Restoration Project. The first phase of the project is a 7.3-mile rail extension from Port Morris to a new train station in Andover Township. Roughly 5.25 miles of new railroad track has been placed by NJ Transit between Port Morris and the site of the proposed Andover station. The $61.6 million project includes construction of a high-level platform station with 55 parking spaces in an adjacent lot. The Andover extension will be funded by the Federal Transit Administration and the state Transportation Trust Fund. To date, the project has been partially funded with a federal earmark grant of $18.1 million. David Danzis can also be contacted on Facebook: ddanzisNJH, on Twitter: @ddanzisNJH, or by phone: 973-383-1274.
#!/usr/bin/python2.4
# Copyright 2008 Google Inc.
# Author : Anoop Chandran <[email protected]>
#
# openduckbill is a simple backup application. It offers support for
# transferring data to a local backup directory, NFS. It also provides
# file system monitoring of directories marked for backup. Please read
# the README file for more details.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.

"""Helper class, does command execution and returns value.

This class has the method RunCommandPopen which executes commands passed to
it and returns the status.
"""

import os
import subprocess
import sys


class CommandHelper:
  """Run command and return status, either using Popen or call"""

  def __init__(self, log_handle=''):
    """Initialise logging state

    Logging enabled in debug mode.

    Args:
      log_handle: Object - a handle to the logging subsystem.
    """
    self.logmsg = log_handle
    if self.logmsg.debug:
      self.stdout_debug = None
      self.stderr_debug = None
    else:
      self.stdout_debug = 1
      self.stderr_debug = 1

  def RunCommandPopen(self, runcmd):
    """Uses subprocess.Popen to run the command.

    Also prints the command output if being run in debug mode.

    Args:
      runcmd: List - path to executable and its arguments.

    Returns:
      runretval: Integer - exit value of the command, after execution.
    """
    stdout_val = self.stdout_debug
    stderr_val = self.stderr_debug
    if stdout_val:
      stdout_l = file(os.devnull, 'w')
    else:
      stdout_l = subprocess.PIPE
    if stderr_val:
      stderr_l = file(os.devnull, 'w')
    else:
      stderr_l = subprocess.STDOUT
    try:
      run_proc = subprocess.Popen(runcmd, bufsize=0,
                                  executable=None, stdin=None,
                                  stdout=stdout_l, stderr=stderr_l)
      if self.logmsg.debug:
        output = run_proc.stdout
        while 1:
          line = output.readline()
          if not line:
            break
          line = line.rstrip()
          self.logmsg.logger.debug("Command output: %s" % line)
      run_proc.wait()
      runretval = run_proc.returncode
    except OSError, e:
      self.logmsg.logger.error('%s', e)
      runretval = 1
    except KeyboardInterrupt, e:
      self.logmsg.logger.error('User interrupt')
      sys.exit(1)

    if stdout_l:
      pass
      #stdout_l.close()
    if stderr_l:
      pass
      #stderr_l.close()
    return runretval
We would love the opportunity to talk with you and get to know your family. If you would like more information or to schedule an appointment, please fill out this form and a member of our leadership team will get back to you very soon! Our school is located at 42350 Tall Cedars Parkway South Riding, VA 20152 at the intersection of Gum Spring Road and Tall Cedars Parkway.
#!/usr/bin/env python

import crawler2 as crawler

import BaseHTTPServer
import logging
import os
import SimpleHTTPServer
import threading
import unittest


TEST_BASE_PATH = 'test/sites/'

LISTEN_ADDRESS = '127.0.0.1'
LISTEN_PORT = 4566
BASE_URL = 'http://%s:%d/test/sites/' % (LISTEN_ADDRESS, LISTEN_PORT)

EXT_LISTEN_ADDRESS = '127.0.0.1'
EXT_LISTEN_PORT = 80
EXT_BASE_URL = 'http://%s:%d/test/sites/' % (EXT_LISTEN_ADDRESS,
                                             EXT_LISTEN_PORT)


class LocalCrawlerTest(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        cls.server = BaseHTTPServer.HTTPServer(
            (LISTEN_ADDRESS, LISTEN_PORT),
            SimpleHTTPServer.SimpleHTTPRequestHandler)
        cls.server_thread = threading.Thread(target=cls.server.serve_forever)
        cls.server_thread.start()

    @classmethod
    def tearDownClass(cls):
        cls.server.shutdown()

    def setUp(self):
        self.ff = crawler.FormFiller()
        self.e = crawler.Engine(self.ff, None)

    def test_single_page(self):
        url = BASE_URL + 'single/single.html'
        e = self.e
        e.main([url])
        self.assertIsInstance(e.cr.headreqresp.response.page, crawler.Page)
        self.assertEqual(len(e.cr.headreqresp.response.page.links), 1)


class ExtCrawlerTest(unittest.TestCase):

    def setUp(self):
        self.ff = crawler.FormFiller()
        self.e = crawler.Engine(self.ff, None)

    def test_single_page(self):
        url = EXT_BASE_URL + 'single/single.html'
        e = self.e
        e.main([url])
        self.assertIsInstance(e.cr.headreqresp.response.page, crawler.Page)
        self.assertEqual(len(e.cr.headreqresp.response.page.links), 1)

    def test_absolute_urls(self):
        url = EXT_BASE_URL + 'absolute_urls/index.php'
        e = self.e
        e.main([url])
        self.assertEqual(len(e.ag.absrequests), 2)
        self.assertEqual(len(e.ag.abspages), 2)
        urls = set(r.split('/')[-1] for ar in e.ag.absrequests
                   for r in ar.requestset)
        self.assertEqual(set(['link.php', 'index.php']), urls)

    def test_simple(self):
        # Truncate status files
        fd = os.open(TEST_BASE_PATH + '/simple/pages.data',
                     os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
        os.fchmod(fd, 0666)
        os.close(fd)
        fd = os.open(TEST_BASE_PATH + '/simple/pages.lock',
                     os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
        os.fchmod(fd, 0666)
        os.close(fd)
        url = EXT_BASE_URL + 'simple/index.php'
        e = self.e
        e.main([url])
        self.assertEqual(len(e.ag.absrequests), 4)
        urls = set(r.split('/')[-1] for ar in e.ag.absrequests
                   for r in ar.requestset)
        for url in set(['viewpage.php?id=%d' % i for i in range(9)] +
                       ['addpage.php', 'index.php', 'static.php']):
            self.assertTrue(url in urls)
        self.assertEqual(len(e.ag.abspages), 4)
        self.assertTrue(e.ag.nstates == 2)

    def test_500_error(self):
        url = EXT_BASE_URL + '/500/index.php'
        e = self.e
        e.main([url])

    def test_empty_form(self):
        url = EXT_BASE_URL + '/form_empty_params/index.php'
        e = self.e
        e.main([url])
        self.assertEqual(len(e.ag.absrequests), 3)

    def test_wackopicko_form(self):
        url = EXT_BASE_URL + '/wackopicko_registration_form/index.php'
        e = self.e
        e.main([url])

    def test_changing_state(self):
        os.chmod(TEST_BASE_PATH + '/changing_state', 0777)
        try:
            os.unlink(TEST_BASE_PATH + '/changing_state/.lock')
        except OSError:
            pass
        try:
            os.unlink(TEST_BASE_PATH + '/changing_state/a')
        except OSError:
            pass
        url = EXT_BASE_URL + '/changing_state/index.php'
        e = self.e
        e.main([url])
        self.assertEqual(len(e.ag.absrequests), 4)
        urls = set(r.split('/')[-1] for ar in e.ag.absrequests
                   for r in ar.requestset)
        self.assertEqual(set(['a.php', 'b.php', 'index.php',
                              'changestate.php']), urls)
        self.assertEqual(len(e.ag.abspages), 4)
        self.assertEqual(e.ag.nstates, 2)

    def test_traps(self):
        url = EXT_BASE_URL + '/traps/root.html'
        e = self.e
        e.main([url])
        self.assertTrue(len(e.ag.absrequests) >= 12)
        urls = set(r.split('/')[-1] for ar in e.ag.absrequests
                   for r in ar.requestset)
        want_to_see = set(['a.html', 'a1.html', 'a2.html', 'b.html',
                           'b1.html', 'dead1.html', 'dead2.html',
                           'private.php', 'root.html'] +
                          ['trap.php?input=%d' % i for i in range(1, 19)] +
                          ['trap2.php?input=%d' % i for i in range(1, 16)])
        for url in want_to_see:
            self.assertTrue(url in urls)
        self.assertEqual(len(e.ag.abspages), 11)
        self.assertEqual(e.ag.nstates, 1)


if __name__ == '__main__':
    #logging.basicConfig(level=logging.DEBUG)
    unittest.main()
Sunday afternoon my family and I drove down the road to the little village of Shere. It's a beautiful village and one of our favourites in the area; it's lovely to go for a wander and have a drink in The White Horse pub on a summer's eve. We took Dylan with us, and as soon as he saw the water he ran towards it and leapt in. It was boiling hot, and standing on the bank watching him splash around in the cool stream was torture; eventually I couldn't take it any more and jumped in after him! We spent ages playing in the water and on the riverbank; Mum and I threw sticks for Dylan, then watched him leap into the water to fetch them. It was hilarious watching him chase them down the stream trying to grab them, and at one point he was so desperate for his stick that he put his whole head under the water to try and get a grip on it! When Dylan had had enough of splashing in the water and just wanted to sit and chew on his stick, we walked down to the pub and sat in the front garden with our drinks, people-watching and letting the world go by. It was pure bliss being back in the countryside after living in London for the last three weeks, and it was the perfect relaxing end to a wonderful weekend of festival-going. Taking dogs to the water is so adorable; my dogs are lazy piglets that don't like swimming, so I thoroughly enjoyed these!
import os
import logging
import zipfile
import io

EXPORT_COMMANDS = [('status', 'report status of this repository.')]


def load(options, readonly=False):
    assert 'path' in options

    if 'extensions' in options:
        allowed_extensions = options['extensions'].split(',')
    else:
        allowed_extensions = None

    # Note: bool() on a non-empty string is always True, so any value
    # (even "0" or "false") enables these flags; only their presence matters.
    if 'recursive' in options:
        recursive = bool(options['recursive'])
    else:
        recursive = True

    if 'unzip' in options:
        unzip = bool(options['unzip'])
    else:
        unzip = False

    return FileRepository(options['path'],
                          allowedExtensions=allowed_extensions,
                          recursive=recursive,
                          unzip=unzip)


class FileRepository:

    def __init__(self, path, allowedExtensions=None, recursive=True,
                 unzip=False):
        self.root = path
        self.allowed_extensions = allowedExtensions
        self.recursive = recursive
        self.unzip = unzip

    def __enter__(self):
        # return self so "with load(...) as repo:" works
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        pass

    def status(self):
        print("Folder at {}".format(self.root))

    def iterfiles(self):
        """Iterate files in this file repository.

        Returns a generator of 3-tuples, containing a handle, filename and
        file extension of the current opened file.
        """
        logging.debug("Iterating files in %s", self.root)
        failed_files = []

        for root, _, files in os.walk(self.root):
            # iterate files, filtering only allowed extensions
            for filename in files:
                _, ext = os.path.splitext(filename)
                if self.allowed_extensions is None or ext in self.allowed_extensions:
                    # hitting errors with decoding the data; iso-8859-1 seems to sort it
                    with open(os.path.join(root, filename), 'r',
                              encoding='iso-8859-1') as fp:
                        yield (fp, filename, ext)
                # zip file auto-extract
                elif self.unzip and ext == '.zip':
                    try:
                        with zipfile.ZipFile(os.path.join(root, filename), 'r') as z:
                            for zname in z.namelist():
                                _, ext = os.path.splitext(zname)
                                if self.allowed_extensions is None or ext in self.allowed_extensions:
                                    with z.open(zname, 'r') as fp:
                                        # zipfile returns a binary file, so we
                                        # require a TextIOWrapper to decode it
                                        yield (io.TextIOWrapper(fp, encoding='iso-8859-1'),
                                               zname, ext)
                    except (zipfile.BadZipFile, RuntimeError) as error:
                        logging.warning("Unable to extract zip file %s: %s",
                                        filename, error)
                        failed_files.append(filename)

            # stop after first iteration if not recursive
            if not self.recursive:
                break

        if len(failed_files) > 0:
            logging.warning("Skipped %d files due to errors: %s",
                            len(failed_files), repr(failed_files))

    def close(self):
        pass
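A short usage sketch (the path and options are illustrative, not part of the module):

import logging
logging.basicConfig(level=logging.DEBUG)

repo = load({'path': '/tmp/data', 'extensions': '.txt,.csv', 'unzip': '1'})
repo.status()
for handle, filename, ext in repo.iterfiles():
    print(filename, ext, len(handle.read()))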
0:01:40 We switched it up, we've been alternating hostfuls and guest episodes.
0:02:10 Bryn's show "Lisa and Her Things" had a one-night-only revival at Intar Theatre, on January 13.
0:06:32 Growing up, Laura thought she'd have ten kids and/or an orphanage, but then wasn't sure when she got older.
0:01:40 Laura met Jason at his theatre company "Three Cat Productions", which he created to make a home for artists to come and take risks with their work, beginning with the "Three Cat Variety Hour".
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
import numpy as np

from MDAnalysisTests import datafiles
from MDAnalysisTests.datafiles import (PDB_small, PDB, LAMMPSdata,
                                       LAMMPSdata2, LAMMPSdcd2,
                                       LAMMPSdata_mini,
                                       PSF_TRICLINIC, DCD_TRICLINIC,
                                       PSF_NAMD_TRICLINIC, DCD_NAMD_TRICLINIC)


class RefAdKSmall(object):
    """Mixin class to provide comparison numbers.

    Based on small PDB with AdK (:data:`PDB_small`).

    .. Note::

       All distances must be in ANGSTROEM as this is the MDAnalysis
       default unit. All readers must return Angstroem by default.
    """
    filename = datafiles.PDB_small
    ref_coordinates = {
        # G11:CA, copied from adk_open.pdb
        'A10CA': np.array([-1.198, 7.937, 22.654]),
    }
    ref_distances = {'endtoend': 11.016959}
    ref_E151HA2_index = 2314
    ref_n_atoms = 3341
    ref_charmm_totalcharge = -4.0
    ref_charmm_Hcharges = [0.33] + 203 * [0.31]
    ref_charmm_ArgCAcharges = 13 * [0.07]
    ref_charmm_ProNcharges = 10 * [-0.29]
    ref_unitcell = np.array([80.017, 80.017, 80.017, 60., 60., 90.],
                            dtype=np.float32)
    ref_volume = 0.0


class RefAdK(object):
    """Mixin class to provide comparison numbers.

    Based on PDB/GRO with AdK in water + Na+ (:data:`PDB`).

    .. Note::

       All distances must be in ANGSTROEM as this is the MDAnalysis
       default unit. All readers must return Angstroem by default.
    """
    filename = datafiles.PDB
    ref_coordinates = {
        # Angstroem as MDAnalysis unit!!
        'A10CA': np.array([62.97600174, 62.08800125, 20.2329998]),
    }
    ref_distances = {'endtoend': 9.3513174}
    ref_E151HA2_index = 2314
    ref_n_atoms = 47681
    ref_Na_sel_size = 4
    # CRYST1 80.017  80.017  80.017  60.00  60.00  90.00
    ref_unitcell = np.array([80.017, 80.017, 80.017, 60., 60., 90.],
                            dtype=np.float32)
    #ref_volume = 362270.0  # computed with Gromacs  ## NOT EXACT!
    ref_volume = 362269.520669292


class Ref2r9r(object):
    """Mixin class to provide comparison numbers.

    Based on S6 helices of chimeric Kv channel

    .. Note::

       All distances must be in ANGSTROEM as this is the MDAnalysis
       default unit. All readers must return Angstroem by default.
    """
    ref_n_atoms = 1284
    ref_sum_centre_of_geometry = -98.24146
    ref_n_frames = 10


class RefACHE(object):
    """Mixin class to provide comparison numbers.

    ACHE peptide

    # COM check in VMD::

        set p [atomselect top "not water"]
        set total {0 0 0};
        for {set i 0} {$i < 11} {incr i} {
            $p frame $i; set total [vecadd $total [measure center $p]]}
        puts [vecsum $total]
        # 472.2592159509659
    """
    ref_n_atoms = 252
    ref_proteinatoms = ref_n_atoms
    ref_sum_centre_of_geometry = 472.2592159509659  # 430.44807815551758
    ref_n_frames = 11
    ref_periodic = False


class RefCappedAla(object):
    """Mixin class to provide comparison numbers.

    Capped Ala in water

    # COM check in VMD (load trajectory as *AMBER with periodic box*!)::

        set p [atomselect top "not water"]
        set total {0 0 0};
        for {set i 0} {$i < 11} {incr i} {
            $p frame $i; set total [vecadd $total [measure center $p]]}
        puts [vecsum $total]
        # 686.276834487915
    """
    ref_n_atoms = 5071
    ref_proteinatoms = 22
    ref_sum_centre_of_geometry = 686.276834487915
    ref_n_frames = 11
    ref_periodic = True


class RefVGV(object):
    """Mixin class to provide comparison numbers.

    Computed from bala.trj::

        w = MDAnalysis.Universe(PRMncdf, TRJncdf)
        ref_n_atoms = len(w.atoms)
        ref_proteinatoms = len(w.select_atoms("protein"))
        ref_sum_centre_of_geometry = np.sum(
            [protein.center_of_geometry() for ts in w.trajectory])
    """
    topology = datafiles.PRMncdf
    filename = datafiles.NCDF
    ref_n_atoms = 2661
    ref_proteinatoms = 50
    ref_sum_centre_of_geometry = 1552.9125
    ref_n_frames = 30
    ref_periodic = True


class RefTZ2(object):
    """Reference values for the cpptraj testcase tz2.truncoct.nc

    Used under the GPL v3.
    """
    topology = datafiles.PRM7
    filename = datafiles.NCDFtruncoct
    ref_n_atoms = 5827
    ref_proteinatoms = 217
    ref_sum_centre_of_geometry = -68.575745
    ref_n_frames = 10
    ref_periodic = True


class RefTRZ(object):
    # ref_coordinates = {}
    # ref_distances = {'endtoend': }
    ref_n_atoms = 8184
    ref_dimensions = np.array([55.422830581665039, 55.422830581665039,
                               55.422830581665039, 90., 90., 90.],
                              dtype=np.float32)
    ref_volume = 170241.762765
    ref_n_frames = 6
    ref_coordinates = np.array([72.3163681, -130.31130981, 19.97969055],
                               dtype=np.float32)
    ref_velocities = np.array([[14.83297443, 18.02611542, 6.07733774]],
                              dtype=np.float32)
    ref_delta = 0.001
    ref_time = 0.01
    ref_title = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ12345678901234'
                 'ABCDEFGHIJKLMNOPQRSTUVWXYZ12345678901234')


class RefLAMMPSData(object):
    filename = LAMMPSdata
    n_atoms = 18364
    pos_atom1 = np.array([11.89985657, 48.4455719, 19.09719849],
                         dtype=np.float32)
    vel_atom1 = np.array([-0.005667593, 0.00791380978, -0.00300779533],
                         dtype=np.float32)
    dimensions = np.array([55.42282867, 55.42282867, 55.42282867,
                           90., 90., 90.], dtype=np.float32)


class RefLAMMPSDataDCD(object):
    format = "LAMMPS"
    topology = LAMMPSdata2
    trajectory = LAMMPSdcd2
    n_atoms = 12421
    n_frames = 5
    dt = 0.5  # ps per frame
    mean_dimensions = np.array([50.66186142, 47.18824387, 52.33762741,
                                90., 90., 90.], dtype=np.float32)


class RefLAMMPSDataMini(object):
    filename = LAMMPSdata_mini
    n_atoms = 1
    pos_atom1 = np.array([11.89985657, 48.4455719, 19.09719849],
                         dtype=np.float32)
    vel_atom1 = np.array([-0.005667593, 0.00791380978, -0.00300779533],
                         dtype=np.float32)
    dimensions = np.array([60., 50., 30., 90., 90., 90.], dtype=np.float32)
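A hypothetical sketch of how a reader test might consume one of these mixins; the test class and the Universe calls are illustrative, not part of this module:

import MDAnalysis
from numpy.testing import assert_almost_equal

class TestPDBSmall(RefAdKSmall):
    def test_n_atoms(self):
        u = MDAnalysis.Universe(self.filename)
        assert len(u.atoms) == self.ref_n_atoms

    def test_A10CA_position(self):
        u = MDAnalysis.Universe(self.filename)
        ca = u.select_atoms('resid 10 and name CA')
        assert_almost_equal(ca.positions[0],
                            self.ref_coordinates['A10CA'], decimal=3)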
Maggie: Is Griffin really coming with us?
Duke: The big goober is on my leash.
Griffin: Hey, there's a roof window!
Griffin loves car rides, so he hopped in the back of the Element when we were leaving my parents' farm today. It took a few minutes to convince him he wasn't going; Maggie and Duke had to get out first.
Maggie: If he's in here, I'm out.
Duke: I'll wait for him to get off my leash.
LOL, what a tangle. Hope you had a lovely Memorial Day weekend. Have a terrific Tuesday.
Ha! I could barely see him back there!
LOL, but holy woof, is Griffin big! Yowzah.
We have to play this game too. We have to pretend we are staying home and go in the house to make the damn dogs get out of the van. Then beat them back inside. Griffin, it's a game you will have to get better at.
#!/usr/bin/env python
from __future__ import print_function
"""

    test_mkdir.py

        test mkdir() with formatter()

"""

import os
tempdir = os.path.relpath(os.path.abspath(os.path.splitext(__file__)[0]))
import sys

# add grandparent to search path for testing
grandparent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                               "..", ".."))
sys.path.insert(0, grandparent_dir)

# module name = script name without extension
module_name = os.path.splitext(os.path.basename(__file__))[0]

from ruffus import (pipeline_run, pipeline_printout, transform, split,
                    mkdir, formatter, Pipeline)
from ruffus.ruffus_utility import RUFFUS_HISTORY_FILE, CHECKSUM_FILE_TIMESTAMPS

#88888888888888888888888888888888888888888888888888888888888888888888888888888
#
#   imports
#
#88888888888888888888888888888888888888888888888888888888888888888888888888888

import unittest
import shutil
try:
    from StringIO import StringIO
except ImportError:
    from io import StringIO
import time  # sub-1s resolution in system?


#___________________________________________________________________________
#
#   generate_initial_files1
#___________________________________________________________________________
@split(1, [tempdir + "/" + prefix + "_name.tmp1" for prefix in "abcd"])
def generate_initial_files1(in_name, out_names):
    for on in out_names:
        with open(on, 'w') as outfile:
            pass


#___________________________________________________________________________
#
#   test_product_task
#___________________________________________________________________________
@mkdir(tempdir + "/test1")
@mkdir(tempdir + "/test2")
@mkdir(generate_initial_files1, formatter(),
       ["{path[0]}/{basename[0]}.dir", 3, "{path[0]}/{basename[0]}.dir2"])
@transform(generate_initial_files1, formatter(),
           "{path[0]}/{basename[0]}.dir/{basename[0]}.tmp2")
def test_transform(infiles, outfile):
    with open(outfile, "w") as p:
        pass


@mkdir(tempdir + "/test3")
@mkdir(generate_initial_files1, formatter(), "{path[0]}/{basename[0]}.dir2")
def test_transform2():
    print("    Loose cannon!", file=sys.stderr)


def cleanup_tmpdir():
    os.system('rm -f %s %s' % (os.path.join(tempdir, '*'),
                               RUFFUS_HISTORY_FILE))


class Testmkdir(unittest.TestCase):

    def setUp(self):
        try:
            os.mkdir(tempdir)
        except OSError:
            pass

    #___________________________________________________________________________
    #
    #   test mkdir() pipeline_printout and pipeline_run
    #___________________________________________________________________________
    def test_mkdir_printout(self):
        """Input file exists, output doesn't exist"""
        cleanup_tmpdir()
        s = StringIO()
        pipeline_printout(s, [test_transform, test_transform2], verbose=5,
                          wrap_width=10000, pipeline="main")
        #self.assertIn('Job needs update: Missing files '
        #              '[tmp_test_mkdir/a_name.tmp1, '
        #              'tmp_test_mkdir/e_name.tmp1, '
        #              'tmp_test_mkdir/h_name.tmp1, '
        #              'tmp_test_mkdir/a_name.e_name.h_name.tmp2]',
        #              s.getvalue())

    def test_mkdir_run(self):
        """Run mkdir"""
        # output is up to date, but function body changed (e.g., source different)
        cleanup_tmpdir()
        pipeline_run([test_transform, test_transform2], verbose=0,
                     multiprocess=2, pipeline="main")

    def test_newstyle_mkdir_run(self):
        test_pipeline = Pipeline("test")

        test_pipeline.split(task_func=generate_initial_files1,
                            input=1,
                            output=[tempdir + "/" + prefix + "_name.tmp1"
                                    for prefix in "abcd"])

        test_pipeline.transform(task_func=test_transform,
                                input=generate_initial_files1,
                                filter=formatter(),
                                output="{path[0]}/{basename[0]}.dir/{basename[0]}.tmp2")\
            .mkdir(tempdir + "/test1")\
            .mkdir(tempdir + "/test2")\
            .mkdir(generate_initial_files1, formatter(),
                   ["{path[0]}/{basename[0]}.dir", 3,
                    "{path[0]}/{basename[0]}.dir2"])

        test_pipeline.mkdir(test_transform2, tempdir + "/test3")\
            .mkdir(generate_initial_files1, formatter(),
                   "{path[0]}/{basename[0]}.dir2")
        cleanup_tmpdir()
        pipeline_run([test_transform, test_transform2], verbose=0,
                     multiprocess=2, pipeline="main")

    #___________________________________________________________________________
    #
    #   cleanup
    #___________________________________________________________________________
    def tearDown(self):
        shutil.rmtree(tempdir)


#
#   Necessary to protect the "entry point" of the program under windows.
#       see: http://docs.python.org/library/multiprocessing.html#multiprocessing-programming
#
if __name__ == '__main__':
    unittest.main()
The report indicates an inferred resource of approximately 17 million metric tonnes at a grade of about 1,060 ppm Li over the 455,800 square meters covered by the core drilling program. Vancouver, B.C, July 27, 2017 – Alba Minerals Ltd. (“Alba”) (TSX-V: AA.V / US: AXVEF / Frankfurt: A117RU) and Noram Ventures Inc. (TSX-Venture: NRM / Frankfurt: N7R / OTCBB: NRVTF) (“Noram” or the “Company”) are pleased to report that Bradley Peek, MSc and Certified Professional Geologist, has completed an update to the National Instrument 43-101 Technical Report on the Clayton Valley Lithium Brine/Clay Project. The Technical Report includes a detailed review of the exploration work completed to date, an inferred resource estimate, interpretations and conclusions, and recommendations for the next phases of work. As was earlier announced, Noram drilled 46 shallow core holes into the lithium-rich sediments that were previously identified through surface sampling. The drill results have provided a basis for the definition of an inferred lithium resource. The lithium assays from the drilling were quite consistent over a reasonably large area of close-spaced drill holes. The model generated for the inferred resource estimate indicated a zone of higher lithium grades trending northwest-southeast through the area of the resource. The deposit remains open in several directions and at depth, and the drilling tested only a very small portion (113 acres)(46 hectares) of the area covered by the extensive (17,739 acre)(7,178 hectare) claim holdings. There is considerable upside potential for increasing the size of the deposit. The model generated from the close-spaced drilling was not constrained by the lithology, since the lithology was very homogeneous and did not permit lithologic correlations between drill holes. Information about the mining, processing or other economic criteria does not yet allow for the designation of a clear cut-off grade for the deposit. These factors are to be determined by future testing and analysis. For these reasons, the model was generated using various ranges of lithium grades, which will serve as guidelines as additional information becomes available to constrain the model. The model reports a resource of approximately 17 million metric tonnes at a grade of about 1,060 ppm Li. If additional economic analyses indicate that the model requires further constraints, due to a wide variety of potentially significant economic factors, the tonnage and grade could fluctuate accordingly. The inferred mineral resource estimate is defined by the relatively shallow drilling depth, averaging 14.4 meters, and a relatively large areal extent of 455,800 square meters, determined by an area of close-spaced drill holes. All of the holes within the inferred resource area bottomed in mineralized sediments. Drill holes outside the area of close-spaced drilling were determined to be too far afield to be of use for the model. The resulting model derived from these parameters is a thin, pancake-like deposit. The close-spaced drill holes have been defined as: CVZ-01 thru CVZ-24, CVZ-26 thru CVZ-28, CVZ-30 and CVZ-32, making a total of 29 core holes used in the model. The data for the resource estimate was generated using inverse distance algorithms in the Rockworks 17 computer program. The model was constructed using voxels with dimensions of 20m x 20m horizontally by 2m vertically, reflecting the relatively thin vertical component of the deposit.
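A back-of-envelope check of the headline figures (a sketch only; the 5.323 lithium-to-lithium-carbonate conversion factor is a standard industry value, not a number from the report):

tonnes = 17000000        # inferred resource, metric tonnes
grade_ppm = 1060         # lithium grade, ppm

li_tonnes = tonnes * grade_ppm / 1e6   # ~18,020 t contained lithium
lce_tonnes = li_tonnes * 5.323         # ~95,900 t lithium carbonate equivalent
print(li_tonnes, lce_tonnes)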
Bench scale testing by Membrane Development Specialties (MDS) has revealed that extraction of the lithium from the sediments is feasible using a new membrane technology. Preliminary test results indicate that the process may recover approximately 90% of the lithium, with a processing cost of US$2,000 ±25% (not including mining costs) to recover a tonne of lithium carbonate, and with the ability to recycle a major portion of the acid and water used in the process. Testing is ongoing and is expected to result in a more definitive statement about the test results in the near future. The complete NI 43-101 Technical Report is available on SEDAR and on the Company’s web site at www.albamineralsltd.com or www.noramventures.com. The technical information contained in this news release has been reviewed and approved by Bradley C. Peek, MSc and Certified Professional Geologist, who is a Qualified Person with respect to Noram’s Clayton Valley Lithium Project as defined under National Instrument 43-101. Noram Ventures Inc. (TSX-V: NRM Frankfurt: N7R) is a Canadian based junior exploration company, with a goal of becoming a force in the Green Energy Revolution through the development of lithium and graphite deposits and becoming a low-cost supplier for the burgeoning lithium battery industry. The Company’s primary business focus since formation has been the exploration of mineral projects that include lithium projects in the Clayton Valley in Nevada, the Hector Lode in San Bernardino county, California, the Arizaro East mineral claim located in the eastern portion of the Salar de Arizaro in north-western Argentina, and the Jumbo graphite property in British Columbia. Noram’s long term strategy is to build a multi-national lithium-graphite dominant industrial minerals company to produce and sell lithium and graphite into the markets of Europe, North America and Asia. Alba Minerals Ltd. is a Vancouver based junior resource company with projects in North and South America, focusing on the development of our lithium properties. Our lithium projects are located in Clayton Valley, Nevada, where we can earn up to a 50% interest in the project and just completed a 46-hole drilling program. Our second lithium project, Quiron II, consists of 2,421 hectares of prospective exploration property in the Pocitos Salar, Province of Salta, Argentina. The Project is located approximately 7 km South East of the Millennial Lithium - Southern Lithium JV Pocitos North Cruz Brine Project and 12 km northeast from the Liberty One Lithium Corp. Neither the TSX Venture Exchange nor its Regulation Services Provider (as that term is defined in the policies of the TSX Venture Exchange) accepts responsibility for the adequacy or accuracy of this release. This news release may contain forward-looking information which is not comprised of historical facts. Forward-looking information involves risks, uncertainties and other factors that could cause actual events, results, performance, prospects and opportunities to differ materially from those expressed or implied by such forward-looking information. Forward-looking information in this news release includes statements regarding, among other things, the completion of transactions contemplated in the Agreement. Factors that could cause actual results to differ materially from such forward-looking information include, but are not limited to, regulatory approval processes.
Although Noram believes that the assumptions used in preparing the forward-looking information in this news release are reasonable, including that all necessary regulatory approvals will be obtained in a timely manner, undue reliance should not be placed on such information, which only applies as of the date of this news release, and no assurance can be given that such events will occur in the disclosed time frames or at all. Noram disclaims any intention or obligation to update or revise any forward-looking information, whether as a result of new information, future events or otherwise, other than as required by applicable securities laws.
#
# Copyright (c) 2015 Intel Corporation
#
# Author: Julio Montes <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os
import sys
import io

from clearstack import shell as clearstack_shell


def shell(argstr):
    orig = sys.stdout
    clean_env = {}
    _old_env, os.environ = os.environ, clean_env.copy()
    try:
        sys.stdout = io.StringIO()
        _shell = clearstack_shell.ClearstackConfiguratorShell()
        _shell.main(argstr.split())
    except SystemExit:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        # note: this only asserts that a SystemExit value was captured
        assert exc_value, 0
    finally:
        out = sys.stdout.getvalue()
        sys.stdout.close()
        sys.stdout = orig
        os.environ = _old_env
    return out
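A hypothetical usage from a test; the argument string is illustrative and assumes the clearstack CLI prints a usage message for --help:

def test_help_is_captured():
    out = shell('clearstack --help')
    assert 'usage' in out.lower()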
In this stand-alone issue, Shade begins to ponder what’s next for her on Earth. As the winter dance approaches, she reminisces about high school on Meta, and starts to wonder why she’s putting herself through that kind of hell again. Will she find the perfect dress, or will the dance be a perfect disaster? Plus, an all-new episode of “Life with Honey,” with art by Dan Parent (Kevin Keller). Hey, it's special guest illustrator Marguerite Sauvage! Hooraaaaaay! Imagine Kermit the Frog flailing his arms in glee here. And then check out some preview pages, below!
__problem_title__ = "Diophantine reciprocals II"
__problem_url___ = "https://projecteuler.net/problem=110"
__problem_description__ = "In the following equation x, y, and n are positive integers: " \
                          "1/x + 1/y = 1/n. It can be verified that when n = 1260 there " \
                          "are 113 distinct solutions and this is the least value of n " \
                          "for which the total number of distinct solutions exceeds one " \
                          "hundred. What is the least value of n for which the number of " \
                          "distinct solutions exceeds four million? NOTE: This problem is " \
                          "a much more difficult version of Problem 108 and as it is well " \
                          "beyond the limitations of a brute force approach it requires a " \
                          "clever implementation."

import timeit


class Solution():

    @staticmethod
    def solution1():
        # 1/x + 1/y = 1/n has (d(n^2) + 1) / 2 distinct solutions, where d is
        # the number-of-divisors function, so we need d(n^2) > 7999999.
        # Any minimal n is a product of the smallest primes with
        # non-increasing exponents; search that space with pruning.
        primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]
        limit = 2 * 4000000 - 1
        best = [1]
        for p in primes:
            best[0] *= p  # d(n^2) = 3^15 > limit, so this is a valid bound

        def search(idx, n, divisors, max_exp):
            if divisors > limit:
                best[0] = min(best[0], n)
                return
            if idx == len(primes):
                return
            value = n
            for exp in range(1, max_exp + 1):
                value *= primes[idx]
                if value >= best[0]:
                    break
                search(idx + 1, value, divisors * (2 * exp + 1), exp)

        search(0, 1, 1, 40)
        return best[0]

    @staticmethod
    def time_solutions():
        setup = 'from __main__ import Solution'
        print('Solution 1:', timeit.timeit('Solution.solution1()',
                                           setup=setup, number=1))


if __name__ == '__main__':
    s = Solution()
    print(s.solution1())
    s.time_solutions()
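A quick sanity check of the divisor-count identity used in solution1, assuming sympy is available (not part of the original file):

from sympy import divisor_count

# 1260 = 2^2 * 3^2 * 5 * 7, so d(1260^2) = 5 * 5 * 3 * 3 = 225
assert (divisor_count(1260 ** 2) + 1) // 2 == 113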
Follow us ♥ @dharwadmandi ♥ to get featured. We bring reviews, promotions, street photography, historical places, eateries, shoutouts and much more from Dharwad/Hubli under one name. 3. Put a story tagging us.
import pickle

import pytest

from praw.models import Subreddit, WikiPage

from ... import UnitTest


class TestSubreddit(UnitTest):
    def test_equality(self):
        subreddit1 = Subreddit(self.reddit,
                               _data={'display_name': 'dummy1', 'n': 1})
        subreddit2 = Subreddit(self.reddit,
                               _data={'display_name': 'Dummy1', 'n': 2})
        subreddit3 = Subreddit(self.reddit,
                               _data={'display_name': 'dummy3', 'n': 2})
        assert subreddit1 == subreddit1
        assert subreddit2 == subreddit2
        assert subreddit3 == subreddit3
        assert subreddit1 == subreddit2
        assert subreddit2 != subreddit3
        assert subreddit1 != subreddit3
        assert 'dummy1' == subreddit1
        assert subreddit2 == 'dummy1'

    def test_construct_failure(self):
        message = 'Either `display_name` or `_data` must be provided.'
        with pytest.raises(TypeError) as excinfo:
            Subreddit(self.reddit)
        assert str(excinfo.value) == message

        with pytest.raises(TypeError) as excinfo:
            Subreddit(self.reddit, 'dummy', {'id': 'dummy'})
        assert str(excinfo.value) == message

    def test_fullname(self):
        subreddit = Subreddit(self.reddit, _data={'display_name': 'name',
                                                  'id': 'dummy'})
        assert subreddit.fullname == 't5_dummy'

    def test_hash(self):
        subreddit1 = Subreddit(self.reddit,
                               _data={'display_name': 'dummy1', 'n': 1})
        subreddit2 = Subreddit(self.reddit,
                               _data={'display_name': 'Dummy1', 'n': 2})
        subreddit3 = Subreddit(self.reddit,
                               _data={'display_name': 'dummy3', 'n': 2})
        assert hash(subreddit1) == hash(subreddit1)
        assert hash(subreddit2) == hash(subreddit2)
        assert hash(subreddit3) == hash(subreddit3)
        assert hash(subreddit1) == hash(subreddit2)
        assert hash(subreddit2) != hash(subreddit3)
        assert hash(subreddit1) != hash(subreddit3)

    def test_pickle(self):
        subreddit = Subreddit(self.reddit, _data={'display_name': 'name',
                                                  'id': 'dummy'})
        for level in range(pickle.HIGHEST_PROTOCOL + 1):
            other = pickle.loads(pickle.dumps(subreddit, protocol=level))
            assert subreddit == other

    def test_repr(self):
        subreddit = Subreddit(self.reddit, display_name='name')
        assert repr(subreddit) == 'Subreddit(display_name=\'name\')'

    def test_search__params_not_modified(self):
        params = {'dummy': 'value'}
        subreddit = Subreddit(self.reddit, display_name='name')
        generator = subreddit.search(None, params=params)
        assert generator.params['dummy'] == 'value'
        assert params == {'dummy': 'value'}

    def test_str(self):
        subreddit = Subreddit(self.reddit, _data={'display_name': 'name',
                                                  'id': 'dummy'})
        assert str(subreddit) == 'name'

    def test_submit_failure(self):
        message = 'Either `selftext` or `url` must be provided.'
        subreddit = Subreddit(self.reddit, display_name='name')

        with pytest.raises(TypeError) as excinfo:
            subreddit.submit('Cool title')
        assert str(excinfo.value) == message

        with pytest.raises(TypeError) as excinfo:
            subreddit.submit('Cool title', selftext='a', url='b')
        assert str(excinfo.value) == message


class TestSubredditFlairTemplates(UnitTest):
    def test_bad_add(self):
        subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
        with pytest.raises(TypeError):
            subreddit.flair.templates.add('impossible', css_class='conflict',
                                          background_color='#ABCDEF')
        with pytest.raises(TypeError):
            subreddit.flair.templates.add('impossible', css_class='conflict',
                                          mod_only=False)
        with pytest.raises(TypeError):
            subreddit.flair.templates.add('impossible', css_class='conflict',
                                          text_color='dark')
        with pytest.raises(TypeError):
            subreddit.flair.templates.add('impossible', css_class='conflict',
                                          background_color='#ABCDEF',
                                          mod_only=False, text_color='dark')


class TestSubredditLinkFlairTemplates(UnitTest):
    def test_bad_add(self):
        subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
        with pytest.raises(TypeError):
            subreddit.flair.link_templates.add('impossible',
                                               css_class='conflict',
                                               background_color='#ABCDEF')
        with pytest.raises(TypeError):
            subreddit.flair.link_templates.add('impossible',
                                               css_class='conflict',
                                               mod_only=False)
        with pytest.raises(TypeError):
            subreddit.flair.link_templates.add('impossible',
                                               css_class='conflict',
                                               text_color='dark')
        with pytest.raises(TypeError):
            subreddit.flair.link_templates.add('impossible',
                                               css_class='conflict',
                                               background_color='#ABCDEF',
                                               mod_only=False,
                                               text_color='dark')


class TestSubredditWiki(UnitTest):
    def test__getitem(self):
        subreddit = Subreddit(self.reddit, display_name='name')
        wikipage = subreddit.wiki['Foo']
        assert isinstance(wikipage, WikiPage)
        assert 'foo' == wikipage.name
This is a placeholder page for Andrea Miller, which means this person is not currently on this site. We created this page automatically in hopes Andrea Miller would find it. If you are not Andrea Miller, but are an alumnus of Parkville High School, register on this site for free now.
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
import wx

DS301_PDF_INDEX = {0x1000: 86, 0x1001: 87, 0x1002: 87, 0x1003: 88,
                   0x1005: 89, 0x1006: 90, 0x1007: 90, 0x1008: 91,
                   0x1009: 91, 0x100A: 91, 0x100C: 92, 0x100D: 92,
                   0x1010: 92, 0x1011: 94, 0x1012: 97, 0x1013: 98,
                   0x1014: 98, 0x1015: 99, 0x1016: 100, 0x1017: 101,
                   0x1018: 101, 0x1020: 117, 0x1200: 103, 0x1201: 103,
                   0x1280: 105, 0x1400: 106, 0x1600: 109, 0x1800: 111,
                   0x1A00: 112}


def get_acroversion():
    """Return the install path of the Adobe Acrobat executable, or None."""
    import _winreg
    adobesoft = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, r'Software\Adobe')
    for index in range(_winreg.QueryInfoKey(adobesoft)[0]):
        key = _winreg.EnumKey(adobesoft, index)
        if "acrobat" in key.lower():
            acrokey = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,
                                      'Software\\Adobe\\%s' % key)
            for index in range(_winreg.QueryInfoKey(acrokey)[0]):
                numver = _winreg.EnumKey(acrokey, index)
                try:
                    res = _winreg.QueryValue(
                        _winreg.HKEY_LOCAL_MACHINE,
                        'Software\\Adobe\\%s\\%s\\InstallPath' % (key, numver))
                    return res
                except:
                    pass
    return None


def OpenPDFDocIndex(index, cwd):
    pdf_path = os.path.join(cwd, "doc", "301_v04000201.pdf")
    if not os.path.isfile(pdf_path):
        return _("""No documentation file available.
Please read can festival documentation to know how to obtain one.""")
    try:
        if index in DS301_PDF_INDEX:
            if wx.Platform == '__WXMSW__':
                readerpath = get_acroversion()
                readerexepath = os.path.join(readerpath, "AcroRd32.exe")
                if os.path.isfile(readerexepath):
                    os.spawnl(os.P_DETACH, readerexepath, "AcroRd32.exe",
                              "/A", "page=%d=OpenActions" % DS301_PDF_INDEX[index],
                              '"%s"' % pdf_path)
            else:
                os.system("xpdf -remote DS301 %s %d &"
                          % (pdf_path, DS301_PDF_INDEX[index]))
        else:
            if wx.Platform == '__WXMSW__':
                readerpath = get_acroversion()
                readerexepath = os.path.join(readerpath, "AcroRd32.exe")
                if os.path.isfile(readerexepath):
                    os.spawnl(os.P_DETACH, readerexepath, "AcroRd32.exe",
                              '"%s"' % pdf_path)
            else:
                os.system("xpdf -remote DS301 %s &" % pdf_path)
        return True
    except:
        if wx.Platform == '__WXMSW__':
            return _("Check if Acrobat Reader is correctly installed on your computer")
        else:
            return _("Check if xpdf is correctly installed on your computer")
An essential part of writing creative non-fiction is writing as truthfully as possible. This allows readers to trust the author. Readers expect the author to recount events as accurately as possible, or else to market the writing as another genre. Sometimes writers choose to ignore this. Even with the best intentions, this is deceptive to the reader. Such trickery can turn even the strongest and most powerful stories, stories with a message of hope for readers, into crackpot writing that serves no purpose other than to create controversy. A Million Little Pieces by James Frey is one of these cases. The author shares his riveting story of overcoming drug addiction to avoid an early death, of forbidden and overwhelming love, and of emotional reconnection with friends and family. Frey chooses to write the story without the use of quotation marks. When a new thought begins, a new line on the paper begins. Oftentimes there is a lack of punctuation to distinguish the different thoughts and lines from different characters. However, this style also enables him to make his thoughts come alive, to share his inner dialogue with readers. The imagery that he uses brings the reader to him: to the rehabilitation center, to the moment he reunites with his parents, and to the first time he sees Lily, his love. His simple style makes his writing relatable, even to those of us who have never been addicted to crack or sniffed glue. "On the left side of my cheek a row of crusted scabbed stitches hold a deep 1 inch-long gash together. My nose is bent and swollen beneath its bandage and red lines streak from my nostrils. There are black and yellow bruises beneath both eyes, there is blood both wet and dry everywhere."
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 19 17:53:46 2017

@author: benjaminsmith
"""

# Exploratory fragment from an interactive session. It assumes an
# nltools-style workflow where `msmrl1` is a Brain_Data object, `stats`
# holds a trained weight map, `onsets_convolved` is a Design_Matrix, and
# `plotBrain` is a plotting helper defined elsewhere.

# Assumed imports (the original fragment relied on the interactive session):
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from ggplot import ggplot, aes, geom_line, stat_smooth, scale_x_continuous

# do the regression: we need a design matrix with linear, square and cubic
# time-point regressors, plus intercept, plus whatever Design_Matrix files
# we want to put in.
onsets_convolved.head()

onsets_convolved['linearterm'] = range(1, 361)
onsets_convolved['quadraticterm'] = [pow(x, 2) for x in onsets_convolved['linearterm']]
onsets_convolved['cubicterm'] = [pow(x, 3) for x in onsets_convolved['linearterm']]
onsets_convolved['ones'] = [1] * 360

onsets_convolved.heatmap()

# add in the design matrix
msmrl1.X = onsets_convolved

regression = msmrl1.regress()
msm_predicted_pain = regression['t'].similarity(stats['weight_map'],
                                                'correlation')
for brainimg in regression['t']:
    plotBrain(brainimg)

regression['t'].shape()
plotBrain(regression['t'][1, ])
plotBrain(regression['t'][9, ])
plotBrain(regression['t'][13, ])

# regress out the linear trends, then dot product with the pain map
msm_predicted_pain = regression['beta'].similarity(stats['weight_map'],
                                                   'dot_product')
plt.plot(msm_predicted_pain)  # was plt(...), which is not callable
np.shape(msm_predicted_pain)

# raw data
msm_predicted_pain = msmrl1.similarity(stats['weight_map'], 'dot_product')

onsets_convolved.columns.tolist()

ggplot(
    pd.DataFrame(data={
        'PainByBeta': msm_predicted_pain[0:9],
        'RegressionBetaNum': range(0, 9)
    }),
    aes('RegressionBetaNum', 'PainByBeta')) + \
    geom_line() + \
    stat_smooth(colour='blue', span=0.2) + \
    scale_x_continuous(breaks=[1, 2, 3],
                       labels=["horrible", "ok", "awesome"])
Beth told me she was having an ongoing situation at the house and on the farm and wanted to know if I could drop by and visit with her. Beth has been a good friend for many years, and she knows she is the last person to think a creaky floor or a flickering light is a ghost. I knew if she brought it up at all, something odd must have been going on. I decided to visit her right away to find out what was happening. As soon as I arrived at Beth's farm she walked me to her house, which sat on top of a small hill in front of the farm. Behind the house were paddocks and a big barn, with many different horses along with a pony, a goat, dogs, cats and chickens; I think you get the idea. Beth and her family love animals. The problem started about three weeks ago. Her oldest son was in her bathroom taking a shower about 7 pm. Standing in Beth's room ironing his shirt at the time was Beth's other teenage son. All of a sudden a violent pounding started on the bathroom door. It was a loud, clear knocking. The boy in the shower started to yell out to his brother in the parents' bedroom, asking him what he wanted. At the same time the boy ironing was yelling to his brother in the bathroom, asking him why he was banging on the door. The commotion stopped and both boys continued on with what they were doing. A few minutes passed when the knocking started up again. This time the pounding was loud and vibrated the entire room. Both boys thought it was the other one doing it, and both headed towards the door. The boy inside the bathroom opened the door and started to yell at his brother. The brother in the bedroom returned the screaming, both insisting the other had been the door banger. Both boys started to walk out of their parents' bedroom, one behind the other, when the door pounding started one last time. A few days passed when my friend was home in her kitchen with her oldest son, the boy who had been in the shower during the first incident. Suddenly they could hear hard, loud knocking coming from the back of the house. They followed the noise to the end of the house where the first incident had occurred. When they searched the house once again, they found nothing out of the ordinary. The loud knocking continued, with an incident occurring every few days. The family continued to search their property, check the pipes, and check for construction problems to try to find the cause of this strange happening. They came up empty-handed. I heard a loud knocking noise like that today. I was hooking my truck up to a trailer and I kept hearing it. It was like...knock...knock...knock...knock...over and over again. I could hear it when I was in between two trailers, but whenever I walked out I couldn't hear it anymore. Then I would go back in between the trailers and it would start up again. I have no idea what it was. I don't think it was a ghost. Just something weird.
#!/usr/bin/env python
#
# common.py -- Methods used by the cloud-ha scripts.
# Created: Marcus Butler, 05-April-2017.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from boto3 import client
from botocore.exceptions import ClientError
import sys
import json


def get_rtb_assoc(subnet):
    ec2 = client('ec2')
    res = ec2.describe_route_tables()

    for table in res['RouteTables']:
        for assoc in table['Associations']:
            if 'SubnetId' in assoc and assoc['SubnetId'] == subnet:
                return assoc['RouteTableAssociationId']

    return None


def change_rtb(old_assoc, rtb):
    ec2 = client('ec2')
    ec2.replace_route_table_association(AssociationId=old_assoc,
                                        RouteTableId=rtb)

    return True


def get_config(bucket, file):
    s3 = client('s3')
    obj = s3.get_object(Bucket=bucket, Key=file)
    config = json.loads(obj['Body'].read())

    return config


def fatal_error(errmsg):
    return {
        'statusCode': 500,
        'headers': {'Content-Type': 'application/json'},
        'body': json.dumps({'errorMessage': errmsg})
    }
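A hypothetical Lambda-style handler showing how these helpers compose; the bucket, key, and config field names are illustrative and not defined by this module:

def lambda_handler(event, context):
    config = get_config('cloud-ha-config-bucket', 'cloud-ha.json')

    subnet = config['subnetId']
    assoc = get_rtb_assoc(subnet)
    if assoc is None:
        return fatal_error('No route table association found for ' + subnet)

    # Point the subnet at the standby route table.
    change_rtb(assoc, config['standbyRouteTableId'])
    return {'statusCode': 200,
            'headers': {'Content-Type': 'application/json'},
            'body': json.dumps({'failedOver': True})}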
Enhance blood circulation and stimulate your pet's cognitive function with the hemp oil for dogs. Our hemp oil provides your pets with the necessities of life and also improves overall health. You can use hemp oil as your everyday pet dietary supplement.
#!/usr/bin/python

import os
import sys
from collections import defaultdict

import flask
import flask.ext.sqlalchemy
import flask.ext.restless
from flask.ext.cors import CORS
from flask import jsonify

sys.path.insert(0, '.')

import database


def add_cors_headers(response):
    response.headers['Access-Control-Allow-Origin'] = '*'
    #response.headers['Access-Control-Allow-Credentials'] = 'true'
    # Set whatever other headers you like...
    return response

#https://flask-restless.readthedocs.org/en/latest/quickstart.html

# Create the Flask application and the Flask-SQLAlchemy object.
app = flask.Flask(__name__)

db_name = 'sqlite:///' + os.path.abspath(sys.argv[1])
print db_name

app.config['DEBUG'] = True
app.config['SQLALCHEMY_DATABASE_URI'] = db_name
app.config['SERVER_PORT'] = 5000

cors = CORS(app, resources={r"/*": {"origins": "*"}}, headers="Content-Type")

app.secret_key = 's3cr3tkeyverysecret'

db = flask.ext.sqlalchemy.SQLAlchemy(app)

# Create the Flask-Restless API manager.
manager = flask.ext.restless.APIManager(app, flask_sqlalchemy_db=db)

# Create API endpoints, which will be available at /api/<tablename> by
# default. Allowed HTTP methods can be specified as well.
databases_meta = defaultdict(list)

for dbn in database.dbs:
    print "exporting", dbn

    #manager.create_api(dbn, methods=['GET'], allow_functions=True,
    #                   collection_name=dbn.__tablename__)
    manager.create_api(dbn, methods=['GET'], allow_functions=True)

    # collect (column name, column type) metadata once, from the first model
    if len(databases_meta) == 0:
        tables = dbn.metadata.sorted_tables
        for table in tables:
            for column in table.c:
                # other column attributes inspected while developing:
                # anon_label, base_columns, desc, description, info, key,
                # label, name, table, type
                databases_meta[str(column.table)].append(
                    (column.name, str(column.type)))


@app.route('/')
def hello_world():
    hlp = {
        '/'             : 'help',
        '/metas'        : 'all tables meta data',
        '/meta'         : 'list of tables',
        '/meta/<CHROM>' : 'metadata of table <CHROM>',
        '/api/<TABLE>'  : 'api for table <TABLE>'
    }
    return jsonify({'help': hlp})


@app.route('/metas')
def metas():
    return jsonify({'metas': databases_meta})


@app.route('/meta')
def meta():
    names = databases_meta.keys()
    names.sort()
    print names
    return jsonify({'meta': names})


@app.route('/meta/<table>')
def meta_table(table):
    if table not in databases_meta:
        flask.abort(404)
    dbn_meta = databases_meta[table]
    return jsonify({'meta': dbn_meta})


# start the flask loop
app.run(host='0.0.0.0', debug=True, port=5000)
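A client-side smoke test sketch, assuming the server is running locally on port 5000 and the requests package is installed (not part of the original script):

import requests

base = 'http://127.0.0.1:5000'
print requests.get(base + '/').json()['help']
print requests.get(base + '/meta').json()['meta']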
I use My Yahoo and I have Spine-Health articles being fed into my setup every day. It's really good. Now I have yet one more reason to stay glued to my Blackberry. Will you please explain the steps to make this happen? Are you talking about for use with a Blackberry-type device, or can this be on our PC as well? I'm a little behind the times and I never thought I would be computer illiterate, but I'm definitely showing my lack of knowledge on this one. Ok, for starters I am not a phone person; a phone is two cans hung between two strings, and when the phone does ring, that is what answering machines are for. I may be the only person in the world without a cell phone! A Blackberry I would enjoy, but there's that phone thing in there too! I used Spine-Health to set up my RSS updates, clickable right above where you can sign up for the Spine-Health newsletter. From there it will prompt you as to which application you want the Spine-Health RSS feed to go to. I am using My Yahoo. It is simple. I have about 10 articles that show up on my Yahoo when they are updated. I have been on this site for over 2.5 years and have clicked on the doctor-written articles before, and it always takes me to old (very old) articles. Once in a while I will see a title pop up on the main page that I click, and even then it is to an old article. I just clicked the RSS feed link at the top right corner and it pulls up a lot of articles written in the past few days. Where do we find these articles? I consider myself fairly computer literate, and if I have missed these, how many other people are missing them? Maybe it is the design of the main page, but I can't find these recent articles. What a wealth of information I am missing. The titles of the articles I find when I click the RSS link look to be very informative. HELP! Someone needs to make the general membership of these boards aware of how to access them. In the past you said that the forums are only part of this place, and I have checked to find a few articles, but never stumbled on this much. There has to be a way to feed them to a page where we can access them easily. ...web-based news readers. You click your choice. 4- Click on your choice (for me it's My Yahoo) and that should bring up My Yahoo with today's current articles. There should be two popups: 1- the summary of articles from Spine-Health, and 2- the option to keep the articles or cancel. 5- Once there, you can customize how many articles to display, how old they should be, and whether you want just the headline or the headline and summary. I understand how to get the RSS feeds, but for those of us who just want to view these online in THIS website, how do we access them? If the only way to get to them is through the RSS link, please design some way to make them more noticeable to the general membership, if that is possible. Cindy, as much as I can remember, native Drupal doesn't have RSS support inline. I believe there are addons to make it feedable. Man, have my eyes been closed this whole time or what? I've heard you talk about all the "peer reviewed articles" and I've read several, but the links I followed previously were limited to just a few articles, and honestly I wasn't very impressed. Now I have just discovered what you have tried to help us see all of this time. Am I the only one who has been blind to the MANY articles available here?
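For anyone who would rather pull the articles programmatically than through My Yahoo, here is a minimal sketch using the feedparser package; the feed URL is illustrative, so use the address behind the site's own RSS link:

import feedparser

feed = feedparser.parse('https://www.spine-health.com/feed.rss')  # illustrative URL
for entry in feed.entries[:10]:
    print(entry.title, '-', entry.link)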
# Import necessary modules
from droneapi.lib import VehicleMode

# Assume we are already connected to a vehicle (at the highest
# level) and this has been assigned to __main__.v
from __main__ import v


def FetchParam(par):
    """Take a list of parameters (par) and return a corresponding list of
    values from the vehicle."""
    global v
    v.flush()  # just an extra one for luck
    val = par[:]  # create list for values
    for (i, p_) in enumerate(par):
        if p_ == 'MODE':  # Treat modes slightly differently
            val[i] = v.mode.name
        else:
            val[i] = v.parameters[p_]
    return val


def SetMode(mode):
    """Set a new mode for the vehicle (e.g. MANUAL, AUTO, RTL).
    Function returns nothing."""
    global v
    v.mode = VehicleMode(mode)  # v.flush() is assumed to be run by SetParam()


def SetParam(par, val):
    """Set a list of parameters (par) to a corresponding list of new
    values (val). Function returns nothing."""
    global v
    for (p_, v_) in zip(par, val):
        if p_ == 'MODE':  # Treat modes slightly differently
            SetMode(v_)
        else:
            v.parameters[p_] = v_
    v.flush()  # param changes SHOULD be guaranteed from now


def ChangeParam(par, val, checks=3):
    """Change a list of parameters (par) to a corresponding list of new
    values (val). The parameters are then checked to ensure they have
    changed using CheckParam(). Returns True if successful, otherwise
    returns a list of unset parameters and their intended values."""
    SetParam(par, val)
    check = CheckParam(par, val)
    ci = 0
    # Note: the retry loop only re-flushes and re-checks; it does not
    # re-send the parameter values themselves.
    while (check != True) and (ci < checks):
        ci += 1
        v.flush()
        check = CheckParam(check[0], check[1])
    if check != True:
        print("Detected non-matching params: %s" % check[0])
        return check
    return True


def CheckParam(par, val):
    """Check a list of parameters (par) are set to a list of values (val)."""
    valC = FetchParam(par)  # load current parameter values
    parW = []  # list of params not correct
    valW = []  # list of values to be corrected
    # Iterate through each parameter, checking they have been changed
    # correctly.
    for (p_, v_, vC_) in zip(par, val, valC):
        if p_ != 'MODE' and v_ != vC_:  # skips mode changes
            parW.append(p_)
            valW.append(v_)
    # Return unchanged params, or True if everything is okay
    if len(parW) > 0:
        return [parW, valW]
    return True
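A short usage sketch, assuming a vehicle object is already bound to __main__.v as the module expects; the parameter name RTL_ALT is illustrative:

result = ChangeParam(['MODE', 'RTL_ALT'], ['AUTO', 1500])
if result is True:
    print("All parameters confirmed")
else:
    print("Still unset: %s" % result[0])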
The advent of globalisation has lowered airfare costs, and as a consequence there are a number of cheap flights to China, whether you want to take a flight from the UK to China between semesters and go on a sightseeing journey, or were interested in the Beijing Olympics of 2008. And for all flight attendants flying on Air Asia: I think because I was flying a lot with Air Asia, the management sent me an email asking me to spy on the flight attendants. Tell them how the service was, get their names. I was also told not to tell the flight attendants this, and that they would send me an email, which they did, with a list of questions about the flight attendants' service. I still travel Air Asia for domestic and short-haul routes. Air Asia regards their customers with disdain and it shows in all aspects of their service, from check-in through to arrival. But while they may help with some airlines, others are better booked directly.
import json
import os
import shutil
import sys
import tempfile
import zipfile

try:
    import imp
    imp.find_module('texttable')
    from texttable import Texttable
except ImportError:
    sys.stderr.write("Could not import Texttable\n"
                     "Retry after 'pip install texttable'\n")
    exit()

tmpdir = tempfile.mkdtemp()


def extract_zip(filename):
    file_dir = os.path.join(tmpdir, os.path.splitext(filename)[0])
    if not os.path.exists(file_dir):
        os.makedirs(file_dir)
    zip_ref = zipfile.ZipFile(os.path.abspath(filename), 'r')
    zip_ref.extractall(os.path.abspath(file_dir))
    zip_ref.close()
    return file_dir


def diff(file1, file2):
    # extract ZIP files
    file1_dir = extract_zip(file1)
    file2_dir = extract_zip(file2)

    # tez debugtool writes json data to a TEZ_DAG file whereas tez UI writes
    # to dag.json; in dag.json the data is nested inside a "dag" root node
    file1_using_dag_json = True
    dag_json_file1 = os.path.join(file1_dir, "dag.json")
    if os.path.isfile(dag_json_file1) == False:
        file1_using_dag_json = False
        dag_json_file1 = os.path.join(file1_dir, "TEZ_DAG")
    if os.path.isfile(dag_json_file1) == False:
        print "Unable to find dag.json/TEZ_DAG file inside the archive " + file1
        exit()

    file2_using_dag_json = True
    dag_json_file2 = os.path.join(file2_dir, "dag.json")
    if os.path.isfile(dag_json_file2) == False:
        file2_using_dag_json = False
        dag_json_file2 = os.path.join(file2_dir, "TEZ_DAG")
    if os.path.isfile(dag_json_file2) == False:
        # note: the original printed file1 here by mistake
        print "Unable to find dag.json/TEZ_DAG file inside the archive " + file2
        exit()

    # populate diff table
    difftable = {}
    with open(dag_json_file1) as data_file:
        file1_dag_json = (json.load(data_file)["dag"]
                          if file1_using_dag_json else json.load(data_file))
        counters = file1_dag_json['otherinfo']['counters']
        for group in counters['counterGroups']:
            countertable = {}
            for counter in group['counters']:
                counterName = counter['counterName']
                countertable[counterName] = []
                countertable[counterName].append(counter['counterValue'])
            groupName = group['counterGroupName']
            difftable[groupName] = countertable

        # add other info
        otherinfo = file1_dag_json['otherinfo']
        countertable = {}
        countertable["TIME_TAKEN"] = [otherinfo['timeTaken']]
        countertable["COMPLETED_TASKS"] = [otherinfo['numCompletedTasks']]
        countertable["SUCCEEDED_TASKS"] = [otherinfo['numSucceededTasks']]
        countertable["FAILED_TASKS"] = [otherinfo['numFailedTasks']]
        countertable["KILLED_TASKS"] = [otherinfo['numKilledTasks']]
        countertable["FAILED_TASK_ATTEMPTS"] = [otherinfo['numFailedTaskAttempts']]
        countertable["KILLED_TASK_ATTEMPTS"] = [otherinfo['numKilledTaskAttempts']]
        difftable['otherinfo'] = countertable

    with open(dag_json_file2) as data_file:
        file2_dag_json = (json.load(data_file)["dag"]
                          if file2_using_dag_json else json.load(data_file))
        counters = file2_dag_json['otherinfo']['counters']
        for group in counters['counterGroups']:
            groupName = group['counterGroupName']
            if groupName not in difftable:
                difftable[groupName] = {}
            countertable = difftable[groupName]
            for counter in group['counters']:
                counterName = counter['counterName']
                # if counter does not exist in file1, add it with 0 value
                if counterName not in countertable:
                    countertable[counterName] = [0]
                countertable[counterName].append(counter['counterValue'])

        # append other info
        otherinfo = file2_dag_json['otherinfo']
        countertable = difftable['otherinfo']
        countertable["TIME_TAKEN"].append(otherinfo['timeTaken'])
        countertable["COMPLETED_TASKS"].append(otherinfo['numCompletedTasks'])
        countertable["SUCCEEDED_TASKS"].append(otherinfo['numSucceededTasks'])
        countertable["FAILED_TASKS"].append(otherinfo['numFailedTasks'])
        countertable["KILLED_TASKS"].append(otherinfo['numKilledTasks'])
        countertable["FAILED_TASK_ATTEMPTS"].append(otherinfo['numFailedTaskAttempts'])
        countertable["KILLED_TASK_ATTEMPTS"].append(otherinfo['numKilledTaskAttempts'])
        difftable['otherinfo'] = countertable

    # if some counters are missing, consider them as 0 and compute the delta
    for k, v in difftable.items():
        for key, value in v.items():
            # if counter value does not exist in file2, add it with 0 value
            if len(value) == 1:
                value.append(0)
            # store delta difference
            delta = value[1] - value[0]
            value.append(("+" if delta > 0 else "") + str(delta))

    return difftable


def print_table(difftable, name1, name2, detailed=False):
    table = Texttable(max_width=0)
    table.set_cols_align(["l", "l", "l", "l", "l"])
    table.set_cols_valign(["m", "m", "m", "m", "m"])
    table.add_row(["Counter Group", "Counter Name", name1, name2, "delta"])

    for k in sorted(difftable):
        # ignore task specific counters in default output
        if not detailed and ("_INPUT_" in k or "_OUTPUT_" in k):
            continue

        v = difftable[k]
        row = []

        # counter group. using shortname here instead of FQCN
        if detailed:
            row.append(k)
        else:
            row.append(k.split(".")[-1])

        # keys as list (counter names)
        row.append("\n".join(list(v.keys())))

        # defensively fill any entry still missing a file2 value and delta
        for key, value in v.items():
            if len(value) == 1:
                value.append(0)
                value.append(value[0] - value[1])

        # dag1 counter values
        name1Val = []
        for key, value in v.items():
            name1Val.append(str(value[0]))
        row.append("\n".join(name1Val))

        # dag2 counter values
        name2Val = []
        for key, value in v.items():
            name2Val.append(str(value[1]))
        row.append("\n".join(name2Val))

        # delta values
        deltaVal = []
        for key, value in v.items():
            deltaVal.append(str(value[2]))
        row.append("\n".join(deltaVal))

        table.add_row(row)

    print table.draw() + "\n"


def main(argv):
    sysargs = len(argv)
    if sysargs < 2:
        print "Usage: python counter-diff.py dag_file1.zip dag_file2.zip [--detail]"
        return -1

    file1 = argv[0]
    file2 = argv[1]
    difftable = diff(file1, file2)

    detailed = False
    if sysargs == 3 and argv[2] == "--detail":
        detailed = True
    print_table(difftable,
                os.path.splitext(file1)[0],
                os.path.splitext(file2)[0],
                detailed)


if __name__ == "__main__":
    try:
        sys.exit(main(sys.argv[1:]))
    finally:
        shutil.rmtree(tmpdir)
Meet the Artists of Memory of Water!
Culture on Waterfronts in Lisbon!