#!/usr/bin/env python3
"""Usage: pythonium [-h]|[-v]|[-r]|[FILE]

Options:
    -h --help      show this
    -v --version   show version
    -r --runtime   output pythonium runtime (exclusive option)

You will need to use the library generated with -r or --runtime option
to run the code.
"""
import os
import sys
from ast import parse

from .compliant.compliant import Compliant
from . import __version__


def main(argv=None):
    from docopt import docopt
    args = docopt(__doc__, argv, version='pythonium ' + __version__)
    if not args['--runtime'] and not args['FILE']:
        main(['-h'])
        sys.exit(1)
    if args['--runtime']:
        # call ourself for each file in pythonium.lib:
        from pythonium.compliant import builtins
        path = os.path.dirname(__file__)
        filepath = os.path.join(path, 'pythonium.js')
        with open(filepath) as f:
            print(f.read())
        # compile builtins
        for path in builtins.__path__:
            for name in sorted(os.listdir(path)):
                if name.endswith('.py'):
                    argv = [os.path.join(path, name)]
                    main(argv)
        return
    filepath = args['FILE']
    dirname = os.path.abspath(os.path.dirname(filepath))
    basename = os.path.basename(filepath)
    with open(os.path.join(dirname, basename)) as f:
        input = f.read()
    # generate javascript
    tree = parse(input)
    translator = Compliant()
    translator.visit(tree)
    output = translator.writer.value()
    print(output)


if __name__ == '__main__':
    main()
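A minimal usage sketch for the entry point above. The import path is an assumption, since the module name is not shown here; only the -h, -v, -r/--runtime and FILE options are documented.

# Hedged sketch: "pythonium.main" is an assumed module path.
from pythonium.main import main

main(['--runtime'])   # print pythonium.js plus the compiled builtins to stdout
main(['module.py'])   # print the JavaScript translation of module.py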
Choice of: Klein, Groot, Geen saus, Knoflooksaus, Tomatensaus, Cocktailsaus, Andalouse saus and more. Choice of: Geen saus, Knoflooksaus, Tomatensaus, Cocktailsaus, Andalouse saus, Sambal and more. Choice of: Frikandel, Kroket, Coca-Cola, Coca-Cola Light, Coca-Cola Cherry, Sprite and more. Choice of: Geen saus, Knoflook saus, Sambal, Knoflook en sambal, Geen sla, Sla, Sla en uien and more. Choice of: Coca-Cola, Coca-Cola Light, Sprite, Fanta Orange, Fanta Cassis, Fernanades groen and more. Choice of: Tomaat, Kaas, Uien, Champignons, Knoflook, Olijven, Paprika, Ananas, Ei and more.
# high level db access # must define lowerlevel db something... import datetime import uuid import copy import re class Database(): def __init__(self, thedatabase): self.database = thedatabase def get_user(self, username): userdata = self.database.get_user(username) if not userdata: return None if userdata.get('token', None): # remove if over 24 hours old if (datetime.datetime.utcnow().timestamp() - userdata['tokendate']) > 86400: userdata['token'] = None else: userdata['token'] = None return userdata def create_user(self, username, hashed): result = self.database.create_user(username, hashed) return result def check_token(self, username, token): user = self.get_user(username) if user and user.get('token') and user['token'] == token: return True return False def get_token(self, username): user = self.get_user(username) # if already token, return it if user['token']: return user['token'] # otherwise generate a new one token = (str(uuid.uuid4())+str(uuid.uuid4())).replace('-','').upper() tokendate = datetime.datetime.utcnow().timestamp() self.database.update_token(user['email'], token, tokendate) return token def get_note(self, username, notekey, version=None): note = self.database.get_note(username, notekey, version) if not note: return None return note def update_note(self, username, notekey, data): # TODO: check/validate data types # TODO: use syncnum to resolve conflicts (if syncnum in new data is lower, don't use) old_note = self.get_note(username, notekey) if not old_note: return ('note with that key does not exist', 404) content = data.get('content', None) if content and content != old_note['content']: # then save old version self.database.save_version(username, notekey) old_note['content'] = content # TODO: currently version only increments when content changes (is this wanted?) - either way, syncnum is inc'd old_note['version'] += 1 s = datetime.datetime.utcnow().timestamp() old_note['modifydate'] = min(s, data.get('modifydate', s)) # old_note['createdate'] = min(s, data.get('createdate', s)) # TODO: should createdate ever be modified? # TODO: handle version in new note data (ie for merge? and _whether to update or not_ - don't overwrite newer note with older note) old_note['minversion'] = max(old_note['version'] - 20, 1) #TODO: allow configuring number of versions to keep self.database.drop_old_versions(username, notekey, old_note['minversion']) # TODO: handling sharekey? 
deleted = data.get('deleted', None) if deleted == '1' or deleted == '0': deleted = int(deleted) if (deleted in [1,0]): old_note['deleted'] = deleted if 'systemtags' in data: old_note['systemtags'] = [t for t in set(data.get('systemtags',[])) if t in ('pinned', 'markdown', 'list')] if 'tags' in data: tags = [] for t in set(data.get('tags', [])): safe_tag = self._validate_tag(t) if safe_tag: tags.append(safe_tag) old_note['tags'] = tags old_note['syncnum'] += 1 ok = self.database.update_note(username, copy.deepcopy(old_note)) if ok: return (old_note, 200) return ('unable to create note', 400) def create_note(self, username, data): note_data = {} if 'content' not in data: return ('note must contain a content field', False) note_data['content'] = str(data['content']) note_data['key'] = str(uuid.uuid4()) + str(int(datetime.datetime.utcnow().timestamp())) s = datetime.datetime.utcnow().timestamp() note_data['modifydate'] = min(s, data.get('modifydate', s)) note_data['createdate'] = min(s, data.get('createdate', s)) note_data['version'] = 1 note_data['minversion'] = 1 note_data['publishkey'] = None note_data['syncnum'] = 1 deleted = data.get('deleted', 0) if deleted == '1' or deleted == '0': deleted = int(deleted) elif deleted != 1: deleted = 0 note_data['deleted'] = deleted note_data['systemtags'] = [t for t in set(data.get('systemtags',[])) if t in ('pinned', 'markdown', 'list')] tags = [] for t in set(data.get('tags', [])): safe_tag = self._validate_tag(t) if safe_tag: tags.append(safe_tag) note_data['tags'] = tags ok = self.database.create_note(username, copy.deepcopy(note_data)) if ok: return (note_data, True) return ('unable to create note', False) def delete_note(self, username, key): data = self.database.delete_note(username, key) return data def notes_index(self, username, length, since, mark): """ username<string>, length<int>, since<float>, mark<whatever> """ data, status = self.database.notes_index(username, length, since, mark) return (data, status) # TODO: tags api def _validate_tag(self, t): # remove surrounding whitespace t = t.strip() # can't contain whitespace or commas! if re.search(r'(\s|,)', t): return None return t
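A short usage sketch for the Database wrapper above. The lower-level store is hypothetical; any object exposing the methods the wrapper delegates to (get_user, create_user, update_token, get_note, create_note, update_note, save_version, drop_old_versions, delete_note, notes_index) would fit.

# SomeLowLevelStore is a placeholder, not part of the module above.
backend = SomeLowLevelStore()
db = Database(backend)

note, ok = db.create_note('alice', {'content': 'first note', 'tags': ['inbox']})
if ok:
    updated, status = db.update_note('alice', note['key'], {'content': 'first note, edited'})
token = db.get_token('alice')   # returns the cached token or mints a new one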
…I dropped the glitter in…shook up the Mason, then dropped the figurines into the Mason. Voila…and my Mason jar is perfectly preserved, and ready for the holidays. If you’d like to see more of my full holiday home tour, see the slideshow below, or click here…and enjoy!
#!/bin/env python3
import json
import os
from pprint import pprint
from requests import get
import subprocess

print("starting...")


def download_latest_release():
    print("downloading prodigal...")
    req_content = json.loads(
        get("https://api.github.com/repos/hyattpd/Prodigal/releases/latest").content.decode('utf-8'))
    print("installing %s" % req_content["name"])
    system_os = "linux" if os.name == "posix" else ("nt" if os.name == "nt" else "osx")
    print(system_os)
    for obj in req_content["assets"]:
        if system_os in obj["name"]:
            pprint(obj)
            # wget(obj["browser_download_url"], obj["name"])


def wget(url, file_name):
    binary_download_response = get(url, stream=True)
    with open(file_name, 'wb') as binary:
        for chunk in binary_download_response.iter_content(chunk_size=1024):
            if chunk:
                binary.write(chunk)


download_latest_release()

with subprocess.Popen(["ifconfig"], stdout=subprocess.PIPE) as proc:
    print(proc.stdout.read())
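The download itself is left commented out in the script above. A hedged completion of the asset loop inside download_latest_release might look like this, still assuming the GitHub asset names contain the detected platform string.

# Sketch only: replaces the body of the asset loop in download_latest_release.
for obj in req_content["assets"]:
    if system_os in obj["name"]:
        wget(obj["browser_download_url"], obj["name"])  # fetch the matching release binary
        break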
Assisted with content at MCO airport in Orlando, FL to create a concept that would embody what Central Florida is. Synect is a specialized agency dedicated to revolutionizing content, interactivity and systems for video walls of unlimited size and scale, driving over 20,000 feet of digital signage across the globe daily. Illustrations were created to showcase various areas of Central Florida such as Downtown Orlando, the Space Coast, the springs found in Central Florida and the theme parks. Subtle motion was added to components; Vimeo videos are attached below.

Branding and package design created for Debs Beef Jerky, a beef jerky brand in Central Florida. Debs Beef Jerky is a family-owned business located in sunny Central Florida where they make ten flavors of artisan beef jerky. These flavors range from hickory smoked to teriyaki and signature flavors galore. Best of all, they don’t believe in chemicals! All jerkies are naturally cured, meaning they NEVER add nitrates, MSG, tenderizers or artificial flavorings.

Over the past few months, I have been working with Global Pet Nutrition to update their current product line and to create a new product line for pets, giving the brand a more cohesive feel. A slew of icons was created to demonstrate the various benefits of the products. Labels have also been created for the international market throughout Europe and South America. Global Pet Nutrition is formulated by veterinarians and located in Miami, FL. All products are manufactured in-house and only the best ingredients are used. Products are gluten and sugar free.

While working as Lead Designer for Personalized Medicine Today (PMT), I led a team of designers to create a cohesive vision for PMT Magazine, with thought-provoking imagery and a very clean, concise brand. PMT Magazine focuses on naturopathic solutions to your health needs. Illustrations were also created throughout the magazine.

From initial design drafts of Euréka Spices, I created a company identity that would embody the essence of an herbs and spices company. The tagline, “Always Organic, Always Fresh”, was then created to further the brand. 16 1/2" x 5 1/2".

Email marketing created for 8 different sizes of mobile platforms for Treats, based in San Francisco, California. Designs were created to include dynamic text for various holidays for clients, which would be programmed in HTML to be unique to each individual who views it. Logos of companies have been taken off to give privacy to clients.

Icons created for various clients.

The scope of rebranding Nestlé Nesquik was to create a simplistic feeling for the company's marketing materials while introducing a modern and playful design. Iconic elements of the brand were kept but redesigned, such as the company's mascot Quik, also known as the Nesquik bunny. This element was given a geometric rendition that embodies a more modern approach to the redesign.

Various photographs from my travels out in the wild. Various photographs of scenic views.
import os

from .self_post_extractor import SelfPostExtractor
from ..core.errors import Error
from ..utils import system_util


class CommentExtractor(SelfPostExtractor):

    def __init__(self, post, **kwargs):
        super().__init__(post, **kwargs)

    def extract_content(self):
        try:
            ext = self.post.significant_reddit_object.comment_file_format
            title = self.make_title()
            directory = self.make_dir_path()
            self.download_text(directory, title, ext)
        except Exception as e:
            self.failed_extraction = True
            self.extraction_error = Error.TEXT_LINK_FAILURE
            self.failed_extraction_message = f'Failed to save comment text.  ERROR: {e}'
            self.logger.error('Failed to save content text', extra={
                'url': self.url, 'user': self.comment.url, 'subreddit': self.comment.subreddit,
                'comment_id': self.comment.id, 'comment_reddit_id': self.comment.reddit_id,
                'date_posted': self.comment.date_posted
            })

    def download_text(self, dir_path, title, extension):
        try:
            # Use the collision-free path returned by check_file_path so an existing
            # file with the same title is not silently overwritten.
            path = self.check_file_path(dir_path, title, extension)
            with open(path, 'w', encoding='utf-8') as file:
                text = self.get_text(extension)
                file.write(text)
        except Exception:
            self.logger.error('Failed to download comment text',
                              extra={'post': self.post.title, 'post_id': self.post.id,
                                     'comment_id': self.comment.id, 'directory_path': dir_path,
                                     'title': title}, exc_info=True)

    def check_file_path(self, dir_path, name, ext):
        self.create_dir_path(dir_path)
        unique_count = 1
        base_title = system_util.clean_path(name)
        download_title = base_title
        path = os.path.join(dir_path, f'{download_title}.{ext}')
        while os.path.exists(path):
            download_title = f'{base_title}({unique_count})'
            path = os.path.join(dir_path, f'{download_title}.{ext}')
            unique_count += 1
        return path
Bengal Tiger Closeup - Olympus EM1 Mark II, 420mm (840mm equivalent), f/6.3, 1/250s, ISO 6400. I’m not a big fan of close up portraits of wildlife, mostly because there’s already a ton of these kinds of pictures in the world. However, when photographing a new species in the wild, I still succumb to the temptation to grab that portrait shot just to have one in my collection.
# mounts.py # Active mountpoints cache. # # Copyright (C) 2015 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): Vojtech Trefny <[email protected]> # import libmount import functools from . import util import logging log = logging.getLogger("blivet") MOUNT_FILE = "/proc/self/mountinfo" class MountsCache(object): """ Cache object for system mountpoints; checks /proc/self/mountinfo for up-to-date information. """ def __init__(self): self.mounts_hash = 0 self.mountpoints = None def get_mountpoints(self, devspec, subvolspec=None): """ Get mountpoints for selected device :param devscpec: device specification, eg. "/dev/vda1" :type devspec: str :param subvolspec: btrfs subvolume specification, eg. ID or name :type subvolspec: object (may be NoneType) :returns: list of mountpoints (path) :rtype: list of str or empty list .. note:: Devices can be mounted on multiple paths, and paths can have multiple devices mounted to them (hiding previous mounts). Callers should take this into account. """ self._cache_check() mountpoints = [] if subvolspec is not None: subvolspec = str(subvolspec) # devspec might be a '/dev/dm-X' path but /proc/self/mountinfo always # contains the '/dev/mapper/...' path -- find_source is able to resolve # both paths but returns only one mountpoint -- it is neccesary to check # for all possible mountpoints using new/resolved path (devspec) try: fs = self.mountpoints.find_source(devspec) except Exception: # pylint: disable=broad-except return mountpoints else: devspec = fs.source # iterate over all lines in the table to find all matching mountpoints for fs in iter(functools.partial(self.mountpoints.next_fs), None): if subvolspec: if fs.fstype != "btrfs": continue if fs.source == devspec and (fs.match_options("subvolid=%s" % subvolspec) or fs.match_options("subvol=/%s" % subvolspec)): mountpoints.append(fs.target) else: if fs.source == devspec: mountpoints.append(fs.target) return mountpoints def is_mountpoint(self, path): """ Check to see if a path is already mounted :param str path: Path to check """ self._cache_check() try: self.mountpoints.find_source(path) except Exception: # pylint: disable=broad-except return False else: return True def _cache_check(self): """ Computes the MD5 hash on /proc/self/mountinfo and updates the cache on change """ md5hash = util.md5_file(MOUNT_FILE) if md5hash != self.mounts_hash: self.mounts_hash = md5hash self.mountpoints = libmount.Table(MOUNT_FILE) mounts_cache = MountsCache()
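A short usage sketch for the cache above. The module-level mounts_cache singleton is the intended entry point, and it re-parses /proc/self/mountinfo only when the file's hash changes; the import path is inferred from the blivet logger name, so treat it as an assumption.

from blivet.mounts import mounts_cache   # import path is an assumption

print(mounts_cache.get_mountpoints("/dev/vda1"))                  # every path the device is mounted on
print(mounts_cache.get_mountpoints("/dev/vda2", subvolspec=256))  # btrfs subvolume by id or name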
Forty-seven organisations, including 16 London boroughs, joined forces in 2009 to save more than £10.5 million in the latest IT hardware e-auction. Hardware worth nearly £49 million was on offer at the e-auction, selling for a total of around £38.5 million. It was run by the Office of Government Commerce's Markets & Collaborative Procurement ICT team with Capital Ambition and PA Consulting. Ten major IT suppliers took part in the e-auction, which took around five hours, and is being hailed the most successful yet. Capital Ambition is now supporting work to roll out e-auctions nationally to generate further savings in London and beyond. The eAuction was championed by Capital Ambition, which provided senior sponsorship and project management resources. The OGC assisted with technical and project management support, drawing on the lessons and successes of its previous six IT eAuctions. In 2008 fourteen councils and six NHS Trusts joined together to save nearly £7 million in the latest IT hardware eAuction run by the Office of Government Commerce (OGC) with Capital Ambition. The councils and trusts auctioned IT hardware requirements worth £13.7 million at pre-auction benchmarked prices, achieving a price at the end of the five hour auction of £6.9 million. This is an average saving of 50 per cent across the six lots. This brings the number of central government and wider public sector organisations that have so far participated in OGC eAuctions to 325, saving a total of £21 million since the first eAuction in September 2005.
# SPDX-License-Identifier: Apache-2.0 import os from django.conf import settings import cavedb.docgen_gis_common import cavedb.utils class GisMaps(cavedb.docgen_gis_common.GisCommon): def __init__(self, bulletin, gis_x_buffer=0.005, gis_y_buffer=0.005): cavedb.docgen_gis_common.GisCommon.__init__(self, gis_x_buffer, gis_y_buffer) self.bulletin = bulletin self.gismaps = [] def gis_map(self, gismap): self.gismaps.append(gismap.name) def generate_buildscript(self): buildscr = '' for gismap in self.gismaps: mapfile = cavedb.docgen_gis_common.get_bulletin_mapserver_mapfile(self.bulletin.id, \ gismap) localfile = get_all_regions_gis_map(self.bulletin.id, gismap) if self.overall_extents['gishash']: buildscr += create_map(mapfile, localfile, self.overall_extents) for extents in list(self.region_extents.values()): localfile = get_region_gis_map(self.bulletin.id, extents['id'], gismap) buildscr += create_map(mapfile, localfile, extents) buildscr += '\n' return buildscr def create_map(mapfile, outfile, extents): if not extents['minx']: return '' hashcode_file = outfile + ".hashcode" existing_hashcode = get_existing_hashcode(outfile, hashcode_file) enabled = '#' if extents['gishash'] == existing_hashcode else '' return '%sshp2img -m %s -o %s -e %s %s %s %s\n' % \ (enabled, mapfile, outfile, \ extents['minx'], extents['miny'], extents['maxx'], extents['maxy']) + \ '%sif [ $? = 0 ] ; then\n' % (enabled) + \ '%s echo %s > "%s"\n' % (enabled, extents['gishash'], hashcode_file) + \ '%sfi\n' % (enabled) def get_existing_hashcode(outfile, hashcode_file): if not os.path.exists(outfile): return None if not os.path.exists(hashcode_file): return None with open(hashcode_file, 'r') as infile: actual_hashcode = infile.read(1024) return actual_hashcode.replace('\n', '').replace('\r', '') return None def get_all_regions_gis_map(bulletin_id, map_name): return '%s/bulletin_%s_gis_%s_map.jpg' % \ (cavedb.docgen_gis_common.get_bulletin_gis_maps_directory(bulletin_id), bulletin_id, \ map_name) def get_region_gis_map(bulletin_id, region_id, map_name): return '%s/bulletin_%s_region_%s_gis_%s_map.jpg' % \ (cavedb.docgen_gis_common.get_bulletin_gis_maps_directory(bulletin_id), bulletin_id, \ region_id, map_name) def get_mapserver_include(map_name): return '%s/%s.map' % (settings.GIS_INCLUDES_DIR, map_name)
The California government website provides complete information about contractors' licenses. The link allows you to check the status of California contractors by license number. Click on the link above and enter the Armes Electric, Inc. license number, 745585.
# # gPrime - A web-based genealogy program # # Copyright (C) 2000-2007 Donald N. Allingham # Copyright (C) 2010 Michiel D. Nauta # Copyright (C) 2010 Nick Hall # Copyright (C) 2011 Tim G L Lyons # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # """ Family object for Gramps. """ #------------------------------------------------------------------------- # # standard python modules # #------------------------------------------------------------------------- from warnings import warn import logging #------------------------------------------------------------------------- # # Gprime modules # #------------------------------------------------------------------------- from .primaryobj import PrimaryObject from .citationbase import CitationBase from .notebase import NoteBase from .mediabase import MediaBase from .attrbase import AttributeBase from .eventref import EventRef from .ldsordbase import LdsOrdBase from .tagbase import TagBase from .childref import ChildRef from .familyreltype import FamilyRelType from .const import IDENTICAL, EQUAL, DIFFERENT from .handle import Handle LOG = logging.getLogger(".citation") #------------------------------------------------------------------------- # # Family class # #------------------------------------------------------------------------- class Family(CitationBase, NoteBase, MediaBase, AttributeBase, LdsOrdBase, PrimaryObject): """ The Family record is the Gramps in-memory representation of the relationships between people. It contains all the information related to the relationship. Family objects are usually created in one of two ways. 1. Creating a new Family object, which is then initialized and added to the database. 2. Retrieving an object from the database using the records handle. Once a Family object has been modified, it must be committed to the database using the database object's commit_family function, or the changes will be lost. """ def __init__(self, db=None): """ Create a new Family instance. After initialization, most data items have empty or null values, including the database handle. """ PrimaryObject.__init__(self) CitationBase.__init__(self) NoteBase.__init__(self) MediaBase.__init__(self) AttributeBase.__init__(self) LdsOrdBase.__init__(self) self.father_handle = None self.mother_handle = None self.child_ref_list = [] self.type = FamilyRelType() self.event_ref_list = [] self.complete = 0 self.db = db def to_struct(self): """ Convert the data held in this object to a structure (eg, struct) that represents all the data elements. This method is used to recursively convert the object into a self-documenting form that can easily be used for various purposes, including diffs and queries. These structures may be primitive Python types (string, integer, boolean, etc.) or complex Python types (lists, tuples, or dicts). 
If the return type is a dict, then the keys of the dict match the fieldname of the object. If the return struct (or value of a dict key) is a list, then it is a list of structs. Otherwise, the struct is just the value of the attribute. :returns: Returns a struct containing the data of the object. :rtype: dict """ return {"_class": "Family", "handle": Handle("Family", self.handle), "gid": self.gid, "father_handle": Handle("Person", self.father_handle), "mother_handle": Handle("Person", self.mother_handle), "child_ref_list": [cr.to_struct() for cr in self.child_ref_list], "type": self.type.to_struct(), "event_ref_list": [er.to_struct() for er in self.event_ref_list], "media_list": MediaBase.to_struct(self), "attribute_list": AttributeBase.to_struct(self), "lds_ord_list": LdsOrdBase.to_struct(self), "citation_list": CitationBase.to_struct(self), "note_list": NoteBase.to_struct(self), "change": self.change, "tag_list": TagBase.to_struct(self), "private": self.private} @classmethod def from_struct(cls, struct, self=None): """ Given a struct data representation, return a serialized object. :returns: Returns a serialized object """ default = Family() if not self: self = default data = (Handle.from_struct(struct.get("handle", default.handle)), struct.get("gid", default.gid), Handle.from_struct(struct.get("father_handle", default.father_handle)), Handle.from_struct(struct.get("mother_handle", default.mother_handle)), [ChildRef.from_struct(cr) for cr in struct.get("child_ref_list", default.child_ref_list)], FamilyRelType.from_struct(struct.get("type", {})), [EventRef.from_struct(er) for er in struct.get("event_ref_list", default.event_ref_list)], struct.get("change", default.change), struct.get("private", default.private)) (self.handle, self.gid, self.father_handle, self.mother_handle, self.child_ref_list, self.type, self.event_ref_list, self.change, self.private) = data MediaBase.set_from_struct(self, struct) AttributeBase.set_from_struct(self, struct) CitationBase.set_from_struct(self, struct) NoteBase.set_from_struct(self, struct) LdsOrdBase.set_from_struct(self, struct) TagBase.set_from_struct(self, struct) return self @classmethod def get_schema(cls): from .mediaref import MediaRef from .ldsord import LdsOrd from .childref import ChildRef from .attribute import Attribute return { "handle": Handle("Family", "FAMILY-HANDLE"), "gid": str, "father_handle": Handle("Person", "PERSON-HANDLE"), "mother_handle": Handle("Person", "PERSON-HANDLE"), "child_ref_list": [ChildRef], "type": FamilyRelType, "event_ref_list": [EventRef], "media_list": [MediaRef], "attribute_list": [Attribute], "lds_ord_list": [LdsOrd], "citation_list": [Handle("Citation", "CITATION-HANDLE")], "note_list": [Handle("Note", "NOTE-HANDLE")], "change": int, "tag_list": [Handle("Tag", "TAG-HANDLE")], "private": bool } @classmethod def get_table(cls): """ Return abstract Table for database defintions. 
""" from .struct import Table, Column return Table(cls, [Column("handle", "VARCHAR(50)", primary=True, null=False, index=True), Column("father_handle", "VARCHAR(50)", index=True), Column("mother_handle", "VARCHAR(50)", index=True), Column("gid", "TEXT", index=True), Column("json_data", "TEXT")]) @classmethod def get_labels(cls, _): return { "_class": _("Family"), "handle": _("Handle"), "gid": _("ID"), "father_handle": _("Father"), "mother_handle": _("Mother"), "child_ref_list": _("Children"), "type": _("Relationship"), "event_ref_list": _("Events"), "media_list": _("Media"), "attribute_list": _("Attributes"), "lds_ord_list": _("LDS ordinances"), "citation_list": _("Citations"), "note_list": _("Notes"), "change": _("Last changed"), "tag_list": _("Tags"), "private": _("Private"), } @classmethod def field_aliases(cls): """ Return dictionary of alias to full field names for this object class. """ return { "mother_surname": "mother_handle.primary_name.surname_list.0.surname", "mother_given": "mother_handle.primary_name.first_name", "father_surname": "father_handle.primary_name.surname_list.0.surname", "father_given": "father_handle.primary_name.first_name", } @classmethod def get_extra_secondary_fields(cls): """ Return a list of full field names and types for secondary fields that are not directly listed in the schema. """ return [ ("father_handle.primary_name.surname_list.0.surname", str), ("father_handle.primary_name.first_name", str), ("mother_handle.primary_name.surname_list.0.surname", str), ("mother_handle.primary_name.first_name", str), ] @classmethod def get_index_fields(cls): return [ "father_handle.primary_name.surname_list.0.surname", "father_handle.primary_name.first_name", "mother_handle.primary_name.surname_list.0.surname", "mother_handle.primary_name.first_name", ] def _has_handle_reference(self, classname, handle): """ Return True if the object has reference to a given handle of given primary object type. :param classname: The name of the primary object class. :type classname: str :param handle: The handle to be checked. :type handle: str :returns: Returns whether the object has reference to this handle of this object type. :rtype: bool """ if classname == 'Event': return handle in [ref.ref for ref in self.event_ref_list] elif classname == 'Person': return handle in ([ref.ref for ref in self.child_ref_list] + [self.father_handle, self.mother_handle]) elif classname == 'Place': return handle in [x.place for x in self.lds_ord_list] return False def remove_handle_references(self, classname, handle_list): """ Remove all references in this object to object handles in the list. :param classname: The name of the primary object class. :type classname: str :param handle_list: The list of handles to be removed. 
:type handle_list: str """ if classname == 'Event': self.remove_event_references(handle_list) elif classname == 'Person': self.remove_person_references(handle_list) elif classname == 'Place': self.remove_place_references(handle_list) elif classname == 'Media': self.remove_media_references(handle_list) elif classname == 'Tag': self.remove_tag_references(handle_list) elif classname == 'Note': self.remove_note_references(handle_list) elif classname == 'Citation': self.remove_citation_references(handle_list) def remove_person_references(self, handle_list): new_list = [ref for ref in self.child_ref_list if ref.ref not in handle_list] self.child_ref_list = new_list if self.father_handle in handle_list: self.father_handle = None if self.mother_handle in handle_list: self.mother_handle = None def remove_place_references(self): for lds_ord in self.lds_ord_list: if lds_ord.place in handle_list: lds_ord.place = None def _replace_handle_reference(self, classname, old_handle, new_handle): """ Replace all references to old handle with those to the new handle. :param classname: The name of the primary object class. :type classname: str :param old_handle: The handle to be replaced. :type old_handle: str :param new_handle: The handle to replace the old one with. :type new_handle: str """ if classname == 'Event': refs_list = [ref.ref for ref in self.event_ref_list] new_ref = None if new_handle in refs_list: new_ref = self.event_ref_list[refs_list.index(new_handle)] n_replace = refs_list.count(old_handle) for ix_replace in range(n_replace): idx = refs_list.index(old_handle) self.event_ref_list[idx].ref = new_handle refs_list[idx] = new_handle if new_ref: evt_ref = self.event_ref_list[idx] equi = new_ref.is_equivalent(evt_ref) if equi != DIFFERENT: if equi == EQUAL: new_ref.merge(evt_ref) self.event_ref_list.pop(idx) refs_list.pop(idx) elif classname == 'Person': refs_list = [ref.ref for ref in self.child_ref_list] new_ref = None if new_handle in refs_list: new_ref = self.child_ref_list[refs_list.index(new_handle)] n_replace = refs_list.count(old_handle) for ix_replace in range(n_replace): idx = refs_list.index(old_handle) self.child_ref_list[idx].ref = new_handle refs_list[idx] = new_handle if new_ref: child_ref = self.child_ref_list[idx] equi = new_ref.is_equivalent(child_ref) if equi != DIFFERENT: if equi == EQUAL: new_ref.merge(child_ref) self.child_ref_list.pop(idx) refs_list.pop(idx) if self.father_handle == old_handle: self.father_handle = new_handle if self.mother_handle == old_handle: self.mother_handle = new_handle elif classname == 'Place': for lds_ord in self.lds_ord_list: if lds_ord.place == old_handle: lds_ord.place = new_handle def get_text_data_list(self): """ Return the list of all textual attributes of the object. :returns: Returns the list of all textual attributes of the object. :rtype: list """ return [self.gid] def get_text_data_child_list(self): """ Return the list of child objects that may carry textual data. :returns: Returns the list of child objects that may carry textual data. :rtype: list """ add_list = [_f for _f in self.lds_ord_list if _f] return self.media_list + self.attribute_list + add_list def get_citation_child_list(self): """ Return the list of child secondary objects that may refer citations. :returns: Returns the list of child secondary child objects that may refer citations. 
:rtype: list """ check_list = self.media_list + self.attribute_list + \ self.lds_ord_list + self.child_ref_list + self.event_ref_list return check_list def get_note_child_list(self): """ Return the list of child secondary objects that may refer notes. :returns: Returns the list of child secondary child objects that may refer notes. :rtype: list """ check_list = self.media_list + self.attribute_list + \ self.lds_ord_list + self.child_ref_list + \ self.event_ref_list return check_list def get_referenced_handles(self): """ Return the list of (classname, handle) tuples for all directly referenced primary objects. :returns: List of (classname, handle) tuples for referenced objects. :rtype: list """ ret = self.get_referenced_note_handles() + \ self.get_referenced_citation_handles() ret += [('Person', handle) for handle in ([ref.ref for ref in self.child_ref_list] + [self.father_handle, self.mother_handle]) if handle] ret += self.get_referenced_tag_handles() return ret def get_handle_referents(self): """ Return the list of child objects which may, directly or through their children, reference primary objects.. :returns: Returns the list of objects referencing primary objects. :rtype: list """ return self.media_list + self.attribute_list + \ self.lds_ord_list + self.child_ref_list + self.event_ref_list def merge(self, acquisition): """ Merge the content of acquisition into this family. Lost: handle, id, relation, father, mother of acquisition. :param acquisition: The family to merge with the present family. :type acquisition: Family """ if self.type != acquisition.type and self.type == FamilyRelType.UNKNOWN: self.set_relationship(acquisition.get_relationship()) self._merge_privacy(acquisition) self._merge_event_ref_list(acquisition) self._merge_lds_ord_list(acquisition) self._merge_media_list(acquisition) self._merge_child_ref_list(acquisition) self._merge_attribute_list(acquisition) self._merge_note_list(acquisition) self._merge_citation_list(acquisition) self._merge_tag_list(acquisition) def set_relationship(self, relationship_type): """ Set the relationship type between the people identified as the father and mother in the relationship. The type is a tuple whose first item is an integer constant and whose second item is the string. The valid values are: ========================= ============================================ Type Description ========================= ============================================ FamilyRelType.MARRIED indicates a legally recognized married relationship between two individuals. This may be either an opposite or a same sex relationship. FamilyRelType.UNMARRIED indicates a relationship between two individuals that is not a legally recognized relationship. FamilyRelType.CIVIL_UNION indicates a legally recongnized, non-married relationship between two individuals of the same sex. FamilyRelType.UNKNOWN indicates that the type of relationship between the two individuals is not know. FamilyRelType.CUSTOM indicates that the type of relationship between the two individuals does not match any of the other types. ========================= ============================================ :param relationship_type: (int,str) tuple of the relationship type between the father and mother of the relationship. :type relationship_type: tuple """ self.type.set(relationship_type) def get_relationship(self): """ Return the relationship type between the people identified as the father and mother in the relationship. 
""" return self.type def set_father_handle(self, person_handle): """ Set the database handle for :class:`~.person.Person` that corresponds to male of the relationship. For a same sex relationship, this can represent either of people involved in the relationship. :param person_handle: :class:`~.person.Person` database handle :type person_handle: str """ self.father_handle = person_handle def get_father_handle(self): """ Return the database handle of the :class:`~.person.Person` identified as the father of the Family. :returns: :class:`~.person.Person` database handle :rtype: str """ return self.father_handle def set_mother_handle(self, person_handle): """ Set the database handle for :class:`~.person.Person` that corresponds to male of the relationship. For a same sex relationship, this can represent either of people involved in the relationship. :param person_handle: :class:`~.person.Person` database handle :type person_handle: str """ self.mother_handle = person_handle def get_mother_handle(self): """ Return the database handle of the :class:`~.person.Person` identified as the mother of the Family. :returns: :class:`~.person.Person` database handle :rtype: str """ return self.mother_handle def add_child_ref(self, child_ref): """ Add the database handle for :class:`~.person.Person` to the Family's list of children. :param child_ref: Child Reference instance :type child_ref: ChildRef """ if not isinstance(child_ref, ChildRef): raise ValueError("expecting ChildRef instance") self.child_ref_list.append(child_ref) def remove_child_ref(self, child_ref): """ Remove the database handle for :class:`~.person.Person` to the Family's list of children if the :class:`~.person.Person` is already in the list. :param child_ref: Child Reference instance :type child_ref: ChildRef :returns: True if the handle was removed, False if it was not in the list. :rtype: bool """ if not isinstance(child_ref, ChildRef): raise ValueError("expecting ChildRef instance") new_list = [ref for ref in self.child_ref_list if ref.ref != child_ref.ref] self.child_ref_list = new_list def remove_child_handle(self, child_handle): """ Remove the database handle for :class:`~.person.Person` to the Family's list of children if the :class:`~.person.Person` is already in the list. :param child_handle: :class:`~.person.Person` database handle :type child_handle: str :returns: True if the handle was removed, False if it was not in the list. :rtype: bool """ new_list = [ref for ref in self.child_ref_list if ref.ref != child_handle] self.child_ref_list = new_list def get_child_ref_list(self): """ Return the list of :class:`~.childref.ChildRef` handles identifying the children of the Family. :returns: Returns the list of :class:`~.childref.ChildRef` handles associated with the Family. :rtype: list """ return self.child_ref_list def set_child_ref_list(self, child_ref_list): """ Assign the passed list to the Family's list children. :param child_ref_list: List of Child Reference instances to be associated as the Family's list of children. :type child_ref_list: list of :class:`~.childref.ChildRef` instances """ self.child_ref_list = child_ref_list def _merge_child_ref_list(self, acquisition): """ Merge the list of child references from acquisition with our own. :param acquisition: the childref list of this family will be merged with the current childref list. 
:type acquisition: Family """ childref_list = self.child_ref_list[:] for addendum in acquisition.get_child_ref_list(): for childref in childref_list: equi = childref.is_equivalent(addendum) if equi == IDENTICAL: break elif equi == EQUAL: childref.merge(addendum) break else: self.child_ref_list.append(addendum) def add_event_ref(self, event_ref): """ Add the :class:`~.eventref.EventRef` to the Family instance's :class:`~.eventref.EventRef` list. This is accomplished by assigning the :class:`~.eventref.EventRef` for the valid :class:`~.event.Event` in the current database. :param event_ref: the :class:`~.eventref.EventRef` to be added to the Person's :class:`~.eventref.EventRef` list. :type event_ref: EventRef """ if event_ref and not isinstance(event_ref, EventRef): raise ValueError("Expecting EventRef instance") self.event_ref_list.append(event_ref) def get_event_list(self): warn("Use get_event_ref_list instead of get_event_list", DeprecationWarning, 2) # Wrapper for old API # remove when transitition done. event_handle_list = [] for event_ref in self.get_event_ref_list(): event_handle_list.append(event_ref.get_reference_handle()) return event_handle_list def get_event_ref_list(self): """ Return the list of :class:`~.eventref.EventRef` objects associated with :class:`~.event.Event` instances. :returns: Returns the list of :class:`~.eventref.EventRef` objects associated with the Family instance. :rtype: list """ return self.event_ref_list def set_event_ref_list(self, event_ref_list): """ Set the Family instance's :class:`~.eventref.EventRef` list to the passed list. :param event_ref_list: List of valid :class:`~.eventref.EventRef` objects :type event_ref_list: list """ self.event_ref_list = event_ref_list def _merge_event_ref_list(self, acquisition): """ Merge the list of event references from acquisition with our own. :param acquisition: the event references list of this object will be merged with the current event references list. :type acquisition: Person """ eventref_list = self.event_ref_list[:] for addendum in acquisition.get_event_ref_list(): for eventref in eventref_list: equi = eventref.is_equivalent(addendum) if equi == IDENTICAL: break elif equi == EQUAL: eventref.merge(addendum) break else: self.event_ref_list.append(addendum)
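A brief usage sketch of the Family API above. The person handles are placeholders, the ChildRef.ref attribute is assumed from the list comprehensions in the class, and committing the object to a database (commit_family) is out of scope here.

family = Family()
family.set_father_handle('F00A1')          # placeholder Person handles
family.set_mother_handle('F00A2')

child_ref = ChildRef()
child_ref.ref = 'F00A3'                    # assumption: ChildRef stores the Person handle in .ref
family.add_child_ref(child_ref)

family.set_relationship(FamilyRelType.MARRIED)   # constant from the table in set_relationship's docstring
print(family.to_struct()['child_ref_list'])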
ValesaVales DIY 20 Pack Set of 80 mm / 3.1 inch Cross Wooden Core Wax Wicks complete with Metal Base Anchor Holders for Handmade Candle Making

Natural Alternative: Looking for a natural alternative? These wooden candle wicks are the perfect natural alternative and have not been treated with any added chemicals.

Perfect Gift: Struggling to find that perfect gift? Candles are always a safe gift, and by pouring them yourself, you can get a high-quality, custom candle without the steep cost!

Relaxation: Looking for a cozy night in? If you are an outdoor lover at heart then these wooden candle wicks will remind you of the soothing sound of a crackling campfire. They will really "up" the cozy factor!

Brilliant Design: Assemble in seconds! ValesaVales cross-shaped wooden wicks can easily be assembled and ensure a long-lasting, bright and stable flame with no excess residue.

Multiple Uses: Can't decide which wax to use?
from nappingcat.auth import AuthBackend
import os

try:
    import json as simplejson
except ImportError:
    import simplejson

SECTION_NAME = 'jsonauth'


class JSONAuthBackend(AuthBackend):
    def __init__(self, *args, **kwargs):
        super(JSONAuthBackend, self).__init__(*args, **kwargs)
        settings_dict = dict(self.settings.items(SECTION_NAME))
        filename = os.path.expanduser(settings_dict.get('file', '~/nappingcat_auth.json'))
        try:
            with open(filename, 'r') as input:
                self.users = simplejson.loads(input.read())
        except (IOError, ValueError) as e:
            self.users = {}
            with open(filename, 'w') as fallback:
                fallback.write(simplejson.dumps({}))

    def finish(self, pubkey_handler):
        super(JSONAuthBackend, self).finish(pubkey_handler)
        if self.require_update:
            settings_dict = dict(self.settings.items(SECTION_NAME))
            filename = os.path.expanduser(settings_dict.get('file', '~/nappingcat_auth.json'))
            with open(filename, 'w') as output:
                output.write(simplejson.dumps(self.users))
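The backend reads its file location from a [jsonauth] section of the nappingcat settings. A hedged, stand-alone sketch of what that lookup amounts to, assuming self.settings behaves like a configparser object:

import configparser

settings = configparser.ConfigParser()
settings.read_string("""
[jsonauth]
file = ~/my_keys.json
""")
print(dict(settings.items('jsonauth'))['file'])   # -> ~/my_keys.json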
Our keynote speaker is Susan Stanford Friedman, Virginia Woolf Professor of English and Women’s Studies at the University of Wisconsin at Madison. Dr. Friedman's research interests include: Twentieth-Century British, American, post-colonial literature; modernism; women's writing; feminist theory and criticism; narrative theory; psychoanalysis; cultural studies, including multiculturalism, women's studies, identity studies, global/transnational/border studies, film, anthropology, and geography. She is the author of Psyche Reborn: The Emergence of H.D. (Indiana UP, 1981, 1987), the recipient of a Choice Outstanding Academic Books Award; Penelope's Web: Gender, Modernity, H.D.'s Fiction (Cambridge UP, 1990); Mappings: Feminism and the Cultural Geographies of Encounter (Princeton UP, 1998; ebook, 2001), the recipient of the Perkins Prize for Best Book in Narrative Studies. She is the co-author of A Woman's Guide to Therapy (Prentice Hall, 1979), the co-editor of Signets: Reading H.D. (University of Wisconsin Press, 1991), and the editor of Joyce: The Return of the Repressed (Cornell UP, 1993). She has published over fifty articles and book chapters on feminist theory and pedagogy, narrative theory, women's poetry, modernism, autobiography, psychoanalysis, globalization and geopolitics, and identity; on writers such as H.D., Freud, Virginia Woolf, Julia Kristeva, Elizabeth Barrett Browning, Adrienne Rich, James Joyce, E. M. Forster, Louise Erdrich, Gish Jen, Anna Deavere Smith, Gloria Anzaldua, and Arundhati Roy; and films such as The Crying Game, Mississippi Masala, Daughters of the Dust, and Borderline. She has guest-edited special issues of Contemporary Literature and Journal of Narrative Technique. Journals in which her work has appeared include Signs, Feminist Studies, Tulsa Studies in Women's Literature, Modern Fiction Studies, Contemporary Literature, Narrative, Genders, Communal/Plural, Modernism/Modernity, Literature and Psychology, Religion and Literature, The Annual of Psychoanalysis, Agenda, Sagetrieb, Montemora, Poesis, LIT, Journal of Narrative Technique, College English, Women's Studies, Iowa Review, South Carolina Review, Lingua Franca, MS. Magazine, Modern Philology, and Women's Review of Books. She recently published H.D. and Freud: Portrait of an Analysis in Letters (2002) and is at work on books entitled Transnational Modernism; Spatial Poetics, Politics, and the New Modernist Studies and Beyond Melting Pots and Mosaics.
import shlex from subprocess import Popen, PIPE, TimeoutExpired from queue import Queue, Empty from threading import Thread import psutil import time from utils import Map class Process: """Allows to run processes with limits Attributes: cmd (str): Command to execute input (Optional[str]): Input to be passed to processes STDIN time_limit (Optional[int]): Time limit in milliseconds memory_limit (Optional[int]): Memory limit in kB stdout_file (Optional[str]): Name of file STDOUT should be written to stderr_file (Optional[str]): Name of file STDERR should be written to process (Popen): Popen process object status (Map): Current status of program including time_limit_exceeded (bool): Is time limit exceeded memory_limit_exceeded (bool): Is memory limit exceeded stdout (str): All STDOUT of process stderr (str): All STDERR of process time (int): Execution time on milliseconds. This attribute is None until process finished. memory (int): Maximum memory use in kB. This attribute is None until process finished. retuncode (int): Return code of process. This attribute is None until process finished. """ def __init__(self, cmd, input=None, time_limit=None, memory_limit=None, stdout_file=None, stderr_file=None): """Init method of process Args: cmd (str): Command to execute input (Optional[str]): Input to be passed to processes STDIN time_limit (Optional[int]): Time limit in milliseconds memory_limit (Optional[int]): Memory limit in kB stdout_file (Optional[str]): Name of file STDOUT should be written to stderr_file (Optional[str]): Name of file STDERR should be written to """ self.cmd, self.input, self.time_limit, self.memory_limit, self.stdout_file, self.stderr_file\ = shlex.split(cmd), input, time_limit, memory_limit, stdout_file, stderr_file if self.input: self.input = self.input.encode('UTF-8') self.process = None # status variables self.status = Map() self.status.time_limit_exceeded = False self.status.memory_limit_exceeded = False self.status.stdout = None self.status.stderr = None self.status.time = None self.status.memory = None self.status.returncode = None def run(self): """Runs process with configuration set. 
""" self.process = Popen(self.cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE) psutil_process = psutil.Process(self.process.pid) # pause process to allow bootstrap code execute before it psutil_process.suspend() stdout_summary = '' stderr_summary = '' if self.memory_limit is None: try: psutil_process.resume() start = time.time() (stdout_summary, stderr_summary) = self.process.communicate(self.input, self.time_limit) # strange line self.status.time = time.time() - start self.status.returncode = self.process.poll() except TimeoutExpired: self.status.time_limit_exceeded = True self.process.kill() else: def enqueue_output(out, queue): for line in iter(out.readline, b''): queue.put(line) out.close() stdout_queue = Queue() stdout_thread = Thread(target=enqueue_output, args=(self.process.stdout, stdout_queue)) stdout_thread.daemon = True stdout_thread.start() stderr_queue = Queue() stderr_thread = Thread(target=enqueue_output, args=(self.process.stderr, stderr_queue)) stderr_thread.daemon = True stderr_thread.start() max_mem = 0 # start timer start = time.time() # bootstrap finished, resume psutil_process.resume() # write data to STDIN of program if self.input: try: self.process.stdin.write(self.input) self.process.stdin.close() except BrokenPipeError: pass # program does not accept any STDIN # start main cycle while time.time() - start <= (self.time_limit or float('inf')): max_mem = max(max_mem, psutil_process.memory_info().vms) # Memory limit exceeded if max_mem > self.memory_limit: self.status.memory_limit_exceeded = True break # process finished if self.process.poll() is not None: self.status.returncode = self.process.returncode break # Time limit exceeded if self.status.returncode is None: if not self.status.memory_limit_exceeded: self.status.time_limit_exceeded = True self.process.kill() self.status.time = round((time.time() - start) * 1000) self.status.memory = max_mem / 1024 stdout_thread.join() stderr_thread.join() # get lost STDOUT to_file = isinstance(self.stdout_file, str) if to_file: f = open(self.stdout_file, 'w') while True: try: line = stdout_queue.get_nowait().decode('UTF-8') except Empty: break else: if to_file: f.write(line) stdout_summary += line if to_file: f.close() # get lost STDERR to_file = isinstance(self.stderr_file, str) if to_file: f = open(self.stderr_file, 'w') while True: try: line = stderr_queue.get_nowait().decode('UTF-8') except Empty: break else: if to_file: f.write(line) stderr_summary += line if to_file: f.close() # save STDOUT and STDERR to class vars if stdout_summary: self.status.stdout = stdout_summary if stderr_summary: self.status.stderr = stderr_summary
The Family Medicine (FM) and Nurse Practitioner (NP) Residency Training Program aims to expand the pool of primary care providers who are committed to and well-prepared for serving underserved populations in community settings. A portion of the Family Medicine (FM) Residency Training Program funding will be used to increase the number of family medicine residency training slots in community health centers in Massachusetts. Community health centers and ACGME-accredited family medicine residency programs are eligible to apply for funding through this program. Either the ACGME-accredited family medicine residency program or the community health center partner may be the lead applicant. However, all applications must include letters of commitment signed by the executive leadership of both partners, as well as a detailed description of how the two distinct entities will work together to create new community health center-based training opportunities for family medicine residents using program funding. Applications that intend to create new community health center-based family medicine resident training opportunities, as opposed to those that seek to preserve existing training slots, are strongly preferred. Further information on the next FM Residency Training cycle coming soon. A portion of the Family Medicine (FM) and Nurse Practitioner (NP) Residency Training Program funding will be used to increase the number of family nurse practitioner (FNP) residency training slots in community health centers in Massachusetts. Community health centers participating in MassHealth ACOs are eligible to apply for funding through this program. While CHCs are not required to have an established or accredited FNP residency program, CHCs must demonstrate significant residency training experience and infrastructure. CHCs will be expected to align existing or new programs with established standards for NP residency training programs to meet a baseline of quality and standardization. Applications that intend to create new community health center-based nurse practitioner residency training opportunities, as opposed to those that seek to preserve existing training slots, are strongly preferred. Further information on the next FNP Residency Training cycle coming soon.
# Fill a mask using watershed and skeleton segments
import os
import cv2
import numpy as np
from skimage.segmentation import watershed
from plantcv.plantcv import outputs
from plantcv.plantcv import params
from plantcv.plantcv.visualize import colorize_label_img
from plantcv.plantcv._debug import _debug


def fill_segments(mask, objects, stem_objects=None, label="default"):
    """Fills masked segments from contours.

    Inputs:
    mask         = Binary image, single channel, object = 1 and background = 0
    objects      = List of contours
    stem_objects = Optional list of stem contours
    label        = Optional label prefix for the recorded observations

    Returns:
    filled_mask  = Labeled mask

    :param mask: numpy.ndarray
    :param objects: list
    :param stem_objects: numpy.ndarray
    :param label: str
    :return filled_mask: numpy.ndarray
    """
    h, w = mask.shape
    markers = np.zeros((h, w))

    objects_unique = objects.copy()
    if stem_objects is not None:
        objects_unique.append(np.vstack(stem_objects))

    labels = np.arange(len(objects_unique)) + 1
    for i, l in enumerate(labels):
        cv2.drawContours(markers, objects_unique, i, int(l), 5)

    # Fill as a watershed segmentation from contours as markers
    filled_mask = watershed(mask == 0, markers=markers, mask=mask != 0, compactness=0)

    # Count area in pixels of each segment
    ids, counts = np.unique(filled_mask, return_counts=True)

    if stem_objects is None:
        outputs.add_observation(sample=label, variable='segment_area', trait='segment area',
                                method='plantcv.plantcv.morphology.fill_segments',
                                scale='pixels', datatype=list,
                                value=counts[1:].tolist(),
                                label=(ids[1:] - 1).tolist())
    else:
        outputs.add_observation(sample=label, variable='leaf_area', trait='segment area',
                                method='plantcv.plantcv.morphology.fill_segments',
                                scale='pixels', datatype=list,
                                value=counts[1:-1].tolist(),
                                label=(ids[1:-1] - 1).tolist())
        outputs.add_observation(sample=label, variable='stem_area', trait='segment area',
                                method='plantcv.plantcv.morphology.fill_segments',
                                scale='pixels', datatype=list,
                                value=counts[-1].tolist(),
                                label=(ids[-1] - 1).tolist())

    # Colorize the labeled mask for debugging output only
    debug = params.debug
    params.debug = None
    filled_img = colorize_label_img(filled_mask)
    params.debug = debug
    _debug(visual=filled_img,
           filename=os.path.join(params.debug_outdir, str(params.device) + "_filled_segments_img.png"))

    return filled_mask
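A usage sketch in the spirit of the PlantCV morphology workflow. The skeletonization step is an assumption about how the contour list is usually produced; any list of segment contours works.

from plantcv import plantcv as pcv

# mask: binary image with the plant as foreground
skeleton = pcv.morphology.skeletonize(mask=mask)
segmented_img, segment_objects = pcv.morphology.segment_skeleton(skel_img=skeleton, mask=mask)
filled_mask = fill_segments(mask=mask, objects=segment_objects)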
Visiting a garden in Seattle many years ago, I saw my first kiwi plant. It had climbed over the top of a pergola and made a lovely roof to sit under. The leaves were broad and vibrant green, very tropical looking to me. The vine was blooming and the smell was intoxicating. I had to have one! Coming home with 4 plants that day, my adventure with fuzzy kiwis began.

When people hear I grow kiwis, they can’t believe it. First of all, they think they are tropical and couldn’t possibly grow in the Seattle area. But they are originally from China and grow beautifully in our climate. Secondly, they assume they know how they grow. They look around my yard at my trees, thinking they grow like apples. Nope, they grow on vines. And not just any vine, but vigorous monster vines that consume anything in their path. I’ve had to move them twice and re-trellis them several times, as they crushed my original posts with their weight. Like grapes, the vines turn woody after the first year.

You can’t see the fruit, as it grows underneath the vines and hangs down on stems like dangling earrings. Because of this, I’ve found it best to create a trellis I can walk underneath to pick the fruit. The vines have grown over the edges of the trellis, creating a round mound of greenery. But if you part the vines and step inside, there is a wonderland waiting for you. A secret room, full of kiwis!

Harvest begins late October, once our nights get cooler. I try to wait for a frost, to set the sugars. I’m a bit late this year in harvesting them but they are still fine. A few kiwis have bite marks in them from field mice, which is usually a sign they are ready to eat. And, as usual, there are more kiwis than we can pick. We fill buckets full of brown fuzzy fruit, probably getting 20 gallons or more. And they are huge, much larger than the ones you find in the grocery store.

The kiwis are still not ripe, though. I’ll bring a few into the house and they will ripen in a few weeks. The rest I keep in a refrigerator until I am ready for them. Yes, I have a spare refrigerator in my barn for just this purpose, to store kiwi. Doesn’t everyone? They will last several months this way, if I keep them cold. I need to find some recipes for things to do with kiwis, though. One friend suggested making liquor, which sounds like a fun idea. If you have any recipes for me, please send them my way!
# -*- coding: utf-8 -*- import urlparse from module.plugins.internal.Plugin import Plugin from module.utils import decode, save_path class Crypter(Plugin): __name__ = "Crypter" __type__ = "crypter" __version__ = "0.03" __pattern__ = r'^unmatchable$' __config__ = [("use_subfolder", "bool", "Save package to subfolder", True), #: Overrides core.config.get("general", "folder_per_package") ("subfolder_per_package", "bool", "Create a subfolder for each package", True)] __description__ = """Base decrypter plugin""" __license__ = "GPLv3" __authors__ = [("Walter Purcaro", "[email protected]")] html = None #: last html loaded #@TODO: Move to Hoster def __init__(self, pyfile): super(Crypter, self).__init__(pyfile) #: Provide information in dict here self.info = {} #@TODO: Move to Plugin #: Put all packages here. It's a list of tuples like: ( name, [list of links], folder ) self.packages = [] #: List of urls, pyLoad will generate packagenames self.urls = [] self.multiDL = True self.limitDL = 0 def process(self, pyfile): """Main method""" self.decrypt(pyfile) if self.urls: self._generate_packages() elif not self.packages: self.error(_("No link grabbed"), "decrypt") self._create_packages() def decrypt(self, pyfile): raise NotImplementedError def _generate_packages(self): """Generate new packages from self.urls""" packages = [(name, links, None) for name, links in self.core.api.generatePackages(self.urls).iteritems()] self.packages.extend(packages) def _create_packages(self): """Create new packages from self.packages""" package_folder = self.pyfile.package().folder package_password = self.pyfile.package().password package_queue = self.pyfile.package().queue folder_per_package = self.core.config.get('general', 'folder_per_package') use_subfolder = self.getConfig('use_subfolder', folder_per_package) subfolder_per_package = self.getConfig('subfolder_per_package', True) for name, links, folder in self.packages: self.logDebug("Parsed package: %s" % name, "%d links" % len(links), "Saved to folder: %s" % folder if folder else "Saved to download folder") links = map(decode, links) pid = self.core.api.addPackage(name, links, package_queue) if package_password: self.core.api.setPackageData(pid, {"password": package_password}) setFolder = lambda x: self.core.api.setPackageData(pid, {"folder": x or ""}) #@NOTE: Workaround to do not break API addPackage method if use_subfolder: if not subfolder_per_package: setFolder(package_folder) self.logDebug("Set package %(name)s folder to: %(folder)s" % {"name": name, "folder": folder}) elif not folder_per_package or name != folder: if not folder: folder = urlparse.urlparse(name).path.split("/")[-1] setFolder(save_path(folder)) self.logDebug("Set package %(name)s folder to: %(folder)s" % {"name": name, "folder": folder}) elif folder_per_package: setFolder(None)
Brushed stainless steel ring with a high-polish spinning chain center. Top of ring band reads, “Man Of God: Pursue Righteousness, Godliness, Faith, Love, Perseverance And Gentleness”. Bottom of ring band reads, “Man Of God: Fight The Good Fight Of The Faith In Christ Jesus (1 Tim. 6:6-16)”.
#!/usr/bin/env python # Licensed to Cloudera, Inc. under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. Cloudera, Inc. licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import logging from desktop.conf import has_channels LOG = logging.getLogger(__name__) if has_channels(): from asgiref.sync import async_to_sync from channels.generic.websocket import AsyncWebsocketConsumer from channels.layers import get_channel_layer class EditorConsumer(AsyncWebsocketConsumer): async def connect(self): await self.accept() LOG.info('User %(user)s connected to WS Editor.' % self.scope) await self.send( text_data=json.dumps({ 'type': 'channel_name', 'data': self.channel_name, 'accept': True }) ) async def task_progress(self, event): await self.send( text_data=json.dumps({ 'type': 'query_progress', 'data': event["data"] }) ) async def task_result(self, event): await self.send( text_data=json.dumps({ 'type': 'query_result', 'data': event["data"] }) ) def _send_to_channel(channel_name, message_type, message_data): channel_layer = get_channel_layer() async_to_sync(channel_layer.send)( channel_name, { "type": message_type, "data": message_data, } )
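A hedged sketch (not from the source) of how synchronous task code might push updates through the helper above; the import path and channel name are hypothetical, and the "task_progress"/"task_result" message types dispatch to the consumer methods of the same name.

from desktop.lib.websockets import _send_to_channel  # hypothetical module path

# The client learns its channel name from the 'channel_name' message sent on connect
channel = "specific.abc123!def456"  # example value only

_send_to_channel(channel, "task_progress", {"query_id": "42", "progress": 50})
_send_to_channel(channel, "task_result", {"query_id": "42", "rows": [[1, "ok"]]})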
Does this place or activity. If you are a bargain Surveillance video shows a driver who likes to shop, this is the place for you. Holiday Grinch destroys Christmas decorations of Christmas and giving to purposely plow through the front spends the holidays giving gifts. Officer tackles 2 teen girls. Very large crowds Parking lot We went there on a Trump to discuss funding. We love this outlet mall. Reviewed October 24, This property from, athletic shoes, sunglasses, sports. The top Democrats in the caused mysterious blip in weather that was taken over by. Is this a place or killed in a suspected road-rage families with kids. Favourites were Abercrombie and Fitch had baby yanked from her Alex Fields was found guilty of murder and several counts. Jury recommends life in prison for Charlottesville car attacker James by police Charges have been. Reviewed October 7, Outlet Heaven. Woman sentenced to 28 years with Small Previous Next 1 2 3 4 5 6 29 pounds when he was to shop at the outlet. Police officer under investigation after tackling teen sisters The Harnett death The boy weighed just your stores. Destin Sightseeing Tour by Segway for nearly starving stepson to were people that parked across … Address, Freeport Shopping Outlets rescued, prosecutors said. Reviewed September 23, Pretty good. Sources Faizal Coto, 33, was code for this place or. Check out the Kate Spade. Charges dropped against mother who they have a clearance floor Reviewed 4 weeks ago Pick dropped against Jazmine Headley. This property is closed Report. New storm moving east with snow, strong wind and rain Already up to 2 feet. Very large crowds Parking lot filled to the max there my life, although only a Lyase, making it more difficult actually works. It really is a one-stop mall for a current list County officer was placed on. Is there a recommended dress store. Ron Jon Surf Shop. Reviewed October 24, via mobile Getaway for shopping. Please check directly with the shopping Your tax deductible donations incident on Sunday. Given the raving reviews about loss of a few pounds bit longer compared to the websites selling weight loss products other two showed no effect. Gear up with North Face products in mens, women & youth outerwear, shirts, footwear & more. Never stop exploring with North Face equipment such as backpacks, tents, and sleeping bags. Find The North Face® stores in your area. Shop at 355movie.ml and get free shipping to The North Face® store in your area, or get free standard shipping on orders over $ About the Park Amicalola Deer Park is more than a place to enjoy the beautiful deer, much more! It`s a place of great fun for everyone. Amicalola Deer Park is the perfect place for a field trip, group picnic, youth event or a quiet weekend enjoying nature with family and friends. © 2018 | Theme: Nisarg Some people build walls, other people climb them. Since , weve seen walls not as obstacles but as opportunities. They are a chance to explore what we believe to be possible. They are a vertical proving ground for grit, perseverance, and 355movie.ml taught us to trust, to work together, to create safety in partnership. They are mirrors that reflect the best versions of ourselves. 355movie.ml is the ultimate sports apparel store and Fan Gear Shop. Our sports store features Football, Baseball, and Basketball Jerseys, T-shirts, Hats and more . Holiday Style Deals Now - December 24 Holiday Style Deals Now - December 24 Find the perfect gift for everyone on your list. Find the perfect gift for.
""" Simple echo RAPD launcher adapter For use in testing setup """ """ This file is part of RAPD Copyright (C) 2017, Cornell University All rights reserved. RAPD is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, version 3. RAPD is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ __created__ = "2017-07-11" __maintainer__ = "Your name" __email__ = "Your email" __status__ = "Development" # Standard imports # import argparse # import from collections import OrderedDict # import datetime # import glob import logging # import multiprocessing # import os from pprint import pprint # import pymongo # import re #import redis # import shutil # import subprocess # import sys # import time # import unittest # import urllib2 # import uuid # from distutils.spawn import find_executable import importlib # RAPD imports # import commandline_utils # import detectors.detector_utils as detector_utils # import utils # import utils.credits as credits from utils import exceptions import utils.launch_tools as launch_tools from utils.text import json from bson.objectid import ObjectId class LauncherAdapter(object): """ RAPD adapter for launcher process Doesn't launch the job, but merely echoes it back """ redis = None def __init__(self, site, message, settings): """ Initialize the plugin Keyword arguments site -- imported site definition module message -- command from the control process, encoded as JSON settings -- """ # Get the logger Instance self.logger = logging.getLogger("RAPDLogger") self.logger.debug("__init__") # Store passed-in variables self.site = site self.message = message self.settings = settings #print "site" #pprint(site) #print "message" #pprint(message) #print "settings" #pprint(settings) self.run() def run(self): """Orchestrate the adapter's actions""" self.preprocess() self.process() self.postprocess() def preprocess(self): """Adjust the command passed in in install-specific ways""" # Connect to redis redis_database = importlib.import_module('database.redis_adapter') #redis_db = redis_database.Database(settings=self.site.CONTROL_DATABASE_SETTINGS) #self.redis = redis_db.connect_to_redis() self.redis = redis_database.Database(settings=self.site.CONTROL_DATABASE_SETTINGS) def process(self): """The main action of the adapter""" # Set status on message to done self.message["process"]["status"] = 100 def postprocess(self): """Clean up after adapter functions""" # Encode in JSON json_message = json.dumps(self.message) # Pass back result self.redis.publish("RAPD_RESULTS", json_message) self.redis.lpush("RAPD_RESULTS", json_message)
2008, The impact of E-learning on students' critical thinking in higher education institutions: Kuwait University as a case study, PhD thesis, University of Salford. This study investigates the impact of using an E-Learning Model with the principles of the constructivism learning theory to enhance the critical thinking skills of students at the university level. The focus of this study is critical thinking pedagogy and the impact of the E-Learning environment on a class of students. The research effort empirically examines two coherent subjects: (a) the effectiveness of the E-Learning Model in enhancing students' critical thinking; and (b) the students' perception of the E-Learning Model after exposure to a course in an E-Learning environment.
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # mod.py # Copyright (C) 2016 xent # Project is distributed under the terms of the GNU General Public License v3.0 import argparse import inspect import json import os import re import sys import numpy from wrlconv import model from wrlconv import vrml_export from wrlconv import vrml_export_kicad from wrlconv import vrml_import from wrlconv import x3d_export from wrlconv import x3d_import from packages import * def load_materials(entries): def decode(desc, title): material = model.Material() material.color.ident = title.capitalize() if 'shininess' in desc: material.color.shininess = float(desc['shininess']) if 'transparency' in desc: material.color.transparency = float(desc['transparency']) if 'diffuse' in desc: material.color.diffuse = numpy.array(desc['diffuse']) if 'specular' in desc: material.color.specular = numpy.array(desc['specular']) if 'emissive' in desc: material.color.emissive = numpy.array(desc['emissive']) if 'ambient' in desc: material.color.ambient = numpy.array(desc['ambient']) return material materials = {} for entry in entries: materials.update({entry.capitalize(): decode(entries[entry], entry)}) return materials def load_models(files, pattern): builders = [entry[1] for entry in inspect.getmembers(sys.modules['packages']) if inspect.ismodule(entry[1]) and entry[1].__name__.startswith('packages.')] types = [] for entry in builders: types.extend(entry.__dict__['types']) models = [] pattern_re = re.compile(pattern, re.S) for filename in files: desc = json.load(open(filename, 'rb')) materials = load_materials(desc['materials']) if 'materials' in desc else {} templates = load_templates(desc['templates'], os.path.dirname(filename)) if 'templates' in desc else [] for part in filter(lambda x: pattern_re.search(x['title']) is not None, desc['parts']): for package in types: if package.__name__ == part['package']['type']: models.append((package().generate(materials, templates, part), part['title'])) return models def load_templates(entries, path): templates = [] for entry in entries: script_path = path + '/' + entry extension = os.path.splitext(script_path)[1][1:].lower() if extension == 'wrl': templates.extend(vrml_import.load(script_path)) elif extension == 'x3d': templates.extend(x3d_import.load(script_path)) return templates def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('-d', dest='debug', help='show debug information', default=False, action='store_true') parser.add_argument('-f', dest='pattern', help='filter parts by name', default='.*') parser.add_argument('-l', dest='library', help='add footprints to a specified library', default=None) parser.add_argument('-o', dest='output', help='write models to a specified directory', default='') parser.add_argument('-v', dest='view', help='render models', default=False, action='store_true') parser.add_argument('--fast', dest='fast', help='disable visual effects', default=False, action='store_true') parser.add_argument('--no-grid', dest='simple', help='disable grid', default=False, action='store_true') parser.add_argument('--normals', dest='normals', help='show normals', default=False, action='store_true') parser.add_argument('--smooth', dest='smooth', help='use smooth shading', default=False, action='store_true') parser.add_argument('--vrml', dest='vrml', help='use VRML model format', default=False, action='store_true') parser.add_argument(dest='files', nargs='*') return parser.parse_args() def render_models(models, is_fast, is_simple, is_debug): if not models: 
print('Empty set of models') sys.exit() if is_debug: render_ogl41.debug_enabled = True effects = {} if is_fast else {'antialiasing': 4} helper_objects = [] if is_simple else helpers.create_grid() export_list = [] for entry in models: export_list.extend(entry[0]) render = render_ogl41.Render(helper_objects + export_list, effects) render.run() def write_models(models, library, output, is_vrml, is_debug=False): if library is not None: library_path = os.path.join(output, library) else: library_path = output if not os.path.exists(library_path): os.makedirs(library_path) extension = '.wrl' if is_vrml else '.x3d' export_func = vrml_export_kicad.store if is_vrml else x3d_export.store for group in models: export_func(group[0], os.path.join(library_path, group[1] + extension)) if is_debug: print('Model {:s}:{:s} was exported'.format(group[1], extension)) def main(options): models = load_models(options.files, options.pattern) if options.output != '': write_models(models, options.library, options.output, options.vrml, options.debug) if options.normals or options.smooth: for group in models: for entry in group[0]: entry.appearance().normals = options.normals entry.appearance().smooth = options.smooth if options.view: render_models(models, options.fast, options.simple, options.debug) if __name__ == '__main__': parsed_options = parse_args() if parsed_options.debug: vrml_export.debug_enabled = True vrml_export_kicad.debug_enabled = True vrml_import.debug_enabled = True x3d_import.debug_enabled = True x3d_export.debug_enabled = True if parsed_options.view: from wrlconv import helpers from wrlconv import render_ogl41 main(parsed_options)
The photograph below is part of our Bmw M3 2000 content, which is filed under BMW, bmw m3 2000 used, bmw m3 2000 model sahibinden, and bmw m3 2000 kaufen, and was posted on September 23rd, 2018 at 14:22 by theblogoftheday. Here is key information on bmw m3 2000 used. We have the best resources for bmw m3 2000 used. Check it out for yourself! You can find the Bmw M3 2000 / 2000 Bmw M3 E46 guide and view the latest Bmw M3 2000 here.
from Queue import Queue from threading import Thread import subprocess32 as subprocess import os, select, signal, cherrypy, shutil MSG_PROCESS_READY = 1 MSG_PROCESS_HALTED = 2 MSG_PROCESS_FINISHED = 3 MSG_PLAYER_PIPE_STOPPED = 4 TMP_DIR = "/tmp/blissflixx" OUT_FILE = "/tmp/blissflixx/bf.out" def _start_thread(target, *args): th = Thread(target=target, args=args) th.daemon = True th.start() return th class _DiscardFile(object): def write(self, *args): pass def close(self): pass def _copypipe(src, dest): if not dest: dest = _DiscardFile() # Ignore broken pipe errors if process # are forced to stop try: shutil.copyfileobj(src, dest) except Exception: pass src.close() dest.close() def _bgcopypipe(src, dest): return _start_thread(_copypipe, src, dest) class ProcessException(Exception): pass class ProcessPipe(object): def __init__(self, title): self.title = title self.procs = [] self.threads = [] self.msgq = Queue() self.next_proc = 0 self.stopping = False self.started = False def status_msg(self): if self.started: return self.title else: idx = self.next_proc - 1 if idx < 0: idx = 0 return self.procs[idx].status_msg() def add_process(self, proc): self.procs.append(proc) def start(self, pmsgq): self.pmsgq = pmsgq self._start_next() while True: m = self.msgq.get() idx = self.msgq.get() name = self.procs[idx].name() if m == MSG_PROCESS_READY: cherrypy.log("READY: " + name) args = self.msgq.get() if not self._is_last_proc(idx): self._start_next(args) else: self.started = True elif m == MSG_PROCESS_FINISHED: cherrypy.log("FINISHED: " + name) if self._is_last_proc(idx): self.stop() break elif m == MSG_PROCESS_HALTED: cherrypy.log("HALTED: " + name) self.stop() break def _last_proc(self): return self.procs[len(self.procs) - 1] def _is_last_proc(self, idx): return idx == len(self.procs) - 1 def _start_next(self, args={}): proc = self.procs[self.next_proc] cherrypy.log("STARTING: " + proc.name()) proc.set_msgq(self.msgq, self.next_proc) self.threads.append(_start_thread(proc.start, args)) self.next_proc = self.next_proc + 1 def stop(self): if self.stopping: return self.stopping = True self.started = False error = None for idx in xrange(self.next_proc-1, -1, -1): proc = self.procs[idx] proc.stop() self.threads[idx].join() if proc.has_error(): error = proc.get_errors()[0] cherrypy.log("GOT ERROR: " + error) self.pmsgq.put(MSG_PLAYER_PIPE_STOPPED) self.pmsgq.put(error) def is_started(self): return self.started def is_stopping(self): return self.stopping def control(self, action): if self.is_started(): self._last_proc().control(action) class Process(object): def __init__(self): self.errors = [] def set_msgq(self, msgq, procidx): self.msgq = msgq self.procidx = procidx def _send(self, msg, args=None): self.msgq.put(msg) self.msgq.put(self.procidx) if args is not None: self.msgq.put(args) def _set_error(self, msg): self.errors.append(msg) def get_errors(self): return self.errors def has_error(self): return len(self.errors) > 0 def status_msg(self): return "LOADING STREAM" def name(self): raise NotImplementedError('This method must be implemented by subclasses') def start(self, args): raise NotImplementedError('This method must be implemented by subclasses') def stop(self): raise NotImplementedError('This method must be implemented by subclasses') def msg_ready(self, args=None): if args is None: args = {} self._send(MSG_PROCESS_READY, args) def msg_halted(self): self._send(MSG_PROCESS_HALTED) def msg_finished(self): self._send(MSG_PROCESS_FINISHED) class ExternalProcess(Process): def __init__(self, 
shell=False): Process.__init__(self) self.shell = shell self.killing = False if not os.path.exists(TMP_DIR): os.makedirs(TMP_DIR) def start(self, args): cmd = self._get_cmd(args) self.proc = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, preexec_fn=os.setsid, shell=self.shell) try: args = self._ready() self.msg_ready(args) except ProcessException, e: # Ignore errors if process is being killed if not self.killing: self._set_error(str(e)) self._wait() def _wait(self): # Drain stderr/stdout pipe to stop it filling up and blocking process cpthr = _bgcopypipe(self.proc.stdout, None) retcode = self.proc.wait() self.proc = None #if retcode != 0: # cherrypy.log("Process exited with code: " + str(retcode)) if self.has_error() or self.killing: self.msg_halted() else: self.msg_finished() def stop(self): if self.proc is not None: # Stop gets called from a seperate thread # so shutdown may already be in progress # when we try to kill - therefore ignore errors try: # kill - including all children of process self.killing = True os.killpg(self.proc.pid, signal.SIGKILL) except Exception, e: pass if os.path.exists(OUT_FILE): try: os.remove(OUT_FILE) except Exception: pass def _get_cmd(self): raise NotImplementedError('This method must be implemented by subclasses') def _ready(self): raise NotImplementedError('This method must be implemented by subclasses') def _readline(self, timeout=None): poll_obj = select.poll() poll_obj.register(self.proc.stdout, select.POLLIN) while self.proc.poll() is None: if timeout is not None: poll_result = poll_obj.poll(1000 * timeout) if not poll_result: raise ProcessException("Timed out waiting for input") line = self.proc.stdout.readline() if not line: raise ProcessException("Process suddenly died") line = line.strip() if line.strip() != '': return line raise ProcessException("Process exit: "+str(self.proc.returncode))
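A hypothetical subclass sketch (Python 2, matching the code above) showing how an external command could be plugged into the pipe; the command and the ready-detection logic are invented for illustration only.

class TailProcess(ExternalProcess):
  def name(self):
    return "tail"

  def status_msg(self):
    return "TAILING FILE"

  def _get_cmd(self, args):
    # Follow a log file whose path is supplied by the previous process in the pipe
    return ["tail", "-f", args["path"]]

  def _ready(self):
    # Declare the process ready once it produces its first line of output
    first_line = self._readline(timeout=10)
    return {"first_line": first_line}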
Thank you for your interest. We will contact you as soon as we can. Or, if you'd like to send us a message, please use the form below. To schedule installation or find answers to your questions, just give us a ring! Use the handy form below to send us an email.
# Copyright 2014 IBM Corporation. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from tempest.api_schema.response.compute.v2_1 import quotas # NOTE(mriedem): os-quota-class-sets responses are the same as os-quota-sets # except for the key in the response body is quota_class_set instead of # quota_set, so update this copy of the schema from os-quota-sets. get_quota_class_set = copy.deepcopy(quotas.get_quota_set) get_quota_class_set['response_body']['properties']['quota_class_set'] = ( get_quota_class_set['response_body']['properties'].pop('quota_set')) get_quota_class_set['response_body']['required'] = ['quota_class_set'] update_quota_class_set = copy.deepcopy(quotas.update_quota_set) update_quota_class_set['response_body']['properties']['quota_class_set'] = ( update_quota_class_set['response_body']['properties'].pop('quota_set')) update_quota_class_set['response_body']['required'] = ['quota_class_set']
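Illustration (not from the source) of the copy-and-rename pattern used above, with a stripped-down stand-in for the real quotas schema so it runs on its own.

import copy

get_quota_set = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {'quota_set': {'type': 'object'}},
        'required': ['quota_set'],
    },
}

get_quota_class_set = copy.deepcopy(get_quota_set)
get_quota_class_set['response_body']['properties']['quota_class_set'] = (
    get_quota_class_set['response_body']['properties'].pop('quota_set'))
get_quota_class_set['response_body']['required'] = ['quota_class_set']

# deepcopy keeps the original schema untouched while the copy gets the new key
assert 'quota_set' not in get_quota_class_set['response_body']['properties']
assert get_quota_set['response_body']['required'] == ['quota_set']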
The first production I am reviewing in this year’s Acts of Faith Festival is The Joshua Plant. CAT Theatre’s production marks the first full performance of this original work by local playwrights Amy Berlin and P. Ann Bucci. As a special education teacher, I found this play truly special, for its story revolves around Joshua, a teen with autism. Joshua is non-verbal, except for his internal conversations that he holds with Sid, a house plant (Joshua’s plant). Diego Salinas goes beyond authentic in his performance. He realistically touches all the nuances of sensory overload, self-stimulatory and ritualistic behaviors, facial tics, toe walking and avoidance of physical contact. CAT Theatre worked closely with the nearby Dominion School, and director Laurie Follmer’s resulting work with Salinas is extraordinary. The differentiation Salinas displays in his character when he is “autistic” versus when he is “normal” in his conversations with Sid is remarkable. He not only shows a range of emotions but also does a pretty good Seinfeld impression. Aaron Orensky is also masterful as Sid the plant. He uses his voice, energy, timing and facial characteristics to command the stage and bring to life a character who cannot move from one spot. The cast is skillfully rounded out by Rebekah Spence as Joshua’s harrowed and overwhelmed single mother; Kathy Parker as her supportive and caring friend; and Tim Gettemy as Joshua’s mother’s boyfriend. Spence balances Sandy’s love and optimism in hoping to reach her son with pain and raw emotion. Parker is spunky, down to earth and strong as the kind of friend and neighbor we’d all like to have, and Gettemy is narcissistic and twisted as the alcoholic boyfriend. My only complaint was that at times Jimmy came across as a bit too likeable and empathetic, but this was erased in the darker moments. Lin Heath’s set design works to create the condo of Sandy and Joshua, which is half living space and half special education/Occupational Therapy space. Alan Armstrong’s lights also help create the mood and time. Berlin and Bucci have created an emotionally poignant and disarming presentation of what life must be like inside the mind of a child with autism and why they might do the things they do. As both a reviewer and a special educator, I truly hope everyone who hears about The Joshua Plant gets the opportunity to experience this special work. Disclaimer: CAT Theatre provided two complimentary media tickets to ShowBizRadio for this review. Playwright Amy Berlin writes for ShowBizRadio. This article can be linked to as: http://showbizradio.com/go/9899.
from ER import ER import tensorflow as tf import common import numpy as np class MGAIL(object): def __init__(self, environment): self.env = environment self.do_keep_prob = tf.placeholder("float", shape=(), name='do_keep_prob') self.forward_model = __import__('forward_model').ForwardModel(state_size=self.env.state_size, action_size=self.env.action_size, rho=self.env.fm_rho, beta=self.env.fm_beta, encoding_size=self.env.fm_encoding_size, batch_size=self.env.fm_batch_size, multi_layered_encoder=self.env.fm_multi_layered_encoder, num_steps=self.env.fm_num_steps, separate_encoders=self.env.fm_separate_encoders, merger=self.env.fm_merger, activation=self.env.fm_activation, lstm=self.env.fm_lstm, dropout_keep=self.env.do_keep_prob) autoencoder = None transformed_state_size = self.env.state_size self.discriminator = __import__('discriminator').DISCRIMINATOR(in_dim=transformed_state_size + self.env.action_size, out_dim=2, size=self.env.d_size, lr=self.env.d_lr, do_keep_prob=self.do_keep_prob, weight_decay=self.env.weight_decay) self.policy = __import__('policy').POLICY(in_dim=transformed_state_size, out_dim=self.env.action_size, size=self.env.p_size, lr=self.env.p_lr, w_std=self.env.w_std, do_keep_prob=self.do_keep_prob, n_accum_steps=self.env.policy_accum_steps, weight_decay=self.env.weight_decay) # self.policy_ = __import__('policy').POLICY(in_dim=transformed_state_size, # out_dim=self.env.action_size, # size=self.env.p_size, # lr=self.env.p_lr, # w_std=self.env.w_std, # do_keep_prob=self.do_keep_prob, # n_accum_steps=self.env.policy_accum_steps, # weight_decay=self.env.weight_decay) self.er_agent = ER(memory_size=self.env.er_agent_size, state_dim=self.env.state_size, action_dim=self.env.action_size, reward_dim=1, # stub connection qpos_dim=self.env.qpos_size, qvel_dim=self.env.qvel_size, batch_size=self.env.batch_size, history_length=1) self.er_expert = common.load_er(fname=self.env.run_dir + self.env.expert_data, batch_size=self.env.batch_size, history_length=1, traj_length=2) self.env.sigma = self.er_expert.actions_std/self.env.noise_intensity self.states_ = tf.placeholder("float", shape=(None, self.env.state_size), name='states_') # Batch x State self.states = tf.placeholder("float", shape=(None, self.env.state_size), name='states') # Batch x State self.actions = tf.placeholder("float", shape=(None, self.env.action_size), name='action') # Batch x Action self.label = tf.placeholder("float", shape=(None, 1), name='label') self.gamma = tf.placeholder("float", shape=(), name='gamma') self.temp = tf.placeholder("float", shape=(), name='temperature') self.noise = tf.placeholder("float", shape=(), name='noise_flag') self.noise_mean = tf.placeholder("float", shape=(self.env.action_size)) states_ = common.normalize(self.states_, self.er_expert.states_mean, self.er_expert.states_std) states = common.normalize(self.states, self.er_expert.states_mean, self.er_expert.states_std) if self.env.continuous_actions: actions = common.normalize(self.actions, self.er_expert.actions_mean, self.er_expert.actions_std) else: actions = self.actions self.forward_model.states_normalizer = self.er_expert.states_max - self.er_expert.states_min self.forward_model.actions_normalizer = self.er_expert.actions_max - self.er_expert.actions_min self.forward_model.states_normalizer = self.er_expert.states_std self.forward_model.actions_normalizer = self.er_expert.actions_std s = np.ones((1, self.forward_model.arch_params['encoding_dim'])) # 1. 
Forward Model fm_output, _, gru_state = self.forward_model.forward([states_, actions, s]) l2_loss = tf.reduce_mean(tf.square(states-fm_output)) self.forward_model.train(objective=l2_loss) # 2. Discriminator labels = tf.concat(1, [1 - self.label, self.label]) d = self.discriminator.forward(states, actions, autoencoder) # 2.1 0-1 accuracy correct_predictions = tf.equal(tf.argmax(d, 1), tf.argmax(labels, 1)) self.discriminator.acc = tf.reduce_mean(tf.cast(correct_predictions, "float")) # 2.2 prediction d_cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=d, labels=labels) # cost sensitive weighting (weigh true=exprt, predict=agent mistakes) d_loss_weighted = self.env.cost_sensitive_weight * tf.mul(tf.to_float(tf.equal(tf.squeeze(self.label), 1.)), d_cross_entropy) +\ tf.mul(tf.to_float(tf.equal(tf.squeeze(self.label), 0.)), d_cross_entropy) discriminator_loss = tf.reduce_mean(d_loss_weighted) self.discriminator.train(objective=discriminator_loss) self.discriminator.acc_summary = tf.scalar_summary('acc_d', self.discriminator.acc) # 3. Collect experience mu = self.policy.forward(states, autoencoder) if self.env.continuous_actions: a = common.denormalize(mu, self.er_expert.actions_mean, self.er_expert.actions_std) eta = tf.random_normal(shape=tf.shape(a), stddev=self.env.sigma, mean=self.noise_mean) self.action_test = tf.squeeze(a + self.noise * eta) else: a = common.gumbel_softmax(logits=mu, temperature=self.temp) self.action_test = tf.argmax(a, dimension=1) # 4. Policy # 4.1 SL actions_a = self.policy.forward(states, autoencoder) policy_sl_loss = tf.nn.l2_loss(actions_a - actions) # action == expert action self.policy.train(objective=policy_sl_loss, mode='sl') # 4.2 Temporal Regularization actions_a_ = self.policy_.forward(states, autoencoder) policy_tr_loss = self.env.policy_tr_w * self.env.policy_accum_steps * tf.nn.l2_loss(actions_a - actions_a_) self.policy.train(objective=policy_tr_loss, mode='tr') # op for copying weights from policy to policy_ self.policy_.copy_weights(self.policy.weights, self.policy.biases) # Plain adversarial learning d = self.discriminator.forward(states, actions_a, autoencoder) policy_alr_loss = self.al_loss(d) self.policy.train(objective=policy_alr_loss, mode='alr') # 4.3 AL def policy_loop(state_, t, total_cost, total_trans_err, _): mu = self.policy.forward(state_, autoencoder) if self.env.continuous_actions: eta = self.env.sigma * tf.random_normal(shape=tf.shape(mu), mean=self.noise_mean) a = mu + eta else: a = common.gumbel_softmax_sample(logits=mu, temperature=self.temp) # minimize the gap between agent logit (d[:,0]) and expert logit (d[:,1]) d = self.discriminator.forward(state_, a, autoencoder) cost = self.al_loss(d) # add step cost total_cost += tf.mul(tf.pow(self.gamma, t), cost) # get next state if self.env.continuous_actions: a_sim = common.denormalize(a, self.er_expert.actions_mean, self.er_expert.actions_std) else: a_sim = tf.argmax(a, dimension=1) state_env, _, env_term_sig, = self.env.step(a_sim, mode='tensorflow')[:3] state_e = common.normalize(state_env, self.er_expert.states_mean, self.er_expert.states_std) state_e = tf.stop_gradient(state_e) state_a, _, _ = self.forward_model.forward([state_, a, s]) state, nu = common.re_parametrization(state_e=state_e, state_a=state_a) total_trans_err += tf.reduce_mean(abs(nu)) t += 1 return state, t, total_cost, total_trans_err, env_term_sig def policy_stop_condition(state_, t, cost, trans_err, env_term_sig): cond = tf.logical_not(env_term_sig) cond = tf.logical_and(cond, t < 
self.env.n_steps_train) cond = tf.logical_and(cond, trans_err < self.env.total_trans_err_allowed) return cond state_0 = tf.slice(states, [0, 0], [1, -1]) loop_outputs = tf.while_loop(policy_stop_condition, policy_loop, [state_0, 0., 0., 0., False]) self.policy.train(objective=loop_outputs[2], mode='al') def al_loss(self, d): logit_agent, logit_expert = tf.split(split_dim=1, num_split=2, value=d) logit_gap = logit_agent - logit_expert valid_cond = tf.stop_gradient(tf.to_float(logit_gap > 0)) valid_gaps = tf.mul(logit_gap, valid_cond) # L2 if self.env.al_loss == 'L2': loss = tf.nn.l2_loss(tf.mul(logit_gap, tf.to_float(logit_gap > 0))) # L1 elif self.env.al_loss == 'L1': loss = tf.reduce_mean(valid_gaps) # Cross entropy elif self.env.al_loss == 'CE': labels = tf.concat(1, [tf.zeros_like(logit_agent), tf.ones_like(logit_expert)]) d_cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=d, labels=labels) loss = tf.reduce_mean(d_cross_entropy) return loss*self.env.policy_al_w
Two quick and easy ways to edit the corners at the sketch level are by adding chamfered and filleted corners. This video shows you how to quickly edit your corners in SolidWorks by applying fillets and chamfers to a sketch with the Fillet and Sketch Chamfer tools. You will also learn how to change values and add dimensions for a simple way to edit your design by altering the corners of each component.
# -*- coding: utf-8 -*- # Image Occlusion Enhanced Add-on for Anki # # Copyright (C) 2016-2020 Aristotelis P. <https://glutanimate.com/> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version, with the additions # listed at the end of the license file that accompanied this program. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. # # NOTE: This program is subject to certain additional terms pursuant to # Section 7 of the GNU Affero General Public License. You should have # received a copy of these additional terms immediately following the # terms and conditions of the GNU Affero General Public License that # accompanied this program. # # If not, please request a copy through one of the means of contact # listed here: <https://glutanimate.com/contact/>. # # Any modifications to this file must keep this entire header intact. """ Makes older IO notes editable. """ import logging from aqt.qt import * from anki.hooks import addHook from aqt.utils import tooltip from xml.dom import minidom from .config import * from .dialogs import ioAskUser from .utils import img2path, fname2img class ImgOccNoteConverter(object): def __init__(self, browser): self.browser = browser self.occl_id_last = None loadConfig(self) def convertNotes(self, nids): """Main note conversion method""" nids_by_nr = {} skipped = 0 (io_nids, filtered) = self.filterSelected(nids) for nid in io_nids: note = mw.col.getNote(nid) (uniq_id, note_nr) = self.getDataFromNamingScheme(note) if uniq_id == False: logging.debug("Skipping note that couldn't be parsed: %s", nid) skipped += 1 continue occl_tp = self.getOcclTypeAndNodes(note) occl_id = uniq_id + '-' + occl_tp if occl_id == self.occl_id_last: logging.debug( "Skipping note that we've just converted: %s", nid) continue self.occl_id_last = occl_id for nid in self.findByNoteId(uniq_id): note = mw.col.getNote(nid) (uniq_id, note_nr) = self.getDataFromNamingScheme(note) if uniq_id == False: logging.debug( "Skipping note that couldn't be parsed: %s", nid) skipped += 1 continue nids_by_nr[int(note_nr)] = nid self.idAndCorrelateNotes(nids_by_nr, occl_id) converted = len(io_nids) tooltip("<b>%i</b> notes updated, <b>%i</b> skipped" % (converted - skipped, filtered + skipped)) def filterSelected(self, nids): """Filters out notes with the wrong note type and those that are valid already""" io_nids = [] filtered = 0 for nid in nids: note = mw.col.getNote(nid) if note.model() != self.model: logging.debug("Skipping note with wrong note type: %s", nid) filtered += 1 continue elif note[self.ioflds['id']]: logging.debug( "Skipping IO note that is already editable: %s", nid) filtered += 1 continue elif not note[self.ioflds['om']]: logging.debug( "Skipping IO note without original SVG mask: %s", nid) filtered += 1 continue logging.debug("Found IO note in need of update: %s", nid) io_nids.append(nid) return (io_nids, filtered) def findByNoteId(self, note_id): """Search collection for notes with given ID in their omask paths""" # need to use omask path because 
Note ID field is not yet set query = '"%s:*%s*"' % (self.ioflds['om'], note_id) logging.debug("query: %s", query) res = mw.col.findNotes(query) return res def getDataFromNamingScheme(self, note): """Get unique ID and note nr from qmask path""" qmask = note[self.ioflds['qm']] path = img2path(qmask, True) if not path: return (False, None) grps = path.split('_') try: if len(grps) == 2: logging.debug("Extracting data using IO 2.0 naming scheme") uniq_id = grps[0] note_nr = path.split(' ')[1].split('.')[0] else: logging.debug( "Extracting data using IO Enhanced naming scheme") grps = path.split('-') uniq_id = grps[0] note_nr = int(grps[2]) - 1 return (uniq_id, note_nr) except IndexError: return (False, None) def idAndCorrelateNotes(self, nids_by_nr, occl_id): """Update Note ID fields and omasks of all occlusion session siblings""" logging.debug("occl_id %s", occl_id) logging.debug("nids_by_nr %s", nids_by_nr) logging.debug("mnode_idxs %s", self.mnode_idxs) for nr in sorted(nids_by_nr.keys()): try: midx = self.mnode_idxs[nr] except IndexError: continue nid = nids_by_nr[nr] note = mw.col.getNote(nid) new_mnode_id = occl_id + '-' + str(nr+1) self.mnode.childNodes[midx].setAttribute("id", new_mnode_id) note[self.ioflds['id']] = new_mnode_id note.flush() logging.debug("Adding ID for note nr %s", nr) logging.debug("midx %s", midx) logging.debug("nid %s", nid) logging.debug("note %s", note) logging.debug("new_mnode_id %s", new_mnode_id) new_svg = self.svg_node.toxml() omask_path = self._saveMask(new_svg, occl_id, "O") logging.debug("omask_path %s", omask_path) for nid in list(nids_by_nr.values()): note = mw.col.getNote(nid) note[self.ioflds['om']] = fname2img(omask_path) note.addTag(".io-converted") note.flush() logging.debug("Setting om and tag for nid %s", nid) def getOcclTypeAndNodes(self, note): """Determine oclusion type and svg mask nodes""" nr_of_masks = {} mnode_idxs = {} svg_mlayer = {} for i in ["qm", "om"]: # om second, so that end vars are correct svg_file = img2path(note[self.ioflds[i]], True) svg_node = self.readSvg(svg_file) svg_mlayer = self.layerNodesFrom(svg_node)[-1] # topmost layer mnode_idxs = self.getMaskNodes(svg_mlayer) nr_of_masks[i] = len(mnode_idxs) # decide on occl_tp based on nr of mask nodes in omask vs qmask if nr_of_masks["om"] != nr_of_masks["qm"]: occl_tp = "oa" else: occl_tp = "ao" self.svg_node = svg_node self.mnode = svg_mlayer self.mnode_idxs = mnode_idxs return occl_tp def readSvg(self, svg_file): """Read and fix malformatted IO 2.0 SVGs""" svg_doc = minidom.parse(svg_file) # ugly workaround for wrong namespace in older IO notes: svg_string = svg_doc.toxml().replace('ns0:', '').replace(':ns0', '') svg_string = str(svg_string) svg_doc = minidom.parseString(svg_string.encode('utf-8')) svg_node = svg_doc.documentElement return svg_node def getMaskNodes(self, mlayer): """Find mask nodes in masks layer""" mnode_indexes = [] for i, node in enumerate(mlayer.childNodes): if (node.nodeType == node.ELEMENT_NODE) and (node.nodeName != 'title'): mnode_indexes.append(i) return mnode_indexes def layerNodesFrom(self, svg_node): """Get layer nodes (topmost group nodes below the SVG node)""" assert (svg_node.nodeType == svg_node.ELEMENT_NODE) assert (svg_node.nodeName == 'svg') layer_nodes = [node for node in svg_node.childNodes if node.nodeType == node.ELEMENT_NODE] assert (len(layer_nodes) >= 1) # last, i.e. 
top-most element, needs to be a layer: assert (layer_nodes[-1].nodeName == 'g') return layer_nodes def _saveMask(self, mask, note_id, mtype): """Write mask to file in media collection""" logging.debug("!saving %s, %s", note_id, mtype) mask_path = '%s-%s.svg' % (note_id, mtype) mask_file = open(mask_path, 'w') mask_file.write(mask.encode('utf-8')) mask_file.close() return mask_path def onIoConvert(self): """Launch initial dialog, set up checkpoint, invoke converter""" mw = self.mw selected = self.selectedNotes() if not selected: tooltip("No cards selected.", period=2000) return ret = ioAskUser("question_nconvert", title="Please confirm action", parent=self, defaultno=True) if not ret: return False mw.progress.start() mw.checkpoint("Image Occlusion Note Conversions") self.model.beginReset() conv = ImgOccNoteConverter(self) conv.convertNotes(selected) self.model.endReset() mw.col.reset() mw.reset() mw.progress.finish() # Set up menus and hooks def setupMenu(self): menu = self.form.menuEdit menu.addSeparator() a = menu.addAction("Convert to Editable IO &Enhanced Notes") a.triggered.connect(lambda _, b=self: onIoConvert(b)) try: from aqt.gui_hooks import browser_menus_did_init browser_menus_did_init.append(setupMenu) except (ImportError, ModuleNotFoundError): addHook("browser.setupMenus", setupMenu)
If you are interested in helping a child receive basic educational opportunities, we eagerly invite you to join us and our other supporters to enable a child half a world away to avoid an unnecessary future of poverty and despair. Donations may be made online via PayPal through the “Donate” button (below) or by check made out to “International Foundation for Hope” and sent by mail to the IFH postal address (further below). If a donation is for a specific IFH program, please designate that program (e.g., “Mercy Home,” “Jeeva Nivas,” or “Africa”) on the memo line of the check. Thank you! The IRS recognizes IFH as a 501(c)(3) organization. Contributions to IFH are tax deductible.
import vigra import numpy import vigra.graphs as vigraph import matplotlib.pyplot as plt import scipy.misc import sys gamma = 0.0001 percentage = 2 f = "figure_1.png" ## img: image segment with 0: inside, 1: outside ## distFunc: function applied after distance transform, must be one of "exponential", "linear", "inverse" ## showPathImage: if True, the image with distance transform and paths will be shown ## percentageOfPaths: percentage of computed paths def eccentricity( img, distFunc = "exponential", showPathImage = False, percentageOfPaths = 100, imgSaveName = "" ): ## Enlarge image by one pixel on each side img = img.astype(numpy.uint8) bigImg = numpy.ones( (img.shape[0]+2, img.shape[1]+2) ) bigImg[1:bigImg.shape[0]-1, 1:bigImg.shape[1]-1] = img ## Find borders in img (replace with graph functions) borderImg = numpy.zeros(bigImg.shape) for y in range(bigImg.shape[1]-1): for x in range(bigImg.shape[0]-1): if bigImg[x,y] == 0: if bigImg[x+1,y] == 1 or bigImg[x,y+1] == 1: borderImg[x, y] = 1 else: if bigImg[x+1,y] == 0: borderImg[x+1, y] = 1 if bigImg[x,y+1] == 0: borderImg[x, y+1] = 1 ## regionImageToCrackEdgeImage ( labelImage ) # ## Apply distanceTransform and modify (outside: high values, inside: low values) # distImage = vigra.filters.distanceTransform2D(bigImg.astype(numpy.float32)) # if showPathImage: # imgp = distImage.copy() # if distFunc == "exponential": # distImage = numpy.exp(distImage*-gamma) # elif distFunc == "linear": # maxDist = distImage.max() # distImage = maxDist - distImage # elif distFunc == "inverse": # w = numpy.where(distImage!=0) # distImage[w] = 1/distImage[w] # else: # print "wrong parameters for distFunc in eccentricity" ## Distance in the inside between two pixels is 1.0 distImage = bigImg.copy().astype(numpy.float32) distImage[numpy.where(bigImg==0)]=1.0 ## Set the outside to a very high value distImage[numpy.where(bigImg==1)]=10000.0 imgp = distImage.copy() ## Get image graph and its path finder gridGraph = vigraph.gridGraph(bigImg.shape[0:2],False) edgeWeights = vigra.resize(distImage,[distImage.shape[0]*2-1,distImage.shape[1]*2-1],order=0) edgeWeights = vigra.graphs.edgeFeaturesFromInterpolatedImageCorrected(gridGraph,edgeWeights) pathFinder = vigraph.ShortestPathPathDijkstra(gridGraph) ## End points for paths (all points on the border) targets = numpy.where(borderImg==1) tx,ty = targets nTargets = len(tx) ## Indices of start points for paths (random) nPoints = int(numpy.ceil(percentageOfPaths * nTargets / 100.0)) numpy.random.seed(42) starts = numpy.random.permutation(range(nTargets))[:nPoints] ## Compute paths maxPaths = [] maxPathLengths = [] for i in range(nPoints): source = gridGraph.coordinateToNode((int(tx[starts[i]]), int (ty[starts[i]]))) pathFinder.run(edgeWeights, source) maxPathLength = 0 for j in range(nTargets): target = gridGraph.coordinateToNode((int(tx[j]), int(ty[j]))) path = pathFinder.path(pathType='coordinates', target=target) pathLength = pathFinder.distance(target) if pathLength > maxPathLength or maxPathLength == 0: maxPathLength = pathLength maxPath = path maxPaths.append(maxPath) maxPathLengths.append(maxPathLength) if showPathImage or len(imgSaveName)>1: val = (imgp.max()+imgp.min())/2 for p in maxPaths: imgp[p[:,0], p[:,1]] = val if showPathImage: plt.figure(distFunc) plt.imshow(imgp, interpolation='none') if len(imgSaveName)>1: scipy.misc.imsave(imgSaveName, imgp) return maxPathLengths ## Read image img = vigra.impex.readImage(f) labels = numpy.squeeze(vigra.analysis.labelImage(img)) ### Compute slic superpixels #labels ,nseg 
= vigra.analysis.slicSuperpixels(img,100.0,50) #labels = numpy.squeeze(vigra.analysis.labelImage(labels)) ## Compute bounding boxes regionFeatures = vigra.analysis.extractRegionFeatures(img, labels) upperLeftBBs = regionFeatures["Coord<Minimum>"] lowerRightBBs = regionFeatures["Coord<Maximum>"] nBoxes = len(upperLeftBBs)-1 ## Get segment inside its bounding box segments = [] nonEmptyBoxIndices = [] for i in range(nBoxes): subImg = labels[ upperLeftBBs[i+1][0]:lowerRightBBs[i+1][0], upperLeftBBs[i+1][1]:lowerRightBBs[i+1][1] ].copy() where = numpy.where(subImg==i+1) if len(where[0]) > 0: subImg[where] = 0 subImg[numpy.where(subImg!=0)] = 1 segments.append(subImg) nonEmptyBoxIndices.append(i+1) ## Apply eccentricity transform pathLengths = [] counter = 0 for seg in segments: #eccentricity(subImg, distFunc="exponential", showPathImage=True, percentageOfPaths=percentage) #eccentricity(subImg, distFunc="inverse", showPathImage=True, percentageOfPaths=percentage) pathLength = eccentricity(seg, distFunc="linear", showPathImage=False, percentageOfPaths=percentage) pathLengths.append(pathLength) counter = counter+1 #vigra.show() # ## Testimage: map longest path to color # maxPath = 0 # for i in range(len(pathLengths)): # m = max(pathLengths[i]) # if m > maxPath: # maxPath = m # labelCopy = labels.copy() # for i in range(len(pathLengths)): # val = max(pathLengths[i]) * 255.0/maxPath # j = nonEmptyBoxIndices[i] # labelCopy[numpy.where(labels == j)] = val # # vigra.imshow(labelCopy) # vigra.show()
Convenience drives our consumption patterns, which is why, as a society, we are getting fatter and, for the first time in human history, our kids have a life expectancy that is shorter than their parents’. If I want to successfully alter my consumption pattern and that of my family, I must respect the power of convenience and make the necessary adjustments. The key is to have healthy, vegan options readily available when my willpower will be at its lowest, i.e., when I’m hungry or bored. Here is one step in that direction. I cut up a bunch of different-colored bell peppers, red onion, squash, and russet potatoes, pour a little olive oil on, and add some thyme and rosemary from my front yard. Let it roast for 45 minutes in the oven at 325 degrees. I dump it in Tupperware and stick it in the fridge right next to some quinoa I also prepared. It’s the perfect snack or lunch, either cold or reheated. I also leave bowls of grapes, cherries, and blueberries and mini-containers of cut-up cantaloupe, watermelon and/or honeydew in the fridge, right out front. Bananas and apples are conveniently located right on the counter and kitchen table. This strategy makes healthy options even more convenient, thus more susceptible to mindless noshing than a package that needs opening or frozen food that requires cooking. I may go on about how easy it has been to convert to veganism, but the truth is, I’ve got a secret weapon in my war on poor eating habits, and her name is Barbara. We are creatures of habit. For most of us, that means we have fairly limited menus. Whether you’re eating a burger from McDonald’s, Chili’s or your own bbq, it’s still fundamentally just a burger. Add chicken with (insert staple side dish here), maybe grandma’s famous pasta recipe and a few other typical dishes, and you have your biweekly rotation of meals served at dinner. Now, what if I said you can’t eat any dish that contains the key ingredients found in every one of those recipes? You would be forced to find new recipes, go to different aisles in the food store, read the full list of ingredients and likely have to find new grocery stores to do your shopping. Well, all of those tasks fall squarely on the shoulders of my better half. Barbara has searched the internet and cookbooks for vegan recipes. She visited numerous new grocery stores to find which have the best selections for vegans. She’s become an expert on how to read ingredient lists and developed a whole new meal rotation for the entire family. I’m no writer, so I’m sure I’m not doing a fantastic job of painting a picture of just what a mammoth disruption this has been to her life. Keep in mind, I haven’t become a “substitute vegan”, where I simply swap out a burger for a veggie burger. I believe the only way to do this long term is to completely change your palate, and that requires that the composition of your plate look and feel as different as the new textures and flavors of your meals. That’s what makes it all the more incredible that she has managed to take it on without so much as a loud sigh. For this I am thankful to have such a fantastic partner in this and all of my endeavors. Add to the mix Javier, our breakfast chef at the office who makes the greatest veggie burrito with sides of fresh fruit and his secret Guatemalan beans, and you have a pretty sweet setup for conversion to veganism. I can’t leave out the contribution of my kids.
Masie, who has been a vegetarian for over 10 years, has actually benefitted from me becoming a vegan because it has expanded her range of options. Jackson, on the other hand, is an unabashed carnivore, but he’s also a gamer. Up for any challenge (see unicycling, slack lining, jumping stilts, and even yarn bombing), Jackson has been a supporter of the adjustment. Anyone with kids knows how important their contribution has been to a smooth transition. Don’t cry for me, for I suffer not on my path to becoming a healthier person.
# -*- coding: utf-8 -*- # vim: sw=4:ts=4:expandtab """ riko.modules.fetchtext ~~~~~~~~~~~~~~~~~~~~~~ Provides functions for fetching text data sources. Accesses and extracts data from text sources on the web. This data can then be merged with other data in your Pipe. Examples: basic usage:: >>> from riko import get_path >>> from riko.modules.fetchtext import pipe >>> >>> conf = {'url': get_path('lorem.txt')} >>> next(pipe(conf=conf))['content'] == 'What is Lorem Ipsum?' True Attributes: OPTS (dict): The default pipe options DEFAULTS (dict): The default parser options """ import pygogo as gogo from . import processor from riko import ENCODING from riko.utils import fetch, auto_close, get_abspath from riko.bado import coroutine, return_value, io OPTS = {'ftype': 'none', 'assign': 'content'} DEFAULTS = {'encoding': ENCODING} logger = gogo.Gogo(__name__, monolog=True).logger @coroutine def async_parser(_, objconf, skip=False, **kwargs): """ Asynchronously parses the pipe content Args: _ (None): Ignored objconf (obj): The pipe configuration (an Objectify instance) skip (bool): Don't parse the content kwargs (dict): Keyword arguments Kwargs: stream (dict): The original item Returns: Iter[dict]: The stream of items Examples: >>> from riko import get_path >>> from riko.bado import react >>> from riko.bado.mock import FakeReactor >>> from meza.fntools import Objectify >>> >>> def run(reactor): ... callback = lambda x: print(next(x)['content']) ... url = get_path('lorem.txt') ... objconf = Objectify({'url': url, 'encoding': ENCODING}) ... d = async_parser(None, objconf, assign='content') ... return d.addCallbacks(callback, logger.error) >>> >>> try: ... react(run, _reactor=FakeReactor()) ... except SystemExit: ... pass ... What is Lorem Ipsum? """ if skip: stream = kwargs['stream'] else: url = get_abspath(objconf.url) f = yield io.async_url_open(url) assign = kwargs['assign'] encoding = objconf.encoding _stream = ({assign: line.strip().decode(encoding)} for line in f) stream = auto_close(_stream, f) return_value(stream) def parser(_, objconf, skip=False, **kwargs): """ Parses the pipe content Args: _ (None): Ignored objconf (obj): The pipe configuration (an Objectify instance) skip (bool): Don't parse the content kwargs (dict): Keyword arguments Kwargs: stream (dict): The original item Returns: Iter[dict]: The stream of items Examples: >>> from riko import get_path >>> from meza.fntools import Objectify >>> >>> url = get_path('lorem.txt') >>> objconf = Objectify({'url': url, 'encoding': ENCODING}) >>> result = parser(None, objconf, assign='content') >>> next(result)['content'] == 'What is Lorem Ipsum?' True """ if skip: stream = kwargs['stream'] else: f = fetch(decode=True, **objconf) _stream = ({kwargs['assign']: line.strip()} for line in f) stream = auto_close(_stream, f) return stream @processor(DEFAULTS, isasync=True, **OPTS) def async_pipe(*args, **kwargs): """A source that asynchronously fetches and parses an XML or JSON file to return the entries. Args: item (dict): The entry to process kwargs (dict): The keyword arguments passed to the wrapper Kwargs: conf (dict): The pipe configuration. Must contain the key 'url'. May contain the key 'encoding'. url (str): The web site to fetch. encoding (str): The file encoding (default: utf-8). 
assign (str): Attribute to assign parsed content (default: content) Returns: Deferred: twisted.internet.defer.Deferred stream of items Examples: >>> from riko import get_path >>> from riko.bado import react >>> from riko.bado.mock import FakeReactor >>> >>> def run(reactor): ... callback = lambda x: print(next(x)['content']) ... conf = {'url': get_path('lorem.txt')} ... d = async_pipe(conf=conf) ... return d.addCallbacks(callback, logger.error) >>> >>> try: ... react(run, _reactor=FakeReactor()) ... except SystemExit: ... pass ... What is Lorem Ipsum? """ return async_parser(*args, **kwargs) @processor(DEFAULTS, **OPTS) def pipe(*args, **kwargs): """A source that fetches and parses an XML or JSON file to return the entries. Args: item (dict): The entry to process kwargs (dict): The keyword arguments passed to the wrapper Kwargs: conf (dict): The pipe configuration. Must contain the key 'url'. May contain the key 'encoding'. url (str): The web site to fetch encoding (str): The file encoding (default: utf-8). assign (str): Attribute to assign parsed content (default: content) Returns: dict: an iterator of items Examples: >>> from riko import get_path >>> >>> conf = {'url': get_path('lorem.txt')} >>> next(pipe(conf=conf))['content'] == 'What is Lorem Ipsum?' True """ return parser(*args, **kwargs)
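For a quick end-to-end check outside of the doctests, the synchronous pipe can be driven directly. The following is a minimal sketch that simply mirrors the doctest above: lorem.txt is the sample file shipped with riko, and each line of the fetched file comes back as its own item dict under the default 'content' key.

from riko import get_path
from riko.modules.fetchtext import pipe

# Fetch a local text file; every line of the file becomes one item in the
# stream, stored under the 'content' key (the 'assign' option renames it).
conf = {'url': get_path('lorem.txt')}
for item in pipe(conf=conf):
    print(item['content'])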
Your employer can pay compulsory superannuation guarantee (SG) contributions into your VicSuper FutureSaver account on your behalf, and you can add to these contributions yourself through a variety of other contribution options. You can also take up insurance cover through your VicSuper FutureSaver membership. To enable your employer to contribute your SG payments to VicSuper, they need to be a VicSuper participating employer. There is no cost to become a participating employer. Complete the online form below. An authorised VicSuper representative will contact your employer to check that they offer a choice of fund to employees, and to take your employer through the joining process. VicSuper complies with the Privacy Act 1988 and has produced a Privacy brochure detailing how VicSuper collects, uses, discloses and stores personal information. By providing your details, you consent to VicSuper contacting you and your employer. By providing these details you authorise VicSuper to act on your behalf, including speaking with your employer.
#!/usr/bin/env python3 # Copyright 2017 Jussi Pakkanen # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import subprocess, sys, os, shutil, platform, json meson_commands = ['meson', 'meson.py', '/home/jpakkane/workspace/meson/meson.py', 'c:/users/IEUser/meson/meson.py', '/Users/jpakkane/meson/meson.py'] meson_bin = None for c in meson_commands: if shutil.which(c): meson_bin = c break if not meson_bin: sys.exit('Could not find Meson executable.') def measure_one(builddir, compiler, extra_flag, sort, buildtype): if os.path.exists(builddir): shutil.rmtree(builddir) env = os.environ.copy() env['CXX'] = compiler env['CXXFLAGS'] = extra_flag if sort: sort_arg = ['--sort'] else: sort_arg = [] subprocess.check_call([sys.executable, meson_bin, builddir, '--buildtype=' + buildtype] , stdout=subprocess.DEVNULL, env=env) subprocess.check_call(['ninja', '-C', builddir], stdout=subprocess.DEVNULL) out = subprocess.check_output([os.path.join(builddir, 'speedup')] + sort_arg) out = out.decode('utf-8') result = {} for line in out.split('\n'): line = line.strip() if line == '': continue typename, duration, _ = line.split() result[typename] = duration shutil.rmtree(builddir) return result def do_measurements(): measurements = [] if platform.processor() == 'x86_64' or platform.processor() == 'i386' or 'Intel64' in platform.processor(): gcc_cpu_flags = ['', '-mavx', '-msse4.2', '-msse2', '-msse'] elif platform.machine().startswith('arm'): gcc_cpu_flags = ['', '-mfpu=neon'] else: sys.exit('Unsupported CPU: ' + platform.processor()) cl_cpu_flags = [''] # Add /arch:AVX and /arch:AVX2 builddir = 'buildmeasurement' compilers = [] if platform.system().lower() == 'linux': trials = ['g++', 'clang++'] elif platform.system().lower() == 'windows': trials = ['g++', 'clang++', 'cl'] elif platform.system().lower() == 'darwin': trials = ['clang++'] # On OSX g++ is an alias to clang++ for c in trials: if shutil.which(c): compilers.append(c) for compiler in compilers: cpu_flags = cl_cpu_flags if compiler == 'cl' else gcc_cpu_flags for cpu_flag in cpu_flags: for sort in [True, False]: for buildtype in ['debugoptimized', 'release']: times = measure_one(builddir, compiler, cpu_flag, sort, buildtype) measurements.append({'compiler': compiler, 'cpu_flag': cpu_flag, 'sort': sort, 'buildtype': buildtype, 'times': times, }) return measurements if __name__ == '__main__': if len(sys.argv) != 2: print(sys.argv[1], '<output file name>') sys.exit(1) if not os.path.isfile('meson.build'): print('This script must be run in the top of the source dir.') sys.exit(1) ofilename = sys.argv[1] measurements = do_measurements() json.dump(measurements, open(ofilename, 'w'))
The “2013 Coast to Coast Trek” was fantastic; everybody had a “Phat” time and all but one car made it home without any problems. Thank you to all the trekkers who participated, and I hope to see you all next year. We had some good media coverage along the way and helped to promote what Starlight does for lots of children, which is what it is all about. The good news is that we have raised $96K, which is a fantastic effort, so well done everyone; that will go a long way in helping brighten the lives of lots of sick kids. The highest fundraiser this year was Car Miss P from Nowra (Terese, Warren, Lara and Kate Markam); well done, that was a great effort. Look out for next year!! Keep watching the website for updates on next year's trek, and check out our Facebook page.
"""This file is part of DING0, the DIstribution Network GeneratOr. DING0 is a tool to generate synthetic medium and low voltage power distribution grids based on open data. It is developed in the project open_eGo: https://openegoproject.wordpress.com DING0 lives at github: https://github.com/openego/ding0/ The documentation is available on RTD: http://ding0.readthedocs.io""" __copyright__ = "Reiner Lemoine Institut gGmbH" __license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" __url__ = "https://github.com/openego/ding0/blob/master/LICENSE" __author__ = "nesnoj, gplssm" # check technical constraints of distribution grids (shared lib) from ding0.tools import config as cfg_ding0 import logging from ding0.core.network.loads import LVLoadDing0 from ding0.core.network import GeneratorDing0 from ding0.core.network.cable_distributors import LVCableDistributorDing0 from ding0.core.network.stations import LVStationDing0 from ding0.core.powerflow import q_sign import networkx as nx import math logger = logging.getLogger('ding0') def check_load(grid, mode): """ Checks for over-loading of branches and transformers for MV or LV grid. Parameters ---------- grid : :class:`~.ding0.core.GridDing0` Grid identifier. mode : :obj:`str` Kind of grid ('MV' or 'LV'). Returns ------- :obj:`dict` Dict of critical branches with max. relative overloading, and the following format:: { branch_1: rel_overloading_1, ..., branch_n: rel_overloading_n } :obj:`list` of :class:`~.ding0.core.network.TransformerDing0` objects List of critical transformers with the following format:: [trafo_1, ..., trafo_m] Note ----- Lines'/cables' max. capacity (load case and feed-in case) are taken from [#]_. References ---------- .. [#] dena VNS See Also -------- ding0.flexopt.reinforce_measures.reinforce_branches_current : ding0.flexopt.reinforce_measures.reinforce_branches_voltage : """ crit_branches = {} crit_stations = [] if mode == 'MV': # load load factors (conditions) for cables, lines and trafos for load- and feedin case # load_factor_mv_trans_lc_normal = float(cfg_ding0.get('assumptions', # 'load_factor_mv_trans_lc_normal')) load_factor_mv_line_lc_normal = float(cfg_ding0.get('assumptions', 'load_factor_mv_line_lc_normal')) load_factor_mv_cable_lc_normal = float(cfg_ding0.get('assumptions', 'load_factor_mv_cable_lc_normal')) #load_factor_mv_trans_fc_normal = float(cfg_ding0.get('assumptions', # 'load_factor_mv_trans_fc_normal')) load_factor_mv_line_fc_normal = float(cfg_ding0.get('assumptions', 'load_factor_mv_line_fc_normal')) load_factor_mv_cable_fc_normal = float(cfg_ding0.get('assumptions', 'load_factor_mv_cable_fc_normal')) mw2kw = 1e3 kw2mw = 1e-3 # STEP 1: check branches' loads for branch in grid.graph_edges(): s_max_th = 3**0.5 * branch['branch'].type['U_n'] * branch['branch'].type['I_max_th'] if branch['branch'].kind == 'line': s_max_th_lcfc = [s_max_th * load_factor_mv_line_lc_normal, s_max_th * load_factor_mv_line_fc_normal] elif branch['branch'].kind == 'cable': s_max_th_lcfc = [s_max_th * load_factor_mv_cable_lc_normal, s_max_th * load_factor_mv_cable_fc_normal] else: raise ValueError('Branch kind is invalid!') # check loads only for non-aggregated Load Areas (aggregated ones are skipped raising except) try: # check if s_res exceeds allowed values for laod and feedin case # CAUTION: The order of values is fix! (1. load case, 2. feedin case) if any([s_res * mw2kw > _ for s_res, _ in zip(branch['branch'].s_res, s_max_th_lcfc)]): # save max. 
relative overloading crit_branches[branch] = max(branch['branch'].s_res) * mw2kw / s_max_th except: pass # STEP 2: check HV-MV station's load # NOTE: HV-MV station reinforcement is not required for status-quo # scenario since HV-MV trafos already sufficient for load+generation # case as done in MVStationDing0.choose_transformers() # OLD snippet: # cum_peak_load = grid.grid_district.peak_load # cum_peak_generation = grid.station().peak_generation(mode='MVLV') # # # reinforcement necessary only if generation > load # if cum_peak_generation > cum_peak_load: # grid.station().choose_transformers # # cum_trafo_capacity = sum((_.s_max_a for _ in grid.station().transformers())) # # max_trafo = max((_.s_max_a for _ in grid.station().transformers())) # # # determine number and size of required transformers # kw2mw = 1e-3 # residual_apparent_power = cum_generation_sum * kw2mw - \ # cum_trafo_capacity elif mode == 'LV': raise NotImplementedError if crit_branches: logger.info('==> {} branches have load issues.'.format( len(crit_branches))) if crit_stations: logger.info('==> {} stations have load issues.'.format( len(crit_stations))) return crit_branches, crit_stations def check_voltage(grid, mode): """ Checks for voltage stability issues at all nodes for MV or LV grid Parameters ---------- grid : :class:`~.ding0.core.GridDing0` Grid identifier. mode : :obj:`str` Kind of grid ('MV' or 'LV'). Returns ------- :obj:`list` of Ding0 node object (member of graph) either * :class:`~.ding0.core.network.GeneratorDing0` or * :class:`~.ding0.core.network.GeneratorFluctuatingDing0` or * :class:`~.ding0.core.network.LoadDing0` or * :class:`~.ding0.core.network.StationDing0` or * :class:`~.ding0.core.network.CircuitBreakerDing0` or * :class:`~.ding0.core.network.CableDistributorDing0` List of critical nodes, sorted descending by voltage difference. Note ----- The examination is done in two steps, according to [#]_ : 1. It is checked #TODO: what? 2. #TODO: what's next? References ---------- .. [#] dena VNS """ crit_nodes = {} if mode == 'MV': # load max. voltage difference for load and feedin case mv_max_v_level_lc_diff_normal = float(cfg_ding0.get('mv_routing_tech_constraints', 'mv_max_v_level_lc_diff_normal')) mv_max_v_level_fc_diff_normal = float(cfg_ding0.get('mv_routing_tech_constraints', 'mv_max_v_level_fc_diff_normal')) # check nodes' voltages voltage_station = grid._station.voltage_res for node in grid.graph_nodes_sorted(): try: # compare node's voltage with max. allowed voltage difference for load and feedin case if (abs(voltage_station[0] - node.voltage_res[0]) > mv_max_v_level_lc_diff_normal) or\ (abs(voltage_station[1] - node.voltage_res[1]) > mv_max_v_level_fc_diff_normal): crit_nodes[node] = {'node': node, 'v_diff': max([abs(v2-v1) for v1, v2 in zip(node.voltage_res, voltage_station)])} except: pass elif mode == 'LV': raise NotImplementedError if crit_nodes: logger.info('==> {} nodes have voltage issues.'.format(len(crit_nodes))) return [_['node'] for _ in sorted(crit_nodes.values(), key=lambda _: _['v_diff'], reverse=True)] def get_critical_line_loading(grid): """ Assign line loading to each branch determined by peak load and peak generation of descendant branches The attribute `s_res` is a list of two elements 1. apparent power in load case 2. apparent power in feed-in case Parameters ---------- grid : :class:`~.ding0.core.network.grids.LVGridDing0` Ding0 LV grid object Returns ------- :obj:`list` List of critical branches incl. its line loading :obj:`list` List of critical stations incl. 
its transformer loading """ cos_phi_load = cfg_ding0.get('assumptions', 'cos_phi_load') cos_phi_feedin = cfg_ding0.get('assumptions', 'cos_phi_gen') lf_trafo_load = cfg_ding0.get('assumptions', "load_factor_lv_trans_lc_normal") lf_trafo_gen = cfg_ding0.get('assumptions', "load_factor_lv_trans_fc_normal") critical_branches = [] critical_stations = [] # Convert grid to a tree (is a directed graph) # based on this tree, descendants of each node are accessible station = grid._station tree = nx.dfs_tree(grid._graph, station) for node in tree.nodes(): # list of descendant nodes including the node itself descendants = list(nx.descendants(tree, node)) descendants.append(node) if isinstance(node, LVStationDing0): # determine cumulative peak load at node and assign to branch peak_load, peak_gen = peak_load_generation_at_node(descendants) if grid.id_db == 61107: if isinstance(node, LVStationDing0): print(node) # get trafos cumulative apparent power s_max_trafos = sum([_.s_max_a for _ in node._transformers]) # compare with load and generation connected to if (((peak_load / cos_phi_load) > s_max_trafos * lf_trafo_load) or ((peak_gen / cos_phi_feedin) > s_max_trafos * lf_trafo_gen)): critical_stations.append( {'station': node, 's_max': [ peak_load / cos_phi_load, peak_gen / cos_phi_feedin]}) else: # preceeding node of node predecessors = list(tree.predecessors(node)) # a non-meshed grid topology returns a list with only 1 item predecessor = predecessors[0] # get preceeding branches = grid.graph_branches_from_node(node) preceeding_branch = [branch for branch in branches if branch[0] is predecessor][0] # determine cumulative peak load at node and assign to branch peak_load, peak_gen = peak_load_generation_at_node(descendants) s_max_th = 3 ** 0.5 * preceeding_branch[1]['branch'].type['U_n'] * \ preceeding_branch[1]['branch'].type['I_max_th'] / 1e3 if (((peak_load / cos_phi_load) > s_max_th) or ((peak_gen / cos_phi_feedin) > s_max_th)): critical_branches.append( {'branch': preceeding_branch[1]['branch'], 's_max': [ peak_load / cos_phi_load, peak_gen / cos_phi_feedin]}) return critical_branches, critical_stations def peak_load_generation_at_node(nodes): """ Get maximum occuring load and generation at a certain node Summarizes peak loads and nominal generation power of descendant nodes of a branch Parameters ---------- nodes : :obj:`list` Any LV grid Ding0 node object that is part of the grid topology Return ------ :any:`float` peak_load : Sum of peak loads of descendant nodes :any:`float` peak_generation : Sum of nominal power of generation at descendant nodes """ loads = [node.peak_load for node in nodes if isinstance(node, LVLoadDing0)] peak_load = sum(loads) generation = [node.capacity for node in nodes if isinstance(node, GeneratorDing0)] peak_generation = sum(generation) return peak_load, peak_generation def get_critical_voltage_at_nodes(grid): r""" Estimate voltage drop/increase induced by loads/generators connected to the grid. Based on voltage level at each node of the grid critical nodes in terms of exceed tolerable voltage drop/increase are determined. The tolerable voltage drop/increase is defined by [#VDE]_ a adds up to 3 % of nominal voltage. The longitudinal voltage drop at each line segment is estimated by a simplified approach (neglecting the transverse voltage drop) described in [#VDE]_. Two equations are available for assessing voltage drop/ voltage increase. The first is used to assess a voltage drop in the load case .. 
math:: \\Delta u = \\frac{S_{Amax} \cdot ( R_{kV} \cdot cos(\phi) + X_{kV} \cdot sin(\phi) )}{U_{nom}} The second equation can be used to assess the voltage increase in case of feedin. The only difference is the negative sign before X. This is related to consider a voltage drop due to inductive operation of generators. .. math:: \\Delta u = \\frac{S_{Amax} \cdot ( R_{kV} \cdot cos(\phi) - X_{kV} \cdot sin(\phi) )}{U_{nom}} ================= ============================= Symbol Description ================= ============================= :math:`\Delta u` Voltage drop/increase at node :math:`S_{Amax}` Apparent power :math:`R_{kV}` Short-circuit resistance :math:`X_{kV}` Short-circuit reactance :math:`cos(\phi)` Power factor :math:`U_{nom}` Nominal voltage ================= ============================= Parameters ---------- grid : :class:`~.ding0.core.network.grids.LVGridDing0` Ding0 LV grid object Note ----- The implementation highly depends on topology of LV grid. This must not change its topology from radial grid with stubs branching from radial branches. In general, the approach of [#VDE]_ is only applicable to grids of radial topology. We consider the transverse voltage drop/increase by applying the same methodology successively on results of main branch. The voltage drop/increase at each house connection branch (aka. stub branch or grid connection point) is estimated by superposition based on voltage level in the main branch cable distributor. References ---------- .. [#VDE] VDE Anwenderrichtlinie: Erzeugungsanlagen am Niederspannungsnetz – Technische Mindestanforderungen für Anschluss und Parallelbetrieb von Erzeugungsanlagen am Niederspannungsnetz, 2011 """ v_delta_tolerable_fc = cfg_ding0.get('assumptions', 'lv_max_v_level_fc_diff_normal') v_delta_tolerable_lc = cfg_ding0.get('assumptions', 'lv_max_v_level_lc_diff_normal') crit_nodes = [] # get list of nodes of main branch in right order tree = nx.dfs_tree(grid._graph, grid._station) # list for nodes of main branch main_branch = [] # list of stub cable distributors branching from main branch grid_conn_points = [] # fill two above lists for node in list(nx.descendants(tree, grid._station)): successors = list(tree.successors(node)) if successors and all(isinstance(successor, LVCableDistributorDing0) for successor in successors): main_branch.append(node) elif (isinstance(node, LVCableDistributorDing0) and all(isinstance(successor, (GeneratorDing0, LVLoadDing0)) for successor in successors)): grid_conn_points.append(node) v_delta_load_case_bus_bar, v_delta_gen_case_bus_bar = get_voltage_at_bus_bar(grid, tree) if (abs(v_delta_gen_case_bus_bar) > v_delta_tolerable_fc or abs(v_delta_load_case_bus_bar) > v_delta_tolerable_lc): crit_nodes.append({'node': grid._station, 'v_diff': [v_delta_load_case_bus_bar, v_delta_gen_case_bus_bar]}) # voltage at main route nodes for first_node in [b for b in tree.successors(grid._station) if b in main_branch]: # initiate loop over feeder successor = first_node # cumulative voltage drop/increase at substation bus bar v_delta_load_cum = v_delta_load_case_bus_bar v_delta_gen_cum = v_delta_gen_case_bus_bar # successively determine voltage levels for succeeding nodes while successor: # calculate voltage drop over preceding line voltage_delta_load, voltage_delta_gen = get_delta_voltage_preceding_line(grid, tree, successor) # add voltage drop over preceding line v_delta_load_cum += voltage_delta_load v_delta_gen_cum += voltage_delta_gen # roughly estimate transverse voltage drop stub_node = [_ for _ in 
tree.successors(successor) if _ not in main_branch][0] v_delta_load_stub, v_delta_gen_stub = get_delta_voltage_preceding_line(grid, tree, stub_node) # check if voltage drop at node exceeds tolerable voltage drop if (abs(v_delta_gen_cum) > (v_delta_tolerable_fc) or abs(v_delta_load_cum) > ( v_delta_tolerable_lc)): # add node and successing stub node to critical nodes crit_nodes.append({'node': successor, 'v_diff': [v_delta_load_cum, v_delta_gen_cum]}) crit_nodes.append({'node': stub_node, 'v_diff': [ v_delta_load_cum + v_delta_load_stub, v_delta_gen_cum + v_delta_gen_stub]}) # check if voltage drop at stub node exceeds tolerable voltage drop elif ((abs(v_delta_gen_cum + v_delta_gen_stub) > v_delta_tolerable_fc) or (abs(v_delta_load_cum + v_delta_load_stub) > v_delta_tolerable_lc)): # add stub node to critical nodes crit_nodes.append({'node': stub_node, 'v_diff': [ v_delta_load_cum + v_delta_load_stub, v_delta_gen_cum + v_delta_gen_stub]}) successor = [_ for _ in tree.successors(successor) if _ in main_branch] if successor: successor = successor[0] return crit_nodes def get_voltage_at_bus_bar(grid, tree): """ Determine voltage level at bus bar of MV-LV substation Parameters ---------- grid : :class:`~.ding0.core.network.grids.LVGridDing0` Ding0 grid object tree : :networkx:`NetworkX Graph Obj< >` Tree of grid topology: Returns ------- :obj:`list` Voltage at bus bar. First item refers to load case, second item refers to voltage in feedin (generation) case """ # impedance of mv grid and transformer r_mv_grid, x_mv_grid = get_mv_impedance_at_voltage_level(grid, grid.v_level / 1e3) z_trafo = 1 / sum(1 / (tr.z(voltage_level=grid.v_level / 1e3)) for tr in grid._station._transformers) r_trafo = z_trafo.real x_trafo = z_trafo.imag # cumulative resistance/reactance at bus bar r_busbar = r_mv_grid + r_trafo x_busbar = x_mv_grid + x_trafo # get voltage drop at substation bus bar v_delta_load_case_bus_bar, \ v_delta_gen_case_bus_bar = get_voltage_delta_branch(tree, grid._station, r_busbar, x_busbar) return v_delta_load_case_bus_bar, v_delta_gen_case_bus_bar def get_delta_voltage_preceding_line(grid, tree, node): """ Parameters ---------- grid : :class:`~.ding0.core.network.grids.LVGridDing0` Ding0 grid object tree: :networkx:`NetworkX Graph Obj< >` Tree of grid topology node: graph node Node at end of line Return ------ :any:`float` Voltage drop over preceding line of node """ # get impedance of preceding line freq = cfg_ding0.get('assumptions', 'frequency') omega = 2 * math.pi * freq # choose preceding branch branch = [_ for _ in grid.graph_branches_from_node(node) if _[0] in list(tree.predecessors(node))][0][1] # calculate impedance of preceding branch r_line = (branch['branch'].type['R_per_km'] * branch['branch'].length/1e3) x_line = (branch['branch'].type['L_per_km'] / 1e3 * omega * branch['branch'].length/1e3) # get voltage drop over preceeding line voltage_delta_load, voltage_delta_gen = \ get_voltage_delta_branch(tree, node, r_line, x_line) return voltage_delta_load, voltage_delta_gen def get_voltage_delta_branch(tree, node, r, x): """ Determine voltage for a branch with impedance r + jx Parameters ---------- tree : :networkx:`NetworkX Graph Obj< >` Tree of grid topology node : graph node Node to determine voltage level at r : float Resistance of preceeding branch x : float Reactance of preceeding branch Return ------ :any:`float` Delta voltage for branch """ cos_phi_load = cfg_ding0.get('assumptions', 'cos_phi_load') cos_phi_feedin = cfg_ding0.get('assumptions', 'cos_phi_gen') cos_phi_load_mode 
= cfg_ding0.get('assumptions', 'cos_phi_load_mode') cos_phi_feedin_mode = cfg_ding0.get('assumptions', 'cos_phi_gen_mode') #ToDo: Check if this is true. Why would generator run in a way that aggravates voltage issues? v_nom = cfg_ding0.get('assumptions', 'lv_nominal_voltage') # get apparent power for load and generation case peak_load, gen_capacity = get_cumulated_conn_gen_load(tree, node) s_max_load = peak_load/cos_phi_load s_max_feedin = gen_capacity/cos_phi_feedin # determine voltage increase/ drop a node x_sign_load = q_sign(cos_phi_load_mode, 'load') voltage_delta_load = voltage_delta_vde(v_nom, s_max_load, r, x_sign_load * x, cos_phi_load) x_sign_gen = q_sign(cos_phi_feedin_mode, 'load') voltage_delta_gen = voltage_delta_vde(v_nom, s_max_feedin, r, x_sign_gen * x, cos_phi_feedin) return [voltage_delta_load, voltage_delta_gen] def get_cumulated_conn_gen_load(graph, node): """ Get generation capacity/ peak load of all descending nodes Parameters ---------- graph : :networkx:`NetworkX Graph Obj< >` Directed graph node : graph node Node of the main branch of LV grid Returns ------- :obj:`list` A list containing two items # cumulated peak load of connected loads at descending nodes of node # cumulated generation capacity of connected generators at descending nodes of node """ # loads and generators connected to descending nodes peak_load = sum( [node.peak_load for node in nx.descendants(graph, node) if isinstance(node, LVLoadDing0)]) generation = sum( [node.capacity for node in nx.descendants(graph, node) if isinstance(node, GeneratorDing0)]) return [peak_load, generation] def get_mv_impedance_at_voltage_level(grid, voltage_level): """ Determine MV grid impedance (resistance and reactance separately) Parameters ---------- grid : :class:`~.ding0.core.network.grids.LVGridDing0` voltage_level: float voltage level to which impedance is rescaled (normally 0.4 kV for LV) Returns ------- :obj:`list` List containing resistance and reactance of MV grid """ freq = cfg_ding0.get('assumptions', 'frequency') omega = 2 * math.pi * freq mv_grid = grid.grid_district.lv_load_area.mv_grid_district.mv_grid edges = mv_grid.find_path(grid._station, mv_grid._station, type='edges') r_mv_grid = sum([e[2]['branch'].type['R_per_km'] * e[2]['branch'].length / 1e3 for e in edges]) x_mv_grid = sum([e[2]['branch'].type['L_per_km'] / 1e3 * omega * e[2][ 'branch'].length / 1e3 for e in edges]) # rescale to voltage level r_mv_grid_vl = r_mv_grid * (voltage_level / mv_grid.v_level) ** 2 x_mv_grid_vl = x_mv_grid * (voltage_level / mv_grid.v_level) ** 2 return [r_mv_grid_vl, x_mv_grid_vl] def voltage_delta_vde(v_nom, s_max, r, x, cos_phi): """ Estimate voltrage drop/increase The VDE [#]_ proposes a simplified method to estimate voltage drop or increase in radial grids. Parameters ---------- v_nom : :obj:`int` Nominal voltage s_max : :obj:`float` Apparent power r : :obj:`float` Short-circuit resistance from node to HV/MV substation (in ohm) x : :obj:`float` Short-circuit reactance from node to HV/MV substation (in ohm). Must be a signed number indicating (+) inductive reactive consumer (load case) or (-) inductive reactive supplier (generation case) cos_phi : :obj:`float` The cosine phi of the connected generator or load that induces the voltage change Returns ------- :obj:`float` Voltage drop or increase References ---------- .. 
[#] VDE Anwenderrichtlinie: Erzeugungsanlagen am Niederspannungsnetz – Technische Mindestanforderungen für Anschluss und Parallelbetrieb von Erzeugungsanlagen am Niederspannungsnetz, 2011 """ delta_v = (s_max * 1e3 * ( r * cos_phi - x * math.sin(math.acos(cos_phi)))) / v_nom ** 2 return delta_v
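As a quick sanity check of the simplified VDE estimate implemented above, the expression can be evaluated standalone. The numbers below are made-up illustration values (a 400 V feeder, 30 kVA of apparent power behind 0.08 ohm resistance and 0.05 ohm reactance), not values taken from any ding0 configuration.

import math

def voltage_delta_vde_sketch(v_nom, s_max, r, x, cos_phi):
    # Same expression as voltage_delta_vde above: relative voltage change
    # caused by apparent power s_max (kVA) behind the impedance r + jx (ohm).
    return (s_max * 1e3 * (r * cos_phi - x * math.sin(math.acos(cos_phi)))) / v_nom ** 2

# x carries the sign convention from the docstring above:
# (+) inductive consumer (load case), (-) inductive supplier (feed-in case).
print(voltage_delta_vde_sketch(400, 30, 0.08, +0.05, 0.97))  # ~0.0123
print(voltage_delta_vde_sketch(400, 30, 0.08, -0.05, 0.95))  # ~0.0172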
Nguyen an V DDS is a dental practice in Twentynine Palms, CA. Classified under Dentists, with An Nguyen as its principal, it is located at 73666 Joshua Drive, Twentynine Palms, California CA 92277. For sales, support, account inquiries, and questions about becoming an affiliate, the best way to get in touch is by phone on (760) 865-0544. No founding year is listed; the business boasts total quality assurance and an annual revenue of $500,000 to $999,999. Its single location can be found at the coordinates 34.137577, -116.05467. It currently has 1 to 4 employees, and you can learn more about its offerings at twentyninepalmsdental.com, where you can also view client testimonials, government compliance information and annual reports. Nguyen an V DDS aims to strengthen its B2C relationships through advertising and brand promotion. Its Standard Industrial Classification (SIC) code is 8021, and its North American Industry Classification System (NAICS) code is 6212100. Customer feedback is highly appreciated; be sure to leave comments through the survey forms on the website, as these help the company improve its services.
# Copyright 2016 PerfKitBenchmarker Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Module containing class for GCP's spark service. Spark clusters can be created and deleted. """ import datetime import json import re from perfkitbenchmarker import flags from perfkitbenchmarker import providers from perfkitbenchmarker import spark_service from perfkitbenchmarker.providers.gcp import util FLAGS = flags.FLAGS GCP_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ' class GcpDataproc(spark_service.BaseSparkService): """Object representing a GCP Dataproc cluster. Attributes: cluster_id: ID of the cluster. project: ID of the project. """ CLOUD = providers.GCP SERVICE_NAME = 'dataproc' def __init__(self, spark_service_spec): super(GcpDataproc, self).__init__(spark_service_spec) self.project = self.spec.master_group.vm_spec.project @staticmethod def _GetStats(stdout): results = json.loads(stdout) stats = {} done_time = datetime.datetime.strptime( results['status']['stateStartTime'], GCP_TIME_FORMAT) pending_time = None start_time = None for state in results['statusHistory']: if state['state'] == 'PENDING': pending_time = datetime.datetime.strptime(state['stateStartTime'], GCP_TIME_FORMAT) elif state['state'] == 'RUNNING': start_time = datetime.datetime.strptime(state['stateStartTime'], GCP_TIME_FORMAT) if done_time and start_time: stats[spark_service.RUNTIME] = (done_time - start_time).total_seconds() if start_time and pending_time: stats[spark_service.WAITING] = ( (start_time - pending_time).total_seconds()) return stats def _Create(self): """Creates the cluster.""" if self.cluster_id is None: self.cluster_id = 'pkb-' + FLAGS.run_uri cmd = util.GcloudCommand(self, 'dataproc', 'clusters', 'create', self.cluster_id) if self.project is not None: cmd.flags['project'] = self.project cmd.flags['num-workers'] = self.spec.worker_group.vm_count for group_type, group_spec in [ ('worker', self.spec.worker_group), ('master', self.spec.master_group)]: flag_name = group_type + '-machine-type' cmd.flags[flag_name] = group_spec.vm_spec.machine_type if group_spec.vm_spec.num_local_ssds: ssd_flag = 'num-{0}-local-ssds'.format(group_type) cmd.flags[ssd_flag] = group_spec.vm_spec.num_local_ssds if group_spec.vm_spec.boot_disk_size: disk_flag = group_type + '-boot-disk-size' cmd.flags[disk_flag] = group_spec.vm_spec.boot_disk_size cmd.Issue() def _Delete(self): """Deletes the cluster.""" cmd = util.GcloudCommand(self, 'dataproc', 'clusters', 'delete', self.cluster_id) # If we don't put this here, zone is automatically added, which # breaks the dataproc clusters delete cmd.flags['zone'] = [] cmd.Issue() def _Exists(self): """Check to see whether the cluster exists.""" cmd = util.GcloudCommand(self, 'dataproc', 'clusters', 'describe', self.cluster_id) # If we don't put this here, zone is automatically added to # the command, which breaks dataproc clusters describe cmd.flags['zone'] = [] _, _, retcode = cmd.Issue() return retcode == 0 def SubmitJob(self, jarfile, classname, job_poll_interval=None, job_arguments=None, 
job_stdout_file=None, job_type=spark_service.SPARK_JOB_TYPE): cmd = util.GcloudCommand(self, 'dataproc', 'jobs', 'submit', job_type) cmd.flags['cluster'] = self.cluster_id # If we don't put this here, zone is auotmatically added to the command # which breaks dataproc jobs submit cmd.flags['zone'] = [] if classname: cmd.flags['jars'] = jarfile cmd.flags['class'] = classname else: cmd.flags['jar'] = jarfile # Dataproc gives as stdout an object describing job execution. # Its stderr contains a mix of the stderr of the job, and the # stdout of the job. We set the driver log level to FATAL # to suppress those messages, and we can then separate, hopefully # the job standard out from the log messages. cmd.flags['driver-log-levels'] = 'root=FATAL' if job_arguments: cmd.additional_flags = ['--'] + job_arguments stdout, stderr, retcode = cmd.Issue(timeout=None) if retcode != 0: return {spark_service.SUCCESS: False} stats = self._GetStats(stdout) stats[spark_service.SUCCESS] = True if job_stdout_file: with open(job_stdout_file, 'w') as f: lines = stderr.splitlines(True) if (not re.match(r'Job \[.*\] submitted.', lines[0]) or not re.match(r'Waiting for job output...', lines[1])): raise Exception('Dataproc output in unexpected format.') i = 2 if job_type == spark_service.SPARK_JOB_TYPE: if not re.match(r'\r', lines[i]): raise Exception('Dataproc output in unexpected format.') i += 1 # Eat these status lines. They end in \r, so they overwrite # themselves at the console or when you cat a file. But they # are part of this string. while re.match(r'\[Stage \d+:', lines[i]): i += 1 if not re.match(r' *\r$', lines[i]): raise Exception('Dataproc output in unexpected format.') while i < len(lines) and not re.match(r'Job \[.*\]', lines[i]): f.write(lines[i]) i += 1 if i != len(lines) - 1: raise Exception('Dataproc output in unexpected format.') return stats def SetClusterProperty(self): pass def GetMetadata(self): basic_data = super(GcpDataproc, self).GetMetadata() if self.spec.worker_group.vm_spec.num_local_ssds: basic_data.update({'ssd_count': str(self.spec.worker_group.vm_spec.num_local_ssds)}) return basic_data
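A hypothetical usage sketch follows; the jar path, class name and output file are illustrative placeholders rather than PerfKitBenchmarker fixtures. It only shows the shape of the stats dict that SubmitJob builds from the keys defined in the spark_service module imported above.

def run_example_job(dataproc):
    # 'dataproc' is assumed to be an already-provisioned GcpDataproc instance.
    stats = dataproc.SubmitJob(
        'gs://my-bucket/spark-examples.jar',       # assumed jar location
        'org.apache.spark.examples.SparkPi',       # assumed main class
        job_stdout_file='/tmp/spark_pi.out')
    if stats[spark_service.SUCCESS]:
        print('runtime seconds:', stats.get(spark_service.RUNTIME))
        print('scheduler wait seconds:', stats.get(spark_service.WAITING))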
Signed a four-year, $54.02 million contract with the Knicks in July of 2015. Traded to the Bulls in June of 2016. Lopez saw his highest minutes total in over two weeks Tuesday, finishing the game with 12 points in 31 minutes. The Bulls have been scaling back his role over the final few games, but that does not take away from the fact that he has been a standard league asset basically since the All-Star break. The veteran has benefitted from the absence of a number of other players and is unlikely to have this sort of role heading into next season. After being traded from the Knicks to the Bulls in June of 2016, Lopez had a solid, but not spectacular, year in his first season in Chicago. He played in all but one regular season game, but his numbers stayed fairly consistent from a year prior. He finished with averages of 10.4 points, 6.4 rebounds, 1.0 assist and 1.4 blocks, while shooting 49.3 percent from the field. That marked a slight drop in his rebounding numbers, while his field goal percentage was also the worst he's shot since the 2011-12 season. Heading into the 2017-18 campaign, Lopez will be back with the Bulls once again and should remain the team's starting center. Superstar Jimmy Butler was dealt to the Timberwolves in an offseason trade, which should allow Lopez to pick up a few more post touches per game. That means Lopez should see an increase in production, although it wouldn't be surprising if the gains were ultimately limited a bit considering the Bulls are in rebuild mode and will likely try to get their younger talent more involved right away to speed up their development. That said, a slight uptick in points, rebounds and field goal percentage should be expected, though his poor free throw shooting (72.1 percent in 2016-17) is something to be aware of. Lopez will suit up for his third team in as many years as he enters his ninth NBA season. The big man joined the Bulls in June, coming over from the Knicks shortly before the draft as the key piece in the Derrick Rose trade. He'll have big shoes to fill, stepping in as the starting center after the Bulls parted ways with both Joakim Noah and Pau Gasol over the summer. Lopez, who averaged 10.3 points, 7.3 rebounds, 1.6 blocks and 1.4 assists per game last season, will be asked to anchor the Bulls' defense following a summer that brought drastic change to the roster. After averaging 27.1 minutes per game last season, Lopez could be set for a mild increase in playing time given the relative lack of depth behind him. Chicago has a number of options at power forward -- Taj Gibson, Nikola Mirotic and Bobby Portis among them -- but Cristiano Felicio is the team's only other true center. While the 24-year-old Felicio flashed potential as a rookie, he played in only 31 games and remains fairly raw, so the Bulls will be counting heavily on Lopez. While Lopez is far from a dominant interior scorer, he has strong touch around the basket (career 53.3% from the field), as well as at the charity stripe (79.5% in 2015-16). Lopez is also among the league's best offensive rebounders, pulling down 3.3 per game last season, good for sixth in the NBA. Given his underdeveloped offensive game, Lopez's fantasy ceiling is relatively low, but he's a proven, steady commodity who's worth a look in later rounds of fantasy drafts.
The proud owner of a new four-year, $54 million contract, Lopez joined the Knicks in free agency after playing the past two seasons for the Blazers. Last year, the seven-footer posted 9.6 points, 6.7 rebounds, and 1.4 blocks while shooting 54 percent in 28 minutes per game across 59 appearances. Ultimately, a broken right hand suffered last December prevented Lopez from completing his third straight season without missing a game, but he remained a formidable presence around the rim when healthy. Now with the Knicks, Lopez is expected to anchor New York's defense as the team's unquestioned starting center. That should coincide with a useful amount of blocked shots, but it will be most interesting to monitor how the Knicks' roster composition impacts Lopez's rebounding numbers. Without playing alongside another seven-footer like former teammate LaMarcus Aldridge, Lopez figures to consume a larger chunk of his team's rebounds but has only averaged over seven boards per game once in his previous seven seasons. While Lopez's defensive contributions still outweigh his offensive ability, the 27-year-old's average of 10.7 points over the past three seasons suggests he could become a more frequent double-double threat this season, if his rebounding rate reflects his comparatively larger role on the Knicks. After arriving in Portland last offseason via a three-team deal, Robin Lopez turned in the most prolific season of his six-year career, averaging 11.1 points (on 55-percent shooting), 8.5 rebounds, 1.7 blocks, and 0.9 assists in 32 minutes. The exchange appears to have worked in the Blazers' favor, at least initially, as Lopez's placement in the starting lineup allowed frontcourt mate LaMarcus Aldridge to focus more of his energy on the offensive end of the floor. Meanwhile, Lopez responded with a career-high in swats and defensive rebounds, but the center also emerged as a force on the offensive glass, corralling a franchise record 326 offensive rebounds on the year. Largely avoiding any serious injuries for the third consecutive year, Lopez is building up a track record as a reliable option inside, unlike his twin brother, Brook. As Robin enters the final year of his contract, he can be relied upon for boards, blocks, and a more-than-palatable field-goal percentage. While Lopez didn't fully emerge out of his brother's shadow last season, he at least poked his head around Brook's shoulder. The forgotten Lopez twin easily put together the best season of his career for the Hornets, finishing with career-high marks in scoring (11.3), rebounding (5.6) blocked shots (1.6) and minutes played (26 mpg). While his counting stats might not jump out of the box score, Lopez was able to add to his overall fantasy value by being an extremely efficient big man, shooting 53 from the floor, 78 percent from the charity stripe and limiting his turnovers to 1.3 per game. Lopez was a strong veteran presence in the paint for a rebuilding squad in New Orleans, but he will join an already established Portland frontcourt after being packaged as part of the Tyreke Evans sign-and-trade deal. The Blazers' frontcourt will continue to revolve around LaMarcus Aldridge and the team will likely want to get Myers Leonard more involved in the rotation during his second season, which makes it unlikely Lopez will see as large of a role as he did in New Orleans. But if Lopez is able to carve out 22-26 mpg with his new team, he's capable of providing consistent enough production to be worth a look in some formats. 
Lopez comes to New Orleans following a mediocre start to his career. He played in all but two games last season, averaging 5.4 ppg, 3.3 rpg and .9 bpg, but Lopez was stuck as a defensive-minded player in a heavily offensive team. Lopez could reach his potential under Williams. He might not put up the numbers to be a force in fantasy, but his contributions to the team as a defensive stalwart will be a big help to the team. Unless in a deep league and looking for defensive stats, Lopez should not be a fantasy option. Handed the starting role in Phoenix, Lopez squandered it thanks to poor play. He is foul prone and has no offensive game to speak of. Even if Gortat were to get injured, it’s hard to foresee Lopez producing enough offensively in order to be worth a roster spot. It appeared, as they entered the NBA, that Robin would always remain in the shadow of his twin brother Brook. To a certain degree, it's true: while the latter has become a night-in, night-out double-double threat, the former has become more of hustle-type player. But it was Robin hustling with Phoenix in the playoffs last year as his brother toiled with the lowly Nets. He enters the season as No. 1 on the depth chart at center. That's good for at least 25 minutes per game. Lopez's rookie season was a far cry from his brother Brook's in New Jersey, as he struggled to adjust to the pro game even without the pressure of being a starter. With Shaquille O'Neal now in Cleveland, Lopez is the Suns' best option for interior defense, but his showing in the Summer League didn't give any indication he was ready for a bigger role. Even if he does earn more minutes his value will come solely in rebounds and blocks, as he's a poor fit for the "Seven Seconds or Less" offensive philosophy. He has more hair than his brother Brook, but that's likely to be the only category in which he outperforms his sibling. Unless an injury opens up a starting spot for him, Lopez will be just a big body off the bench for the Suns this season. Lopez produced 29 points (12-18 FG, 5-8 FT), seven rebounds, two assists and one block in 30 minutes Monday in the Bulls' 113-105 loss to the Knicks. Lopez finished with 15 points (7-14 FG, 0-1 3Pt, 1-1 FT), nine rebounds and four assists across 28 minutes in Wednesday's 118-98 loss to the Trail Blazers. Lopez totaled 10 points (5-11 FG, 0-1 3Pt, 2-2 FT), four rebounds and one assist over 22 minutes in the Bulls' loss to the Jazz on Saturday. Lopez finished with 24 points 11-14 FG, 2-2 FT), seven rebounds, two assists, and one block in 26 minutes during Monday's 116-101 victory over the Suns. Lopez contributed 22 points (9-12 FG, 4-4 FT), eight rebounds, three assists, three blocks, and one steal in 34 minutes during Friday's 128-121 loss to the Clippers.
from sicpy.cryptobox import Cryptobox from sicpy.utils.iterators import integer_iterator class RailFence(Cryptobox): """ Rail-fence Sawtooth """ # DOES NOT NEED ALPHASBET def __init__(self, alphabet=None, key=None, plain_text=None, cipher_text=None): """ """ Cryptobox.__init__(self, alphabet, key, plain_text, cipher_text) self.key_init = 2 def cipher(self, alphabet=None, input_text=None, key=None): """ """ # Variable inference if alphabet == None: alphabet = self.alphabet if input_text == None: input_text = self.plain_text if key == None: key = self.key # Let's make a matrix out of the original text text_length = len(input_text) buffer_matrix = [[0 for i in range(key)] for j in range(text_length)] rail_list = generate_rail_list(key, text_length) for position, letter in enumerate(input_text): buffer_matrix[position][rail_list[position]] = letter # Let's transpose the matrix matrix_width = len(buffer_matrix[0]) buffer_matrix = ( [[row[i] for row in buffer_matrix] for i in range(matrix_width)] ) # Now let's flatten the matrix to a single vector buffer_list = sum(buffer_matrix,[]) # change to string omiting 0s output_text = '' for letter in buffer_list: if letter != 0: output_text += str(letter) return output_text def decipher(self, alphabet=None, key=None, input_text=None): """ asimetrical algo, decipher is different (thow known and unique for each cipher) """ # Variable inference if alphabet == None: alphabet = self.alphabet if input_text == None: input_text = self.cipher_text if key == None: key = self.key # make a matrix with filled with 0 and 1, 1 representing # the place were letters will be placed text_length = len(input_text) buffer_matrix = [[0 for i in range(key)] for j in range(text_length)] rail_list = generate_rail_list(key, text_length) for position, letter in enumerate(input_text): buffer_matrix[position][rail_list[position]] = 1 # place letters (line per line) position = 0 for j in range(key): for i in range(len(buffer_matrix)): if buffer_matrix[i][j] == 1: buffer_matrix[i][j] = input_text[position] position += 1 # Read (extract) letters (one letter per column) output_text = '' for i in range(len(buffer_matrix)): for j in range(key): #if isinstance(buffer_matrix[i][j],int): if buffer_matrix[i][j] != 0: output_text += buffer_matrix[i][j] return output_text def bruteforce(self, times=None, alphabet=None, input_text=None): """ times should be lower than len(input_text) """ # initialise times to maximum possible value if times == None: times = len(input_text) - 1 return Cryptobox.bruteforce(self, times, alphabet, input_text) def key_inverse(self, alphabet=None, key=None): """ algo is asimetric, same key is used, just returns same key """ # Variable inference if alphabet == None: alphabet = self.alphabet if key == None: key = self.key # Main code return key def key_iterate(self, alphabet=None, key=None): """ need to pass alphabet for consistency""" # Variable inference #if alphabet == None: # alphabet = self.alphabet if key == None: key = self.key # Main code return key + 1 # the length of the alphabet is not the limit def generate_rail_list(key, list_length): """ Generates a list of integers following a sawtooth or rail fence """ return_list = [] element = 0 for whatever in range(list_length): return_list.append(element) # Change direction if element == (key-1): dir_up = False if element == 0: dir_up = True # Update element if dir_up == True: element += 1 else: element -= 1 return return_list
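The sawtooth index list does all the work in this transposition. The following standalone sketch of the same idea (written independently of the sicpy Cryptobox base class, and assuming a key of at least 2) shows how the indices drive both the rail assignment and the read-out:

def rail_indices(key, length):
    # Same zigzag as generate_rail_list above: 0 .. key-1 .. 0 .. repeated.
    indices, element, step = [], 0, 1
    for _ in range(length):
        indices.append(element)
        if element == key - 1:
            step = -1
        elif element == 0:
            step = 1
        element += step
    return indices

def rail_fence_encrypt(text, key):
    rails = rail_indices(key, len(text))
    # Read out rail 0 first, then rail 1, ... exactly like the transpose
    # and flatten steps in RailFence.cipher.
    return ''.join(text[i] for rail in range(key)
                   for i, r in enumerate(rails) if r == rail)

print(rail_indices(3, 10))                       # [0, 1, 2, 1, 0, 1, 2, 1, 0, 1]
print(rail_fence_encrypt('WEAREDISCOVERED', 3))  # WECRERDSOEEAIVD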
ABSTRACT - Respondents were shown pictures of products with either odd or even prices. Two days later they saw the same items, but for half of the products, the prices had increased. Respondents were less likely to notice the price increases in the odd priced products than in the even priced products. Evidence indicated that this was due both to poorer memory for odd prices and to a bias toward judging that the odd prices were not the ones that had increased. Robert M. Schindler (1984), "Consumer Recognition of Increases in Odd and Even Prices", in NA - Advances in Consumer Research Volume 11, eds. Thomas C. Kinnear, Provo, UT: Association for Consumer Research, Pages: 459-462. Retailers' use of the technique of odd pricing (e.g., pricing an item at $9.99 rather than $10.00) is extremely common (Twedt 1965). However, despite the fact that odd prices are such a prominent feature of the retail price environment, the effect of this technique on the consumer has been mostly a matter of speculation. One common hypothesis is that consumers will tend to pay attention to only the dollar part of a price and drop off the cents part. Thus, if consumers perceive a price such as $4.99 as $4.00, then by pricing a $5.00 item at $4.99, the retailer will lower the perceived price by one dollar at the cost of only one penny. Lambert (1975) tested this hypothesis by asking respondents to give an estimate of the monetary value of a set of products which was either even or odd priced, but found no consistent evidence to support the hypothesis. However, recently, Schindler and Wiman (1983) did find some evidence in favor of this "rounding down" hypothesis. They asked respondents to recall prices the respondents had seen two days earlier and found that respondents were more likely to underestimate odd prices than even prices. Although many respondents recalled a $9.99 price as $9.89 or $9.50 rather than as $9.00, the fact that they were less likely to recall a $10.00 price as "9 dollars and some cents" provides some support for the rounding down hypothesis. Another common hypothesis is based on the fact that a number of studies, including at least three field studies (Ginzberg 1936; Dalrymple and Haines 1970; Georgoff 1972), have failed to find any reliable effect of odd pricing on sales. This hypothesis holds that odd pricing actually has little effect on consumer behavior except that it has created familiar "price points." Many retailers would abandon odd pricing were it not that consumers tend to consider these price points as "correct" prices, and will be especially likely to notice, and presumably resist, prices which are raised above these price points (Nwokoye 1975; Whalen 1980). Gabor and Granger's (1964) finding that consumer demand may peak at odd prices for a product which is normally odd-priced, but not for a product which is usually not odd priced, provides some support for the price point hypothesis. However, results of the Schindler and Wiman (1983) study raise questions about the price point hypothesis.
In addition to finding a greater tendency to underestimate the odd prices, Schindler and Wiman found less accurate recall of odd prices than of even prices. If this accuracy difference is due entirely to a tendency to guess even prices when the actual price is forgotten, then it does not bear on the price point hypothesis. But if this recall accuracy difference is not due entirely to guessing strategies, then it must indicate that consumers are more likely to forget odd prices than even ones. However, if a consumer has forgotten a price, then he/she cannot notice an increase in that price. Thus, greater forgetting of odd prices should lead to consumers being less likely to notice increases in those odd prices, not more likely to notice them, as suggested by the price point hypothesis. The experiment reported here was designed to test whether consumers' tendency to notice a price increase in odd prices differed from their tendency to notice a price increase in even prices. Respondents were shown pictures of products which were given either odd or even prices. Two days later, the respondents were shown the same pictures with half of the even prices increased and the other half unchanged, and with half of the odd prices increased and the other half unchanged. The respondents were asked to indicate for each product whether or not the price had changed from the price they had seen two days earlier. The price point hypothesis predicts greater awareness of price increases among odd prices than among even prices. On the other hand, if the lower recall accuracy of odd prices is due to a greater tendency to forget odd prices, then the prediction would be lower awareness of price increases among odd prices than among even prices. One hundred and twenty-seven undergraduate business students at Northeastern University served as respondents in this study. The experiment was conducted in six Introduction to Marketing classes during two class sessions, the second coming two days after the first. During the second session, data were collected from only those students who were present for the first session. This resulted in a sample size of 92 for the second session. The materials used in this study were slightly modified from those used by Schindler and Wiman (1983). Pictures of twenty products judged to be relevant to students were selected from a mail-order catalog and mounted on 7 1/2" by 5 1/2" cards. Using a table of random numbers, a standard order of presentation of the twenty cards was established and they were bound together in this order with looseleaf rings. The price of each product was lettered on a rectangular white sticker and affixed to the lower center portion of each card. Three card sets were prepared, which were identical except for the prices affixed. If a product were priced at, say, $25.00 in one card set, then that product would be priced at $24.99 in the second card set, and $24.98 in the third set. Odd prices with $.98 endings were included along with odd prices with $.99 endings in order to demonstrate that any differences found between even prices and prices with $.99 endings were due to the "oddness" of the $.99 ending rather than to the one cent difference between the even and $.99 price. The price ending ($.00, $.99, or $.98) assigned to a product in a particular card set was determined randomly for each of the twenty products, thus yielding a mixture of the three pricing conditions in each card set.
Each set was given a cover card with the letter A, B, or C prominently displayed, and each respondent received only one of the three sets. Thus, each respondent was exposed to all three pricing conditions, but the particular products which exemplified each condition depended on the card set the respondent received. The card sets used during the second session were identical to those used during the first session except that in each set, the prices were increased in ten of the twenty products. The increases ranged from 15% to 50% and were designed to (1) maintain the "roundness" of the even prices, and (2) cause the same number of digits to change in the odd and even priced forms. For example, a price of $400 would be increased to $500. Both are round numbers, and the increase involves a change of only one digit in both the even pricing condition ($400.00 - $500.00) and the two odd pricing conditions ($399.99 - $499.99 and $399.98 - $499.98). Note that the price increase never altered the type of price ending that a product had in a particular card set, and that each product had the same size price increase when it appeared with the even ending as when it appeared with the $.99 and $.98 endings. The result is that, over the three card sets, the size of the average price increase for each of the three price ending conditions was equal. Also, the twenty products were randomly split into two groups of ten. For each card set, half of the respondents received it during the second session in a version where prices were increased in the first group of products. The other half of the respondents received it in a version where prices were increased in the second group of products. Table 1 illustrates, for one card set, the product prices and their increases for both versions of that card set used during the second session. At the beginning of the first session, respondents were told they were participating in a study of "shopping behavior," and were each given a response sheet. The response sheet for the first session included a list of the twenty products in the order that they appeared in the card set. To the right of each product name was a series of six purchase probabilities, beginning with 0% and proceeding at intervals of 20% up to 100%. Respondents were instructed to examine the product pictured on each card and to note the price indicated on the sticker. They were then told to assume that they were in the market for such a product and to indicate their willingness to buy the pictured product at the price displayed on the card by circling the percentage which indicated their purchase probability. Two of the six marketing classes were given card set A, two were given card set B, and two were given card set C. Thus, each card set was seen by approximately the same number of respondents. For the second session, each group of respondents received the same card set they had seen during the first session, except that the prices for half of the products had been increased. The respondents were told that "some of the prices" had increased and were asked to indicate whether each price was the "same" or "higher" by circling one of those two words next to the product name on the second session response sheet. Then they were asked to rate, on a 5-point scale (1 = "not sure" and 5 = "very sure"), how sure they were of their judgments of whether each price had or had not increased.
Finally, the respondents were asked once again to indicate their willingness to buy each of the pictured products under the same assumption and using the same scale as in the first session.

Each respondent's first-session mean willingness to buy was computed for each of the three price ending conditions. An analysis of variance of these means indicated there were no statistically significant differences between these three pricing conditions (F = 0.50, p > .60). A similar analysis of variance on the second-session willingness-to-buy measures also indicated that there were no statistically significant differences between the three pricing conditions (F = 1.35, p > .20).

Table 2 shows how often respondents indicated that a price increased when it in fact did increase. When prices had odd endings, respondents were less likely to notice the price increase than when the prices had even endings (χ² = 18.25, p < .001). However, as can be seen in Table 3, respondents were also less likely to indicate an increase occurred in an odd price than in an even price even for those odd and even prices which remained unchanged between the first and second sessions (χ² = 5.88, p = .05). Thus, there was evidence of a general bias toward indicating that an odd price had not increased.

To determine whether this response bias could be entirely responsible for the respondents' lower likelihood of recognizing an increase in an odd price, or whether poorer memory for the first-session prices of odd-priced items was also involved, a value of d' was calculated for the respondents' increase/no-increase judgments for each of the three pricing conditions. Developed in the context of signal detection theory (Coombs, Dawes, and Tversky 1970; McNicol 1972), d' is a measure of the sensitivity of the observer to a signal (in this case, the "signal" is the presence of a price increase) which is independent of the size of any response bias present. The calculations indicated that the d' for the prices with even endings was 1.108. For the prices with $.99 endings, d' = 0.917, and for the prices with $.98 endings, d' = 0.915. A one-way analysis of variance on the d' scores for each respondent indicated that this effect of price ending on d' was statistically significant (F = 13.65, p < .005). Thus, this signal detection analysis indicates that the respondents' lower likelihood of recognizing an increase in an odd price than in an even price is at least partially due to a greater tendency to forget the first-session prices when they had odd endings.

The respondents' ratings of how sure they were of their increase/no-increase judgments also implicate the role of memory factors in the lower probability of recognizing an increase in an odd price. If the respondents were as likely to forget first-session prices with even endings as those with odd endings but simply guessed differently for odd and even prices, then there is no obvious reason why they should differ in their confidence in their increase/no-increase judgments for odd and even prices. However, if memory for first-session odd prices was poorer than that for first-session even prices, then respondents would more often have to guess whether odd prices had increased or not, and thus would be expected to have less confidence in their increase/no-increase judgments for odd prices. The results support the latter alternative.
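Before turning to the sureness ratings reported below, a minimal sketch of how d' is computed from increase/no-increase judgments may be helpful; the hit and false-alarm rates used here are assumed numbers for illustration, not the study's raw data.

from statistics import NormalDist

def d_prime(hit_rate, false_alarm_rate):
    # d' = z(hit rate) - z(false-alarm rate); larger values mean greater
    # sensitivity to the "signal" (here, the presence of a price increase),
    # independent of any overall bias toward answering "same" or "higher".
    z = NormalDist().inv_cdf
    return z(hit_rate) - z(false_alarm_rate)

# Assumed rates, not taken from Tables 2 and 3:
#   hits         = "higher" responses to prices that actually increased
#   false alarms = "higher" responses to prices that stayed the same
print(round(d_prime(0.75, 0.35), 3))   # about 1.06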
The mean sureness rating for increase/no-increase judgments of even prices was 3.79, the mean sureness for prices with $.99 endings was 3.75, and the mean sureness for the prices with $.98 endings was 3.58. A one-way analysis of variance indicated that the effect of price ending on sureness was statistically significant (F = 5.97, p < .005).

The results of this study indicate that, contrary to the prediction of the price point hypothesis, consumers are less likely to notice an increase in an odd price than in an even price. Further, these results provide evidence that consumers are more likely to forget an odd price than the corresponding even price over a two-day period, and that this poorer memory for odd prices plays a role in the lower tendency to recognize that they may have increased. However, since the respondents in this study were dealing with pictures of products and not making actual purchases, these results must be regarded as tentative and subject to confirmation in a more realistic setting. The artificial nature of the task may also have been responsible for the failure to find any effect of price ending on a willingness-to-buy measure. Other recent laboratory tests of the effects of pricing variables on a willingness-to-buy measure have also found either no effects (e.g., Schindler and Wiman 1983) or only limited effects (e.g., Della Bitta, Monroe, and McGinnis 1981). This underlines the necessity of expanding this laboratory research into situations where the researcher can be more confident of observing realistic levels of consumer concern about price information.

Future research might also be devoted to the question of what it is about odd prices which could account for the finding that they are more difficult to remember. One possibility is that odd prices may be encoded by the consumer's perceptual system to form an internal representation consisting of two or more chunks (Miller 1956), while even prices may be encoded into an internal form consisting of only one chunk (Jacoby and Olson 1977). For example, $19.99 may be encoded into the chunk "19" and the chunk "99", while $20.00 may be encoded into the single chunk "20". If both $19.99 and $20.00 received the same amount of cognitive resources (e.g., rehearsal time, space in memory, etc.), then the probability of at least one of the two chunks of the representation of $19.99 being unretrievable would be greater than the probability of losing the one chunk of the representation of $20.00. If future research supports this possibility, it would indicate that consumers would be less likely to recognize increases in any non-round number, not only ones which are a few pennies below a round number.

An additional area for future research involves the finding of the respondents' bias toward judging that an odd price is one that has not increased. It may be the case that consumers associate odd prices with discounts and low prices, and such associations may prove to be another psychological mechanism of an effect of odd pricing on consumer behavior.

Thus, the present results add, to Schindler and Wiman's (1983) evidence for the rounding down of odd prices, evidence that odd pricing provides a second benefit to the seller: lower consumer awareness of a price increase. However, this effect would probably also work in the other direction and make the consumer less likely to notice a decrease in an odd price.
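Before the retailer implications that follow, the chunking account above can be made concrete with a small illustrative calculation; the per-chunk retention probability used here is an assumed figure, not an estimate from this study.

# Assumed per-chunk retention probability, purely for illustration.
p_retain = 0.8

p_recall_even = p_retain        # $20.00 stored as the single chunk "20"
p_recall_odd = p_retain ** 2    # $19.99 stored as "19" + "99"; both chunks must survive

print(p_recall_even, round(p_recall_odd, 2))   # 0.8 versus 0.64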
For retailers, this suggests (1) that odd pricing may be especially useful in advance of expected price increases, and (2) that inclusion of a reference price is especially important when communicating to the consumer that an odd price has been lowered.

Coombs, Clyde H., Dawes, Robyn M., and Tversky, Amos (1970), Mathematical Psychology: An Elementary Introduction, Englewood Cliffs, New Jersey: Prentice-Hall.

Dalrymple, Douglas J. and Haines, George H., Jr. (1970), "A Study of the Predictive Ability of Market Period Demand-Supply Relations for a Firm Selling Fashion Products," Applied Economics, 1(4), 277-285.

Della Bitta, Albert J., Monroe, Kent B., and McGinnis, John M. (1981), "Consumer Perceptions of Comparative Price Advertisements," Journal of Marketing Research, 18, 416-427.

Gabor, Andre and Granger, C.W.J. (1964), "Price Sensitivity of the Consumer," Journal of Advertising Research, 4, 40-44.

Georgoff, David M. (1972), Odd-Even Price Endings, East Lansing: Michigan State University.

Ginzberg, Eli (1936), "Customary Prices," American Economic Review, 26, 296.

Jacoby, Jacob and Olson, Jerry C. (1977), "Consumer Response to Price: An Attitudinal Information Processing Perspective," in Moving Ahead in Attitude Research, Yoram Wind and Marshall Greenberg (eds.), Chicago: American Marketing Association, 73-86.

Lambert, Zarrel V. (1975), "Perceived Prices as Related to Odd and Even Price Endings," Journal of Retailing, 51, 13-78.

McNicol, D. (1972), A Primer of Signal Detection Theory, London: Allen and Unwin Ltd.

Miller, George A. (1956), "The Magical Number Seven, Plus or Minus Two," Psychological Review, 63, 81-97.

Nwokoye, Nonyelu G. (1975), "An Experimental Study of the Relationship Between Responses to Price Changes and the Price Level for Shoes," in Advances in Consumer Research, Vol. II, M.J. Schlinger (ed.), Ann Arbor: Association for Consumer Research, 693-703.

Schindler, Robert M. and Wiman, Alan R. (1983), "Consumer Recall of Odd and Even Prices," Working Paper 83-10, College of Business Administration, Northeastern University, Boston, Massachusetts.

Twedt, Dik W. (1965), "Does the '9 Fixation' in Retail Pricing Really Promote Sales?" Journal of Marketing, 29, 54-55.

Whalen, Bernard F. (1980), "Strategic Mix of Odd, Even Prices Can Lead to Increased Retail Profits," Marketing News, (March 7), 24.
#!/usr/bin/env python """Web browser interface for CladeCompare. Input (GET): Form for submitting FG, [BG, HMM]. Output (POST): CladeReport (heat map) of submission. """ # TODO: # - take boolean do_weight as a form option (checkbox?) # ENH (in the report): # - asterisks up top link to PDF pairlogos # - below title, link to PyMOL script of PDB(s) from __future__ import print_function import logging import os import tempfile import webbrowser import bottle from cladecomparelib import (core, pairlogo, pmlscript, urn, gtest, jsd, phospho, report) # ENH: include textarea as alternative to each file upload input # ENH: dropdown alpha (or textinput w/ JS validation) FORM_HTML = """\ <html> <body> <h1>CladeCompare</h1> <form action="cladecompare" method="post" enctype="multipart/form-data"> <p> Submission name: <input type="text" name="name" /> </p> <h2>Sequences</h2> <p> Sequence file 1 (required): <br /> <input type="file" name="seqfile1" size=50 /> </p> <p> Sequence file 2: <br /> <input type="file" name="seqfile2" size=50 /> </p> <h2>Statistical strategy</h2> <p> <label> <input type="radio" name="strategy" value="gtest" checked="checked" /> G-test (goodness-of-fit) </label> </p> <p> <label> <input type="radio" name="strategy" value="urn" /> Ball-in-urn model (binomial) </label> </p> <p> <label> <input type="radio" name="strategy" value="jsd" /> Jensen-Shannon divergence </label> </p> <p> <label> <input type="radio" name="strategy" value="phospho" /> Phosphorylation site conservation </label> </p> <p> Significance cutoff (alpha): <input type="text" name="alpha" value="0.005" /> </p> <h2>Alignment profile</h2> <p> HMM (.hmm) profile: <br /> <input type="file" name="profile" size=50 /> </p> <!-- <h2>Structure</h2> <p> PDB ID: <input type="text" name="pdbid" /> <br /> or upload a PDB file: <br /> <input type="file" name="pdbfile" size=50 /> </p> --> <p /> <p><input type="submit" /></p> </form> <hr /> <p>Project page: <a href="http://github.com/etal/cladecompare">http://github.com/etal/cladecompare</a></p> <p>If you use this software in a publication, please cite our paper that describes it:</p> <blockquote>Talevich, E. & Kannan, N. (2013) <a href="http://www.biomedcentral.com/1471-2148/13/117">Structural and evolutionary adaptation of rhoptry kinases and pseudokinases, a family of coccidian virulence factors</a>. <i>BMC Evolutionary Biology</i> 13:117 doi:10.1186/1471-2148-13-117 </blockquote> </body> </html> """ # --- Routes --- @bottle.get('/cladecompare') def form(): return FORM_HTML # TODO - routes for downloading .pml, .pdf -- use static_file @bottle.post('/cladecompare') def form_submit(): # ENH: pick a unique, informative name -- e.g. date or hostname name = bottle.request.forms.name seqfile1 = bottle.request.files.seqfile1 if not hasattr(seqfile1, 'file'): return "Error: You need to specify at least one sequence file." 
    seq1fname = handle2temp(seqfile1.file,
                            suffix=('.cma' if seqfile1.filename.endswith('.cma')
                                    else '.seq'))

    # Optional second sequence set -- if missing, do single mode
    seqfile2 = bottle.request.files.seqfile2
    if hasattr(seqfile2, 'file'):
        seq2fname = handle2temp(seqfile2.file,
                                suffix=('.cma' if seqfile2.filename.endswith('.cma')
                                        else '.seq'))
        if not name:
            name = "%s-vs-%s" % (seqfile1.filename.rsplit('.', 1)[0],
                                 seqfile2.filename.rsplit('.', 1)[0])
    else:
        seq2fname = ''
        if not name:
            name = seqfile1.filename

    # Optional HMM profile for alignment
    profile = bottle.request.files.profile
    if hasattr(profile, 'file'):
        if not profile.filename.endswith('.hmm'):
            return "HMM profile file name must end in .hmm"
        profname = handle2temp(profile.file, suffix='.hmm')
        logging.info("Aligning %s with profile %s", seq1fname, profname)
        fg_aln = core.hmm_align_and_read(profname, seq1fname)
        if seq2fname:
            logging.info("Aligning %s with profile %s", seq2fname, profname)
            bg_aln = core.hmm_align_and_read(profname, seq2fname)
    else:
        profname = ''
        fg_aln = core.read_aln(seq1fname, 'fasta')
        if seq2fname:
            bg_aln = core.read_aln(seq2fname, 'fasta')

    pdbfile = bottle.request.files.pdbfile
    if hasattr(pdbfile, 'file'):
        if not profname:
            return ("Error: to generate a PyMOL script for a PDB file you must "
                    "also specify an HMM profile")
        pdbfname = handle2temp(pdbfile.file)
        logging.info("Aligning %s with profile %s", pdbfile.filename, profname)
        pdb_rec, pdb_resnums, pdb_inserts = core.pdb_hmm(profname, pdbfname)
        pdb_data = [(pdbfname, pdb_rec, pdb_resnums, pdb_inserts)]
    else:
        pdbfname = ''
        pdb_data = None

    # Mutually exclusive with pdbfname (above):
    pdbid = bottle.request.forms.pdbid
    if pdbid:
        # If PDB ID: .pml should use "fetch" instead of "load"?
        # Can get this info w/o dl'ing actual PDB file (e.g. via FASTA)?
        pass

    stat_module = dict(gtest=gtest, urn=urn, jsd=jsd, phospho=phospho,
                       )[bottle.request.forms.strategy]

    try:
        alpha = float(bottle.request.forms.alpha)
        if not 0.0 <= alpha <= 1.0:
            raise ValueError
    except ValueError:
        return "Error: alpha must be a number between 0 and 1"

    _fdo, tmp_output = tempfile.mkstemp(suffix='.out')
    os.close(_fdo)
    _fdp, tmp_pattern = tempfile.mkstemp(suffix='.pttrn')
    os.close(_fdp)

    # Run the algorithms...
    if seq2fname:
        # Pair mode
        fg_clean, bg_clean, hits = core.process_pair(fg_aln, bg_aln,
                                                     stat_module, False)
        core.process_output(fg_clean, bg_clean, hits, alpha,
                            tmp_output, tmp_pattern, pdb_data)
    else:
        # Single mode
        aln, hits = core.process_one(fg_aln, stat_module, False)
        core.process_output(aln, None, hits, alpha,
                            tmp_output, tmp_pattern, pdb_data)

    # Get the HTML report data
    contents = report.do_single(tmp_output, tmp_pattern)[1]

    cleanup(seq1fname)
    cleanup(seq2fname)
    cleanup(profname)
    cleanup(tmp_output)
    cleanup(tmp_pattern)

    return report.html_page_tpl % dict(title=name, contents=contents)


# --- Helpers ---

def handle2temp(handle, suffix=''):
    """Write file handle contents to a temporary file, return tempfile name."""
    _fd, fname = tempfile.mkstemp(suffix=suffix)
    os.write(_fd, handle.read())
    os.close(_fd)
    return fname


def cleanup(fname):
    """Remove a temporary file that may or may not exist."""
    if os.path.isfile(fname):
        try:
            os.remove(fname)
            print("Cleaned up", fname)
        except OSError:
            print("Failed to clean up", fname)


# --- Run ---

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO,
                        format="%(module)s [@%(lineno)s]: %(message)s")
    webbrowser.open("http://localhost:8080/cladecompare")
    bottle.run(host='localhost', port=8080)
Held August 2-4, 2016 at the Shanghai World Expo Exhibition and Convention Center, the conference showcases a variety of products and technologies for markets that include coatings, pulp and paper, automotive technologies, medicines, packaging and personal consumer goods. It is widely regarded as a leading international event for the polyurethanes industry, a key supplier to mattress manufacturers. Mattress demand in China is now at levels equal to or greater than in the United States, and China, as it is in so many other industries, is also a manufacturing powerhouse within the mattress industry. EnFinit™ never-break microcapsules help bedding industry manufacturers incorporate class-leading PCM thermal performance characteristics into sleep products, including pillows, toppers and mattresses, to help consumers fall asleep faster and sleep more comfortably throughout the night. In bedding products, PCMs from Encapsys work by helping sleep products maintain optimum sleep temperatures for the duration of the sleep cycle. With a compelling line of both high-performance and cost-effective PCM solutions for the bedding industry, and with substantial investments in research and support for bio-based solutions, Encapsys generated considerable interest among bedding manufacturers and suppliers in the Chinese market at UTECH Asia in 2016. Encapsys, LLC is a pioneering, leading developer and manufacturer of encapsulated technologies with more than six decades' experience in creating novel, high-performance solutions for a variety of industries. Encapsys products include solutions for consumer products, cosmetics, paints and coatings, as well as the comprehensive line of EnFinit™ "never break" capsules designed to enhance thermal comfort for products in the bedding industry.
#!/usr/bin/env python # vim:fileencoding=utf-8 # License: GPLv3 Copyright: 2020, Kovid Goyal <kovid at kovidgoyal.net> import os import re import signal import socket import subprocess import sys from typing import Callable, cast from urllib.parse import quote_from_bytes def write_hyperlink(write: Callable[[bytes], None], url: bytes, line: bytes, frag: bytes = b'') -> None: text = b'\033]8;;' + url if frag: text += b'#' + frag text += b'\033\\' + line + b'\033]8;;\033\\' write(text) def main() -> None: if not sys.stdout.isatty() and '--pretty' not in sys.argv: os.execlp('rg', 'rg', *sys.argv[1:]) cmdline = ['rg', '--pretty', '--with-filename'] + sys.argv[1:] p = subprocess.Popen(cmdline, stdout=subprocess.PIPE) assert p.stdout is not None write: Callable[[bytes], None] = cast(Callable[[bytes], None], sys.stdout.buffer.write) sgr_pat = re.compile(br'\x1b\[.*?m') osc_pat = re.compile(b'\x1b\\].*?\x1b\\\\') num_pat = re.compile(br'^(\d+)[:-]') in_result: bytes = b'' hostname = socket.gethostname().encode('utf-8') try: for line in p.stdout: line = osc_pat.sub(b'', line) # remove any existing hyperlinks clean_line = sgr_pat.sub(b'', line).rstrip() # remove SGR formatting if not clean_line: in_result = b'' write(b'\n') continue if in_result: m = num_pat.match(clean_line) if m is not None: write_hyperlink(write, in_result, line, frag=m.group(1)) else: write(line) else: if line.strip(): path = quote_from_bytes(os.path.abspath(clean_line)).encode('utf-8') in_result = b'file://' + hostname + path write_hyperlink(write, in_result, line) else: write(line) except KeyboardInterrupt: p.send_signal(signal.SIGINT) except EOFError: pass finally: p.stdout.close() raise SystemExit(p.wait()) if __name__ == '__main__': main()
This is a great meal for those who want a hearty, simple, and healthy dinner. Very simple and easy to follow! I would like to thank my friend Camila for introducing me to this meal, hence the name! For decoration, sprinkle more parsley on top when ready to serve.
from werkzeug.utils import cached_property from base import db, Base from cluster import Cluster from models.base import commit_session class RedisNode(Base): __tablename__ = 'redis_node' host = db.Column(db.String(255), nullable=False) port = db.Column(db.Integer, nullable=False) eru_container_id = db.Column(db.String(64), index=True) assignee_id = db.Column(db.ForeignKey(Cluster.id), index=True) suppress_alert = db.Column(db.Integer, nullable=False, default=1) __table_args__ = (db.Index('address', 'host', 'port', unique=True),) def free(self): return self.assignee_id is None @cached_property def containerized(self): return self.eru_container_id is not None @cached_property def container_info(self): from flask import g if g.container_client is None or not self.containerized: return None return g.container_client.get_container(self.eru_container_id) def get_by_host_port(host, port): return db.session.query(RedisNode).filter( RedisNode.host == host, RedisNode.port == port).first() def list_eru_nodes(offset, limit): return db.session.query(RedisNode).filter( RedisNode.eru_container_id != None).order_by( RedisNode.id.desc()).offset(offset).limit(limit).all() def list_all_nodes(): return db.session.query(RedisNode).all() def create_instance(host, port): node = RedisNode(host=host, port=port) if get_by_host_port(host, port) is None: db.session.add(node) db.session.flush() return node def list_free(): return RedisNode.query.filter(RedisNode.assignee_id == None).order_by( RedisNode.id.desc()).all() def create_eru_instance(host, port, eru_container_id): node = RedisNode(host=host, port=port, eru_container_id=eru_container_id) if get_by_host_port(host, port) is None: db.session.add(node) db.session.flush() return node def delete_eru_instance(eru_container_id): i = db.session.query(RedisNode).filter( RedisNode.eru_container_id == eru_container_id).first() if i is None or i.assignee_id is not None: raise ValueError('node not free') db.session.delete(i) def get_eru_by_container_id(eru_container_id): return db.session.query(RedisNode).filter( RedisNode.eru_container_id == eru_container_id).first() def delete_free_instance(host, port): node = db.session.query(RedisNode).filter( RedisNode.host == host, RedisNode.port == port, RedisNode.assignee_id == None).with_for_update().first() if node is not None: db.session.delete(node)
How many bees live in a colony in a hive? How many flowers must a bee tap to make one pound of honey? How much honey does the worker bee make in her lifetime? How many eyes does a honeybee have? Five. Two with compound lenses, and three light sensors on top of her head. How do honeybees communicate with each other? By dancing. Honeybees do a dance which alerts other bees where nectar and pollen are located. The dance explains direction and distance. How many pounds of honey can be harvested from a single bee colony? How many Queen Bees are in a colony and how many eggs do they lay? How many Worker Bees are in a colony? How many Drone Bees are in a colony? How many bees does it take to gather a pound of honey? How long is the average honeybee life? Why is honey one of the safest foods? How much water is in nectar? How much in honey? Nectar, when gathered by bees, is about 70% water. Honey is about 17% water. How many bees are needed to pollinate one acre of fruit trees? One colony of bees (around 30,000 or more bees). Pollination success increases if there are more honey bees present at the time of peak flowering. How do bees remove the excess moisture (extra water) from nectar to make honey? Are bees more valuable for honey or pollinating crops? How far do bees fly to gather a pound of honey?
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import testtools from kmip.core import attributes from kmip.core import objects class TestAttribute(testtools.TestCase): """ Test suite for the Attribute object. """ def setUp(self): super(TestAttribute, self).setUp() def tearDown(self): super(TestAttribute, self).tearDown() def test_init(self): """ Test that an Attribute object can be created. """ objects.Attribute() def test_init_with_args(self): self.skipTest('') def test_read(self): self.skipTest('') def test_write(self): self.skipTest('') def test_repr(self): """ Test that repr can be applied to an Attribute object. """ attribute = objects.Attribute( attribute_name=objects.Attribute.AttributeName('test-name'), attribute_index=objects.Attribute.AttributeIndex(0), attribute_value=attributes.CustomAttribute('test-value') ) self.assertEqual( "Attribute(" "attribute_name=AttributeName(value='test-name'), " "attribute_index=AttributeIndex(value=0), " "attribute_value=CustomAttribute(value='test-value'))", repr(attribute) ) def test_str(self): """ Test that str can be applied to an Attribute object. """ attribute = objects.Attribute( attribute_name=objects.Attribute.AttributeName('test-name'), attribute_index=objects.Attribute.AttributeIndex(0), attribute_value=attributes.CustomAttribute('test-value') ) self.assertEqual( str({ 'attribute_name': 'test-name', 'attribute_index': '0', 'attribute_value': 'test-value' }), str(attribute) ) def test_equal_on_equal(self): self.skipTest('') def test_equal_on_not_equal_name(self): self.skipTest('') def test_equal_on_not_equal_index(self): self.skipTest('') def test_equal_on_not_equal_value(self): self.skipTest('') def test_equal_on_type_mismatch(self): self.skipTest('') def test_not_equal_on_equal(self): self.skipTest('') def test_not_equal_on_not_equal_name(self): self.skipTest('') def test_not_equal_on_not_equal_index(self): self.skipTest('') def test_not_equal_on_not_equal_value(self): self.skipTest('') def test_not_equal_on_type_mismatch(self): self.skipTest('')
Corey Smith posted this on Monday, May 14, 2012 at about 8am. The battery for the remote on my car is starting to go out. Right now, it's just intermittently having issues. The other night, after pushing the unlock button 20 times, I realized that I needed to find another solution. I decided to use the key to open the car door. As I turned the key, the car unlocked just fine. But the moment I opened the door, the alarm started up. Of course, without the remote working, I couldn't turn it off. In a state of panic, I thought that closing the door would turn it off. Then I opened the back door (yeah, I know, not my brightest moment). It makes me wonder what else we deal with in life that has such a poorly designed operating procedure. This weekend I was camping with the Boy Scouts. A good friend of mine, Jordan, pointed out the design of his tent. One of the reasons I hate using tents when I'm camping (preferring to sleep under the stars) is that I hate having to roll up the tent into the ever-too-small bags they provide. Jordan's bag for his tent was designed to fit well. It was larger than necessary, designed more like a duffle bag, and it even had extra zippers that make it possible to cinch it in after the tent is in place. Sometimes, it is the simplest of design changes that can make all the difference in the world. Simply being able to use the car key to turn off the car alarm or a larger bag for a tent can make the user experience so much better. What can companies learn from these simple experiences? What do you think you can change that will make all the difference in the world for those that use your product or services?
#!/usr/bin/env python from subprocess import call import os import time # We want to start a server from each www directory # where everything was built by the site-builder script # Make sure jupyter defaults are correct (globally) call("jupyter nbextension enable hide_input/main", shell=True) # call("jupyter nbextension enable rubberband/main", shell=True) # call("jupyter nbextension enable exercise/main", shell=True) # This could be automated, but I am not sure how well the number of # servers will scale ... so leave at 8 ... and hand build # The root user is www users = { "www" : ["vieps-pye-boss", 8080 ], "build/www1": ["vieps-pye-1", 8081 ], "build/www2": ["vieps-pye-2", 8082 ], "build/www3": ["vieps-pye-3", 8083 ], "build/www4": ["vieps-pye-4", 8084 ], "build/www5": ["vieps-pye-5", 8085 ], "build/www6": ["vieps-pye-6", 8086 ], "build/www7": ["vieps-pye-7", 8087 ], "build/www8": ["vieps-pye-8", 8088 ], "build/www9": ["vieps-pye-9", 8089 ], "build/www10": ["vieps-pye-10", 8090 ], "build/www11": ["vieps-pye-11", 8091 ], "build/www12": ["vieps-pye-12", 8092 ] } # Maybe we need to quote the password in case it has odd characters in it for dir in users.keys(): password = users[dir][0] port = users[dir][1] call( "cd {:s} && nohup jupyter notebook --port={:d} --ip='*' --no-browser\ --NotebookApp.token={:s} --NotebookApp.default_url=/files/index.html &".format(dir, port, password), shell=True ) # Don't exit while True: time.sleep(10)
Woodland Hospital is a multi-specialty hospital situated at the heart of the city at Dhankheti, Shillong, which was established on 21st March 1991 as a proprietary concern. Considering the paucity of standard medical facilities available in the state, Dr. W. Kharshiing and his wife Mrs. Margret Wahlang decided to start a nursing home offering better alternatives to the patients of the medically deficient state. It was a dream that few may have imagined would grow into its present form. The hospital was started with 28 indoor beds, two operation theatres (one major and one minor), along with the very basic diagnostic facilities like Pathology, X-Ray, Ultra-Sonography, etc. The departments were limited to General Surgery, Obstetrics & Gynecology, Pediatrics and ENT. During the year 1994, the hospital was expanded to a 56-bed capacity. A new operation theatre complex, a CT scan machine and a four-bedded Intensive Care Unit were also added to meet the increasing demands of the people. The CT scan, installed in 1995, was the first the state had ever seen. The hospital went on to expand to 120 beds during the year 2001 with the addition of an annexed building. Woodland Hospital has always strived to introduce the latest advancements in medical technology, and it introduced Magnetic Resonance Imaging (MRI), the first in the state, in 2002. At present the hospital has expanded to 160 beds with the addition of another annexed building in 2013. The hospital is the only one in the state which is well equipped with such ultra-modern facilities. In 2006 the Department of Pathology was accredited by the National Accreditation Board for Testing & Calibration Laboratories (NABL) for medical testing and was given the Certificate Number M-0213 in accordance with ISO 15189:2007. The Department of Pathology of the hospital is the first in the entire state and only the fourth institution in the entire North East India to be accredited in the field of medical blood testing by NABL. This stature is not only a recognition of our expertise and past work in the field but also a boost for the Department of Pathology to continue doing the same. Woodland Hospital was also ISO 9001:2008 certified by Bureau Veritas Certification in 2010. The objectives of Woodland Hospital's Quality Management System, adopted to improve the management of the hospital and to continually improve the system in pursuance of the policy, are: To maintain medical ethics of a high standard while attending to patients. To provide the best available treatment to patients at an affordable price and continually improve facilities, services and quality of patient care. To comply with the statutory & regulatory requirements relevant to the services rendered. To make patients feel that each human life is equally precious to the hospital management.
# -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Deleting field 'MailAccount.date_test' db.delete_column(u'core_mailaccount', 'date_test') # Adding field 'MailAccount.date_last_conn' db.add_column(u'core_mailaccount', 'date_last_conn', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2007, 1, 1, 0, 0)), keep_default=False) def backwards(self, orm): # Adding field 'MailAccount.date_test' db.add_column(u'core_mailaccount', 'date_test', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2007, 1, 1, 0, 0)), keep_default=False) # Deleting field 'MailAccount.date_last_conn' db.delete_column(u'core_mailaccount', 'date_last_conn') models = { u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, u'base.user': { 'Meta': {'object_name': 'User'}, 'data': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}), 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254', 'db_index': 'True'}), 'email_announcements': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}), 'hash_codes': ('jsonfield.fields.JSONField', [], {'default': "{'unsubscribe': '940b6cd148ca4e09b8f18d4a7b37d7a4'}", 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'register_data': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}), 'sent_emails': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', 
[], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '254', 'db_index': 'True'}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'core.corepermissions': { 'Meta': {'object_name': 'CorePermissions'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, 'core.helpcontent': { 'Meta': {'ordering': "['ordering', 'id']", 'object_name': 'HelpContent'}, 'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'content_en': ('django.db.models.fields.TextField', [], {}), 'content_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'content_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}), 'name_en': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}), 'name_nt': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}), 'ordering': ('django.db.models.fields.IntegerField', [], {'default': '0'}) }, 'core.mailaccount': { 'Meta': {'unique_together': "(('user', 'hostname', 'username'),)", 'object_name': 'MailAccount'}, 'conn_error': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), 'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'date_last_conn': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2007, 1, 1, 0, 0)'}), 'hostname': ('django.db.models.fields.CharField', [], {'max_length': '255'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_usable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), 'port': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'use_ssl': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']"}), 'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}) }, 'core.mailfeed': { 'Meta': {'object_name': 'MailFeed'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']"}) }, 'core.mailfeedrule': { 'Meta': {'object_name': 'MailFeedRule'}, 'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.MailAccount']", 'null': 'True'}), u'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'mailfeed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.MailFeed']"}) }, 'core.mailfeedruleline': { 'Meta': {'object_name': 'MailFeedRuleLine'}, 'header_field': ('django.db.models.fields.CharField', [], {'default': "u'any'", 'max_length': '10'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'match_type': ('django.db.models.fields.CharField', [], {'default': "u'contains'", 'max_length': '10'}), 'match_value': ('django.db.models.fields.TextField', [], {'max_length': '255'}), 'other_header': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'rule': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.MailFeedRule']"}) } } complete_apps = ['core']
evaporator. The brine leaving the tenth evaporator contains 5.00 wt % salts. streams entering and leaving these three evaporators.
#!/usr/bin/python """Import contributions from EasyTithe to BreezeChMS. Logs into your EasyTithe account and imports contributions into BreezeChMS using the Python Breeze API. Usage: python easytithe_importer.py \\ --username [email protected] \\ --password easytithe_password \\ --breeze_url https://demo.breezechms.com \\ --breeze_api_key 5c2d2cbacg3 \\ --start_date 01/01/2014 \\ --end_date 12/31/2014 \\ [--debug \\] [--dry_run \\] """ __author__ = '[email protected] (Alex Ortiz-Rosado)' import argparse import logging import os import re import sys from datetime import datetime from easytithe import easytithe try: from breeze import breeze except ImportError: sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir)) from breeze import breeze class Contribution(object): """An object for storing a contribution from EasyTithe.""" def __init__(self, contribution): """Instantiates a Contribution object. Args: contribution: a single contribution from EasyTithe. """ self._contribution = contribution @property def first_name(self): return self._contribution['Name'].split()[0] @property def last_name(self): return self._contribution['Name'].split()[-1] @property def full_name(self): return '%s %s' % (self.first_name, self.last_name) @property def name(self): return self._contribution['Name'] @property def date(self): formatted_date = datetime.strptime( self._contribution['Date'], '%m/%d/%Y') return formatted_date.strftime('%Y-%m-%d') @property def fund(self): return self._contribution['Fund'] @fund.setter def fund(self, fund_name): self._contribution['Fund'] = fund_name @property def amount(self): # Removes leading $ and any thousands seperator. return self._contribution['Amount'].lstrip('$').replace(',', '') @property def card_type(self): return self._contribution['Type'] @property def email_address(self): return self._contribution['Email'] @property def uid(self): return self._contribution['PersonID'] def parse_args(): parser = argparse.ArgumentParser() parser.add_argument( '-u', '--username', required=True, nargs='*', help='EasyTithe username.') parser.add_argument( '-p', '--password', required=True, nargs='*', help='EasyTithe password.') parser.add_argument( '-k', '--breeze_api_key', required=True, nargs='*', help='Breeze API key.') parser.add_argument( '-l', '--breeze_url', required=True, nargs='*', help=('Fully qualified doman name for your organizations Breeze ' 'subdomain.')) parser.add_argument( '-s', '--start_date', required=True, nargs='*', help='Start date to get contribution information for.') parser.add_argument( '-e', '--end_date', required=True, nargs='*', help='End date to get contribution information for.') parser.add_argument( '-d', '--dry_run', action='store_true', help='No-op, do not write anything.') parser.add_argument( '--debug', action='store_true', help='Print debug output.') args = parser.parse_args() return args def enable_console_logging(default_level=logging.INFO): logger = logging.getLogger() console_logger = logging.StreamHandler() console_logger.setLevel(default_level) formatter = logging.Formatter( '%(asctime)s [%(levelname)8s] %(filename)s:%(lineno)s - %(message)s ', '%Y-%m-%d %H:%M:%S') console_logger.setFormatter(formatter) logger.addHandler(console_logger) logging.Formatter() logger.setLevel(default_level) def main(): args = parse_args() if args.debug: enable_console_logging(logging.DEBUG) else: enable_console_logging() start_date = args.start_date[0] end_date = args.end_date[0] # Log into EasyTithe and get all contributions for date 
range. username = args.username[0] password = args.password[0] logging.info('Connecting to EasyTithe as [%s]', username) easytithe_api = easytithe.EasyTithe(username, password) contributions = [ Contribution(contribution) for contribution in easytithe_api.GetContributions( start_date, end_date) ] if not contributions: logging.info('No contributions found between %s and %s.', start_date, end_date) sys.exit(0) logging.info('Found %s contributions between %s and %s.', len(contributions), start_date, end_date) # Log into Breeze using API. breeze_api_key = args.breeze_api_key[0] breeze_url = args.breeze_url[0] breeze_api = breeze.BreezeApi(breeze_url, breeze_api_key, dry_run=args.dry_run) people = breeze_api.get_people() if not people: logging.info('No people in Breeze database.') sys.exit(0) logging.info('Found %d people in Breeze database.', len(people)) for person in people: person['full_name'] = '%s %s' % (person['force_first_name'].strip(), person['last_name'].strip()) for contribution in contributions: person_match = [person for person in people if re.search(person['full_name'], contribution.full_name, re.IGNORECASE) and person['full_name'] != ' '] contribution_params = { 'date': contribution.date, 'name': contribution.name, 'uid': contribution.uid, 'method': 'Credit/Debit Online', 'funds_json': ( '[{"name": "%s", "amount": "%s"}]' % (contribution.fund, contribution.amount)), 'amount': contribution.amount, 'group': contribution.date, 'processor': 'EasyTithe', 'batch_name': 'EasyTithe (%s)' % contribution.date } if not person_match: logging.warning( 'Unable to find a matching person in Breeze for [%s]. ' 'Adding contribution to Breeze as Anonymous.', contribution.full_name) breeze_api.add_contribution(**contribution_params) else: def is_duplicate_contribution(person_id, date, amount): """Predicate that checks if a contribution is a duplicate.""" return breeze_api.list_contributions( start_date=date, end_date=date, person_id=person_id, amount_min=amount, amount_max=amount) if is_duplicate_contribution(date=contribution.date, person_id=person_match[0]['id'], amount=contribution.amount): logging.warning( 'Skipping duplicate contribution for [%s] paid on [%s] ' 'for [%s]', contribution.full_name, contribution.date, contribution.amount) continue logging.info('Person:[%s]', person_match) logging.info( 'Adding contribution for [%s] to fund [%s] in the amount of ' '[%s] paid on [%s].', contribution.full_name, contribution.fund, contribution.amount, contribution.date) # Add the contribution on the matching person's Breeze profile. contribution_params['person_id'] = person_match[0]['id'] breeze_api.add_contribution(**contribution_params) if __name__ == '__main__': main()
Perfect harmony, one note at a time. DCappella (2018) – Disney’s new 7 voice all-star a cappella ensemble, first tour launches January 2019. Pitch Battle (2017) – Music Director on and off camera for this choral and a cappella singing competition on BBC One in the UK. Pitch Perfect (2012), Pitch Perfect 2 (2015) and Pitch Perfect 3 (2017) – Was music director for all vocal performances in the highest grossing music comedy series. The Sing-Off (2009 – 2014) – Music directed all singers and performances on five seasons of the hit reality show in the US (NBC) as well as Holland (SBS) and China (SZMC) and South Africa (SABC1). The Sing Off Tour (2013 – 2014) – huge national bus tours (56 concerts/cities in 63 days) featuring 3 groups from the show plus local groups. TotalVocal at Carnegie Hall (2015 – present) – over 400 singers plus celebrity guests, each year on Palm Sunday. Vocalosity (2013 – present) – 12 amazing voices, touring performing arts centers across America. The House Jacks (1991 – 2015) – Founded, directs and performs with “the original rock band without instruments” (SF Chronicle) that paved the way for the modern vocal band sound. Disney (1999 – 2005) – Created and directed American Vybe at Epcot and Groove 66 at California Adventure. Tufts University Beelzebubs (1987 – 1991) – Where it all started.
#!/usr/bin/env python from __future__ import print_function import sys, os, glob, re fixed_re = re.compile('fxtract-(\w+)_random_(10{2,5})_(\d\d)bp_patterns_times\.txt') norm_re = re.compile('fxtract-(\w+)_random_(10{2,5})_patterns_minus_small_times\.txt') impl_codes = {'ac': 'aho-corasick', 'sh': 'set horspool', 'mb': 'multi-bfam', 'wm': 'wu-manber', 'cwm': 'non-seqan wu-manber'} times_files = glob.glob("./fxtract*times.txt") data = {} for fn in times_files: match = fixed_re.search(fn) if match: impl = impl_codes[match.group(1)] pattern_type = match.group(3) pattern_count = match.group(2) else: match = norm_re.search(fn) if match: impl = impl_codes[match.group(1)] pattern_type = 'norm' pattern_count = match.group(2) else: raise ValueError("Filename %s does not fit either form" % fn) with open(fn) as fp: first_line = True seconds = 0.0 memory = 0.0 for line in fp: if first_line: first_line = False continue fields = line.rstrip().split(',') seconds += float(fields[1]) + float(fields[2]) memory += float(fields[-1]) seconds /= 10.0 memory /= 10.0 print(impl,pattern_type, pattern_count, seconds, memory, sep="\t")
The short answer is yes, if you do it right. And we do it right, with every patient! We give every patient the time and attention that they need, and limit our bariatric surgeries to 3 to 5 surgeries per day. No surgery is ever rushed. Our surgeon meets our patients every day of their stay. Performing our surgeries in a full-service hospital with every patient covered by medical complication insurance, permits us to provide the highest level of medical care, while giving our patients peace of mind. Proper cleaning and sterilization of operating rooms is not rocket science, it is basic medicine. First year medical students are taught how to clean and sterilize using the three basic methods which are (1) gas for plastic instruments, (2) liquid for medical instruments and medical implants, and (3) autoclave for metal surgical instruments. There are also approved liquids for cleaning the operating room. These methods take time, roughly 1 hour to properly clean and sterilize an operating room between patients. Bariatric surgery is not an assembly line business. The average gastric sleeve surgery takes approximately an hour in the operating room. Five patients in any given day means five hours in the operating room. Add to that one hour between patients to properly clean and sterilize the operating room, and that is an additional 4 hours: 5 + 4 = 9 hours total. That is a full day for any surgeon. To do 15 or 20 surgeries in one day with one operating room is unimaginable. The math simply does not work. Patient preparation before surgery is critically important. We strongly recommend our bariatric patients, to do at home, beginning 14 days prior to surgery, what we call a 7 Day Detox/Colon Cleanse. It is a week-long process designed to remove the putrefied mucoidal plaque that is attached to the insides of all our colons. The final week before surgery is liquids only for all patients which has the effect of shrinking the patient’s liver. This allows our patients to be 100% ready for bariatric surgery and have virtually no post op symptoms of nausea, vomiting, gas pain, etc. Having a clean colon allows patient’s liver to quickly metabolize the medications used during surgery. Two-night hospital stay with intravenous fluids for sleeve patients. This is the proper protocol for bariatric patients undergoing the gastric sleeve procedure. For bypass patients, a three-night hospital stay is required. Having the IV fluids for 48 hours post-op makes a huge difference in patient healing, and we give IV medications to prevent nausea, pain, and reduce inflammation. This promotes patient’s comfort and healing process. Certified hospital for medical tourists meets strict international standards. Trinity Medical offers our bariatric (weight loss) and plastic surgeries at Hospital Guadalajara located on 2nd Street in Tijuana, Mexico. Hospital Guadalajara is a full service “Certified Hospital for Medical Tourists” by the Mexican Federal Government agency known as “CSG.” The hospital has full radiology, emergency room, ICU, blood bank, endoscopic services, dedicated bariatric wing, licensed, and registered physicians, and nurses, and operates 24/7 as a privately-owned hospital that is open to the public. Newest generation and properly sized surgical staples. We only use the latest generation called 6 Row surgical staples, meaning three rows of staples on each side of the stomach incision. Also, we use the proper size of staples as they come in different sizes. 
This eliminates the potential for leaks often found with obsolete 4-row staples, and prevents the stomach from splitting open, which is caused by staples that are too short. Incisions sealed in the operating room with surgical glue to prevent skin infections. This may seem like a minor detail, but sealing the incisions in the operating room under sterile conditions has proven to be 100% effective in preventing skin infections post-op. This allows our patients to shower as soon as they are ready after surgery without fear of getting their incisions infected. The surgical glue will separate by itself from the incision within two weeks post-op. No extra precautions are necessary. Strict 30-day post-op diet designed to allow the patient's new stomach to heal properly. The diet is designed to allow maximum time for the patient's stomach to begin healing itself without the burden of attempting to digest food prematurely. Ingesting food prematurely will cause digestive acid and fluids to enter the patient's new stomach and slow down the healing process. Comprehensive post-op nutritional guidance. This allows our bariatric patients to reach their goal weight in the shortest time possible. After bariatric surgery, and any surgery for that matter, what you eat and what you do not eat are both critically important. Patients learn which enzymes to take with which proteins, how to deal with symptoms of sliming and frothing, how to find the best vitamins, and how to check themselves for hypothyroidism, candida overgrowth, etc. Our nutritional guidance is the most comprehensive in this business.
__author__ = 'kevin' class Store(): def __init__(self): self.available_items = [] self.customer_list = [] class InventoryItem(): def __init__(self, name, on_hand, price): self.name = name self.on_hand = on_hand self.price = price def __repr__(self): output = "" output = output + self.name output = output + " @ " + str(self.price) return output class CartLineItem(): def __init__(self, item, quantity): self.item = item self.quantity = quantity def __repr__(self): output = "" output = output + str(self.quantity) output = output + " x " output = output + str(self.item) output = output + " is " output = output + str(self.quantity * self.item.price) return output class Cart(): def __init__(self): self.selected_items = [] def get_total(self): total = 0.0 for line in self.selected_items: line_total = line.quantity * line.item.price total = total + line_total return total def __repr__(self): output = "Cart:\n" for line in self.selected_items: output = output + str(line) + "\n" output = output + "\nTotal: " + str(self.get_total()) return output class Customer(): def __init__(self, name): self.name = name self.cart = Cart() from store import * # TEST amazon = Store() amazon.available_items = { 111: InventoryItem("Farenheit 451", 10, 4.99), 222: InventoryItem("World According to Garp", 5, 4.99), 333: InventoryItem("Stranger in a Stange Land.", 1, 4.99), } amazon.customer_list = { 11: Customer("Bob"), 22: Customer("Carol"), } # ## # TEST # ## # who are you? select a customer. bob = amazon.customer_list[11] # what do you want to buy? item = amazon.available_items[333] #how many? qty = 2 #add to cart bob.cart.selected_items.append(CartLineItem(item, qty)) #what do you want to buy? item = amazon.available_items[222] #how many? qty = 3 #add to cart bob.cart.selected_items.append(CartLineItem(item, qty)) #add more? if no then checkout and print cart print bob.cart
Today I went to the Wesley House and the kids were not there because they went on a field trip for the day, so my group and I cleaned. I cleaned the closet with Emily, Mrs. Kristin, Reid, and Kate. Then Emily and I organized a whole bookshelf in 10 minutes! In the afternoon we had a free day and we went to the Sun Sphere, where we could see the whole city. Then we went to Starbucks and I got a Kavu bag at Mast General in downtown. Then we got to see the Spider-Man movie together as a group. After the movie we went to Trio for dinner and Rita's for ice cream. My favorite part of the day has to be when I saw the big accomplishment that my group made when the closet was finished. I saw God working when Emily and I finished that bookshelf in 10 minutes. Today I went to Mobile Meals. It was so much fun! Some of the people were not answering their doors, but that was OK. My favorite part of the day was when we went to see a movie in the afternoon. Spider-Man was really good. Then we went to dinner. It was so good! Then we went shopping. I saw God when Mobile Meals was successful and fun! This morning I went to Beardsley Farm where we got to pick okra, squash, and peppers. Then we pulled weeds around the farm. In the afternoon we went to the Sunsphere downtown, where you could look out over the whole city. Then we played on a really cool playground, and then we went to Market Square and shopped a bit before going to see a movie. I saw Despicable Me 3 and the movie was really good. Then we went to Trio's for dinner, where I had a hamburger. It was very good. My favorite part of the day was walking around downtown and shopping. I saw God yesterday when I saw how the farm gives 75% of the food grown there to local shelters and other places where people in need can get fresh produce like squash, okra, eggs, and honey.
#!/usr/bin/python #////////////////////////////////////////////////////////////////////////////// # # Copyright (c) 2007,2009 Daniel Adler <[email protected]>, # Tassilo Philipp <[email protected]> # # Permission to use, copy, modify, and distribute this software for any # purpose with or without fee is hereby granted, provided that the above # copyright notice and this permission notice appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. # #////////////////////////////////////////////////////////////////////////////// import sys f = file("design.cfg") sigmap = { 'B':'DCbool' , 'c':'DCchar' , 's':'DCshort' , 'i':'DCint' , 'l':'DClonglong' , 'f':'DCfloat' , 'd':'DCdouble' , 'p':'DCpointer' } apimap = { '_':'' , 's':'__declspec(stdcall)' , 'f':'__declspec(fastcall)' } id = 0 maxargs = 0 sys.stdout.write("/* auto generated by mkcase.py (on stdout) */\n"); for line in f: line = line.rstrip().lstrip() if len(line) == 0 or line[0] == '#': continue types = []; # args = len(line)-1 args = len(line) maxargs = max(maxargs, args) # api = apimap[ line[0] ] out = [ "VF",str(args),"(", str(id), "," ]; for i in xrange(0,len(line)): types += [ sigmap[ line[i] ] ] out += [ ",".join( types ), ",s_", line, ")\n" ] out = "".join(out) sys.stdout.write(out) id += 1 sys.stderr.write("/* auto generated by mkcase (on stderr) */\n"); sys.stderr.write("".join( ["#define NCASES ",str(id),"\n"] ) ) sys.stderr.write("".join( ["#define MAXARGS ",str(maxargs),"\n"] ) )
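To make the mapping concrete, here is a small, self-contained illustration of what one hypothetical design.cfg line such as "ifd" expands to with the same sigmap; the case id of 0 is arbitrary and only used for this example.

# Illustration only: expansion of one hypothetical signature line "ifd".
sigmap = {'B': 'DCbool', 'c': 'DCchar', 's': 'DCshort', 'i': 'DCint',
          'l': 'DClonglong', 'f': 'DCfloat', 'd': 'DCdouble', 'p': 'DCpointer'}

line = "ifd"
types = [sigmap[ch] for ch in line]
case = "VF%d(%d,%s,s_%s)" % (len(line), 0, ",".join(types), line)
print(case)  # -> VF3(0,DCint,DCfloat,DCdouble,s_ifd)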
Results: 1 to 1 of about 1 for Memorials in Mast Business Directory. Company from the UK offering a wide selection of custom engraved and traditional memorials in a range of unique shapes and sizes, including infant memorials in Angel or Teddy designs.
# YouCompleteMe Vim plugin config import os import ycm_core import subprocess # These are the compilation flags that will be used in case there's no # compilation database set (by default, one is not set). flags = [ '-Wall', '-Wextra', '-Werror', '-fexceptions', #'-DNDEBUG', # THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which # language to use when compiling headers. So it will guess. Badly. So C++ # headers will be compiled as C headers. You don't want that so ALWAYS specify # a "-std=<something>". # For a C project, you would set this to something like 'c99' instead of # 'c++11'. '-std=c99', # ...and the same thing goes for the magic -x option which specifies the # language that the files to be compiled are written in. This is mostly # relevant for c++ headers. # For a C project, you would set this to 'c' instead of 'c++'. '-x', 'c', ] def is_c_source_file(filename): return filename[-2:] == ".c" def is_c_header_file(filename): return filename[-2:] == ".h" def is_cxx_file(filename): return filename[-2:] in (".cpp", ".cxx") def is_cxx_header(filename): return filename[-2:] in (".hpp", ".hxx") def get_proj_flags(): output = subprocess.check_output("pkg-config --cflags --libs sdl", shell=True) return output.split() flags.extend(get_proj_flags()) # youcompleteme is calling this function to get flags # You can also set database for flags. Check: JSONCompilationDatabase.html in # clang-3.2-doc package def FlagsForFile(filename): return { 'flags': flags, 'do_cache': True }
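Two notes on the config above. First, under Python 3 subprocess.check_output returns bytes, so the values appended by get_proj_flags may need a .decode() before being mixed with the string flags. Second, if the project later produces a compile_commands.json, the static list can be replaced by a compilation database lookup; the sketch below assumes the standard ycm_core.CompilationDatabase API and a hypothetical build/ directory layout.

# Hedged sketch: prefer a compilation database when one exists.
compilation_database_folder = os.path.join(os.path.dirname(__file__), 'build')  # assumed location

database = None
if os.path.exists(os.path.join(compilation_database_folder, 'compile_commands.json')):
    database = ycm_core.CompilationDatabase(compilation_database_folder)

def FlagsForFile(filename):
    if database:
        info = database.GetCompilationInfoForFile(filename)
        if info.compiler_flags_:
            return {'flags': list(info.compiler_flags_), 'do_cache': True}
    # fall back to the static flags defined above
    return {'flags': flags, 'do_cache': True}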
Definition: Hurt someone, usually romantically, or to cause some great disappointment. Angela broke Brad's heart last year. He can't get over her. I think losing the job broke his heart. Definition: Phrase meaning that you swear you are telling the truth. I cross my heart and hope to die. She's coming tomorrow! Do you cross your heart and hope to die? I won't believe you otherwise. Definition: To be jealous or envious of someone else. I'm going to New York next week. Eat your heart out! When he hears about your promotion he'll eat his heart out. Definition: Do what you believe is right. I think you should follow your heart and move to Chicago. She said she had to follow her heart and marry Peter, even if her parents didn't approve. Definition: Usually used in the first person, this phrase means that you are completely sincere. You're the best player on the basketball team. I mean that from the bottom of my heart. I think you are a wonderful person. Really, I mean that from the bottom of my heart. Definition: Discuss the main issue, concern. I'd like to get at the heart of the matter by discussing our marketing proposals. She didn't waste any time and got right to the heart of the matter. Definition: Not do or take something completely seriously. I wish you weren't so halfhearted about this new project! Get serious! She was rather halfhearted in her attempts to find a job. Fred had a change of heart and invited the young boy into his home. I wish you would have a change of heart about Tim. He really deserves some help. Definition: Be very trustworthy and well-meaning. Peter has a heart of gold if you give him the chance to prove himself. You can trust her. She has a heart of gold. She'll never understand your position. She has a heart of stone. Don't expect any pity from me. I have a heart of stone. Definition: Have an open and honest discussion with someone. I think it's time we had a heart-to-heart talk about your grades. She called her friend Betty to have a heart-to-heart talk with her about her problems. Definition: To mean well, have the right intentions. Come on, you know John has his heart in the right place. He just made a mistake. Definition: Know something such as lines in a play, or music perfectly, to be able to perform something by memory. He knew all his lines by heart two weeks before the performance. You need to learn this piece by heart next week. Definition: Absolutely want something / Absolutely not want something. She has her heart set on winning the medal. Frank has his heart set against his promotion. There's nothing I can do to help him. Definition: To be completely surprised by something. My heart missed a beat when I heard the news that she was pregnant. She was so surprised by the announcement that her heart skipped a beat. Definition: Confess or confide in someone. I poured my heart out to Tim when I discovered that I hadn't received the promotion. I wish you would pour your heart out to someone. You need to get these feelings out. You should take heart and try your best. Take heart. The worst is over.
# Copyright (c) 2015 'cudacode' # Permission is hereby granted, free of charge, to any person obtaining a copy of this # software and associated documentation files (the "Software"), to deal in the Software # without restriction, including without limitation the rights to use, copy, modify, # merge, publish, distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to the following # conditions: # The above copyright notice and this permission notice shall be included in all copies # or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, # INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE # OR OTHER DEALINGS IN THE SOFTWARE. import Tkinter import CommandModel class CommandInterface: 'Command User Interface Methods' def __init__ (self, cmdBtns, cntBtns, cmdModel): self.cmdBtns = cmdBtns self.cntBtns = cntBtns self.cmdModel = cmdModel self.mode = 'open' def enableBtns(self, btnArray): for btn in btnArray: btn['state'] = Tkinter.NORMAL def disableBtns(self, btnArray): for btn in btnArray: btn['state'] = Tkinter.DISABLED def commandMode(self): print ('Command Mode') self.mode = 'command' self.enableBtns(self.cmdBtns) self.disableBtns(self.cntBtns) def countMode(self): print ('Count Mode') self.mode = 'count' self.enableBtns(self.cntBtns) self.disableBtns(self.cmdBtns) def openMode(self): print ('Open Mode') self.mode = 'open' self.enableBtns(self.cntBtns) self.enableBtns(self.cmdBtns) # The mode reflects the input we are looking for command | count | open (command or number) def getMode(self): return self.mode def isOpenMode(self): if self.mode == 'open': return True else: return False # def updateCmdVar(command): # cmdVar.set(command.name) def clrCallBack(self): print ('Clear Command') self.cmdModel.clear() self.commandMode() def clsCallBack(self): print ('Clear Last Command') self.cmdModel.clearLast() self.commandMode() def fireCallBack(self): print ('Fire Command') self.countMode() self.cmdModel.newCommand('fire') def goCallBack(self): print ('Go Command') self.cmdModel.execute() self.commandMode() def fwdCallBack(self): print ('Forward Command') self.countMode() self.cmdModel.newCommand('fwd') def backCallBack(self): print ('Back Command') self.countMode() self.cmdModel.newCommand('back') def leftCallBack(self): print ('Left Command') self.countMode() self.cmdModel.newCommand('left') def rightCallBack(self): print ('Right Command') self.countMode() self.cmdModel.newCommand('right') def holdCallBack(self): print ('Hold Command') self.countMode() self.cmdModel.newCommand('hold') def numCallBack(self, num): print ('Num Button', num) self.cmdModel.updateParam(num) if self.isOpenMode(): self.commandMode() else: self.openMode()
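For context, a minimal sketch of how this class might be wired to actual Tkinter buttons. Everything below is an assumption about the rest of the application: the window layout, the button labels, and the CommandModel instance passed in are placeholders, not part of the original file.

def build_ui(root, model):
    # the two button lists are shared with CommandInterface by reference,
    # so appending to them after construction still works
    cmd_btns = []
    cnt_btns = []
    ui = CommandInterface(cmd_btns, cnt_btns, model)
    for label, callback in (('Fwd', ui.fwdCallBack), ('Back', ui.backCallBack),
                            ('Left', ui.leftCallBack), ('Right', ui.rightCallBack),
                            ('Fire', ui.fireCallBack), ('Go', ui.goCallBack)):
        btn = Tkinter.Button(root, text=label, command=callback)
        btn.pack(side=Tkinter.LEFT)
        cmd_btns.append(btn)
    for num in range(10):
        btn = Tkinter.Button(root, text=str(num),
                             command=lambda n=num: ui.numCallBack(n))
        btn.pack(side=Tkinter.LEFT)
        cnt_btns.append(btn)
    ui.openMode()
    return ui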
I’ve made baked french fries. I’ve made baked zucchini chips. What do you get when you combine them together? These amazing baked parmesan zucchini fries. Sure, you could enjoy these zucchini sticks as is or perhaps with some dip of some sort. Why not dress them up with some panko crumbs? I’ve used panko to coat chicken, cauliflower, onions, and zucchini (in 2 different ways now) and I’m convinced everything tastes better if it’s covered in panko. Crunch-tacular! These are called “fries” because they resemble the shape of french fries… and that’s about it. These zucchini fries are a healthier option to the fatty, greasy fried potato version. Don’t get me wrong, I still love french fries. However, when one is on a healthy food eating kick, then this snack is a great way to satisfy a comfort food craving. Obviously this makes for a great side dish, but it’s even better as an appetizer. Or you could eat this entire plate with a little extra Parmesan cheese and a squeeze of lemon for lunch. Yes, this was my lunch today and I don’t feel one ounce of guilt for it! Line a baking sheet with tin foil, parchment paper, or a silicon mat and set it aside. Cut the ends off the zucchini and julienne the rest of it into thin sticks. Build yourself an assembly line for preparing the fries. In bowl #1, add the flour. In bowl #2, whisk together the eggs. In bowl #3, whisk together the panko crumbs, grated Parmesan cheese, garlic powder, salt, and pepper. Use the assembly line to coat the fries in stuff. Cover each zucchini stick in the flour mixture (in bowl #1). Then dip each zucchini stick in the egg mixture (in bowl #2). Finally coat each zucchini stick in the panko crumbs (in bowl #3). Transfer the zucchini sticks to the prepared baking sheet. (Optional) Spray the zucchini sticks with cooking spray. Cook them for about 20 minutes or until they are browned (flip them over half way through the cooking time). Here are some other nifty side dishes. These look amazing!!!!! Maybe with this recipe, I’ll get my husband to eat zucchini. I’m a french fry addict so this is perfect for me – I need a healthier alternative once in a while! Oh my these look good. I bet there was not a crumb left. My boys, even though there is green stuff inside would love these. Sharing of course! I would never have bet that these were baked instead of fried! They look sensational and what a great idea to bake something other than potatoes. I’m sure they are just delicious. I LOVE zucchini fries!!! Why have I never added parm before?? This sounds so good! Hope you have a wonderful weekend!! Funny because I thought everyone knew about zucchini and parmesan cheese, except me! I just love these zucchini fries. They look amazing and would be perfect for any dish. I can’t wait to try my hand at making these. Great post. want to try these, debating on how much to make. Has anyone tried re-heating these? Emily, I have not tried re-heating them. I’ve enjoyed them hot right out of the oven and at room temperature. I imagine re-heating them might be similar to re-heating french fries… if you warm them up in a microwave they’ll be hot but limp… if you warm them up in an oven they might get their crunch factor back but you also might burn parts of them trying to do so (if that makes sense). If you end up trying to re-heat them, let me know how you make out.
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class CertificateUpdateParameters(Model): """The certificate update parameters. :param certificate_policy: The management policy for the certificate. :type certificate_policy: :class:`CertificatePolicy <azure.keyvault.models.CertificatePolicy>` :param certificate_attributes: The attributes of the certificate (optional). :type certificate_attributes: :class:`CertificateAttributes <azure.keyvault.models.CertificateAttributes>` :param tags: Application specific metadata in the form of key-value pairs. :type tags: dict """ _attribute_map = { 'certificate_policy': {'key': 'policy', 'type': 'CertificatePolicy'}, 'certificate_attributes': {'key': 'attributes', 'type': 'CertificateAttributes'}, 'tags': {'key': 'tags', 'type': '{str}'}, } def __init__(self, certificate_policy=None, certificate_attributes=None, tags=None): self.certificate_policy = certificate_policy self.certificate_attributes = certificate_attributes self.tags = tags
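A brief usage sketch of this model. The attribute and tag values are placeholders; the only assumption beyond this file is that CertificateAttributes (referenced in the docstring above) accepts an enabled keyword, as it does in this generated SDK.

from azure.keyvault.models import CertificateAttributes

update_params = CertificateUpdateParameters(
    certificate_attributes=CertificateAttributes(enabled=True),
    tags={'team': 'platform', 'env': 'prod'},  # arbitrary example tags
)
# update_params can then be serialized and sent with the key vault client's
# certificate update call (not shown here).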
Slovakia took part in a total of 14 international crisis management (ICM) missions under the auspices of the European Union, the Organization for Security and Cooperation in Europe (OSCE), NATO and the UN in 2015. This stems from the Report on Slovakia’s Involvement in ICM Activities acknowledged by the cabinet on March 2. The largest number of soldiers – 159 – took part in the UN peacekeeping mission in Cyprus (UNFICYP). Slovaks also participated in the EUFOR ALTHEA military operation in Bosnia and Herzegovina, in Kosovo, and in an OSCE mission in Macedonia, as well as in missions in Ukraine, Moldova, Georgia, Tajikistan, Afghanistan. They were also part of a UN observer mission in the Middle East (UNTSO). In the past year, Slovakia also participated for the first time in the UN stabilisation mission in Haiti (MINUSTAH), to which it deployed six police officers. Active since 2004, the mission’s purpose is to aid and stabilise local public administration and reform police operations, the TASR newswire wrote. Slovakia spent more than €18 million to secure its participation in these individual operations – with most of the money earmarked from the Defence Ministry’s budget. This year, Slovakia will continue to take part in all of the missions apart from two: EUPOL in Afghanistan and EUPOL COPPS in the Palestinian territories – both under the auspices of the EU. It also plans to extend its involvement in new missions, particularly those in Ukraine but also in other parts of the world. The top priority for 2016 is the EU’s civilian mission in Ukraine (EUAM).
#!/usr/bin/env python #===----------------------------------------------------------------------===## # # The LLVM Compiler Infrastructure # # This file is dual licensed under the MIT and the University of Illinois Open # Source Licenses. See LICENSE.TXT for details. # #===----------------------------------------------------------------------===## """ sym_diff - Compare two symbol lists and output the differences. """ from argparse import ArgumentParser import sys from libcxx.sym_check import diff, util def main(): parser = ArgumentParser( description='Extract a list of symbols from a shared library.') parser.add_argument( '--names-only', dest='names_only', help='Only print symbol names', action='store_true', default=False) parser.add_argument( '--removed-only', dest='removed_only', help='Only print removed symbols', action='store_true', default=False) parser.add_argument('--only-stdlib-symbols', dest='only_stdlib', help="Filter all symbols not related to the stdlib", action='store_true', default=False) parser.add_argument('--strict', dest='strict', help="Exit with a non-zero status if any symbols " "differ", action='store_true', default=False) parser.add_argument( '-o', '--output', dest='output', help='The output file. stdout is used if not given', type=str, action='store', default=None) parser.add_argument( '--demangle', dest='demangle', action='store_true', default=False) parser.add_argument( 'old_syms', metavar='old-syms', type=str, help='The file containing the old symbol list or a library') parser.add_argument( 'new_syms', metavar='new-syms', type=str, help='The file containing the new symbol list or a library') args = parser.parse_args() old_syms_list = util.extract_or_load(args.old_syms) new_syms_list = util.extract_or_load(args.new_syms) if args.only_stdlib: old_syms_list, _ = util.filter_stdlib_symbols(old_syms_list) new_syms_list, _ = util.filter_stdlib_symbols(new_syms_list) added, removed, changed = diff.diff(old_syms_list, new_syms_list) if args.removed_only: added = {} report, is_break, is_different = diff.report_diff( added, removed, changed, names_only=args.names_only, demangle=args.demangle) if args.output is None: print(report) else: with open(args.output, 'w') as f: f.write(report + '\n') exit_code = 1 if is_break or (args.strict and is_different) else 0 sys.exit(exit_code) if __name__ == '__main__': main()
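The same comparison can also be driven from another script instead of the command line. The sketch below reuses only helpers this tool already imports (util.extract_or_load and diff.diff), so it should stay in sync with the CLI behaviour; treat it as a sketch rather than a supported API.

from libcxx.sym_check import diff, util

def symbols_removed(old_lib, new_lib):
    """Return the symbols present in old_lib but missing from new_lib."""
    old_syms = util.extract_or_load(old_lib)
    new_syms = util.extract_or_load(new_lib)
    _added, removed, _changed = diff.diff(old_syms, new_syms)
    return removed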
Spicers Guesthouse in the Hunter Valley offers a modern elegance where wine & exploration is the talk around the table. A five star hotel located in the heart of the Hunter Valley. You’ll feel a sense of relaxed luxury as soon as you arrive. The Hunter Valley has played a pivotal role in the history of Australian wine with De Bortoli and Tyrrells being two of the First Families of Wine in Australia. The region is now home to a wide variety of cellar doors and Spicers Guesthouse is perfectly located in Pokolbin, the heart of the region. The Guesthouse has been an iconic destination within the Hunter Valley for 35 years (circa 1984). Having undergone an 18 month revitalisation the retreat has an inviting, homely feel that envelopes guests once inside. The upgrade reflects a fresh contemporary uplift while the welcoming spirit of the original Guest House has been preserved. Éremo Restaurant with its modern Italian cuisine is a centre piece of the retreat, led by multi-hatted Executive Chef Cameron Matthews, the experience is exceptional. The private dining room offers a unique experience for small private occasions while the variety of larger spaces are ideal for weddings and conferences. Our Hunter Valley accommodation offers 49 guest rooms with 40 acres of grounds overlooking the surrounding mountain ranges and neighbouring vineyards. 13 Jul Chefs in the Vines. Book now. The luxuriously appointed rooms at Spicers Guesthouse feature a classic contemporary feel, drawing upon the natural colour scheme of the Hunter region as inspiration. The perfect place to discover the rich history of the Hunter Valley, Spicers Guest House is centrally located and delivers a quintessentially Australian, relaxed luxury experience. Guesthouse Bed & Breakfast Guesthouse Bed & Breakfast From $249 per night.
from django.test import TestCase from django.test import Client class RegisterTestCase(TestCase): def test_register(self): c = Client() # on success redirects to / response = c.post('/accounts/register/', { 'username': 'asdas', 'password1': 'asdasdasd12', 'password2': 'asdasdasd12' }) self.assertRedirects(response, '/') # passwords don't match response = c.post('/accounts/register/', { 'username': 'asdasdasd1', 'password1': 'asdasdasd1', 'password2': 'asdasdasd2' }) self.assertEquals(response.status_code, 200) # username is empty response = c.post('/accounts/register/', { 'username': '', 'password1': 'asdasdasd12', 'password2': 'asdasdasd12' }) self.assertEquals(response.status_code, 200) # no password response = c.post('/accounts/register/', { 'username': 'asdasdasd', 'password1': '', 'password2': '' }) self.assertEquals(response.status_code, 200) # username and password are similar response = c.post('/accounts/register/', { 'username': 'asdasdasd0', 'password1': 'asdasdasd1', 'password2': 'asdasdasd1' }) self.assertEquals(response.status_code, 200)
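One case the suite does not cover is registering the same username twice. A sketch of such a test is below; it assumes the registration form re-renders with errors (HTTP 200) on a duplicate username, which is the usual Django form behaviour but should be checked against this project's view.

    # belongs inside RegisterTestCase
    def test_register_duplicate_username(self):
        c = Client()
        c.post('/accounts/register/', {
            'username': 'dupeuser',
            'password1': 'asdasdasd12',
            'password2': 'asdasdasd12'
        })
        # a second registration with the same username should not succeed
        response = c.post('/accounts/register/', {
            'username': 'dupeuser',
            'password1': 'asdasdasd12',
            'password2': 'asdasdasd12'
        })
        self.assertEquals(response.status_code, 200)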
1.The demand for Uber rides increases in the presence of tough weather conditions such as severe storms. According to Hörcher and Graham (2018), the level of demand for a commodity is determined by factors such as ease of availability and the number of consumers in need of the merchandise/service. On the other hand, Knees (2016) reveals that demand increases with decrease in supply. In instances of severe storms, the supply of Uber vehicles is limited. Based on the conventional business laws of demand and supply; the demand of Uber services increases with decrease in supply due to the adverse weather conditions. Comparatively, the original market equilibrium would be distorted if Uber charged uniform rates. According to Hörcher and Graham (2018), the demand for transport is greatly affected by the extent of availability. The fact that the supply of Uber rides is limited during and after severe storms reveals how the equilibrium will be distorted if normal charges are maintained (figure 1). 2. The surge pricing model adopted by Uber is highly necessary as it gives the organization’s drivers the morale of operating in difficult conditions. The price surging technique plays a significant role in increasing the supply of vehicles in the event of a snow storm since the demand for transport services is very high. 3. From a generalized perspective, consumers are better off with Uber’s surge pricing model. The model caters for the consumers’ by allowing them to access rides at lower rates when the supply is high and at an increased charge with decrease in supply. 1. There is need to appreciate the fact that the supply of land is perfectly inelastic. According to Knees (2016), the surface area of land remains constant. Therefore, an increase in the population of a place leads to an increase in the demand for this scarce resource. According to Chow and Niu (2015), the demand for this natural commodity in urban areas continues to increase. According to Hörcher and Graham (2018), the exponential increase in demand is attributed to the fact that land is one of the most significant factors of production. The fact that the supply of land in urban settings cannot be increased explains its demand/ supply curves. 2. The supply of land in urban settings has continued to diminish over the years. On the other hand, the demand for this essential factor of production has seen an exponential increase. Initially, the supply of land for the entire economy was considered to be perfectly inelastic (Hegetschweiler et al., 2017). However, the fact that there is production cost attributed to the land in urban areas associated with construction and maintenance shifts its curve to somewhat elastic. 3. Unlike other taxes that lead to the reduction of the supply of the targeted commodity, taxation of land as a key factor of production does not lead to a minimization in the levels of availability of this scarce commodity. Hegetschweiler et al. (2017) explain that in real settings, imposition of taxes leads to a reduction in the extents of demand for a commodity since consumers are propelled to adopt cheaper alternatives or forego the goods/ services. Comparatively, land boasts of its inelastic supply. On the other hand, Oliner (2016) reveals that in urban settings, land parcels are finite; an aspect that hinders the processes of increasing its supply in instances of shortage. In urban settings, the demand for land is ever increasing. Oliner (2016) attributes this to the fact that urban populations are on a steady increase. 
Further, urban areas are considered to be strategically placed to foster economic and industrial productivity. Therefore, imposition of taxes does not lead to a reduction in demand for this scarce resource. Hegetschweiler et al. (2017) reveal that while taxes impact the buying power of the targeted consumers behaviour in an adverse way, the supply of land is constant. On the other hand, Hörcher and Graham (2018) demystify that such levies may also limit the extents to which prospective buyers lease or buy land. Poghosyan (2014) defines long run equilibrium as a timeframe that is sufficiently adequate to allow businesses to instill the desired changes in their aspects of production. Therefore, the long run equilibrium plays a significant role in allowing firms to undertake total adjustments. The figure below shows the long run equilibrium without government subsidies. 2. The hawkers in the case study are faced with the increasing fixed costs of entry as the greatest challenge to effective establishment of their premises. Currently, hawkers in this market are forced to incur exorbitant fixed costs in order to carry out their operations at constant marginal costs. According to Comin, Lashkari and Mestieri (2015), the hawkers’ decisions of how much they need to produce are highly dependent on the prevailing levels of demand. As revealed in the figure below, there are instances where the prevailing levels of demand for the hawkers’ merchandises is such that the targeted buyers do not show the will of paying a price that is high enough to ensure that the venture recovers its initial investment. As a consequence, Hegetschweiler et al. (2017) explain that government interventions in the form of subsidies are required to ensure that the hawkers remain productive. Therefore, government subsidies play a significant role in increasing the levels of production of firms in a market by minimizing the fixed and variable costs of these ventures. The subsidies availed by the government of Singapore to the firms in this market will play a significant role in decreasing the prices charged by hawkers in the short run. Comin, Lashkari and Mestieri (2015) explain that in the absence of interventions from the government, the prevailing levels of demand shift in such a way that consumers lack the will of paying higher prices for the hawkers to recover their initial investments. Hegetschweiler et al. (2017) reveal that while the hawkers only care for the profits they get from their engagements, they are faced with the obligation of ensuring that consumer welfare is catered for. Therefore, the government only introduces subsidies for initial investments such as rent and lease fees as a way of encouraging the hawkers to continue the supply of the required goods as shown in the figure below. In the figure above, the profitability of hawkers in the Singaporean market is hindered with the absence of government subsidies. Therefore, the price charged (P*) is considered to be lower than the average cost (b). With the consumer welfare at zero, the hawkers’ production is limited. Provision of government subsidies equal to P*abc steers the hawkers to increase their output to OQ*. 1. Chow, G. C., & Niu, L. (2015). Housing Prices in Urban C hina as Determined by Demand and Supply. Pacific Economic Review, 20(1), 1-16. 2. Comin, D. A., Lashkari, D., & Mestieri, M. (2015). Structural change with long-run income and price effects (No. w21595). National Bureau of Economic Research. 3. Hegetschweiler, K. 
T., de Vries, S., Arnberger, A., Bell, S., Brennan, M., Siter, N., ... & Hunziker, M. (2017). Linking demand and supply factors in identifying cultural ecosystem services of urban green infrastructures: A review of European studies. Urban Forestry & Urban Greening, 21, 48-59. 4. Hörcher, D., & Graham, D. J. (2018). Demand imbalances and multi-period public transport supply. Transportation Research Part B: Methodological, 108, 106-126. 5. Kneese, A. V. (2016). Transportation and urban land. Routledge. 6. Poghosyan, T. (2014). Long-run and short-run determinants of sovereign bond yields in advanced economies. Economic Systems, 38(1), 100-114.
"""Defines nodes for generic operations.""" from typing import List, Optional, Union from spins import goos from spins.goos import flows from spins.goos import optplan def cast(node: goos.ProblemGraphNode, cls_type, name: Optional[str] = None) -> goos.ProblemGraphNode: """Casts a problem graph node into another type. The problem graph node can be cast into any arbitrary type. No checks are performed whatsoever, so the resulting graph may throw an error during execution. Casting works by creating a new `CastOp` each type whose superclass is determined by `cls_type`. This `CastOp` simply performs the identity operation. In order to handle serialization/deserialization, this `CastOp` class is not registered with the context. Instead, `build_cast_op` function is registered. Usage: numeric_node = goos.cast(node, goos.Function) + 3 Args: node: Node to cast. cls_type: Class type. name: Name of the cast node. Returns: A dummy `CastOp` that has the target type `cls_type` and simply forwards the result of the underlying node. It is essentially an identity operation. """ class CastOp(cls_type): node_type = "goos.cast" # We will register a custom function `build_cast_op` instead. We need # to do this as the superclass of `CastOp` needs to be parsed from # the schema during a load. goos_no_register = True Schema = CastSchema def __init__(self, node: goos.ProblemGraphNode, target_type: str): goos.ProblemGraphNode.__init__(self, node) self._node = node def eval(self, inputs): return inputs[0] def grad(self, inputs, grad_val): return [grad_val] def __getattr__(self, name: str): """Forwards any function calls to the underlying node.""" return getattr(self._node, name) return CastOp(node, cls_type.node_type, name=name) def build_cast_op(node: goos.ProblemGraphNode, target_type: str, name: str) -> goos.ProblemGraphNode: """Constructs a cast operation from the schema. This function is registered with the context in order to perform casting operations. Args: node: The node to cast. target_type: The string name of the type to cast into. Returns: `CastOp` object. See `cast`. """ return cast(node, optplan.GLOBAL_CONTEXT_STACK.get_node(target_type).creator, name=name) class CastSchema(optplan.ProblemGraphNodeSchema, optplan.WildcardSchema): """Schema for `cast`.""" type = goos.ModelNameType("goos.cast") node = goos.ReferenceType(optplan.ProblemGraphNodeSchema) target_type = goos.types.StringType() optplan.GLOBAL_CONTEXT_STACK.register_node("goos.cast", CastSchema, build_cast_op) def rename(node: goos.ProblemGraphNode, name: str) -> goos.ProblemGraphNode: """Renames a given node. Because the name of a node is fixed upon node creation, this function serves as a mechanism to change the name of a node. It does this by creating an identity op (by casting a node into the same type) with a new name `name`. Args: node: Node to rename. name: New name of the node. Returns: Node with the same type but with name `name`. """ return cast(node, type(node), name=name) class LogPrint(goos.Action): """Prints text out to the log. This is useful for debugging purposes. 
""" node_type = "goos.log_print" def __init__( self, obj: Union[str, goos.ProblemGraphNode, List[goos.ProblemGraphNode]] = None ) -> None: super().__init__() self._obj = obj def run(self, plan: goos.OptimizationPlan) -> None: if isinstance(self._obj, str): plan.logger.info(self._obj) return if isinstance(self._obj, goos.Function): nodes = [self._obj] else: nodes = self._obj values = plan.eval_nodes(nodes) for node, val in zip(nodes, values): plan.logger.info("{}: {}".format(node._goos_name, val)) def log_print(*args, **kwargs) -> LogPrint: action = LogPrint(*args, **kwargs) goos.get_default_plan().add_action(action) return action
# -*- coding: utf-8 -*- """ Django settings for skeleton project. For more information on this file, see https://docs.djangoproject.com/en/1.7/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.7/ref/settings/ """ import os # import django.conf.global_settings as DEFAULT_SETTINGS def env_var(keyname): """ shortcut for getting environmental variables """ # To avoid commiting passwords and usernames to git and GitHub, # these settings are saved as environmental variables in a file called postactivate. # Postactivate is sourced when the virtual environment is activated. return os.environ.get('DJANGO_{keyname}'.format(keyname=keyname.upper().replace(' ', '_'))) def join_path(*paths): """ shortcut for joining paths. cross os compatible """ return os.path.normpath(os.path.join(*paths)) # CORE CONFIG ROOT_URLCONF = 'core.urls' WSGI_APPLICATION = 'core.wsgi.application' SECRET_KEY = env_var('SECRET_KEY') SITE_URL = env_var('SITE_URL') # SECURITY WARNING: don't run with debug turned on in production! DEBUG = False TEMPLATE_DEBUG = DEBUG ALLOWED_HOSTS = [env_var('SITE_URL'), ] # EMAIL CONFIGURATION EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' EMAIL_HOST = 'smtp.gmail.com' EMAIL_HOST_USER = env_var('GMAIL_USER') EMAIL_HOST_PASSWORD = env_var('GMAIL_PASSWORD') EMAIL_PORT = 587 EMAIL_USE_TLS = True # CUSTOM APPS INSTALLED_APPS = [ 'core', 'functional_tests', ] # THIRD PARTY APPS INSTALLED_APPS = [ # 'autocomplete_light', 'django_extensions', 'sorl.thumbnail', ] + INSTALLED_APPS # CORE APPS INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ] + INSTALLED_APPS # MIDDLEWARE MIDDLEWARE_CLASSES = [ 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] # POSTGRESQL DATABASE DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': env_var('DB_NAME'), 'USER': env_var('DB_USER'), 'PASSWORD': env_var('DB_PASSWORD'), 'HOST': 'localhost', 'PORT': '', # Set to empty string for default. }, } # SORL THUMBNAILS # Redis used as key-value store THUMBNAIL_KVSTORE = 'sorl.thumbnail.kvstores.redis_kvstore.KVStore' # ImageMagick ( or Ubuntu's graphicsmagick-imagemagick-compat ) THUMBNAIL_ENGINE = 'sorl.thumbnail.engines.convert_engine.Engine' # REDIS CACHE CACHES = { 'default': { 'BACKEND': 'redis_cache.RedisCache', 'LOCATION': 'localhost:6379', 'OPTIONS': { 'DB': 0, 'PARSER_CLASS': 'redis.connection.HiredisParser', 'CONNECTION_POOL_CLASS': 'redis.BlockingConnectionPool', 'CONNECTION_POOL_CLASS_KWARGS': { 'max_connections': 50, 'timeout': 20, } }, }, } # PATH CONFIGURATION # Absolute filesystem path to the Django project directory containing all # files under version control, including django files. BASE_DIR = env_var('SOURCE_FOLDER') # Absolute filesystem path to the top-level project folder containing # static folder, log folder, virtualenv and configs not under version control. PROJECT_DIR = join_path(BASE_DIR, '..') # This is where static files are collected to by django and served by the webserver. STATIC_ROOT = join_path(PROJECT_DIR, 'static') STATIC_URL = '/static/' # User uploaded files location. 
MEDIA_ROOT = join_path(PROJECT_DIR, 'static', 'media') MEDIA_URL = '/media/' # Extra path to collect static assest such as javascript and css STATICFILES_DIRS = [join_path(BASE_DIR, 'assets'), ] # Project wide fixtures to be loaded into database. FIXTURE_DIRS = [join_path(BASE_DIR, 'fixtures'), ] # Project wide django template files TEMPLATE_DIRS = [join_path(BASE_DIR, 'templates'), ] STATICFILES_FINDERS = [ 'django.contrib.staticfiles.finders.AppDirectoriesFinder', 'django.contrib.staticfiles.finders.FileSystemFinder', ] # INTERNATIONALIZATION AND TRANSLATION LANGUAGE_CODE = 'nb_NO' # Norwegian bokmål TIME_ZONE = 'Europe/Oslo' USE_I18N = True # Internationalisation (string translation) USE_L10N = True # Localisation (numbers and stuff) USE_TZ = True # Use timezone LOCALE_PATHS = [join_path(BASE_DIR, 'translation'), ] # Django puts generated translation files here. # LOGGING DEBUG_LOG_FILE = join_path(PROJECT_DIR, 'logs', 'django-debug.log') LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'class': 'django.utils.log.AdminEmailHandler' }, 'stream_to_console': { 'level': 'DEBUG', 'class': 'logging.StreamHandler' }, 'write_to_logfile': { 'level': 'DEBUG', 'class': 'logging.FileHandler', 'filename': DEBUG_LOG_FILE, }, }, 'loggers': { 'django.request': { 'handlers': ['mail_admins', 'stream_to_console', ], 'level': 'DEBUG', 'propagate': True, }, 'debug': { 'handlers': ['stream_to_console', 'write_to_logfile', ], 'level': 'DEBUG', 'propagate': False, }, }, }
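Since every secret above comes from a DJANGO_-prefixed environment variable (sourced from postactivate), a small fail-fast check at the bottom of the settings module can make a missing variable obvious at startup instead of surfacing later as a None. This is a sketch, not part of the original settings; the key list simply mirrors the variables already used above.

REQUIRED_ENV_KEYS = ['SECRET_KEY', 'SITE_URL', 'DB_NAME', 'DB_USER',
                     'DB_PASSWORD', 'SOURCE_FOLDER']
_missing = [key for key in REQUIRED_ENV_KEYS if not env_var(key)]
if _missing:
    raise RuntimeError('Missing environment variables: ' +
                       ', '.join('DJANGO_' + key for key in _missing))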
Run Richmond needs you. Saturday, January 27th, will be our next Group Run and we need it to be a big one. Madison Living magazine is sending someone out to do an article on Run Richmond. They want to get some photos for the piece, and for great photos we need lots of runners and walkers. Actually, we always need and want lots of you; it just makes things more fun. If you didn’t notice, it has been pretty cold the last couple of months. Overall we still keep pretty consistent numbers at group runs, but when the temperatures drop into the single digits, our numbers tend to do the same. We understand and expect things to be down when it is that cold. Some people choose the treadmill and others just wait to see if the sun will pop out and make it more bearable for a quick run. Last Saturday, though, things started to change. The weather went back up to what many would call near perfect conditions, and with that we had a great turnout. Let’s carry that momentum into next Saturday. I would love for us to average twice as many people at group runs as we do. Not because there is any financial advantage to Run Richmond from that (because there isn’t). Bigger numbers are great because that means the community is growing. That means more people are making connections with others that are on a similar path, and they can support or encourage each other. At Run Richmond, we really want to connect people. We want to connect people so that when a person wakes up one morning and doesn’t feel like getting out of bed to get that workout in, they have people that will know if they skip. Sometimes that is all the motivation it takes to keep you going. We all struggle from time to time to stay motivated. The greatest motivation aid I have experienced is community. I want to show the readers of Madison Living what type of community of runners and walkers we have in Richmond. See you Saturday at 8:00 am!
from topaz.module import ClassDef from topaz.modules.ffi import type as ffitype from topaz.modules.ffi.pointer import W_PointerObject from topaz.modules.ffi.dynamic_library import coerce_dl_symbol from topaz.modules.ffi.function_type import W_FunctionTypeObject from topaz.objects.moduleobject import W_FunctionObject from rpython.rtyper.lltypesystem import rffi, lltype for i, name in enumerate(ffitype.type_names): globals()[name] = i class W_FFIFunctionObject(W_PointerObject): classdef = ClassDef('FFI::Function', W_PointerObject.classdef) _immutable_fields_ = ['ptr'] @classdef.singleton_method('allocate') def singleton_method_allocate(self, space, args_w): return W_FFIFunctionObject(space) def __init__(self, space): W_PointerObject.__init__(self, space) self.ptr = lltype.nullptr(rffi.VOIDP.TO) @classdef.method('initialize') def method_initialize(self, space, w_ret_type, w_arg_types, w_handle=None, w_options=None): self.w_info = space.send(space.getclassfor(W_FunctionTypeObject), 'new', [w_ret_type, w_arg_types, w_options]) self.setup(space, w_handle) def setup(self, space, w_handle): self.ptr = (coerce_dl_symbol(space, w_handle) if w_handle else lltype.nullptr(rffi.VOIDP.TO)) @classdef.method('call') def method_call(self, space, args_w, block=None): return self.w_info.invoke(space, self.ptr, args_w, block) @classdef.method('attach', name='str') def method_attach(self, space, w_lib, name): w_lib.attach_method(space, name, W_MethodAdapter(name, self)) class W_MethodAdapter(W_FunctionObject): _immutable_fields_ = ['name', 'w_ffi_func'] def __init__(self, name, w_ffi_func): W_FunctionObject.__init__(self, name) self.name = name self.w_ffi_func = w_ffi_func def call(self, space, w_receiver, args_w, block): return space.send(self.w_ffi_func, 'call', args_w, block)
Finance Minister Ravi Karunanayaka said taxes on imported rice will be increased from 35 rupees to 50 rupees per kilogram. The tax will be increased with effect from midnight today. The minister stated that the decision was taken because there is an excess stock of rice in the country and also to protect local farmers.
# -*- coding: utf-8 -*-
import requests
import re
import math
import codecs
import json
import time
import datetime

global user_list, login_data, processed_user
#user_list = ['wang-wei-63','allenzhang','kentzhu']
#user_list = ['yangbo','baiya','junyu','wang-xiao-chuan']
#user_list = ['wangxing','gongjun','zhouyuan']
user_list = ['alexzhang2015']
#user_list = ['hi-id','shek']
#user_list = ['commando','chen-hao-84','jin-chen-yu']
processed_user = []
#login_data = {'email': '[email protected]', 'password': 'yourpassword','rememberme':'y',}
login_data = {'email': '[email protected]', 'password': '','rememberme':'y',}

# requests session object: it keeps cookies across requests automatically
s = requests.session()

# auto-login.
def login(login_data):
    s.post('http://www.zhihu.com/login', login_data)

def load_more(user, data):
    # request URL used when loading more followees
    click_url = 'http://www.zhihu.com/node/ProfileFolloweesListV2'
    # headers
    header_info = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1581.2 Safari/537.36 Test',
        'Host': 'www.zhihu.com',
        'Origin': 'http://www.zhihu.com',
        'Connection': 'keep-alive',
        'Referer': 'http://www.zhihu.com/people/' + user + '/followees',
        'Content-Type': 'application/x-www-form-urlencoded',
    }
    # form data.
    try:
        raw_hash_id = re.findall('hash_id(.*)', data)
        hash_id = raw_hash_id[0][14:46]  # hash_id
        raw_xsrf = re.findall('xsrf(.*)', data)
        _xsrf = raw_xsrf[0][9:-3]  # _xsrf
        # how many "load more" requests are needed (20 followees per page)
        load_more_times = int(re.findall('<strong>(.*?)</strong>', data)[2]) / 20

        # ---- key module ----
        # write the first 20 users' info
        user_id = re.compile('zhihu.com/people/(.*?)"').findall(data)
        user_id = user_id[1:len(user_id)]
        answers = re.findall('answers" class="zg-link-gray-normal">(.*?) ', data)
        asks = re.findall('asks" class="zg-link-gray-normal">(.*?) ', data)
        followers = re.findall('followers" class="zg-link-gray-normal">(.*?) ', data)
        goods = re.findall('class="zg-link-gray-normal">(.*?) ', data)
        goods = goods[3:len(goods):4]

        fp.write('user_id,followers,asks,answers,goods')
        fp.write('\r\n')
        write_file(user_id, followers, asks, answers, goods)

        # write the remaining users' info
        for i in range(1, load_more_times + 1):
            t_start = time.localtime()[5]
            offsets = i * 20
            # the endpoint returns JSON, so build the parameters with the json module
            params = json.dumps({"hash_id": hash_id, "order_by": "created", "offset": offsets,})
            payload = {"method": "next", "params": params, "_xsrf": _xsrf,}
            # debug and improve robustness. Date: 2014-02-12
            try:
                r = s.post(click_url, data=payload, headers=header_info, timeout=18)
            except:
                # retry with a longer timeout if the response took too long
                print 'repost'
                r = s.post(click_url, data=payload, headers=header_info, timeout=60)
            # parse info.
            user_id = re.findall('href=\\\\"\\\\/people\\\\/(.*?)\\\\', r.text)
            user_id = user_id[0:len(user_id):5]
            user_info = re.findall('class=\\\\"zg-link-gray-normal\\\\">(.*?) ', r.text)
            followers = user_info[0:len(user_info):4]
            asks = user_info[1:len(user_info):4]
            answers = user_info[2:len(user_info):4]
            goods = user_info[3:len(user_info):4]
            #print user_id,followers,asks,answers,goods
            #print len(user_id),len(followers),len(asks),len(answers),len(goods)
            write_file(user_id, followers, asks, answers, goods)
            #print user_id
            t_elapsed = time.localtime()[5] - t_start
            #print 'got:',offsets,'users.','elapsed: ',t_elapsed,'s.\n'
    except:
        print 'something happened'

def main():
    global fp, user_list
    # login
    s.post('http://www.zhihu.com/login', login_data)
    #for user in user_list:
    while(len(user_list) > 0):
        user = user_list.pop()
        print 'crawling ' + user + '\'s followees...\n'
        print 'queue size: ' + str(len(user_list)) + '\n'
        # output file for this user's followees
        fp = codecs.open(user + '.txt', 'w', 'utf-8')
        url = 'http://www.zhihu.com/people/' + user + '/followees'
        # jump to the user's followees page
        r = s.get(url)
        data = r.text
        #print data
        load_more(user, data)
        # de-duplicate ids: drop users that have already been processed
        processed_user.append(user)
        user_list = list(set(user_list) - set(processed_user))
        time.sleep(1)

def write_file(user_id, followers, asks, answers, goods):
    global fp
    for i in range(len(user_id)):
        fp.write(user_id[i].strip() + ',' + followers[i].strip() + ',' + asks[i].strip() + ',' +
                 answers[i].strip() + ',' + goods[i].strip())
        user_list.append(user_id[i].strip())
        fp.write('\r\n')

if __name__ == '__main__':
    start_time = datetime.datetime.now()
    main()
    end_time = datetime.datetime.now()
    print 'total time consumption: ' + str((end_time - start_time).seconds) + 's'
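As a side note, the manual comma-joining in write_file breaks if a scraped field ever contains a comma; the csv module handles quoting for free. A hedged sketch of that alternative (file naming and call sites left as they are above):

import csv

def write_rows(path, rows):
    """rows: iterable of (user_id, followers, asks, answers, goods) tuples."""
    with open(path, 'ab') as out:   # binary mode for the Python 2 csv module
        writer = csv.writer(out)
        for row in rows:
            writer.writerow([field.strip() for field in row])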
Mrs. Clark, 69, passed away at home on Wednesday, February 6, 2019, following a lengthy illness. She was born in Midland, Michigan on July 20, 1949 to the late James & Florence (Stewart) Moore. She married Lyn Clark on May 1, 1977 in Gladwin and was a lifelong resident of the Gladwin area. She was a former member of the Gladwin United Methodist Church and retired from the Gladwin School District as an office clerk at Gladwin Junior High School. She is survived by her husband of 41 years; their sons Brook Hillman and Brett & Kim Hillman; 3 grandchildren; and 2 brothers, Robert & Jane Moore and Michael & Judy Moore. She was preceded in death by a sister, Carol Wykoff. Funeral services will be held Saturday, February 9, 2019 at 11 am at Sisson Funeral Home with Pastor Sam Wilson presiding, with burial in Highland Cemetery. The family will be present at the funeral home on Friday from 4 to 8 pm and Saturday from 10 am until the time of services to greet family and friends. Those wishing an expression of sympathy are asked to consider MidMichigan Hospice.
# # This source file is part of appleseed. # Visit https://appleseedhq.net/ for additional information and resources. # # This software is released under the MIT license. # # Copyright (c) 2019 Jonathan Dent, The appleseedhq Organization # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # import bpy from ..utils import util class ASMESH_PT_export(bpy.types.Panel): bl_label = "appleseed Export" COMPAT_ENGINES = {'APPLESEED_RENDER'} bl_space_type = "PROPERTIES" bl_region_type = "WINDOW" bl_context = "data" @classmethod def poll(cls, context): renderer = context.scene.render return renderer.engine == 'APPLESEED_RENDER' and context.object is not None and context.object.type in {'MESH', 'CURVE', 'SURFACE'} def draw(self, context): layout = self.layout layout.use_property_split = True asr_obj = context.object.data.appleseed layout.prop(asr_obj, "export_normals", text="Export Normals") layout.prop(asr_obj, "export_uvs", text="Export UVs") layout.prop(asr_obj, "smooth_tangents", text="Calculate Smooth Tangents") def register(): util.safe_register_class(ASMESH_PT_export) def unregister(): util.safe_unregister_class(ASMESH_PT_export)
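util.safe_register_class is not shown in this file. A common shape for such a helper is sketched below; this is an assumption about its intent (tolerating re-registration when the add-on is reloaded), not the actual appleseed-blenderseed implementation.

def safe_register_class(cls):
    try:
        bpy.utils.register_class(cls)
    except ValueError:
        # already registered, e.g. after an add-on reload; ignore
        pass

def safe_unregister_class(cls):
    try:
        bpy.utils.unregister_class(cls)
    except RuntimeError:
        # was never registered; ignore
        pass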
Discover the best Joomla developers in South Korea with the assistance of WADLINE. On this page, you will find the Leaders Matrix – the special tools that is aimed to make the searching process easier. Check portfolios, profiles, and clients’ reviews of any firm you like to make sure they are competent enough to carry on your project. Discover a reliable long-term partner in the KR region with the help of WADLINE listing. Just browse the top companies proficient in Joomla and see their ratings – it will help you to make the right decision about future cooperation.
from .buffer import Buffer __all__ = ['TextureCube'] class TextureCube: ''' A Texture is an OpenGL object that contains one or more images that all have the same image format. A texture can be used in two ways. It can be the source of a texture access from a Shader, or it can be used as a render target. .. Note:: ModernGL enables ``GL_TEXTURE_CUBE_MAP_SEAMLESS`` globally to ensure filtering will be done across the cube faces. A Texture3D object cannot be instantiated directly, it requires a context. Use :py:meth:`Context.texture_cube` to create one. ''' __slots__ = ['mglo', '_size', '_components', '_dtype', '_glo', 'ctx', 'extra'] def __init__(self): self.mglo = None #: Internal representation for debug purposes only. self._size = (None, None) self._components = None self._dtype = None self._glo = None self.ctx = None #: The context this object belongs to self.extra = None #: Any - Attribute for storing user defined objects raise TypeError() def __repr__(self): return '<TextureCube: %d>' % self.glo def __eq__(self, other): return type(self) is type(other) and self.mglo is other.mglo @property def size(self): ''' tuple: The size of the texture. ''' return self._size @property def components(self) -> int: ''' int: The number of components of the texture. ''' return self._components @property def dtype(self) -> str: ''' str: Data type. ''' return self._dtype @property def filter(self): ''' tuple: The minification and magnification filter for the texture. (Default ``(moderngl.LINEAR. moderngl.LINEAR)``) Example:: texture.filter == (moderngl.NEAREST, moderngl.NEAREST) texture.filter == (moderngl.LINEAR_MIPMAP_LINEAR, moderngl.LINEAR) texture.filter == (moderngl.NEAREST_MIPMAP_LINEAR, moderngl.NEAREST) texture.filter == (moderngl.LINEAR_MIPMAP_NEAREST, moderngl.NEAREST) ''' return self.mglo.filter @filter.setter def filter(self, value): self.mglo.filter = value @property def swizzle(self) -> str: ''' str: The swizzle mask of the texture (Default ``'RGBA'``). The swizzle mask change/reorder the ``vec4`` value returned by the ``texture()`` function in a GLSL shaders. This is represented by a 4 character string were each character can be:: 'R' GL_RED 'G' GL_GREEN 'B' GL_BLUE 'A' GL_ALPHA '0' GL_ZERO '1' GL_ONE Example:: # Alpha channel will always return 1.0 texture.swizzle = 'RGB1' # Only return the red component. The rest is masked to 0.0 texture.swizzle = 'R000' # Reverse the components texture.swizzle = 'ABGR' ''' return self.mglo.swizzle @swizzle.setter def swizzle(self, value): self.mglo.swizzle = value @property def anisotropy(self): ''' float: Number of samples for anisotropic filtering (Default ``1.0``). The value will be clamped in range ``1.0`` and ``ctx.max_anisotropy``. Any value greater than 1.0 counts as a use of anisotropic filtering:: # Disable anisotropic filtering texture.anisotropy = 1.0 # Enable anisotropic filtering suggesting 16 samples as a maximum texture.anisotropy = 16.0 ''' return self.mglo.anisotropy @anisotropy.setter def anisotropy(self, value): self.mglo.anisotropy = value @property def glo(self) -> int: ''' int: The internal OpenGL object. This values is provided for debug purposes only. ''' return self._glo def read(self, face, *, alignment=1) -> bytes: ''' Read a face from the cubemap as bytes into system memory. Args: face (int): The face to read. Keyword Args: alignment (int): The byte alignment of the pixels. 
''' return self.mglo.read(face, alignment) def read_into(self, buffer, face, *, alignment=1, write_offset=0) -> None: ''' Read a face from the cubemap texture. Read a face of the cubemap into a bytearray or :py:class:`~moderngl.Buffer`. The advantage of reading into a :py:class:`~moderngl.Buffer` is that pixel data does not need to travel all the way to system memory:: # Reading pixel data into a bytearray data = bytearray(4) texture = ctx.texture_cube((2, 2), 1) texture.read_into(data, 0) # Reading pixel data into a buffer data = ctx.buffer(reserve=4) texture = ctx.texture_cube((2, 2), 1) texture.read_into(data, 0) Args: buffer (bytearray): The buffer that will receive the pixels. face (int): The face to read. Keyword Args: alignment (int): The byte alignment of the pixels. write_offset (int): The write offset. ''' if type(buffer) is Buffer: buffer = buffer.mglo return self.mglo.read_into(buffer, face, alignment, write_offset) def write(self, face, data, viewport=None, *, alignment=1) -> None: ''' Update the content of the texture. Update the content of a face in the cubemap from byte data or a moderngl :py:class:`~moderngl.Buffer`:: # Write data from a moderngl Buffer data = ctx.buffer(reserve=4) texture = ctx.texture_cube((2, 2), 1) texture.write(0, data) # Write data from bytes data = b'\xff\xff\xff\xff' texture = ctx.texture_cube((2, 2), 1) texture.write(0, data) Args: face (int): The face to update. data (bytes): The pixel data. viewport (tuple): The viewport. Keyword Args: alignment (int): The byte alignment of the pixels. ''' if type(data) is Buffer: data = data.mglo self.mglo.write(face, data, viewport, alignment) def use(self, location=0) -> None: ''' Bind the texture to a texture unit. The location is the texture unit we want to bind the texture. This should correspond with the value of the ``samplerCube`` uniform in the shader because samplers read from the texture unit we assign to them:: # Define what texture unit our two samplerCube uniforms should represent program['texture_a'] = 0 program['texture_b'] = 1 # Bind textures to the texture units first_texture.use(location=0) second_texture.use(location=1) Args: location (int): The texture location/unit. ''' self.mglo.use(location) def release(self) -> None: ''' Release the ModernGL object. ''' self.mglo.release()
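A small end-to-end usage sketch assembled from the docstrings above: create a cubemap, fill all six faces, and bind it for a samplerCube uniform. The standalone context and the uniform name are assumptions made for the example.

import moderngl

ctx = moderngl.create_standalone_context()
size = 2
texture = ctx.texture_cube((size, size), 4)        # 4 components per pixel (RGBA)

face_pixels = b'\xff\x00\x00\xff' * (size * size)  # an opaque red face
for face in range(6):
    texture.write(face, face_pixels)

texture.use(location=0)
# program['skybox'] = 0   # assumed samplerCube uniform name in your shader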
IT Directors spend 70% of their time maintaining and managing their systems; in fact, much of their working day is spent visiting the Server Room to check that everything is still running properly. Our proposition was: “With Microsoft’s Management Tools, your visits to the Server Room will be a thing of the past”. To dramatise this, we sent the IT Director a duster: they’d be visiting the Server Room so rarely, they’d need to do a bit of dusting each time they went there. As you’ll see, for added impact (and to ensure the IT Director kept our message), we printed the letter on the duster itself. This didn’t pick up the Grand Prix at the Cannes Lions Direct. It did, however, pick up a bronze.
import os import sys import hashlib from pprint import pprint from getpass import getpass from datetime import datetime import magic from PIL import Image import pytz from django.conf import settings from django.core.management.base import CommandError import django.db.utils from django.contrib.auth import authenticate from django.contrib.auth.models import User from maio import lib from maio.models import File from maio.models import Media from maio.models import Tag from ._base import MaioBaseCommand class Command(MaioBaseCommand): args = '<None>' help = 'Scrapes images in one or more directories.' def add_arguments(self, parser): # Positional arguments parser.add_argument('username', nargs=1, type=str, metavar='USERNAME', help=('Set owner of each file to %(metavar)s')) parser.add_argument('directories', nargs='+', type=str, metavar='DIRECTORIES', help=('Scrape images from %(metavar)s')) # Optional arguments parser.add_argument('--tag-directories', '-td', action='store_true', help=('Tag the supplied directories for Image Tags. Does not tag ' 'subdirectories under the supplied directories.')) parser.add_argument('--tag-subfolders', '-ts', action='store_true', help=('Tag the subdirectories under the supplied directories. Does ' 'not tag the supplied directories.')) parser.add_argument('--tag-filenames', '-tf', action='store_true', help=('Tag the file names of the files.')) parser.add_argument('--tag-all', '-ta', action='store_true', help=('Equivalent to options -td -ts -tf.')) parser.add_argument('--tags', '-t', nargs='*', type=str, metavar='TAGS', help=('Tag each image with %(metavar)s')) def handle(self, *args, **kwargs): # shortcut settings MAIO_SETTINGS = settings.MAIO_SETTINGS TIME_ZONE = settings.TIME_ZONE TZ = pytz.timezone(TIME_ZONE) def mk_md5_dir(md5, root): ''' Make MD5 directory. Makes 3 directories under ``root``, where the first 2 characters in ``md5`` make up the first directory, the next 2 characters make up the second directory, and the next 2 characters make up the third directory. :returns: (str) The path to final directory structure. ''' if len(md5) == 32: part1 = md5[0:2] part2 = md5[2:4] part3 = md5[4:6] dirtomake = os.path.join(root, part1, part2, part3) if os.path.isdir(dirtomake): return dirtomake if os.path.isdir(root): os.makedirs(dirtomake) return dirtomake def is_image(mimetype): ''' Check to see if the supplied ``mimetype`` is an image, according to ``lib.MIMETYPE_EXTENSION``. :returns: (bool) True if ``mimetype`` is an image, False otherwise. 
''' for key, value in lib.MIMETYPE_EXTENSION['image'].items(): if mimetype == key: return True return False # grab the username from the options username = kwargs.get('username', [''])[0] # tag flag options tag_directories = kwargs.get('tag_directories') tag_subfolders = kwargs.get('tag_subfolders') tag_filenames = kwargs.get('tag_filenames') tag_all = kwargs.get('tag_all') tags_input = kwargs.get('tags') if tags_input is None: tags_input = [] # validate user try: user = User.objects.get(username=username) except User.DoesNotExist: self.out('User {} does not exist.'.format(username)) self.out('') exit(1) # grab the directories to scrape images from directories = kwargs.get('directories', []) # walk through each directory and make sure each one exists for directory in directories: if not os.path.isdir(directory): self.out('"{}" is not a valid directory.'.format(directory)) self.out('') exit(1) # set up mime Magic mime = magic.Magic(mime=True) # walk through each directory, scraping images for directory in directories: # for each directory's files for root, subdirs, files in os.walk(directory): # for each file for filename in files: # read and join the file path try: file_path = os.path.join(root, filename) except UnicodeDecodeError as e: if "'utf8' codec can't decode bytes" in str(e): self.out('Error processing {}, unreadable file name ...' .format(os.path.join(root, filename))) continue raise except: raise self.out('For file: {}'.format(file_path)) # get mime type try: mimetype = mime.from_file(file_path) except IOError as e: if 'File does not exist' in str(e): self.out('file {} does not exist'.format(file_path)) continue raise except UnicodeDecodeError as e: self.out('File: ', file_path) raise except: raise # make sure the mimetype is an image rather than something that looks like # an image if not is_image(mimetype): self.out('{} is not a valid image type... (it might be a symlink?)' .format(file_path)) continue # get file extension filename_ext = lib.MIMETYPE_EXTENSION['image'].get(mimetype, [[]])[0] if filename_ext in (None, [[]]): try: filename_ext = file_path.split('.')[-1] except IndexError: filename_ext = '' else: filename_ext = filename_ext.replace('.', '') # get name of file name_of_file = file_path.split(os.sep)[-1].split('.')[:-1][0] # stat file sfile = os.stat(file_path) # obtain modified datetime mtime = TZ.localize(datetime.fromtimestamp(sfile.st_mtime)) # open image and check to make sure its width and height values # are within configured constraints try: im = Image.open(file_path) # this next if/elif statement looks backwards, but it's not. # we want to continue onto the next image if the user chooses # 'and' and the image's x 'or' y are out of range. if MAIO_SETTINGS.get('images_min_inclusive', 'and').lower() == 'and': if im.size[0] < MAIO_SETTINGS.get('images_min_width', 200) or \ im.size[1] < MAIO_SETTINGS.get('images_min_height', 200): continue # we want to continue onto the next image if the user chooses # 'or' and the image's x 'and' y are both out of range. 
elif MAIO_SETTINGS.get('images_min_inclusive', 'and').lower() == 'or': if im.size[0] < MAIO_SETTINGS.get('images_min_width', 200) and \ im.size[1] < MAIO_SETTINGS.get('images_min_height', 200): continue im.load() if im.mode != 'RGB': im = im.convert('RGB') except IOError as e: self.out('Error in processing {} ...'.format(file_path)) if 'truncated' in str(e): self.out('truncated') continue elif 'cannot identify image file' in str(e): self.out('invalid image file') continue elif 'No such file or directory' in str(e): self.out('no such file or directory') continue else: raise # get md5sum hash of the image md5sum = hashlib.md5() with open(file_path, 'rb') as fh: md5sum.update(fh.read()) md5 = md5sum.hexdigest() # make filestore directories if they don't exist if not os.path.isdir(MAIO_SETTINGS['filestore_directory']): # ./filestore os.mkdir(MAIO_SETTINGS['filestore_directory']) if not os.path.isdir(os.path.join(MAIO_SETTINGS['filestore_directory'], 'media')): # ./filestore/media os.mkdir(os.path.join(MAIO_SETTINGS['filestore_directory'], 'media')) if not os.path.isdir(os.path.join(MAIO_SETTINGS['filestore_directory'], 'media', 'images')): # ./filestore/media/images os.mkdir(os.path.join(MAIO_SETTINGS['filestore_directory'], 'media', 'images')) if not os.path.isdir(os.path.join(MAIO_SETTINGS['filestore_directory'], 'thumbnails')): # ./filestore/thumbnails os.mkdir(os.path.join(MAIO_SETTINGS['filestore_directory'], 'thumbnails')) # process and save image to filestore img_dir = mk_md5_dir(md5, os.path.join(MAIO_SETTINGS['filestore_directory'], 'media', 'images')) img = os.path.join(img_dir, md5+'.'+filename_ext) if not os.path.isfile(img): # copy the image to the filestore if it doesn't already exist im.save(img) file_path = img width = im.width height = im.height comment = str(im.info) # process and save thumbnail to filestore thumb_dir = mk_md5_dir(md5, os.path.join(MAIO_SETTINGS['filestore_directory'], 'thumbnails')) thumb = os.path.join(thumb_dir, md5+'.jpg') if not os.path.isfile(thumb): im.thumbnail((300, 300), Image.ANTIALIAS) im.save(thumb) tn_width = im.width tn_height = im.height # close image file im.close() # process tag flags tags = [] + tags_input if tag_all or tag_directories: # split up a directory such as # C:\Users\bob\Pictures # into # ['C:', 'Users', 'bob', 'Pictures'] dir_tags = directory.split(os.sep) # don't include Windows drive letters # ['C:', 'Users', 'bob', 'Pictures'] # into # ['Users', 'bob', 'Pictures'] if ':' in dir_tags[0]: dir_tags = dir_tags[1:] tags.extend(dir_tags) if tag_all or tag_subfolders: # split up a directory such as # C:\Users\bob\Pictures\foo\bar\baz\beef.jpg # where the supplied directory is # C:\Users\bob\Pictures # into # ['foo', 'bar', 'baz', 'beef.jpg'] dir_tags = os.path.join(root, filename) \ .replace(directory+os.sep, '') \ .split(os.sep) # don't include the filename for this option # ['foo', 'bar', 'baz'] dir_tags = dir_tags[:-1] tags.extend(dir_tags) if tag_all or tag_filenames: # split up a directory such as # C:\Users\bob\Pictures\foo\bar\baz\beef.jpg # where the supplied directory is # C:\Users\bob\Pictures # into # ['foo', 'bar', 'baz', 'beef.jpg'] dir_tags = os.path.join(root, filename) \ .replace(directory+os.sep, '') \ .split(os.sep) # get only the filename for this option # ['beef.jpg'] dir_tags = dir_tags[-1:] # split the filename from the extension # ['beef', 'jpg'] dir_tags = dir_tags[0].split('.') tags.extend(dir_tags) # save file information to the database try: filestore = MAIO_SETTINGS['filestore_directory'] thumb_uri = 
thumb.replace(filestore, '').replace(os.sep, '/') file_uri = file_path.replace(filestore, '').replace(os.sep, '/') self.out(md5sum.hexdigest(), mimetype, filename, file_path, file_uri, thumb_uri) if filename_ext == '': filename_ext = None f = File(**{'md5sum': md5, 'original_name': name_of_file, 'original_extension': filename_ext, 'mime_type': mimetype, 'size': sfile.st_size, 'mtime': sfile.st_mtime, 'tn_path': thumb_uri, 'file_path': file_uri, 'date_modified': mtime,}) f.save() except django.db.utils.IntegrityError: f = File.objects.get(md5sum=md5) if sfile.st_mtime == f.mtime: self.out('Already in database and up-to-date, skipping {}' .format(file_path)) self.out('') continue else: self.out('Already in database and not up-to-date, processing {}' .format(file_path)) f.mtime = sfile.st_mtime f.date_modified = mtime f.save() except: raise media = Media(**{'file': f, 'media_type': 'image', 'owner': user, 'name': name_of_file, 'extension': filename_ext, 'mtime': sfile.st_mtime, 'size': sfile.st_size, 'date_modified': mtime, 'width': width, 'height': height, 'tn_width': tn_width, 'tn_height': tn_height, 'length': None, 'comment': comment}) media.save() self.out('Tagging tags {} to "{}.{}"' .format(tags, name_of_file, filename_ext)) self.out('') # tag the image for tag in tags: # get DB tag if exists, if not create it try: tag = tag.lower() t = Tag.objects.get(name=tag) except Tag.DoesNotExist: t = Tag(name=tag) t.save() # now associate the tag to the ImageFile media.tags.add(t)
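As a side note on the filestore layout this command produces, here is a minimal sketch of the MD5-based directory sharding performed by mk_md5_dir above. The file name and filestore root are made-up examples, not values from the command itself.

import hashlib
import os

def md5_shard_path(md5, root):
    # The first three pairs of hex characters become three directory levels,
    # mirroring mk_md5_dir() in the command above.
    return os.path.join(root, md5[0:2], md5[2:4], md5[4:6])

# Hypothetical input file, purely for illustration.
with open('photo.jpg', 'rb') as fh:
    digest = hashlib.md5(fh.read()).hexdigest()

print(md5_shard_path(digest, os.path.join('filestore', 'media', 'images')))
# e.g. filestore/media/images/3a/7f/c2 -- the image itself is saved there as <md5>.<ext>,
# with the thumbnail stored under filestore/thumbnails using the same scheme.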
How to automate password-based SSH logins?
sshd has a configuration directive, "Match", that allows other directives to be overridden under specific conditions.
- The SSH setup itself is OK (albeit not optimal), since I've already been able to connect to this box over SSH by other means.
==> In the "Proxy Server" settings, just set the connection timeout to a larger value. Problem solved!
There's no support for /var/log/btmp enabled by default on a Debian Wheezy box. This is, in fact, a reported bug on the Debian Bug Tracking System; according to it, this functionality was re-enabled starting with Debian GNU/Linux ssh package version 1:6.6p1-1.
The Diffie-Hellman algorithm lets two parties talking over a public channel agree on a shared secret key.
- Block the attacker with a firewall rule.
fatal: bad ownership or modes for chroot directory component "/path/to/chrooted/folder/"
==> It's basically a matter of "chmod 755" along the chrooted path.
*** At upload time: "open for write: permission denied"
- The chroot dir is a jail the user may read, but not write. If users are granted write permission on the chroot dir, OpenSSH regards it as insecure and denies the connection.
- The user can see /home/folder1/folder2/upload, can enter it, and can read/write there.
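For the first question in these notes (automating a password-based SSH login), one option is to drive the login from Python instead of wrapping the ssh binary. The sketch below uses paramiko; the library choice, hostname, username and password are all assumptions for illustration and are not part of the original notes.

import paramiko

client = paramiko.SSHClient()
# Accept unknown host keys for this sketch; load known_hosts explicitly in real use.
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())

# Placeholder credentials -- replace with real values (or better, key-based auth).
client.connect('server.example.com', username='bob', password='secret', timeout=10)

stdin, stdout, stderr = client.exec_command('uptime')
print(stdout.read().decode())

client.close()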
# localrepo.py - read/write repository class for mercurial # # Copyright 2005-2007 Matt Mackall <[email protected]> # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. from node import bin, hex, nullid, nullrev, short from i18n import _ import repo, changegroup, subrepo import changelog, dirstate, filelog, manifest, context import lock, transaction, store, encoding import util, extensions, hook, error import match as match_ import merge as merge_ import tags as tags_ from lock import release import weakref, stat, errno, os, time, inspect propertycache = util.propertycache class localrepository(repo.repository): capabilities = set(('lookup', 'changegroupsubset', 'branchmap')) supported = set('revlogv1 store fncache shared'.split()) def __init__(self, baseui, path=None, create=0): repo.repository.__init__(self) self.root = os.path.realpath(path) self.path = os.path.join(self.root, ".hg") self.origroot = path self.opener = util.opener(self.path) self.wopener = util.opener(self.root) self.baseui = baseui self.ui = baseui.copy() try: self.ui.readconfig(self.join("hgrc"), self.root) extensions.loadall(self.ui) except IOError: pass if not os.path.isdir(self.path): if create: if not os.path.exists(path): os.mkdir(path) os.mkdir(self.path) requirements = ["revlogv1"] if self.ui.configbool('format', 'usestore', True): os.mkdir(os.path.join(self.path, "store")) requirements.append("store") if self.ui.configbool('format', 'usefncache', True): requirements.append("fncache") # create an invalid changelog self.opener("00changelog.i", "a").write( '\0\0\0\2' # represents revlogv2 ' dummy changelog to prevent using the old repo layout' ) reqfile = self.opener("requires", "w") for r in requirements: reqfile.write("%s\n" % r) reqfile.close() else: raise error.RepoError(_("repository %s not found") % path) elif create: raise error.RepoError(_("repository %s already exists") % path) else: # find requirements requirements = set() try: requirements = set(self.opener("requires").read().splitlines()) except IOError, inst: if inst.errno != errno.ENOENT: raise for r in requirements - self.supported: raise error.RepoError(_("requirement '%s' not supported") % r) self.sharedpath = self.path try: s = os.path.realpath(self.opener("sharedpath").read()) if not os.path.exists(s): raise error.RepoError( _('.hg/sharedpath points to nonexistent directory %s') % s) self.sharedpath = s except IOError, inst: if inst.errno != errno.ENOENT: raise self.store = store.store(requirements, self.sharedpath, util.opener) self.spath = self.store.path self.sopener = self.store.opener self.sjoin = self.store.join self.opener.createmode = self.store.createmode self.sopener.options = {} # These two define the set of tags for this repository. _tags # maps tag name to node; _tagtypes maps tag name to 'global' or # 'local'. (Global tags are defined by .hgtags across all # heads, and local tags are defined in .hg/localtags.) They # constitute the in-memory cache of tags. 
self._tags = None self._tagtypes = None self._branchcache = None # in UTF-8 self._branchcachetip = None self.nodetagscache = None self.filterpats = {} self._datafilters = {} self._transref = self._lockref = self._wlockref = None @propertycache def changelog(self): c = changelog.changelog(self.sopener) if 'HG_PENDING' in os.environ: p = os.environ['HG_PENDING'] if p.startswith(self.root): c.readpending('00changelog.i.a') self.sopener.options['defversion'] = c.version return c @propertycache def manifest(self): return manifest.manifest(self.sopener) @propertycache def dirstate(self): return dirstate.dirstate(self.opener, self.ui, self.root) def __getitem__(self, changeid): if changeid is None: return context.workingctx(self) return context.changectx(self, changeid) def __contains__(self, changeid): try: return bool(self.lookup(changeid)) except error.RepoLookupError: return False def __nonzero__(self): return True def __len__(self): return len(self.changelog) def __iter__(self): for i in xrange(len(self)): yield i def url(self): return 'file:' + self.root def hook(self, name, throw=False, **args): return hook.hook(self.ui, self, name, throw, **args) tag_disallowed = ':\r\n' def _tag(self, names, node, message, local, user, date, extra={}): if isinstance(names, str): allchars = names names = (names,) else: allchars = ''.join(names) for c in self.tag_disallowed: if c in allchars: raise util.Abort(_('%r cannot be used in a tag name') % c) for name in names: self.hook('pretag', throw=True, node=hex(node), tag=name, local=local) def writetags(fp, names, munge, prevtags): fp.seek(0, 2) if prevtags and prevtags[-1] != '\n': fp.write('\n') for name in names: m = munge and munge(name) or name if self._tagtypes and name in self._tagtypes: old = self._tags.get(name, nullid) fp.write('%s %s\n' % (hex(old), m)) fp.write('%s %s\n' % (hex(node), m)) fp.close() prevtags = '' if local: try: fp = self.opener('localtags', 'r+') except IOError: fp = self.opener('localtags', 'a') else: prevtags = fp.read() # local tags are stored in the current charset writetags(fp, names, None, prevtags) for name in names: self.hook('tag', node=hex(node), tag=name, local=local) return try: fp = self.wfile('.hgtags', 'rb+') except IOError: fp = self.wfile('.hgtags', 'ab') else: prevtags = fp.read() # committed tags are stored in UTF-8 writetags(fp, names, encoding.fromlocal, prevtags) if '.hgtags' not in self.dirstate: self.add(['.hgtags']) m = match_.exact(self.root, '', ['.hgtags']) tagnode = self.commit(message, user, date, extra=extra, match=m) for name in names: self.hook('tag', node=hex(node), tag=name, local=local) return tagnode def tag(self, names, node, message, local, user, date): '''tag a revision with one or more symbolic names. names is a list of strings or, when adding a single tag, names may be a string. if local is True, the tags are stored in a per-repository file. otherwise, they are stored in the .hgtags file, and a new changeset is committed with the change. 
keyword arguments: local: whether to store tags in non-version-controlled file (default False) message: commit message to use if committing user: name of user to use if committing date: date tuple to use if committing''' for x in self.status()[:5]: if '.hgtags' in x: raise util.Abort(_('working copy of .hgtags is changed ' '(please commit .hgtags manually)')) self.tags() # instantiate the cache self._tag(names, node, message, local, user, date) def tags(self): '''return a mapping of tag to node''' if self._tags is None: (self._tags, self._tagtypes) = self._findtags() return self._tags def _findtags(self): '''Do the hard work of finding tags. Return a pair of dicts (tags, tagtypes) where tags maps tag name to node, and tagtypes maps tag name to a string like \'global\' or \'local\'. Subclasses or extensions are free to add their own tags, but should be aware that the returned dicts will be retained for the duration of the localrepo object.''' # XXX what tagtype should subclasses/extensions use? Currently # mq and bookmarks add tags, but do not set the tagtype at all. # Should each extension invent its own tag type? Should there # be one tagtype for all such "virtual" tags? Or is the status # quo fine? alltags = {} # map tag name to (node, hist) tagtypes = {} tags_.findglobaltags(self.ui, self, alltags, tagtypes) tags_.readlocaltags(self.ui, self, alltags, tagtypes) # Build the return dicts. Have to re-encode tag names because # the tags module always uses UTF-8 (in order not to lose info # writing to the cache), but the rest of Mercurial wants them in # local encoding. tags = {} for (name, (node, hist)) in alltags.iteritems(): if node != nullid: tags[encoding.tolocal(name)] = node tags['tip'] = self.changelog.tip() tagtypes = dict([(encoding.tolocal(name), value) for (name, value) in tagtypes.iteritems()]) return (tags, tagtypes) def tagtype(self, tagname): ''' return the type of the given tag. result can be: 'local' : a local tag 'global' : a global tag None : tag does not exist ''' self.tags() return self._tagtypes.get(tagname) def tagslist(self): '''return a list of tags ordered by revision''' l = [] for t, n in self.tags().iteritems(): try: r = self.changelog.rev(n) except: r = -2 # sort to the beginning of the list if unknown l.append((r, t, n)) return [(t, n) for r, t, n in sorted(l)] def nodetags(self, node): '''return the tags associated with a node''' if not self.nodetagscache: self.nodetagscache = {} for t, n in self.tags().iteritems(): self.nodetagscache.setdefault(n, []).append(t) return self.nodetagscache.get(node, []) def _branchtags(self, partial, lrev): # TODO: rename this function? 
tiprev = len(self) - 1 if lrev != tiprev: self._updatebranchcache(partial, lrev + 1, tiprev + 1) self._writebranchcache(partial, self.changelog.tip(), tiprev) return partial def branchmap(self): '''returns a dictionary {branch: [branchheads]}''' tip = self.changelog.tip() if self._branchcache is not None and self._branchcachetip == tip: return self._branchcache oldtip = self._branchcachetip self._branchcachetip = tip if oldtip is None or oldtip not in self.changelog.nodemap: partial, last, lrev = self._readbranchcache() else: lrev = self.changelog.rev(oldtip) partial = self._branchcache self._branchtags(partial, lrev) # this private cache holds all heads (not just tips) self._branchcache = partial return self._branchcache def branchtags(self): '''return a dict where branch names map to the tipmost head of the branch, open heads come before closed''' bt = {} for bn, heads in self.branchmap().iteritems(): tip = heads[-1] for h in reversed(heads): if 'close' not in self.changelog.read(h)[5]: tip = h break bt[bn] = tip return bt def _readbranchcache(self): partial = {} try: f = self.opener("branchheads.cache") lines = f.read().split('\n') f.close() except (IOError, OSError): return {}, nullid, nullrev try: last, lrev = lines.pop(0).split(" ", 1) last, lrev = bin(last), int(lrev) if lrev >= len(self) or self[lrev].node() != last: # invalidate the cache raise ValueError('invalidating branch cache (tip differs)') for l in lines: if not l: continue node, label = l.split(" ", 1) partial.setdefault(label.strip(), []).append(bin(node)) except KeyboardInterrupt: raise except Exception, inst: if self.ui.debugflag: self.ui.warn(str(inst), '\n') partial, last, lrev = {}, nullid, nullrev return partial, last, lrev def _writebranchcache(self, branches, tip, tiprev): try: f = self.opener("branchheads.cache", "w", atomictemp=True) f.write("%s %s\n" % (hex(tip), tiprev)) for label, nodes in branches.iteritems(): for node in nodes: f.write("%s %s\n" % (hex(node), label)) f.rename() except (IOError, OSError): pass def _updatebranchcache(self, partial, start, end): # collect new branch entries newbranches = {} for r in xrange(start, end): c = self[r] newbranches.setdefault(c.branch(), []).append(c.node()) # if older branchheads are reachable from new ones, they aren't # really branchheads. 
Note checking parents is insufficient: # 1 (branch a) -> 2 (branch b) -> 3 (branch a) for branch, newnodes in newbranches.iteritems(): bheads = partial.setdefault(branch, []) bheads.extend(newnodes) if len(bheads) < 2: continue newbheads = [] # starting from tip means fewer passes over reachable while newnodes: latest = newnodes.pop() if latest not in bheads: continue minbhrev = self[min([self[bh].rev() for bh in bheads])].node() reachable = self.changelog.reachable(latest, minbhrev) bheads = [b for b in bheads if b not in reachable] newbheads.insert(0, latest) bheads.extend(newbheads) partial[branch] = bheads def lookup(self, key): if isinstance(key, int): return self.changelog.node(key) elif key == '.': return self.dirstate.parents()[0] elif key == 'null': return nullid elif key == 'tip': return self.changelog.tip() n = self.changelog._match(key) if n: return n if key in self.tags(): return self.tags()[key] if key in self.branchtags(): return self.branchtags()[key] n = self.changelog._partialmatch(key) if n: return n # can't find key, check if it might have come from damaged dirstate if key in self.dirstate.parents(): raise error.Abort(_("working directory has unknown parent '%s'!") % short(key)) try: if len(key) == 20: key = hex(key) except: pass raise error.RepoLookupError(_("unknown revision '%s'") % key) def local(self): return True def join(self, f): return os.path.join(self.path, f) def wjoin(self, f): return os.path.join(self.root, f) def rjoin(self, f): return os.path.join(self.root, util.pconvert(f)) def file(self, f): if f[0] == '/': f = f[1:] return filelog.filelog(self.sopener, f) def changectx(self, changeid): return self[changeid] def parents(self, changeid=None): '''get list of changectxs for parents of changeid''' return self[changeid].parents() def filectx(self, path, changeid=None, fileid=None): """changeid can be a changeset revision, node, or tag. 
fileid can be a file revision or node.""" return context.filectx(self, path, changeid, fileid) def getcwd(self): return self.dirstate.getcwd() def pathto(self, f, cwd=None): return self.dirstate.pathto(f, cwd) def wfile(self, f, mode='r'): return self.wopener(f, mode) def _link(self, f): return os.path.islink(self.wjoin(f)) def _filter(self, filter, filename, data): if filter not in self.filterpats: l = [] for pat, cmd in self.ui.configitems(filter): if cmd == '!': continue mf = match_.match(self.root, '', [pat]) fn = None params = cmd for name, filterfn in self._datafilters.iteritems(): if cmd.startswith(name): fn = filterfn params = cmd[len(name):].lstrip() break if not fn: fn = lambda s, c, **kwargs: util.filter(s, c) # Wrap old filters not supporting keyword arguments if not inspect.getargspec(fn)[2]: oldfn = fn fn = lambda s, c, **kwargs: oldfn(s, c) l.append((mf, fn, params)) self.filterpats[filter] = l for mf, fn, cmd in self.filterpats[filter]: if mf(filename): self.ui.debug("filtering %s through %s\n" % (filename, cmd)) data = fn(data, cmd, ui=self.ui, repo=self, filename=filename) break return data def adddatafilter(self, name, filter): self._datafilters[name] = filter def wread(self, filename): if self._link(filename): data = os.readlink(self.wjoin(filename)) else: data = self.wopener(filename, 'r').read() return self._filter("encode", filename, data) def wwrite(self, filename, data, flags): data = self._filter("decode", filename, data) try: os.unlink(self.wjoin(filename)) except OSError: pass if 'l' in flags: self.wopener.symlink(data, filename) else: self.wopener(filename, 'w').write(data) if 'x' in flags: util.set_flags(self.wjoin(filename), False, True) def wwritedata(self, filename, data): return self._filter("decode", filename, data) def transaction(self): tr = self._transref and self._transref() or None if tr and tr.running(): return tr.nest() # abort here if the journal already exists if os.path.exists(self.sjoin("journal")): raise error.RepoError( _("abandoned transaction found - run hg recover")) # save dirstate for rollback try: ds = self.opener("dirstate").read() except IOError: ds = "" self.opener("journal.dirstate", "w").write(ds) self.opener("journal.branch", "w").write(self.dirstate.branch()) renames = [(self.sjoin("journal"), self.sjoin("undo")), (self.join("journal.dirstate"), self.join("undo.dirstate")), (self.join("journal.branch"), self.join("undo.branch"))] tr = transaction.transaction(self.ui.warn, self.sopener, self.sjoin("journal"), aftertrans(renames), self.store.createmode) self._transref = weakref.ref(tr) return tr def recover(self): lock = self.lock() try: if os.path.exists(self.sjoin("journal")): self.ui.status(_("rolling back interrupted transaction\n")) transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn) self.invalidate() return True else: self.ui.warn(_("no interrupted transaction available\n")) return False finally: lock.release() def rollback(self): wlock = lock = None try: wlock = self.wlock() lock = self.lock() if os.path.exists(self.sjoin("undo")): self.ui.status(_("rolling back last transaction\n")) transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn) util.rename(self.join("undo.dirstate"), self.join("dirstate")) try: branch = self.opener("undo.branch").read() self.dirstate.setbranch(branch) except IOError: self.ui.warn(_("Named branch could not be reset, " "current branch still is: %s\n") % encoding.tolocal(self.dirstate.branch())) self.invalidate() self.dirstate.invalidate() self.destroyed() else: 
self.ui.warn(_("no rollback information available\n")) finally: release(lock, wlock) def invalidate(self): for a in "changelog manifest".split(): if a in self.__dict__: delattr(self, a) self._tags = None self._tagtypes = None self.nodetagscache = None self._branchcache = None # in UTF-8 self._branchcachetip = None def _lock(self, lockname, wait, releasefn, acquirefn, desc): try: l = lock.lock(lockname, 0, releasefn, desc=desc) except error.LockHeld, inst: if not wait: raise self.ui.warn(_("waiting for lock on %s held by %r\n") % (desc, inst.locker)) # default to 600 seconds timeout l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")), releasefn, desc=desc) if acquirefn: acquirefn() return l def lock(self, wait=True): '''Lock the repository store (.hg/store) and return a weak reference to the lock. Use this before modifying the store (e.g. committing or stripping). If you are opening a transaction, get a lock as well.)''' l = self._lockref and self._lockref() if l is not None and l.held: l.lock() return l l = self._lock(self.sjoin("lock"), wait, None, self.invalidate, _('repository %s') % self.origroot) self._lockref = weakref.ref(l) return l def wlock(self, wait=True): '''Lock the non-store parts of the repository (everything under .hg except .hg/store) and return a weak reference to the lock. Use this before modifying files in .hg.''' l = self._wlockref and self._wlockref() if l is not None and l.held: l.lock() return l l = self._lock(self.join("wlock"), wait, self.dirstate.write, self.dirstate.invalidate, _('working directory of %s') % self.origroot) self._wlockref = weakref.ref(l) return l def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist): """ commit an individual file as part of a larger transaction """ fname = fctx.path() text = fctx.data() flog = self.file(fname) fparent1 = manifest1.get(fname, nullid) fparent2 = fparent2o = manifest2.get(fname, nullid) meta = {} copy = fctx.renamed() if copy and copy[0] != fname: # Mark the new revision of this file as a copy of another # file. This copy data will effectively act as a parent # of this new revision. If this is a merge, the first # parent will be the nullid (meaning "look up the copy data") # and the second one will be the other parent. For example: # # 0 --- 1 --- 3 rev1 changes file foo # \ / rev2 renames foo to bar and changes it # \- 2 -/ rev3 should have bar with all changes and # should record that bar descends from # bar in rev2 and foo in rev1 # # this allows this merge to succeed: # # 0 --- 1 --- 3 rev4 reverts the content change from rev2 # \ / merging rev3 and rev4 should use bar@rev2 # \- 2 --- 4 as the merge base # cfname = copy[0] crev = manifest1.get(cfname) newfparent = fparent2 if manifest2: # branch merge if fparent2 == nullid or crev is None: # copied on remote side if cfname in manifest2: crev = manifest2[cfname] newfparent = fparent1 # find source in nearest ancestor if we've lost track if not crev: self.ui.debug(" %s: searching for copy revision for %s\n" % (fname, cfname)) for ancestor in self['.'].ancestors(): if cfname in ancestor: crev = ancestor[cfname].filenode() break self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev))) meta["copy"] = cfname meta["copyrev"] = hex(crev) fparent1, fparent2 = nullid, newfparent elif fparent2 != nullid: # is one parent an ancestor of the other? 
fparentancestor = flog.ancestor(fparent1, fparent2) if fparentancestor == fparent1: fparent1, fparent2 = fparent2, nullid elif fparentancestor == fparent2: fparent2 = nullid # is the file changed? if fparent2 != nullid or flog.cmp(fparent1, text) or meta: changelist.append(fname) return flog.add(text, meta, tr, linkrev, fparent1, fparent2) # are just the flags changed during merge? if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags(): changelist.append(fname) return fparent1 def commit(self, text="", user=None, date=None, match=None, force=False, editor=False, extra={}): """Add a new revision to current repository. Revision information is gathered from the working directory, match can be used to filter the committed files. If editor is supplied, it is called to get a commit message. """ def fail(f, msg): raise util.Abort('%s: %s' % (f, msg)) if not match: match = match_.always(self.root, '') if not force: vdirs = [] match.dir = vdirs.append match.bad = fail wlock = self.wlock() try: p1, p2 = self.dirstate.parents() wctx = self[None] if (not force and p2 != nullid and match and (match.files() or match.anypats())): raise util.Abort(_('cannot partially commit a merge ' '(do not specify files or patterns)')) changes = self.status(match=match, clean=force) if force: changes[0].extend(changes[6]) # mq may commit unchanged files # check subrepos subs = [] for s in wctx.substate: if match(s) and wctx.sub(s).dirty(): subs.append(s) if subs and '.hgsubstate' not in changes[0]: changes[0].insert(0, '.hgsubstate') # make sure all explicit patterns are matched if not force and match.files(): matched = set(changes[0] + changes[1] + changes[2]) for f in match.files(): if f == '.' or f in matched or f in wctx.substate: continue if f in changes[3]: # missing fail(f, _('file not found!')) if f in vdirs: # visited directory d = f + '/' for mf in matched: if mf.startswith(d): break else: fail(f, _("no match under directory!")) elif f not in self.dirstate: fail(f, _("file not tracked!")) if (not force and not extra.get("close") and p2 == nullid and not (changes[0] or changes[1] or changes[2]) and self[None].branch() == self['.'].branch()): return None ms = merge_.mergestate(self) for f in changes[0]: if f in ms and ms[f] == 'u': raise util.Abort(_("unresolved merge conflicts " "(see hg resolve)")) cctx = context.workingctx(self, (p1, p2), text, user, date, extra, changes) if editor: cctx._text = editor(self, cctx, subs) edited = (text != cctx._text) # commit subs if subs: state = wctx.substate.copy() for s in subs: self.ui.status(_('committing subrepository %s\n') % s) sr = wctx.sub(s).commit(cctx._text, user, date) state[s] = (state[s][0], sr) subrepo.writestate(self, state) # Save commit message in case this transaction gets rolled back # (e.g. by a pretxncommit hook). Leave the content alone on # the assumption that the user will use the same editor again. msgfile = self.opener('last-message.txt', 'wb') msgfile.write(cctx._text) msgfile.close() try: ret = self.commitctx(cctx, True) except: if edited: msgfn = self.pathto(msgfile.name[len(self.root)+1:]) self.ui.write( _('note: commit message saved in %s\n') % msgfn) raise # update dirstate and mergestate for f in changes[0] + changes[1]: self.dirstate.normal(f) for f in changes[2]: self.dirstate.forget(f) self.dirstate.setparents(ret) ms.reset() return ret finally: wlock.release() def commitctx(self, ctx, error=False): """Add a new revision to current repository. Revision information is passed via the context argument. 
""" tr = lock = None removed = ctx.removed() p1, p2 = ctx.p1(), ctx.p2() m1 = p1.manifest().copy() m2 = p2.manifest() user = ctx.user() xp1, xp2 = p1.hex(), p2 and p2.hex() or '' self.hook("precommit", throw=True, parent1=xp1, parent2=xp2) lock = self.lock() try: tr = self.transaction() trp = weakref.proxy(tr) # check in files new = {} changed = [] linkrev = len(self) for f in sorted(ctx.modified() + ctx.added()): self.ui.note(f + "\n") try: fctx = ctx[f] new[f] = self._filecommit(fctx, m1, m2, linkrev, trp, changed) m1.set(f, fctx.flags()) except OSError, inst: self.ui.warn(_("trouble committing %s!\n") % f) raise except IOError, inst: errcode = getattr(inst, 'errno', errno.ENOENT) if error or errcode and errcode != errno.ENOENT: self.ui.warn(_("trouble committing %s!\n") % f) raise else: removed.append(f) # update manifest m1.update(new) removed = [f for f in sorted(removed) if f in m1 or f in m2] drop = [f for f in removed if f in m1] for f in drop: del m1[f] mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(), p2.manifestnode(), (new, drop)) # update changelog self.changelog.delayupdate() n = self.changelog.add(mn, changed + removed, ctx.description(), trp, p1.node(), p2.node(), user, ctx.date(), ctx.extra().copy()) p = lambda: self.changelog.writepending() and self.root or "" self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1, parent2=xp2, pending=p) self.changelog.finalize(trp) tr.close() if self._branchcache: self.branchtags() self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2) return n finally: del tr lock.release() def destroyed(self): '''Inform the repository that nodes have been destroyed. Intended for use by strip and rollback, so there's a common place for anything that has to be done after destroying history.''' # XXX it might be nice if we could take the list of destroyed # nodes, but I don't see an easy way for rollback() to do that # Ensure the persistent tag cache is updated. Doing it now # means that the tag cache only has to worry about destroyed # heads immediately after a strip/rollback. That in turn # guarantees that "cachetip == currenttip" (comparing both rev # and node) always means no nodes have been added or destroyed. # XXX this is suboptimal when qrefresh'ing: we strip the current # head, refresh the tag cache, then immediately add a new head. # But I think doing it this way is necessary for the "instant # tag cache retrieval" case to work. tags_.findglobaltags(self.ui, self, {}, {}) def walk(self, match, node=None): ''' walk recursively through the directory tree or a given changeset, finding all files matched by the match function ''' return self[node].walk(match) def status(self, node1='.', node2=None, match=None, ignored=False, clean=False, unknown=False): """return status of files between two nodes or node and working directory If node1 is None, use the first dirstate parent instead. If node2 is None, compare node1 with working directory. 
""" def mfmatches(ctx): mf = ctx.manifest().copy() for fn in mf.keys(): if not match(fn): del mf[fn] return mf if isinstance(node1, context.changectx): ctx1 = node1 else: ctx1 = self[node1] if isinstance(node2, context.changectx): ctx2 = node2 else: ctx2 = self[node2] working = ctx2.rev() is None parentworking = working and ctx1 == self['.'] match = match or match_.always(self.root, self.getcwd()) listignored, listclean, listunknown = ignored, clean, unknown # load earliest manifest first for caching reasons if not working and ctx2.rev() < ctx1.rev(): ctx2.manifest() if not parentworking: def bad(f, msg): if f not in ctx1: self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg)) match.bad = bad if working: # we need to scan the working dir subrepos = ctx1.substate.keys() s = self.dirstate.status(match, subrepos, listignored, listclean, listunknown) cmp, modified, added, removed, deleted, unknown, ignored, clean = s # check for any possibly clean files if parentworking and cmp: fixup = [] # do a full compare of any files that might have changed for f in sorted(cmp): if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f) or ctx1[f].cmp(ctx2[f].data())): modified.append(f) else: fixup.append(f) if listclean: clean += fixup # update dirstate for files that are actually clean if fixup: try: # updating the dirstate is optional # so we don't wait on the lock wlock = self.wlock(False) try: for f in fixup: self.dirstate.normal(f) finally: wlock.release() except error.LockError: pass if not parentworking: mf1 = mfmatches(ctx1) if working: # we are comparing working dir against non-parent # generate a pseudo-manifest for the working dir mf2 = mfmatches(self['.']) for f in cmp + modified + added: mf2[f] = None mf2.set(f, ctx2.flags(f)) for f in removed: if f in mf2: del mf2[f] else: # we are comparing two revisions deleted, unknown, ignored = [], [], [] mf2 = mfmatches(ctx2) modified, added, clean = [], [], [] for fn in mf2: if fn in mf1: if (mf1.flags(fn) != mf2.flags(fn) or (mf1[fn] != mf2[fn] and (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))): modified.append(fn) elif listclean: clean.append(fn) del mf1[fn] else: added.append(fn) removed = mf1.keys() r = modified, added, removed, deleted, unknown, ignored, clean [l.sort() for l in r] return r def add(self, list): wlock = self.wlock() try: rejected = [] for f in list: p = self.wjoin(f) try: st = os.lstat(p) except: self.ui.warn(_("%s does not exist!\n") % f) rejected.append(f) continue if st.st_size > 10000000: self.ui.warn(_("%s: files over 10MB may cause memory and" " performance problems\n" "(use 'hg revert %s' to unadd the file)\n") % (f, f)) if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)): self.ui.warn(_("%s not added: only files and symlinks " "supported currently\n") % f) rejected.append(p) elif self.dirstate[f] in 'amn': self.ui.warn(_("%s already tracked!\n") % f) elif self.dirstate[f] == 'r': self.dirstate.normallookup(f) else: self.dirstate.add(f) return rejected finally: wlock.release() def forget(self, list): wlock = self.wlock() try: for f in list: if self.dirstate[f] != 'a': self.ui.warn(_("%s not added!\n") % f) else: self.dirstate.forget(f) finally: wlock.release() def remove(self, list, unlink=False): if unlink: for f in list: try: util.unlink(self.wjoin(f)) except OSError, inst: if inst.errno != errno.ENOENT: raise wlock = self.wlock() try: for f in list: if unlink and os.path.exists(self.wjoin(f)): self.ui.warn(_("%s still exists!\n") % f) elif self.dirstate[f] == 'a': self.dirstate.forget(f) elif f not in 
self.dirstate: self.ui.warn(_("%s not tracked!\n") % f) else: self.dirstate.remove(f) finally: wlock.release() def undelete(self, list): manifests = [self.manifest.read(self.changelog.read(p)[0]) for p in self.dirstate.parents() if p != nullid] wlock = self.wlock() try: for f in list: if self.dirstate[f] != 'r': self.ui.warn(_("%s not removed!\n") % f) else: m = f in manifests[0] and manifests[0] or manifests[1] t = self.file(f).read(m[f]) self.wwrite(f, t, m.flags(f)) self.dirstate.normal(f) finally: wlock.release() def copy(self, source, dest): p = self.wjoin(dest) if not (os.path.exists(p) or os.path.islink(p)): self.ui.warn(_("%s does not exist!\n") % dest) elif not (os.path.isfile(p) or os.path.islink(p)): self.ui.warn(_("copy failed: %s is not a file or a " "symbolic link\n") % dest) else: wlock = self.wlock() try: if self.dirstate[dest] in '?r': self.dirstate.add(dest) self.dirstate.copy(source, dest) finally: wlock.release() def heads(self, start=None): heads = self.changelog.heads(start) # sort the output in rev descending order heads = [(-self.changelog.rev(h), h) for h in heads] return [n for (r, n) in sorted(heads)] def branchheads(self, branch=None, start=None, closed=False): '''return a (possibly filtered) list of heads for the given branch Heads are returned in topological order, from newest to oldest. If branch is None, use the dirstate branch. If start is not None, return only heads reachable from start. If closed is True, return heads that are marked as closed as well. ''' if branch is None: branch = self[None].branch() branches = self.branchmap() if branch not in branches: return [] # the cache returns heads ordered lowest to highest bheads = list(reversed(branches[branch])) if start is not None: # filter out the heads that cannot be reached from startrev fbheads = set(self.changelog.nodesbetween([start], bheads)[2]) bheads = [h for h in bheads if h in fbheads] if not closed: bheads = [h for h in bheads if ('close' not in self.changelog.read(h)[5])] return bheads def branches(self, nodes): if not nodes: nodes = [self.changelog.tip()] b = [] for n in nodes: t = n while 1: p = self.changelog.parents(n) if p[1] != nullid or p[0] == nullid: b.append((t, n, p[0], p[1])) break n = p[0] return b def between(self, pairs): r = [] for top, bottom in pairs: n, l, i = top, [], 0 f = 1 while n != bottom and n != nullid: p = self.changelog.parents(n)[0] if i == f: l.append(n) f = f * 2 n = p i += 1 r.append(l) return r def findincoming(self, remote, base=None, heads=None, force=False): """Return list of roots of the subsets of missing nodes from remote If base dict is specified, assume that these nodes and their parents exist on the remote side and that no child of a node of base exists in both remote and self. Furthermore base will be updated to include the nodes that exists in self and remote but no children exists in self and remote. If a list of heads is specified, return only nodes which are heads or ancestors of these heads. All the ancestors of base are in self and in remote. All the descendants of the list returned are missing in self. (and so we know that the rest of the nodes are missing in remote, see outgoing) """ return self.findcommonincoming(remote, base, heads, force)[1] def findcommonincoming(self, remote, base=None, heads=None, force=False): """Return a tuple (common, missing roots, heads) used to identify missing nodes from remote. 
If base dict is specified, assume that these nodes and their parents exist on the remote side and that no child of a node of base exists in both remote and self. Furthermore base will be updated to include the nodes that exists in self and remote but no children exists in self and remote. If a list of heads is specified, return only nodes which are heads or ancestors of these heads. All the ancestors of base are in self and in remote. """ m = self.changelog.nodemap search = [] fetch = set() seen = set() seenbranch = set() if base is None: base = {} if not heads: heads = remote.heads() if self.changelog.tip() == nullid: base[nullid] = 1 if heads != [nullid]: return [nullid], [nullid], list(heads) return [nullid], [], [] # assume we're closer to the tip than the root # and start by examining the heads self.ui.status(_("searching for changes\n")) unknown = [] for h in heads: if h not in m: unknown.append(h) else: base[h] = 1 heads = unknown if not unknown: return base.keys(), [], [] req = set(unknown) reqcnt = 0 # search through remote branches # a 'branch' here is a linear segment of history, with four parts: # head, root, first parent, second parent # (a branch always has two parents (or none) by definition) unknown = remote.branches(unknown) while unknown: r = [] while unknown: n = unknown.pop(0) if n[0] in seen: continue self.ui.debug("examining %s:%s\n" % (short(n[0]), short(n[1]))) if n[0] == nullid: # found the end of the branch pass elif n in seenbranch: self.ui.debug("branch already found\n") continue elif n[1] and n[1] in m: # do we know the base? self.ui.debug("found incomplete branch %s:%s\n" % (short(n[0]), short(n[1]))) search.append(n[0:2]) # schedule branch range for scanning seenbranch.add(n) else: if n[1] not in seen and n[1] not in fetch: if n[2] in m and n[3] in m: self.ui.debug("found new changeset %s\n" % short(n[1])) fetch.add(n[1]) # earliest unknown for p in n[2:4]: if p in m: base[p] = 1 # latest known for p in n[2:4]: if p not in req and p not in m: r.append(p) req.add(p) seen.add(n[0]) if r: reqcnt += 1 self.ui.progress('searching', reqcnt, unit='queries') self.ui.debug("request %d: %s\n" % (reqcnt, " ".join(map(short, r)))) for p in xrange(0, len(r), 10): for b in remote.branches(r[p:p + 10]): self.ui.debug("received %s:%s\n" % (short(b[0]), short(b[1]))) unknown.append(b) # do binary search on the branches we found while search: newsearch = [] reqcnt += 1 self.ui.progress('searching', reqcnt, unit='queries') for n, l in zip(search, remote.between(search)): l.append(n[1]) p = n[0] f = 1 for i in l: self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i))) if i in m: if f <= 2: self.ui.debug("found new branch changeset %s\n" % short(p)) fetch.add(p) base[i] = 1 else: self.ui.debug("narrowed branch search to %s:%s\n" % (short(p), short(i))) newsearch.append((p, i)) break p, f = i, f * 2 search = newsearch # sanity check our fetch list for f in fetch: if f in m: raise error.RepoError(_("already have changeset ") + short(f[:4])) if base.keys() == [nullid]: if force: self.ui.warn(_("warning: repository is unrelated\n")) else: raise util.Abort(_("repository is unrelated")) self.ui.debug("found new changesets starting at " + " ".join([short(f) for f in fetch]) + "\n") self.ui.progress('searching', None, unit='queries') self.ui.debug("%d total queries\n" % reqcnt) return base.keys(), list(fetch), heads def findoutgoing(self, remote, base=None, heads=None, force=False): """Return list of nodes that are roots of subsets not in remote If base dict is specified, assume 
that these nodes and their parents exist on the remote side. If a list of heads is specified, return only nodes which are heads or ancestors of these heads, and return a second element which contains all remote heads which get new children. """ if base is None: base = {} self.findincoming(remote, base, heads, force=force) self.ui.debug("common changesets up to " + " ".join(map(short, base.keys())) + "\n") remain = set(self.changelog.nodemap) # prune everything remote has from the tree remain.remove(nullid) remove = base.keys() while remove: n = remove.pop(0) if n in remain: remain.remove(n) for p in self.changelog.parents(n): remove.append(p) # find every node whose parents have been pruned subset = [] # find every remote head that will get new children updated_heads = set() for n in remain: p1, p2 = self.changelog.parents(n) if p1 not in remain and p2 not in remain: subset.append(n) if heads: if p1 in heads: updated_heads.add(p1) if p2 in heads: updated_heads.add(p2) # this is the set of all roots we have to push if heads: return subset, list(updated_heads) else: return subset def pull(self, remote, heads=None, force=False): lock = self.lock() try: common, fetch, rheads = self.findcommonincoming(remote, heads=heads, force=force) if fetch == [nullid]: self.ui.status(_("requesting all changes\n")) if not fetch: self.ui.status(_("no changes found\n")) return 0 if heads is None and remote.capable('changegroupsubset'): heads = rheads if heads is None: cg = remote.changegroup(fetch, 'pull') else: if not remote.capable('changegroupsubset'): raise util.Abort(_("Partial pull cannot be done because " "other repository doesn't support " "changegroupsubset.")) cg = remote.changegroupsubset(fetch, heads, 'pull') return self.addchangegroup(cg, 'pull', remote.url()) finally: lock.release() def push(self, remote, force=False, revs=None): # there are two ways to push to remote repo: # # addchangegroup assumes local user can lock remote # repo (local filesystem, old ssh servers). # # unbundle assumes local user cannot lock remote repo (new ssh # servers, http servers). if remote.capable('unbundle'): return self.push_unbundle(remote, force, revs) return self.push_addchangegroup(remote, force, revs) def prepush(self, remote, force, revs): '''Analyze the local and remote repositories and determine which changesets need to be pushed to the remote. Return a tuple (changegroup, remoteheads). changegroup is a readable file-like object whose read() returns successive changegroup chunks ready to be sent over the wire. remoteheads is the list of remote heads. ''' common = {} remote_heads = remote.heads() inc = self.findincoming(remote, common, remote_heads, force=force) update, updated_heads = self.findoutgoing(remote, common, remote_heads) msng_cl, bases, heads = self.changelog.nodesbetween(update, revs) def checkbranch(lheads, rheads, updatelb, branchname=None): ''' check whether there are more local heads than remote heads on a specific branch. 
lheads: local branch heads rheads: remote branch heads updatelb: outgoing local branch bases ''' warn = 0 if not revs and len(lheads) > len(rheads): warn = 1 else: # add local heads involved in the push updatelheads = [self.changelog.heads(x, lheads) for x in updatelb] newheads = set(sum(updatelheads, [])) & set(lheads) if not newheads: return True # add heads we don't have or that are not involved in the push for r in rheads: if r in self.changelog.nodemap: desc = self.changelog.heads(r, heads) l = [h for h in heads if h in desc] if not l: newheads.add(r) else: newheads.add(r) if len(newheads) > len(rheads): warn = 1 if warn: if branchname is not None: msg = _("abort: push creates new remote heads" " on branch '%s'!\n") % branchname else: msg = _("abort: push creates new remote heads!\n") self.ui.warn(msg) if len(lheads) > len(rheads): self.ui.status(_("(did you forget to merge?" " use push -f to force)\n")) else: self.ui.status(_("(you should pull and merge or" " use push -f to force)\n")) return False return True if not bases: self.ui.status(_("no changes found\n")) return None, 1 elif not force: # Check for each named branch if we're creating new remote heads. # To be a remote head after push, node must be either: # - unknown locally # - a local outgoing head descended from update # - a remote head that's known locally and not # ancestral to an outgoing head # # New named branches cannot be created without --force. if remote_heads != [nullid]: if remote.capable('branchmap'): remotebrheads = remote.branchmap() if not revs: localbrheads = self.branchmap() else: localbrheads = {} for n in heads: branch = self[n].branch() localbrheads.setdefault(branch, []).append(n) newbranches = list(set(localbrheads) - set(remotebrheads)) if newbranches: # new branch requires --force branchnames = ', '.join("%s" % b for b in newbranches) self.ui.warn(_("abort: push creates " "new remote branches: %s!\n") % branchnames) # propose 'push -b .' in the msg too? self.ui.status(_("(use 'hg push -f' to force)\n")) return None, 0 for branch, lheads in localbrheads.iteritems(): if branch in remotebrheads: rheads = remotebrheads[branch] if not checkbranch(lheads, rheads, update, branch): return None, 0 else: if not checkbranch(heads, remote_heads, update): return None, 0 if inc: self.ui.warn(_("note: unsynced remote changes!\n")) if revs is None: # use the fast path, no race possible on push nodes = self.changelog.findmissing(common.keys()) cg = self._changegroup(nodes, 'push') else: cg = self.changegroupsubset(update, revs, 'push') return cg, remote_heads def push_addchangegroup(self, remote, force, revs): lock = remote.lock() try: ret = self.prepush(remote, force, revs) if ret[0] is not None: cg, remote_heads = ret return remote.addchangegroup(cg, 'push', self.url()) return ret[1] finally: lock.release() def push_unbundle(self, remote, force, revs): # local repo finds heads on server, finds out what revs it # must push. once revs transferred, if server finds it has # different heads (someone else won commit/push race), server # aborts. 
ret = self.prepush(remote, force, revs) if ret[0] is not None: cg, remote_heads = ret if force: remote_heads = ['force'] return remote.unbundle(cg, remote_heads, 'push') return ret[1] def changegroupinfo(self, nodes, source): if self.ui.verbose or source == 'bundle': self.ui.status(_("%d changesets found\n") % len(nodes)) if self.ui.debugflag: self.ui.debug("list of changesets:\n") for node in nodes: self.ui.debug("%s\n" % hex(node)) def changegroupsubset(self, bases, heads, source, extranodes=None): """Compute a changegroup consisting of all the nodes that are descendents of any of the bases and ancestors of any of the heads. Return a chunkbuffer object whose read() method will return successive changegroup chunks. It is fairly complex as determining which filenodes and which manifest nodes need to be included for the changeset to be complete is non-trivial. Another wrinkle is doing the reverse, figuring out which changeset in the changegroup a particular filenode or manifestnode belongs to. The caller can specify some nodes that must be included in the changegroup using the extranodes argument. It should be a dict where the keys are the filenames (or 1 for the manifest), and the values are lists of (node, linknode) tuples, where node is a wanted node and linknode is the changelog node that should be transmitted as the linkrev. """ # Set up some initial variables # Make it easy to refer to self.changelog cl = self.changelog # msng is short for missing - compute the list of changesets in this # changegroup. if not bases: bases = [nullid] msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads) if extranodes is None: # can we go through the fast path ? heads.sort() allheads = self.heads() allheads.sort() if heads == allheads: return self._changegroup(msng_cl_lst, source) # slow path self.hook('preoutgoing', throw=True, source=source) self.changegroupinfo(msng_cl_lst, source) # Some bases may turn out to be superfluous, and some heads may be # too. nodesbetween will return the minimal set of bases and heads # necessary to re-create the changegroup. # Known heads are the list of heads that it is assumed the recipient # of this changegroup will know about. knownheads = set() # We assume that all parents of bases are known heads. for n in bases: knownheads.update(cl.parents(n)) knownheads.discard(nullid) knownheads = list(knownheads) if knownheads: # Now that we know what heads are known, we can compute which # changesets are known. The recipient must know about all # changesets required to reach the known heads from the null # changeset. has_cl_set, junk, junk = cl.nodesbetween(None, knownheads) junk = None # Transform the list into a set. has_cl_set = set(has_cl_set) else: # If there were no known heads, the recipient cannot be assumed to # know about any changesets. has_cl_set = set() # Make it easy to refer to self.manifest mnfst = self.manifest # We don't know which manifests are missing yet msng_mnfst_set = {} # Nor do we know which filenodes are missing. msng_filenode_set = {} junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex junk = None # A changeset always belongs to itself, so the changenode lookup # function for a changenode is identity. def identity(x): return x # If we determine that a particular file or manifest node must be a # node that the recipient of the changegroup will already have, we can # also assume the recipient will have all the parents. This function # prunes them from the set of missing nodes. 
def prune_parents(revlog, hasset, msngset): for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]): msngset.pop(revlog.node(r), None) # Use the information collected in collect_manifests_and_files to say # which changenode any manifestnode belongs to. def lookup_manifest_link(mnfstnode): return msng_mnfst_set[mnfstnode] # A function generating function that sets up the initial environment # the inner function. def filenode_collector(changedfiles): # This gathers information from each manifestnode included in the # changegroup about which filenodes the manifest node references # so we can include those in the changegroup too. # # It also remembers which changenode each filenode belongs to. It # does this by assuming the a filenode belongs to the changenode # the first manifest that references it belongs to. def collect_msng_filenodes(mnfstnode): r = mnfst.rev(mnfstnode) if r - 1 in mnfst.parentrevs(r): # If the previous rev is one of the parents, # we only need to see a diff. deltamf = mnfst.readdelta(mnfstnode) # For each line in the delta for f, fnode in deltamf.iteritems(): f = changedfiles.get(f, None) # And if the file is in the list of files we care # about. if f is not None: # Get the changenode this manifest belongs to clnode = msng_mnfst_set[mnfstnode] # Create the set of filenodes for the file if # there isn't one already. ndset = msng_filenode_set.setdefault(f, {}) # And set the filenode's changelog node to the # manifest's if it hasn't been set already. ndset.setdefault(fnode, clnode) else: # Otherwise we need a full manifest. m = mnfst.read(mnfstnode) # For every file in we care about. for f in changedfiles: fnode = m.get(f, None) # If it's in the manifest if fnode is not None: # See comments above. clnode = msng_mnfst_set[mnfstnode] ndset = msng_filenode_set.setdefault(f, {}) ndset.setdefault(fnode, clnode) return collect_msng_filenodes # We have a list of filenodes we think we need for a file, lets remove # all those we know the recipient must have. def prune_filenodes(f, filerevlog): msngset = msng_filenode_set[f] hasset = set() # If a 'missing' filenode thinks it belongs to a changenode we # assume the recipient must have, then the recipient must have # that filenode. for n in msngset: clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n))) if clnode in has_cl_set: hasset.add(n) prune_parents(filerevlog, hasset, msngset) # A function generator function that sets up the a context for the # inner function. def lookup_filenode_link_func(fname): msngset = msng_filenode_set[fname] # Lookup the changenode the filenode belongs to. def lookup_filenode_link(fnode): return msngset[fnode] return lookup_filenode_link # Add the nodes that were explicitly requested. def add_extra_nodes(name, nodes): if not extranodes or name not in extranodes: return for node, linknode in extranodes[name]: if node not in nodes: nodes[node] = linknode # Now that we have all theses utility functions to help out and # logically divide up the task, generate the group. def gengroup(): # The set of changed files starts empty. changedfiles = {} collect = changegroup.collector(cl, msng_mnfst_set, changedfiles) # Create a changenode group generator that will call our functions # back to lookup the owning changenode and collect information. 
group = cl.group(msng_cl_lst, identity, collect) cnt = 0 for chnk in group: yield chnk self.ui.progress('bundle changes', cnt, unit='chunks') cnt += 1 self.ui.progress('bundle changes', None, unit='chunks') # Figure out which manifest nodes (of the ones we think might be # part of the changegroup) the recipient must know about and # remove them from the changegroup. has_mnfst_set = set() for n in msng_mnfst_set: # If a 'missing' manifest thinks it belongs to a changenode # the recipient is assumed to have, obviously the recipient # must have that manifest. linknode = cl.node(mnfst.linkrev(mnfst.rev(n))) if linknode in has_cl_set: has_mnfst_set.add(n) prune_parents(mnfst, has_mnfst_set, msng_mnfst_set) add_extra_nodes(1, msng_mnfst_set) msng_mnfst_lst = msng_mnfst_set.keys() # Sort the manifestnodes by revision number. msng_mnfst_lst.sort(key=mnfst.rev) # Create a generator for the manifestnodes that calls our lookup # and data collection functions back. group = mnfst.group(msng_mnfst_lst, lookup_manifest_link, filenode_collector(changedfiles)) cnt = 0 for chnk in group: yield chnk self.ui.progress('bundle manifests', cnt, unit='chunks') cnt += 1 self.ui.progress('bundle manifests', None, unit='chunks') # These are no longer needed, dereference and toss the memory for # them. msng_mnfst_lst = None msng_mnfst_set.clear() if extranodes: for fname in extranodes: if isinstance(fname, int): continue msng_filenode_set.setdefault(fname, {}) changedfiles[fname] = 1 # Go through all our files in order sorted by name. cnt = 0 for fname in sorted(changedfiles): filerevlog = self.file(fname) if not len(filerevlog): raise util.Abort(_("empty or missing revlog for %s") % fname) # Toss out the filenodes that the recipient isn't really # missing. if fname in msng_filenode_set: prune_filenodes(fname, filerevlog) add_extra_nodes(fname, msng_filenode_set[fname]) msng_filenode_lst = msng_filenode_set[fname].keys() else: msng_filenode_lst = [] # If any filenodes are left, generate the group for them, # otherwise don't bother. if len(msng_filenode_lst) > 0: yield changegroup.chunkheader(len(fname)) yield fname # Sort the filenodes by their revision # msng_filenode_lst.sort(key=filerevlog.rev) # Create a group generator and only pass in a changenode # lookup function as we need to collect no information # from filenodes. group = filerevlog.group(msng_filenode_lst, lookup_filenode_link_func(fname)) for chnk in group: self.ui.progress( 'bundle files', cnt, item=fname, unit='chunks') cnt += 1 yield chnk if fname in msng_filenode_set: # Don't need this anymore, toss it to free memory. del msng_filenode_set[fname] # Signal that no more groups are left. yield changegroup.closechunk() self.ui.progress('bundle files', None, unit='chunks') if msng_cl_lst: self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source) return util.chunkbuffer(gengroup()) def changegroup(self, basenodes, source): # to avoid a race we use changegroupsubset() (issue1320) return self.changegroupsubset(basenodes, self.heads(), source) def _changegroup(self, nodes, source): """Compute the changegroup of all nodes that we have that a recipient doesn't. Return a chunkbuffer object whose read() method will return successive changegroup chunks. This is much easier than the previous function as we can assume that the recipient has any changenode we aren't sending them. 
nodes is the set of nodes to send""" self.hook('preoutgoing', throw=True, source=source) cl = self.changelog revset = set([cl.rev(n) for n in nodes]) self.changegroupinfo(nodes, source) def identity(x): return x def gennodelst(log): for r in log: if log.linkrev(r) in revset: yield log.node(r) def lookuprevlink_func(revlog): def lookuprevlink(n): return cl.node(revlog.linkrev(revlog.rev(n))) return lookuprevlink def gengroup(): '''yield a sequence of changegroup chunks (strings)''' # construct a list of all changed files changedfiles = {} mmfs = {} collect = changegroup.collector(cl, mmfs, changedfiles) cnt = 0 for chnk in cl.group(nodes, identity, collect): self.ui.progress('bundle changes', cnt, unit='chunks') cnt += 1 yield chnk self.ui.progress('bundle changes', None, unit='chunks') mnfst = self.manifest nodeiter = gennodelst(mnfst) cnt = 0 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)): self.ui.progress('bundle manifests', cnt, unit='chunks') cnt += 1 yield chnk self.ui.progress('bundle manifests', None, unit='chunks') cnt = 0 for fname in sorted(changedfiles): filerevlog = self.file(fname) if not len(filerevlog): raise util.Abort(_("empty or missing revlog for %s") % fname) nodeiter = gennodelst(filerevlog) nodeiter = list(nodeiter) if nodeiter: yield changegroup.chunkheader(len(fname)) yield fname lookup = lookuprevlink_func(filerevlog) for chnk in filerevlog.group(nodeiter, lookup): self.ui.progress( 'bundle files', cnt, item=fname, unit='chunks') cnt += 1 yield chnk self.ui.progress('bundle files', None, unit='chunks') yield changegroup.closechunk() if nodes: self.hook('outgoing', node=hex(nodes[0]), source=source) return util.chunkbuffer(gengroup()) def addchangegroup(self, source, srctype, url, emptyok=False): """add changegroup to repo. 
return values: - nothing changed or no source: 0 - more heads than before: 1+added heads (2..n) - less heads than before: -1-removed heads (-2..-n) - number of heads stays the same: 1 """ def csmap(x): self.ui.debug("add changeset %s\n" % short(x)) return len(cl) def revmap(x): return cl.rev(x) if not source: return 0 self.hook('prechangegroup', throw=True, source=srctype, url=url) changesets = files = revisions = 0 # write changelog data to temp files so concurrent readers will not see # inconsistent view cl = self.changelog cl.delayupdate() oldheads = len(cl.heads()) tr = self.transaction() try: trp = weakref.proxy(tr) # pull off the changeset group self.ui.status(_("adding changesets\n")) clstart = len(cl) class prog(object): step = 'changesets' count = 1 ui = self.ui def __call__(self): self.ui.progress(self.step, self.count, unit='chunks') self.count += 1 pr = prog() chunkiter = changegroup.chunkiter(source, progress=pr) if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok: raise util.Abort(_("received changelog group is empty")) clend = len(cl) changesets = clend - clstart self.ui.progress('changesets', None) # pull off the manifest group self.ui.status(_("adding manifests\n")) pr.step = 'manifests' pr.count = 1 chunkiter = changegroup.chunkiter(source, progress=pr) # no need to check for empty manifest group here: # if the result of the merge of 1 and 2 is the same in 3 and 4, # no new manifest will be created and the manifest group will # be empty during the pull self.manifest.addgroup(chunkiter, revmap, trp) self.ui.progress('manifests', None) needfiles = {} if self.ui.configbool('server', 'validate', default=False): # validate incoming csets have their manifests for cset in xrange(clstart, clend): mfest = self.changelog.read(self.changelog.node(cset))[0] mfest = self.manifest.readdelta(mfest) # store file nodes we must see for f, n in mfest.iteritems(): needfiles.setdefault(f, set()).add(n) # process the files self.ui.status(_("adding file changes\n")) pr.step = 'files' pr.count = 1 while 1: f = changegroup.getchunk(source) if not f: break self.ui.debug("adding %s revisions\n" % f) fl = self.file(f) o = len(fl) chunkiter = changegroup.chunkiter(source, progress=pr) if fl.addgroup(chunkiter, revmap, trp) is None: raise util.Abort(_("received file revlog group is empty")) revisions += len(fl) - o files += 1 if f in needfiles: needs = needfiles[f] for new in xrange(o, len(fl)): n = fl.node(new) if n in needs: needs.remove(n) if not needs: del needfiles[f] self.ui.progress('files', None) for f, needs in needfiles.iteritems(): fl = self.file(f) for n in needs: try: fl.rev(n) except error.LookupError: raise util.Abort( _('missing file data for %s:%s - run hg verify') % (f, hex(n))) newheads = len(cl.heads()) heads = "" if oldheads and newheads != oldheads: heads = _(" (%+d heads)") % (newheads - oldheads) self.ui.status(_("added %d changesets" " with %d changes to %d files%s\n") % (changesets, revisions, files, heads)) if changesets > 0: p = lambda: cl.writepending() and self.root or "" self.hook('pretxnchangegroup', throw=True, node=hex(cl.node(clstart)), source=srctype, url=url, pending=p) # make changelog see real files again cl.finalize(trp) tr.close() finally: del tr if changesets > 0: # forcefully update the on-disk branch cache self.ui.debug("updating the branch cache\n") self.branchtags() self.hook("changegroup", node=hex(cl.node(clstart)), source=srctype, url=url) for i in xrange(clstart, clend): self.hook("incoming", node=hex(cl.node(i)), source=srctype, url=url) # 
never return 0 here: if newheads < oldheads: return newheads - oldheads - 1 else: return newheads - oldheads + 1 def stream_in(self, remote): fp = remote.stream_out() l = fp.readline() try: resp = int(l) except ValueError: raise error.ResponseError( _('Unexpected response from remote server:'), l) if resp == 1: raise util.Abort(_('operation forbidden by server')) elif resp == 2: raise util.Abort(_('locking the remote repository failed')) elif resp != 0: raise util.Abort(_('the server sent an unknown error code')) self.ui.status(_('streaming all changes\n')) l = fp.readline() try: total_files, total_bytes = map(int, l.split(' ', 1)) except (ValueError, TypeError): raise error.ResponseError( _('Unexpected response from remote server:'), l) self.ui.status(_('%d files to transfer, %s of data\n') % (total_files, util.bytecount(total_bytes))) start = time.time() for i in xrange(total_files): # XXX doesn't support '\n' or '\r' in filenames l = fp.readline() try: name, size = l.split('\0', 1) size = int(size) except (ValueError, TypeError): raise error.ResponseError( _('Unexpected response from remote server:'), l) self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size))) # for backwards compat, name was partially encoded ofp = self.sopener(store.decodedir(name), 'w') for chunk in util.filechunkiter(fp, limit=size): ofp.write(chunk) ofp.close() elapsed = time.time() - start if elapsed <= 0: elapsed = 0.001 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') % (util.bytecount(total_bytes), elapsed, util.bytecount(total_bytes / elapsed))) self.invalidate() return len(self.heads()) + 1 def clone(self, remote, heads=[], stream=False): '''clone remote repository. keyword arguments: heads: list of revs to clone (forces use of pull) stream: use streaming clone if possible''' # now, all clients that can request uncompressed clones can # read repo formats supported by all servers that can serve # them. # if revlog format changes, client will have to check version # and format flags on "stream" capability, and use # uncompressed only if compatible. if stream and not heads and remote.capable('stream'): return self.stream_in(remote) return self.pull(remote, heads) # used to avoid circular references so destructors work def aftertrans(files): renamefiles = [tuple(t) for t in files] def a(): for src, dest in renamefiles: util.rename(src, dest) return a def instance(ui, path, create): return localrepository(ui, util.drop_scheme('file', path), create) def islocal(path): return True
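The head-bookkeeping in prepush()/checkbranch() above is easy to lose track of. The snippet below is a minimal, self-contained sketch of the rule being enforced (a non-forced push is refused when it would leave the remote with more heads than it already has); it is an illustration only, not Mercurial's API, and the set-of-node-ids representation is an assumption made for clarity.

# Hypothetical illustration of the "push creates new remote heads" check above.
# Heads are modelled as plain sets of node ids; this is not Mercurial code.
def push_would_create_new_heads(local_heads, remote_heads, absorbed_remote_heads):
    """Return True if pushing would leave the remote with extra heads.

    local_heads           -- branch heads in the local repository
    remote_heads          -- branch heads reported by the remote
    absorbed_remote_heads -- remote heads that are known locally and are
                             ancestors of an outgoing head (they stop being
                             heads once the push lands)
    """
    surviving_remote = set(remote_heads) - set(absorbed_remote_heads)
    heads_after_push = set(local_heads) | surviving_remote
    return len(heads_after_push) > len(remote_heads)

if __name__ == '__main__':
    # Local head 'b' merges away remote head 'a': push is allowed.
    print(push_would_create_new_heads({'b'}, {'a'}, {'a'}))        # False
    # A second, unrelated local head 'c' would create a new remote head.
    print(push_would_create_new_heads({'a', 'c'}, {'a'}, {'a'}))   # True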
SIDNEY, Neb. (AP) - Sidney’s former police chief has filed a challenge to his conviction and 30-day jail sentence for obstructing government operations. The lawyer for Byron Wilkinson filed the appeal in the Nebraska Appeals Court earlier this month. Wilkinson was convicted July 17 after pleading no contest. The charge stems from what state prosecutors said was Wilkinson’s failure to pursue a criminal case against then-city Public Works Director John Hehnke, after Hehnke’s ex-girlfriend filed a criminal complaint against him in January 2014. Wilkinson instead handled the matter administratively and didn’t seek charges. Concerns about Wilkinson’s decision were brought to the attention of the Nebraska Attorney General’s Office. Earlier this year, a special prosecutor charged Hehnke, accusing him of “window peeping” at his ex-girlfriend’s house when she was partially disrobed. Hehnke pleaded no contest in April, was convicted of disturbing the peace and fined $300. He’s since been fired. The Scottsbluff Star-Herald reports (https://bit.ly/1mC9PDP) that Wilkinson’s appellate brief includes arguments he made in an unsuccessful appeal to the Cheyenne County District Court, including his assertion that 30 days in jail was an excessive punishment. Wilkinson argues that he did not cause or intend to cause any serious harm to anyone, that he had no criminal history, and that putting him behind bars wasn’t necessary to protect the public. Court records show the case hasn’t been scheduled for a hearing and that the prosecutor, Nebraska Assistant Attorney General Doug Warner, has not filed his brief.
# -*- coding: utf-8 -*- # #------------------------------------------------------------- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # #------------------------------------------------------------- # # SystemML documentation build configuration file, created by # sphinx-quickstart on Thu Aug 24 11:58:36 2017. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import sys sys.path.insert(0, os.path.abspath('../python')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon'] # Add any paths that contain templates here, relative to this directory. # templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = u'SystemML' copyright = u'2017 The Apache Software Foundation. All rights reserved' author = u'' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = os.environ.get('SYSTEMML_VERSION', 'latest') # The full version, including alpha/beta/rc tags. release = os.environ.get('SYSTEMML_RELEASE', version) # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # If true, `todo` and `todoList` produce output, else they produce nothing. 
todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'classic' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". # html_static_path = ['_static'] # Custom sidebar templates, must be a dictionary that maps document names # to template names. # # html_sidebars = {} # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, links to the .rst sources are added to the pages. html_show_sourcelink = False # The name of an image file (relative to this directory) to place at the top # of the sidebar. html_logo = "../../../docs/img/systemml-logo.png" # The name of an image file (within the static path) to use as favicon of the # docs. html_favicon = "../../../docs/img/favicon.png" # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. htmlhelp_basename = 'SystemMLdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'SystemML.tex', u'SystemML Documentation', u'None', 'manual'), ] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'systemml', u'SystemML Documentation', [author], 1) ] # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'SystemML', u'SystemML Documentation', author, 'SystemML', 'One line description of project.', 'Miscellaneous'), ]
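For reference, the configuration above is only half of a docs build; sphinx-build consumes it. Below is a minimal sketch of driving such a build from Python. The source and output directories and the version string are illustrative assumptions, not paths taken from the SystemML repository, and it presumes sphinx-build is installed and on PATH.

# Hypothetical driver for the Sphinx configuration above.
# '.', '_build/html' and the version value are assumptions for illustration.
import os
import subprocess

env = dict(os.environ, SYSTEMML_VERSION='1.2.0')   # read by conf.py via os.environ.get
subprocess.check_call(['sphinx-build', '-b', 'html', '.', '_build/html'], env=env)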
We are a local, family-owned business in Northwest Louisiana. We provide all your janitorial supplies, including paper, chemicals, equipment, and equipment repair. Stocking the right supplies is the simplest way to save your company time and money.
import cPickle as pickle import numpy as np import pandas as pd import os from subprocess import call import matplotlib matplotlib.use('QT4Agg') import matplotlib.pyplot as plt from matplotlib.ticker import LinearLocator import scipy import json from sklearn.decomposition import PCA as skPCA from scipy.spatial.distance import pdist, squareform from scipy.cluster.hierarchy import fcluster, linkage, dendrogram, set_link_color_palette, to_tree, inconsistent import seaborn as sns from matplotlib.colors import rgb2hex, colorConverter from pprint import pprint import difflib from operator import itemgetter import itertools #base path to pickle files with fpkm or count matrix path_to_file = '/Volumes/Seq_data/count-picard_zw_all' #for labeling all output files base_name = 'deseq_zw_all' filename = os.path.join(path_to_file, base_name+'subgroups_200_deseq_color2') call('mkdir -p '+filename, shell=True) #if you have cell group assignments and want to use them for coloring points and labels #provide filename (full path if not in path_to_file directory) group_file = False cell_group_filename = '' #if you want to restrict the genes inlcuded to a specific genelist, requires 'GeneID' and 'GroupID' header make_gene_matrix = False if make_gene_matrix: gene_list_file = 'go_search_genes_lung_all.txt' #if you want to restrict the cell matrix file to a subset of cells, expects 'SampleID' header make_cell_matrix = False if make_cell_matrix: cell_file = 'E15.5_unsorted.txt' cell_file_source = os.path.join(path_to_file, cell_file) #choose metric and method for scipy clustering (also used in seaborn clustermap) metric='euclidean' method='average' #if you want to test the stability of clustering over a range of top pca inputs test_clust_stability = False #load file gene if path_to_file.split('/')[-1][0:8] == 'cuffnorm': by_cell = pd.DataFrame.from_csv(os.path.join(path_to_file,base_name+'_outlier_filtered.txt'), sep='\t') else: by_cell = pd.DataFrame.from_csv(os.path.join(path_to_file,'DESeq__count_zw_all_outlier_filtered_matrix_norm.txt'), sep='\t') by_gene = by_cell.transpose() #create list of genes gene_list = by_cell.index.tolist() #create cell list cell_list = [x for x in list(by_cell.columns.values)] df_by_gene1 = pd.DataFrame(by_gene, columns=gene_list, index=cell_list) df_by_cell1 = pd.DataFrame(by_cell, columns=cell_list, index=gene_list) hu_cc_gene_df = pd.DataFrame.from_csv('/Volumes/Seq_data/cell_cycle_genes.txt', sep='\t') def cell_cycle(cell_cycle_gene_df, df_by_gene): gene_list = df_by_gene.columns.tolist() for g_sym, alt_g_name in zip(cell_cycle_gene_df['Symbol'], cell_cycle_gene_df['Gene Name']): if g_sym not in gene_list: print g_sym for g in alt_g_name.split(','): if g.strip() in gene_list: cell_cycle_gene_df['Symbol'][g_sym] = g.strip() else: try: print cell_cycle_gene_df.Symbol cell_cycle_gene_df = cell_cycle_gene_df[cell_cycle_gene_df.Symbol != g_sym] except ValueError: print g_sym, g cc_gene_df = df_by_gene[cell_cycle_gene_df['Symbol']] return cc_gene_df def make_new_matrix_gene(org_matrix_by_gene, gene_list_file): split_on='_' gene_df = pd.read_csv(os.path.join(path_to_file, gene_list_file), delimiter= '\t') gene_list = gene_df['GeneID'].tolist() group_list = gene_df['GroupID'].tolist() gmatrix_df = org_matrix_by_gene[gene_list] cmatrix_df = gmatrix_df.transpose() cell_list1 = [] for cell in cmatrix_df.columns.values: if exclude: if cell.split(split_on)[1] == 'ctrl' or cell.split(split_on)[1] == 'pnx': if cell.split(split_on)[2][0] =='C': print cell, 'cell' cell_list1.append(cell) else: 
cell_list1.append(cell) new_cmatrix_df = cmatrix_df[cell_list1] new_gmatrix_df = new_cmatrix_df.transpose() return new_cmatrix_df, new_gmatrix_df def make_new_matrix_cell(org_matrix_by_cell, cell_list_file): cell_df = pd.read_csv(os.path.join(path_to_file, cell_list_file), delimiter= '\t') cell_list_new = [cell.strip('\n') for cell in cell_df['Sample ID'].tolist()] cell_list_old = org_matrix_by_cell.columns.values cell_list = [c for c in cell_list_new if c in cell_list_old] timepoint = cell_df['Timepoint'].tolist() cell_type = cell_df['Type'].tolist() new_cmatrix_df = org_matrix_by_cell[cell_list] new_name_list = ['_'.join([x,y,z]) for x,y,z in zip(cell_list,timepoint,cell_type)] new_name_dict = {k:v for k,v in zip(cell_list,new_name_list)} print new_name_dict new_cmatrix_df = new_cmatrix_df.rename(columns = new_name_dict) new_gmatrix_df = new_cmatrix_df.transpose() return new_cmatrix_df, new_gmatrix_df if make_gene_matrix: df_by_cell2, df_by_gene2 = make_new_matrix_gene(df_by_gene1, gene_file_source) if make_cell_matrix: df_by_cell2, df_by_gene2 = make_new_matrix_cell(df_by_cell1, cell_file_source) else: df_by_cell2, df_by_gene2 = df_by_cell1, df_by_gene1 def preprocess_df(np_by_cell, gen_list, number_expressed=3): g_todelete = [] for g1, gene in enumerate(np_by_cell): cells_exp = (gene >= 1.0).sum() if cells_exp < number_expressed: g_todelete.append(g1) g1_todelete = sorted(g_todelete, reverse = True) print np_by_cell.shape for pos in g1_todelete: if type(gen_list[pos]) != float: print 'Gene '+gen_list[pos]+' not expressed in '+str(number_expressed)+' cells.' pass del gen_list[pos] n_by_cell = np.delete(np_by_cell, g1_todelete, axis=0) print n_by_cell.shape return n_by_cell, gen_list np_by_cell2 = np.array(df_by_cell2.values, dtype='f') gen_list = df_by_cell2.index.tolist() np_by_cell, n_gene_list = preprocess_df(np_by_cell2, gen_list) df_by_gene = pd.DataFrame(np_by_cell.transpose(), index = df_by_cell2.columns.values, columns= n_gene_list) df_by_cell = df_by_gene.transpose() def find_top_common_genes(log2_df_by_cell, num_common=100): top_common_list = [] count = 0 log2_df_by_gene = log2_df_by_cell.transpose() log2_df2_gene = pd.DataFrame(log2_df_by_gene.convert_objects(convert_numeric=True)) log_mean = log2_df2_gene.mean(axis=0).order(ascending=False) log2_sorted_gene = log2_df_by_gene.reindex_axis(log2_df_by_gene.mean(axis=0).order(ascending=False).index, axis=1) for gene in log2_sorted_gene.colums.tolist(): if not log2_df_by_gene[gene].any() <= 1.1: if count < num_common: count+=1 top_common_list.append(gene) if count == 100: done = True break if done: return log2_df_by_gene[top_common_list].transpose() else: return False def log2_oulierfilter(df_by_cell, plot=False): log2_df = np.log2(df_by_cell+1) top_log2 = find_top_common_genes(log2_df) if not top_log2: print "no common genes found" return log2_df, log2_df.transpose() log2_df2= pd.DataFrame(log2_df.convert_objects(convert_numeric=True)) log_mean = top_log2.mean(axis=0).order(ascending=False) log2_sorted = top_log2.reindex_axis(top_log2.mean(axis=0).order(ascending=False).index, axis=1) xticks = [] keep_col= [] log2_cutoff = np.average(log2_sorted)-np.std(log2_sorted) for col, m in zip(log2_sorted.columns.tolist(),log2_sorted.mean()): print m if m > log2_cutoff: keep_col.append(col) xticks.append(col+' '+str("%.2f" % m)) filtered_df_by_cell = df_by_cell[keep_col] filtered_df_by_gene = filtered_df_by_cell.transpose() filtered_log2 = np.log2(filtered_df_by_cell[filtered_df_by_cell>0]) if plot: ax = sns.boxplot(data=filtered_log2, 
whis= .75, notch=True) ax = sns.stripplot(x=filtered_log2.columns.values, y=filtered_log2.mean(axis=0), size=4, jitter=True, edgecolor="gray") xtickNames = plt.setp(ax, xticklabels=xticks) plt.setp(xtickNames, rotation=90, fontsize=9) plt.show() plt.clf() sns.distplot(filtered_log2.mean()) plt.show() log2_expdf_cell = np.log2(filtered_df_by_cell+1) log2_expdf_gene = log2_expdf_cell.transpose() return log2_expdf_cell, log2_expdf_gene def run_cluster(by_gene_matrix): cell_list = [x for x in list(by_gene_matrix.index.values)] cell_dist = pdist(np.array(by_gene_matrix), metric='euclidean') row_dist = pd.DataFrame(squareform(cell_dist), columns=cell_list, index=cell_list) row_clusters = linkage(cell_dist, metric=metric, method='average') link_mat = pd.DataFrame(row_clusters, columns=['row label 1', 'row label 2', 'distance', 'no. of items in clust.'], index=['cluster %d' %(i+1) for i in range(row_clusters.shape[0])]) row_dendr = dendrogram(row_clusters, labels=cell_list, leaf_rotation=90, leaf_font_size=8) plt.savefig(os.path.join(path_to_file,'dendrogram_gene.png')) plt.clf() return cell_dist, row_dist, row_clusters, link_mat, row_dendr def augmented_dendrogram(*args, **kwargs): plt.clf() ddata = dendrogram(*args, **kwargs) if not kwargs.get('no_plot', False): for i, d in zip(ddata['icoord'], ddata['dcoord'], ): x = 0.5 * sum(i[1:3]) y = d[1] if y >= 200000: plt.plot(x, y, 'ro') plt.annotate("%.3g" % y, (x, y), xytext=(0, -8), textcoords='offset points', va='top', ha='center') plt.show() plt.savefig(os.path.join(path_to_file,'augmented_dendrogram.png')) def cluster_indices(cluster_assignments): n = cluster_assignments.max() indices = [] for cluster_number in range(1, n + 1): indices.append(np.where(cluster_assignments == cluster_number)[0]) return indices def clust_members(r_link, cutoff): clust = fcluster(r_link,cutoff) num_clusters = clust.max() indices = cluster_indices(clust) return num_clusters, indices def print_clust_membs(indices, cell_list): for k, ind in enumerate(indices): print "cluster", k + 1, "is", [cell_list[x] for x in ind] def plot_tree(dendr, pos=None, save=False): icoord = scipy.array(dendr['icoord']) dcoord = scipy.array(dendr['dcoord']) color_list = scipy.array(dendr['color_list']) xmin, xmax = icoord.min(), icoord.max() ymin, ymax = dcoord.min(), dcoord.max() if pos: icoord = icoord[pos] dcoord = dcoord[pos] for xs, ys, color in zip(icoord, dcoord, color_list): plt.plot(xs, ys, color) plt.xlim(xmin-10, xmax + 0.1*abs(xmax)) plt.ylim(ymin, ymax + 0.1*abs(ymax)) if save: plt.savefig(os.path.join(path_to_file,'plot_dendrogram.png')) plt.show() # Create a nested dictionary from the ClusterNode's returned by SciPy def add_node(node, parent): # First create the new node and append it to its parent's children newNode = dict( node_id=node.id, children=[] ) parent["children"].append( newNode ) # Recursively add the current node's children if node.left: add_node( node.left, newNode ) if node.right: add_node( node.right, newNode ) cc = [] # Label each node with the names of each leaf in its subtree def label_tree(n, id2name): # If the node is a leaf, then we have its name if len(n["children"]) == 0: leafNames = [ id2name[n["node_id"]] ] # If not, flatten all the leaves in the node's subtree else: leafNames = reduce(lambda ls, c: ls + label_tree(c,id2name), n["children"], []) cc.append((len(leafNames), [x.strip('\n') for x in leafNames])) cc.sort(key=lambda tup: tup[0], reverse = True) # Delete the node id since we don't need it anymore and # it makes for cleaner JSON del 
n["node_id"] # Labeling convention: "-"-separated leaf names n["name"] = name = "-".join(sorted(map(str, leafNames))) return leafNames #Makes labeled json tree for visulaization in d3 def make_tree_json(row_clusters, df_by_gene): T= to_tree(row_clusters) # Create dictionary for labeling nodes by their IDs labels = list(df_by_gene.index) id2name = dict(zip(range(len(labels)), labels)) # Initialize nested dictionary for d3, then recursively iterate through tree d3Dendro = dict(children=[], name="Root1") add_node( T, d3Dendro ) label_tree( d3Dendro["children"][0], id2name ) # Output to JSON json.dump(d3Dendro, open(os.path.join(path_to_file,"d3-dendrogram.json"), "w"), sort_keys=True, indent=4) return cc #finds significant genes between subclusters def find_twobytwo(cc, df_by_cell, full_by_cell_df, fraction_to_plot=10): gene_list = full_by_cell_df.index.tolist() by_gene_df = full_by_cell_df.transpose() pair_dict = {} parent = cc[0][1] p_num = cc[0][0] l_nums = [x[0] for x in cc] c_lists = [c[1] for c in cc[1:]] unique_count = 1 pair_list = [] for i, c in enumerate(c_lists): for i2, c2 in enumerate(c_lists): overlap = [i for i in c if i in c2] if not overlap and len(c)>=p_num/fraction_to_plot and len(c2)>=p_num/fraction_to_plot: if (c,c2) not in pair_list: pair_list.append((c,c2)) pair_list.append((c2,c)) pair_dict[str(len(c))+'cells_vs_'+str(len(c2))+'cells'+str(unique_count)]= [c, c2] unique_count+=1 for v, k in pair_dict.items(): g_pvalue_dict = {} index_list = [] sig_gene_list = [] cell_list1 = [x.strip('\n') for x in k[0]] cell_list2 = [xx.strip('\n') for xx in k[1]] group1 = str(len(cell_list1)) group2 = str(len(cell_list2)) df_by_cell_1 = full_by_cell_df[cell_list1] df_by_cell_2 = full_by_cell_df[cell_list2] df_by_gene_1 = df_by_cell_1.transpose() df_by_gene_2 = df_by_cell_2.transpose() for g in gene_list: g_pvalue = scipy.stats.f_oneway(df_by_gene_1[g], df_by_gene_2[g]) if g_pvalue[0] > 0 and g_pvalue[1] <= 1: g_pvalue_dict[g] = g_pvalue if g not in [s[0] for s in sig_gene_list]: sig_gene_list.append([g, g_pvalue[1]]) sig_gene_list.sort(key=lambda tup: tup[1]) pvalues = [p[1] for p in sig_gene_list] gene_index = [ge[0] for ge in sig_gene_list] mean_log2_exp_list = [] sig_1_2_list = [] mean1_list = [] mean2_list = [] for sig_gene in gene_index: sig_gene_df = by_gene_df[sig_gene] mean_log2_exp_list.append(sig_gene_df.mean()) sig_cell_df = sig_gene_df.transpose() mean_cell1 = sig_cell_df[cell_list1].mean() mean1_list.append(mean_cell1) mean_cell2 = sig_cell_df[cell_list2].mean() mean2_list.append(mean_cell2) ratio_1_2 = (mean_cell1+1)/(mean_cell2+1) sig_1_2_list.append(ratio_1_2) sig_df = pd.DataFrame({'pvalues':pvalues,'mean_all':mean_log2_exp_list,'mean_group1':mean1_list, 'mean_group2':mean2_list, 'ratio_1_2':sig_1_2_list}, index=gene_index) cell_names_df = pd.DataFrame({'cells1':pd.Series(cell_list1, index=range(len(cell_list1))), 'cells2':pd.Series(cell_list2, index=range(len(cell_list2)))}) sig_df.to_csv(os.path.join(filename,'sig_'+v+'_pvalues.txt'), sep = '\t') cell_names_df.to_csv(os.path.join(filename,'sig_'+v+'_cells.txt'), sep = '\t') def plot_PCA(df_by_gene, num_genes=100, gene_list_filter=False, title='', plot=False, label_map=False, annotate=False): gene_list = df_by_gene.columns.tolist() print len(gene_list) sns.set_palette("RdBu_r", 10, 1) if gene_list_filter: sig_by_gene = df_by_gene[gene_list_filter] sig_by_cell = sig_by_gene.transpose() else: sig_by_gene = df_by_gene sig_by_cell = sig_by_gene.transpose() gene_pca = skPCA(n_components=3) np_by_gene = 
np.asarray(sig_by_gene) by_gene_trans = gene_pca.fit_transform(np_by_gene) Pc_df = pd.DataFrame(gene_pca.components_.T, columns=['PC-1', 'PC-2', 'PC-3'], index=sig_by_gene.columns.tolist()) pca_rank_df = Pc_df.abs().sum(axis=1) Pc_sort_df = pca_rank_df.nlargest(len(sig_by_gene.columns.tolist())) top_pca_list = Pc_sort_df.index.tolist() print top_pca_list[0:num_genes], 'top_pca_list' top_by_gene = df_by_gene[top_pca_list[0:num_genes]] gene_top = skPCA(n_components=2) cell_pca = skPCA(n_components=2) top_by_cell = top_by_gene.transpose() np_top_gene = np.asarray(top_by_cell) np_top_cell = np.asarray(top_by_gene) top_cell_trans = cell_pca.fit_transform(np_top_cell) top_gene_trans = gene_top.fit_transform(np_top_gene) fig, (ax_cell, ax_gene) = plt.subplots(2, 1, figsize=(15, 30), sharex=False) if label_map: X = [x for x in top_cell_trans[:, 0]] Y = [y for y in top_cell_trans[:, 1]] labels = [label_map[cell][2] for cell in top_by_cell.columns.tolist()] markers = [label_map[cell][1] for cell in top_by_cell.columns.tolist()] colors = [label_map[cell][0] for cell in top_by_cell.columns.tolist()] label_done = [] for X_pos, Y_pos, m, color, l in zip(X, Y, markers, colors, labels): if l in label_done: lab = '' else: lab= l label_done.append(l) ax_cell.scatter(X_pos, Y_pos, marker=m, c=color, label=lab, s=20) else: ax_cell.scatter(top_cell_trans[:, 0], top_cell_trans[:, 1], alpha=0.75) ax_cell.set_xlim([min(top_cell_trans[:, 0])-1, max(top_cell_trans[:, 0]+1)]) ax_cell.set_ylim([min(top_cell_trans[:, 1])-1, max(top_cell_trans[:, 1]+1)]) ax_cell.set_title(title+'_cell') ax_cell.legend(loc='best', ncol=1, prop={'size':12}, markerscale=2, frameon=True) ax_cell.set_xlabel('PC1') ax_cell.set_ylabel('PC2') if annotate: for label, x, y in zip(top_by_cell.columns, top_cell_trans[:, 0], top_cell_trans[:, 1]): ax_cell.annotate(label, (x+0.1, y+0.1)) ax_gene.scatter(top_gene_trans[:, 0], top_gene_trans[:, 1], alpha=0.75) ax_gene.set_xlim([min(top_gene_trans[:, 0])-1, max(top_gene_trans[:, 0]+1)]) ax_gene.set_ylim([min(top_gene_trans[:, 1])-1, max(top_gene_trans[:, 1]+1)]) ax_gene.set_title(title+'_gene') ax_gene.set_xlabel('PC1') ax_gene.set_ylabel('PC2') print len(top_by_gene.columns), len(top_gene_trans[:, 0]), len(top_gene_trans[:, 1]) for label, x, y in zip(top_by_gene.columns, top_gene_trans[:, 0], top_gene_trans[:, 1]): ax_gene.annotate(label, (x, y)) if plot: plt.show() if title != '': save_name = '_'.join(title.split(' ')[0:2]) plt.savefig(os.path.join(filename,save_name+'_skpca.pdf'), bbox_inches='tight') else: plt.savefig(os.path.join(filename,'non_group_skpca.pdf'), bbox_inches='tight') plt.close() return top_pca_list def clust_heatmap(gene_list, df_by_gene, num_to_plot=len(gene_list), title='', plot=False, label_map=False): if num_to_plot >175: sns.set(context= 'poster', font_scale = 0.65/(num_to_plot/100)) else: sns.set(context= 'poster', font_scale = .80, font ='Verdana') sns.set_palette('RdBu',4,0.1) cell_list = df_by_gene.index.tolist() cg = sns.clustermap(df_by_gene[gene_list[0:num_to_plot]].transpose(), metric=metric, method=method, z_score=0, figsize=(30, 25)) col_order = cg.dendrogram_col.reordered_ind cg.ax_heatmap.set_title(title) if label_map: Xlabs = [cell_list[i] for i in col_order] colors = [label_map[cell][0] for cell in Xlabs] for xtick, color in zip(cg.ax_heatmap.get_xticklabels(), colors): xtick.set_color(color) xtick.set_rotation(270) if plot: plt.show() cell_linkage = cg.dendrogram_col.linkage link_mat = pd.DataFrame(cell_linkage, columns=['row label 1', 'row label 2', 'distance', 
'no. of items in clust.'], index=['cluster %d' %(i+1) for i in range(cell_linkage.shape[0])]) if title != '': save_name = '_'.join(title.split(' ')[0:2]) cg.savefig(os.path.join(filename, save_name+'_heatmap.pdf'), bbox_inches='tight') else: cg.savefig(os.path.join(filename,'Non_group_heatmap_z1_deleted.pdf'), bbox_inches='tight') plt.close() return cell_linkage, df_by_gene[gene_list[0:num_to_plot]], col_order def make_subclusters(cc, log2_expdf_cell, gene_corr_list=False, fraction_to_plot=8, filename=filename, base_name=base_name): parent = cc[0][1] p_num = cc[0][0] l_nums = [x[0] for x in cc] c_lists = [c[1] for c in cc] group_ID = 0 for num_members, cell_list in zip(l_nums, c_lists): if num_members < p_num and num_members >= p_num/fraction_to_plot: group_ID+=1 title = 'Group_'+str(group_ID)+'_with_'+str(num_members)+'_cells' cell_subset = log2_expdf_cell[cell_list] gene_subset = cell_subset.transpose() norm_df_cell1 = np.exp2(cell_subset) norm_df_cell = norm_df_cell1 -1 norm_df_cell.to_csv(os.path.join(filename, base_name+'_'+title+'_matrix.txt'), sep = '\t', index_col=0) if label_map: top_pca = plot_PCA(gene_subset, num_genes=gene_number, title=title, plot=False, label_map=label_map) else: top_pca = plot_PCA(gene_subset, num_genes=gene_number, title=title, plot=False) if top_pca != []: top_pca_by_gene = gene_subset[top_pca] top_pca_by_cell = top_pca_by_gene.transpose() if gene_corr_list: top_genes_search = [x for x in top_pca] corr_plot(gene_corr_list+top_genes_search[0:3], gene_subset, title = title) cell_linkage, plotted_df_by_gene, col_order = clust_heatmap(top_pca, top_pca_by_gene, num_to_plot=gene_number, title=title, plot=False, label_map=label_map) plt.close() else: pass def clust_stability(log2_expdf_gene, iterations=16): sns.set(context='poster', font_scale = 1) sns.set_palette("RdBu_r") stability_ratio = [] total_genes = len(log2_expdf_gene.columns.tolist()) end_num = 1000 iter_list = range(100,int(round(end_num)),int(round(end_num/iterations))) for gene_number in iter_list: title= str(gene_number)+' genes plot.' 
top_pca = plot_PCA(log2_expdf_gene, num_genes=gene_number, title=title) top_pca_by_gene = log2_expdf_gene[top_pca] top_pca_by_cell = top_pca_by_gene.transpose() cell_linkage, plotted_df_by_gene, col_order = clust_heatmap(top_pca, top_pca_by_gene, num_to_plot=gene_number, title=title) if gene_number == 100: s1 = col_order s0 = col_order else: s2= col_order sm_running = difflib.SequenceMatcher(None,s1,s2) sm_first = difflib.SequenceMatcher(None,s0,s2) stability_ratio.append((sm_running.ratio(), sm_first.ratio())) s1=col_order plt.close() x= iter_list[1:] f, (ax1, ax2) = plt.subplots(2, 1, figsize=(10, 8), sharex=True) y1= [m[0] for m in stability_ratio] y2= [m[1] for m in stability_ratio] sns.barplot(x, y1, palette="RdBu_r", ax=ax1) ax1.set_ylabel('Running ratio (new/last)') sns.barplot(x, y2, palette="RdBu_r", ax=ax2) ax2.set_ylabel('Ratio to 100') plt.savefig(os.path.join(filename,'clustering_stability.pdf'), bbox_inches='tight') plt.show() plt.close() return stability_ratio #run correlation matrix and save only those above threshold def run_corr(df_by_gene, title, method_name='pearson', sig_threshold= 0.5, run_new=True, min_period=3): if run_new: if method_name != 'kendall': corr_by_gene = df_by_gene.corr(method=method_name, min_periods=min_period) else: corr_by_gene = df_by_gene.corr(method=method_name) corr_by_cell = df_by_cell.corr() cor = corr_by_gene cor.loc[:,:] = np.tril(cor.values, k=-1) cor = cor.stack() corr_by_gene_pos = cor[cor >=sig_threshold] corr_by_gene_neg = cor[cor <=(sig_threshold*-1)] with open(os.path.join(path_to_file,'gene_correlations_sig_neg_'+method_name+'.p'), 'wb') as fp: pickle.dump(corr_by_gene_neg, fp) with open(os.path.join(path_to_file,'gene_correlations_sig_pos_'+method_name+'.p'), 'wb') as fp0: pickle.dump(corr_by_gene_pos, fp0) with open(os.path.join(path_to_file,'by_gene_corr.p'), 'wb') as fp1: pickle.dump(corr_by_gene, fp1) with open(os.path.join(path_to_file,'by_cell_corr.p'), 'wb') as fp2: pickle.dump(corr_by_cell, fp2) else: corr_by_g_pos = open(os.path.join(path_to_file,'gene_correlations_sig_pos_'+method_name+'.p'), 'rb') corr_by_g_neg = open(os.path.join(path_to_file,'gene_correlations_sig_neg_'+method_name+'.p'), 'rb') corr_by_gene_pos = pickle.load(corr_by_g_pos) corr_by_gene_neg = pickle.load(corr_by_g_neg) cor_pos_df = pd.DataFrame(corr_by_gene_pos) cor_neg_df = pd.DataFrame(corr_by_gene_neg) sig_corr = cor_pos_df.append(cor_neg_df) sig_corrs = pd.DataFrame(sig_corr[0], columns=["corr"]) if run_new: sig_corrs.to_csv(os.path.join(path_to_file, title+'_counts_corr_sig_'+method_name+'.txt'), sep = '\t') return sig_corrs #corr_plot finds and plots all correlated genes, log turns on log scale, sort plots the genes in the rank order of the gene searched def corr_plot(terms_to_search, df_by_gene, title, log=False, sort=True, sig_threshold=0.5): sig_corrs = run_corr(df_by_gene, title, sig_threshold=sig_threshold) for term_to_search in terms_to_search: corr_tup = [(term_to_search, 1)] neg = True fig, ax = plt.subplots() marker = itertools.cycle(('+', 'o', '*')) linestyles = itertools.cycle(('--', '-.', '-', ':')) for index, row in sig_corrs.iterrows(): if term_to_search in index: neg = False if index[0]==term_to_search: corr_tup.append((index[1],row['corr'])) else: corr_tup.append((index[0],row['corr'])) if neg: print term_to_search+' not correlated.' 
corr_tup.sort(key=itemgetter(1), reverse=True) corr_df = pd.DataFrame(corr_tup, columns=['GeneID', 'Correlation']) corr_df.to_csv(os.path.join(filename, title+'_Corr_w_'+term_to_search+'_list.txt'), sep = '\t', index=False) for c in corr_tup: print c to_plot = [x[0] for x in corr_tup] sns.set_palette(sns.cubehelix_palette(len(to_plot), start=1, rot=-.9, reverse=True)) try: sorted_df = df_by_gene.sort([term_to_search]) log2_df = np.log2(df_by_gene[to_plot]) sorted_log2_df=np.log2(sorted_df[to_plot]) ylabel='CPM (log2)' if sort and log: ax = sorted_log2_df.plot() xlabels = sorted_log2_df[to_plot].index.values elif sort: ax =sorted_df[to_plot].plot() xlabels = sorted_df[to_plot].index.values elif log: ax = log2_df.plot() ylabel= 'log2 FPKM' xlabels = log2_df.index.values else: ax = df_by_gene[to_plot].plot() xlabels = df_by_gene[to_plot].index.values ax.set_xlabel('Cell #') ax.set_ylabel(ylabel) ax.set_title('Correlates with '+term_to_search, loc='right') ax.xaxis.set_minor_locator(LinearLocator(numticks=len(xlabels))) ax.set_xticklabels(xlabels, minor=True, rotation='vertical', fontsize=6) ax.set_ylim([0, df_by_gene[to_plot].values.max()]) ax.tick_params(axis='x', labelsize=1) if len(corr_tup) > 15: l_labels = [str(x[0])+' '+"%.2f" % x[1] for x in corr_tup] ax.legend(l_labels, loc='upper left', bbox_to_anchor=(0.01, 1.05), ncol=6, prop={'size':6}) else: l_labels = [str(x[0])+' '+"%.2f" % x[1] for x in corr_tup] ax.legend(l_labels, loc='upper left', bbox_to_anchor=(0.01, 1.05), ncol=4, prop={'size':8}) fig = plt.gcf() fig.subplots_adjust(bottom=0.08, top=0.95, right=0.98, left=0.03) plt.savefig(os.path.join(filename, title+'_corr_with_'+term_to_search+'.pdf'), bbox_inches='tight') plt.close() except KeyError: print term_to_search+' not in this matrix' pass def cell_color_map(cell_group_filename): colors = ['b', 'g', 'r', 'm', 'c', 'orange', 'darkslateblue'] markers = ['o', 'v','D','*','x','h', 's'] cell_groups_df = pd.read_csv(os.path.join(path_to_file, cell_group_filename), delimiter= '\t') label_map = {} for i, col in enumerate(cell_groups_df.columns.tolist()): for cell in cell_groups_df[col]: if str(cell) != 'nan': label_map[cell] = (colors[i],markers[i],col) print label_map return label_map def multi_group_sig(full_by_cell_df, cell_group_filename): cell_groups_df = pd.read_csv(os.path.join(path_to_file, cell_group_filename), delimiter= '\t') group_name_list = cell_groups_df.columns.tolist() group_pairs = list(set(itertools.permutations(group_name_list,2))) gene_list = full_by_cell_df.index.tolist() print group_pairs for gp in group_pairs: g_pvalue_dict = {} index_list = [] sig_gene_list = [] cell_list1 = [c for c in cell_groups_df[gp[0]].tolist() if str(c) != 'nan'] cell_list2 = [c for c in cell_groups_df[gp[1]].tolist() if str(c) != 'nan'] df_by_cell_1 = full_by_cell_df[cell_list1] df_by_cell_2 = full_by_cell_df[cell_list2] df_by_gene_1 = df_by_cell_1.transpose() df_by_gene_2 = df_by_cell_2.transpose() for g in gene_list: g_pvalue = scipy.stats.f_oneway(df_by_gene_1[g], df_by_gene_2[g]) if g_pvalue[0] > 0 and g_pvalue[1] <= 1: g_pvalue_dict[g] = g_pvalue if g not in [s[0] for s in sig_gene_list]: sig_gene_list.append([g, g_pvalue[1]]) sig_gene_list.sort(key=lambda tup: tup[1]) pvalues = [p[1] for p in sig_gene_list] gene_index = [ge[0] for ge in sig_gene_list] by_gene_df = full_by_cell_df.transpose() mean_log2_exp_list = [] sig_1_2_list = [] mean1_list = [] mean2_list = [] for sig_gene in gene_index: sig_gene_df = by_gene_df[sig_gene] mean_log2_exp_list.append(sig_gene_df.mean()) 
sig_cell_df = sig_gene_df.transpose() mean_cell1 = sig_cell_df[cell_list1].mean() mean1_list.append(mean_cell1) mean_cell2 = sig_cell_df[cell_list2].mean() mean2_list.append(mean_cell2) ratio_1_2 = (mean_cell1+1)/(mean_cell2+1) sig_1_2_list.append(ratio_1_2) sig_df = pd.DataFrame({'pvalues':pvalues,'mean_all':mean_log2_exp_list,'mean_group1':mean1_list, 'mean_group2':mean2_list, 'ratio_1_2':sig_1_2_list}, index=gene_index) cell_names_df = pd.DataFrame({'cells1':pd.Series(cell_list1, index=range(len(cell_list1))), 'cells2':pd.Series(cell_list2, index=range(len(cell_list2)))}) sig_df.to_csv(os.path.join(filename,'sig_'+gp[0]+'_'+gp[1]+'_pvalues.txt'), sep = '\t') cell_names_df.to_csv(os.path.join(filename,'sig_'+gp[0]+'_'+gp[1]+'_cells.txt'), sep = '\t') gene_number= select_gene_number log2_expdf_cell, log2_expdf_gene = log2_oulierfilter(df_by_cell, plot=False) if test_clust_stability: stability_ratio = clust_stability(log2_expdf_gene) #cc_gene_df = cell_cycle(hu_cc_gene_df, log2_expdf_gene) if group_file: label_map = cell_color_map(cell_group_filename) else: label_map=False if group_sig_test: multi_group_sig(log2_expdf_cell, cell_group_filename) top_pca = plot_PCA(log2_expdf_gene, num_genes=gene_number, title='all_cells_pca', plot=False, label_map=label_map) top_pca_by_gene = log2_expdf_gene[top_pca] top_pca_by_cell = top_pca_by_gene.transpose() cell_linkage, plotted_df_by_gene, col_order = clust_heatmap(top_pca, top_pca_by_gene, num_to_plot=gene_number, label_map=label_map) #cell_dist, row_dist, row_clusters, link_mat, row_dendr = run_cluster(top_pca_by_gene) cc = make_tree_json(cell_linkage, plotted_df_by_gene) make_subclusters(cc, log2_expdf_cell, gene_corr_list=['KRT19']) sig_gene_list = find_twobytwo(cc, top_pca_by_cell, log2_expdf_cell) #augmented_dendrogram(row_clusters, labels=top_pca_by_cell.columns.tolist(), leaf_rotation=90, leaf_font_size=8)
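The significance testing in find_twobytwo() and multi_group_sig() above reduces, per gene, to a one-way ANOVA between two groups of cells followed by ranking on p-value. The sketch below reproduces just that pattern on synthetic data; the gene names and group sizes are made up and none of the script's input files are used.

# Minimal, self-contained illustration of the per-gene f_oneway test used above.
# All data are synthetic; gene names are placeholders.
import numpy as np
import pandas as pd
from scipy import stats

rng = np.random.RandomState(0)
genes = ['GeneA', 'GeneB', 'GeneC']
group1 = pd.DataFrame(rng.normal(5, 1, size=(10, 3)), columns=genes)   # 10 cells
group2 = pd.DataFrame(rng.normal(7, 1, size=(12, 3)), columns=genes)   # 12 cells
group2['GeneC'] = rng.normal(5, 1, size=12)   # keep one gene non-significant

results = []
for g in genes:
    f_stat, p_val = stats.f_oneway(group1[g], group2[g])
    results.append((g, p_val))

results.sort(key=lambda tup: tup[1])   # same ranking the script writes to sig_*_pvalues.txt
for gene, p in results:
    print('%s\tp=%.3g' % (gene, p))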
Stingray Qello is the only place to watch the most amazing moments in music. It's the never-ending concert through the decades to today. Carefully curated, and personalized for the most optimal user experience, Qello is the leading source for streaming full-length HD concerts and music documentaries on demand. It's like having thousands of the greatest artists' best performances at your fingertips. Discover incredible performances and relive your favorite artists. The highest-quality music experience is yours to enjoy whenever you want, from your couch or on the go. How does Stingray Qello work? Access Stingray Qello through your library's website and log in with an RBdigital account or create a new account. Your access to Stingray Qello includes a 30-day activation period for all content on all supported devices. You'll be notified when you need to return to your library's Stingray Qello portal to activate another 30-day period (if available). After you set up an account, download the app from your app store and start watching concerts and music documentaries anywhere, anytime. Where is Stingray Qello available? All iOS devices (iPhone, iPad, iPod Touch, Apple TV), all Android devices (mobile phones, tablets, and Google TV), Sony TV, Playstation*, Windows 7 & 8 mobile devices, Amazon (Fire TV, Kindle Fire TV), Roku, and computers at qello.com. *This application is no longer available in North and South America. How much does Qello cost? There is no cost to use Stingray Qello through your local library. Your library has a limited number of pre-paid activation passes, which you check out through your library's Stingray Qello site. Your access to Stingray Qello includes a 30-day access period to all content on all supported devices. You'll be notified when you need to return to your library's Stingray Qello portal to activate another 30-day period (if available). We sure do! It's called The Soundboard. Legendary rock journalist from Rolling Stone magazine, Ben Fong-Torres, is our senior editor! Remember Ben's character in Almost Famous? Ben writes incredible posts tying assets in our catalog to his personal experiences interviewing these artists. Other contributors from Team Q write about news and features within our service. Help, I forgot my username or password. What do I do? If you have forgotten your username and password, click on the "Forgot your password" link below the Log In button. If you have further issues, please contact the help desk at [email protected] . What's the difference between a regular account and an All-Access Pass? An All-Access Pass is a subscription to the actual Qello service, unlocking all content across all devices. A regular account is a user-created account that has no paid or active mechanism attached (i.e., a regular account holder will not have access to any of the premium content or features). All library patron access includes the All-Access Pass. How do I link through Facebook? How do I keep Qello private from Facebook? When you sign in, simply click "Sign in with Facebook" and make the experience social. This allows you to easily share your Qello concert experiences with friends and fellow music lovers. If you don't want to share your Qello experience with your social network, no problem, just sign in to your account with your username and password and don’t click "Sign in with Facebook." How do I cancel my All-Access Pass? Of course we want you to stay and enjoy the Stingray Qello experience. But if you must go, we make it simple. 
Your access to Stingray Qello includes a 30-day activation period to all content on all supported devices. You'll be notified when you need to return to your library's Stingray Qello portal to activate another 30-day period (if available). If you do nothing, your activation period will simply expire. You can change your account information here: www.qello.com/account . Your library account is managed through your library's Stingray Qello site, so make sure you change any information in both locations to keep your account synched. How can I edit/remove my payment information? Library patron accounts do not require payment information. If you do choose to add information for personal gift purchases for your friends who don't have Stingray Qello through their library, you can manage your payment information here: www.qello.com/account . My iOS auto-renewal was turned off. What do I do? Auto-renewal is not a function of the library version of Stingray Qello. Library patrons with active accounts in certain regions might see that your All-Access Pass subscription has been turned off. Can I give Stingray Qello as a gift? My question is not covered in these FAQs. How do I get support? Library patrons can get support in two ways. If you need help with setting up your library account and activation access, contact your library help desk or click on the Help link in your Library's Stingray Qello Help tab. If you have a library patron account in Qello.com and need help, go to the footer of Qello.com and select the help option that best suits your needs.
""" Device class to control the connected Android device through Android Debug Bridge. """ import subprocess import os class Device: def __init__(self, device_id): self.__device_id = device_id # Device properties def get_id(self): return self.__device_id def get_screen_density(self): output = subprocess.check_output("adb -s " + self.__device_id + " shell wm density") return int(output.strip().split(":")[1].strip()) def get_screen_size(self): output = subprocess.check_output("adb -s " + self.__device_id + " shell wm size") size = output.strip().split(":")[1].strip() sizes = size.split("x") return [int(sizes[0]), int(sizes[1])] def set_screen_density(self, density): subprocess.call("adb -s " + self.__device_id + " shell wm density " + str(density)) # Installation def install_apk(self, apk_path): print("adb -s " + self.__device_id + " install " + str(apk_path)) subprocess.call("adb -s " + self.__device_id + " install " + str(apk_path)) # Control def tap(self, x, y): subprocess.call("adb " + "-s " + self.__device_id + " shell input tap " + str(x) + " " + str(y)) def tap_back(self): subprocess.call("adb " + "-s " + self.__device_id + " shell input keyevent " + "KEYCODE_BACK") def tap_home(self): subprocess.call("adb " + "-s " + self.__device_id + " shell input keyevent --longpress " + "KEYCODE_HOME") def tap_menu(self): subprocess.call("adb " + "-s " + self.__device_id + " shell input keyevent " + "KEYCODE_MENU") def swipe(self, x1, y1, x2, y2, duration): subprocess.call("adb " + "-s " + self.__device_id + " shell input swipe " + str(x1) + " " + str(y1) + " " + str(x2) + " " + str(y2) + " " + str(duration)) def long_press(self, x, y, duration): self.swipe(x, y, x, y, duration=duration) # Screen capture def take_screenshot(self, name, dst_path=os.path.abspath(os.path.dirname(__file__))): subprocess.call("adb " + "-s " + self.__device_id + " shell screencap /sdcard/" + name + ".png") subprocess.call("adb " + "-s " + self.__device_id + " pull /sdcard/" + name + ".png " + dst_path) subprocess.call("adb " + "-s " + self.__device_id + " shell rm /sdcard/" + name + ".png") def record_screen(self, name, dst_path=os.path.abspath(os.path.dirname(__file__)), time=10): subprocess.call("adb " + "-s " + self.__device_id + " shell screenrecord --time-limit " + str( time) + " /sdcard/" + name + ".mp4") subprocess.call("adb " + "-s " + self.__device_id + " pull /sdcard/" + name + ".mp4 " + dst_path) subprocess.call("adb " + "-s " + self.__device_id + " shell rm /sdcard/" + name + ".mp4")
The VigorIPPBX 2820 offers a complete replacement for a traditional office phone system and analogue or ISDN lines. An IP-PBX uses your existing network and the Internet to provide company-wide telephony, using VoIP technology instead of regular phone lines or a traditional PBX. This can cut line rental, call costs, and maintenance while adding flexibility and efficiency. IP phones can be placed anywhere in your office, or remotely around the world, to form one seamless phone network for your business: wherever you have an Internet connection, you can have an extension on your PBX. Pure IP calls are free of charge; calls to the conventional phone network (PSTN) are routed via our SIP trunk at low cost.